Diffstat (limited to 'mm')
-rw-r--r--	mm/backing-dev.c	3
-rw-r--r--	mm/hugetlb.c	5
-rw-r--r--	mm/memory.c	70
-rw-r--r--	mm/memory_hotplug.c	12
-rw-r--r--	mm/migrate.c	64
-rw-r--r--	mm/mlock.c	18
-rw-r--r--	mm/mmap.c	2
-rw-r--r--	mm/page_alloc.c	4
-rw-r--r--	mm/page_cgroup.c	63
-rw-r--r--	mm/slob.c	2
-rw-r--r--	mm/slub.c	8
-rw-r--r--	mm/sparse.c	2
-rw-r--r--	mm/swap.c	20
-rw-r--r--	mm/swapfile.c	9
-rw-r--r--	mm/vmalloc.c	43
-rw-r--r--	mm/vmscan.c	46
16 files changed, 221 insertions, 150 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f2e574dbc300..801c08b046e6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -176,6 +176,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 	int ret = 0;
 	struct device *dev;
 
+	if (bdi->dev)	/* The driver needs to use separate queues per device */
+		goto exit;
+
 	va_start(args, fmt);
 	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
 	va_end(args);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d143ab67be44..6058b53dcb89 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1796,6 +1796,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 				struct page *page, unsigned long address)
 {
+	struct hstate *h = hstate_vma(vma);
 	struct vm_area_struct *iter_vma;
 	struct address_space *mapping;
 	struct prio_tree_iter iter;
@@ -1805,7 +1806,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
 	 * from page cache lookup which is in HPAGE_SIZE units.
 	 */
-	address = address & huge_page_mask(hstate_vma(vma));
+	address = address & huge_page_mask(h);
 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
 		+ (vma->vm_pgoff >> PAGE_SHIFT);
 	mapping = (struct address_space *)page_private(page);
@@ -1824,7 +1825,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
 			unmap_hugepage_range(iter_vma,
-				address, address + HPAGE_SIZE,
+				address, address + huge_page_size(h),
 				page);
 	}
 
diff --git a/mm/memory.c b/mm/memory.c
index 164951c47305..f01b7eed6e16 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -669,6 +669,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (unlikely(is_pfn_mapping(vma))) {
+		/*
+		 * We do not free on error cases below as remove_vma
+		 * gets called on error from higher level routine
+		 */
+		ret = track_pfn_vma_copy(vma);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * We need to invalidate the secondary MMU mappings only when
 	 * there could be a permission downgrade on the ptes of the
@@ -915,6 +925,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
+		if (unlikely(is_pfn_mapping(vma)))
+			untrack_pfn_vma(vma, 0, 0);
+
 		while (start != end) {
 			if (!tlb_start_valid) {
 				tlb_start = start;
@@ -1430,6 +1443,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	int ret;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1444,7 +1458,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
@@ -1575,14 +1597,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
-	if (is_cow_mapping(vma->vm_flags)) {
-		if (addr != vma->vm_start || end != vma->vm_end)
-			return -EINVAL;
+	if (addr == vma->vm_start && end == vma->vm_end)
 		vma->vm_pgoff = pfn;
-	}
+	else if (is_cow_mapping(vma->vm_flags))
+		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	if (err)
+		return -EINVAL;
+
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
@@ -1594,6 +1619,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+
+	if (err)
+		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
@@ -2865,9 +2894,9 @@ int in_gate_area_no_task(unsigned long addr)
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static resource_size_t follow_phys(struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags,
-			unsigned long *prot)
+int follow_phys(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags,
+		unsigned long *prot, resource_size_t *phys)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -2876,24 +2905,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	resource_size_t phys_addr = 0;
 	struct mm_struct *mm = vma->vm_mm;
+	int ret = -EINVAL;
 
-	VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		goto out;
 
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto no_page_table;
+		goto out;
 
 	pud = pud_offset(pgd, address);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto no_page_table;
+		goto out;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto no_page_table;
+		goto out;
 
 	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
 	if (pmd_huge(*pmd))
-		goto no_page_table;
+		goto out;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
@@ -2908,13 +2939,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
 	*prot = pgprot_val(pte_pgprot(pte));
+	*phys = phys_addr;
+	ret = 0;
 
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
-	return phys_addr;
-no_page_table:
-	return 0;
+	return ret;
 }
 
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -2925,12 +2956,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 	void *maddr;
 	int offset = addr & (PAGE_SIZE-1);
 
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
-	phys_addr = follow_phys(vma, addr, write, &prot);
-
-	if (!phys_addr)
+	if (follow_phys(vma, addr, write, &prot, &phys_addr))
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6837a1014372..b17371185468 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -22,7 +22,6 @@
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/ioport.h>
-#include <linux/cpuset.h>
 #include <linux/delay.h>
 #include <linux/migrate.h>
 #include <linux/page-isolation.h>
@@ -190,7 +189,7 @@ static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
 					pgdat->node_start_pfn;
 }
 
-static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
@@ -217,7 +216,7 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	return 0;
 }
 
-static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
 	int nr_pages = PAGES_PER_SECTION;
 	int ret;
@@ -274,7 +273,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
  * call this function after deciding the zone to which to
  * add the new pages.
  */
-int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
+int __ref __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 		 unsigned long nr_pages)
 {
 	unsigned long i;
@@ -471,7 +470,8 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 }
 
 
-int add_memory(int nid, u64 start, u64 size)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+int __ref add_memory(int nid, u64 start, u64 size)
 {
 	pg_data_t *pgdat = NULL;
 	int new_pgdat = 0;
@@ -498,8 +498,6 @@ int add_memory(int nid, u64 start, u64 size)
 	/* we online node here. we can't roll back from here. */
 	node_set_online(nid);
 
-	cpuset_track_online_nodes();
-
 	if (new_pgdat) {
 		ret = register_one_node(nid);
 		/*
diff --git a/mm/migrate.c b/mm/migrate.c
index 385db89f0c33..037b0967c1e3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -522,15 +522,12 @@ static int writeout(struct address_space *mapping, struct page *page)
 	remove_migration_ptes(page, page);
 
 	rc = mapping->a_ops->writepage(page, &wbc);
-	if (rc < 0)
-		/* I/O Error writing */
-		return -EIO;
 
 	if (rc != AOP_WRITEPAGE_ACTIVATE)
 		/* unlocked. Relock */
 		lock_page(page);
 
-	return -EAGAIN;
+	return (rc < 0) ? -EIO : -EAGAIN;
 }
 
 /*
@@ -990,25 +987,18 @@ out:
 /*
  * Determine the nodes of an array of pages and store it in an array of status.
  */
-static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
-			 const void __user * __user *pages,
-			 int __user *status)
+static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
+				const void __user **pages, int *status)
 {
 	unsigned long i;
-	int err;
 
 	down_read(&mm->mmap_sem);
 
 	for (i = 0; i < nr_pages; i++) {
-		const void __user *p;
-		unsigned long addr;
+		unsigned long addr = (unsigned long)(*pages);
 		struct vm_area_struct *vma;
 		struct page *page;
-
-		err = -EFAULT;
-		if (get_user(p, pages+i))
-			goto out;
-		addr = (unsigned long) p;
+		int err = -EFAULT;
 
 		vma = find_vma(mm, addr);
 		if (!vma)
@@ -1027,12 +1017,52 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 
 		err = page_to_nid(page);
 set_status:
-		put_user(err, status+i);
+		*status = err;
+
+		pages++;
+		status++;
+	}
+
+	up_read(&mm->mmap_sem);
+}
+
+/*
+ * Determine the nodes of a user array of pages and store it in
+ * a user array of status.
+ */
+static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
+			 const void __user * __user *pages,
+			 int __user *status)
+{
+#define DO_PAGES_STAT_CHUNK_NR 16
+	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
+	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+	unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+	int err;
+
+	for (i = 0; i < nr_pages; i += chunk_nr) {
+		if (chunk_nr + i > nr_pages)
+			chunk_nr = nr_pages - i;
+
+		err = copy_from_user(chunk_pages, &pages[i],
+				     chunk_nr * sizeof(*chunk_pages));
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
+
+		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
+
+		err = copy_to_user(&status[i], chunk_status,
+				   chunk_nr * sizeof(*chunk_status));
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
 	}
 	err = 0;
 
 out:
-	up_read(&mm->mmap_sem);
 	return err;
 }
 
diff --git a/mm/mlock.c b/mm/mlock.c
index 008ea70b7afa..1ada366570cb 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -66,14 +66,10 @@ void __clear_page_mlock(struct page *page)
 		putback_lru_page(page);
 	} else {
 		/*
-		 * Page not on the LRU yet. Flush all pagevecs and retry.
+		 * We lost the race. the page already moved to evictable list.
 		 */
-		lru_add_drain_all();
-		if (!isolate_lru_page(page))
-			putback_lru_page(page);
-		else if (PageUnevictable(page))
+		if (PageUnevictable(page))
 			count_vm_event(UNEVICTABLE_PGSTRANDED);
-
 	}
 }
 
@@ -166,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	unsigned long addr = start;
 	struct page *pages[16]; /* 16 gives a reasonable batch */
 	int nr_pages = (end - start) / PAGE_SIZE;
-	int ret;
+	int ret = 0;
 	int gup_flags = 0;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
@@ -187,8 +183,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (vma->vm_flags & VM_WRITE)
 		gup_flags |= GUP_FLAGS_WRITE;
 
-	lru_add_drain_all();	/* push cached pages to LRU */
-
 	while (nr_pages > 0) {
 		int i;
 
@@ -251,8 +245,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		ret = 0;
 	}
 
-	lru_add_drain_all();	/* to update stats */
-
 	return ret;	/* count entire vma as locked_vm */
 }
 
@@ -546,6 +538,8 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
 	if (!can_do_mlock())
 		return -EPERM;
 
+	lru_add_drain_all();	/* flush pagevec */
+
 	down_write(&current->mm->mmap_sem);
 	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
 	start &= PAGE_MASK;
@@ -612,6 +606,8 @@ asmlinkage long sys_mlockall(int flags)
 	if (!can_do_mlock())
 		goto out;
 
+	lru_add_drain_all();	/* flush pagevec */
+
 	down_write(&current->mm->mmap_sem);
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
diff --git a/mm/mmap.c b/mm/mmap.c
index de14ac21e5b5..d4855a682ab6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1704,7 +1704,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	vma = find_vma_prev(mm, addr, &prev);
 	if (vma && (vma->vm_start <= addr))
 		return vma;
-	if (expand_stack(prev, addr))
+	if (!prev || expand_stack(prev, addr))
 		return NULL;
 	if (prev->vm_flags & VM_LOCKED) {
 		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 54069e64e3a8..d8ac01474563 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1561,6 +1561,10 @@ nofail_alloc:
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
+	/*
+	 * The task's cpuset might have expanded its set of allowable nodes
+	 */
+	cpuset_update_task_memory_state();
 	p->flags |= PF_MEMALLOC;
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index f59d797dc5a9..ab27ff750519 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -21,7 +21,7 @@ static unsigned long total_usage;
 #if !defined(CONFIG_SPARSEMEM)
 
 
-void __init pgdat_page_cgroup_init(struct pglist_data *pgdat)
+void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
 {
 	pgdat->node_page_cgroup = NULL;
 }
@@ -49,6 +49,9 @@ static int __init alloc_node_page_cgroup(int nid)
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
 
+	if (!nr_pages)
+		return 0;
+
 	table_size = sizeof(struct page_cgroup) * nr_pages;
 
 	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
@@ -97,7 +100,8 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 	return section->page_cgroup + pfn;
 }
 
-int __meminit init_section_page_cgroup(unsigned long pfn)
+/* __alloc_bootmem...() is protected by !slab_available() */
+int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
 	struct mem_section *section;
 	struct page_cgroup *base, *pc;
@@ -106,19 +110,29 @@ int __meminit init_section_page_cgroup(unsigned long pfn)
 
 	section = __pfn_to_section(pfn);
 
-	if (section->page_cgroup)
-		return 0;
-
-	nid = page_to_nid(pfn_to_page(pfn));
-
-	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-	if (slab_is_available()) {
-		base = kmalloc_node(table_size, GFP_KERNEL, nid);
-		if (!base)
-			base = vmalloc_node(table_size, nid);
-	} else {
-		base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
+	if (!section->page_cgroup) {
+		nid = page_to_nid(pfn_to_page(pfn));
+		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+		if (slab_is_available()) {
+			base = kmalloc_node(table_size, GFP_KERNEL, nid);
+			if (!base)
+				base = vmalloc_node(table_size, nid);
+		} else {
+			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+				table_size,
 				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+		}
+	} else {
+		/*
+		 * We don't have to allocate page_cgroup again, but
+		 * address of memmap may be changed. So, we have to initialize
+		 * again.
+		 */
+		base = section->page_cgroup + pfn;
+		table_size = 0;
+		/* check address of memmap is changed or not. */
+		if (base->page == pfn_to_page(pfn))
+			return 0;
 	}
 
 	if (!base) {
@@ -158,14 +172,14 @@ void __free_page_cgroup(unsigned long pfn)
 	}
 }
 
-int online_page_cgroup(unsigned long start_pfn,
+int __meminit online_page_cgroup(unsigned long start_pfn,
 			unsigned long nr_pages,
 			int nid)
 {
 	unsigned long start, end, pfn;
 	int fail = 0;
 
-	start = start_pfn & (PAGES_PER_SECTION - 1);
+	start = start_pfn & ~(PAGES_PER_SECTION - 1);
 	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
 
 	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
@@ -183,12 +197,12 @@ int online_page_cgroup(unsigned long start_pfn,
 	return -ENOMEM;
 }
 
-int offline_page_cgroup(unsigned long start_pfn,
+int __meminit offline_page_cgroup(unsigned long start_pfn,
 		unsigned long nr_pages, int nid)
 {
 	unsigned long start, end, pfn;
 
-	start = start_pfn & (PAGES_PER_SECTION - 1);
+	start = start_pfn & ~(PAGES_PER_SECTION - 1);
 	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
 
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
@@ -197,7 +211,7 @@ int offline_page_cgroup(unsigned long start_pfn,
 
 }
 
-static int page_cgroup_callback(struct notifier_block *self,
+static int __meminit page_cgroup_callback(struct notifier_block *self,
 			       unsigned long action, void *arg)
 {
 	struct memory_notify *mn = arg;
@@ -207,18 +221,23 @@ static int page_cgroup_callback(struct notifier_block *self,
 		ret = online_page_cgroup(mn->start_pfn,
 				mn->nr_pages, mn->status_change_nid);
 		break;
-	case MEM_CANCEL_ONLINE:
 	case MEM_OFFLINE:
 		offline_page_cgroup(mn->start_pfn,
 				mn->nr_pages, mn->status_change_nid);
 		break;
+	case MEM_CANCEL_ONLINE:
 	case MEM_GOING_OFFLINE:
 		break;
 	case MEM_ONLINE:
 	case MEM_CANCEL_OFFLINE:
 		break;
 	}
-	ret = notifier_from_errno(ret);
+
+	if (ret)
+		ret = notifier_from_errno(ret);
+	else
+		ret = NOTIFY_OK;
+
 	return ret;
 }
 
@@ -248,7 +267,7 @@ void __init page_cgroup_init(void)
248 " want\n"); 267 " want\n");
249} 268}
250 269
251void __init pgdat_page_cgroup_init(struct pglist_data *pgdat) 270void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
252{ 271{
253 return; 272 return;
254} 273}
diff --git a/mm/slob.c b/mm/slob.c
index cb675d126791..bf7e8fc3aed8 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-		flags, ARCH_KMALLOC_MINALIGN, -1);
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
 		c->name = name;
diff --git a/mm/slub.c b/mm/slub.c
index 7ad489af9561..a2cd47d89e0a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2931,8 +2931,10 @@ static int slab_memory_callback(struct notifier_block *self,
 	case MEM_CANCEL_OFFLINE:
 		break;
 	}
-
-	ret = notifier_from_errno(ret);
+	if (ret)
+		ret = notifier_from_errno(ret);
+	else
+		ret = NOTIFY_OK;
 	return ret;
 }
 
@@ -3595,7 +3597,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (len > PAGE_SIZE - 100)
+		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
 			break;
 		len += sprintf(buf + len, "%7ld ", l->count);
 
diff --git a/mm/sparse.c b/mm/sparse.c
index 39db301b920d..083f5b63e7a8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -570,7 +570,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 			   int nr_pages)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
diff --git a/mm/swap.c b/mm/swap.c
index 2152e48a7b8f..b135ec90cdeb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -299,7 +299,6 @@ void lru_add_drain(void)
 	put_cpu();
 }
 
-#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
@@ -313,18 +312,6 @@ int lru_add_drain_all(void)
 	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
-#else
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
-	lru_add_drain();
-	return 0;
-}
-#endif
-
 /*
  * Batched page_cache_release(). Decrement the reference count on all the
  * passed pages. If it fell to zero then remove the page from the LRU and
@@ -445,6 +432,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
+		int file;
 
 		if (pagezone != zone) {
 			if (zone)
@@ -456,8 +444,12 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		if (is_active_lru(lru))
+		file = is_file_lru(lru);
+		zone->recent_scanned[file]++;
+		if (is_active_lru(lru)) {
 			SetPageActive(page);
+			zone->recent_rotated[file]++;
+		}
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 90cb67a5417c..54a9f87e5162 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1462,6 +1462,15 @@ static int __init procswaps_init(void)
 __initcall(procswaps_init);
 #endif /* CONFIG_PROC_FS */
 
+#ifdef MAX_SWAPFILES_CHECK
+static int __init max_swapfiles_check(void)
+{
+	MAX_SWAPFILES_CHECK();
+	return 0;
+}
+late_initcall(max_swapfiles_check);
+#endif
+
 /*
  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
  *
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ba6b0f5f7fac..1ddb77ba3995 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -77,7 +77,6 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
-	flush_cache_vunmap(addr, end);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -324,14 +323,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	BUG_ON(size & ~PAGE_MASK);
 
-	addr = ALIGN(vstart, align);
-
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 retry:
+	addr = ALIGN(vstart, align);
+
 	spin_lock(&vmap_area_lock);
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
@@ -362,7 +361,7 @@ retry:
 			goto found;
 		}
 
-		while (addr + size >= first->va_start && addr + size <= vend) {
+		while (addr + size > first->va_start && addr + size <= vend) {
 			addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
 			n = rb_next(&first->rb_node);
@@ -522,24 +521,45 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
 	unsigned long start = ULONG_MAX, end = 0;
 
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	__purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
- * Free and unmap a vmap area
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
  */
-static void free_unmap_vmap_area(struct vmap_area *va)
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		purge_vmap_area_lazy();
+		try_purge_vmap_area_lazy();
+}
+
+/*
+ * Free and unmap a vmap area
+ */
+static void free_unmap_vmap_area(struct vmap_area *va)
+{
+	flush_cache_vunmap(va->va_start, va->va_end);
+	free_unmap_vmap_area_noflush(va);
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -723,7 +743,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
-	free_unmap_vmap_area(vb->va);
+	free_unmap_vmap_area_noflush(vb->va);
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -785,6 +805,9 @@ static void vb_free(const void *addr, unsigned long size)
 
 	BUG_ON(size & ~PAGE_MASK);
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+
+	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
+
 	order = get_order(size);
 
 	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
@@ -1694,7 +1717,7 @@ static int s_show(struct seq_file *m, void *p)
 		v->addr, v->addr + v->size, v->size);
 
 	if (v->caller) {
-		char buff[2 * KSYM_NAME_LEN];
+		char buff[KSYM_SYMBOL_LEN];
 
 		seq_putc(m, ' ');
 		sprint_symbol(buff, (unsigned long)v->caller);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3b5860294bb6..62e7f62fb559 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -623,6 +623,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!(sc->gfp_mask & __GFP_IO))
+				goto keep_locked;
 			switch (try_to_munlock(page)) {
 			case SWAP_FAIL:		/* shouldn't happen */
 			case SWAP_AGAIN:
@@ -634,6 +636,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
+			may_enter_fs = 1;
 		}
 #endif /* CONFIG_SWAP */
 
@@ -1245,6 +1248,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		list_add(&page->lru, &l_inactive);
 	}
 
+	spin_lock_irq(&zone->lru_lock);
 	/*
 	 * Count referenced pages from currently used mappings as
 	 * rotated, even though they are moved to the inactive list.
@@ -1260,7 +1264,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 	pgmoved = 0;
 	lru = LRU_BASE + file * LRU_FILE;
-	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1386,9 +1389,9 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	file_prio = 200 - sc->swappiness;
 
 	/*
-	 *                  anon       recent_rotated[0]
-	 * %anon = 100 * ----------- / ----------------- * IO cost
-	 *               anon + file       rotate_sum
+	 * The amount of pressure on anon vs file pages is inversely
+	 * proportional to the fraction of recently scanned pages on
+	 * each list that were recently referenced and in active use.
 	 */
 	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
 	ap /= zone->recent_rotated[0] + 1;
@@ -2368,39 +2371,6 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
-static void show_page_path(struct page *page)
-{
-	char buf[256];
-	if (page_is_file_cache(page)) {
-		struct address_space *mapping = page->mapping;
-		struct dentry *dentry;
-		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-		spin_lock(&mapping->i_mmap_lock);
-		dentry = d_find_alias(mapping->host);
-		printk(KERN_INFO "rescued: %s %lu\n",
-		       dentry_path(dentry, buf, 256), pgoff);
-		spin_unlock(&mapping->i_mmap_lock);
-	} else {
-#if defined(CONFIG_MM_OWNER) && defined(CONFIG_MMU)
-		struct anon_vma *anon_vma;
-		struct vm_area_struct *vma;
-
-		anon_vma = page_lock_anon_vma(page);
-		if (!anon_vma)
-			return;
-
-		list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-			printk(KERN_INFO "rescued: anon %s\n",
-			       vma->vm_mm->owner->comm);
-			break;
-		}
-		page_unlock_anon_vma(anon_vma);
-#endif
-	}
-}
-
-
 /**
  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
  * @page: page to check evictability and move to appropriate lru list
@@ -2421,8 +2391,6 @@ retry:
 	if (page_evictable(page, NULL)) {
 		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
 
-		show_page_path(page);
-
 		__dec_zone_state(zone, NR_UNEVICTABLE);
 		list_move(&page->lru, &zone->lru[l].list);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);