Diffstat (limited to 'mm')
-rw-r--r--   mm/memory_hotplug.c     1
-rw-r--r--   mm/mempolicy.c        128
-rw-r--r--   mm/page_alloc.c        17
-rw-r--r--   mm/rmap.c               3
-rw-r--r--   mm/slab.c             122
-rw-r--r--   mm/swap.c              25
-rw-r--r--   mm/vmscan.c             3
7 files changed, 229 insertions, 70 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a918f77f02f3..1fe76d963ac2 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -130,6 +130,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 		onlined_pages++;
 	}
 	zone->present_pages += onlined_pages;
+	zone->zone_pgdat->node_present_pages += onlined_pages;
 
 	setup_per_zone_pages_min();
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1a210088ad80..954981b14303 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -197,7 +197,7 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
 	return policy;
 }
 
-static void gather_stats(struct page *, void *);
+static void gather_stats(struct page *, void *, int pte_dirty);
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
 
@@ -239,7 +239,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			continue;
 
 		if (flags & MPOL_MF_STATS)
-			gather_stats(page, private);
+			gather_stats(page, private, pte_dirty(*pte));
 		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 			migrate_page_add(page, private, flags);
 		else
@@ -1753,67 +1753,145 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 struct numa_maps {
 	unsigned long pages;
 	unsigned long anon;
-	unsigned long mapped;
+	unsigned long active;
+	unsigned long writeback;
 	unsigned long mapcount_max;
+	unsigned long dirty;
+	unsigned long swapcache;
 	unsigned long node[MAX_NUMNODES];
 };
 
-static void gather_stats(struct page *page, void *private)
+static void gather_stats(struct page *page, void *private, int pte_dirty)
 {
 	struct numa_maps *md = private;
 	int count = page_mapcount(page);
 
-	if (count)
-		md->mapped++;
-
-	if (count > md->mapcount_max)
-		md->mapcount_max = count;
-
-	md->pages++;
+	md->pages++;
+	if (pte_dirty || PageDirty(page))
+		md->dirty++;
+
+	if (PageSwapCache(page))
+		md->swapcache++;
+
+	if (PageActive(page))
+		md->active++;
+
+	if (PageWriteback(page))
+		md->writeback++;
 
 	if (PageAnon(page))
 		md->anon++;
 
+	if (count > md->mapcount_max)
+		md->mapcount_max = count;
+
 	md->node[page_to_nid(page)]++;
 	cond_resched();
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static void check_huge_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end,
+		struct numa_maps *md)
+{
+	unsigned long addr;
+	struct page *page;
+
+	for (addr = start; addr < end; addr += HPAGE_SIZE) {
+		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
+		pte_t pte;
+
+		if (!ptep)
+			continue;
+
+		pte = *ptep;
+		if (pte_none(pte))
+			continue;
+
+		page = pte_page(pte);
+		if (!page)
+			continue;
+
+		gather_stats(page, md, pte_dirty(*ptep));
+	}
+}
+#else
+static inline void check_huge_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end,
+		struct numa_maps *md)
+{
+}
+#endif
+
 int show_numa_map(struct seq_file *m, void *v)
 {
 	struct task_struct *task = m->private;
 	struct vm_area_struct *vma = v;
 	struct numa_maps *md;
+	struct file *file = vma->vm_file;
+	struct mm_struct *mm = vma->vm_mm;
 	int n;
 	char buffer[50];
 
-	if (!vma->vm_mm)
+	if (!mm)
 		return 0;
 
 	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
 	if (!md)
 		return 0;
 
-	if (!is_vm_hugetlb_page(vma))
+	mpol_to_str(buffer, sizeof(buffer),
+			get_vma_policy(task, vma, vma->vm_start));
+
+	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
+
+	if (file) {
+		seq_printf(m, " file=");
+		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n\t= ");
+	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+		seq_printf(m, " heap");
+	} else if (vma->vm_start <= mm->start_stack &&
+			vma->vm_end >= mm->start_stack) {
+		seq_printf(m, " stack");
+	}
+
+	if (is_vm_hugetlb_page(vma)) {
+		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
+		seq_printf(m, " huge");
+	} else {
 		check_pgd_range(vma, vma->vm_start, vma->vm_end,
 			&node_online_map, MPOL_MF_STATS, md);
+	}
 
-	if (md->pages) {
-		mpol_to_str(buffer, sizeof(buffer),
-				get_vma_policy(task, vma, vma->vm_start));
-
-		seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu",
-			vma->vm_start, buffer, md->pages,
-			md->mapped, md->mapcount_max);
-
-		if (md->anon)
-			seq_printf(m," anon=%lu",md->anon);
-
-		for_each_online_node(n)
-			if (md->node[n])
-				seq_printf(m, " N%d=%lu", n, md->node[n]);
-
-		seq_putc(m, '\n');
-	}
+	if (!md->pages)
+		goto out;
+
+	if (md->anon)
+		seq_printf(m," anon=%lu",md->anon);
+
+	if (md->dirty)
+		seq_printf(m," dirty=%lu",md->dirty);
+
+	if (md->pages != md->anon && md->pages != md->dirty)
+		seq_printf(m, " mapped=%lu", md->pages);
+
+	if (md->mapcount_max > 1)
+		seq_printf(m, " mapmax=%lu", md->mapcount_max);
+
+	if (md->swapcache)
+		seq_printf(m," swapcache=%lu", md->swapcache);
+
+	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
+		seq_printf(m," active=%lu", md->active);
+
+	if (md->writeback)
+		seq_printf(m," writeback=%lu", md->writeback);
+
+	for_each_online_node(n)
+		if (md->node[n])
+			seq_printf(m, " N%d=%lu", n, md->node[n]);
+out:
+	seq_putc(m, '\n');
 	kfree(md);
 
 	if (m->count < m->size)
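
With the seq_printf() calls above, each /proc/<pid>/numa_maps line now starts with the VMA address and policy, followed by an optional file/heap/stack/huge tag and the new per-VMA counters. A hypothetical example of the resulting output (addresses, path and counts are made up for illustration):

	2aaaaadb9000 default file=/lib/libc-2.3.6.so mapped=36 mapmax=32 N0=24 N1=12
	2aaaaaf24000 default heap anon=14 dirty=14 active=10 N0=14
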
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 791690d7d3fa..234bd4895d14 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -590,21 +590,20 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 }
 
 #ifdef CONFIG_NUMA
-/* Called from the slab reaper to drain remote pagesets */
-void drain_remote_pages(void)
+/*
+ * Called from the slab reaper to drain pagesets on a particular node that
+ * belong to the currently executing processor.
+ */
+void drain_node_pages(int nodeid)
 {
-	struct zone *zone;
-	int i;
+	int i, z;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	for_each_zone(zone) {
+	for (z = 0; z < MAX_NR_ZONES; z++) {
+		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
 		struct per_cpu_pageset *pset;
 
-		/* Do not drain local pagesets */
-		if (zone->zone_pgdat->node_id == numa_node_id())
-			continue;
-
 		pset = zone_pcp(zone, smp_processor_id());
 		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
 			struct per_cpu_pages *pcp;
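
drain_node_pages() only drains the per-cpu pagesets owned by the CPU it runs on, so the caller must be pinned to that CPU and is expected to skip its own node. The slab reaper added in the mm/slab.c hunks below does roughly this:

	int node = __get_cpu_var(reap_node);	/* node this CPU is currently reaping */

	if (node != numa_node_id())		/* never drain the local node's pagesets here */
		drain_node_pages(node);
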
diff --git a/mm/rmap.c b/mm/rmap.c
index d8ce5ff61454..67f0e20b101f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -537,9 +537,6 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-	BUG_ON(PageAnon(page));
-	BUG_ON(!pfn_valid(page_to_pfn(page)));
-
 	if (atomic_inc_and_test(&page->_mapcount))
 		__inc_page_state(nr_mapped);
 }
diff --git a/mm/slab.c b/mm/slab.c
index add05d808a4a..d0bd7f07ab04 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -789,6 +789,47 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, char *
 	dump_stack();
 }
 
+#ifdef CONFIG_NUMA
+/*
+ * Special reaping functions for NUMA systems called from cache_reap().
+ * These take care of doing round robin flushing of alien caches (containing
+ * objects freed on different nodes from which they were allocated) and the
+ * flushing of remote pcps by calling drain_node_pages.
+ */
+static DEFINE_PER_CPU(unsigned long, reap_node);
+
+static void init_reap_node(int cpu)
+{
+	int node;
+
+	node = next_node(cpu_to_node(cpu), node_online_map);
+	if (node == MAX_NUMNODES)
+		node = 0;
+
+	__get_cpu_var(reap_node) = node;
+}
+
+static void next_reap_node(void)
+{
+	int node = __get_cpu_var(reap_node);
+
+	/*
+	 * Also drain per cpu pages on remote zones
+	 */
+	if (node != numa_node_id())
+		drain_node_pages(node);
+
+	node = next_node(node, node_online_map);
+	if (unlikely(node >= MAX_NUMNODES))
+		node = first_node(node_online_map);
+	__get_cpu_var(reap_node) = node;
+}
+
+#else
+#define init_reap_node(cpu) do { } while (0)
+#define next_reap_node(void) do { } while (0)
+#endif
+
 /*
  * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
  * via the workqueue/eventd.
@@ -806,6 +847,7 @@ static void __devinit start_cpu_timer(int cpu)
 	 * at that time.
 	 */
 	if (keventd_up() && reap_work->func == NULL) {
+		init_reap_node(cpu);
 		INIT_WORK(reap_work, cache_reap, NULL);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
@@ -884,6 +926,23 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
+/*
+ * Called from cache_reap() to regularly drain alien caches round robin.
+ */
+static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+{
+	int node = __get_cpu_var(reap_node);
+
+	if (l3->alien) {
+		struct array_cache *ac = l3->alien[node];
+		if (ac && ac->avail) {
+			spin_lock_irq(&ac->lock);
+			__drain_alien_cache(cachep, ac, node);
+			spin_unlock_irq(&ac->lock);
+		}
+	}
+}
+
 static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
 {
 	int i = 0;
@@ -902,6 +961,7 @@ static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **al
 #else
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
+#define reap_alien(cachep, l3) do { } while (0)
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
@@ -1124,6 +1184,7 @@ void __init kmem_cache_init(void)
 	struct cache_sizes *sizes;
 	struct cache_names *names;
 	int i;
+	int order;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1167,11 +1228,15 @@ void __init kmem_cache_init(void)
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
 
-	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
-			&left_over, &cache_cache.num);
+	for (order = 0; order < MAX_ORDER; order++) {
+		cache_estimate(order, cache_cache.buffer_size,
+			cache_line_size(), 0, &left_over, &cache_cache.num);
+		if (cache_cache.num)
+			break;
+	}
 	if (!cache_cache.num)
 		BUG();
-
+	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
 				sizeof(struct slab), cache_line_size());
@@ -1628,36 +1693,44 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep,
 		size_t size, size_t align, unsigned long flags)
 {
 	size_t left_over = 0;
+	int gfporder;
 
-	for (;; cachep->gfporder++) {
+	for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) {
 		unsigned int num;
 		size_t remainder;
 
-		if (cachep->gfporder > MAX_GFP_ORDER) {
-			cachep->num = 0;
-			break;
-		}
-
-		cache_estimate(cachep->gfporder, size, align, flags,
-				&remainder, &num);
+		cache_estimate(gfporder, size, align, flags, &remainder, &num);
 		if (!num)
 			continue;
+
 		/* More than offslab_limit objects will cause problems */
-		if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
+		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
 			break;
 
+		/* Found something acceptable - save it away */
 		cachep->num = num;
+		cachep->gfporder = gfporder;
 		left_over = remainder;
 
 		/*
+		 * A VFS-reclaimable slab tends to have most allocations
+		 * as GFP_NOFS and we really don't want to have to be allocating
+		 * higher-order pages when we are unable to shrink dcache.
+		 */
+		if (flags & SLAB_RECLAIM_ACCOUNT)
+			break;
+
+		/*
 		 * Large number of objects is good, but very large slabs are
 		 * currently bad for the gfp()s.
 		 */
-		if (cachep->gfporder >= slab_break_gfp_order)
+		if (gfporder >= slab_break_gfp_order)
 			break;
 
-		if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
-			/* Acceptable internal fragmentation */
+		/*
+		 * Acceptable internal fragmentation?
+		 */
+		if ((left_over * 8) <= (PAGE_SIZE << gfporder))
 			break;
 	}
 	return left_over;
@@ -1869,17 +1942,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	size = ALIGN(size, align);
 
-	if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
-		/*
-		 * A VFS-reclaimable slab tends to have most allocations
-		 * as GFP_NOFS and we really don't want to have to be allocating
-		 * higher-order pages when we are unable to shrink dcache.
-		 */
-		cachep->gfporder = 0;
-		cache_estimate(cachep->gfporder, size, align, flags,
-				&left_over, &cachep->num);
-	} else
-		left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, align, flags);
 
 	if (!cachep->num) {
 		printk("kmem_cache_create: couldn't create cache %s.\n", name);
@@ -2554,7 +2617,7 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2554 "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n", 2617 "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2555 cachep->name, cachep->num, slabp, slabp->inuse); 2618 cachep->name, cachep->num, slabp, slabp->inuse);
2556 for (i = 0; 2619 for (i = 0;
2557 i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t); 2620 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2558 i++) { 2621 i++) {
2559 if ((i % 16) == 0) 2622 if ((i % 16) == 0)
2560 printk("\n%03x:", i); 2623 printk("\n%03x:", i);
@@ -3494,8 +3557,7 @@ static void cache_reap(void *unused)
 		check_irq_on();
 
 		l3 = searchp->nodelists[numa_node_id()];
-		if (l3->alien)
-			drain_alien_cache(searchp, l3->alien);
+		reap_alien(searchp, l3);
 		spin_lock_irq(&l3->list_lock);
 
 		drain_array_locked(searchp, cpu_cache_get(searchp), 0,
@@ -3545,7 +3607,7 @@ static void cache_reap(void *unused)
 	}
 	check_irq_on();
 	mutex_unlock(&cache_chain_mutex);
-	drain_remote_pages();
+	next_reap_node();
 	/* Setup the next iteration */
 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
 }
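
Illustration only (hypothetical 4-node topology, not part of the patch): with nodes 0-3 online, a CPU sitting on node 1 starts at reap_node = 2 after init_reap_node(), and successive cache_reap() passes then proceed roughly as:

	pass 1: reap_alien() drains l3->alien[2], next_reap_node() calls drain_node_pages(2), reap_node -> 3
	pass 2: alien cache for node 3 drained, drain_node_pages(3), reap_node wraps to 0 via first_node()
	pass 3: alien cache for node 0 drained, drain_node_pages(0), reap_node -> 1
	pass 4: reap_node is the local node, so the page drain is skipped, reap_node -> 2

Each timer pass therefore touches at most one remote node instead of flushing every alien cache and every remote pageset at once.
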
diff --git a/mm/swap.c b/mm/swap.c
index cce3dda59c59..e9ec06d845e8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -489,13 +489,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount)
 	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
+		*pcount = 0;
 		spin_unlock(&fbc->lock);
-		count = 0;
+	} else {
+		*pcount = count;
 	}
-	*pcount = count;
 	put_cpu();
 }
 EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
 #endif
 
 /*
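
percpu_counter_sum() trades speed for accuracy against the existing lock-free read. A minimal usage sketch (the counter name and the init call are illustrative, not part of the patch):

	struct percpu_counter nr_things;	/* assume percpu_counter_init() has been run */

	percpu_counter_mod(&nr_things, 1);	/* cheap: usually only this CPU's counter moves */

	/* fast, but may lag behind by roughly NR_CPUS * FBC_BATCH */
	long approx = percpu_counter_read_positive(&nr_things);

	/* slow but exact: takes fbc->lock and folds in every CPU's delta */
	long exact = percpu_counter_sum(&nr_things);
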
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b0af7593d01e..7ccf763bb30b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1883,7 +1883,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 	if (!(gfp_mask & __GFP_WAIT) ||
 		zone->all_unreclaimable ||
-		atomic_read(&zone->reclaim_in_progress) > 0)
+		atomic_read(&zone->reclaim_in_progress) > 0 ||
+		(p->flags & PF_MEMALLOC))
 		return 0;
 
 	node_id = zone->zone_pgdat->node_id;