author     Al Viro <viro@zeniv.linux.org.uk>  2016-10-10 23:02:51 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>  2016-10-10 23:02:51 -0400
commit     3873691e5ab34fa26948643d038a2b98c4437298 (patch)
tree       5327469194c2167830bce38b56a618b754cdbeea /mm
parent     c2050a454c7f123d7a57fa1d76ff61bd43643abb (diff)
parent     aadfa8019e8114539cfa0b1eb2e5a9c83094a590 (diff)
Merge remote-tracking branch 'ovl/rename2' into for-linus
Diffstat (limited to 'mm')
-rw-r--r--  mm/debug.c           |  6
-rw-r--r--  mm/huge_memory.c     |  3
-rw-r--r--  mm/khugepaged.c      | 25
-rw-r--r--  mm/memcontrol.c      | 31
-rw-r--r--  mm/memory.c          | 12
-rw-r--r--  mm/memory_hotplug.c  |  4
-rw-r--r--  mm/page_io.c         |  3
-rw-r--r--  mm/shmem.c           |  7
-rw-r--r--  mm/swapfile.c        |  1
-rw-r--r--  mm/usercopy.c        |  5
-rw-r--r--  mm/vmscan.c          | 19
11 files changed, 60 insertions, 56 deletions
diff --git a/mm/debug.c b/mm/debug.c
index 8865bfb41b0b..74c7cae4f683 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -42,9 +42,11 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+	int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+
 	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
-		  page, page_ref_count(page), page_mapcount(page),
-		  page->mapping, page->index);
+		  page, page_ref_count(page), mapcount,
+		  page->mapping, page_to_pgoff(page));
 	if (PageCompound(page))
 		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
 	pr_cont("\n");
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a6abd76baa72..53ae6d00656a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1138,9 +1138,6 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	bool was_writable;
 	int flags = 0;
 
-	/* A PROT_NONE fault should not end up here */
-	BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
-
 	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
 	if (unlikely(!pmd_same(pmd, *fe->pmd)))
 		goto out_unlock;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 79c52d0061af..728d7790dc2d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -838,7 +838,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
  * value (scan code).
  */
 
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
+static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+		struct vm_area_struct **vmap)
 {
 	struct vm_area_struct *vma;
 	unsigned long hstart, hend;
@@ -846,7 +847,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
 	if (unlikely(khugepaged_test_exit(mm)))
 		return SCAN_ANY_PROCESS;
 
-	vma = find_vma(mm, address);
+	*vmap = vma = find_vma(mm, address);
 	if (!vma)
 		return SCAN_VMA_NULL;
 
@@ -881,6 +882,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		.pmd = pmd,
 	};
 
+	/* we only decide to swapin, if there is enough young ptes */
+	if (referenced < HPAGE_PMD_NR/2) {
+		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+		return false;
+	}
 	fe.pte = pte_offset_map(pmd, address);
 	for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 			fe.pte++, fe.address += PAGE_SIZE) {
@@ -888,17 +894,12 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		if (!is_swap_pte(pteval))
 			continue;
 		swapped_in++;
-		/* we only decide to swapin, if there is enough young ptes */
-		if (referenced < HPAGE_PMD_NR/2) {
-			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-			return false;
-		}
 		ret = do_swap_page(&fe, pteval);
 
 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 		if (ret & VM_FAULT_RETRY) {
 			down_read(&mm->mmap_sem);
-			if (hugepage_vma_revalidate(mm, address)) {
+			if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
 				/* vma is no longer available, don't continue to swapin */
 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 				return false;
@@ -923,7 +924,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static void collapse_huge_page(struct mm_struct *mm,
 				   unsigned long address,
 				   struct page **hpage,
-				   struct vm_area_struct *vma,
 				   int node, int referenced)
 {
 	pmd_t *pmd, _pmd;
@@ -933,6 +933,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int isolated = 0, result = 0;
 	struct mem_cgroup *memcg;
+	struct vm_area_struct *vma;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
 	gfp_t gfp;
@@ -961,7 +962,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	}
 
 	down_read(&mm->mmap_sem);
-	result = hugepage_vma_revalidate(mm, address);
+	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result) {
 		mem_cgroup_cancel_charge(new_page, memcg, true);
 		up_read(&mm->mmap_sem);
@@ -994,7 +995,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	down_write(&mm->mmap_sem);
-	result = hugepage_vma_revalidate(mm, address);
+	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result)
 		goto out;
 	/* check if the pmd is still valid */
@@ -1202,7 +1203,7 @@ out_unmap:
 	if (ret) {
 		node = khugepaged_find_target_node();
 		/* collapse_huge_page will return with the mmap_sem released */
-		collapse_huge_page(mm, address, hpage, vma, node, referenced);
+		collapse_huge_page(mm, address, hpage, node, referenced);
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a6a51a7c416..4be518d4e68a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1740,17 +1740,22 @@ static DEFINE_MUTEX(percpu_charge_mutex);
 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	struct memcg_stock_pcp *stock;
+	unsigned long flags;
 	bool ret = false;
 
 	if (nr_pages > CHARGE_BATCH)
 		return ret;
 
-	stock = &get_cpu_var(memcg_stock);
+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
 		stock->nr_pages -= nr_pages;
 		ret = true;
 	}
-	put_cpu_var(memcg_stock);
+
+	local_irq_restore(flags);
+
 	return ret;
 }
 
1756 1761
@@ -1771,15 +1776,18 @@ static void drain_stock(struct memcg_stock_pcp *stock)
 	stock->cached = NULL;
 }
 
-/*
- * This must be called under preempt disabled or must be called by
- * a thread which is pinned to local cpu.
- */
 static void drain_local_stock(struct work_struct *dummy)
 {
-	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
+	struct memcg_stock_pcp *stock;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+	local_irq_restore(flags);
 }
 
 /*
1785/* 1793/*
@@ -1788,14 +1796,19 @@ static void drain_local_stock(struct work_struct *dummy)
  */
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
+	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
 		drain_stock(stock);
 		stock->cached = memcg;
 	}
 	stock->nr_pages += nr_pages;
-	put_cpu_var(memcg_stock);
+
+	local_irq_restore(flags);
 }
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index 83be99d9d8a1..793fe0f9841c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3351,9 +3351,6 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
 	bool was_writable = pte_write(pte);
 	int flags = 0;
 
-	/* A PROT_NONE fault should not end up here */
-	BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
-
 	/*
 	 * The "pte" at this point cannot be used safely without
 	 * validation through pte_unmap_same(). It's of NUMA type but
@@ -3458,6 +3455,11 @@ static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
 	return VM_FAULT_FALLBACK;
 }
 
+static inline bool vma_is_accessible(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3524,7 +3526,7 @@ static int handle_pte_fault(struct fault_env *fe)
 	if (!pte_present(entry))
 		return do_swap_page(fe, entry);
 
-	if (pte_protnone(entry))
+	if (pte_protnone(entry) && vma_is_accessible(fe->vma))
 		return do_numa_page(fe, entry);
 
 	fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
@@ -3590,7 +3592,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
 		barrier();
 		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
-			if (pmd_protnone(orig_pmd))
+			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
 				return do_huge_pmd_numa_page(&fe, orig_pmd);
 
 			if ((fe.flags & FAULT_FLAG_WRITE) &&
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 41266dc29f33..b58906b6215c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1567,7 +1567,9 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					next_node_in(nid, nmask));
 
-	node_clear(nid, nmask);
+	if (nid != next_node_in(nid, nmask))
+		node_clear(nid, nmask);
+
 	if (PageHighMem(page)
 	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
 		gfp_mask |= __GFP_HIGHMEM;
diff --git a/mm/page_io.c b/mm/page_io.c
index 16bd82fad38c..eafe5ddc2b54 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -264,6 +264,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 	int ret;
 	struct swap_info_struct *sis = page_swap_info(page);
 
+	BUG_ON(!PageSwapCache(page));
 	if (sis->flags & SWP_FILE) {
 		struct kiocb kiocb;
 		struct file *swap_file = sis->swap_file;
@@ -337,6 +338,7 @@ int swap_readpage(struct page *page)
 	int ret = 0;
 	struct swap_info_struct *sis = page_swap_info(page);
 
+	BUG_ON(!PageSwapCache(page));
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageUptodate(page), page);
 	if (frontswap_load(page) == 0) {
@@ -386,6 +388,7 @@ int swap_set_page_dirty(struct page *page)
 
 	if (sis->flags & SWP_FILE) {
 		struct address_space *mapping = sis->swap_file->f_mapping;
+		BUG_ON(!PageSwapCache(page));
 		return mapping->a_ops->set_page_dirty(page);
 	} else {
 		return __set_page_dirty_no_writeback(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 28ae88bbeffa..e7e8f8c210c1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -270,7 +270,7 @@ bool shmem_charge(struct inode *inode, long pages)
 		info->alloced -= pages;
 		shmem_recalc_inode(inode);
 		spin_unlock_irqrestore(&info->lock, flags);
-
+		shmem_unacct_blocks(info->flags, pages);
 		return false;
 	}
 	percpu_counter_add(&sbinfo->used_blocks, pages);
@@ -291,6 +291,7 @@ void shmem_uncharge(struct inode *inode, long pages)
 
 	if (sbinfo->max_blocks)
 		percpu_counter_sub(&sbinfo->used_blocks, pages);
+	shmem_unacct_blocks(info->flags, pages);
 }
 
 /*
@@ -1980,7 +1981,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
 				return addr;
 			sb = shm_mnt->mnt_sb;
 		}
-		if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
+		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
 			return addr;
 	}
 
@@ -3813,7 +3814,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
 	.mkdir		= shmem_mkdir,
 	.rmdir		= shmem_rmdir,
 	.mknod		= shmem_mknod,
-	.rename2	= shmem_rename2,
+	.rename		= shmem_rename2,
 	.tmpfile	= shmem_tmpfile,
 #endif
 #ifdef CONFIG_TMPFS_XATTR
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 78cfa292a29a..2657accc6e2b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2724,7 +2724,6 @@ int swapcache_prepare(swp_entry_t entry)
 struct swap_info_struct *page_swap_info(struct page *page)
 {
 	swp_entry_t swap = { .val = page_private(page) };
-	BUG_ON(!PageSwapCache(page));
 	return swap_info[swp_type(swap)];
 }
 
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 089328f2b920..3c8da0af9695 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -207,8 +207,11 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	 * Some architectures (arm64) return true for virt_addr_valid() on
 	 * vmalloced addresses. Work around this by checking for vmalloc
 	 * first.
+	 *
+	 * We also need to check for module addresses explicitly since we
+	 * may copy static data from modules to userspace
 	 */
-	if (is_vmalloc_addr(ptr))
+	if (is_vmalloc_or_module_addr(ptr))
 		return NULL;
 
 	if (!virt_addr_valid(ptr))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b1e12a1ea9cf..0fe8b7113868 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2303,23 +2303,6 @@ out:
 	}
 }
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void init_tlb_ubc(void)
-{
-	/*
-	 * This deliberately does not clear the cpumask as it's expensive
-	 * and unnecessary. If there happens to be data in there then the
-	 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
-	 * then will be cleared.
-	 */
-	current->tlb_ubc.flush_required = false;
-}
-#else
-static inline void init_tlb_ubc(void)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
 /*
  * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
  */
@@ -2355,8 +2338,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
 	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
 			 sc->priority == DEF_PRIORITY);
 
-	init_tlb_ubc();
-
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 	       nr[LRU_INACTIVE_FILE]) {