author	Mel Gorman <mgorman@techsingularity.net>	2016-07-28 18:45:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 19:07:41 -0400
commit	a52633d8e9c35832f1409dc5fa166019048a3f1f (patch)
tree	489be85b88b8dc0749747d603448bb3669db0d14 /mm
parent	75ef7184053989118d3814c558a9af62e7376a58 (diff)
mm, vmscan: move lru_lock to the node
Node-based reclaim requires node-based LRUs and locking.  This is a
preparation patch that just moves the lru_lock to the node so later patches
are easier to review.  It is a mechanical change but note this patch makes
contention worse because the LRU lock is hotter and direct reclaim and kswapd
can contend on the same lock even when reclaiming from different zones.

Link: http://lkml.kernel.org/r/1467970510-21195-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
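The diffstat below is limited to mm/, so the accessor the converted call sites rely on is not visible here; in the full patch it is added outside mm/ (in include/linux/mmzone.h). As a minimal sketch reconstructed from the callers in the hunks below, not quoted from the patch itself, the helper simply maps a zone to its owning node's lock:

/*
 * Sketch of the accessor assumed by the call sites in this diff: the
 * lru_lock now lives in the node (pg_data_t), and each zone reaches it
 * through its zone_pgdat back-pointer.
 */
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

Because every zone of a node resolves to the same spinlock, direct reclaim and kswapd can now contend on one lock even when reclaiming from different zones, which is the contention cost called out in the changelog.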
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c	10
-rw-r--r--	mm/filemap.c	4
-rw-r--r--	mm/huge_memory.c	6
-rw-r--r--	mm/memcontrol.c	6
-rw-r--r--	mm/mlock.c	10
-rw-r--r--	mm/page_alloc.c	4
-rw-r--r--	mm/page_idle.c	4
-rw-r--r--	mm/rmap.c	2
-rw-r--r--	mm/swap.c	30
-rw-r--r--	mm/vmscan.c	48
10 files changed, 62 insertions, 62 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 45eaa2a56517..5c65fad3f330 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -752,7 +752,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * if contended.
 		 */
 		if (!(low_pfn % SWAP_CLUSTER_MAX)
-		    && compact_unlock_should_abort(&zone->lru_lock, flags,
+		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
								&locked, cc))
 			break;
 
@@ -813,7 +813,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
 				if (locked) {
-					spin_unlock_irqrestore(&zone->lru_lock,
+					spin_unlock_irqrestore(zone_lru_lock(zone),
									flags);
 					locked = false;
 				}
@@ -836,7 +836,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
-			locked = compact_trylock_irqsave(&zone->lru_lock,
+			locked = compact_trylock_irqsave(zone_lru_lock(zone),
								&flags, cc);
 			if (!locked)
 				break;
@@ -899,7 +899,7 @@ isolate_fail:
 		 */
 		if (nr_isolated) {
 			if (locked) {
-				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 				locked = false;
 			}
 			acct_isolated(zone, cc);
@@ -927,7 +927,7 @@ isolate_fail:
 	low_pfn = end_pfn;
 
 	if (locked)
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 
 	/*
 	 * Update the pageblock-skip information and cached scanner pfn,
diff --git a/mm/filemap.c b/mm/filemap.c
index e90c1543ec2d..7ec50bd6f88c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -95,8 +95,8 @@
  *    ->swap_lock		(try_to_unmap_one)
  *    ->private_lock		(try_to_unmap_one)
  *    ->tree_lock		(try_to_unmap_one)
- *    ->zone.lru_lock		(follow_page->mark_page_accessed)
- *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
+ *    ->zone_lru_lock(zone)	(follow_page->mark_page_accessed)
+ *    ->zone_lru_lock(zone)	(check_pte_range->isolate_lru_page)
  *    ->private_lock		(page_remove_rmap->set_page_dirty)
  *    ->tree_lock		(page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3647334c2ef9..99578b63814b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1848,7 +1848,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 		spin_unlock(&head->mapping->tree_lock);
 	}
 
-	spin_unlock_irqrestore(&page_zone(head)->lru_lock, flags);
+	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
 
 	unfreeze_page(head);
 
@@ -2034,7 +2034,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	lru_add_drain();
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
-	spin_lock_irqsave(&page_zone(head)->lru_lock, flags);
+	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
 
 	if (mapping) {
 		void **pslot;
@@ -2077,7 +2077,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		spin_unlock(&pgdata->split_queue_lock);
 fail:		if (mapping)
 			spin_unlock(&mapping->tree_lock);
-		spin_unlock_irqrestore(&page_zone(head)->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
 		unfreeze_page(head);
 		ret = -EBUSY;
 	}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 40dfca3ef4bb..9b70f9ca8ddf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2065,7 +2065,7 @@ static void lock_page_lru(struct page *page, int *isolated)
 {
 	struct zone *zone = page_zone(page);
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	if (PageLRU(page)) {
 		struct lruvec *lruvec;
 
@@ -2089,7 +2089,7 @@ static void unlock_page_lru(struct page *page, int isolated)
 		SetPageLRU(page);
 		add_page_to_lru_list(page, lruvec, page_lru(page));
 	}
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 }
 
 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
@@ -2389,7 +2389,7 @@ void memcg_kmem_uncharge(struct page *page, int order)
 
 /*
  * Because tail pages are not marked as "used", set it. We're under
- * zone->lru_lock and migration entries setup in all page mappings.
+ * zone_lru_lock and migration entries setup in all page mappings.
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
diff --git a/mm/mlock.c b/mm/mlock.c
index ef8dc9f395c4..997f63082ff5 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -188,7 +188,7 @@ unsigned int munlock_vma_page(struct page *page)
 	 * might otherwise copy PageMlocked to part of the tail pages before
 	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
 	 */
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 
 	nr_pages = hpage_nr_pages(page);
 	if (!TestClearPageMlocked(page))
@@ -197,14 +197,14 @@ unsigned int munlock_vma_page(struct page *page)
 	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
 
 	if (__munlock_isolate_lru_page(page, true)) {
-		spin_unlock_irq(&zone->lru_lock);
+		spin_unlock_irq(zone_lru_lock(zone));
 		__munlock_isolated_page(page);
 		goto out;
 	}
 	__munlock_isolation_failed(page);
 
 unlock_out:
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 out:
 	return nr_pages - 1;
@@ -289,7 +289,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	pagevec_init(&pvec_putback, 0);
 
 	/* Phase 1: page isolation */
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	for (i = 0; i < nr; i++) {
 		struct page *page = pvec->pages[i];
 
@@ -315,7 +315,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	}
 	delta_munlocked = -nr + pagevec_count(&pvec_putback);
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	/* Now we can release pins of pages that we are not munlocking */
 	pagevec_release(&pvec_putback);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d4ff81b973f..5760c626c309 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5904,6 +5904,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 	init_waitqueue_head(&pgdat->kcompactd_wait);
 #endif
 	pgdat_page_ext_init(pgdat);
+	spin_lock_init(&pgdat->lru_lock);
 
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
@@ -5958,10 +5959,9 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
 #endif
 		zone->name = zone_names[j];
+		zone->zone_pgdat = pgdat;
 		spin_lock_init(&zone->lock);
-		spin_lock_init(&zone->lru_lock);
 		zone_seqlock_init(zone);
-		zone->zone_pgdat = pgdat;
 		zone_pcp_init(zone);
 
 		/* For bootup, initialized properly in watermark setup */
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 4ea9c4ef5146..ae11aa914e55 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -41,12 +41,12 @@ static struct page *page_idle_get_page(unsigned long pfn)
 		return NULL;
 
 	zone = page_zone(page);
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	if (unlikely(!PageLRU(page))) {
 		put_page(page);
 		page = NULL;
 	}
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 	return page;
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 8a13d9f7b566..dc28bfecbf80 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -27,7 +27,7 @@
  *         mapping->i_mmap_rwsem
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
- *               zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
  *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
diff --git a/mm/swap.c b/mm/swap.c
index 616df4ddd870..bf37e5cfae81 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -62,12 +62,12 @@ static void __page_cache_release(struct page *page)
 		struct lruvec *lruvec;
 		unsigned long flags;
 
-		spin_lock_irqsave(&zone->lru_lock, flags);
+		spin_lock_irqsave(zone_lru_lock(zone), flags);
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 		VM_BUG_ON_PAGE(!PageLRU(page), page);
 		__ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec, page_off_lru(page));
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 	}
 	mem_cgroup_uncharge(page);
 }
@@ -189,16 +189,16 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
 		if (pagezone != zone) {
 			if (zone)
-				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 			zone = pagezone;
-			spin_lock_irqsave(&zone->lru_lock, flags);
+			spin_lock_irqsave(zone_lru_lock(zone), flags);
 		}
 
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 		(*move_fn)(page, lruvec, arg);
 	}
 	if (zone)
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 	release_pages(pvec->pages, pvec->nr, pvec->cold);
 	pagevec_reinit(pvec);
 }
@@ -318,9 +318,9 @@ void activate_page(struct page *page)
 		struct zone *zone = page_zone(page);
 
 		page = compound_head(page);
-		spin_lock_irq(&zone->lru_lock);
+		spin_lock_irq(zone_lru_lock(zone));
 		__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
-		spin_unlock_irq(&zone->lru_lock);
+		spin_unlock_irq(zone_lru_lock(zone));
 	}
 #endif
 
@@ -448,13 +448,13 @@ void add_page_to_unevictable_list(struct page *page)
 	struct zone *zone = page_zone(page);
 	struct lruvec *lruvec;
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	lruvec = mem_cgroup_page_lruvec(page, zone);
 	ClearPageActive(page);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
 	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 }
 
 /**
@@ -744,7 +744,7 @@ void release_pages(struct page **pages, int nr, bool cold)
 		 * same zone. The lock is held only if zone != NULL.
 		 */
 		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
-			spin_unlock_irqrestore(&zone->lru_lock, flags);
+			spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 			zone = NULL;
 		}
 
@@ -759,7 +759,7 @@ void release_pages(struct page **pages, int nr, bool cold)
 
 		if (PageCompound(page)) {
 			if (zone) {
-				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 				zone = NULL;
 			}
 			__put_compound_page(page);
@@ -771,11 +771,11 @@ void release_pages(struct page **pages, int nr, bool cold)
 
 			if (pagezone != zone) {
 				if (zone)
-					spin_unlock_irqrestore(&zone->lru_lock,
+					spin_unlock_irqrestore(zone_lru_lock(zone),
									flags);
 				lock_batch = 0;
 				zone = pagezone;
-				spin_lock_irqsave(&zone->lru_lock, flags);
+				spin_lock_irqsave(zone_lru_lock(zone), flags);
 			}
 
 			lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -790,7 +790,7 @@ void release_pages(struct page **pages, int nr, bool cold)
 		list_add(&page->lru, &pages_to_free);
 	}
 	if (zone)
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 
 	mem_cgroup_uncharge_list(&pages_to_free);
 	free_hot_cold_page_list(&pages_to_free, cold);
@@ -826,7 +826,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
 	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
 	VM_BUG_ON(NR_CPUS != 1 &&
-		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
+		  !spin_is_locked(zone_lru_lock(lruvec_zone(lruvec))));
 
 	if (!list)
 		SetPageLRU(page_tail);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 21d417ccff69..e7ffcd259cc4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1349,7 +1349,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 }
 
 /*
- * zone->lru_lock is heavily contended.  Some of the functions that
+ * zone_lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
@@ -1444,7 +1444,7 @@ int isolate_lru_page(struct page *page)
 		struct zone *zone = page_zone(page);
 		struct lruvec *lruvec;
 
-		spin_lock_irq(&zone->lru_lock);
+		spin_lock_irq(zone_lru_lock(zone));
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
@@ -1453,7 +1453,7 @@ int isolate_lru_page(struct page *page)
 			del_page_from_lru_list(page, lruvec, lru);
 			ret = 0;
 		}
-		spin_unlock_irq(&zone->lru_lock);
+		spin_unlock_irq(zone_lru_lock(zone));
 	}
 	return ret;
 }
@@ -1512,9 +1512,9 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
 		if (unlikely(!page_evictable(page))) {
-			spin_unlock_irq(&zone->lru_lock);
+			spin_unlock_irq(zone_lru_lock(zone));
 			putback_lru_page(page);
-			spin_lock_irq(&zone->lru_lock);
+			spin_lock_irq(zone_lru_lock(zone));
 			continue;
 		}
 
@@ -1535,10 +1535,10 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		del_page_from_lru_list(page, lruvec, lru);
 
 		if (unlikely(PageCompound(page))) {
-			spin_unlock_irq(&zone->lru_lock);
+			spin_unlock_irq(zone_lru_lock(zone));
 			mem_cgroup_uncharge(page);
 			(*get_compound_page_dtor(page))(page);
-			spin_lock_irq(&zone->lru_lock);
+			spin_lock_irq(zone_lru_lock(zone));
 		} else
 			list_add(&page->lru, &pages_to_free);
 	}
@@ -1600,7 +1600,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (!sc->may_writepage)
 		isolate_mode |= ISOLATE_CLEAN;
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
				     &nr_scanned, sc, isolate_mode, lru);
@@ -1616,7 +1616,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
 	}
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	if (nr_taken == 0)
 		return 0;
@@ -1626,7 +1626,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
				&nr_writeback, &nr_immediate,
				false);
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 
 	if (global_reclaim(sc)) {
 		if (current_is_kswapd())
@@ -1641,7 +1641,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	mem_cgroup_uncharge_list(&page_list);
 	free_hot_cold_page_list(&page_list, true);
@@ -1715,9 +1715,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone->lru_lock across the whole operation.  But if
+ * appropriate to hold zone_lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone->lru_lock around each page.  It's impossible to balance
+ * should drop zone_lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
@@ -1754,10 +1754,10 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
 		del_page_from_lru_list(page, lruvec, lru);
 
 		if (unlikely(PageCompound(page))) {
-			spin_unlock_irq(&zone->lru_lock);
+			spin_unlock_irq(zone_lru_lock(zone));
 			mem_cgroup_uncharge(page);
 			(*get_compound_page_dtor(page))(page);
-			spin_lock_irq(&zone->lru_lock);
+			spin_lock_irq(zone_lru_lock(zone));
 		} else
 			list_add(&page->lru, pages_to_free);
 	}
@@ -1792,7 +1792,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	if (!sc->may_writepage)
 		isolate_mode |= ISOLATE_CLEAN;
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
				     &nr_scanned, sc, isolate_mode, lru);
@@ -1805,7 +1805,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
 
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	while (!list_empty(&l_hold)) {
 		cond_resched();
@@ -1850,7 +1850,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	/*
 	 * Move pages back to the lru list.
 	 */
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	/*
 	 * Count referenced pages from currently used mappings as rotated,
 	 * even though only some of them are actually re-activated.  This
@@ -1862,7 +1862,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
 	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	mem_cgroup_uncharge_list(&l_hold);
 	free_hot_cold_page_list(&l_hold, true);
@@ -2077,7 +2077,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
 		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		reclaim_stat->recent_scanned[0] /= 2;
 		reclaim_stat->recent_rotated[0] /= 2;
@@ -2098,7 +2098,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 
 	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
 	fp /= reclaim_stat->recent_rotated[1] + 1;
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	fraction[0] = ap;
 	fraction[1] = fp;
@@ -3791,9 +3791,9 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		pagezone = page_zone(page);
 		if (pagezone != zone) {
 			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
+				spin_unlock_irq(zone_lru_lock(zone));
 			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
+			spin_lock_irq(zone_lru_lock(zone));
 		}
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 
@@ -3814,7 +3814,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 	if (zone) {
 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-		spin_unlock_irq(&zone->lru_lock);
+		spin_unlock_irq(zone_lru_lock(zone));
 	}
 }
 #endif /* CONFIG_SHMEM */