summary | refs | log | tree | commit | diff | stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorMel Gorman <mgorman@techsingularity.net>2016-07-28 18:45:28 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-28 19:07:41 -0400
commita52633d8e9c35832f1409dc5fa166019048a3f1f (patch)
tree489be85b88b8dc0749747d603448bb3669db0d14 /mm/page_alloc.c
parent75ef7184053989118d3814c558a9af62e7376a58 (diff)
mm, vmscan: move lru_lock to the node
Node-based reclaim requires node-based LRUs and locking. This is a preparation patch that just moves the lru_lock to the node so later patches are easier to review. It is a mechanical change but note this patch makes contention worse because the LRU lock is hotter and direct reclaim and kswapd can contend on the same lock even when reclaiming from different zones. Link: http://lkml.kernel.org/r/1467970510-21195-3-git-send-email-mgorman@techsingularity.net Signed-off-by: Mel Gorman <mgorman@techsingularity.net> Reviewed-by: Minchan Kim <minchan@kernel.org> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Hillf Danton <hillf.zj@alibaba-inc.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Rik van Riel <riel@surriel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  4
1 file changed, 2 insertions, 2 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d4ff81b973f..5760c626c309 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5904,6 +5904,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
5904 init_waitqueue_head(&pgdat->kcompactd_wait); 5904 init_waitqueue_head(&pgdat->kcompactd_wait);
5905#endif 5905#endif
5906 pgdat_page_ext_init(pgdat); 5906 pgdat_page_ext_init(pgdat);
5907 spin_lock_init(&pgdat->lru_lock);
5907 5908
5908 for (j = 0; j < MAX_NR_ZONES; j++) { 5909 for (j = 0; j < MAX_NR_ZONES; j++) {
5909 struct zone *zone = pgdat->node_zones + j; 5910 struct zone *zone = pgdat->node_zones + j;
@@ -5958,10 +5959,9 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
5958 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; 5959 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
5959#endif 5960#endif
5960 zone->name = zone_names[j]; 5961 zone->name = zone_names[j];
5962 zone->zone_pgdat = pgdat;
5961 spin_lock_init(&zone->lock); 5963 spin_lock_init(&zone->lock);
5962 spin_lock_init(&zone->lru_lock);
5963 zone_seqlock_init(zone); 5964 zone_seqlock_init(zone);
5964 zone->zone_pgdat = pgdat;
5965 zone_pcp_init(zone); 5965 zone_pcp_init(zone);
5966 5966
5967 /* For bootup, initialized properly in watermark setup */ 5967 /* For bootup, initialized properly in watermark setup */