author    Mel Gorman <mgorman@techsingularity.net>          2016-07-28 18:45:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-07-28 19:07:41 -0400
commit    a52633d8e9c35832f1409dc5fa166019048a3f1f
tree      489be85b88b8dc0749747d603448bb3669db0d14 /mm/memcontrol.c
parent    75ef7184053989118d3814c558a9af62e7376a58
mm, vmscan: move lru_lock to the node
Node-based reclaim requires node-based LRUs and locking.  This is a
preparation patch that just moves the lru_lock to the node so later patches
are easier to review.  It is a mechanical change but note this patch makes
contention worse because the LRU lock is hotter and direct reclaim and kswapd
can contend on the same lock even when reclaiming from different zones.

Link: http://lkml.kernel.org/r/1467970510-21195-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
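For context: the hunks below swap direct &zone->lru_lock accesses for a
zone_lru_lock() helper that this series introduces outside mm/memcontrol.c.
A minimal sketch of what that helper amounts to, assuming the lock now lives
in the zone's pg_data_t:

	/*
	 * Sketch of the helper used in the hunks below; the real definition
	 * is added elsewhere in this series.  Every zone resolves to its
	 * node's pg_data_t, so all zones on a node share a single LRU lock.
	 */
	static inline spinlock_t *zone_lru_lock(struct zone *zone)
	{
		return &zone->zone_pgdat->lru_lock;
	}

Because all zones on a node now map to the one per-node lock, direct reclaim
and kswapd can serialize on it even when reclaiming from different zones,
which is the contention the message above warns about.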
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 40dfca3ef4bb..9b70f9ca8ddf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2065,7 +2065,7 @@ static void lock_page_lru(struct page *page, int *isolated)
 {
 	struct zone *zone = page_zone(page);
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	if (PageLRU(page)) {
 		struct lruvec *lruvec;
 
@@ -2089,7 +2089,7 @@ static void unlock_page_lru(struct page *page, int isolated)
 		SetPageLRU(page);
 		add_page_to_lru_list(page, lruvec, page_lru(page));
 	}
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 }
 
 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
@@ -2389,7 +2389,7 @@ void memcg_kmem_uncharge(struct page *page, int order)
 
 /*
  * Because tail pages are not marked as "used", set it. We're under
- * zone->lru_lock and migration entries setup in all page mappings.
+ * zone_lru_lock and migration entries setup in all page mappings.
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {