summaryrefslogtreecommitdiffstats
path: root/mm/compaction.c
diff options
context:
space:
mode:
authorMel Gorman <mgorman@techsingularity.net>2016-07-28 18:45:28 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-28 19:07:41 -0400
commita52633d8e9c35832f1409dc5fa166019048a3f1f (patch)
tree489be85b88b8dc0749747d603448bb3669db0d14 /mm/compaction.c
parent75ef7184053989118d3814c558a9af62e7376a58 (diff)
mm, vmscan: move lru_lock to the node
Node-based reclaim requires node-based LRUs and locking. This is a preparation patch that just moves the lru_lock to the node so later patches are easier to review. It is a mechanical change but note this patch makes contention worse because the LRU lock is hotter and direct reclaim and kswapd can contend on the same lock even when reclaiming from different zones. Link: http://lkml.kernel.org/r/1467970510-21195-3-git-send-email-mgorman@techsingularity.net Signed-off-by: Mel Gorman <mgorman@techsingularity.net> Reviewed-by: Minchan Kim <minchan@kernel.org> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Hillf Danton <hillf.zj@alibaba-inc.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Rik van Riel <riel@surriel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--mm/compaction.c10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 45eaa2a56517..5c65fad3f330 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -752,7 +752,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
752 * if contended. 752 * if contended.
753 */ 753 */
754 if (!(low_pfn % SWAP_CLUSTER_MAX) 754 if (!(low_pfn % SWAP_CLUSTER_MAX)
755 && compact_unlock_should_abort(&zone->lru_lock, flags, 755 && compact_unlock_should_abort(zone_lru_lock(zone), flags,
756 &locked, cc)) 756 &locked, cc))
757 break; 757 break;
758 758
@@ -813,7 +813,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
813 if (unlikely(__PageMovable(page)) && 813 if (unlikely(__PageMovable(page)) &&
814 !PageIsolated(page)) { 814 !PageIsolated(page)) {
815 if (locked) { 815 if (locked) {
816 spin_unlock_irqrestore(&zone->lru_lock, 816 spin_unlock_irqrestore(zone_lru_lock(zone),
817 flags); 817 flags);
818 locked = false; 818 locked = false;
819 } 819 }
@@ -836,7 +836,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
836 836
837 /* If we already hold the lock, we can skip some rechecking */ 837 /* If we already hold the lock, we can skip some rechecking */
838 if (!locked) { 838 if (!locked) {
839 locked = compact_trylock_irqsave(&zone->lru_lock, 839 locked = compact_trylock_irqsave(zone_lru_lock(zone),
840 &flags, cc); 840 &flags, cc);
841 if (!locked) 841 if (!locked)
842 break; 842 break;
@@ -899,7 +899,7 @@ isolate_fail:
899 */ 899 */
900 if (nr_isolated) { 900 if (nr_isolated) {
901 if (locked) { 901 if (locked) {
902 spin_unlock_irqrestore(&zone->lru_lock, flags); 902 spin_unlock_irqrestore(zone_lru_lock(zone), flags);
903 locked = false; 903 locked = false;
904 } 904 }
905 acct_isolated(zone, cc); 905 acct_isolated(zone, cc);
@@ -927,7 +927,7 @@ isolate_fail:
927 low_pfn = end_pfn; 927 low_pfn = end_pfn;
928 928
929 if (locked) 929 if (locked)
930 spin_unlock_irqrestore(&zone->lru_lock, flags); 930 spin_unlock_irqrestore(zone_lru_lock(zone), flags);
931 931
932 /* 932 /*
933 * Update the pageblock-skip information and cached scanner pfn, 933 * Update the pageblock-skip information and cached scanner pfn,