author		Mel Gorman <mgorman@techsingularity.net>	2016-07-28 18:45:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 19:07:41 -0400
commit		a52633d8e9c35832f1409dc5fa166019048a3f1f (patch)
tree		489be85b88b8dc0749747d603448bb3669db0d14 /mm/mlock.c
parent		75ef7184053989118d3814c558a9af62e7376a58 (diff)
mm, vmscan: move lru_lock to the node
Node-based reclaim requires node-based LRUs and locking.  This is a
preparation patch that just moves the lru_lock to the node so that later
patches are easier to review.  It is a mechanical change, but note that it
makes contention worse because the LRU lock is hotter and direct reclaim
and kswapd can contend on the same lock even when reclaiming from
different zones.

Link: http://lkml.kernel.org/r/1467970510-21195-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
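The zone_lru_lock() helper that replaces the open-coded &zone->lru_lock below is introduced outside this file, so it does not appear in this mm/mlock.c-only diff. As a rough sketch (not the verbatim hunk from the series), the accessor simply forwards to the lock that now lives in the zone's owning node:

	/* Sketch: the LRU lock moves from struct zone into the node's
	 * pglist_data, so all zones belonging to one node share a single
	 * lock; the helper resolves a zone to that per-node lock. */
	static inline spinlock_t *zone_lru_lock(struct zone *zone)
	{
		return &zone->zone_pgdat->lru_lock;
	}

This is why the mm/mlock.c changes are purely mechanical: each lock/unlock site swaps &zone->lru_lock for zone_lru_lock(zone) with no change in locking order.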
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	10
1 files changed, 5 insertions, 5 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index ef8dc9f395c4..997f63082ff5 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -188,7 +188,7 @@ unsigned int munlock_vma_page(struct page *page)
 	 * might otherwise copy PageMlocked to part of the tail pages before
 	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
 	 */
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 
 	nr_pages = hpage_nr_pages(page);
 	if (!TestClearPageMlocked(page))
@@ -197,14 +197,14 @@ unsigned int munlock_vma_page(struct page *page)
 	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
 
 	if (__munlock_isolate_lru_page(page, true)) {
-		spin_unlock_irq(&zone->lru_lock);
+		spin_unlock_irq(zone_lru_lock(zone));
 		__munlock_isolated_page(page);
 		goto out;
 	}
 	__munlock_isolation_failed(page);
 
 unlock_out:
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 out:
 	return nr_pages - 1;
@@ -289,7 +289,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	pagevec_init(&pvec_putback, 0);
 
 	/* Phase 1: page isolation */
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	for (i = 0; i < nr; i++) {
 		struct page *page = pvec->pages[i];
 
@@ -315,7 +315,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	}
 	delta_munlocked = -nr + pagevec_count(&pvec_putback);
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 
 	/* Now we can release pins of pages that we are not munlocking */
 	pagevec_release(&pvec_putback);