Diffstat (limited to 'mm/compaction.c')
 -rw-r--r--  mm/compaction.c | 65
 1 file changed, 45 insertions(+), 20 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index b16dd3822995..832c4183dccc 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -50,6 +50,11 @@ static inline bool migrate_async_suitable(int migratetype)
 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
 }
 
+static inline bool should_release_lock(spinlock_t *lock)
+{
+	return need_resched() || spin_is_contended(lock);
+}
+
 /*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. Check if the process needs to be scheduled or
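
The new should_release_lock() helper centralises the back-off test: give up a coarse lock if another task needs the CPU or if someone is spinning on the lock itself. Below is a minimal sketch of the caller pattern it supports; scan_with_lock_relax() and scan_one() are hypothetical names, not part of this patch, and real compaction callers additionally batch the check (see the SWAP_CLUSTER_MAX test later in this diff).

/*
 * Illustrative only: a scan loop that periodically relaxes a coarse
 * spinlock using the patch's should_release_lock() test. scan_one()
 * is a hypothetical per-item step.
 */
static void scan_with_lock_relax(spinlock_t *lock,
				 unsigned long start, unsigned long end)
{
	unsigned long flags;
	unsigned long i;
	bool locked = false;

	for (i = start; i < end; i++) {
		if (locked && should_release_lock(lock)) {
			spin_unlock_irqrestore(lock, flags);
			locked = false;
			cond_resched();	/* let the waiting task run */
		}
		if (!locked) {
			spin_lock_irqsave(lock, flags);
			locked = true;
		}
		scan_one(i);
	}
	if (locked)
		spin_unlock_irqrestore(lock, flags);
}
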
@@ -62,7 +67,7 @@ static inline bool migrate_async_suitable(int migratetype)
 static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
 			bool locked, struct compact_control *cc)
 {
-	if (need_resched() || spin_is_contended(lock)) {
+	if (should_release_lock(lock)) {
 		if (locked) {
 			spin_unlock_irqrestore(lock, *flags);
 			locked = false;
@@ -327,7 +332,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	isolate_mode_t mode = 0;
 	struct lruvec *lruvec;
 	unsigned long flags;
-	bool locked;
+	bool locked = false;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -347,23 +352,17 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 	/* Time to isolate some pages for migration */
 	cond_resched();
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	locked = true;
 	for (; low_pfn < end_pfn; low_pfn++) {
 		struct page *page;
 
 		/* give a chance to irqs before checking need_resched() */
-		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
-			spin_unlock_irqrestore(&zone->lru_lock, flags);
-			locked = false;
+		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+			if (should_release_lock(&zone->lru_lock)) {
+				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				locked = false;
+			}
 		}
 
-		/* Check if it is ok to still hold the lock */
-		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
-								locked, cc);
-		if (!locked || fatal_signal_pending(current))
-			break;
-
 		/*
 		 * migrate_pfn does not necessarily start aligned to a
 		 * pageblock. Ensure that pfn_valid is called when moving
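
This hunk is the heart of the change: zone->lru_lock is no longer taken before the scan begins, and the periodic release at the top of the loop now fires only when the lock is actually held and worth dropping. A condensed sketch of the reordered flow is below; it is not the verbatim kernel loop, and isolate_candidate() is a hypothetical stand-in for the filtering and isolation steps shown in the later hunks, returning whether the lock is held on exit in the same style as compact_checklock_irqsave().

/*
 * Condensed sketch, assuming the helpers named in this patch: the
 * scan starts unlocked and acquires zone->lru_lock on demand.
 */
static void scan_unlocked_first(struct zone *zone, unsigned long low_pfn,
				unsigned long end_pfn,
				struct compact_control *cc)
{
	unsigned long flags;
	bool locked = false;	/* no spin_lock_irqsave() up front */

	for (; low_pfn < end_pfn; low_pfn++) {
		/* Drop the lock every SWAP_CLUSTER_MAX pfns, if held */
		if (locked && !((low_pfn + 1) % SWAP_CLUSTER_MAX) &&
		    should_release_lock(&zone->lru_lock)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			locked = false;
		}

		/* Hypothetical: filter, lock if needed, isolate */
		locked = isolate_candidate(zone, low_pfn, &flags, locked, cc);
	}

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
}
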
@@ -403,21 +402,40 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		pageblock_nr = low_pfn >> pageblock_order;
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
-			low_pfn += pageblock_nr_pages;
-			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
-			last_pageblock_nr = pageblock_nr;
-			continue;
+			goto next_pageblock;
 		}
 
+		/* Check may be lockless but that's ok as we recheck later */
 		if (!PageLRU(page))
 			continue;
 
 		/*
-		 * PageLRU is set, and lru_lock excludes isolation,
-		 * splitting and collapsing (collapsing has already
-		 * happened if PageLRU is set).
+		 * PageLRU is set. lru_lock normally excludes isolation
+		 * splitting and collapsing (collapsing has already happened
+		 * if PageLRU is set) but the lock is not necessarily taken
+		 * here and it is wasteful to take it just to check transhuge.
+		 * Check TransHuge without lock and skip the whole pageblock if
+		 * it's either a transhuge or hugetlbfs page, as calling
+		 * compound_order() without preventing THP from splitting the
+		 * page underneath us may return surprising results.
 		 */
 		if (PageTransHuge(page)) {
+			if (!locked)
+				goto next_pageblock;
+			low_pfn += (1 << compound_order(page)) - 1;
+			continue;
+		}
+
+		/* Check if it is ok to still hold the lock */
+		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
+								locked, cc);
+		if (!locked || fatal_signal_pending(current))
+			break;
+
+		/* Recheck PageLRU and PageTransHuge under lock */
+		if (!PageLRU(page))
+			continue;
+		if (PageTransHuge(page)) {
 			low_pfn += (1 << compound_order(page)) - 1;
 			continue;
 		}
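
The PageLRU() and PageTransHuge() tests above may now run without the lock; both are treated as hints and repeated once compact_checklock_irqsave() has actually taken zone->lru_lock. The pattern, extracted into a hypothetical helper purely for illustration:

/*
 * Sketch of the optimistic check / lock / recheck pattern this hunk
 * adds. The helper name is hypothetical. The first PageLRU() test can
 * race with reclaim or THP splitting, which is safe only because the
 * result is revalidated under zone->lru_lock.
 */
static bool lru_page_isolatable(struct zone *zone, struct page *page,
				unsigned long *flags, bool *locked,
				struct compact_control *cc)
{
	/* Lockless hint: cheap, may be stale */
	if (!PageLRU(page))
		return false;

	/* Take (or keep) the lock only for pages that pass the hint */
	*locked = compact_checklock_irqsave(&zone->lru_lock, flags,
					    *locked, cc);
	if (!*locked)
		return false;

	/* Recheck now that the lock excludes isolation and splitting */
	return PageLRU(page) && !PageTransHuge(page);
}
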
@@ -444,6 +462,13 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			++low_pfn;
 			break;
 		}
+
+		continue;
+
+next_pageblock:
+		low_pfn += pageblock_nr_pages;
+		low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
+		last_pageblock_nr = pageblock_nr;
 	}
 
 	acct_isolated(zone, locked, cc);
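
The new next_pageblock label replaces two copies of the skip arithmetic. The trailing "- 1" compensates for the for-loop's low_pfn++, which runs after the label's statements. A small userspace check of the arithmetic; ALIGN and the block size are redefined here only for the demo, assuming pageblock_nr_pages == 512 (order-9 pageblocks with 4K pages):

#include <stdio.h>

/* Userspace stand-ins for the kernel macro and constant (assumed
 * values; the real ones live in kernel headers). */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define PAGEBLOCK_NR_PAGES	512UL

/* Mirrors the next_pageblock arithmetic from the hunk above. */
static unsigned long skip_to_next_pageblock(unsigned long low_pfn)
{
	low_pfn += PAGEBLOCK_NR_PAGES;
	return ALIGN(low_pfn, PAGEBLOCK_NR_PAGES) - 1;
}

int main(void)
{
	/* The skip typically fires as the scan enters a pageblock,
	 * e.g. at pfn 1024: the result is 1535, and the for-loop's
	 * low_pfn++ then lands on 1536, the start of the next block. */
	printf("%lu\n", skip_to_next_pageblock(1024));
	return 0;
}
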