author		Matthew Wilcox <willy@infradead.org>	2017-12-04 04:35:16 -0500
committer	Matthew Wilcox <willy@infradead.org>	2018-10-21 10:46:38 -0400
commit		89eb946a7432be639b452fac295c0c2e5186c4a4 (patch)
tree		663e90354030fec3e432a3a13934db01ea932242 /mm/migrate.c
parent		560d454bae08b5d5a132c5520177dede066334b7 (diff)
mm: Convert page migration to XArray
Signed-off-by: Matthew Wilcox <willy@infradead.org>
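
The conversion swaps the open-coded radix tree calls for the XArray's cursor-based xas_* API: an on-stack XA_STATE pins the page's index, xas_load() revalidates the entry under the lock, and xas_store() replaces it in place. Below is a minimal sketch of that load-check-store idiom; the xas_* calls are the real XArray API, but replace_entry() and its parameters are hypothetical, for illustration only.

#include <linux/xarray.h>

/*
 * Hypothetical helper, not part of this patch: replace @old with @new
 * at @index, failing if @old is no longer the entry stored there.
 * This is the idiom the patch applies to mapping->i_pages below.
 */
static int replace_entry(struct xarray *xa, unsigned long index,
			 void *old, void *new)
{
	XA_STATE(xas, xa, index);	/* on-stack cursor at @index */
	int ret = 0;

	xas_lock_irq(&xas);		/* was xa_lock_irq(&mapping->i_pages) */
	if (xas_load(&xas) != old)	/* was radix_tree_deref_slot_protected() */
		ret = -EAGAIN;
	else
		xas_store(&xas, new);	/* was radix_tree_replace_slot() */
	xas_unlock_irq(&xas);

	return ret;
}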
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	48
1 file changed, 18 insertions, 30 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index d6a2e89b086a..b3cde3fd094a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -323,7 +323,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 	page = migration_entry_to_page(entry);
 
 	/*
-	 * Once radix-tree replacement of page migration started, page_count
+	 * Once page cache replacement of page migration started, page_count
 	 * *must* be zero. And, we don't want to call wait_on_page_locked()
 	 * against a page without get_page().
 	 * So, we use get_page_unless_zero(), here. Even failed, page fault
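
As background for the comment above: migration freezes the page's refcount to zero before replacing the page cache entry, so get_page_unless_zero() fails on a page being migrated and the faulting task retries instead of sleeping on a page it holds no reference to. A hedged sketch of that guard follows; wait_unless_frozen() is a hypothetical name, while the page-ref calls are real kernel API.

/*
 * Illustration only: take a reference before sleeping on a page that
 * migration may be tearing down concurrently.
 */
static void wait_unless_frozen(struct page *page)
{
	if (!get_page_unless_zero(page))
		return;			/* refcount frozen; caller refaults */
	wait_on_page_locked(page);
	put_page(page);
}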
@@ -438,10 +438,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		struct buffer_head *head, enum migrate_mode mode,
 		int extra_count)
 {
+	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = 1 + extra_count;
-	void **pslot;
 
 	/*
 	 * Device public or private pages have an extra refcount as they are
@@ -467,21 +467,16 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	oldzone = page_zone(page);
 	newzone = page_zone(newpage);
 
-	xa_lock_irq(&mapping->i_pages);
-
-	pslot = radix_tree_lookup_slot(&mapping->i_pages,
-					page_index(page));
+	xas_lock_irq(&xas);
 
 	expected_count += hpage_nr_pages(page) + page_has_private(page);
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot,
-					&mapping->i_pages.xa_lock) != page) {
-		xa_unlock_irq(&mapping->i_pages);
+	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	if (!page_ref_freeze(page, expected_count)) {
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
@@ -495,7 +490,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	if (mode == MIGRATE_ASYNC && head &&
 			!buffer_migrate_lock_buffers(head, mode)) {
 		page_ref_unfreeze(page, expected_count);
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
@@ -523,16 +518,13 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		SetPageDirty(newpage);
 	}
 
-	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	xas_store(&xas, newpage);
 	if (PageTransHuge(page)) {
 		int i;
-		int index = page_index(page);
 
 		for (i = 1; i < HPAGE_PMD_NR; i++) {
-			pslot = radix_tree_lookup_slot(&mapping->i_pages,
-						       index + i);
-			radix_tree_replace_slot(&mapping->i_pages, pslot,
-						newpage + i);
+			xas_next(&xas);
+			xas_store(&xas, newpage + i);
 		}
 	}
 
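
The PageTransHuge() loop above is where the XA_STATE cursor pays off: xas_next() advances to the next index in place, so each tail page is stored without the per-slot radix_tree_lookup_slot() the old code needed. A sketch of that walk follows; store_subpages() is a hypothetical name, while the xas_* calls are the real API.

/*
 * Illustration only: store @page, @page + 1, ..., @page + nr - 1 at
 * @nr consecutive indices starting at @index, as the loop above does.
 */
static void store_subpages(struct xarray *xa, unsigned long index,
			   struct page *page, int nr)
{
	XA_STATE(xas, xa, index);
	int i;

	xas_lock_irq(&xas);
	xas_store(&xas, page);			/* head page at @index */
	for (i = 1; i < nr; i++) {
		xas_next(&xas);			/* step cursor to @index + i */
		xas_store(&xas, page + i);	/* no fresh lookup needed */
	}
	xas_unlock_irq(&xas);
}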
@@ -543,7 +535,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
 
-	xa_unlock(&mapping->i_pages);
+	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
 
 	/*
@@ -583,22 +575,18 @@ EXPORT_SYMBOL(migrate_page_move_mapping);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 				   struct page *newpage, struct page *page)
 {
+	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	int expected_count;
-	void **pslot;
-
-	xa_lock_irq(&mapping->i_pages);
-
-	pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));
 
+	xas_lock_irq(&xas);
 	expected_count = 2 + page_has_private(page);
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) {
-		xa_unlock_irq(&mapping->i_pages);
+	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	if (!page_ref_freeze(page, expected_count)) {
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
@@ -607,11 +595,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
 	get_page(newpage);
 
-	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	xas_store(&xas, newpage);
 
 	page_ref_unfreeze(page, expected_count - 1);
 
-	xa_unlock_irq(&mapping->i_pages);
+	xas_unlock_irq(&xas);
 
 	return MIGRATEPAGE_SUCCESS;
 }