author		Matthew Wilcox <willy@infradead.org>	2018-06-12 09:46:30 -0400
committer	Matthew Wilcox <willy@infradead.org>	2018-10-21 10:46:44 -0400
commit		9f32d221301c3e754b24c77ab11bf793b19f51b5 (patch)
tree		bcab75e761b6f24e771de49c02bf46ec161ee149
parent		9fc747f68d49f4b63029e3a1e87c49d23771a199 (diff)
dax: Convert dax_lock_mapping_entry to XArray
Instead of always retrying when we slept, only retry if the page has
moved.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
-rw-r--r--	fs/dax.c	83
1 file changed, 35 insertions, 48 deletions
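The "only retry if the page has moved" decision is the heart of the new locking loop: after get_unlocked_entry() wakes up, the entry's pfn is compared against the page, and only a mismatch forces another pass through the loop. A minimal sketch of that step, using the names from the diff below (error handling and the device-dax short-circuit omitted):

	entry = get_unlocked_entry(&xas);	/* may have slept while the entry was locked */
	/* Did the page move while we slept?  Only then do we retry. */
	if (dax_to_pfn(entry) != page_to_pfn(page)) {
		xas_unlock_irq(&xas);
		continue;			/* re-read page->mapping and start over */
	}
	/* otherwise lock the entry we found and return success */
	dax_lock_entry(&xas, entry);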
@@ -99,6 +99,17 @@ static void *dax_make_locked(unsigned long pfn, unsigned long flags)
 			DAX_LOCKED);
 }
 
+static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+{
+	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+}
+
+static void *dax_make_page_entry(struct page *page)
+{
+	pfn_t pfn = page_to_pfn_t(page);
+	return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
+}
+
 static bool dax_is_locked(void *entry)
 {
 	return xa_to_value(entry) & DAX_LOCKED;
@@ -487,33 +498,16 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-	rcu_read_unlock();
-	schedule();
-	rcu_read_lock();
-
-	/*
-	 * Tell __get_unlocked_mapping_entry() to take a break, we need
-	 * to revalidate page->mapping after dropping locks
-	 */
-	return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
-	pgoff_t index;
-	struct inode *inode;
-	bool did_lock = false;
-	void *entry = NULL, **slot;
-	struct address_space *mapping;
+	XA_STATE(xas, NULL, 0);
+	void *entry;
 
-	rcu_read_lock();
 	for (;;) {
-		mapping = READ_ONCE(page->mapping);
+		struct address_space *mapping = READ_ONCE(page->mapping);
 
 		if (!dax_mapping(mapping))
-			break;
+			return false;
 
 		/*
 		 * In the device-dax case there's no need to lock, a
@@ -522,47 +516,40 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
-		inode = mapping->host;
-		if (S_ISCHR(inode->i_mode)) {
-			did_lock = true;
-			break;
-		}
+		if (S_ISCHR(mapping->host->i_mode))
+			return true;
 
-		xa_lock_irq(&mapping->i_pages);
+		xas.xa = &mapping->i_pages;
+		xas_lock_irq(&xas);
 		if (mapping != page->mapping) {
-			xa_unlock_irq(&mapping->i_pages);
+			xas_unlock_irq(&xas);
 			continue;
 		}
-		index = page->index;
-
-		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-				entry_wait_revalidate);
-		if (!entry) {
-			xa_unlock_irq(&mapping->i_pages);
-			break;
-		} else if (IS_ERR(entry)) {
-			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
-			continue;
-		}
-		lock_slot(mapping, slot);
-		did_lock = true;
-		xa_unlock_irq(&mapping->i_pages);
-		break;
+		xas_set(&xas, page->index);
+		entry = xas_load(&xas);
+		if (dax_is_locked(entry)) {
+			entry = get_unlocked_entry(&xas);
+			/* Did the page move while we slept? */
+			if (dax_to_pfn(entry) != page_to_pfn(page)) {
+				xas_unlock_irq(&xas);
+				continue;
+			}
+		}
+		dax_lock_entry(&xas, entry);
+		xas_unlock_irq(&xas);
+		return true;
 	}
-	rcu_read_unlock();
-
-	return did_lock;
 }
 
 void dax_unlock_mapping_entry(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
-	struct inode *inode = mapping->host;
+	XA_STATE(xas, &mapping->i_pages, page->index);
 
-	if (S_ISCHR(inode->i_mode))
+	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	unlock_mapping_entry(mapping, page->index);
+	dax_unlock_entry(&xas, dax_make_page_entry(page));
 }
 
 /*
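For orientation, a hypothetical caller-side sketch (not part of this patch) of how the pair is meant to be used when the caller holds only a struct page, e.g. from memory-failure handling; the error value here is illustrative only:

	/* Pin the DAX entry so that page->mapping and page->index stay stable. */
	if (!dax_lock_mapping_entry(page))
		return -EBUSY;		/* illustrative: page no longer belongs to a DAX mapping */

	/* ... safely inspect page->mapping / page->index here ... */

	dax_unlock_mapping_entry(page);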