diff options
author | Matthew Wilcox <mawilcox@microsoft.com> | 2018-04-10 19:36:56 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-11 13:28:39 -0400 |
commit | b93b016313b3ba8003c3b8bb71f569af91f19fc7 (patch) | |
tree | ad4be96414189dcdf8c972f351ba430996e9fdff /mm/truncate.c | |
parent | f6bb2a2c0b81c47282ddb7883f92e65a063c27dd (diff) |
page cache: use xa_lock
Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root. Rename the address_space ->page_tree to ->i_pages,
since we don't really care that it's a tree.
[willy@infradead.org: fix nds32, fs/dax.c]
Link: http://lkml.kernel.org/r/20180406145415.GB20605@bombadil.infradead.org
Link: http://lkml.kernel.org/r/20180313132639.17387-9-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/truncate.c')
-rw-r--r-- | mm/truncate.c | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/mm/truncate.c b/mm/truncate.c index c34e2fd4f583..1d2fb2dca96f 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -36,11 +36,11 @@ static inline void __clear_shadow_entry(struct address_space *mapping, | |||
36 | struct radix_tree_node *node; | 36 | struct radix_tree_node *node; |
37 | void **slot; | 37 | void **slot; |
38 | 38 | ||
39 | if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot)) | 39 | if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot)) |
40 | return; | 40 | return; |
41 | if (*slot != entry) | 41 | if (*slot != entry) |
42 | return; | 42 | return; |
43 | __radix_tree_replace(&mapping->page_tree, node, slot, NULL, | 43 | __radix_tree_replace(&mapping->i_pages, node, slot, NULL, |
44 | workingset_update_node); | 44 | workingset_update_node); |
45 | mapping->nrexceptional--; | 45 | mapping->nrexceptional--; |
46 | } | 46 | } |
@@ -48,9 +48,9 @@ static inline void __clear_shadow_entry(struct address_space *mapping, | |||
48 | static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, | 48 | static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, |
49 | void *entry) | 49 | void *entry) |
50 | { | 50 | { |
51 | spin_lock_irq(&mapping->tree_lock); | 51 | xa_lock_irq(&mapping->i_pages); |
52 | __clear_shadow_entry(mapping, index, entry); | 52 | __clear_shadow_entry(mapping, index, entry); |
53 | spin_unlock_irq(&mapping->tree_lock); | 53 | xa_unlock_irq(&mapping->i_pages); |
54 | } | 54 | } |
55 | 55 | ||
56 | /* | 56 | /* |
@@ -79,7 +79,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping, | |||
79 | dax = dax_mapping(mapping); | 79 | dax = dax_mapping(mapping); |
80 | lock = !dax && indices[j] < end; | 80 | lock = !dax && indices[j] < end; |
81 | if (lock) | 81 | if (lock) |
82 | spin_lock_irq(&mapping->tree_lock); | 82 | xa_lock_irq(&mapping->i_pages); |
83 | 83 | ||
84 | for (i = j; i < pagevec_count(pvec); i++) { | 84 | for (i = j; i < pagevec_count(pvec); i++) { |
85 | struct page *page = pvec->pages[i]; | 85 | struct page *page = pvec->pages[i]; |
@@ -102,7 +102,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping, | |||
102 | } | 102 | } |
103 | 103 | ||
104 | if (lock) | 104 | if (lock) |
105 | spin_unlock_irq(&mapping->tree_lock); | 105 | xa_unlock_irq(&mapping->i_pages); |
106 | pvec->nr = j; | 106 | pvec->nr = j; |
107 | } | 107 | } |
108 | 108 | ||
@@ -518,8 +518,8 @@ void truncate_inode_pages_final(struct address_space *mapping) | |||
518 | * modification that does not see AS_EXITING is | 518 | * modification that does not see AS_EXITING is |
519 | * completed before starting the final truncate. | 519 | * completed before starting the final truncate. |
520 | */ | 520 | */ |
521 | spin_lock_irq(&mapping->tree_lock); | 521 | xa_lock_irq(&mapping->i_pages); |
522 | spin_unlock_irq(&mapping->tree_lock); | 522 | xa_unlock_irq(&mapping->i_pages); |
523 | 523 | ||
524 | truncate_inode_pages(mapping, 0); | 524 | truncate_inode_pages(mapping, 0); |
525 | } | 525 | } |
@@ -627,13 +627,13 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page) | |||
627 | if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) | 627 | if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) |
628 | return 0; | 628 | return 0; |
629 | 629 | ||
630 | spin_lock_irqsave(&mapping->tree_lock, flags); | 630 | xa_lock_irqsave(&mapping->i_pages, flags); |
631 | if (PageDirty(page)) | 631 | if (PageDirty(page)) |
632 | goto failed; | 632 | goto failed; |
633 | 633 | ||
634 | BUG_ON(page_has_private(page)); | 634 | BUG_ON(page_has_private(page)); |
635 | __delete_from_page_cache(page, NULL); | 635 | __delete_from_page_cache(page, NULL); |
636 | spin_unlock_irqrestore(&mapping->tree_lock, flags); | 636 | xa_unlock_irqrestore(&mapping->i_pages, flags); |
637 | 637 | ||
638 | if (mapping->a_ops->freepage) | 638 | if (mapping->a_ops->freepage) |
639 | mapping->a_ops->freepage(page); | 639 | mapping->a_ops->freepage(page); |
@@ -641,7 +641,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page) | |||
641 | put_page(page); /* pagecache ref */ | 641 | put_page(page); /* pagecache ref */ |
642 | return 1; | 642 | return 1; |
643 | failed: | 643 | failed: |
644 | spin_unlock_irqrestore(&mapping->tree_lock, flags); | 644 | xa_unlock_irqrestore(&mapping->i_pages, flags); |
645 | return 0; | 645 | return 0; |
646 | } | 646 | } |
647 | 647 | ||