diff options
author | Matthew Wilcox <mawilcox@microsoft.com> | 2018-04-10 19:36:56 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-11 13:28:39 -0400 |
commit | b93b016313b3ba8003c3b8bb71f569af91f19fc7 (patch) | |
tree | ad4be96414189dcdf8c972f351ba430996e9fdff /mm/swap_state.c | |
parent | f6bb2a2c0b81c47282ddb7883f92e65a063c27dd (diff) |
page cache: use xa_lock
Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root. Rename the address_space ->page_tree to ->i_pages,
since we don't really care that it's a tree.
[willy@infradead.org: fix nds32, fs/dax.c]
Link: http://lkml.kernel.org/r/20180406145415.GB20605@bombadil.infradead.org
Link: http://lkml.kernel.org/r/20180313132639.17387-9-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r-- | mm/swap_state.c | 17 |
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f233dccd3b1b..07f9aa2340c3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -124,10 +124,10 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry) | |||
124 | SetPageSwapCache(page); | 124 | SetPageSwapCache(page); |
125 | 125 | ||
126 | address_space = swap_address_space(entry); | 126 | address_space = swap_address_space(entry); |
127 | spin_lock_irq(&address_space->tree_lock); | 127 | xa_lock_irq(&address_space->i_pages); |
128 | for (i = 0; i < nr; i++) { | 128 | for (i = 0; i < nr; i++) { |
129 | set_page_private(page + i, entry.val + i); | 129 | set_page_private(page + i, entry.val + i); |
130 | error = radix_tree_insert(&address_space->page_tree, | 130 | error = radix_tree_insert(&address_space->i_pages, |
131 | idx + i, page + i); | 131 | idx + i, page + i); |
132 | if (unlikely(error)) | 132 | if (unlikely(error)) |
133 | break; | 133 | break; |
@@ -145,13 +145,13 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry) | |||
145 | VM_BUG_ON(error == -EEXIST); | 145 | VM_BUG_ON(error == -EEXIST); |
146 | set_page_private(page + i, 0UL); | 146 | set_page_private(page + i, 0UL); |
147 | while (i--) { | 147 | while (i--) { |
148 | radix_tree_delete(&address_space->page_tree, idx + i); | 148 | radix_tree_delete(&address_space->i_pages, idx + i); |
149 | set_page_private(page + i, 0UL); | 149 | set_page_private(page + i, 0UL); |
150 | } | 150 | } |
151 | ClearPageSwapCache(page); | 151 | ClearPageSwapCache(page); |
152 | page_ref_sub(page, nr); | 152 | page_ref_sub(page, nr); |
153 | } | 153 | } |
154 | spin_unlock_irq(&address_space->tree_lock); | 154 | xa_unlock_irq(&address_space->i_pages); |
155 | 155 | ||
156 | return error; | 156 | return error; |
157 | } | 157 | } |
@@ -188,7 +188,7 @@ void __delete_from_swap_cache(struct page *page) | |||
188 | address_space = swap_address_space(entry); | 188 | address_space = swap_address_space(entry); |
189 | idx = swp_offset(entry); | 189 | idx = swp_offset(entry); |
190 | for (i = 0; i < nr; i++) { | 190 | for (i = 0; i < nr; i++) { |
191 | radix_tree_delete(&address_space->page_tree, idx + i); | 191 | radix_tree_delete(&address_space->i_pages, idx + i); |
192 | set_page_private(page + i, 0); | 192 | set_page_private(page + i, 0); |
193 | } | 193 | } |
194 | ClearPageSwapCache(page); | 194 | ClearPageSwapCache(page); |
@@ -272,9 +272,9 @@ void delete_from_swap_cache(struct page *page) | |||
272 | entry.val = page_private(page); | 272 | entry.val = page_private(page); |
273 | 273 | ||
274 | address_space = swap_address_space(entry); | 274 | address_space = swap_address_space(entry); |
275 | spin_lock_irq(&address_space->tree_lock); | 275 | xa_lock_irq(&address_space->i_pages); |
276 | __delete_from_swap_cache(page); | 276 | __delete_from_swap_cache(page); |
277 | spin_unlock_irq(&address_space->tree_lock); | 277 | xa_unlock_irq(&address_space->i_pages); |
278 | 278 | ||
279 | put_swap_page(page, entry); | 279 | put_swap_page(page, entry); |
280 | page_ref_sub(page, hpage_nr_pages(page)); | 280 | page_ref_sub(page, hpage_nr_pages(page)); |
@@ -628,12 +628,11 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages) | |||
628 | return -ENOMEM; | 628 | return -ENOMEM; |
629 | for (i = 0; i < nr; i++) { | 629 | for (i = 0; i < nr; i++) { |
630 | space = spaces + i; | 630 | space = spaces + i; |
631 | INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN); | 631 | INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN); |
632 | atomic_set(&space->i_mmap_writable, 0); | 632 | atomic_set(&space->i_mmap_writable, 0); |
633 | space->a_ops = &swap_aops; | 633 | space->a_ops = &swap_aops; |
634 | /* swap cache doesn't use writeback related tags */ | 634 | /* swap cache doesn't use writeback related tags */ |
635 | mapping_set_no_writeback_tags(space); | 635 | mapping_set_no_writeback_tags(space); |
636 | spin_lock_init(&space->tree_lock); | ||
637 | } | 636 | } |
638 | nr_swapper_spaces[type] = nr; | 637 | nr_swapper_spaces[type] = nr; |
639 | rcu_assign_pointer(swapper_spaces[type], spaces); | 638 | rcu_assign_pointer(swapper_spaces[type], spaces); |