author		Matthew Wilcox <mawilcox@microsoft.com>		2018-04-10 19:36:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-11 13:28:39 -0400
commit		b93b016313b3ba8003c3b8bb71f569af91f19fc7 (patch)
tree		ad4be96414189dcdf8c972f351ba430996e9fdff /fs
parent		f6bb2a2c0b81c47282ddb7883f92e65a063c27dd (diff)
page cache: use xa_lock
Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root.  Rename the address_space ->page_tree to ->i_pages,
since we don't really care that it's a tree.

[willy@infradead.org: fix nds32, fs/dax.c]
  Link: http://lkml.kernel.org/r/20180406145415.GB20605@bombadil.infradead.org
Link: http://lkml.kernel.org/r/20180313132639.17387-9-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
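The conversion is mechanical: every reference to mapping->page_tree becomes mapping->i_pages, and every mapping->tree_lock acquisition becomes the xa_lock embedded in the struct radix_tree_root. A rough sketch of the resulting idiom, modelled on the __set_page_dirty() hunk in fs/buffer.c below (the helper name example_set_page_dirty is hypothetical and not part of this patch):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/xarray.h>

/*
 * Tag a page dirty in its mapping's radix tree.  After this patch the
 * tree lives in mapping->i_pages and is serialised by the xa_lock
 * embedded in it, taken via xa_lock_irqsave()/xa_unlock_irqrestore()
 * instead of the old spin_lock_irqsave(&mapping->tree_lock, flags).
 */
static void example_set_page_dirty(struct page *page,
				   struct address_space *mapping, int warn)
{
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->i_pages,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

Code that took tree_lock without saving flags uses xa_lock_irq()/xa_unlock_irq() on &mapping->i_pages in the same way, as the fs/dax.c hunks below do.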
Diffstat (limited to 'fs')
-rw-r--r--   fs/afs/write.c           |    9
-rw-r--r--   fs/btrfs/compression.c   |    2
-rw-r--r--   fs/btrfs/extent_io.c     |   16
-rw-r--r--   fs/buffer.c              |   13
-rw-r--r--   fs/cifs/file.c           |    9
-rw-r--r--   fs/dax.c                 |  124
-rw-r--r--   fs/f2fs/data.c           |    6
-rw-r--r--   fs/f2fs/dir.c            |    6
-rw-r--r--   fs/f2fs/inline.c         |    6
-rw-r--r--   fs/f2fs/node.c           |    8
-rw-r--r--   fs/fs-writeback.c        |   22
-rw-r--r--   fs/inode.c               |   11
-rw-r--r--   fs/nilfs2/btnode.c       |   20
-rw-r--r--   fs/nilfs2/page.c         |   22
14 files changed, 134 insertions(+), 140 deletions(-)
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 9370e2feb999..dbc3c0b0142d 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -570,10 +570,11 @@ static int afs_writepages_region(struct address_space *mapping,
 
 		_debug("wback %lx", page->index);
 
-		/* at this point we hold neither mapping->tree_lock nor lock on
-		 * the page itself: the page may be truncated or invalidated
-		 * (changing page->mapping to NULL), or even swizzled back from
-		 * swapper_space to tmpfs file mapping
+		/*
+		 * at this point we hold neither the i_pages lock nor the
+		 * page lock: the page may be truncated or invalidated
+		 * (changing page->mapping to NULL), or even swizzled
+		 * back from swapper_space to tmpfs file mapping
 		 */
 		ret = lock_page_killable(page);
 		if (ret < 0) {
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 562c3e633403..578181cd96b5 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -458,7 +458,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			break;
 
 		rcu_read_lock();
-		page = radix_tree_lookup(&mapping->page_tree, pg_index);
+		page = radix_tree_lookup(&mapping->i_pages, pg_index);
 		rcu_read_unlock();
 		if (page && !radix_tree_exceptional_entry(page)) {
 			misses++;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 47a8fe9d22e8..cf87976e389d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3963,11 +3963,11 @@ retry:
 
 			done_index = page->index;
 			/*
-			 * At this point we hold neither mapping->tree_lock nor
-			 * lock on the page itself: the page may be truncated or
-			 * invalidated (changing page->mapping to NULL), or even
-			 * swizzled back from swapper_space to tmpfs file
-			 * mapping
+			 * At this point we hold neither the i_pages lock nor
+			 * the page lock: the page may be truncated or
+			 * invalidated (changing page->mapping to NULL),
+			 * or even swizzled back from swapper_space to
+			 * tmpfs file mapping
 			 */
 			if (!trylock_page(page)) {
 				flush_write_bio(epd);
@@ -5174,13 +5174,13 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
 		WARN_ON(!PagePrivate(page));
 
 		clear_page_dirty_for_io(page);
-		spin_lock_irq(&page->mapping->tree_lock);
+		xa_lock_irq(&page->mapping->i_pages);
 		if (!PageDirty(page)) {
-			radix_tree_tag_clear(&page->mapping->page_tree,
+			radix_tree_tag_clear(&page->mapping->i_pages,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
 		}
-		spin_unlock_irq(&page->mapping->tree_lock);
+		xa_unlock_irq(&page->mapping->i_pages);
 		ClearPageError(page);
 		unlock_page(page);
 	}
diff --git a/fs/buffer.c b/fs/buffer.c
index 64b1e2065b6b..f3491074b035 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -185,10 +185,9 @@ EXPORT_SYMBOL(end_buffer_write_sync);
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
  * private_lock.
  *
- * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
+ * Hack idea: for the blockdev mapping, private_lock contention
  * may be quite high.  This code could TryLock the page, and if that
- * succeeds, there is no need to take private_lock. (But if
- * private_lock is contended then so is mapping->tree_lock).
+ * succeeds, there is no need to take private_lock.
  */
 static struct buffer_head *
 __find_get_block_slow(struct block_device *bdev, sector_t block)
@@ -599,14 +598,14 @@ void __set_page_dirty(struct page *page, struct address_space *mapping,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&mapping->tree_lock, flags);
+	xa_lock_irqsave(&mapping->i_pages, flags);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
 		account_page_dirtied(page, mapping);
-		radix_tree_tag_set(&mapping->page_tree,
+		radix_tree_tag_set(&mapping->i_pages,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+	xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
 EXPORT_SYMBOL_GPL(__set_page_dirty);
 
@@ -1096,7 +1095,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * inode list.
  *
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and mapping->host->i_lock.
+ * i_pages lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7cee97b93a61..4bcd4e838b47 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1987,11 +1987,10 @@ wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
 	for (i = 0; i < found_pages; i++) {
 		page = wdata->pages[i];
 		/*
-		 * At this point we hold neither mapping->tree_lock nor
-		 * lock on the page itself: the page may be truncated or
-		 * invalidated (changing page->mapping to NULL), or even
-		 * swizzled back from swapper_space to tmpfs file
-		 * mapping
+		 * At this point we hold neither the i_pages lock nor the
+		 * page lock: the page may be truncated or invalidated
+		 * (changing page->mapping to NULL), or even swizzled
+		 * back from swapper_space to tmpfs file mapping
 		 */
 
 		if (nr_pages == 0)
diff --git a/fs/dax.c b/fs/dax.c
index a77394fe586e..aaec72ded1b6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -158,11 +158,9 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
 }
 
 /*
- * We do not necessarily hold the mapping->tree_lock when we call this
- * function so it is possible that 'entry' is no longer a valid item in the
- * radix tree.  This is okay because all we really need to do is to find the
- * correct waitqueue where tasks might be waiting for that old 'entry' and
- * wake them.
+ * @entry may no longer be the entry at the index in the mapping.
+ * The important information it's conveying is whether the entry at
+ * this index used to be a PMD entry.
  */
 static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 		pgoff_t index, void *entry, bool wake_all)
@@ -174,7 +172,7 @@ static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 
 	/*
 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
-	 * under mapping->tree_lock, ditto for entry handling in our callers.
+	 * under the i_pages lock, ditto for entry handling in our callers.
 	 * So at this point all tasks that could have seen our entry locked
 	 * must be in the waitqueue and the following check will see them.
 	 */
@@ -183,41 +181,39 @@ static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 }
 
 /*
- * Check whether the given slot is locked. The function must be called with
- * mapping->tree_lock held
+ * Check whether the given slot is locked.  Must be called with the i_pages
+ * lock held.
  */
 static inline int slot_locked(struct address_space *mapping, void **slot)
 {
 	unsigned long entry = (unsigned long)
-		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 	return entry & RADIX_DAX_ENTRY_LOCK;
 }
 
 /*
- * Mark the given slot is locked. The function must be called with
- * mapping->tree_lock held
+ * Mark the given slot as locked.  Must be called with the i_pages lock held.
  */
 static inline void *lock_slot(struct address_space *mapping, void **slot)
 {
 	unsigned long entry = (unsigned long)
-		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 
 	entry |= RADIX_DAX_ENTRY_LOCK;
-	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
+	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
 	return (void *)entry;
 }
 
 /*
- * Mark the given slot is unlocked. The function must be called with
- * mapping->tree_lock held
+ * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
  */
 static inline void *unlock_slot(struct address_space *mapping, void **slot)
 {
 	unsigned long entry = (unsigned long)
-		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 
 	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
-	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
+	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
 	return (void *)entry;
 }
 
@@ -228,7 +224,7 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
  * put_locked_mapping_entry() when he locked the entry and now wants to
  * unlock it.
  *
- * The function must be called with mapping->tree_lock held.
+ * Must be called with the i_pages lock held.
  */
 static void *get_unlocked_mapping_entry(struct address_space *mapping,
 					pgoff_t index, void ***slotp)
@@ -241,7 +237,7 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	for (;;) {
-		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
+		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
 					  &slot);
 		if (!entry ||
 		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
@@ -254,10 +250,10 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
 		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		schedule();
 		finish_wait(wq, &ewait.wait);
-		spin_lock_irq(&mapping->tree_lock);
+		xa_lock_irq(&mapping->i_pages);
 	}
 }
 
@@ -266,15 +262,15 @@ static void dax_unlock_mapping_entry(struct address_space *mapping,
 {
 	void *entry, **slot;
 
-	spin_lock_irq(&mapping->tree_lock);
-	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+	xa_lock_irq(&mapping->i_pages);
+	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
 	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
 			 !slot_locked(mapping, slot))) {
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return;
 	}
 	unlock_slot(mapping, slot);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);
 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 }
 
@@ -388,7 +384,7 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 	void *entry, **slot;
 
 restart:
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(&mapping->i_pages);
 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 
 	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
@@ -420,12 +416,12 @@ restart:
 		if (pmd_downgrade) {
 			/*
 			 * Make sure 'entry' remains valid while we drop
-			 * mapping->tree_lock.
+			 * the i_pages lock.
 			 */
 			entry = lock_slot(mapping, slot);
 		}
 
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		/*
 		 * Besides huge zero pages the only other thing that gets
 		 * downgraded are empty entries which don't need to be
@@ -442,27 +438,27 @@ restart:
 			put_locked_mapping_entry(mapping, index);
 			return ERR_PTR(err);
 		}
-		spin_lock_irq(&mapping->tree_lock);
+		xa_lock_irq(&mapping->i_pages);
 
 		if (!entry) {
 			/*
-			 * We needed to drop the page_tree lock while calling
+			 * We needed to drop the i_pages lock while calling
 			 * radix_tree_preload() and we didn't have an entry to
 			 * lock.  See if another thread inserted an entry at
 			 * our index during this time.
 			 */
-			entry = __radix_tree_lookup(&mapping->page_tree, index,
+			entry = __radix_tree_lookup(&mapping->i_pages, index,
 					NULL, &slot);
 			if (entry) {
 				radix_tree_preload_end();
-				spin_unlock_irq(&mapping->tree_lock);
+				xa_unlock_irq(&mapping->i_pages);
 				goto restart;
 			}
 		}
 
 		if (pmd_downgrade) {
 			dax_disassociate_entry(entry, mapping, false);
-			radix_tree_delete(&mapping->page_tree, index);
+			radix_tree_delete(&mapping->i_pages, index);
 			mapping->nrexceptional--;
 			dax_wake_mapping_entry_waiter(mapping, index, entry,
 					true);
@@ -470,11 +466,11 @@ restart:
 
 		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
 
-		err = __radix_tree_insert(&mapping->page_tree, index,
+		err = __radix_tree_insert(&mapping->i_pages, index,
 				dax_radix_order(entry), entry);
 		radix_tree_preload_end();
 		if (err) {
-			spin_unlock_irq(&mapping->tree_lock);
+			xa_unlock_irq(&mapping->i_pages);
 			/*
 			 * Our insertion of a DAX entry failed, most likely
 			 * because we were inserting a PMD entry and it
@@ -487,12 +483,12 @@ restart:
 		}
 		/* Good, we have inserted empty locked entry into the tree. */
 		mapping->nrexceptional++;
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return entry;
 	}
 	entry = lock_slot(mapping, slot);
  out_unlock:
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);
 	return entry;
 }
 
@@ -501,23 +497,23 @@ static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 {
 	int ret = 0;
 	void *entry;
-	struct radix_tree_root *page_tree = &mapping->page_tree;
+	struct radix_tree_root *pages = &mapping->i_pages;
 
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(pages);
 	entry = get_unlocked_mapping_entry(mapping, index, NULL);
 	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
 		goto out;
 	if (!trunc &&
-	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
-	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
+	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
+	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
 		goto out;
 	dax_disassociate_entry(entry, mapping, trunc);
-	radix_tree_delete(page_tree, index);
+	radix_tree_delete(pages, index);
 	mapping->nrexceptional--;
 	ret = 1;
 out:
 	put_unlocked_mapping_entry(mapping, index, entry);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(pages);
 	return ret;
 }
 /*
@@ -587,7 +583,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 				      void *entry, pfn_t pfn_t,
 				      unsigned long flags, bool dirty)
 {
-	struct radix_tree_root *page_tree = &mapping->page_tree;
+	struct radix_tree_root *pages = &mapping->i_pages;
 	unsigned long pfn = pfn_t_to_pfn(pfn_t);
 	pgoff_t index = vmf->pgoff;
 	void *new_entry;
@@ -604,7 +600,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 		unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 	}
 
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(pages);
 	new_entry = dax_radix_locked_entry(pfn, flags);
 	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
 		dax_disassociate_entry(entry, mapping, false);
@@ -624,17 +620,17 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 		void **slot;
 		void *ret;
 
-		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
+		ret = __radix_tree_lookup(pages, index, &node, &slot);
 		WARN_ON_ONCE(ret != entry);
-		__radix_tree_replace(page_tree, node, slot,
+		__radix_tree_replace(pages, node, slot,
 				     new_entry, NULL);
 		entry = new_entry;
 	}
 
 	if (dirty)
-		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
+		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
 
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(pages);
 	return entry;
 }
 
@@ -723,7 +719,7 @@ unlock_pte:
 static int dax_writeback_one(struct dax_device *dax_dev,
 		struct address_space *mapping, pgoff_t index, void *entry)
 {
-	struct radix_tree_root *page_tree = &mapping->page_tree;
+	struct radix_tree_root *pages = &mapping->i_pages;
 	void *entry2, **slot;
 	unsigned long pfn;
 	long ret = 0;
@@ -736,7 +732,7 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
 		return -EIO;
 
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(pages);
 	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
 	/* Entry got punched out / reallocated? */
 	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
@@ -755,7 +751,7 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	}
 
 	/* Another fsync thread may have already written back this entry */
-	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
+	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
 		goto put_unlocked;
 	/* Lock the entry to serialize with page faults */
 	entry = lock_slot(mapping, slot);
@@ -763,11 +759,11 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	 * We can clear the tag now but we have to be careful so that concurrent
 	 * dax_writeback_one() calls for the same index cannot finish before we
 	 * actually flush the caches. This is achieved as the calls will look
-	 * at the entry only under tree_lock and once they do that they will
-	 * see the entry locked and wait for it to unlock.
+	 * at the entry only under the i_pages lock and once they do that
+	 * they will see the entry locked and wait for it to unlock.
 	 */
-	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
-	spin_unlock_irq(&mapping->tree_lock);
+	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
+	xa_unlock_irq(pages);
 
 	/*
 	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
@@ -787,16 +783,16 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	 * the pfn mappings are writeprotected and fault waits for mapping
 	 * entry lock.
 	 */
-	spin_lock_irq(&mapping->tree_lock);
-	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_lock_irq(pages);
+	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
+	xa_unlock_irq(pages);
 	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 	put_locked_mapping_entry(mapping, index);
 	return ret;
 
  put_unlocked:
 	put_unlocked_mapping_entry(mapping, index, entry2);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(pages);
 	return ret;
 }
 
@@ -1566,21 +1562,21 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 	pgoff_t index = vmf->pgoff;
 	int vmf_ret, error;
 
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(&mapping->i_pages);
 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 	/* Did we race with someone splitting entry or so? */
 	if (!entry ||
 	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
 	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
 		put_unlocked_mapping_entry(mapping, index, entry);
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
 						      VM_FAULT_NOPAGE);
 		return VM_FAULT_NOPAGE;
 	}
-	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
+	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
 	entry = lock_slot(mapping, slot);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);
 	switch (pe_size) {
 	case PE_SIZE_PTE:
 		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index db50686f5096..02237d4d91f5 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2424,12 +2424,12 @@ void f2fs_set_page_dirty_nobuffers(struct page *page)
 	SetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
-	spin_lock_irqsave(&mapping->tree_lock, flags);
+	xa_lock_irqsave(&mapping->i_pages, flags);
 	WARN_ON_ONCE(!PageUptodate(page));
 	account_page_dirtied(page, mapping);
-	radix_tree_tag_set(&mapping->page_tree,
+	radix_tree_tag_set(&mapping->i_pages,
 			page_index(page), PAGECACHE_TAG_DIRTY);
-	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+	xa_unlock_irqrestore(&mapping->i_pages, flags);
 	unlock_page_memcg(page);
 
 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index fe661274ff10..8c9c2f31b253 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -732,10 +732,10 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
 
 	if (bit_pos == NR_DENTRY_IN_BLOCK &&
 		!truncate_hole(dir, page->index, page->index + 1)) {
-		spin_lock_irqsave(&mapping->tree_lock, flags);
-		radix_tree_tag_clear(&mapping->page_tree, page_index(page),
+		xa_lock_irqsave(&mapping->i_pages, flags);
+		radix_tree_tag_clear(&mapping->i_pages, page_index(page),
 				     PAGECACHE_TAG_DIRTY);
-		spin_unlock_irqrestore(&mapping->tree_lock, flags);
+		xa_unlock_irqrestore(&mapping->i_pages, flags);
 
 		clear_page_dirty_for_io(page);
 		ClearPagePrivate(page);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 3b77d6421218..265da200daa8 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -226,10 +226,10 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
 	kunmap_atomic(src_addr);
 	set_page_dirty(dn.inode_page);
 
-	spin_lock_irqsave(&mapping->tree_lock, flags);
-	radix_tree_tag_clear(&mapping->page_tree, page_index(page),
+	xa_lock_irqsave(&mapping->i_pages, flags);
+	radix_tree_tag_clear(&mapping->i_pages, page_index(page),
 			     PAGECACHE_TAG_DIRTY);
-	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+	xa_unlock_irqrestore(&mapping->i_pages, flags);
 
 	set_inode_flag(inode, FI_APPEND_WRITE);
 	set_inode_flag(inode, FI_DATA_EXIST);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9a99243054ba..f202398e20ea 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -91,11 +91,11 @@ static void clear_node_page_dirty(struct page *page)
 	unsigned int long flags;
 
 	if (PageDirty(page)) {
-		spin_lock_irqsave(&mapping->tree_lock, flags);
-		radix_tree_tag_clear(&mapping->page_tree,
+		xa_lock_irqsave(&mapping->i_pages, flags);
+		radix_tree_tag_clear(&mapping->i_pages,
 				page_index(page),
 				PAGECACHE_TAG_DIRTY);
-		spin_unlock_irqrestore(&mapping->tree_lock, flags);
+		xa_unlock_irqrestore(&mapping->i_pages, flags);
 
 		clear_page_dirty_for_io(page);
 		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
@@ -1161,7 +1161,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
 
 	rcu_read_lock();
-	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
+	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid);
 	rcu_read_unlock();
 	if (apage)
 		return;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1280f915079b..4b12ba70a895 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -347,9 +347,9 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
 	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
-	 * synchronizing against mapping->tree_lock.
+	 * synchronizing against the i_pages lock.
 	 *
-	 * Grabbing old_wb->list_lock, inode->i_lock and mapping->tree_lock
+	 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
 	 * gives us exclusion against all wb related operations on @inode
 	 * including IO list manipulations and stat updates.
 	 */
@@ -361,7 +361,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 	}
 	spin_lock(&inode->i_lock);
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(&mapping->i_pages);
 
 	/*
 	 * Once I_FREEING is visible under i_lock, the eviction path owns
@@ -373,22 +373,22 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	/*
 	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
 	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
-	 * pages actually under underwriteback.
+	 * pages actually under writeback.
 	 */
-	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
+	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
 				   PAGECACHE_TAG_DIRTY) {
 		struct page *page = radix_tree_deref_slot_protected(slot,
-							&mapping->tree_lock);
+						&mapping->i_pages.xa_lock);
 		if (likely(page) && PageDirty(page)) {
 			dec_wb_stat(old_wb, WB_RECLAIMABLE);
 			inc_wb_stat(new_wb, WB_RECLAIMABLE);
 		}
 	}
 
-	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
+	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
 				   PAGECACHE_TAG_WRITEBACK) {
 		struct page *page = radix_tree_deref_slot_protected(slot,
-							&mapping->tree_lock);
+						&mapping->i_pages.xa_lock);
 		if (likely(page)) {
 			WARN_ON_ONCE(!PageWriteback(page));
 			dec_wb_stat(old_wb, WB_WRITEBACK);
@@ -430,7 +430,7 @@ skip_switch:
 	 */
 	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
 
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&new_wb->list_lock);
 	spin_unlock(&old_wb->list_lock);
@@ -506,8 +506,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 
 	/*
 	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
-	 * the RCU protected stat update paths to grab the mapping's
-	 * tree_lock so that stat transfer can synchronize against them.
+	 * the RCU protected stat update paths to grab the i_page
+	 * lock so that stat transfer can synchronize against them.
 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 	 */
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
diff --git a/fs/inode.c b/fs/inode.c
index b153aeaa61ea..13ceb98c3bd3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -348,8 +348,7 @@ EXPORT_SYMBOL(inc_nlink);
 
 static void __address_space_init_once(struct address_space *mapping)
 {
-	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
-	spin_lock_init(&mapping->tree_lock);
+	INIT_RADIX_TREE(&mapping->i_pages, GFP_ATOMIC | __GFP_ACCOUNT);
 	init_rwsem(&mapping->i_mmap_rwsem);
 	INIT_LIST_HEAD(&mapping->private_list);
 	spin_lock_init(&mapping->private_lock);
@@ -504,14 +503,14 @@ EXPORT_SYMBOL(__remove_inode_hash);
 void clear_inode(struct inode *inode)
 {
 	/*
-	 * We have to cycle tree_lock here because reclaim can be still in the
+	 * We have to cycle the i_pages lock here because reclaim can be in the
 	 * process of removing the last page (in __delete_from_page_cache())
-	 * and we must not free mapping under it.
+	 * and we must not free the mapping under it.
 	 */
-	spin_lock_irq(&inode->i_data.tree_lock);
+	xa_lock_irq(&inode->i_data.i_pages);
 	BUG_ON(inode->i_data.nrpages);
 	BUG_ON(inode->i_data.nrexceptional);
-	spin_unlock_irq(&inode->i_data.tree_lock);
+	xa_unlock_irq(&inode->i_data.i_pages);
 	BUG_ON(!list_empty(&inode->i_data.private_list));
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(inode->i_state & I_CLEAR);
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index c21e0b4454a6..dec98cab729d 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -193,9 +193,9 @@ retry:
 			(unsigned long long)oldkey,
 			(unsigned long long)newkey);
 
-	spin_lock_irq(&btnc->tree_lock);
-	err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
-	spin_unlock_irq(&btnc->tree_lock);
+	xa_lock_irq(&btnc->i_pages);
+	err = radix_tree_insert(&btnc->i_pages, newkey, obh->b_page);
+	xa_unlock_irq(&btnc->i_pages);
 	/*
 	 * Note: page->index will not change to newkey until
 	 * nilfs_btnode_commit_change_key() will be called.
@@ -251,11 +251,11 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
 			(unsigned long long)newkey);
 		mark_buffer_dirty(obh);
 
-		spin_lock_irq(&btnc->tree_lock);
-		radix_tree_delete(&btnc->page_tree, oldkey);
-		radix_tree_tag_set(&btnc->page_tree, newkey,
+		xa_lock_irq(&btnc->i_pages);
+		radix_tree_delete(&btnc->i_pages, oldkey);
+		radix_tree_tag_set(&btnc->i_pages, newkey,
 				   PAGECACHE_TAG_DIRTY);
-		spin_unlock_irq(&btnc->tree_lock);
+		xa_unlock_irq(&btnc->i_pages);
 
 		opage->index = obh->b_blocknr = newkey;
 		unlock_page(opage);
@@ -283,9 +283,9 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
 		return;
 
 	if (nbh == NULL) {	/* blocksize == pagesize */
-		spin_lock_irq(&btnc->tree_lock);
-		radix_tree_delete(&btnc->page_tree, newkey);
-		spin_unlock_irq(&btnc->tree_lock);
+		xa_lock_irq(&btnc->i_pages);
+		radix_tree_delete(&btnc->i_pages, newkey);
+		xa_unlock_irq(&btnc->i_pages);
 		unlock_page(ctxt->bh->b_page);
 	} else
 		brelse(nbh);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 68241512d7c1..4cb850a6f1c2 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -331,15 +331,15 @@ repeat:
 		struct page *page2;
 
 		/* move the page to the destination cache */
-		spin_lock_irq(&smap->tree_lock);
-		page2 = radix_tree_delete(&smap->page_tree, offset);
+		xa_lock_irq(&smap->i_pages);
+		page2 = radix_tree_delete(&smap->i_pages, offset);
 		WARN_ON(page2 != page);
 
 		smap->nrpages--;
-		spin_unlock_irq(&smap->tree_lock);
+		xa_unlock_irq(&smap->i_pages);
 
-		spin_lock_irq(&dmap->tree_lock);
-		err = radix_tree_insert(&dmap->page_tree, offset, page);
+		xa_lock_irq(&dmap->i_pages);
+		err = radix_tree_insert(&dmap->i_pages, offset, page);
 		if (unlikely(err < 0)) {
 			WARN_ON(err == -EEXIST);
 			page->mapping = NULL;
@@ -348,11 +348,11 @@ repeat:
 			page->mapping = dmap;
 			dmap->nrpages++;
 			if (PageDirty(page))
-				radix_tree_tag_set(&dmap->page_tree,
+				radix_tree_tag_set(&dmap->i_pages,
 						   offset,
 						   PAGECACHE_TAG_DIRTY);
 		}
-		spin_unlock_irq(&dmap->tree_lock);
+		xa_unlock_irq(&dmap->i_pages);
 	}
 	unlock_page(page);
 }
@@ -474,15 +474,15 @@ int __nilfs_clear_page_dirty(struct page *page)
 	struct address_space *mapping = page->mapping;
 
 	if (mapping) {
-		spin_lock_irq(&mapping->tree_lock);
+		xa_lock_irq(&mapping->i_pages);
 		if (test_bit(PG_dirty, &page->flags)) {
-			radix_tree_tag_clear(&mapping->page_tree,
+			radix_tree_tag_clear(&mapping->i_pages,
 					     page_index(page),
 					     PAGECACHE_TAG_DIRTY);
-			spin_unlock_irq(&mapping->tree_lock);
+			xa_unlock_irq(&mapping->i_pages);
 			return clear_page_dirty_for_io(page);
 		}
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return 0;
 	}
 	return TestClearPageDirty(page);