author     Matthew Wilcox <mawilcox@microsoft.com>          2018-04-10 19:36:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-04-11 13:28:39 -0400
commit     b93b016313b3ba8003c3b8bb71f569af91f19fc7 (patch)
tree       ad4be96414189dcdf8c972f351ba430996e9fdff /fs/dax.c
parent     f6bb2a2c0b81c47282ddb7883f92e65a063c27dd (diff)
page cache: use xa_lock
Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root. Rename the address_space ->page_tree to ->i_pages,
since we don't really care that it's a tree.
[willy@infradead.org: fix nds32, fs/dax.c]
Link: http://lkml.kernel.org/r/20180406145415.GB20605@bombadil.infradead.org
Link: http://lkml.kernel.org/r/20180313132639.17387-9-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/dax.c')
-rw-r--r--  fs/dax.c | 124
1 file changed, 60 insertions, 64 deletions
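The conversion in the hunks below is mechanical: every spin_lock_irq(&mapping->tree_lock) / spin_unlock_irq(&mapping->tree_lock) pair becomes xa_lock_irq(&mapping->i_pages) / xa_unlock_irq(&mapping->i_pages), ->page_tree becomes ->i_pages, and callers that still need the raw spinlock (radix_tree_deref_slot_protected()) pass &mapping->i_pages.xa_lock instead of &mapping->tree_lock. A minimal sketch of the resulting pattern, assuming a kernel with this patch applied; the helper name dax_zap_entry() and its body are illustrative only and not taken from the commit:

#include <linux/fs.h>		/* struct address_space, ->i_pages */
#include <linux/radix-tree.h>
#include <linux/xarray.h>	/* xa_lock_irq()/xa_unlock_irq() */

/* Illustrative only: drop one exceptional entry under the i_pages lock. */
static void dax_zap_entry(struct address_space *mapping, pgoff_t index)
{
	xa_lock_irq(&mapping->i_pages);		/* was: spin_lock_irq(&mapping->tree_lock) */
	radix_tree_delete(&mapping->i_pages, index);
	mapping->nrexceptional--;
	xa_unlock_irq(&mapping->i_pages);	/* was: spin_unlock_irq(&mapping->tree_lock) */
}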
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -158,11 +158,9 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
 }
 
 /*
- * We do not necessarily hold the mapping->tree_lock when we call this
- * function so it is possible that 'entry' is no longer a valid item in the
- * radix tree. This is okay because all we really need to do is to find the
- * correct waitqueue where tasks might be waiting for that old 'entry' and
- * wake them.
+ * @entry may no longer be the entry at the index in the mapping.
+ * The important information it's conveying is whether the entry at
+ * this index used to be a PMD entry.
  */
 static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 		pgoff_t index, void *entry, bool wake_all)
@@ -174,7 +172,7 @@ static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 
 	/*
 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
-	 * under mapping->tree_lock, ditto for entry handling in our callers.
+	 * under the i_pages lock, ditto for entry handling in our callers.
 	 * So at this point all tasks that could have seen our entry locked
 	 * must be in the waitqueue and the following check will see them.
 	 */
@@ -183,41 +181,39 @@ static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 }
 
 /*
- * Check whether the given slot is locked. The function must be called with
- * mapping->tree_lock held
+ * Check whether the given slot is locked. Must be called with the i_pages
+ * lock held.
  */
 static inline int slot_locked(struct address_space *mapping, void **slot)
 {
 	unsigned long entry = (unsigned long)
-		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 	return entry & RADIX_DAX_ENTRY_LOCK;
 }
 
 /*
- * Mark the given slot is locked. The function must be called with
- * mapping->tree_lock held
+ * Mark the given slot as locked. Must be called with the i_pages lock held.
  */
 static inline void *lock_slot(struct address_space *mapping, void **slot)
 {
 	unsigned long entry = (unsigned long)
-		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 
 	entry |= RADIX_DAX_ENTRY_LOCK;
-	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
+	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
 	return (void *)entry;
 }
 
 /*
- * Mark the given slot is unlocked. The function must be called with
- * mapping->tree_lock held
+ * Mark the given slot as unlocked. Must be called with the i_pages lock held.
  */
 static inline void *unlock_slot(struct address_space *mapping, void **slot)
 {
 	unsigned long entry = (unsigned long)
-		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
 
 	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
-	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
+	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
 	return (void *)entry;
 }
 
@@ -228,7 +224,7 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
  * put_locked_mapping_entry() when he locked the entry and now wants to
  * unlock it.
  *
- * The function must be called with mapping->tree_lock held.
+ * Must be called with the i_pages lock held.
  */
 static void *get_unlocked_mapping_entry(struct address_space *mapping,
 		pgoff_t index, void ***slotp)
@@ -241,7 +237,7 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	for (;;) {
-		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
+		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
 					  &slot);
 		if (!entry ||
 		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
@@ -254,10 +250,10 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
 		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		schedule();
 		finish_wait(wq, &ewait.wait);
-		spin_lock_irq(&mapping->tree_lock);
+		xa_lock_irq(&mapping->i_pages);
 	}
 }
 
@@ -266,15 +262,15 @@ static void dax_unlock_mapping_entry(struct address_space *mapping,
 {
 	void *entry, **slot;
 
-	spin_lock_irq(&mapping->tree_lock);
-	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+	xa_lock_irq(&mapping->i_pages);
+	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
 	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
 			 !slot_locked(mapping, slot))) {
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return;
 	}
 	unlock_slot(mapping, slot);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);
 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 }
 
@@ -388,7 +384,7 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 	void *entry, **slot;
 
 restart:
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(&mapping->i_pages);
 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 
 	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
@@ -420,12 +416,12 @@ restart:
 		if (pmd_downgrade) {
 			/*
 			 * Make sure 'entry' remains valid while we drop
-			 * mapping->tree_lock.
+			 * the i_pages lock.
 			 */
 			entry = lock_slot(mapping, slot);
 		}
 
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		/*
 		 * Besides huge zero pages the only other thing that gets
 		 * downgraded are empty entries which don't need to be
@@ -442,27 +438,27 @@ restart:
 				put_locked_mapping_entry(mapping, index);
 			return ERR_PTR(err);
 		}
-		spin_lock_irq(&mapping->tree_lock);
+		xa_lock_irq(&mapping->i_pages);
 
 		if (!entry) {
 			/*
-			 * We needed to drop the page_tree lock while calling
+			 * We needed to drop the i_pages lock while calling
 			 * radix_tree_preload() and we didn't have an entry to
 			 * lock. See if another thread inserted an entry at
 			 * our index during this time.
 			 */
-			entry = __radix_tree_lookup(&mapping->page_tree, index,
+			entry = __radix_tree_lookup(&mapping->i_pages, index,
 					NULL, &slot);
 			if (entry) {
 				radix_tree_preload_end();
-				spin_unlock_irq(&mapping->tree_lock);
+				xa_unlock_irq(&mapping->i_pages);
 				goto restart;
 			}
 		}
 
 		if (pmd_downgrade) {
 			dax_disassociate_entry(entry, mapping, false);
-			radix_tree_delete(&mapping->page_tree, index);
+			radix_tree_delete(&mapping->i_pages, index);
 			mapping->nrexceptional--;
 			dax_wake_mapping_entry_waiter(mapping, index, entry,
 					true);
@@ -470,11 +466,11 @@ restart:
 
 		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
 
-		err = __radix_tree_insert(&mapping->page_tree, index,
+		err = __radix_tree_insert(&mapping->i_pages, index,
 				dax_radix_order(entry), entry);
 		radix_tree_preload_end();
 		if (err) {
-			spin_unlock_irq(&mapping->tree_lock);
+			xa_unlock_irq(&mapping->i_pages);
 			/*
 			 * Our insertion of a DAX entry failed, most likely
 			 * because we were inserting a PMD entry and it
@@ -487,12 +483,12 @@ restart:
 		}
 		/* Good, we have inserted empty locked entry into the tree. */
 		mapping->nrexceptional++;
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return entry;
 	}
 	entry = lock_slot(mapping, slot);
  out_unlock:
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);
 	return entry;
 }
 
@@ -501,23 +497,23 @@ static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 {
 	int ret = 0;
 	void *entry;
-	struct radix_tree_root *page_tree = &mapping->page_tree;
+	struct radix_tree_root *pages = &mapping->i_pages;
 
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(pages);
 	entry = get_unlocked_mapping_entry(mapping, index, NULL);
 	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
 		goto out;
 	if (!trunc &&
-	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
-	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
+	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
+	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
 		goto out;
 	dax_disassociate_entry(entry, mapping, trunc);
-	radix_tree_delete(page_tree, index);
+	radix_tree_delete(pages, index);
 	mapping->nrexceptional--;
 	ret = 1;
 out:
 	put_unlocked_mapping_entry(mapping, index, entry);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(pages);
 	return ret;
 }
 /*
@@ -587,7 +583,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 				      void *entry, pfn_t pfn_t,
 				      unsigned long flags, bool dirty)
 {
-	struct radix_tree_root *page_tree = &mapping->page_tree;
+	struct radix_tree_root *pages = &mapping->i_pages;
 	unsigned long pfn = pfn_t_to_pfn(pfn_t);
 	pgoff_t index = vmf->pgoff;
 	void *new_entry;
@@ -604,7 +600,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 		unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 	}
 
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(pages);
 	new_entry = dax_radix_locked_entry(pfn, flags);
 	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
 		dax_disassociate_entry(entry, mapping, false);
@@ -624,17 +620,17 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 		void **slot;
 		void *ret;
 
-		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
+		ret = __radix_tree_lookup(pages, index, &node, &slot);
 		WARN_ON_ONCE(ret != entry);
-		__radix_tree_replace(page_tree, node, slot,
+		__radix_tree_replace(pages, node, slot,
 				     new_entry, NULL);
 		entry = new_entry;
 	}
 
 	if (dirty)
-		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
+		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
 
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(pages);
 	return entry;
 }
 
@@ -723,7 +719,7 @@ unlock_pte:
 static int dax_writeback_one(struct dax_device *dax_dev,
 		struct address_space *mapping, pgoff_t index, void *entry)
 {
-	struct radix_tree_root *page_tree = &mapping->page_tree;
+	struct radix_tree_root *pages = &mapping->i_pages;
 	void *entry2, **slot;
 	unsigned long pfn;
 	long ret = 0;
@@ -736,7 +732,7 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
 		return -EIO;
 
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(pages);
 	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
 	/* Entry got punched out / reallocated? */
 	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
@@ -755,7 +751,7 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	}
 
 	/* Another fsync thread may have already written back this entry */
-	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
+	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
 		goto put_unlocked;
 	/* Lock the entry to serialize with page faults */
 	entry = lock_slot(mapping, slot);
@@ -763,11 +759,11 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	 * We can clear the tag now but we have to be careful so that concurrent
 	 * dax_writeback_one() calls for the same index cannot finish before we
 	 * actually flush the caches. This is achieved as the calls will look
-	 * at the entry only under tree_lock and once they do that they will
-	 * see the entry locked and wait for it to unlock.
+	 * at the entry only under the i_pages lock and once they do that
+	 * they will see the entry locked and wait for it to unlock.
 	 */
-	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
-	spin_unlock_irq(&mapping->tree_lock);
+	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
+	xa_unlock_irq(pages);
 
 	/*
 	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
@@ -787,16 +783,16 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	 * the pfn mappings are writeprotected and fault waits for mapping
 	 * entry lock.
 	 */
-	spin_lock_irq(&mapping->tree_lock);
-	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_lock_irq(pages);
+	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
+	xa_unlock_irq(pages);
 	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 	put_locked_mapping_entry(mapping, index);
 	return ret;
 
  put_unlocked:
 	put_unlocked_mapping_entry(mapping, index, entry2);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(pages);
 	return ret;
 }
 
@@ -1566,21 +1562,21 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 	pgoff_t index = vmf->pgoff;
 	int vmf_ret, error;
 
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(&mapping->i_pages);
 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 	/* Did we race with someone splitting entry or so? */
 	if (!entry ||
 	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
 	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
 		put_unlocked_mapping_entry(mapping, index, entry);
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
 						      VM_FAULT_NOPAGE);
 		return VM_FAULT_NOPAGE;
 	}
-	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
+	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
 	entry = lock_slot(mapping, slot);
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);
 	switch (pe_size) {
 	case PE_SIZE_PTE:
 		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
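A note on the two locking forms visible in the converted code: the xa_lock_irq()/xa_unlock_irq() wrappers and the raw &mapping->i_pages.xa_lock handed to radix_tree_deref_slot_protected() both name the same spinlock, which the parent patch in this series embedded in struct radix_tree_root. From memory, the wrappers introduced there are thin macros over that embedded lock, roughly:

/* Approximate sketch of the xa_lock wrappers (include/linux/xarray.h, 4.17-era). */
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)

So this commit does not change locking semantics in fs/dax.c; it only changes where the lock lives and what it is called.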