Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r-- | fs/fs-writeback.c | 22 |
1 files changed, 11 insertions, 11 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1280f915079b..4b12ba70a895 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -347,9 +347,9 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
 	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
-	 * synchronizing against mapping->tree_lock.
+	 * synchronizing against the i_pages lock.
 	 *
-	 * Grabbing old_wb->list_lock, inode->i_lock and mapping->tree_lock
+	 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
 	 * gives us exclusion against all wb related operations on @inode
 	 * including IO list manipulations and stat updates.
 	 */
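The "i_pages lock" named in the rewritten comment is not a new field in struct address_space: it is the spinlock embedded in the mapping's i_pages tree, reached through wrapper macros this series adds in include/linux/xarray.h. A minimal paraphrase of the two wrappers used in this file, assuming the upstream definitions and shown only for orientation:

	/* The lock is a member of the tree itself, so &mapping->i_pages.xa_lock
	 * later in this diff names the very spinlock these wrappers take. */
	#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
	#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)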
@@ -361,7 +361,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 	}
 	spin_lock(&inode->i_lock);
-	spin_lock_irq(&mapping->tree_lock);
+	xa_lock_irq(&mapping->i_pages);
 
 	/*
 	 * Once I_FREEING is visible under i_lock, the eviction path owns
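For orientation, the hunk above shows only the tail of the lock acquisition sequence. Condensed from the surrounding function body (a sketch of context the patch does not display in full), the switch path takes the two wb list_locks ordered by address to avoid ABBA deadlock, then the inode lock, then the i_pages lock, matching the ordering promised by the comment in the first hunk:

	if (old_wb < new_wb) {
		spin_lock(&old_wb->list_lock);
		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&new_wb->list_lock);
		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
	}
	spin_lock(&inode->i_lock);
	xa_lock_irq(&mapping->i_pages);	/* was spin_lock_irq(&mapping->tree_lock) */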
@@ -373,22 +373,22 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	/*
 	 * Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
 	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
-	 * pages actually under underwriteback.
+	 * pages actually under writeback.
 	 */
-	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
+	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
 				   PAGECACHE_TAG_DIRTY) {
 		struct page *page = radix_tree_deref_slot_protected(slot,
-							&mapping->tree_lock);
+						&mapping->i_pages.xa_lock);
 		if (likely(page) && PageDirty(page)) {
 			dec_wb_stat(old_wb, WB_RECLAIMABLE);
 			inc_wb_stat(new_wb, WB_RECLAIMABLE);
 		}
 	}
 
-	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
+	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
 				   PAGECACHE_TAG_WRITEBACK) {
 		struct page *page = radix_tree_deref_slot_protected(slot,
-							&mapping->tree_lock);
+						&mapping->i_pages.xa_lock);
 		if (likely(page)) {
 			WARN_ON_ONCE(!PageWriteback(page));
 			dec_wb_stat(old_wb, WB_WRITEBACK);
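The second-argument change from &mapping->tree_lock to &mapping->i_pages.xa_lock is only lockdep bookkeeping: radix_tree_deref_slot_protected() wants a pointer to whatever spinlock protects the slot, and after this patch that is the lock embedded in i_pages. A paraphrase of the helper, assuming the definition in include/linux/radix-tree.h:

	static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
							    spinlock_t *treelock)
	{
		/* Deref is legal only while the tree's lock is held. */
		return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
	}

So the dereference is still validated against the lock actually taken a few lines up; it is simply spelled differently now.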
@@ -430,7 +430,7 @@ skip_switch:
 	 */
 	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
 
-	spin_unlock_irq(&mapping->tree_lock);
+	xa_unlock_irq(&mapping->i_pages);
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&new_wb->list_lock);
 	spin_unlock(&old_wb->list_lock);
@@ -506,8 +506,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 
 	/*
 	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
-	 * the RCU protected stat update paths to grab the mapping's
-	 * tree_lock so that stat transfer can synchronize against them.
+	 * the RCU protected stat update paths to grab the i_page
+	 * lock so that stat transfer can synchronize against them.
 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 	 */
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
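For completeness, the "RCU protected stat update paths" this comment points at are unlocked_inode_to_wb_begin/end() in include/linux/backing-dev.h. The sketch below is an illustrative paraphrase of the reader-side handshake after this conversion, not the verbatim helper; the function name and simplified shape are hypothetical:

	/* Illustrative sketch only; see unlocked_inode_to_wb_begin/end()
	 * for the real helpers. */
	static struct bdi_writeback *stat_update_begin(struct inode *inode,
						       bool *locked)
	{
		rcu_read_lock();

		/* Pairs with the smp_store_release() that clears I_WB_SWITCH
		 * in inode_switch_wbs_work_fn() above. */
		*locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
		if (unlikely(*locked))
			xa_lock_irq(&inode->i_mapping->i_pages);	/* was tree_lock */

		/* Either RCU or the i_pages lock now pins inode->i_wb. */
		return inode_to_wb(inode);
	}

Once I_WB_SWITCH is visible and an RCU grace period has passed, any such path either finished before the switch started or is serialized behind xa_lock_irq(), which is exactly what the stat transfer loops in the -373,22 hunk rely on.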