Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 64b1e2065b6b..f3491074b035 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -185,10 +185,9 @@ EXPORT_SYMBOL(end_buffer_write_sync);
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
  * private_lock.
  *
- * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
+ * Hack idea: for the blockdev mapping, private_lock contention
  * may be quite high. This code could TryLock the page, and if that
- * succeeds, there is no need to take private_lock. (But if
- * private_lock is contended then so is mapping->tree_lock).
+ * succeeds, there is no need to take private_lock.
  */
 static struct buffer_head *
 __find_get_block_slow(struct block_device *bdev, sector_t block)
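
The "Hack idea" above was never implemented; this hunk only drops the parenthetical about tree_lock, which stops making sense once the lock moves into i_pages. Purely for illustration, a trylock fast path along the lines the comment suggests might look like the sketch below (the helper name and its page argument are invented here; this is not part of the patch):

	/* Hypothetical sketch of the comment's TryLock idea.  Callers of
	 * try_to_free_buffers() hold the page lock, so taking it here
	 * excludes them without touching private_lock. */
	static struct buffer_head *
	find_get_block_trylock(struct page *page, sector_t block)
	{
		struct buffer_head *bh, *head, *ret = NULL;

		if (!trylock_page(page))
			return NULL;	/* contended: fall back to private_lock */
		if (!page_has_buffers(page))
			goto out;
		head = bh = page_buffers(page);
		do {			/* walk the page's buffer ring */
			if (bh->b_blocknr == block) {
				get_bh(bh);
				ret = bh;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
	out:
		unlock_page(page);
		return ret;
	}
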
@@ -599,14 +598,14 @@ void __set_page_dirty(struct page *page, struct address_space *mapping,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&mapping->tree_lock, flags);
+	xa_lock_irqsave(&mapping->i_pages, flags);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
 		account_page_dirtied(page, mapping);
-		radix_tree_tag_set(&mapping->page_tree,
+		radix_tree_tag_set(&mapping->i_pages,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+	xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
 EXPORT_SYMBOL_GPL(__set_page_dirty);
 
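
This hunk is the fs/buffer.c part of the tree-wide conversion that retires the separate mapping->tree_lock and embeds the lock in the tree root, renamed from page_tree to i_pages. Restating the mechanical shape of the change as a before/after sketch (no new kernel code, just the pattern from the hunk above):

	/* Before: a dedicated spinlock guarding the page-cache radix tree. */
	spin_lock_irqsave(&mapping->tree_lock, flags);
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	/* After: the lock lives inside the tree root itself. */
	xa_lock_irqsave(&mapping->i_pages, flags);
	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
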
@@ -1096,7 +1095,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * inode list.
  *
  * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and mapping->host->i_lock.
+ * i_pages lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
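
As the updated comment notes, all of that locking is internal to mark_buffer_dirty(), so callers need none of it. A minimal illustrative call site (sb and block_nr are placeholders, not from this patch):

	struct buffer_head *bh = sb_bread(sb, block_nr);

	if (bh) {
		memset(bh->b_data, 0, bh->b_size);	/* modify the block */
		mark_buffer_dirty(bh);	/* takes its own locks internally */
		brelse(bh);
	}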