author     Matthew Wilcox <mawilcox@microsoft.com>            2018-04-10 19:36:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-04-11 13:28:39 -0400
commit     b93b016313b3ba8003c3b8bb71f569af91f19fc7 (patch)
tree       ad4be96414189dcdf8c972f351ba430996e9fdff /mm/workingset.c
parent     f6bb2a2c0b81c47282ddb7883f92e65a063c27dd (diff)
page cache: use xa_lock
Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root. Rename the address_space ->page_tree to ->i_pages,
since we don't really care that it's a tree.
[willy@infradead.org: fix nds32, fs/dax.c]
Link: http://lkml.kernel.org/r/20180406145415.GB20605@bombadil.infradead.org
Link: http://lkml.kernel.org/r/20180313132639.17387-9-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
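
For readers following along, the shape of the change is simple: the spinlock that used to sit beside the tree as mapping->tree_lock now lives inside the tree root itself, and call sites reach it through the xa_lock()/xa_unlock()/xa_trylock() wrappers. Below is a minimal userspace sketch of that idiom; the toy_* names are illustrative stand-ins, not the kernel API.

#include <pthread.h>
#include <stdio.h>

/* The lock is embedded in the container, as with the xarray's xa_lock. */
struct toy_xarray {
        pthread_mutex_t xa_lock;
        int nr_entries;            /* stand-in for the tree contents */
};

/* Wrappers modeled on the kernel's xa_lock()/xa_unlock()/xa_trylock() */
#define toy_xa_lock(xa)    pthread_mutex_lock(&(xa)->xa_lock)
#define toy_xa_unlock(xa)  pthread_mutex_unlock(&(xa)->xa_lock)
#define toy_xa_trylock(xa) (pthread_mutex_trylock(&(xa)->xa_lock) == 0)

struct toy_address_space {
        struct toy_xarray i_pages; /* was: page_tree plus a separate tree_lock */
};

static void add_page(struct toy_address_space *mapping)
{
        toy_xa_lock(&mapping->i_pages);   /* was: spin_lock(&mapping->tree_lock) */
        mapping->i_pages.nr_entries++;
        toy_xa_unlock(&mapping->i_pages); /* was: spin_unlock(&mapping->tree_lock) */
}

int main(void)
{
        struct toy_address_space as = {
                .i_pages = { .xa_lock = PTHREAD_MUTEX_INITIALIZER }
        };

        add_page(&as);
        printf("entries: %d\n", as.i_pages.nr_entries);
        return 0;
}

Embedding the lock lets the container be handed around as one unit, so the later radix-tree-to-XArray conversion does not have to revisit every locking site.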
Diffstat (limited to 'mm/workingset.c')
-rw-r--r--  mm/workingset.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
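
One hunk in the diff below deserves a gloss: shadow_lru_isolate() recovers the owning address_space from a radix-tree root pointer with container_of(), which is why the rename only has to touch the member name. A standalone sketch of that technique follows (simplified macro without the kernel's type-checking; toy_* names are illustrative).

#include <stddef.h>
#include <stdio.h>

/* Classic offsetof-based container_of(): walk back from a pointer to an
 * embedded member to the struct that contains it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_xarray { int dummy; };

struct toy_address_space {
        int flags;
        struct toy_xarray i_pages;   /* embedded member, as in the patch */
};

int main(void)
{
        struct toy_address_space as = { .flags = 42 };
        struct toy_xarray *root = &as.i_pages;

        /* Recover the enclosing address_space from the embedded root. */
        struct toy_address_space *mapping =
                container_of(root, struct toy_address_space, i_pages);

        printf("flags: %d\n", mapping->flags);
        return 0;
}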
diff --git a/mm/workingset.c b/mm/workingset.c
index b7d616a3bbbe..40ee02c83978 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -202,7 +202,7 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
  * @mapping: address space the page was backing
  * @page: the page being evicted
  *
- * Returns a shadow entry to be stored in @mapping->page_tree in place
+ * Returns a shadow entry to be stored in @mapping->i_pages in place
  * of the evicted @page so that a later refault can be detected.
  */
 void *workingset_eviction(struct address_space *mapping, struct page *page)
@@ -348,7 +348,7 @@ void workingset_update_node(struct radix_tree_node *node)
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
-	 * as node->private_list is protected by &mapping->tree_lock.
+	 * as node->private_list is protected by the i_pages lock.
	 */
	if (node->count && node->count == node->exceptional) {
		if (list_empty(&node->private_list))
@@ -366,7 +366,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
	unsigned long nodes;
	unsigned long cache;

-	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	local_irq_disable();
	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	local_irq_enable();
@@ -419,21 +419,21 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,

	/*
	 * Page cache insertions and deletions synchroneously maintain
-	 * the shadow node LRU under the mapping->tree_lock and the
+	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
-	 * We can then safely transition to the mapping->tree_lock to
+	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
-	mapping = container_of(node->root, struct address_space, page_tree);
+	mapping = container_of(node->root, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
-	if (!spin_trylock(&mapping->tree_lock)) {
+	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
@@ -468,11 +468,11 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
	if (WARN_ON_ONCE(node->exceptional))
		goto out_invalid;
	inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
-	__radix_tree_delete_node(&mapping->page_tree, node,
+	__radix_tree_delete_node(&mapping->i_pages, node,
				 workingset_lookup_update(mapping));

 out_invalid:
-	spin_unlock(&mapping->tree_lock);
+	xa_unlock(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
 out:
	local_irq_enable();
@@ -487,7 +487,7 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
 {
	unsigned long ret;

-	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	local_irq_disable();
	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
	local_irq_enable();
@@ -503,7 +503,7 @@ static struct shrinker workingset_shadow_shrinker = {

 /*
  * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
- * mapping->tree_lock.
+ * i_pages lock.
  */
 static struct lock_class_key shadow_nodes_key;

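
Finally, the trylock sequence at the top of shadow_lru_isolate() is worth spelling out, since this patch only renames the lock it operates on. The established ordering takes the i_pages lock first, with lru_lock nested inside it; the shrinker arrives already holding lru_lock, so it may only trylock the i_pages lock, and on contention it must drop lru_lock and report LRU_RETRY rather than block and risk deadlock. A userspace sketch of that back-off pattern (names illustrative, not the kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

pthread_mutex_t i_pages_lock = PTHREAD_MUTEX_INITIALIZER; /* outer lock */
pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;     /* inner lock */

/* Called with lru_lock held; returns false when the caller must retry. */
static bool isolate_node(void)
{
        /* Coming from the list, invert the lock order: trylock only. */
        if (pthread_mutex_trylock(&i_pages_lock) != 0) {
                pthread_mutex_unlock(&lru_lock); /* back off, don't deadlock */
                return false;                    /* kernel returns LRU_RETRY */
        }

        /* ... take the node off the LRU and reclaim it under both locks ... */

        pthread_mutex_unlock(&lru_lock);
        pthread_mutex_unlock(&i_pages_lock);
        return true;
}

int main(void)
{
        pthread_mutex_lock(&lru_lock);
        if (isolate_node())
                puts("node isolated");
        else
                puts("contended: retry later");
        return 0;
}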