about summary refs log tree commit diff stats
path: root/mm/filemap.c
diff options
context:
space:
mode:
authorMatthew Wilcox <willy@infradead.org>2018-05-16 16:12:50 -0400
committerMatthew Wilcox <willy@infradead.org>2018-10-21 10:46:34 -0400
commit4c7472c0df2f889df417a37571e622e02b5058fe (patch)
tree15557c1b56edf6cc1f0092b59eb2145a80ce5a29 /mm/filemap.c
parent5c024e6a4ebc1740db9f0f075aaa476210108a97 (diff)
page cache: Convert find_get_entry to XArray
Slightly shorter and simpler code. Signed-off-by: Matthew Wilcox <willy@infradead.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c | 63
1 file changed, 28 insertions(+), 35 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 414efbdc95df..2bf9f0742082 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1382,47 +1382,40 @@ EXPORT_SYMBOL(page_cache_prev_miss);
  */
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 {
-	void **pagep;
+	XA_STATE(xas, &mapping->i_pages, offset);
 	struct page *head, *page;
 
 	rcu_read_lock();
 repeat:
-	page = NULL;
-	pagep = radix_tree_lookup_slot(&mapping->i_pages, offset);
-	if (pagep) {
-		page = radix_tree_deref_slot(pagep);
-		if (unlikely(!page))
-			goto out;
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page))
-				goto repeat;
-			/*
-			 * A shadow entry of a recently evicted page,
-			 * or a swap entry from shmem/tmpfs.  Return
-			 * it without attempting to raise page count.
-			 */
-			goto out;
-		}
+	xas_reset(&xas);
+	page = xas_load(&xas);
+	if (xas_retry(&xas, page))
+		goto repeat;
+	/*
+	 * A shadow entry of a recently evicted page, or a swap entry from
+	 * shmem/tmpfs.  Return it without attempting to raise page count.
+	 */
+	if (!page || xa_is_value(page))
+		goto out;
 
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			goto repeat;
+	head = compound_head(page);
+	if (!page_cache_get_speculative(head))
+		goto repeat;
 
-		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
+	/* The page was split under us? */
+	if (compound_head(page) != head) {
+		put_page(head);
+		goto repeat;
+	}
 
-		/*
-		 * Has the page moved?
-		 * This is part of the lockless pagecache protocol. See
-		 * include/linux/pagemap.h for details.
-		 */
-		if (unlikely(page != *pagep)) {
-			put_page(head);
-			goto repeat;
-		}
+	/*
+	 * Has the page moved?
+	 * This is part of the lockless pagecache protocol. See
+	 * include/linux/pagemap.h for details.
+	 */
+	if (unlikely(page != xas_reload(&xas))) {
+		put_page(head);
+		goto repeat;
+	}
 out:
 	rcu_read_unlock();
@@ -1453,7 +1446,7 @@ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
 
 repeat:
 	page = find_get_entry(mapping, offset);
-	if (page && !radix_tree_exception(page)) {
+	if (page && !xa_is_value(page)) {
 		lock_page(page);
 		/* Has the page been truncated? */
 		if (unlikely(page_mapping(page) != mapping)) {