diff options
author | Matthew Wilcox <willy@infradead.org> | 2017-11-21 14:07:06 -0500 |
---|---|---|
committer | Matthew Wilcox <willy@infradead.org> | 2018-10-21 10:46:33 -0400 |
commit | 0d3f92966629e536b0c5c2355c1ada8e21c245f6 (patch) | |
tree | 1fe6674dbc2e0164749a904c29065dec9a707ac1 /mm/filemap.c | |
parent | eb797a8ee0ab4cd03df556980ce7bf167cadaa50 (diff) |
page cache: Convert hole search to XArray
The page cache offers the ability to search for a miss in the previous or
next N locations. Rather than teach the XArray about the page cache's
definition of a miss, use xas_prev() and xas_next() to search the page
array. This should be more efficient as it does not have to start the
lookup from the top for each index.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 110 |
1 file changed, 50 insertions, 60 deletions
diff --git a/mm/filemap.c b/mm/filemap.c index 4de14e75c4ec..714d3d0f60f5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -1326,86 +1326,76 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm, | |||
1326 | } | 1326 | } |
1327 | 1327 | ||
1328 | /** | 1328 | /** |
1329 | * page_cache_next_hole - find the next hole (not-present entry) | 1329 | * page_cache_next_miss() - Find the next gap in the page cache. |
1330 | * @mapping: mapping | 1330 | * @mapping: Mapping. |
1331 | * @index: index | 1331 | * @index: Index. |
1332 | * @max_scan: maximum range to search | 1332 | * @max_scan: Maximum range to search. |
1333 | * | 1333 | * |
1334 | * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the | 1334 | * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the |
1335 | * lowest indexed hole. | 1335 | * gap with the lowest index. |
1336 | * | 1336 | * |
1337 | * Returns: the index of the hole if found, otherwise returns an index | 1337 | * This function may be called under the rcu_read_lock. However, this will |
1338 | * outside of the set specified (in which case 'return - index >= | 1338 | * not atomically search a snapshot of the cache at a single point in time. |
1339 | * max_scan' will be true). In rare cases of index wrap-around, 0 will | 1339 | * For example, if a gap is created at index 5, then subsequently a gap is |
1340 | * be returned. | 1340 | * created at index 10, page_cache_next_miss covering both indices may |
1341 | * | 1341 | * return 10 if called under the rcu_read_lock. |
1342 | * page_cache_next_hole may be called under rcu_read_lock. However, | 1342 | * |
1343 | * like radix_tree_gang_lookup, this will not atomically search a | 1343 | * Return: The index of the gap if found, otherwise an index outside the |
1344 | * snapshot of the tree at a single point in time. For example, if a | 1344 | * range specified (in which case 'return - index >= max_scan' will be true). |
1345 | * hole is created at index 5, then subsequently a hole is created at | 1345 | * In the rare case of index wrap-around, 0 will be returned. |
1346 | * index 10, page_cache_next_hole covering both indexes may return 10 | ||
1347 | * if called under rcu_read_lock. | ||
1348 | */ | 1346 | */ |
1349 | pgoff_t page_cache_next_hole(struct address_space *mapping, | 1347 | pgoff_t page_cache_next_miss(struct address_space *mapping, |
1350 | pgoff_t index, unsigned long max_scan) | 1348 | pgoff_t index, unsigned long max_scan) |
1351 | { | 1349 | { |
1352 | unsigned long i; | 1350 | XA_STATE(xas, &mapping->i_pages, index); |
1353 | 1351 | ||
1354 | for (i = 0; i < max_scan; i++) { | 1352 | while (max_scan--) { |
1355 | struct page *page; | 1353 | void *entry = xas_next(&xas); |
1356 | 1354 | if (!entry || xa_is_value(entry)) | |
1357 | page = radix_tree_lookup(&mapping->i_pages, index); | ||
1358 | if (!page || xa_is_value(page)) | ||
1359 | break; | 1355 | break; |
1360 | index++; | 1356 | if (xas.xa_index == 0) |
1361 | if (index == 0) | ||
1362 | break; | 1357 | break; |
1363 | } | 1358 | } |
1364 | 1359 | ||
1365 | return index; | 1360 | return xas.xa_index; |
1366 | } | 1361 | } |
1367 | EXPORT_SYMBOL(page_cache_next_hole); | 1362 | EXPORT_SYMBOL(page_cache_next_miss); |
1368 | 1363 | ||
1369 | /** | 1364 | /** |
1370 | * page_cache_prev_hole - find the prev hole (not-present entry) | 1365 | * page_cache_prev_miss() - Find the next gap in the page cache. |
1371 | * @mapping: mapping | 1366 | * @mapping: Mapping. |
1372 | * @index: index | 1367 | * @index: Index. |
1373 | * @max_scan: maximum range to search | 1368 | * @max_scan: Maximum range to search. |
1374 | * | 1369 | * |
1375 | * Search backwards in the range [max(index-max_scan+1, 0), index] for | 1370 | * Search the range [max(index - max_scan + 1, 0), index] for the |
1376 | * the first hole. | 1371 | * gap with the highest index. |
1377 | * | 1372 | * |
1378 | * Returns: the index of the hole if found, otherwise returns an index | 1373 | * This function may be called under the rcu_read_lock. However, this will |
1379 | * outside of the set specified (in which case 'index - return >= | 1374 | * not atomically search a snapshot of the cache at a single point in time. |
1380 | * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX | 1375 | * For example, if a gap is created at index 10, then subsequently a gap is |
1381 | * will be returned. | 1376 | * created at index 5, page_cache_prev_miss() covering both indices may |
1382 | * | 1377 | * return 5 if called under the rcu_read_lock. |
1383 | * page_cache_prev_hole may be called under rcu_read_lock. However, | 1378 | * |
1384 | * like radix_tree_gang_lookup, this will not atomically search a | 1379 | * Return: The index of the gap if found, otherwise an index outside the |
1385 | * snapshot of the tree at a single point in time. For example, if a | 1380 | * range specified (in which case 'index - return >= max_scan' will be true). |
1386 | * hole is created at index 10, then subsequently a hole is created at | 1381 | * In the rare case of wrap-around, ULONG_MAX will be returned. |
1387 | * index 5, page_cache_prev_hole covering both indexes may return 5 if | ||
1388 | * called under rcu_read_lock. | ||
1389 | */ | 1382 | */ |
1390 | pgoff_t page_cache_prev_hole(struct address_space *mapping, | 1383 | pgoff_t page_cache_prev_miss(struct address_space *mapping, |
1391 | pgoff_t index, unsigned long max_scan) | 1384 | pgoff_t index, unsigned long max_scan) |
1392 | { | 1385 | { |
1393 | unsigned long i; | 1386 | XA_STATE(xas, &mapping->i_pages, index); |
1394 | |||
1395 | for (i = 0; i < max_scan; i++) { | ||
1396 | struct page *page; | ||
1397 | 1387 | ||
1398 | page = radix_tree_lookup(&mapping->i_pages, index); | 1388 | while (max_scan--) { |
1399 | if (!page || xa_is_value(page)) | 1389 | void *entry = xas_prev(&xas); |
1390 | if (!entry || xa_is_value(entry)) | ||
1400 | break; | 1391 | break; |
1401 | index--; | 1392 | if (xas.xa_index == ULONG_MAX) |
1402 | if (index == ULONG_MAX) | ||
1403 | break; | 1393 | break; |
1404 | } | 1394 | } |
1405 | 1395 | ||
1406 | return index; | 1396 | return xas.xa_index; |
1407 | } | 1397 | } |
1408 | EXPORT_SYMBOL(page_cache_prev_hole); | 1398 | EXPORT_SYMBOL(page_cache_prev_miss); |
1409 | 1399 | ||
1410 | /** | 1400 | /** |
1411 | * find_get_entry - find and get a page cache entry | 1401 | * find_get_entry - find and get a page cache entry |