author	Matthew Wilcox <willy@infradead.org>	2017-12-01 13:25:14 -0500
committer	Matthew Wilcox <willy@infradead.org>	2018-10-21 10:46:40 -0400
commit	552446a4166189a8c2515571dd6e25fd64a2dc78 (patch)
tree	63f5f81a3982064332da1d1d891ce2da2118a554 /mm/shmem.c
parent	e21a29552fa3f44ea41c53488875015ae70fd7f8 (diff)
shmem: Convert shmem_add_to_page_cache to XArray
We can use xas_find_conflict() instead of radix_tree_gang_lookup_slot()
to find any conflicting entry and combine the three paths through this
function into one.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
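
A minimal sketch of the pattern described above, assuming a plain struct xarray and a single index; the helper name insert_unless_conflict() is hypothetical, not part of this patch. xas_find_conflict() returns any entry already present under the cursor, so a single comparison against "expected" stands in for the old three-way branch (fresh insert, swap-entry replacement, huge-page range scan), and xas_nomem() retries after allocating node memory outside the lock:

#include <linux/xarray.h>

/*
 * Hypothetical helper (not from this patch): insert "item" at "index"
 * unless something other than "expected" is already there -- the same
 * conflict test the converted function performs.
 */
static int insert_unless_conflict(struct xarray *xa, unsigned long index,
				  void *expected, void *item, gfp_t gfp)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock_irq(&xas);
		/* Any entry under the cursor that isn't "expected" collides. */
		if (xas_find_conflict(&xas) != expected)
			xas_set_err(&xas, -EEXIST);
		else
			xas_store(&xas, item);
		xas_unlock_irq(&xas);
		/* On -ENOMEM, allocate outside the lock and retry. */
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}

With expected == NULL this reduces to "insert unless occupied"; the converted function below layers the multi-index cursor, statistics, and page refcounting on top of the same skeleton.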
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	81
1 file changed, 34 insertions(+), 47 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index a305529d6b89..8633bd3dc433 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -577,9 +577,11 @@ static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
  */
 static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
-				   pgoff_t index, void *expected)
+				   pgoff_t index, void *expected, gfp_t gfp)
 {
-	int error, nr = hpage_nr_pages(page);
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
+	unsigned long i = 0;
+	unsigned long nr = 1UL << compound_order(page);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -591,46 +593,39 @@ static int shmem_add_to_page_cache(struct page *page,
 	page->mapping = mapping;
 	page->index = index;
 
-	xa_lock_irq(&mapping->i_pages);
-	if (PageTransHuge(page)) {
-		void __rcu **results;
-		pgoff_t idx;
-		int i;
-
-		error = 0;
-		if (radix_tree_gang_lookup_slot(&mapping->i_pages,
-					&results, &idx, index, 1) &&
-				idx < index + HPAGE_PMD_NR) {
-			error = -EEXIST;
+	do {
+		void *entry;
+		xas_lock_irq(&xas);
+		entry = xas_find_conflict(&xas);
+		if (entry != expected)
+			xas_set_err(&xas, -EEXIST);
+		xas_create_range(&xas);
+		if (xas_error(&xas))
+			goto unlock;
+next:
+		xas_store(&xas, page + i);
+		if (++i < nr) {
+			xas_next(&xas);
+			goto next;
 		}
-
-		if (!error) {
-			for (i = 0; i < HPAGE_PMD_NR; i++) {
-				error = radix_tree_insert(&mapping->i_pages,
-						index + i, page + i);
-				VM_BUG_ON(error);
-			}
+		if (PageTransHuge(page)) {
 			count_vm_event(THP_FILE_ALLOC);
+			__inc_node_page_state(page, NR_SHMEM_THPS);
 		}
-	} else if (!expected) {
-		error = radix_tree_insert(&mapping->i_pages, index, page);
-	} else {
-		error = shmem_replace_entry(mapping, index, expected, page);
-	}
-
-	if (!error) {
-		mapping->nrpages += nr;
-		if (PageTransHuge(page))
-			__inc_node_page_state(page, NR_SHMEM_THPS);
+		mapping->nrpages += nr;
 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
-		xa_unlock_irq(&mapping->i_pages);
-	} else {
+unlock:
+		xas_unlock_irq(&xas);
+	} while (xas_nomem(&xas, gfp));
+
+	if (xas_error(&xas)) {
 		page->mapping = NULL;
-		xa_unlock_irq(&mapping->i_pages);
 		page_ref_sub(page, nr);
+		return xas_error(&xas);
 	}
-	return error;
+
+	return 0;
 }
 
 /*
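
A gloss on the new loop above (not from the patch; locals are illustrative): XA_STATE_ORDER() declares a cursor spanning 1UL << order slots, which is how a single do-loop now covers both the order-0 case and the THP case that previously needed radix_tree_gang_lookup_slot(). xas_create_range() builds the nodes for the whole range up front, then each subpage is stored one slot at a time:

/* Trimmed sketch of the store walk; locking and error handling elided. */
XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
unsigned long i, nr = 1UL << compound_order(page);

xas_create_range(&xas);			/* allocate nodes for all nr slots */
for (i = 0; i < nr; i++) {
	xas_store(&xas, page + i);	/* subpage i lands at index + i */
	xas_next(&xas);			/* advance the cursor one slot */
}

The patch itself uses the next:/goto form instead of a for loop so that a conflict or allocation error can bail straight to the shared unlock label.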
@@ -1183,7 +1178,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 	 */
 	if (!error)
 		error = shmem_add_to_page_cache(*pagep, mapping, index,
-						radswap);
+						radswap, gfp);
 	if (error != -ENOMEM) {
 		/*
 		 * Truncation and eviction use free_swap_and_cache(), which
@@ -1700,7 +1695,7 @@ repeat:
 							false);
 	if (!error) {
 		error = shmem_add_to_page_cache(page, mapping, index,
-						swp_to_radix_entry(swap));
+						swp_to_radix_entry(swap), gfp);
 		/*
 		 * We already confirmed swap under page lock, and make
 		 * no memory allocation here, so usually no possibility
@@ -1806,13 +1801,8 @@ alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, inode,
 						PageTransHuge(page));
 	if (error)
 		goto unacct;
-	error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
-						compound_order(page));
-	if (!error) {
-		error = shmem_add_to_page_cache(page, mapping, hindex,
-						NULL);
-		radix_tree_preload_end();
-	}
+	error = shmem_add_to_page_cache(page, mapping, hindex,
+					NULL, gfp & GFP_RECLAIM_MASK);
 	if (error) {
 		mem_cgroup_cancel_charge(page, memcg,
 					 PageTransHuge(page));
@@ -2281,11 +2271,8 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (ret)
 		goto out_release;
 
-	ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
-	if (!ret) {
-		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
-		radix_tree_preload_end();
-	}
+	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+						gfp & GFP_RECLAIM_MASK);
 	if (ret)
 		goto out_release_uncharge;
 
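
One caller-side observation (a gloss, not from the commit message): the last two hunks drop the radix_tree_maybe_preload() / radix_tree_preload_end() brackets because the XArray takes the gfp mask directly and xas_nomem() allocates on demand inside the retry loop. The high-level xa_* API bakes the same behaviour in; a sketch with a hypothetical helper:

#include <linux/xarray.h>

/*
 * Sketch: allocation failure comes back as an encoded error entry,
 * so no preload step is needed around the store.
 */
static int add_entry(struct xarray *xa, unsigned long index, void *item)
{
	void *old = xa_store(xa, index, item, GFP_KERNEL);

	return xa_err(old);	/* 0 on success, -ENOMEM etc. on failure */
}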