Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 84 +++++++++++++++++++++++++++++++++++++++++-------------------------------------------
 1 file changed, 41 insertions(+), 43 deletions(-)
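At a glance, the patch makes two mechanical renames throughout mm/filemap.c: the page-cache radix tree moves from mapping->page_tree to mapping->i_pages, and the standalone mapping->tree_lock is replaced by the spinlock embedded in the tree root, taken through the xa_lock_*() helpers. Those helpers are thin macro wrappers around the same irq-safe spinlock primitives, so the locking semantics are unchanged; only the name and location of the lock move. Below is a minimal before/after sketch, not part of the patch: delete_page_old() and delete_page_new() are hypothetical helpers that exist only to contrast the two idioms.

	/*
	 * Sketch only: mirrors delete_from_page_cache() before and after this
	 * patch. xa_lock_irqsave()/xa_unlock_irqrestore() are macro wrappers
	 * around spin_lock_irqsave()/spin_unlock_irqrestore() on the spinlock
	 * embedded in the tree root, so behaviour is identical.
	 */
	static void delete_page_old(struct address_space *mapping, struct page *page)
	{
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);	/* pre-patch */
		__delete_from_page_cache(page, NULL);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}

	static void delete_page_new(struct address_space *mapping, struct page *page)
	{
		unsigned long flags;

		xa_lock_irqsave(&mapping->i_pages, flags);	/* post-patch */
		__delete_from_page_cache(page, NULL);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
	}

Keeping the lock inside the structure means every user names it the same way, which lets later changes to the tree implementation happen behind one name without touching call sites again.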
diff --git a/mm/filemap.c b/mm/filemap.c
index 693f62212a59..ab77e19ab09c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -66,7 +66,7 @@
  *  ->i_mmap_rwsem              (truncate_pagecache)
  *    ->private_lock            (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock             (exclusive_swap_page, others)
- *        ->mapping->tree_lock
+ *        ->i_pages lock
  *
  *  ->i_mutex
  *    ->i_mmap_rwsem            (truncate->unmap_mapping_range)
@@ -74,7 +74,7 @@
  *  ->mmap_sem
  *    ->i_mmap_rwsem
  *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
- *        ->mapping->tree_lock  (arch-dependent flush_dcache_mmap_lock)
+ *        ->i_pages lock        (arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_sem
  *    ->lock_page               (access_process_vm)
@@ -84,7 +84,7 @@
  *
  *  bdi->wb.list_lock
  *    sb_lock                   (fs/fs-writeback.c)
- *    ->mapping->tree_lock      (__sync_single_inode)
+ *    ->i_pages lock            (__sync_single_inode)
  *
  *  ->i_mmap_rwsem
  *    ->anon_vma.lock           (vma_adjust)
@@ -95,11 +95,11 @@
  *  ->page_table_lock or pte_lock
  *    ->swap_lock               (try_to_unmap_one)
  *    ->private_lock            (try_to_unmap_one)
- *    ->tree_lock               (try_to_unmap_one)
+ *    ->i_pages lock            (try_to_unmap_one)
  *    ->zone_lru_lock(zone)     (follow_page->mark_page_accessed)
  *    ->zone_lru_lock(zone)     (check_pte_range->isolate_lru_page)
  *    ->private_lock            (page_remove_rmap->set_page_dirty)
- *    ->tree_lock               (page_remove_rmap->set_page_dirty)
+ *    ->i_pages lock            (page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock         (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
  *    ->memcg->move_lock        (page_remove_rmap->lock_page_memcg)
@@ -118,14 +118,15 @@ static int page_cache_tree_insert(struct address_space *mapping,
         void **slot;
         int error;
 
-        error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+        error = __radix_tree_create(&mapping->i_pages, page->index, 0,
                                     &node, &slot);
         if (error)
                 return error;
         if (*slot) {
                 void *p;
 
-                p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+                p = radix_tree_deref_slot_protected(slot,
+                                                &mapping->i_pages.xa_lock);
                 if (!radix_tree_exceptional_entry(p))
                         return -EEXIST;
 
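The one place the embedded lock is named explicitly is radix_tree_deref_slot_protected(): its second argument is the spinlock protecting the slot, used only for the lockdep assertion, so with tree_lock gone the patch passes &mapping->i_pages.xa_lock directly. For reference, the helper is essentially the following (paraphrased from include/linux/radix-tree.h of this era; check your tree for the exact signature):

	static inline void *radix_tree_deref_slot_protected(void **slot,
							spinlock_t *treelock)
	{
		return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
	}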
@@ -133,7 +134,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
                 if (shadowp)
                         *shadowp = p;
         }
-        __radix_tree_replace(&mapping->page_tree, node, slot, page,
+        __radix_tree_replace(&mapping->i_pages, node, slot, page,
                              workingset_lookup_update(mapping));
         mapping->nrpages++;
         return 0;
@@ -155,13 +156,13 @@ static void page_cache_tree_delete(struct address_space *mapping,
                 struct radix_tree_node *node;
                 void **slot;
 
-                __radix_tree_lookup(&mapping->page_tree, page->index + i,
+                __radix_tree_lookup(&mapping->i_pages, page->index + i,
                                     &node, &slot);
 
                 VM_BUG_ON_PAGE(!node && nr != 1, page);
 
-                radix_tree_clear_tags(&mapping->page_tree, node, slot);
-                __radix_tree_replace(&mapping->page_tree, node, slot, shadow,
+                radix_tree_clear_tags(&mapping->i_pages, node, slot);
+                __radix_tree_replace(&mapping->i_pages, node, slot, shadow,
                                      workingset_lookup_update(mapping));
         }
 
@@ -253,7 +254,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 /*
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe. The caller must hold the mapping's tree_lock.
+ * is safe. The caller must hold the i_pages lock.
  */
 void __delete_from_page_cache(struct page *page, void *shadow)
 {
@@ -296,9 +297,9 @@ void delete_from_page_cache(struct page *page)
         unsigned long flags;
 
         BUG_ON(!PageLocked(page));
-        spin_lock_irqsave(&mapping->tree_lock, flags);
+        xa_lock_irqsave(&mapping->i_pages, flags);
         __delete_from_page_cache(page, NULL);
-        spin_unlock_irqrestore(&mapping->tree_lock, flags);
+        xa_unlock_irqrestore(&mapping->i_pages, flags);
 
         page_cache_free_page(mapping, page);
 }
@@ -309,14 +310,14 @@ EXPORT_SYMBOL(delete_from_page_cache);
  * @mapping: the mapping to which pages belong
  * @pvec: pagevec with pages to delete
  *
- * The function walks over mapping->page_tree and removes pages passed in @pvec
- * from the radix tree. The function expects @pvec to be sorted by page index.
- * It tolerates holes in @pvec (radix tree entries at those indices are not
+ * The function walks over mapping->i_pages and removes pages passed in @pvec
+ * from the mapping. The function expects @pvec to be sorted by page index.
+ * It tolerates holes in @pvec (mapping entries at those indices are not
  * modified). The function expects only THP head pages to be present in the
- * @pvec and takes care to delete all corresponding tail pages from the radix
- * tree as well.
+ * @pvec and takes care to delete all corresponding tail pages from the
+ * mapping as well.
  *
- * The function expects mapping->tree_lock to be held.
+ * The function expects the i_pages lock to be held.
  */
 static void
 page_cache_tree_delete_batch(struct address_space *mapping,
@@ -330,11 +331,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
         pgoff_t start;
 
         start = pvec->pages[0]->index;
-        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+        radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                 if (i >= pagevec_count(pvec) && !tail_pages)
                         break;
                 page = radix_tree_deref_slot_protected(slot,
-                                                       &mapping->tree_lock);
+                                                &mapping->i_pages.xa_lock);
                 if (radix_tree_exceptional_entry(page))
                         continue;
                 if (!tail_pages) {
@@ -357,8 +358,8 @@ page_cache_tree_delete_batch(struct address_space *mapping,
                 } else {
                         tail_pages--;
                 }
-                radix_tree_clear_tags(&mapping->page_tree, iter.node, slot);
-                __radix_tree_replace(&mapping->page_tree, iter.node, slot, NULL,
+                radix_tree_clear_tags(&mapping->i_pages, iter.node, slot);
+                __radix_tree_replace(&mapping->i_pages, iter.node, slot, NULL,
                                      workingset_lookup_update(mapping));
                 total_pages++;
         }
@@ -374,14 +375,14 @@ void delete_from_page_cache_batch(struct address_space *mapping,
         if (!pagevec_count(pvec))
                 return;
 
-        spin_lock_irqsave(&mapping->tree_lock, flags);
+        xa_lock_irqsave(&mapping->i_pages, flags);
         for (i = 0; i < pagevec_count(pvec); i++) {
                 trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
 
                 unaccount_page_cache_page(mapping, pvec->pages[i]);
         }
         page_cache_tree_delete_batch(mapping, pvec);
-        spin_unlock_irqrestore(&mapping->tree_lock, flags);
+        xa_unlock_irqrestore(&mapping->i_pages, flags);
 
         for (i = 0; i < pagevec_count(pvec); i++)
                 page_cache_free_page(mapping, pvec->pages[i]);
@@ -798,7 +799,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                 new->mapping = mapping;
                 new->index = offset;
 
-                spin_lock_irqsave(&mapping->tree_lock, flags);
+                xa_lock_irqsave(&mapping->i_pages, flags);
                 __delete_from_page_cache(old, NULL);
                 error = page_cache_tree_insert(mapping, new, NULL);
                 BUG_ON(error);
@@ -810,7 +811,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                         __inc_node_page_state(new, NR_FILE_PAGES);
                 if (PageSwapBacked(new))
                         __inc_node_page_state(new, NR_SHMEM);
-                spin_unlock_irqrestore(&mapping->tree_lock, flags);
+                xa_unlock_irqrestore(&mapping->i_pages, flags);
                 mem_cgroup_migrate(old, new);
                 radix_tree_preload_end();
                 if (freepage)
@@ -852,7 +853,7 @@ static int __add_to_page_cache_locked(struct page *page,
         page->mapping = mapping;
         page->index = offset;
 
-        spin_lock_irq(&mapping->tree_lock);
+        xa_lock_irq(&mapping->i_pages);
         error = page_cache_tree_insert(mapping, page, shadowp);
         radix_tree_preload_end();
         if (unlikely(error))
@@ -861,7 +862,7 @@ static int __add_to_page_cache_locked(struct page *page,
         /* hugetlb pages do not participate in page cache accounting. */
         if (!huge)
                 __inc_node_page_state(page, NR_FILE_PAGES);
-        spin_unlock_irq(&mapping->tree_lock);
+        xa_unlock_irq(&mapping->i_pages);
         if (!huge)
                 mem_cgroup_commit_charge(page, memcg, false, false);
         trace_mm_filemap_add_to_page_cache(page);
@@ -869,7 +870,7 @@ static int __add_to_page_cache_locked(struct page *page,
 err_insert:
         page->mapping = NULL;
         /* Leave page->index set: truncation relies upon it */
-        spin_unlock_irq(&mapping->tree_lock);
+        xa_unlock_irq(&mapping->i_pages);
         if (!huge)
                 mem_cgroup_cancel_charge(page, memcg, false);
         put_page(page);
@@ -1353,7 +1354,7 @@ pgoff_t page_cache_next_hole(struct address_space *mapping,
         for (i = 0; i < max_scan; i++) {
                 struct page *page;
 
-                page = radix_tree_lookup(&mapping->page_tree, index);
+                page = radix_tree_lookup(&mapping->i_pages, index);
                 if (!page || radix_tree_exceptional_entry(page))
                         break;
                 index++;
@@ -1394,7 +1395,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
         for (i = 0; i < max_scan; i++) {
                 struct page *page;
 
-                page = radix_tree_lookup(&mapping->page_tree, index);
+                page = radix_tree_lookup(&mapping->i_pages, index);
                 if (!page || radix_tree_exceptional_entry(page))
                         break;
                 index--;
@@ -1427,7 +1428,7 @@ struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
         rcu_read_lock();
 repeat:
         page = NULL;
-        pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
+        pagep = radix_tree_lookup_slot(&mapping->i_pages, offset);
         if (pagep) {
                 page = radix_tree_deref_slot(pagep);
                 if (unlikely(!page))
@@ -1633,7 +1634,7 @@ unsigned find_get_entries(struct address_space *mapping,
                 return 0;
 
         rcu_read_lock();
-        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+        radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                 struct page *head, *page;
 repeat:
                 page = radix_tree_deref_slot(slot);
@@ -1710,7 +1711,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                 return 0;
 
         rcu_read_lock();
-        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, *start) {
+        radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, *start) {
                 struct page *head, *page;
 
                 if (iter.index > end)
@@ -1795,7 +1796,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                 return 0;
 
         rcu_read_lock();
-        radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
+        radix_tree_for_each_contig(slot, &mapping->i_pages, &iter, index) {
                 struct page *head, *page;
 repeat:
                 page = radix_tree_deref_slot(slot);
@@ -1875,8 +1876,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                 return 0;
 
         rcu_read_lock();
-        radix_tree_for_each_tagged(slot, &mapping->page_tree,
-                                   &iter, *index, tag) {
+        radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) {
                 struct page *head, *page;
 
                 if (iter.index > end)
@@ -1969,8 +1969,7 @@ unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                 return 0;
 
         rcu_read_lock();
-        radix_tree_for_each_tagged(slot, &mapping->page_tree,
-                                   &iter, start, tag) {
+        radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start, tag) {
                 struct page *head, *page;
 repeat:
                 page = radix_tree_deref_slot(slot);
@@ -2624,8 +2623,7 @@ void filemap_map_pages(struct vm_fault *vmf,
         struct page *head, *page;
 
         rcu_read_lock();
-        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
-                                 start_pgoff) {
+        radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start_pgoff) {
                 if (iter.index > end_pgoff)
                         break;
 repeat:
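Note that every lookup-side hunk above (find_get_entry(), the gang lookups, filemap_map_pages()) is a pure rename: those paths still run under rcu_read_lock() and never take the new lock. A sketch of how a caller outside this file would adapt; lookup_then_update() is a hypothetical name, not from the patch:

	/* Hypothetical example: lockless lookups just rename the tree root;
	 * locked updates swap spin_lock_irq(&mapping->tree_lock) for
	 * xa_lock_irq(&mapping->i_pages). */
	static struct page *lookup_then_update(struct address_space *mapping,
					       pgoff_t index)
	{
		struct page *page;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->i_pages, index);	/* was ->page_tree */
		rcu_read_unlock();

		xa_lock_irq(&mapping->i_pages);		/* was &mapping->tree_lock */
		/* ... modify the tree under the lock ... */
		xa_unlock_irq(&mapping->i_pages);

		return page;
	}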