Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  114
1 file changed, 57 insertions(+), 57 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 8a287dfc5372..2d0986a64f1f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -110,6 +110,62 @@
  * ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
+static int page_cache_tree_insert(struct address_space *mapping,
+                                  struct page *page, void **shadowp)
+{
+        struct radix_tree_node *node;
+        void **slot;
+        int error;
+
+        error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+                                    &node, &slot);
+        if (error)
+                return error;
+        if (*slot) {
+                void *p;
+
+                p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+                if (!radix_tree_exceptional_entry(p))
+                        return -EEXIST;
+
+                mapping->nrexceptional--;
+                if (!dax_mapping(mapping)) {
+                        if (shadowp)
+                                *shadowp = p;
+                        if (node)
+                                workingset_node_shadows_dec(node);
+                } else {
+                        /* DAX can replace empty locked entry with a hole */
+                        WARN_ON_ONCE(p !=
+                                (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+                                         RADIX_DAX_ENTRY_LOCK));
+                        /* DAX accounts exceptional entries as normal pages */
+                        if (node)
+                                workingset_node_pages_dec(node);
+                        /* Wakeup waiters for exceptional entry lock */
+                        dax_wake_mapping_entry_waiter(mapping, page->index,
+                                                      false);
+                }
+        }
+        radix_tree_replace_slot(slot, page);
+        mapping->nrpages++;
+        if (node) {
+                workingset_node_pages_inc(node);
+                /*
+                 * Don't track node that contains actual pages.
+                 *
+                 * Avoid acquiring the list_lru lock if already
+                 * untracked. The list_empty() test is safe as
+                 * node->private_list is protected by
+                 * mapping->tree_lock.
+                 */
+                if (!list_empty(&node->private_list))
+                        list_lru_del(&workingset_shadow_nodes,
+                                     &node->private_list);
+        }
+        return 0;
+}
+
 static void page_cache_tree_delete(struct address_space *mapping,
                                    struct page *page, void *shadow)
 {
@@ -561,7 +617,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 
         spin_lock_irqsave(&mapping->tree_lock, flags);
         __delete_from_page_cache(old, NULL);
-        error = radix_tree_insert(&mapping->page_tree, offset, new);
+        error = page_cache_tree_insert(mapping, new, NULL);
         BUG_ON(error);
         mapping->nrpages++;
 
@@ -584,62 +640,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int page_cache_tree_insert(struct address_space *mapping,
-                                  struct page *page, void **shadowp)
-{
-        struct radix_tree_node *node;
-        void **slot;
-        int error;
-
-        error = __radix_tree_create(&mapping->page_tree, page->index, 0,
-                                    &node, &slot);
-        if (error)
-                return error;
-        if (*slot) {
-                void *p;
-
-                p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
-                if (!radix_tree_exceptional_entry(p))
-                        return -EEXIST;
-
-                mapping->nrexceptional--;
-                if (!dax_mapping(mapping)) {
-                        if (shadowp)
-                                *shadowp = p;
-                        if (node)
-                                workingset_node_shadows_dec(node);
-                } else {
-                        /* DAX can replace empty locked entry with a hole */
-                        WARN_ON_ONCE(p !=
-                                (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-                                         RADIX_DAX_ENTRY_LOCK));
-                        /* DAX accounts exceptional entries as normal pages */
-                        if (node)
-                                workingset_node_pages_dec(node);
-                        /* Wakeup waiters for exceptional entry lock */
-                        dax_wake_mapping_entry_waiter(mapping, page->index,
-                                                      false);
-                }
-        }
-        radix_tree_replace_slot(slot, page);
-        mapping->nrpages++;
-        if (node) {
-                workingset_node_pages_inc(node);
-                /*
-                 * Don't track node that contains actual pages.
-                 *
-                 * Avoid acquiring the list_lru lock if already
-                 * untracked. The list_empty() test is safe as
-                 * node->private_list is protected by
-                 * mapping->tree_lock.
-                 */
-                if (!list_empty(&node->private_list))
-                        list_lru_del(&workingset_shadow_nodes,
-                                     &node->private_list);
-        }
-        return 0;
-}
-
 static int __add_to_page_cache_locked(struct page *page,
                                       struct address_space *mapping,
                                       pgoff_t offset, gfp_t gfp_mask,