Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	71
1 file changed, 13 insertions(+), 58 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index db26ebc6c62f..69568388c699 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -132,43 +132,28 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		if (!dax_mapping(mapping)) {
 			if (shadowp)
 				*shadowp = p;
-			if (node)
-				workingset_node_shadows_dec(node);
 		} else {
 			/* DAX can replace empty locked entry with a hole */
 			WARN_ON_ONCE(p !=
 				dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
-			/* DAX accounts exceptional entries as normal pages */
-			if (node)
-				workingset_node_pages_dec(node);
 			/* Wakeup waiters for exceptional entry lock */
 			dax_wake_mapping_entry_waiter(mapping, page->index, p,
 						      false);
 		}
 	}
-	radix_tree_replace_slot(slot, page);
+	__radix_tree_replace(&mapping->page_tree, node, slot, page,
+			     workingset_update_node, mapping);
 	mapping->nrpages++;
-	if (node) {
-		workingset_node_pages_inc(node);
-		/*
-		 * Don't track node that contains actual pages.
-		 *
-		 * Avoid acquiring the list_lru lock if already
-		 * untracked. The list_empty() test is safe as
-		 * node->private_list is protected by
-		 * mapping->tree_lock.
-		 */
-		if (!list_empty(&node->private_list))
-			list_lru_del(&workingset_shadow_nodes,
-				     &node->private_list);
-	}
 	return 0;
 }
 
 static void page_cache_tree_delete(struct address_space *mapping,
 				   struct page *page, void *shadow)
 {
-	int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
+	int i, nr;
+
+	/* hugetlb pages are represented by one entry in the radix tree */
+	nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageTail(page), page);
@@ -181,44 +166,11 @@ static void page_cache_tree_delete(struct address_space *mapping,
 		__radix_tree_lookup(&mapping->page_tree, page->index + i,
 				    &node, &slot);
 
-		radix_tree_clear_tags(&mapping->page_tree, node, slot);
-
-		if (!node) {
-			VM_BUG_ON_PAGE(nr != 1, page);
-			/*
-			 * We need a node to properly account shadow
-			 * entries. Don't plant any without. XXX
-			 */
-			shadow = NULL;
-		}
-
-		radix_tree_replace_slot(slot, shadow);
+		VM_BUG_ON_PAGE(!node && nr != 1, page);
 
-		if (!node)
-			break;
-
-		workingset_node_pages_dec(node);
-		if (shadow)
-			workingset_node_shadows_inc(node);
-		else
-			if (__radix_tree_delete_node(&mapping->page_tree, node))
-				continue;
-
-		/*
-		 * Track node that only contains shadow entries. DAX mappings
-		 * contain no shadow entries and may contain other exceptional
-		 * entries so skip those.
-		 *
-		 * Avoid acquiring the list_lru lock if already tracked.
-		 * The list_empty() test is safe as node->private_list is
-		 * protected by mapping->tree_lock.
-		 */
-		if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
-		    list_empty(&node->private_list)) {
-			node->private_data = mapping;
-			list_lru_add(&workingset_shadow_nodes,
-				     &node->private_list);
-		}
+		radix_tree_clear_tags(&mapping->page_tree, node, slot);
+		__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
+				     workingset_update_node, mapping);
 	}
 
 	if (shadow) {
@@ -1731,6 +1683,9 @@ find_page:
 		if (inode->i_blkbits == PAGE_SHIFT ||
 				!mapping->a_ops->is_partially_uptodate)
 			goto page_not_up_to_date;
+		/* pipes can't handle partially uptodate pages */
+		if (unlikely(iter->type & ITER_PIPE))
+			goto page_not_up_to_date;
 		if (!trylock_page(page))
 			goto page_not_up_to_date;
 		/* Did it get truncated before we got the lock? */
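
Note on the first two hunks: the open-coded radix_tree_replace_slot() plus per-node workingset bookkeeping is replaced by __radix_tree_replace(), which is passed a callback and an opaque private pointer (here workingset_update_node and the mapping) and invokes that callback when a node's contents change. As a rough illustration of the mechanism, the sketch below shows a simplified callback that re-creates the list_lru tracking removed from these call sites; the node->count / node->exceptional fields, the extern declaration, and the callback prototype are assumptions about this kernel era inferred from the call sites above, not a verbatim copy of workingset_update_node() in mm/workingset.c.

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/list_lru.h>
#include <linux/radix-tree.h>

/* Declared by the workingset code; repeated here so the sketch reads
 * standalone (compare the list_lru_add/del calls removed above). */
extern struct list_lru workingset_shadow_nodes;

/*
 * Simplified sketch of an update_node-style callback as passed to
 * __radix_tree_replace() in the hunks above. node->count stands in for
 * "entries in this node" and node->exceptional for "shadow entries in
 * this node"; treat these field names as assumptions.
 */
static void example_update_node(struct radix_tree_node *node, void *private)
{
	struct address_space *mapping = private;

	/* Only regular page cache keeps reclaimable shadow entries */
	if (dax_mapping(mapping))
		return;

	if (node->count && node->count == node->exceptional) {
		/* Node now holds only shadow entries: start tracking it */
		if (list_empty(&node->private_list)) {
			node->private_data = mapping;
			list_lru_add(&workingset_shadow_nodes,
				     &node->private_list);
		}
	} else {
		/* Node holds pages again (or is empty): stop tracking it */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
}

With this shape, the insert and delete paths in mm/filemap.c only pass the callback and the mapping; the decision about whether a node belongs on the shadow-node LRU is made in one place instead of being duplicated at every call site.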