aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2016-10-04 16:02:08 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-10-05 12:17:56 -0400
commitd3798ae8c6f3767c726403c2ca6ecc317752c9dd (patch)
treedda86fd5c0e63f1d61cdaa3f2ad54637b1442f01
parenta3443cda5588985a2724d6d0f4a5f04e625be6eb (diff)
mm: filemap: don't plant shadow entries without radix tree node
When the underflow checks were added to workingset_node_shadow_dec(), they triggered immediately: kernel BUG at ./include/linux/swap.h:276! invalid opcode: 0000 [#1] SMP Modules linked in: isofs usb_storage fuse xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun nf_conntrack_netbios_ns nf_conntrack_broadcast ip6t_REJECT nf_reject_ipv6 soundcore wmi acpi_als pinctrl_sunrisepoint kfifo_buf tpm_tis industrialio acpi_pad pinctrl_intel tpm_tis_core tpm nfsd auth_rpcgss nfs_acl lockd grace sunrpc dm_crypt CPU: 0 PID: 20929 Comm: blkid Not tainted 4.8.0-rc8-00087-gbe67d60ba944 #1 Hardware name: System manufacturer System Product Name/Z170-K, BIOS 1803 05/06/2016 task: ffff8faa93ecd940 task.stack: ffff8faa7f478000 RIP: page_cache_tree_insert+0xf1/0x100 Call Trace: __add_to_page_cache_locked+0x12e/0x270 add_to_page_cache_lru+0x4e/0xe0 mpage_readpages+0x112/0x1d0 blkdev_readpages+0x1d/0x20 __do_page_cache_readahead+0x1ad/0x290 force_page_cache_readahead+0xaa/0x100 page_cache_sync_readahead+0x3f/0x50 generic_file_read_iter+0x5af/0x740 blkdev_read_iter+0x35/0x40 __vfs_read+0xe1/0x130 vfs_read+0x96/0x130 SyS_read+0x55/0xc0 entry_SYSCALL_64_fastpath+0x13/0x8f Code: 03 00 48 8b 5d d8 65 48 33 1c 25 28 00 00 00 44 89 e8 75 19 48 83 c4 18 5b 41 5c 41 5d 41 5e 5d c3 0f 0b 41 bd ef ff ff ff eb d7 <0f> 0b e8 88 68 ef ff 0f 1f 84 00 RIP page_cache_tree_insert+0xf1/0x100 This is a long-standing bug in the way shadow entries are accounted in the radix tree nodes. The shrinker needs to know when radix tree nodes contain only shadow entries, no pages, so node->count is split in half to count shadows in the upper bits and pages in the lower bits. Unfortunately, the radix tree implementation doesn't know of this and assumes all entries are in node->count. When there is a shadow entry directly in root->rnode and the tree is later extended, the radix tree implementation will copy that entry into the new node and bump its node->count, i.e. it increases the page count bits. 
Once the shadow gets removed and we subtract from the upper counter, node->count underflows and triggers the warning. Afterwards, without node->count reaching 0 again, the radix tree node is leaked. Limit shadow entries to when we have actual radix tree nodes and can count them properly. That means we lose the ability to detect refaults from files that had only the first page faulted in at eviction time. Fixes: 449dd6984d0e ("mm: keep page cache radix tree nodes in check") Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Reported-and-tested-by: Linus Torvalds <torvalds@linux-foundation.org> Reviewed-by: Jan Kara <jack@suse.cz> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/radix-tree.h6
-rw-r--r--lib/radix-tree.c14
-rw-r--r--mm/filemap.c46
3 files changed, 36 insertions, 30 deletions
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 4c45105dece3..52b97db93830 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct radix_tree_root *root,
280 struct radix_tree_node *node); 280 struct radix_tree_node *node);
281void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); 281void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
282void *radix_tree_delete(struct radix_tree_root *, unsigned long); 282void *radix_tree_delete(struct radix_tree_root *, unsigned long);
283struct radix_tree_node *radix_tree_replace_clear_tags( 283void radix_tree_clear_tags(struct radix_tree_root *root,
284 struct radix_tree_root *root, 284 struct radix_tree_node *node,
285 unsigned long index, void *entry); 285 void **slot);
286unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, 286unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
287 void **results, unsigned long first_index, 287 void **results, unsigned long first_index,
288 unsigned int max_items); 288 unsigned int max_items);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 91f0727e3cad..8e6d552c40dd 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1583} 1583}
1584EXPORT_SYMBOL(radix_tree_delete); 1584EXPORT_SYMBOL(radix_tree_delete);
1585 1585
1586struct radix_tree_node *radix_tree_replace_clear_tags( 1586void radix_tree_clear_tags(struct radix_tree_root *root,
1587 struct radix_tree_root *root, 1587 struct radix_tree_node *node,
1588 unsigned long index, void *entry) 1588 void **slot)
1589{ 1589{
1590 struct radix_tree_node *node;
1591 void **slot;
1592
1593 __radix_tree_lookup(root, index, &node, &slot);
1594
1595 if (node) { 1590 if (node) {
1596 unsigned int tag, offset = get_slot_offset(node, slot); 1591 unsigned int tag, offset = get_slot_offset(node, slot);
1597 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 1592 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
@@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_replace_clear_tags(
1600 /* Clear root node tags */ 1595 /* Clear root node tags */
1601 root->gfp_mask &= __GFP_BITS_MASK; 1596 root->gfp_mask &= __GFP_BITS_MASK;
1602 } 1597 }
1603
1604 radix_tree_replace_slot(slot, entry);
1605 return node;
1606} 1598}
1607 1599
1608/** 1600/**
diff --git a/mm/filemap.c b/mm/filemap.c
index 2d0986a64f1f..96b9e9c30630 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -169,33 +169,35 @@ static int page_cache_tree_insert(struct address_space *mapping,
169static void page_cache_tree_delete(struct address_space *mapping, 169static void page_cache_tree_delete(struct address_space *mapping,
170 struct page *page, void *shadow) 170 struct page *page, void *shadow)
171{ 171{
172 struct radix_tree_node *node;
173 int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page); 172 int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
174 173
175 VM_BUG_ON_PAGE(!PageLocked(page), page); 174 VM_BUG_ON_PAGE(!PageLocked(page), page);
176 VM_BUG_ON_PAGE(PageTail(page), page); 175 VM_BUG_ON_PAGE(PageTail(page), page);
177 VM_BUG_ON_PAGE(nr != 1 && shadow, page); 176 VM_BUG_ON_PAGE(nr != 1 && shadow, page);
178 177
179 if (shadow) {
180 mapping->nrexceptional += nr;
181 /*
182 * Make sure the nrexceptional update is committed before
183 * the nrpages update so that final truncate racing
184 * with reclaim does not see both counters 0 at the
185 * same time and miss a shadow entry.
186 */
187 smp_wmb();
188 }
189 mapping->nrpages -= nr;
190
191 for (i = 0; i < nr; i++) { 178 for (i = 0; i < nr; i++) {
192 node = radix_tree_replace_clear_tags(&mapping->page_tree, 179 struct radix_tree_node *node;
193 page->index + i, shadow); 180 void **slot;
181
182 __radix_tree_lookup(&mapping->page_tree, page->index + i,
183 &node, &slot);
184
185 radix_tree_clear_tags(&mapping->page_tree, node, slot);
186
194 if (!node) { 187 if (!node) {
195 VM_BUG_ON_PAGE(nr != 1, page); 188 VM_BUG_ON_PAGE(nr != 1, page);
196 return; 189 /*
190 * We need a node to properly account shadow
191 * entries. Don't plant any without. XXX
192 */
193 shadow = NULL;
197 } 194 }
198 195
196 radix_tree_replace_slot(slot, shadow);
197
198 if (!node)
199 break;
200
199 workingset_node_pages_dec(node); 201 workingset_node_pages_dec(node);
200 if (shadow) 202 if (shadow)
201 workingset_node_shadows_inc(node); 203 workingset_node_shadows_inc(node);
@@ -219,6 +221,18 @@ static void page_cache_tree_delete(struct address_space *mapping,
219 &node->private_list); 221 &node->private_list);
220 } 222 }
221 } 223 }
224
225 if (shadow) {
226 mapping->nrexceptional += nr;
227 /*
228 * Make sure the nrexceptional update is committed before
229 * the nrpages update so that final truncate racing
230 * with reclaim does not see both counters 0 at the
231 * same time and miss a shadow entry.
232 */
233 smp_wmb();
234 }
235 mapping->nrpages -= nr;
222} 236}
223 237
224/* 238/*