author	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-09 21:27:52 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-09 21:27:52 -0500
commit	380173ff56c7faea02d1278e8d9783093fc776f6 (patch)
tree	0f0984f977aaf331d03a2fc3d6c8f329e789a7f0
parent	2f0d94ea413a0d56629435e4b9f43b52035a3e79 (diff)
parent	d6b7eaeb03421139e32800324ef04ab50bba886d (diff)
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "13 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  dma-mapping: avoid oops when parameter cpu_addr is null
  mm/hugetlb: use EOPNOTSUPP in hugetlb sysctl handlers
  memremap: check pfn validity before passing to pfn_to_page()
  mm, thp: fix migration of PTE-mapped transparent huge pages
  dax: check return value of dax_radix_entry()
  ocfs2: fix return value from ocfs2_page_mkwrite()
  arm64: kasan: clear stale stack poison
  sched/kasan: remove stale KASAN poison after hotplug
  kasan: add functions to clear stack poison
  mm: fix mixed zone detection in devm_memremap_pages
  list: kill list_force_poison()
  mm: __delete_from_page_cache show Bad page if mapped
  mm/hugetlb: hugetlb_no_page: rate-limit warning message
-rw-r--r--	arch/arm64/kernel/sleep.S	4
-rw-r--r--	fs/dax.c	9
-rw-r--r--	fs/ocfs2/mmap.c	4
-rw-r--r--	include/linux/dma-mapping.h	2
-rw-r--r--	include/linux/kasan.h	6
-rw-r--r--	include/linux/list.h	11
-rw-r--r--	kernel/memremap.c	24
-rw-r--r--	kernel/sched/core.c	3
-rw-r--r--	lib/list_debug.c	9
-rw-r--r--	mm/filemap.c	25
-rw-r--r--	mm/hugetlb.c	6
-rw-r--r--	mm/kasan/kasan.c	20
-rw-r--r--	mm/mempolicy.c	2
13 files changed, 88 insertions, 37 deletions
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index e33fe33876ab..fd10eb663868 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -145,6 +145,10 @@ ENTRY(cpu_resume_mmu)
 ENDPROC(cpu_resume_mmu)
 	.popsection
 cpu_resume_after_mmu:
+#ifdef CONFIG_KASAN
+	mov	x0, sp
+	bl	kasan_unpoison_remaining_stack
+#endif
 	mov	x0, #0			// return zero on success
 	ldp	x19, x20, [sp, #16]
 	ldp	x21, x22, [sp, #32]
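Note: the two added instructions hand the live stack pointer to C. Under
the AArch64 procedure call standard the first argument is passed in x0,
so "mov x0, sp; bl kasan_unpoison_remaining_stack" is an assembly-level
call to the helper added in mm/kasan/kasan.c further down:

	asmlinkage void kasan_unpoison_remaining_stack(void *sp);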
diff --git a/fs/dax.c b/fs/dax.c
index 711172450da6..bbb2ad783770 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1056,6 +1056,7 @@ EXPORT_SYMBOL_GPL(dax_pmd_fault);
 int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct file *file = vma->vm_file;
+	int error;
 
 	/*
 	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
@@ -1065,7 +1066,13 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * saves us from having to make a call to get_block() here to look
 	 * up the sector.
 	 */
-	dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false, true);
+	error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
+			true);
+
+	if (error == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (error)
+		return VM_FAULT_SIGBUS;
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
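Note: this hunk follows the usual convention for ->pfn_mkwrite()
handlers, which must return VM_FAULT_* codes rather than negative
errnos. A minimal sketch of the mapping (the helper name is
hypothetical; fs/dax.c open-codes it above):

	/* Hypothetical helper: translate an errno from dax_radix_entry()
	 * into the VM_FAULT_* value a fault handler must return. */
	static int dax_errno_to_fault(int error)
	{
		if (error == -ENOMEM)
			return VM_FAULT_OOM;	/* allocation failure */
		if (error)
			return VM_FAULT_SIGBUS;	/* any other failure */
		return VM_FAULT_NOPAGE;		/* PTE already installed */
	}

The ocfs2 hunk below applies the same errno-to-VM_FAULT translation.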
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 9581d190f6e1..77ebc2bc1cca 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -147,6 +147,10 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
 	if (ret < 0) {
 		mlog_errno(ret);
+		if (ret == -ENOMEM)
+			ret = VM_FAULT_OOM;
+		else
+			ret = VM_FAULT_SIGBUS;
 		goto out;
 	}
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 75857cda38e9..728ef074602a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -386,7 +386,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
 
-	if (!ops->free)
+	if (!ops->free || !cpu_addr)
 		return;
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
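Note: the NULL check matters because dma_alloc_coherent() failures are
commonly cleaned up with an unconditional free loop. A sketch of such a
driver teardown path, with a hypothetical bookkeeping struct (before
this fix, an arch ops->free() could oops on the NULL cpu_addr):

	struct buf_slot {		/* hypothetical bookkeeping */
		size_t size;
		void *cpu;		/* NULL if allocation failed */
		dma_addr_t dma;
	};

	static void teardown_bufs(struct device *dev, struct buf_slot *b, int n)
	{
		int i;

		/* Safe even for slots whose allocation failed: with this
		 * fix, dma_free_attrs() returns early on a NULL cpu_addr. */
		for (i = 0; i < n; i++)
			dma_free_coherent(dev, b[i].size, b[i].cpu, b[i].dma);
	}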
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 4b9f85c963d0..0fdc798e3ff7 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -1,6 +1,7 @@
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/sched.h>
 #include <linux/types.h>
 
 struct kmem_cache;
@@ -13,7 +14,6 @@ struct vm_struct;
 
 #include <asm/kasan.h>
 #include <asm/pgtable.h>
-#include <linux/sched.h>
 
 extern unsigned char kasan_zero_page[PAGE_SIZE];
 extern pte_t kasan_zero_pte[PTRS_PER_PTE];
@@ -43,6 +43,8 @@ static inline void kasan_disable_current(void)
 
 void kasan_unpoison_shadow(const void *address, size_t size);
 
+void kasan_unpoison_task_stack(struct task_struct *task);
+
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
@@ -66,6 +68,8 @@ void kasan_free_shadow(const struct vm_struct *vm);
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
 
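Note: pairing a real declaration with an empty static inline stub is
the standard kernel pattern for optional features; call sites need no
#ifdef CONFIG_KASAN. A sketch of a caller (function name hypothetical):

	static void bringup_idle_sketch(struct task_struct *idle)
	{
		/* Compiles to nothing when CONFIG_KASAN is disabled. */
		kasan_unpoison_task_stack(idle);
	}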
diff --git a/include/linux/list.h b/include/linux/list.h
index 30cf4200ab40..5356f4d661a7 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -113,17 +113,6 @@ extern void __list_del_entry(struct list_head *entry);
 extern void list_del(struct list_head *entry);
 #endif
 
-#ifdef CONFIG_DEBUG_LIST
-/*
- * See devm_memremap_pages() which wants DEBUG_LIST=y to assert if one
- * of the pages it allocates is ever passed to list_add()
- */
-extern void list_force_poison(struct list_head *entry);
-#else
-/* fallback to the less strict LIST_POISON* definitions */
-#define list_force_poison list_del
-#endif
-
 /**
  * list_replace - replace old entry by new one
  * @old : the element to be replaced
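Note: force-poisoning is redundant because plain list_del() already
seeds a detached entry with poison values. From include/linux/list.h
(non-debug variant) and include/linux/poison.h:

	static inline void list_del(struct list_head *entry)
	{
		__list_del(entry->prev, entry->next);
		entry->next = LIST_POISON1;	/* non-dereferenceable by design */
		entry->prev = LIST_POISON2;
	}

Any later list operation that treats the poisoned entry as linked
dereferences LIST_POISON1/2 and faults, which preserves the assertion
the removed API provided.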
diff --git a/kernel/memremap.c b/kernel/memremap.c
index b981a7b023f0..6cf54615a9c4 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -29,10 +29,10 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
 
 static void *try_ram_remap(resource_size_t offset, size_t size)
 {
-	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);
+	unsigned long pfn = PHYS_PFN(offset);
 
 	/* In the simple case just return the existing linear address */
-	if (!PageHighMem(page))
+	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
 		return __va(offset);
 	return NULL;	/* fallback to ioremap_cache */
 }
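Note: the fix avoids calling pfn_to_page() on a pfn that may have no
memmap entry (e.g. a persistent-memory range in a memory hole), where
reading the struct page oopses. PHYS_PFN() is the plain shift, from
include/linux/pfn.h:

	#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

pfn_valid(pfn) is therefore checked first, and the PageHighMem() test
only ever runs on a page that actually exists.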
@@ -270,13 +270,16 @@ struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
 void *devm_memremap_pages(struct device *dev, struct resource *res,
 		struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
-	int is_ram = region_intersects(res->start, resource_size(res),
-			"System RAM");
 	resource_size_t key, align_start, align_size, align_end;
 	struct dev_pagemap *pgmap;
 	struct page_map *page_map;
+	int error, nid, is_ram;
 	unsigned long pfn;
-	int error, nid;
+
+	align_start = res->start & ~(SECTION_SIZE - 1);
+	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+		- align_start;
+	is_ram = region_intersects(align_start, align_size, "System RAM");
 
 	if (is_ram == REGION_MIXED) {
 		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
@@ -314,8 +317,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
 	mutex_lock(&pgmap_lock);
 	error = 0;
-	align_start = res->start & ~(SECTION_SIZE - 1);
-	align_size = ALIGN(resource_size(res), SECTION_SIZE);
 	align_end = align_start + align_size - 1;
 	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
 		struct dev_pagemap *dup;
@@ -351,8 +352,13 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	for_each_device_pfn(pfn, page_map) {
 		struct page *page = pfn_to_page(pfn);
 
-		/* ZONE_DEVICE pages must never appear on a slab lru */
-		list_force_poison(&page->lru);
+		/*
+		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
+		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
+		 * freed or placed on a driver-private list.  Seed the
+		 * storage with LIST_POISON* values.
+		 */
+		list_del(&page->lru);
 		page->pgmap = pgmap;
 	}
 	devres_add(dev, page_map);
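Note: the new comment refers to the field overlay in struct page. An
abridged sketch of the relevant union from include/linux/mm_types.h
(other members omitted):

	union {
		struct list_head lru;		/* pagecache / driver lists */
		struct dev_pagemap *pgmap;	/* ZONE_DEVICE back pointer */
		/* ... */
	};

Because ->pgmap shares storage with ->lru, putting a ZONE_DEVICE page
on any list would silently corrupt its pgmap back pointer; the poison
left by list_del() turns such misuse into an immediate fault instead.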
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9503d590e5ef..41f6b2215aa8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -26,6 +26,7 @@
  *  Thomas Gleixner, Mike Kravetz
  */
 
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -5096,6 +5097,8 @@ void init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
+	kasan_unpoison_task_stack(idle);
+
 #ifdef CONFIG_SMP
 	/*
 	 * Its possible that init_idle() gets called multiple times on a task,
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3345a089ef7b..3859bf63561c 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -12,13 +12,6 @@
 #include <linux/kernel.h>
 #include <linux/rculist.h>
 
-static struct list_head force_poison;
-void list_force_poison(struct list_head *entry)
-{
-	entry->next = &force_poison;
-	entry->prev = &force_poison;
-}
-
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -30,8 +23,6 @@ void __list_add(struct list_head *new,
 		struct list_head *prev,
 		struct list_head *next)
 {
-	WARN(new->next == &force_poison || new->prev == &force_poison,
-		"list_add attempted on force-poisoned entry\n");
 	WARN(next->prev != prev,
 		"list_add corruption. next->prev should be "
 		"prev (%p), but was %p. (next=%p).\n",
diff --git a/mm/filemap.c b/mm/filemap.c
index 3461d97ecb30..da7a35d83de7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -195,6 +195,30 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 	else
 		cleancache_invalidate_page(mapping, page);
 
+	VM_BUG_ON_PAGE(page_mapped(page), page);
+	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
+		int mapcount;
+
+		pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
+			 current->comm, page_to_pfn(page));
+		dump_page(page, "still mapped when deleted");
+		dump_stack();
+		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+
+		mapcount = page_mapcount(page);
+		if (mapping_exiting(mapping) &&
+		    page_count(page) >= mapcount + 2) {
+			/*
+			 * All vmas have already been torn down, so it's
+			 * a good bet that actually the page is unmapped,
+			 * and we'd prefer not to leak it: if we're wrong,
+			 * some other bad page check should catch it later.
+			 */
+			page_mapcount_reset(page);
+			atomic_sub(mapcount, &page->_count);
+		}
+	}
+
 	page_cache_tree_delete(mapping, page, shadow);
 
 	page->mapping = NULL;
@@ -205,7 +229,6 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	if (PageSwapBacked(page))
 		__dec_zone_page_state(page, NR_SHMEM);
-	VM_BUG_ON_PAGE(page_mapped(page), page);
 
 	/*
 	 * At this point page must be either written or cleaned by truncate.
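Note: the recovery arithmetic in the first hunk can be sanity-checked
with a worked example; suppose a leaked page with page_mapcount() == 1:

	/* References held on a page-cache page at this point:
	 *   the page cache itself           +1
	 *   our caller (deleting the page)  +1
	 *   each (stale) mapping            +1 each -> mapcount
	 * page_count() >= mapcount + 2 means there is at least one
	 * reference per stale mapping, so the subtraction below cannot
	 * drop the count to zero while the page is still in use:
	 */
	page_mapcount_reset(page);		/* _mapcount back to -1 */
	atomic_sub(mapcount, &page->_count);	/* drop the stale refs */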
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 01f2b48c8618..aefba5a9cc47 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2751,7 +2751,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 	int ret;
 
 	if (!hugepages_supported())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	table->data = &tmp;
 	table->maxlen = sizeof(unsigned long);
@@ -2792,7 +2792,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 	int ret;
 
 	if (!hugepages_supported())
-		return -EOPNOTSUPP;
+		return -EOPNOTSUPP;
 
 	tmp = h->nr_overcommit_huge_pages;
 
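Note: ENOTSUPP (524) is a kernel-internal errno with no userspace
definition, so glibc cannot format it; EOPNOTSUPP (95) is part of the
UAPI. A runnable userspace check of what a sysctl reader would see:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* EOPNOTSUPP has a proper message. */
		printf("%d: %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
		/* 524 == ENOTSUPP, kernel-internal only: */
		printf("%d: %s\n", 524, strerror(524));	/* "Unknown error 524" */
		return 0;
	}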
@@ -3502,7 +3502,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * COW. Warn that such a situation has occurred as it may not be obvious
 	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
-		pr_warning("PID %d killed due to inadequate hugepage pool\n",
+		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
 			   current->pid);
 		return ret;
 	}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index bc0a8d8b8f42..1ad20ade8c91 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
+#include <linux/linkage.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -60,6 +61,25 @@ void kasan_unpoison_shadow(const void *address, size_t size)
 	}
 }
 
+static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+{
+	void *base = task_stack_page(task);
+	size_t size = sp - base;
+
+	kasan_unpoison_shadow(base, size);
+}
+
+/* Unpoison the entire stack for a task. */
+void kasan_unpoison_task_stack(struct task_struct *task)
+{
+	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
+}
+
+/* Unpoison the stack for the current task beyond a watermark sp value. */
+asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+{
+	__kasan_unpoison_stack(current, sp);
+}
 
 /*
  * All functions below always inlined so compiler could
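Note: kasan_unpoison_shadow() clears the shadow bytes covering a real
address range. In generic KASAN one shadow byte tracks an 8-byte
granule (KASAN_SHADOW_SCALE_SHIFT == 3), via the mapping in
include/linux/kasan.h:

	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

Since the stack grows down, poison left by calls made before a suspend
lives below the resume-time sp; unpoisoning [task_stack_page(task), sp)
erases exactly those stale markings.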
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4c4187c0e1de..9a3f6b90e628 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -532,7 +532,7 @@ retry:
 		nid = page_to_nid(page);
 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
 			continue;
-		if (PageTail(page) && PageAnon(page)) {
+		if (PageTransCompound(page) && PageAnon(page)) {
 			get_page(page);
 			pte_unmap_unlock(pte, ptl);
 			lock_page(page);
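Note: with the v4.5 THP refcounting rework, a PTE-mapped transparent
huge page can be reached through its head page, which PageTail() does
not match. PageTransCompound() covers head and tail pages alike; its
definition (include/linux/page-flags.h, CONFIG_TRANSPARENT_HUGEPAGE=y)
is simply:

	static inline int PageTransCompound(struct page *page)
	{
		return PageCompound(page);
	}

so the migration path now triggers for every subpage of a THP found in
a PTE, not just the tails.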