author      Linus Torvalds <torvalds@linux-foundation.org>  2017-08-25 21:02:27 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2017-08-25 21:02:27 -0400
commit      b3242dba9ff285962fe84d1135cafe9383d721f0
tree        cf4399b9a45bca863a0b404bfc7edfeff670ab1e
parent      67a3b5cb33633f39db8809ae56c8c1752b541daa
parent      91b540f98872a206ea1c49e4aa6ea8eed0886644
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
"6 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
mm/memblock.c: reversed logic in memblock_discard()
fork: fix incorrect fput of ->exe_file causing use-after-free
mm/madvise.c: fix freeing of locked page with MADV_FREE
dax: fix deadlock due to misaligned PMD faults
mm, shmem: fix handling /sys/kernel/mm/transparent_hugepage/shmem_enabled
PM/hibernate: touch NMI watchdog when creating snapshot
 fs/dax.c        | 10 ++++++++++
 kernel/fork.c   |  1 +
 mm/madvise.c    |  2 +-
 mm/memblock.c   |  2 +-
 mm/page_alloc.c | 20 ++++++++++++++++++--
 mm/shmem.c      |  4 ++--
 6 files changed, 33 insertions(+), 6 deletions(-)
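
The commit and parent hashes in the header are enough to reproduce everything below from a kernel checkout, using ordinary git commands (abbreviated hashes taken from the header above):

    # combined diff of the merge against its first parent
    git diff 67a3b5cb3363 b3242dba9ff2

    # the six merged patches, excluding the merge commit itself
    git log --oneline --no-merges 67a3b5cb3363..b3242dba9ff2
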
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1383,6 +1383,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
 
 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
 
+	/*
+	 * Make sure that the faulting address's PMD offset (color) matches
+	 * the PMD offset from the start of the file. This is necessary so
+	 * that a PMD range in the page table overlaps exactly with a PMD
+	 * range in the radix tree.
+	 */
+	if ((vmf->pgoff & PG_PMD_COLOUR) !=
+	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
+		goto fallback;
+
 	/* Fall back to PTEs if we're going to COW */
 	if (write && !(vma->vm_flags & VM_SHARED))
 		goto fallback;
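
The added check rejects PMD faults where the page's position inside a PMD-sized block differs between the file and the virtual address space; per the patch's own comment, a mismatch means a page-table PMD range and a radix-tree PMD entry would cover different file ranges, which is what led to the reported deadlock. A minimal userspace sketch of the same test, assuming x86-64 geometry (4 KiB pages, 512 pages per 2 MiB PMD) and mainline's definition of PG_PMD_COLOUR as ((PMD_SIZE >> PAGE_SHIFT) - 1):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT    12
    #define PG_PMD_COLOUR 511UL   /* pages per 2 MiB PMD, minus one */

    /* A PMD can serve the fault only when the faulting page sits at
     * the same offset within a PMD-sized block in the file (pgoff)
     * and in the address space (address); otherwise fall back to
     * PTE-sized faults, as the patch does. */
    static bool dax_pmd_colour_ok(uint64_t pgoff, uint64_t address)
    {
        return (pgoff & PG_PMD_COLOUR) ==
               ((address >> PAGE_SHIFT) & PG_PMD_COLOUR);
    }

A MAP_FIXED mapping placed one page off from its file offset fails this test for every PMD in the range, so such mappings now always take the PTE path.
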
diff --git a/kernel/fork.c b/kernel/fork.c
index e075b7780421..cbbea277b3fb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -806,6 +806,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm_init_cpumask(mm);
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
+	RCU_INIT_POINTER(mm->exe_file, NULL);
 	mmu_notifier_mm_init(mm);
 	init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
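
The one-liner matters because the child mm is built by memcpy() from the parent before initialization runs, so ->exe_file starts out aliasing the parent's file without a reference of its own; if a later setup step fails, the teardown path fput()s that borrowed pointer and the parent's reference count drops one too low, hence the use-after-free in the patch title. Clearing the field before the first failure point makes the error path harmless. A minimal userspace sketch of the pattern, with toy stand-ins for struct file, get_file() and fput():

    #include <stdlib.h>
    #include <string.h>

    struct file { int refs; };                    /* toy refcounted object */
    static void get_file(struct file *f) { f->refs++; }
    static void fput(struct file *f) { if (--f->refs == 0) free(f); }

    struct mm { struct file *exe_file; };

    /* Mirrors the fixed ordering: reset aliased refcounted pointers
     * right after the memcpy(); take a real reference only once no
     * failure can intervene. */
    static int dup_mm_sketch(struct mm *child, struct mm *parent, int fail)
    {
        memcpy(child, parent, sizeof(*child));    /* aliases parent->exe_file */
        child->exe_file = NULL;                   /* the fix */

        if (fail) {                               /* models a failed fork() */
            if (child->exe_file)
                fput(child->exe_file);            /* error path: now a no-op */
            return -1;
        }

        get_file(parent->exe_file);               /* child's own reference */
        child->exe_file = parent->exe_file;
        return 0;
    }
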
diff --git a/mm/madvise.c b/mm/madvise.c
index 47d8d8a25eae..23ed525bc2bc 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 				pte_offset_map_lock(mm, pmd, addr, &ptl);
 				goto out;
 			}
-			put_page(page);
 			unlock_page(page);
+			put_page(page);
 			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 			pte--;
 			addr -= PAGE_SIZE;
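
The reordering encodes a general refcounting rule: put_page() may drop the last reference and free the page, and freeing a page that is still locked hands a stale PG_locked page back to the allocator, so the final put must come after the unlock. The same hazard in plain C, using a mutex and a deliberately non-atomic counter purely for illustration:

    #include <pthread.h>
    #include <stdlib.h>

    struct page_like {
        pthread_mutex_t lock;
        int refs;            /* not atomic; a sketch, not production code */
    };

    static void put_ref(struct page_like *p)
    {
        if (--p->refs == 0) {
            pthread_mutex_destroy(&p->lock);  /* UB if still held */
            free(p);
        }
    }

    /* Wrong order (the old code): put_ref(p) may free p, and the
     * later unlock touches freed memory. Right order (the fix): */
    static void unlock_and_put(struct page_like *p)
    {
        pthread_mutex_unlock(&p->lock);
        put_ref(p);
    }
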
diff --git a/mm/memblock.c b/mm/memblock.c
index bf14aea6ab70..91205780e6b1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -299,7 +299,7 @@ void __init memblock_discard(void)
 		__memblock_free_late(addr, size);
 	}
 
-	if (memblock.memory.regions == memblock_memory_init_regions) {
+	if (memblock.memory.regions != memblock_memory_init_regions) {
 		addr = __pa(memblock.memory.regions);
 		size = PAGE_ALIGN(sizeof(struct memblock_region) *
 				  memblock.memory.max);
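
memblock's region arrays begin life as static bootstrap buffers and are swapped for dynamically allocated memory only if they have to grow, so the discard path must free the array precisely when the pointer has moved off the static storage; the inverted comparison did the opposite, attempting to free the static buffer and leaking any grown array. The shape of the corrected test, reduced to a standalone sketch:

    #include <stdlib.h>

    #define INIT_NREGIONS 128
    static long init_regions[INIT_NREGIONS];  /* static bootstrap array */

    static long *regions = init_regions;      /* may later point to heap */

    /* Mirrors the fixed memblock_discard() logic: only storage that
     * was reallocated away from the static buffer may be freed.
     * Writing '==' here was the reversed logic being fixed. */
    static void discard_regions(void)
    {
        if (regions != init_regions)
            free(regions);
        regions = NULL;
    }
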
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1bad301820c7..7a58eb5757e3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,7 @@
 #include <linux/kthread.h>
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
+#include <linux/nmi.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -2535,9 +2536,14 @@ void drain_all_pages(struct zone *zone)
 
 #ifdef CONFIG_HIBERNATION
 
+/*
+ * Touch the watchdog for every WD_PAGE_COUNT pages.
+ */
+#define WD_PAGE_COUNT	(128*1024)
+
 void mark_free_pages(struct zone *zone)
 {
-	unsigned long pfn, max_zone_pfn;
+	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
 	unsigned long flags;
 	unsigned int order, t;
 	struct page *page;
@@ -2552,6 +2558,11 @@ void mark_free_pages(struct zone *zone)
 		if (pfn_valid(pfn)) {
 			page = pfn_to_page(pfn);
 
+			if (!--page_count) {
+				touch_nmi_watchdog();
+				page_count = WD_PAGE_COUNT;
+			}
+
 			if (page_zone(page) != zone)
 				continue;
 
@@ -2565,8 +2576,13 @@ void mark_free_pages(struct zone *zone)
 			unsigned long i;
 
 			pfn = page_to_pfn(page);
-			for (i = 0; i < (1UL << order); i++)
+			for (i = 0; i < (1UL << order); i++) {
+				if (!--page_count) {
+					touch_nmi_watchdog();
+					page_count = WD_PAGE_COUNT;
+				}
 				swsusp_set_page_free(pfn_to_page(pfn + i));
+			}
 		}
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
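
mark_free_pages() scans every pfn of a zone with zone->lock held and interrupts off, which on large-memory machines can outlast the hard-lockup detector's timeout; the patch amortizes a touch_nmi_watchdog() call to once per WD_PAGE_COUNT pages (128*1024, i.e. 512 MiB of 4 KiB pages), so the hot loop pays only a decrement and a branch per iteration. The countdown idiom in isolation, in kernel context (touch_nmi_watchdog() is the real API from <linux/nmi.h>; the walker and its body are placeholders):

    #include <linux/nmi.h>               /* touch_nmi_watchdog() */

    #define WD_PAGE_COUNT (128 * 1024)   /* interval chosen by the patch */

    static void walk_pfns(unsigned long start, unsigned long end)
    {
        unsigned long pfn, budget = WD_PAGE_COUNT;

        for (pfn = start; pfn < end; pfn++) {
            if (!--budget) {
                touch_nmi_watchdog();    /* reset the lockup timers */
                budget = WD_PAGE_COUNT;
            }
            /* ... per-pfn work, as in mark_free_pages() ... */
        }
    }
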
diff --git a/mm/shmem.c b/mm/shmem.c
index 6540e5982444..fbcb3c96a186 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3967,7 +3967,7 @@ int __init shmem_init(void)
 	}
 
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-	if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
+	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	else
 		shmem_huge = 0; /* just in case it was patched */
@@ -4028,7 +4028,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 		return -EINVAL;
 
 	shmem_huge = huge;
-	if (shmem_huge < SHMEM_HUGE_DENY)
+	if (shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	return count;
 }
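
The flipped comparisons only make sense against the SHMEM_HUGE_* encoding in mm/shmem.c, where (to the best of my reading of the mainline source of this era; values shown for illustration) the ordinary modes are non-negative and the two special overrides are negative:

    SHMEM_HUGE_NEVER        0  \
    SHMEM_HUGE_ALWAYS       1   |  ordinary modes: mirrored into the
    SHMEM_HUGE_WITHIN_SIZE  2   |  superblock of the internal mount
    SHMEM_HUGE_ADVISE       3  /
    SHMEM_HUGE_DENY        -1  \   special overrides: never copied
    SHMEM_HUGE_FORCE       -2  /   into the superblock

    shmem_huge > SHMEM_HUGE_DENY   /* selects exactly the four ordinary modes */

The old test, shmem_huge < SHMEM_HUGE_DENY, matched only SHMEM_HUGE_FORCE, so ordinary values written to /sys/kernel/mm/transparent_hugepage/shmem_enabled were never propagated to the internal mount.
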