Diffstat (limited to 'mm')
-rw-r--r--   mm/early_ioremap.c    2
-rw-r--r--   mm/frame_vector.c     6
-rw-r--r--   mm/gup.c              2
-rw-r--r--   mm/hmm.c              8
-rw-r--r--   mm/huge_memory.c      6
-rw-r--r--   mm/kmemleak.c         2
-rw-r--r--   mm/memory.c          11
-rw-r--r--   mm/mmap.c            10
-rw-r--r--   mm/oom_kill.c         4
-rw-r--r--   mm/page_alloc.c      11
-rw-r--r--   mm/percpu.c           4
-rw-r--r--   mm/slab.c            23
12 files changed, 53 insertions, 36 deletions
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index d04ac1ec0559..1826f191e72c 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -111,7 +111,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
         enum fixed_addresses idx;
         int i, slot;
 
-        WARN_ON(system_state != SYSTEM_BOOTING);
+        WARN_ON(system_state >= SYSTEM_RUNNING);
 
         slot = -1;
         for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 297c7238f7d4..c64dca6e27c2 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -62,8 +62,10 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
          * get_user_pages_longterm() and disallow it for filesystem-dax
          * mappings.
          */
-        if (vma_is_fsdax(vma))
-                return -EOPNOTSUPP;
+        if (vma_is_fsdax(vma)) {
+                ret = -EOPNOTSUPP;
+                goto out;
+        }
 
         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
                 vec->got_ref = true;
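
Note: the rejected-fsdax case above fires after get_vaddr_frames() has taken the caller's mmap_sem for reading, so returning directly would leave the semaphore held; routing the error through the function's existing out: label lets the unlock path run. A minimal userspace sketch of the same goto-cleanup pattern — the pthread mutex and the do_work()/unsupported names are illustrative, not kernel API:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    int do_work(int unsupported)
    {
            int ret = 0;

            pthread_mutex_lock(&lock);
            if (unsupported) {
                    ret = -EOPNOTSUPP;
                    goto out;       /* a bare return here would leak the lock */
            }
            /* ... the actual work, done under the lock ... */
    out:
            pthread_mutex_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            return do_work(1) == -EOPNOTSUPP ? 0 : 1;
    }

The rule the fix restores: every failure path taken after a resource is acquired must funnel through the label that releases it.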
diff --git a/mm/gup.c b/mm/gup.c
index d3fb60e5bfac..e0d82b6706d7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  */
 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 {
-        return pte_access_permitted(pte, WRITE) ||
+        return pte_write(pte) ||
                 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
 }
 
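
This hunk is part of reverting the fault/gup paths from pte_access_permitted() back to plain pte_write() checks. The second clause is the post-Dirty-COW rule: FOLL_FORCE may write through a read-only PTE only once the COW break has been recorded as FOLL_COW plus a dirty PTE. A standalone model of the predicate — pte_t and the FOLL_* values are stubbed here for illustration and are not the kernel definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define FOLL_WRITE 0x01
    #define FOLL_FORCE 0x10
    #define FOLL_COW   0x4000

    typedef struct { bool write; bool dirty; } pte_t;

    static bool pte_write(pte_t pte) { return pte.write; }
    static bool pte_dirty(pte_t pte) { return pte.dirty; }

    /* FOLL_FORCE may write via a read-only PTE only after COW broke the
     * sharing, which gup records as FOLL_COW plus a dirty PTE. */
    static bool can_follow_write_pte(pte_t pte, unsigned int flags)
    {
            return pte_write(pte) ||
                    ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
    }

    int main(void)
    {
            pte_t ro_clean = { false, false }, ro_dirty = { false, true };

            printf("%d\n", can_follow_write_pte(ro_clean, FOLL_FORCE | FOLL_COW)); /* 0 */
            printf("%d\n", can_follow_write_pte(ro_dirty, FOLL_FORCE | FOLL_COW)); /* 1 */
            return 0;
    }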
diff --git a/mm/hmm.c b/mm/hmm.c
index 3a5c172af560..ea19742a5d60 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -391,11 +391,11 @@ again:
         if (pmd_protnone(pmd))
                 return hmm_vma_walk_clear(start, end, walk);
 
-        if (!pmd_access_permitted(pmd, write_fault))
+        if (write_fault && !pmd_write(pmd))
                 return hmm_vma_walk_clear(start, end, walk);
 
         pfn = pmd_pfn(pmd) + pte_index(addr);
-        flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0;
+        flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
         for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                 pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
         return 0;
@@ -456,11 +456,11 @@ again:
                         continue;
                 }
 
-                if (!pte_access_permitted(pte, write_fault))
+                if (write_fault && !pte_write(pte))
                         goto fault;
 
                 pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
-                pfns[i] |= pte_access_permitted(pte, WRITE) ? HMM_PFN_WRITE : 0;
+                pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
                 continue;
 
 fault:
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2f2f5e774902..0e7ded98d114 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -870,7 +870,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
          */
         WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
 
-        if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
+        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                 return NULL;
 
         if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -1012,7 +1012,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 
         assert_spin_locked(pud_lockptr(mm, pud));
 
-        if (!pud_access_permitted(*pud, flags & FOLL_WRITE))
+        if (flags & FOLL_WRITE && !pud_write(*pud))
                 return NULL;
 
         if (pud_present(*pud) && pud_devmap(*pud))
@@ -1386,7 +1386,7 @@ out_unlock:
  */
 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
 {
-        return pmd_access_permitted(pmd, WRITE) ||
+        return pmd_write(pmd) ||
                 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
 }
 
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 3d4781756d50..d73c14294f3a 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1523,7 +1523,7 @@ static void kmemleak_scan(void)
                         if (page_count(page) == 0)
                                 continue;
                         scan_block(page, page + 1, NULL);
-                        if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
+                        if (!(pfn & 63))
                                 cond_resched();
                 }
         }
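
pfn & 63 is the usual power-of-two trick: for unsigned n and a power of two P, n % P equals n & (P - 1), so the scan still yields via cond_resched() every 64 pages but now at the cost of a single AND rather than a division whose operands the compiler must reason about. A quick self-contained check of the equivalence:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            /* exhaustively compare mask and modulo over a 2^20 range */
            for (unsigned long pfn = 0; pfn < 1 << 20; pfn++)
                    assert((pfn & 63) == (pfn % 64));
            puts("mask and modulo agree");
            return 0;
    }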
diff --git a/mm/memory.c b/mm/memory.c
index 5eb3d2524bdc..ca5674cbaff2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3831,7 +3831,8 @@ static inline int create_huge_pmd(struct vm_fault *vmf)
         return VM_FAULT_FALLBACK;
 }
 
-static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
+/* `inline' is required to avoid gcc 4.1.2 build error */
+static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
 {
         if (vma_is_anonymous(vmf->vma))
                 return do_huge_pmd_wp_page(vmf, orig_pmd);
@@ -3948,7 +3949,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
         if (unlikely(!pte_same(*vmf->pte, entry)))
                 goto unlock;
         if (vmf->flags & FAULT_FLAG_WRITE) {
-                if (!pte_access_permitted(entry, WRITE))
+                if (!pte_write(entry))
                         return do_wp_page(vmf);
                 entry = pte_mkdirty(entry);
         }
@@ -4013,7 +4014,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
         /* NUMA case for anonymous PUDs would go here */
 
-        if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
+        if (dirty && !pud_write(orig_pud)) {
                 ret = wp_huge_pud(&vmf, orig_pud);
                 if (!(ret & VM_FAULT_FALLBACK))
                         return ret;
@@ -4046,7 +4047,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
         if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
                 return do_huge_pmd_numa_page(&vmf, orig_pmd);
 
-        if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
+        if (dirty && !pmd_write(orig_pmd)) {
                 ret = wp_huge_pmd(&vmf, orig_pmd);
                 if (!(ret & VM_FAULT_FALLBACK))
                         return ret;
@@ -4336,7 +4337,7 @@ int follow_phys(struct vm_area_struct *vma,
                 goto out;
         pte = *ptep;
 
-        if (!pte_access_permitted(pte, flags & FOLL_WRITE))
+        if ((flags & FOLL_WRITE) && !pte_write(pte))
                 goto unlock;
 
         *prot = pgprot_val(pte_pgprot(pte));
diff --git a/mm/mmap.c b/mm/mmap.c
index a4d546821214..9efdc021ad22 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3019,20 +3019,20 @@ void exit_mmap(struct mm_struct *mm)
         /* Use -1 here to ensure all VMAs in the mm are unmapped */
         unmap_vmas(&tlb, vma, 0, -1);
 
-        set_bit(MMF_OOM_SKIP, &mm->flags);
-        if (unlikely(tsk_is_oom_victim(current))) {
+        if (unlikely(mm_is_oom_victim(mm))) {
                 /*
                  * Wait for oom_reap_task() to stop working on this
                  * mm. Because MMF_OOM_SKIP is already set before
                  * calling down_read(), oom_reap_task() will not run
                  * on this "mm" post up_write().
                  *
-                 * tsk_is_oom_victim() cannot be set from under us
-                 * either because current->mm is already set to NULL
+                 * mm_is_oom_victim() cannot be set from under us
+                 * either because victim->mm is already set to NULL
                  * under task_lock before calling mmput and oom_mm is
-                 * set not NULL by the OOM killer only if current->mm
+                 * set not NULL by the OOM killer only if victim->mm
                  * is found not NULL while holding the task_lock.
                  */
+                set_bit(MMF_OOM_SKIP, &mm->flags);
                 down_write(&mm->mmap_sem);
                 up_write(&mm->mmap_sem);
         }
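
The fix here is ordering: MMF_OOM_SKIP is now set only for OOM victims, after the victim check and immediately before the down_write()/up_write() pair, which acts as a barrier that waits out any oom reaper still holding mmap_sem for reading. A sketch of the same set-flag-then-cycle-the-lock handshake using a pthread rwlock; the function names and roles are illustrative, not the kernel's:

    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;
    static atomic_bool skip;

    void reaper(void)                 /* plays the role of oom_reap_task() */
    {
            pthread_rwlock_rdlock(&sem);
            if (!atomic_load(&skip)) {
                    /* ... reap: safe, the exit side has not torn down yet ... */
            }
            pthread_rwlock_unlock(&sem);
    }

    void exit_side(void)              /* plays the role of exit_mmap() */
    {
            atomic_store(&skip, 1);           /* new reapers back off ...   */
            pthread_rwlock_wrlock(&sem);      /* ... and this waits out any */
            pthread_rwlock_unlock(&sem);      /* reader already inside      */
            /* tear down freely: no reaper can still be working on this mm */
    }

    int main(void)
    {
            exit_side();
            reaper();       /* sees the flag and backs off */
            return 0;
    }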
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c957be32b27a..29f855551efe 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -683,8 +683,10 @@ static void mark_oom_victim(struct task_struct *tsk)
                 return;
 
         /* oom_mm is bound to the signal struct life time. */
-        if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+        if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
                 mmgrab(tsk->signal->oom_mm);
+                set_bit(MMF_OOM_VICTIM, &mm->flags);
+        }
 
         /*
          * Make sure that the task is woken up from uninterruptible sleep
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d..7e5e775e97f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
         struct page *page, *next;
         unsigned long flags, pfn;
+        int batch_count = 0;
 
         /* Prepare pages for freeing */
         list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
                 set_page_private(page, 0);
                 trace_mm_page_free_batched(page);
                 free_unref_page_commit(page, pfn);
+
+                /*
+                 * Guard against excessive IRQ disabled times when we get
+                 * a large list of pages to free.
+                 */
+                if (++batch_count == SWAP_CLUSTER_MAX) {
+                        local_irq_restore(flags);
+                        batch_count = 0;
+                        local_irq_save(flags);
+                }
         }
         local_irq_restore(flags);
 }
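
free_unref_page_list() runs with IRQs disabled across the whole list, so a very long list could stall interrupts; briefly re-enabling IRQs every SWAP_CLUSTER_MAX pages bounds that latency. A userspace sketch of the same bounded-batch idea, with a mutex standing in for the IRQ-disabled region; BATCH and the list type are illustrative:

    #include <pthread.h>
    #include <stddef.h>

    #define BATCH 32        /* stands in for SWAP_CLUSTER_MAX */

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    void free_list(struct node *head)
    {
            int batch_count = 0;

            pthread_mutex_lock(&lock);
            for (struct node *n = head, *next; n; n = next) {
                    next = n->next;
                    /* ... return n to the shared pool, under the lock ... */
                    if (++batch_count == BATCH) {
                            pthread_mutex_unlock(&lock);    /* let others in */
                            batch_count = 0;
                            pthread_mutex_lock(&lock);
                    }
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            free_list(NULL);        /* empty list: lock cycled once */
            return 0;
    }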
diff --git a/mm/percpu.c b/mm/percpu.c
index 79e3549cab0f..50e7fdf84055 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2719,7 +2719,11 @@ void __init setup_per_cpu_areas(void)
 
         if (pcpu_setup_first_chunk(ai, fc) < 0)
                 panic("Failed to initialize percpu areas.");
+#ifdef CONFIG_CRIS
+#warning "the CRIS architecture has physical and virtual addresses confused"
+#else
         pcpu_free_alloc_info(ai);
+#endif
 }
 
 #endif /* CONFIG_SMP */
diff --git a/mm/slab.c b/mm/slab.c
index 183e996dde5f..4e51ef954026 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1584,11 +1584,8 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
                        *dbg_redzone2(cachep, objp));
         }
 
-        if (cachep->flags & SLAB_STORE_USER) {
-                pr_err("Last user: [<%p>](%pSR)\n",
-                       *dbg_userword(cachep, objp),
-                       *dbg_userword(cachep, objp));
-        }
+        if (cachep->flags & SLAB_STORE_USER)
+                pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
         realobj = (char *)objp + obj_offset(cachep);
         size = cachep->object_size;
         for (i = 0; i < size && lines; i += 16, lines--) {
@@ -1621,7 +1618,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
                 /* Mismatch ! */
                 /* Print header */
                 if (lines == 0) {
-                        pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
+                        pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
                                print_tainted(), cachep->name,
                                realobj, size);
                         print_objinfo(cachep, objp, 0);
@@ -1650,13 +1647,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
         if (objnr) {
                 objp = index_to_obj(cachep, page, objnr - 1);
                 realobj = (char *)objp + obj_offset(cachep);
-                pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
+                pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
                 print_objinfo(cachep, objp, 2);
         }
         if (objnr + 1 < cachep->num) {
                 objp = index_to_obj(cachep, page, objnr + 1);
                 realobj = (char *)objp + obj_offset(cachep);
-                pr_err("Next obj: start=%p, len=%d\n", realobj, size);
+                pr_err("Next obj: start=%px, len=%d\n", realobj, size);
                 print_objinfo(cachep, objp, 2);
         }
 }
@@ -2608,7 +2605,7 @@ static void slab_put_obj(struct kmem_cache *cachep,
         /* Verify double free bug */
         for (i = page->active; i < cachep->num; i++) {
                 if (get_free_obj(page, i) == objnr) {
-                        pr_err("slab: double free detected in cache '%s', objp %p\n",
+                        pr_err("slab: double free detected in cache '%s', objp %px\n",
                                cachep->name, objp);
                         BUG();
                 }
@@ -2772,7 +2769,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
         else
                 slab_error(cache, "memory outside object was overwritten");
 
-        pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+        pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
                obj, redzone1, redzone2);
 }
 
@@ -3078,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
         if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
             *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
                 slab_error(cachep, "double free, or memory outside object was overwritten");
-                pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+                pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
                        objp, *dbg_redzone1(cachep, objp),
                        *dbg_redzone2(cachep, objp));
         }
@@ -3091,7 +3088,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                 cachep->ctor(objp);
         if (ARCH_SLAB_MINALIGN &&
             ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
-                pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+                pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
                        objp, (int)ARCH_SLAB_MINALIGN);
         }
         return objp;
@@ -4283,7 +4280,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
                 return;
         }
 #endif
-        seq_printf(m, "%p", (void *)address);
+        seq_printf(m, "%px", (void *)address);
 }
 
 static int leaks_show(struct seq_file *m, void *p)