diff options
Diffstat (limited to 'mm/memory.c')
| -rw-r--r-- | mm/memory.c | 41 |
1 file changed, 23 insertions, 18 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 6768ce9e57d2..22dfa617bddb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
| @@ -59,6 +59,7 @@ | |||
| 59 | #include <linux/gfp.h> | 59 | #include <linux/gfp.h> |
| 60 | #include <linux/migrate.h> | 60 | #include <linux/migrate.h> |
| 61 | #include <linux/string.h> | 61 | #include <linux/string.h> |
| 62 | #include <linux/dma-debug.h> | ||
| 62 | 63 | ||
| 63 | #include <asm/io.h> | 64 | #include <asm/io.h> |
| 64 | #include <asm/pgalloc.h> | 65 | #include <asm/pgalloc.h> |
| @@ -288,7 +289,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
| 288 | return 0; | 289 | return 0; |
| 289 | batch = tlb->active; | 290 | batch = tlb->active; |
| 290 | } | 291 | } |
| 291 | VM_BUG_ON(batch->nr > batch->max); | 292 | VM_BUG_ON_PAGE(batch->nr > batch->max, page); |
| 292 | 293 | ||
| 293 | return batch->max - batch->nr; | 294 | return batch->max - batch->nr; |
| 294 | } | 295 | } |
| @@ -670,7 +671,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, | |||
| 670 | current->comm, | 671 | current->comm, |
| 671 | (long long)pte_val(pte), (long long)pmd_val(*pmd)); | 672 | (long long)pte_val(pte), (long long)pmd_val(*pmd)); |
| 672 | if (page) | 673 | if (page) |
| 673 | dump_page(page); | 674 | dump_page(page, "bad pte"); |
| 674 | printk(KERN_ALERT | 675 | printk(KERN_ALERT |
| 675 | "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", | 676 | "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", |
| 676 | (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); | 677 | (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); |
| @@ -2559,6 +2560,8 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, | |||
| 2559 | 2560 | ||
| 2560 | static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) | 2561 | static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) |
| 2561 | { | 2562 | { |
| 2563 | debug_dma_assert_idle(src); | ||
| 2564 | |||
| 2562 | /* | 2565 | /* |
| 2563 | * If the source page was a PFN mapping, we don't have | 2566 | * If the source page was a PFN mapping, we don't have |
| 2564 | * a "struct page" for it. We do a best-effort copy by | 2567 | * a "struct page" for it. We do a best-effort copy by |
| @@ -2699,7 +2702,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2699 | goto unwritable_page; | 2702 | goto unwritable_page; |
| 2700 | } | 2703 | } |
| 2701 | } else | 2704 | } else |
| 2702 | VM_BUG_ON(!PageLocked(old_page)); | 2705 | VM_BUG_ON_PAGE(!PageLocked(old_page), old_page); |
| 2703 | 2706 | ||
| 2704 | /* | 2707 | /* |
| 2705 | * Since we dropped the lock we need to revalidate | 2708 | * Since we dropped the lock we need to revalidate |
| @@ -3345,6 +3348,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3345 | if (ret & VM_FAULT_LOCKED) | 3348 | if (ret & VM_FAULT_LOCKED) |
| 3346 | unlock_page(vmf.page); | 3349 | unlock_page(vmf.page); |
| 3347 | ret = VM_FAULT_HWPOISON; | 3350 | ret = VM_FAULT_HWPOISON; |
| 3351 | page_cache_release(vmf.page); | ||
| 3348 | goto uncharge_out; | 3352 | goto uncharge_out; |
| 3349 | } | 3353 | } |
| 3350 | 3354 | ||
| @@ -3355,7 +3359,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3355 | if (unlikely(!(ret & VM_FAULT_LOCKED))) | 3359 | if (unlikely(!(ret & VM_FAULT_LOCKED))) |
| 3356 | lock_page(vmf.page); | 3360 | lock_page(vmf.page); |
| 3357 | else | 3361 | else |
| 3358 | VM_BUG_ON(!PageLocked(vmf.page)); | 3362 | VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page); |
| 3359 | 3363 | ||
| 3360 | /* | 3364 | /* |
| 3361 | * Should we do an early C-O-W break? | 3365 | * Should we do an early C-O-W break? |
| @@ -3392,7 +3396,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3392 | goto unwritable_page; | 3396 | goto unwritable_page; |
| 3393 | } | 3397 | } |
| 3394 | } else | 3398 | } else |
| 3395 | VM_BUG_ON(!PageLocked(page)); | 3399 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
| 3396 | page_mkwrite = 1; | 3400 | page_mkwrite = 1; |
| 3397 | } | 3401 | } |
| 3398 | } | 3402 | } |
| @@ -3700,7 +3704,6 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3700 | if (unlikely(is_vm_hugetlb_page(vma))) | 3704 | if (unlikely(is_vm_hugetlb_page(vma))) |
| 3701 | return hugetlb_fault(mm, vma, address, flags); | 3705 | return hugetlb_fault(mm, vma, address, flags); |
| 3702 | 3706 | ||
| 3703 | retry: | ||
| 3704 | pgd = pgd_offset(mm, address); | 3707 | pgd = pgd_offset(mm, address); |
| 3705 | pud = pud_alloc(mm, pgd, address); | 3708 | pud = pud_alloc(mm, pgd, address); |
| 3706 | if (!pud) | 3709 | if (!pud) |
| @@ -3738,20 +3741,13 @@ retry: | |||
| 3738 | if (dirty && !pmd_write(orig_pmd)) { | 3741 | if (dirty && !pmd_write(orig_pmd)) { |
| 3739 | ret = do_huge_pmd_wp_page(mm, vma, address, pmd, | 3742 | ret = do_huge_pmd_wp_page(mm, vma, address, pmd, |
| 3740 | orig_pmd); | 3743 | orig_pmd); |
| 3741 | /* | 3744 | if (!(ret & VM_FAULT_FALLBACK)) |
| 3742 | * If COW results in an oom, the huge pmd will | 3745 | return ret; |
| 3743 | * have been split, so retry the fault on the | ||
| 3744 | * pte for a smaller charge. | ||
| 3745 | */ | ||
| 3746 | if (unlikely(ret & VM_FAULT_OOM)) | ||
| 3747 | goto retry; | ||
| 3748 | return ret; | ||
| 3749 | } else { | 3746 | } else { |
| 3750 | huge_pmd_set_accessed(mm, vma, address, pmd, | 3747 | huge_pmd_set_accessed(mm, vma, address, pmd, |
| 3751 | orig_pmd, dirty); | 3748 | orig_pmd, dirty); |
| 3749 | return 0; | ||
| 3752 | } | 3750 | } |
| 3753 | |||
| 3754 | return 0; | ||
| 3755 | } | 3751 | } |
| 3756 | } | 3752 | } |
| 3757 | 3753 | ||
| @@ -4272,11 +4268,20 @@ void copy_user_huge_page(struct page *dst, struct page *src, | |||
| 4272 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ | 4268 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ |
| 4273 | 4269 | ||
| 4274 | #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS | 4270 | #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS |
| 4271 | |||
| 4272 | static struct kmem_cache *page_ptl_cachep; | ||
| 4273 | |||
| 4274 | void __init ptlock_cache_init(void) | ||
| 4275 | { | ||
| 4276 | page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, | ||
| 4277 | SLAB_PANIC, NULL); | ||
| 4278 | } | ||
| 4279 | |||
| 4275 | bool ptlock_alloc(struct page *page) | 4280 | bool ptlock_alloc(struct page *page) |
| 4276 | { | 4281 | { |
| 4277 | spinlock_t *ptl; | 4282 | spinlock_t *ptl; |
| 4278 | 4283 | ||
| 4279 | ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); | 4284 | ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); |
| 4280 | if (!ptl) | 4285 | if (!ptl) |
| 4281 | return false; | 4286 | return false; |
| 4282 | page->ptl = ptl; | 4287 | page->ptl = ptl; |
| @@ -4285,6 +4290,6 @@ bool ptlock_alloc(struct page *page) | |||
| 4285 | 4290 | ||
| 4286 | void ptlock_free(struct page *page) | 4291 | void ptlock_free(struct page *page) |
| 4287 | { | 4292 | { |
| 4288 | kfree(page->ptl); | 4293 | kmem_cache_free(page_ptl_cachep, page->ptl); |
| 4289 | } | 4294 | } |
| 4290 | #endif | 4295 | #endif |
