Diffstat (limited to 'mm/msync.c')
-rw-r--r--	mm/msync.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/mm/msync.c b/mm/msync.c
index 3b5f1c521d4b..860395486060 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -25,6 +25,7 @@
 static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
 	int progress = 0;
 
@@ -37,7 +38,7 @@ again:
 		if (progress >= 64) {
 			progress = 0;
 			if (need_resched() ||
-					need_lockbreak(&vma->vm_mm->page_table_lock))
+					need_lockbreak(&mm->page_table_lock))
 				break;
 		}
 		progress++;
@@ -46,11 +47,11 @@ again:
 		if (!pte_maybe_dirty(*pte))
 			continue;
 		pfn = pte_pfn(*pte);
-		if (!pfn_valid(pfn))
+		if (unlikely(!pfn_valid(pfn))) {
+			print_bad_pte(vma, *pte, addr);
 			continue;
+		}
 		page = pfn_to_page(pfn);
-		if (PageReserved(page))
-			continue;
 
 		if (ptep_clear_flush_dirty(vma, addr, pte) ||
 		    page_test_and_clear_dirty(page))
@@ -58,7 +59,7 @@ again:
 			progress += 3;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap(pte - 1);
-	cond_resched_lock(&vma->vm_mm->page_table_lock);
+	cond_resched_lock(&mm->page_table_lock);
 	if (addr != end)
 		goto again;
 }
@@ -102,8 +103,10 @@ static void msync_page_range(struct vm_area_struct *vma,
 
 	/* For hugepages we can't go walking the page table normally,
 	 * but that's ok, hugetlbfs is memory based, so we don't need
-	 * to do anything more on an msync() */
-	if (is_vm_hugetlb_page(vma))
+	 * to do anything more on an msync().
+	 * Can't do anything with VM_RESERVED regions either.
+	 */
+	if (vma->vm_flags & (VM_HUGETLB|VM_RESERVED))
 		return;
 
 	BUG_ON(addr >= end);
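For readers skimming the hunks above: the behavioural change in msync_page_range() is that the per-page PageReserved() test inside the PTE walk is replaced by a single per-VMA vm_flags test, so hugetlb and VM_RESERVED mappings are skipped before the walk ever starts, and invalid PFNs are now reported via print_bad_pte() instead of being silently skipped. The following is a minimal userspace sketch of that control-flow shape only; the flag values, struct layout, and function signature are simplified stand-ins, not the real kernel definitions.

/* Userspace sketch (not kernel code): illustrates hoisting the "skip this
 * region" decision from a per-page check inside the walk to one per-VMA
 * vm_flags test.  VM_HUGETLB/VM_RESERVED values here are arbitrary. */
#include <stdio.h>

#define VM_HUGETLB  0x0001
#define VM_RESERVED 0x0002

struct vm_area_struct { unsigned long vm_flags; };

static void msync_page_range(struct vm_area_struct *vma)
{
	/* One test per VMA replaces the old PageReserved() test per page. */
	if (vma->vm_flags & (VM_HUGETLB | VM_RESERVED)) {
		printf("skipping VMA: hugetlb or reserved\n");
		return;
	}
	printf("walking page tables for this VMA\n");
}

int main(void)
{
	struct vm_area_struct normal   = { .vm_flags = 0 };
	struct vm_area_struct reserved = { .vm_flags = VM_RESERVED };

	msync_page_range(&normal);
	msync_page_range(&reserved);
	return 0;
}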