Diffstat (limited to 'mm/mremap.c')
-rw-r--r--  mm/mremap.c  27
1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index 616facc3d28a..8de77b632a20 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -28,9 +28,6 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
         pud_t *pud;
         pmd_t *pmd;
 
-        /*
-         * We don't need page_table_lock: we have mmap_sem exclusively.
-         */
         pgd = pgd_offset(mm, addr);
         if (pgd_none_or_clear_bad(pgd))
                 return NULL;
@@ -50,25 +47,20 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
         pud_t *pud;
-        pmd_t *pmd = NULL;
+        pmd_t *pmd;
 
-        /*
-         * We do need page_table_lock: because allocators expect that.
-         */
-        spin_lock(&mm->page_table_lock);
         pgd = pgd_offset(mm, addr);
         pud = pud_alloc(mm, pgd, addr);
         if (!pud)
-                goto out;
+                return NULL;
 
         pmd = pmd_alloc(mm, pud, addr);
         if (!pmd)
-                goto out;
+                return NULL;
 
         if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
-                pmd = NULL;
-out:
-        spin_unlock(&mm->page_table_lock);
+                return NULL;
+
         return pmd;
 }
 
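For reference, alloc_new_pmd() as it reads with this hunk applied, put back together from the '+' and context lines above; the only thing supplied here is the one explanatory comment.

static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        /* Make sure a pte page hangs off the new pmd before the move starts. */
        if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
                return NULL;

        return pmd;
}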
@@ -80,6 +72,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         struct address_space *mapping = NULL;
         struct mm_struct *mm = vma->vm_mm;
         pte_t *old_pte, *new_pte, pte;
+        spinlock_t *old_ptl;
 
         if (vma->vm_file) {
                 /*
@@ -95,9 +88,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                 new_vma->vm_truncate_count = 0;
         }
 
-        spin_lock(&mm->page_table_lock);
-        old_pte = pte_offset_map(old_pmd, old_addr);
-        new_pte = pte_offset_map_nested(new_pmd, new_addr);
+        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
+        new_pte = pte_offset_map_nested(new_pmd, new_addr);
 
         for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                    new_pte++, new_addr += PAGE_SIZE) {
@@ -110,8 +102,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         }
 
         pte_unmap_nested(new_pte - 1);
-        pte_unmap(old_pte - 1);
-        spin_unlock(&mm->page_table_lock);
+        pte_unmap_unlock(old_pte - 1, old_ptl);
         if (mapping)
                 spin_unlock(&mapping->i_mmap_lock);
 }
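The pairing that replaces the old mm->page_table_lock critical section in move_ptes() is sketched below, reconstructed from the last two hunks. The comments on what pte_offset_map_lock() and pte_unmap_unlock() do are an assumption about the include/linux/mm.h helpers of this kernel generation, not something this diff itself shows.

        spinlock_t *old_ptl;

        /*
         * Map the source pte page and take the lock guarding it; the helper
         * returns the mapped pte and stores the lock it took through &old_ptl.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        /*
         * The destination side is only mapped (a second atomic kmap slot on
         * HIGHPTE configurations); no second page-table lock is taken.
         */
        new_pte = pte_offset_map_nested(new_pmd, new_addr);

        /* ... copy and clear the ptes ... */

        pte_unmap_nested(new_pte - 1);
        /* Drops old_ptl and undoes the pte_offset_map in a single call. */
        pte_unmap_unlock(old_pte - 1, old_ptl);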