-rw-r--r--	mm/memory.c	| 12
-rw-r--r--	mm/mmap.c	| 23
2 files changed, 16 insertions, 19 deletions

diff --git a/mm/memory.c b/mm/memory.c
index 24ba688876d6..4ea89a2e3a83 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -260,6 +260,12 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 		struct vm_area_struct *next = vma->vm_next;
 		unsigned long addr = vma->vm_start;
 
+		/*
+		 * Hide vma from rmap and vmtruncate before freeing pgtables
+		 */
+		anon_vma_unlink(vma);
+		unlink_file_vma(vma);
+
 		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
@@ -272,6 +278,8 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 						HPAGE_SIZE)) {
 				vma = next;
 				next = vma->vm_next;
+				anon_vma_unlink(vma);
+				unlink_file_vma(vma);
 			}
 			free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
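Taken together, the two hunks above move the rmap/vmtruncate unlinking into free_pgtables() itself, covering both the single-vma path and the gather loop. A sketch of the whole function after the patch; the lines outside the hunks' context (the outer loop and the gather condition) are reconstructed from the contemporaneous tree and may differ in detail:

void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and vmtruncate before freeing pgtables
		 */
		anon_vma_unlink(vma);
		unlink_file_vma(vma);

		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
							HPAGE_SIZE)) {
				vma = next;
				next = vma->vm_next;
				/* unlink each vma absorbed into the batch too */
				anon_vma_unlink(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}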
@@ -798,12 +806,12 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	}
 
 	lru_add_drain();
-	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
 	update_hiwater_rss(mm);
+	spin_lock(&mm->page_table_lock);
 	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(tlb, address, end);
 	spin_unlock(&mm->page_table_lock);
+	tlb_finish_mmu(tlb, address, end);
 	return end;
 }
 
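The net effect of this hunk is a narrower critical section with a reordered teardown: the TLB gather is set up before page_table_lock is taken, and the potentially expensive flush and freeing in tlb_finish_mmu() happens after it is dropped. A condensed sketch of the resulting sequence, assembled from the hunk rather than copied verbatim:

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);		/* set up TLB gather unlocked */
	update_hiwater_rss(mm);
	spin_lock(&mm->page_table_lock);	/* lock only the pte walk */
	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
	spin_unlock(&mm->page_table_lock);
	tlb_finish_mmu(tlb, address, end);	/* flush/free outside the lock */
	return end;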
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -203,14 +203,6 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 {
 	struct vm_area_struct *next = vma->vm_next;
 
-	/*
-	 * Hide vma from rmap and vmtruncate before freeing page tables:
-	 * to be moved into free_pgtables once page_table_lock is lifted
-	 * from it, but until then lock ordering forbids that move.
-	 */
-	anon_vma_unlink(vma);
-	unlink_file_vma(vma);
-
 	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
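With the unlinking hoisted into free_pgtables(), remove_vma() is reduced to the teardown work that may sleep: calling ->close(), dropping the file reference, and freeing the vma. A sketch of the resulting function; the tail below the hunk's context is reconstructed and may differ in detail from the tree:

static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_free(vma_policy(vma));
	kmem_cache_free(vm_area_cachep, vma);
	return next;
}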
@@ -1679,15 +1671,15 @@ static void unmap_region(struct mm_struct *mm,
 	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
-	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
 	update_hiwater_rss(mm);
+	spin_lock(&mm->page_table_lock);
 	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+	spin_unlock(&mm->page_table_lock);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 				 next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 /*
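unmap_region() now calls free_pgtables() without page_table_lock held, which is exactly what unblocks the move recorded in the remove_vma() hunk above: the comment deleted there noted that lock ordering forbade it. A rough sketch of the ordering involved; the hierarchy below is paraphrased from the rmap locking comments of this era, not quoted from this patch:

/*
 * Approximate lock ordering (sketch):
 *
 *   mm->mmap_sem
 *     mapping->i_mmap_lock	taken by unlink_file_vma()
 *     anon_vma->lock		taken by anon_vma_unlink()
 *       mm->page_table_lock	now held only across unmap_vmas()
 *
 * Calling free_pgtables() with page_table_lock held would take the
 * rmap locks while already inside page_table_lock, inverting this
 * order; narrowing the lock makes the unlinking in free_pgtables()
 * legal.
 */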
@@ -1962,23 +1954,20 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long end;
 
 	lru_add_drain();
-
-	spin_lock(&mm->page_table_lock);
-
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
 	/* Don't update_hiwater_rss(mm) here, do_exit already did */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
+	spin_lock(&mm->page_table_lock);
 	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	spin_unlock(&mm->page_table_lock);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
-	spin_unlock(&mm->page_table_lock);
-
 	/*
-	 * Walk the list again, actually closing and freeing it
-	 * without holding any MM locks.
+	 * Walk the list again, actually closing and freeing it,
+	 * with preemption enabled, without holding any MM locks.
 	 */
 	while (vma)
 		vma = remove_vma(vma);
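The same narrowing in exit_mmap() leaves the final list walk running with no MM locks held, matching the updated comment. A condensed sketch of the resulting tail, drawn from the hunk above:

	spin_lock(&mm->page_table_lock);
	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
	spin_unlock(&mm->page_table_lock);
	vm_unacct_memory(nr_accounted);
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
	tlb_finish_mmu(tlb, 0, end);

	while (vma)		/* no locks held: remove_vma() may sleep */
		vma = remove_vma(vma);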