Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	23
1 file changed, 6 insertions(+), 17 deletions(-)
@@ -203,14 +203,6 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 {
 	struct vm_area_struct *next = vma->vm_next;
 
-	/*
-	 * Hide vma from rmap and vmtruncate before freeing page tables:
-	 * to be moved into free_pgtables once page_table_lock is lifted
-	 * from it, but until then lock ordering forbids that move.
-	 */
-	anon_vma_unlink(vma);
-	unlink_file_vma(vma);
-
 	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
@@ -1679,15 +1671,15 @@ static void unmap_region(struct mm_struct *mm,
 	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
-	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
 	update_hiwater_rss(mm);
+	spin_lock(&mm->page_table_lock);
 	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+	spin_unlock(&mm->page_table_lock);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 				 next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 /*
@@ -1962,23 +1954,20 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long end;
 
 	lru_add_drain();
-
-	spin_lock(&mm->page_table_lock);
-
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
 	/* Don't update_hiwater_rss(mm) here, do_exit already did */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
+	spin_lock(&mm->page_table_lock);
 	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	spin_unlock(&mm->page_table_lock);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
-	spin_unlock(&mm->page_table_lock);
-
 	/*
-	 * Walk the list again, actually closing and freeing it
-	 * without holding any MM locks.
+	 * Walk the list again, actually closing and freeing it,
+	 * with preemption enabled, without holding any MM locks.
 	 */
 	while (vma)
 		vma = remove_vma(vma);
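
The effect of the last two hunks is the same: mm->page_table_lock, which used to be taken before tlb_gather_mmu() and held across accounting, page-table freeing and the TLB flush, now covers only the unmap_vmas() walk. A minimal userspace sketch of that narrowed ordering follows, with a pthread mutex standing in for the spinlock and stub functions that are purely illustrative; none of this is kernel code.

/* Toy model of the narrowed critical section (not kernel code):
 * the mutex plays the role of mm->page_table_lock, and the stubs
 * only print where the lock is or is not held. */
#include <pthread.h>
#include <stdio.h>

struct mm {
	pthread_mutex_t page_table_lock;
};

static void unmap_vmas(struct mm *mm)
{
	(void)mm;
	printf("unmap_vmas: page_table_lock held\n");
}

static void vm_unacct_memory(void) { printf("vm_unacct_memory: lock dropped\n"); }
static void free_pgtables(void)    { printf("free_pgtables: lock dropped\n"); }
static void tlb_finish_mmu(void)   { printf("tlb_finish_mmu: lock dropped\n"); }

static void unmap_region(struct mm *mm)
{
	pthread_mutex_lock(&mm->page_table_lock);
	unmap_vmas(mm);			/* only the vma walk needs the lock */
	pthread_mutex_unlock(&mm->page_table_lock);
	vm_unacct_memory();		/* accounting, page-table freeing and */
	free_pgtables();		/* the TLB flush now run outside the  */
	tlb_finish_mmu();		/* critical section                   */
}

int main(void)
{
	struct mm mm = { .page_table_lock = PTHREAD_MUTEX_INITIALIZER };
	unmap_region(&mm);
	return 0;
}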