author	Hugh Dickins <hugh@veritas.com>	2005-04-19 16:29:19 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org.(none)>	2005-04-19 16:29:19 -0400
commit	e2cdef8c847b480529b7e26991926aab4be008e6
tree	b936ab7f0964f56bc3312ad9ad956e978ac39895
parent	021740dc30d184e3b0fa7679936e65a56090c425
[PATCH] freepgt: free_pgtables from FIRST_USER_ADDRESS
The patches to free_pgtables by vma left problems on any architectures which
leave some user address page table entries unencapsulated by vma. Andi has
fixed the 32-bit vDSO on x86_64 to use a vma. Now fix arm (and arm26), whose
first PAGE_SIZE is reserved (perhaps) for machine vectors.

Our calls to free_pgtables must not touch that area, and exit_mmap's
BUG_ON(nr_ptes) must allow that arm's get_pgd_slow may (or may not) have
allocated an extra page table, which its free_pgd_slow would free later.
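
(Illustration only, not kernel code: a minimal user-space sketch of the relaxed
check, using assumed arm-like values for PAGE_SIZE, PMD_SHIFT and
FIRST_USER_ADDRESS rather than values from any arch header.  It works out how
many leftover page tables the new BUG_ON tolerates.)

/*
 * Sketch: the new check allows exactly the page tables needed to
 * cover [0, FIRST_USER_ADDRESS).  All constants below are assumed,
 * arm-like stand-ins for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL			/* assumed 4K pages */
#define PMD_SHIFT		21			/* assumed 2MB pmd span */
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define FIRST_USER_ADDRESS	PAGE_SIZE		/* arm: vectors below this */

int main(void)
{
	/* Old check: BUG_ON(mm->nr_ptes) -- no page table may remain.
	 * New check: allow those covering [0, FIRST_USER_ADDRESS). */
	unsigned long allowed = (FIRST_USER_ADDRESS + PMD_SIZE - 1) >> PMD_SHIFT;

	printf("page tables allowed to remain: %lu\n", allowed);
	assert(allowed == 1);	/* the one get_pgd_slow may have allocated */
	return 0;
}
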
FIRST_USER_PGD_NR has misled me and others: until all the arches define
FIRST_USER_ADDRESS instead, a hack in mmap.c derives one from the other. This
patch fixes the bugs; the remaining patches just clean it up.
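
(A second user-space sketch of that temporary fallback; the FIRST_USER_PGD_NR
value of 1 is assumed here purely to illustrate the arm case, not quoted from
arm's headers.)

/*
 * Sketch of the mmap.c fallback: an arch that does not yet define
 * FIRST_USER_ADDRESS gets one derived from its FIRST_USER_PGD_NR.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumed 4K pages */
#define FIRST_USER_PGD_NR	1	/* assumed arm value, for illustration */

#ifndef FIRST_USER_ADDRESS		/* temporary hack, as in the patch */
#define THIS_IS_ARM		FIRST_USER_PGD_NR
#define FIRST_USER_ADDRESS	(THIS_IS_ARM * PAGE_SIZE)
#endif

int main(void)
{
	/* free_pgtables() and exit_mmap() now start from here rather
	 * than from 0, leaving the reserved low page's table alone. */
	printf("FIRST_USER_ADDRESS = %lu\n", (unsigned long)FIRST_USER_ADDRESS);
	return 0;
}
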
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	mm/mmap.c	11
1 files changed, 8 insertions, 3 deletions
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1612,6 +1612,11 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 	validate_mm(mm);
 }
 
+#ifndef FIRST_USER_ADDRESS	/* temporary hack */
+#define THIS_IS_ARM	FIRST_USER_PGD_NR
+#define FIRST_USER_ADDRESS	(THIS_IS_ARM * PAGE_SIZE)
+#endif
+
 /*
  * Get rid of page table information in the indicated region.
  *
@@ -1630,7 +1635,7 @@ static void unmap_region(struct mm_struct *mm,
 	tlb = tlb_gather_mmu(mm, 0);
 	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(&tlb, vma, prev? prev->vm_end: 0,
+	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 		next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
 	spin_unlock(&mm->page_table_lock);
@@ -1910,7 +1915,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(&tlb, vma, 0, 0);
+	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
 	mm->mmap = mm->mmap_cache = NULL;
@@ -1931,7 +1936,7 @@ void exit_mmap(struct mm_struct *mm)
 		vma = next;
 	}
 
-	BUG_ON(mm->nr_ptes);	/* This is just debugging */
+	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address