author    David S. Miller <davem@davemloft.net>  2006-03-20 04:17:17 -0500
committer David S. Miller <davem@davemloft.net>  2006-03-20 04:17:17 -0500
commit    f6b83f070e9b7ad9075f7cc5646260e56c7d0219
tree      48586ca4f4c75ee3862f63be332351e78f2d5476
parent    467418f3508b426adbc7df795ebf3baaed4fbefc
[SPARC64]: Fix 2 bugs in huge page support.

1) huge_pte_offset() did not correctly check whether the page table
   hierarchy elements were empty, resulting in an OOPS.

2) A platform-specific hugetlb_get_unmapped_area() is needed to handle
   the top-down vs. bottom-up address space allocation strategies.

Signed-off-by: David S. Miller <davem@davemloft.net>
 arch/sparc64/mm/hugetlbpage.c | 179 ++++++++++++++++++++++++++++++++++++++--
 include/asm-sparc64/page.h    |   1 +
 2 files changed, 176 insertions(+), 4 deletions(-)
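Editor's note on bug 1: the old code tested the pointers returned by
pgd_offset()/pud_offset()/pmd_offset() against NULL, but those helpers
return the address of an entry *inside* a page table, which is never
NULL; emptiness has to be tested on the entry's contents with
pgd_none()/pud_none()/pmd_none(). Below is the patched walk from the
diff, annotated here with comments for illustration (the comments are
not part of the actual commit):

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;		/* normalize to the huge page base */

	pgd = pgd_offset(mm, addr);	/* pointer into the pgd table: never NULL */
	if (!pgd_none(*pgd)) {		/* so test the entry, not the pointer */
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}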
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 625cbb336a23..a7a24869d045 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -1,7 +1,7 @@
 /*
  * SPARC64 Huge TLB page support.
  *
- * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/config.h>
@@ -22,6 +22,175 @@
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 
+/* Slightly simplified from the non-hugepage variant because by
+ * definition we don't have to worry about any page coloring stuff
+ */
+#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+							unsigned long addr,
+							unsigned long len,
+							unsigned long pgoff,
+							unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct * vma;
+	unsigned long task_size = TASK_SIZE;
+	unsigned long start_addr;
+
+	if (test_thread_flag(TIF_32BIT))
+		task_size = STACK_TOP32;
+	if (unlikely(len >= VA_EXCLUDE_START))
+		return -ENOMEM;
+
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
+		start_addr = addr = TASK_UNMAPPED_BASE;
+		mm->cached_hole_size = 0;
+	}
+
+	task_size -= len;
+
+full_search:
+	addr = ALIGN(addr, HPAGE_SIZE);
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point:  (!vma || addr < vma->vm_end). */
+		if (addr < VA_EXCLUDE_START &&
+		    (addr + len) >= VA_EXCLUDE_START) {
+			addr = VA_EXCLUDE_END;
+			vma = find_vma(mm, VA_EXCLUDE_END);
+		}
+		if (unlikely(task_size < addr)) {
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
+				goto full_search;
+			}
+			return -ENOMEM;
+		}
+		if (likely(!vma || addr + len <= vma->vm_start)) {
+			/*
+			 * Remember the place where we stopped the search:
+			 */
+			mm->free_area_cache = addr + len;
+			return addr;
+		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+	}
+}
+
+static unsigned long
+hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+				  const unsigned long len,
+				  const unsigned long pgoff,
+				  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+
+	/* This should only ever run for 32-bit processes.  */
+	BUG_ON(!test_thread_flag(TIF_32BIT));
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache & HPAGE_MASK;
+
+	/* make sure it can fit in the remaining address space */
+	if (likely(addr > len)) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+		}
+	}
+
+	if (unlikely(mm->mmap_base < len))
+		goto bottomup;
+
+	addr = (mm->mmap_base-len) & HPAGE_MASK;
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (likely(!vma || addr+len <= vma->vm_start)) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		}
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = (vma->vm_start-len) & HPAGE_MASK;
+	} while (likely(len < vma->vm_start));
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
+
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long task_size = TASK_SIZE;
+
+	if (test_thread_flag(TIF_32BIT))
+		task_size = STACK_TOP32;
+
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (len > task_size)
+		return -ENOMEM;
+
+	if (addr) {
+		addr = ALIGN(addr, HPAGE_SIZE);
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+	if (mm->get_unmapped_area == arch_get_unmapped_area)
+		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+				pgoff, flags);
+	else
+		return hugetlb_get_unmapped_area_topdown(file, addr, len,
+				pgoff, flags);
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -48,12 +217,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 
+	addr &= HPAGE_MASK;
+
 	pgd = pgd_offset(mm, addr);
-	if (pgd) {
+	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
-		if (pud) {
+		if (!pud_none(*pud)) {
 			pmd = pmd_offset(pud, addr);
-			if (pmd)
+			if (!pmd_none(*pmd))
 				pte = pte_offset_map(pmd, addr);
 		}
 	}
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index f6b49256fe2b..fcb2812265f4 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -104,6 +104,7 @@ typedef unsigned long pgprot_t;
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define ARCH_HAS_SETCLEAR_HUGE_PTE
 #define ARCH_HAS_HUGETLB_PREFAULT_HOOK
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif
 
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
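Editor's note on bug 2: the new HAVE_ARCH_HUGETLB_UNMAPPED_AREA define
makes the generic hugetlb code call the architecture's
hugetlb_get_unmapped_area() above, which is exercised whenever a
process mmap()s a file on a hugetlbfs mount. The following is a
minimal user-space sketch of that path; the /mnt/huge mount point and
the 4 MB huge page size are assumptions for illustration only (check
/proc/meminfo for the real size on a given system):

/* Sketch: map one huge page from an assumed hugetlbfs mount. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HUGE_FILE "/mnt/huge/example"	/* assumed hugetlbfs mount */
#define MAP_LEN   (4UL * 1024 * 1024)	/* assumed huge page size */

int main(void)
{
	int fd = open(HUGE_FILE, O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* addr == NULL lets the kernel pick the slot; with this patch
	 * the sparc64 kernel routes the search through its own
	 * hugetlb_get_unmapped_area(), honoring the process's
	 * top-down or bottom-up mmap layout. */
	void *p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	((char *)p)[0] = 1;	/* touch the page to fault it in */
	printf("huge page mapped at %p\n", p);

	munmap(p, MAP_LEN);
	close(fd);
	unlink(HUGE_FILE);
	return 0;
}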
