author		James Hogan <james.hogan@imgtec.com>	2013-02-11 12:28:10 -0500
committer	James Hogan <james.hogan@imgtec.com>	2013-03-02 15:11:13 -0500
commit		f75c28d896f4dd0064e60bba1e82a4c98908d239 (patch)
tree		18097d57e812bdcd86634d86b6089fc07605946a
parent		c838e72a35e49ea51c39c2c634ece028fa49c565 (diff)
metag: hugetlb: convert to vm_unmapped_area()
Convert hugetlb_get_unmapped_area_new_pmd() to use vm_unmapped_area()
rather than searching the virtual address space itself. This fixes the
following errors in linux-next due to the specified members being
removed after other architectures have already been converted:
arch/metag/mm/hugetlbpage.c: In function 'hugetlb_get_unmapped_area_new_pmd':
arch/metag/mm/hugetlbpage.c:199: error: 'struct mm_struct' has no member named 'cached_hole_size'
arch/metag/mm/hugetlbpage.c:200: error: 'struct mm_struct' has no member named 'free_area_cache'
arch/metag/mm/hugetlbpage.c:215: error: 'struct mm_struct' has no member named 'cached_hole_size'
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Michel Lespinasse <walken@google.com>
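For context, vm_unmapped_area() takes a struct vm_unmapped_area_info describing the search, and the core mm code performs the free-area walk that each architecture previously hand-rolled. A minimal sketch of the bottom-up calling pattern in this kernel era follows; the surrounding variables and the IS_ERR_VALUE() check are illustrative of the caller side and not part of this patch:

	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = 0;				/* 0 = bottom-up search */
	info.length = len;			/* size of the region needed */
	info.low_limit = TASK_UNMAPPED_BASE;	/* lowest acceptable address */
	info.high_limit = TASK_SIZE;		/* region must end at or below this */
	info.align_mask = PAGE_MASK & HUGEPT_MASK;	/* required address alignment */
	info.align_offset = 0;			/* no offset within the alignment */

	addr = vm_unmapped_area(&info);
	if (IS_ERR_VALUE(addr))			/* negative errno on failure */
		return addr;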
-rw-r--r--	arch/metag/mm/hugetlbpage.c | 52
1 file changed, 10 insertions(+), 42 deletions(-)
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 24ceed4f4eed..3c52fa6d0f8e 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -192,43 +192,15 @@ new_search:
 static unsigned long
 hugetlb_get_unmapped_area_new_pmd(unsigned long len)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr, addr;
-
-	if (ALIGN_HUGEPT(len) > mm->cached_hole_size)
-		start_addr = mm->free_area_cache;
-	else
-		start_addr = TASK_UNMAPPED_BASE;
-
-new_search:
-	addr = ALIGN_HUGEPT(start_addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto new_search;
-			}
-			return 0;
-		}
-		/* skip ahead if we've aligned right over some vmas */
-		if (vma && vma->vm_end <= addr)
-			continue;
-		if (!vma || ALIGN_HUGEPT(addr + len) <= vma->vm_start) {
-#if HPAGE_SHIFT < HUGEPT_SHIFT
-			if (len & HUGEPT_MASK)
-				mm->context.part_huge = addr + len;
-#endif
-			return addr;
-		}
-		addr = ALIGN_HUGEPT(vma->vm_end);
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & HUGEPT_MASK;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -266,11 +238,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
 	 * for huge pages.
 	 */
-	addr = hugetlb_get_unmapped_area_new_pmd(len);
-	if (likely(addr))
-		return addr;
-
-	return -EINVAL;
+	return hugetlb_get_unmapped_area_new_pmd(len);
 }
 
 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
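Note that the second hunk also changes the failure semantics: the old helper returned 0 when no suitable region was found, which hugetlb_get_unmapped_area() translated into -EINVAL, whereas vm_unmapped_area() signals failure with a negative errno (-ENOMEM in kernels of this era), which is now propagated to the caller directly.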