author    Andi Kleen <ak@suse.de>    2008-07-24 00:27:50 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-07-24 13:47:18 -0400
commit    39c11e6c05b7fedbf7ed4df3908b25f622d56204 (patch)
tree      7d928e69164ae802d17ef507726513407698fcf8 /arch/x86/mm
parent    ceb868796181dc95ea01a110e123afd391639873 (diff)
x86: support GB hugepages on 64-bit
Acked-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--    arch/x86/mm/hugetlbpage.c    33
1 file changed, 22 insertions(+), 11 deletions(-)
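In short: on x86-64 a 1GB huge page is represented by a single PUD entry, so huge_pte_alloc()/huge_pte_offset() learn to stop the page-table walk at the PUD level, pud_huge() starts reporting PSE-mapped PUDs, and the unmapped-area helpers switch from the compile-time HPAGE_SIZE/HPAGE_MASK constants to the per-file huge_page_size()/huge_page_mask(), so both 2MB and 1GB mappings get correctly aligned. A minimal userspace sketch of what this enables (not part of the patch; the mount point, boot parameters and file name are illustrative):

/*
 * Assumes the kernel reserved 1GB pages at boot (e.g. hugepagesz=1G
 * hugepages=1 on the command line) and hugetlbfs is mounted with
 * the matching page size:
 *
 *   mount -t hugetlbfs -o pagesize=1G none /mnt/huge1g
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define GB (1UL << 30)

int main(void)
{
	int fd = open("/mnt/huge1g/buf", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	void *p = mmap(NULL, GB, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, GB);	/* faults in the single 1GB page */
	printf("1GB hugepage mapped at %p\n", p);
	munmap(p, GB);
	close(fd);
	return 0;
}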
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index a4789e87a315..b7a65a07af03 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -134,9 +134,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (pud) {
-		if (pud_none(*pud))
-			huge_pmd_share(mm, addr, pud);
-		pte = (pte_t *) pmd_alloc(mm, pud, addr);
+		if (sz == PUD_SIZE) {
+			pte = (pte_t *)pud;
+		} else {
+			BUG_ON(sz != PMD_SIZE);
+			if (pud_none(*pud))
+				huge_pmd_share(mm, addr, pud);
+			pte = (pte_t *) pmd_alloc(mm, pud, addr);
+		}
 	}
 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
 
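In the hunk above, a GB page is mapped by the PUD entry itself: when sz == PUD_SIZE the walk stops one level higher and returns the pud cast to a pte_t *, while 2MB requests still go through huge_pmd_share()/pmd_alloc(). For reference, a rough sketch of the sizes involved on x86-64 with 4KB base pages (architectural values, not part of this diff):

/* Each page-table level resolves 9 bits of the virtual address: */
#define PMD_SIZE	(1UL << 21)	/* 512 * 4KB = 2MB per PMD entry */
#define PUD_SIZE	(1UL << 30)	/* 512 * 2MB = 1GB per PUD entry */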
@@ -152,8 +157,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	pgd = pgd_offset(mm, addr);
 	if (pgd_present(*pgd)) {
 		pud = pud_offset(pgd, addr);
-		if (pud_present(*pud))
+		if (pud_present(*pud)) {
+			if (pud_large(*pud))
+				return (pte_t *)pud;
 			pmd = pmd_offset(pud, addr);
+		}
 	}
 	return (pte_t *) pmd;
 }
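huge_pte_offset() gains the mirror-image check: if the PUD entry is itself a large-page mapping, it is the "pte" and the walk must not descend to a PMD level that does not exist. pud_large() is defined in the x86 pgtable headers, not in this diff; roughly, as of this era of the tree (a sketch from the contemporaneous headers, so treat as an assumption), it tests the present and PSE bits together:

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

The pud_huge() change in the next hunk applies the same idea, matching the existing pmd_huge().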
@@ -216,7 +224,7 @@ int pmd_huge(pmd_t pmd)
 
 int pud_huge(pud_t pud)
 {
-	return 0;
+	return !!(pud_val(pud) & _PAGE_PSE);
 }
 
 struct page *
@@ -252,6 +260,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
+	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
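The struct hstate additions in this and the following hunks come from the multiple-hugepage-size infrastructure this patch builds on: hstate_file() returns the huge page state (and hence the page size) backing the given hugetlbfs file, so the unmapped-area searches below can align to either 2MB or 1GB as appropriate.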
@@ -264,7 +273,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 	}
 
 full_search:
-	addr = ALIGN(start_addr, HPAGE_SIZE);
+	addr = ALIGN(start_addr, huge_page_size(h));
 
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
 		/* At this point:  (!vma || addr < vma->vm_end). */
@@ -286,7 +295,7 @@ full_search:
 		}
 		if (addr + mm->cached_hole_size < vma->vm_start)
 			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+		addr = ALIGN(vma->vm_end, huge_page_size(h));
 	}
 }
 
@@ -294,6 +303,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long addr0, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
+	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev_vma;
 	unsigned long base = mm->mmap_base, addr = addr0;
@@ -314,7 +324,7 @@ try_again:
 		goto fail;
 
 	/* either no address requested or cant fit in requested address hole */
-	addr = (mm->free_area_cache - len) & HPAGE_MASK;
+	addr = (mm->free_area_cache - len) & huge_page_mask(h);
 	do {
 		/*
 		 * Lookup failure means no vma is above this address,
@@ -345,7 +355,7 @@ try_again:
 			largest_hole = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & HPAGE_MASK;
+		addr = (vma->vm_start - len) & huge_page_mask(h);
 	} while (len <= vma->vm_start);
 
 fail:
@@ -383,10 +393,11 @@ unsigned long
 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
+	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
-	if (len & ~HPAGE_MASK)
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
 	if (len > TASK_SIZE)
 		return -ENOMEM;
@@ -398,7 +409,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	}
 
 	if (addr) {
-		addr = ALIGN(addr, HPAGE_SIZE);
+		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
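Taken together, the HPAGE_SIZE/HPAGE_MASK to huge_page_size(h)/huge_page_mask(h) conversions make the whole search path per-size rather than compile-time. A small standalone demo of the len check above (plain userspace C with an assumed 1GB hstate, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long size = 1UL << 30;		/* huge_page_size(h) for 1GB */
	unsigned long mask = ~(size - 1);	/* huge_page_mask(h)         */
	unsigned long lens[] = { 3UL << 30, (3UL << 30) + (1UL << 29) };

	for (int i = 0; i < 2; i++)	/* 3GB passes, 3.5GB is rejected */
		printf("len=%lu -> %s\n", lens[i],
		       (lens[i] & ~mask) ? "-EINVAL" : "ok");
	return 0;
}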