Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
 arch/powerpc/mm/hugetlbpage.c | 548 +-----------------------------------
 1 file changed, 6 insertions(+), 542 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index fb959264c104..92a1b16fb7e3 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -91,7 +91,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	pgd_t *pg;
 	pud_t *pu;
 
-	BUG_ON(! in_hugepage_area(mm->context, addr));
+	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
 	addr &= HPAGE_MASK;
 
@@ -119,7 +119,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pud_t *pu;
 	hugepd_t *hpdp = NULL;
 
-	BUG_ON(! in_hugepage_area(mm->context, addr));
+	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
 	addr &= HPAGE_MASK;
 
@@ -302,7 +302,7 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
 	start = addr;
 	pgd = pgd_offset((*tlb)->mm, addr);
 	do {
-		BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
+		BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
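
The three hunks above all make the same substitution: instead of asking whether an address lies inside the dedicated low/high hugepage areas (in_hugepage_area), the code asks the new slice map which page size owns the slice containing the address (get_slice_psize) and insists it is mmu_huge_psize. A minimal sketch of that lookup, assuming the packing the companion slice patch uses (4 psize bits per slice, sixteen 256MB low slices below 4GB, 1TB high slices above); names are illustrative, not the kernel's exact definitions:

/* Sketch only: the slice-psize lookup the new BUG_ON relies on.
 * Slice sizes and the 4-bit packing are assumptions drawn from the
 * companion slice patch; names are illustrative. */
#define SKETCH_SLICE_LOW_SHIFT	28	/* 16 slices of 256MB below 4GB */
#define SKETCH_SLICE_HIGH_SHIFT	40	/* 1TB slices above 4GB */

static unsigned int sketch_get_slice_psize(const unsigned char *low_psizes,
					   const unsigned char *high_psizes,
					   unsigned long addr)
{
	unsigned long i;

	if (addr < (1UL << 32)) {
		i = addr >> SKETCH_SLICE_LOW_SHIFT;
		/* two 4-bit psize fields per byte */
		return (low_psizes[i >> 1] >> ((i & 1) * 4)) & 0xf;
	}
	i = addr >> SKETCH_SLICE_HIGH_SHIFT;
	return (high_psizes[i >> 1] >> ((i & 1) * 4)) & 0xf;
}

With a map like that in place, each BUG_ON reduces to a cheap table lookup rather than a range test against reserved areas.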
@@ -331,203 +331,13 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return __pte(old);
 }
 
-struct slb_flush_info {
-	struct mm_struct *mm;
-	u16 newareas;
-};
-
-static void flush_low_segments(void *parm)
-{
-	struct slb_flush_info *fi = parm;
-	unsigned long i;
-
-	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
-
-	if (current->active_mm != fi->mm)
-		return;
-
-	/* Only need to do anything if this CPU is working in the same
-	 * mm as the one which has changed */
-
-	/* update the paca copy of the context struct */
-	get_paca()->context = current->active_mm->context;
-
-	asm volatile("isync" : : : "memory");
-	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (fi->newareas & (1U << i)))
-			continue;
-		asm volatile("slbie %0"
-			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
-	}
-	asm volatile("isync" : : : "memory");
-}
-
-static void flush_high_segments(void *parm)
-{
-	struct slb_flush_info *fi = parm;
-	unsigned long i, j;
-
-
-	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
-
-	if (current->active_mm != fi->mm)
-		return;
-
-	/* Only need to do anything if this CPU is working in the same
-	 * mm as the one which has changed */
-
-	/* update the paca copy of the context struct */
-	get_paca()->context = current->active_mm->context;
-
-	asm volatile("isync" : : : "memory");
-	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (fi->newareas & (1U << i)))
-			continue;
-		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
-			asm volatile("slbie %0"
-				     :: "r" (((i << HTLB_AREA_SHIFT)
-					      + (j << SID_SHIFT)) | SLBIE_C));
-	}
-	asm volatile("isync" : : : "memory");
-}
-
-static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
-	unsigned long start = area << SID_SHIFT;
-	unsigned long end = (area+1) << SID_SHIFT;
-	struct vm_area_struct *vma;
-
-	BUG_ON(area >= NUM_LOW_AREAS);
-
-	/* Check no VMAs are in the region */
-	vma = find_vma(mm, start);
-	if (vma && (vma->vm_start < end))
-		return -EBUSY;
-
-	return 0;
-}
-
-static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
-	unsigned long start = area << HTLB_AREA_SHIFT;
-	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
-	struct vm_area_struct *vma;
-
-	BUG_ON(area >= NUM_HIGH_AREAS);
-
-	/* Hack, so that each addresses is controlled by exactly one
-	 * of the high or low area bitmaps, the first high area starts
-	 * at 4GB, not 0 */
-	if (start == 0)
-		start = 0x100000000UL;
-
-	/* Check no VMAs are in the region */
-	vma = find_vma(mm, start);
-	if (vma && (vma->vm_start < end))
-		return -EBUSY;
-
-	return 0;
-}
-
-static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
-{
-	unsigned long i;
-	struct slb_flush_info fi;
-
-	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
-	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
-
-	newareas &= ~(mm->context.low_htlb_areas);
-	if (! newareas)
-		return 0; /* The segments we want are already open */
-
-	for (i = 0; i < NUM_LOW_AREAS; i++)
-		if ((1 << i) & newareas)
-			if (prepare_low_area_for_htlb(mm, i) != 0)
-				return -EBUSY;
-
-	mm->context.low_htlb_areas |= newareas;
-
-	/* the context change must make it to memory before the flush,
-	 * so that further SLB misses do the right thing. */
-	mb();
-
-	fi.mm = mm;
-	fi.newareas = newareas;
-	on_each_cpu(flush_low_segments, &fi, 0, 1);
-
-	return 0;
-}
-
-static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
-{
-	struct slb_flush_info fi;
-	unsigned long i;
-
-	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
-	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
-		     != NUM_HIGH_AREAS);
-
-	newareas &= ~(mm->context.high_htlb_areas);
-	if (! newareas)
-		return 0; /* The areas we want are already open */
-
-	for (i = 0; i < NUM_HIGH_AREAS; i++)
-		if ((1 << i) & newareas)
-			if (prepare_high_area_for_htlb(mm, i) != 0)
-				return -EBUSY;
-
-	mm->context.high_htlb_areas |= newareas;
-
-	/* the context change must make it to memory before the flush,
-	 * so that further SLB misses do the right thing. */
-	mb();
-
-	fi.mm = mm;
-	fi.newareas = newareas;
-	on_each_cpu(flush_high_segments, &fi, 0, 1);
-
-	return 0;
-}
-
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
-{
-	int err = 0;
-
-	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-		return -EINVAL;
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (addr < 0x100000000UL)
-		err = open_low_hpage_areas(current->mm,
-					   LOW_ESID_MASK(addr, len));
-	if ((addr + len) > 0x100000000UL)
-		err = open_high_hpage_areas(current->mm,
-					    HTLB_AREA_MASK(addr, len));
-#ifdef CONFIG_SPE_BASE
-	spu_flush_all_slbs(current->mm);
-#endif
-	if (err) {
-		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
-		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
-		       addr, len,
-		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
-		return err;
-	}
-
-	return 0;
-}
-
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
 	pte_t *ptep;
 	struct page *page;
 
-	if (! in_hugepage_area(mm->context, address))
+	if (get_slice_psize(mm, address) != mmu_huge_psize)
 		return ERR_PTR(-EINVAL);
 
 	ptep = huge_pte_offset(mm, address);
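
The block removed in the hunk above is the per-mm bookkeeping that "opened" low (256MB-segment) and high address-space areas for hugepages and then broadcast targeted slbie instructions so every CPU running the mm dropped stale SLB entries. The slice code takes over both jobs. Roughly, the cross-CPU pattern it needs is the one deleted here, restated below with a full SLB flush; the use of slb_flush_and_rebolt() in place of per-area slbie loops is an assumption about the slice code, not a quote of it:

/* Sketch only: the IPI-driven flush pattern that the deleted
 * flush_{low,high}_segments() helpers implemented.  Invoked via
 * on_each_cpu() after the context change has been made visible
 * with mb(). */
static void sketch_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;

	/* Only CPUs currently running the affected mm need to act. */
	if (current->active_mm != mm)
		return;

	/* Refresh the per-CPU copy of the context, then drop SLB
	 * entries that may still map the old page size. */
	get_paca()->context = current->active_mm->context;
	slb_flush_and_rebolt();
}

/* caller side, mirroring the deleted open_*_hpage_areas():
 *	mb();
 *	on_each_cpu(sketch_flush_segments, mm, 0, 1);
 */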
@@ -551,359 +361,13 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
-/* Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions. */
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-				     unsigned long len, unsigned long pgoff,
-				     unsigned long flags)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-
-	/* handle fixed mapping: prevent overlap with huge pages */
-	if (flags & MAP_FIXED) {
-		if (is_hugepage_only_range(mm, addr, len))
-			return -EINVAL;
-		return addr;
-	}
-
-	if (addr) {
-		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
-		if (((TASK_SIZE - len) >= addr)
-		    && (!vma || (addr+len) <= vma->vm_start)
-		    && !is_hugepage_only_range(mm, addr,len))
-			return addr;
-	}
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	vma = find_vma(mm, addr);
-	while (TASK_SIZE - len >= addr) {
-		BUG_ON(vma && (addr >= vma->vm_end));
-
-		if (touches_hugepage_low_range(mm, addr, len)) {
-			addr = ALIGN(addr+1, 1<<SID_SHIFT);
-			vma = find_vma(mm, addr);
-			continue;
-		}
-		if (touches_hugepage_high_range(mm, addr, len)) {
-			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
-			vma = find_vma(mm, addr);
-			continue;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
-		vma = vma->vm_next;
-	}
-
-	/* Make sure we didn't miss any holes */
-	if (start_addr != TASK_UNMAPPED_BASE) {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-		goto full_search;
-	}
-	return -ENOMEM;
-}
-
-/*
- * This mmap-allocator allocates new areas top-down from below the
- * stack's low limit (the base):
- *
- * Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions.
- */
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-			       const unsigned long len, const unsigned long pgoff,
-			       const unsigned long flags)
-{
-	struct vm_area_struct *vma, *prev_vma;
-	struct mm_struct *mm = current->mm;
-	unsigned long base = mm->mmap_base, addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	int first_time = 1;
-
-	/* requested length too big for entire address space */
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-
-	/* handle fixed mapping: prevent overlap with huge pages */
-	if (flags & MAP_FIXED) {
-		if (is_hugepage_only_range(mm, addr, len))
-			return -EINVAL;
-		return addr;
-	}
-
-	/* dont allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	/* requesting a specific address */
-	if (addr) {
-		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start)
-		    && !is_hugepage_only_range(mm, addr,len))
-			return addr;
-	}
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache = base;
-	}
-try_again:
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or cant fit in requested address hole */
-	addr = (mm->free_area_cache - len) & PAGE_MASK;
-	do {
-hugepage_recheck:
-		if (touches_hugepage_low_range(mm, addr, len)) {
-			addr = (addr & ((~0) << SID_SHIFT)) - len;
-			goto hugepage_recheck;
-		} else if (touches_hugepage_high_range(mm, addr, len)) {
-			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
-			goto hugepage_recheck;
-		}
-
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
-			return addr;
-
-		/*
-		 * new region fits between prev_vma->vm_end and
-		 * vma->vm_start, use it:
-		 */
-		if (addr+len <= vma->vm_start &&
-		    (!prev_vma || (addr >= prev_vma->vm_end))) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			return (mm->free_area_cache = addr);
-		} else {
-			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end) {
-				mm->free_area_cache = vma->vm_start;
-				mm->cached_hole_size = largest_hole;
-			}
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (first_time) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		first_time = 0;
-		goto try_again;
-	}
-	/*
-	 * A failed mmap() very likely causes application failure,
-	 * so fall back to the bottom-up function here. This scenario
-	 * can happen with large stack limits and large mmap()
-	 * allocations.
-	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
-
-	return addr;
-}
-
-static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
-{
-	struct vm_area_struct *vma;
-
-	vma = find_vma(current->mm, addr);
-	if (TASK_SIZE - len >= addr &&
-	    (!vma || ((addr + len) <= vma->vm_start)))
-		return 0;
-
-	return -ENOMEM;
-}
-
-static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
-{
-	unsigned long addr = 0;
-	struct vm_area_struct *vma;
-
-	vma = find_vma(current->mm, addr);
-	while (addr + len <= 0x100000000UL) {
-		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
-		if (! __within_hugepage_low_range(addr, len, segmask)) {
-			addr = ALIGN(addr+1, 1<<SID_SHIFT);
-			vma = find_vma(current->mm, addr);
-			continue;
-		}
-
-		if (!vma || (addr + len) <= vma->vm_start)
-			return addr;
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-		/* Depending on segmask this might not be a confirmed
-		 * hugepage region, so the ALIGN could have skipped
-		 * some VMAs */
-		vma = find_vma(current->mm, addr);
-	}
-
-	return -ENOMEM;
-}
-
-static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
-{
-	unsigned long addr = 0x100000000UL;
-	struct vm_area_struct *vma;
-
-	vma = find_vma(current->mm, addr);
-	while (addr + len <= TASK_SIZE_USER64) {
-		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
-		if (! __within_hugepage_high_range(addr, len, areamask)) {
-			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
-			vma = find_vma(current->mm, addr);
-			continue;
-		}
-
-		if (!vma || (addr + len) <= vma->vm_start)
-			return addr;
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-		/* Depending on segmask this might not be a confirmed
-		 * hugepage region, so the ALIGN could have skipped
-		 * some VMAs */
-		vma = find_vma(current->mm, addr);
-	}
-
-	return -ENOMEM;
-}
 
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 					unsigned long len, unsigned long pgoff,
 					unsigned long flags)
 {
-	int lastshift;
-	u16 areamask, curareas;
-
-	if (HPAGE_SHIFT == 0)
-		return -EINVAL;
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-
-	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
-		return -EINVAL;
-
-	/* Paranoia, caller should have dealt with this */
-	BUG_ON((addr + len) < addr);
-
-	/* Handle MAP_FIXED */
-	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
-			return -EINVAL;
-		return addr;
-	}
-
-	if (test_thread_flag(TIF_32BIT)) {
-		curareas = current->mm->context.low_htlb_areas;
-
-		/* First see if we can use the hint address */
-		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
-			areamask = LOW_ESID_MASK(addr, len);
-			if (open_low_hpage_areas(current->mm, areamask) == 0)
-				return addr;
-		}
-
-		/* Next see if we can map in the existing low areas */
-		addr = htlb_get_low_area(len, curareas);
-		if (addr != -ENOMEM)
-			return addr;
-
-		/* Finally go looking for areas to open */
-		lastshift = 0;
-		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
-		     ! lastshift; areamask >>=1) {
-			if (areamask & 1)
-				lastshift = 1;
-
-			addr = htlb_get_low_area(len, curareas | areamask);
-			if ((addr != -ENOMEM)
-			    && open_low_hpage_areas(current->mm, areamask) == 0)
-				return addr;
-		}
-	} else {
-		curareas = current->mm->context.high_htlb_areas;
-
-		/* First see if we can use the hint address */
-		/* We discourage 64-bit processes from doing hugepage
-		 * mappings below 4GB (must use MAP_FIXED) */
-		if ((addr >= 0x100000000UL)
-		    && (htlb_check_hinted_area(addr, len) == 0)) {
-			areamask = HTLB_AREA_MASK(addr, len);
-			if (open_high_hpage_areas(current->mm, areamask) == 0)
-				return addr;
-		}
-
-		/* Next see if we can map in the existing high areas */
-		addr = htlb_get_high_area(len, curareas);
-		if (addr != -ENOMEM)
-			return addr;
-
-		/* Finally go looking for areas to open */
-		lastshift = 0;
-		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
-		     ! lastshift; areamask >>=1) {
-			if (areamask & 1)
-				lastshift = 1;
-
-			addr = htlb_get_high_area(len, curareas | areamask);
-			if ((addr != -ENOMEM)
-			    && open_high_hpage_areas(current->mm, areamask) == 0)
-				return addr;
-		}
-	}
-	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
-	       " enough areas\n");
-	return -ENOMEM;
+	return slice_get_unmapped_area(addr, len, flags,
+				       mmu_huge_psize, 1, 0);
 }
 
 /*
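
The rewritten hugetlb_get_unmapped_area() is now a one-line delegation. Reading the call against the slice API introduced by the companion patch (parameter names per that patch; treat them as assumptions here), the arguments are the hint address, the length, the mmap flags, the requested page-size index (mmu_huge_psize), a topdown flag, and a use-the-free-area-cache flag, so the trailing 1, 0 requests a top-down search with the cache disabled. A small userspace sketch that ends up in this kernel path; the hugetlbfs mount point is an illustrative assumption:

/* Userspace sketch: mapping a hugetlbfs file drives the kernel's
 * hugetlb_get_unmapped_area(), and hence slice_get_unmapped_area().
 * The /mnt/huge mount point is assumed for illustration. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE (16UL * 1024 * 1024)	/* 16MB: typical POWER hugepage */

int main(void)
{
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* addr == NULL: the kernel picks a hugepage-slice address. */
	void *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	printf("hugepage mapping at %p\n", p);

	munmap(p, HPAGE_SIZE);
	close(fd);
	return 0;
}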