 arch/sparc/include/asm/hugetlb.h        |   5
 arch/sparc/include/asm/mmu_64.h         |   2
 arch/sparc/include/asm/mmu_context_64.h |   2
 arch/sparc/include/asm/page_64.h        |   7
 arch/sparc/include/asm/pgalloc_64.h     |   4
 arch/sparc/include/asm/pgtable_64.h     | 177
 arch/sparc/include/asm/tsb.h            |  94
 arch/sparc/kernel/sun4v_tlb_miss.S      |   2
 arch/sparc/kernel/tsb.S                 |   9
 arch/sparc/mm/fault_64.c                |   4
 arch/sparc/mm/hugetlbpage.c             |  50
 arch/sparc/mm/init_64.c                 | 204
 arch/sparc/mm/tlb.c                     | 118
 arch/sparc/mm/tsb.c                     |  14
 14 files changed, 582 insertions(+), 110 deletions(-)
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index e7927c9758a1..8c5eed6d267f 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -10,7 +10,10 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep);
 
-void hugetlb_prefault_arch_hook(struct mm_struct *mm);
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+	hugetlb_setup(mm);
+}
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 					 unsigned long addr,
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 31977c8dd942..76092c4dd277 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -82,7 +82,7 @@ struct tsb_config {
 
 #define MM_TSB_BASE	0
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define MM_TSB_HUGE	1
 #define MM_NUM_TSBS	2
 #else
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index a97fd085cebe..9191ca62ed9c 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -36,7 +36,7 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[0],
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 			     (mm->context.tsb_block[1].tsb ?
 			      &mm->context.tsb_block[1] :
 			      NULL)
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 087a5c505c69..4b39f74d6ca0 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -17,7 +17,7 @@
 
 #define HPAGE_SHIFT		22
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
@@ -26,6 +26,11 @@
 
 #ifndef __ASSEMBLY__
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+struct mm_struct;
+extern void hugetlb_setup(struct mm_struct *mm);
+#endif
+
 #define WANT_PAGE_VIRTUAL
 
 extern void _clear_page(void *page);
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 0ebca93ef0f5..bcfe063bce23 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -45,8 +45,8 @@ extern pgtable_t pte_alloc_one(struct mm_struct *mm,
 extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
 extern void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 
-#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
-#define pmd_populate(MM, PMD, PTE)		pmd_set(PMD, PTE)
+#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(MM, PMD, PTE)
+#define pmd_populate(MM, PMD, PTE)		pmd_set(MM, PMD, PTE)
 #define pmd_pgtable(PMD)			((pte_t *)__pmd_page(PMD))
 
 #define check_pgt_cache()	do { } while (0)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index af3cd7a9e9ac..95515f1e7cef 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -63,10 +63,31 @@
 #error Page table parameters do not cover virtual address space properly.
 #endif
 
+#if (PMD_SHIFT != HPAGE_SHIFT)
+#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
+#endif
+
 /* PMDs point to PTE tables which are 4K aligned.  */
 #define PMD_PADDR	_AC(0xfffffffe,UL)
 #define PMD_PADDR_SHIFT	_AC(11,UL)
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_ISHUGE	_AC(0x00000001,UL)
+
+/* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge
+ * pages, this frees up a bunch of bits in the layout that we can
+ * use for the protection settings and software metadata.
+ */
+#define PMD_HUGE_PADDR		_AC(0xfffff800,UL)
+#define PMD_HUGE_PROTBITS	_AC(0x000007ff,UL)
+#define PMD_HUGE_PRESENT	_AC(0x00000400,UL)
+#define PMD_HUGE_WRITE		_AC(0x00000200,UL)
+#define PMD_HUGE_DIRTY		_AC(0x00000100,UL)
+#define PMD_HUGE_ACCESSED	_AC(0x00000080,UL)
+#define PMD_HUGE_EXEC		_AC(0x00000040,UL)
+#define PMD_HUGE_SPLITTING	_AC(0x00000020,UL)
+#endif
+
 /* PGDs point to PMD tables which are 8K aligned.  */
 #define PGD_PADDR	_AC(0xfffffffc,UL)
 #define PGD_PADDR_SHIFT	_AC(11,UL)
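A huge PMD therefore carries its physical address in the top 21 bits (PMD_HUGE_PADDR) and all software and protection state in the low 11 bits (PMD_HUGE_PROTBITS). A rough decode sketch using the mask values defined above; the address constant is made up for illustration and this code is not part of the patch:

	/* Illustrative encode/decode of a huge PMD word.  PMD_PADDR_SHIFT
	 * is 11, so the PMD stores paddr >> 11; a 4MB-aligned physical
	 * address leaves the low 11 (PMD_HUGE_PROTBITS) bits free.
	 */
	unsigned long paddr = 0x10400000UL;		/* 4MB aligned, made up */
	unsigned long pmd_word;

	pmd_word  = (paddr >> 11) & 0xfffff800UL;	/* PMD_HUGE_PADDR field */
	pmd_word |= 0x00000001UL;			/* PMD_ISHUGE */
	pmd_word |= 0x00000400UL | 0x00000200UL;	/* PRESENT | WRITE */

	/* Recover the physical address: */
	paddr = (pmd_word & 0xfffff800UL) << 11;	/* 0x10400000 again */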
@@ -219,6 +240,19 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 }
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot);
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+
+extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	/* Do nothing, mk_pmd() does this part.  */
+	return pmd;
+}
+#endif
+
 /* This one can be done with two shifts.  */
 static inline unsigned long pte_pfn(pte_t pte)
 {
@@ -588,19 +622,130 @@ static inline unsigned long pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
-#define pmd_set(pmdp, ptep)	\
-	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT))
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_young(pmd_t pmd)
+{
+	return pmd_val(pmd) & PMD_HUGE_ACCESSED;
+}
+
+static inline int pmd_write(pmd_t pmd)
+{
+	return pmd_val(pmd) & PMD_HUGE_WRITE;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR;
+
+	return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
+		(PMD_ISHUGE | PMD_HUGE_PRESENT);
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
+		(PMD_ISHUGE|PMD_HUGE_SPLITTING);
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & PMD_ISHUGE;
+}
+
+#define has_transparent_hugepage() 1
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_HUGE_ACCESSED;
+	return pmd;
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_HUGE_WRITE;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	pmd_val(pmd) |= PMD_HUGE_DIRTY;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	pmd_val(pmd) |= PMD_HUGE_WRITE;
+	return pmd;
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_HUGE_PRESENT;
+	return pmd;
+}
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+	pmd_val(pmd) |= PMD_HUGE_SPLITTING;
+	return pmd;
+}
+
+extern pgprot_t pmd_pgprot(pmd_t entry);
+#endif
+
+static inline int pmd_present(pmd_t pmd)
+{
+	return pmd_val(pmd) != 0U;
+}
+
+#define pmd_none(pmd)			(!pmd_val(pmd))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		       pmd_t *pmdp, pmd_t pmd);
+#else
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+}
+#endif
+
+static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+	unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT;
+
+	pmd_val(*pmdp) = val;
+}
+
 #define pud_set(pudp, pmdp)	\
 	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT))
-#define __pmd_page(pmd)	\
-	((unsigned long) __va((((unsigned long)pmd_val(pmd))<<PMD_PADDR_SHIFT)))
+static inline unsigned long __pmd_page(pmd_t pmd)
+{
+	unsigned long paddr = (unsigned long) pmd_val(pmd);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (pmd_val(pmd) & PMD_ISHUGE)
+		paddr &= PMD_HUGE_PADDR;
+#endif
+	paddr <<= PMD_PADDR_SHIFT;
+	return ((unsigned long) __va(paddr));
+}
 #define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
 #define pud_page_vaddr(pud)		\
 	((unsigned long) __va((((unsigned long)pud_val(pud))<<PGD_PADDR_SHIFT)))
 #define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
-#define pmd_none(pmd)			(!pmd_val(pmd))
 #define pmd_bad(pmd)			(0)
-#define pmd_present(pmd)		(pmd_val(pmd) != 0U)
 #define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0U)
 #define pud_none(pud)			(!pud_val(pud))
 #define pud_bad(pud)			(0)
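The pmd_mk*() helpers above are pure value transforms on the huge-PMD software bits; nothing touches memory until set_pmd_at(). A hedged sketch of how generic THP code typically strings them together (the variable names vma, mm, haddr, pmdp, page are the usual generic-MM ones, not names from this patch):

	pmd_t entry;

	entry = mk_pmd(page, vma->vm_page_prot);	/* sets PMD_ISHUGE */
	entry = pmd_mkhuge(entry);			/* no-op on sparc64 */
	entry = pmd_mkwrite(pmd_mkdirty(entry));	/* set WRITE and DIRTY */
	set_pmd_at(mm, haddr, pmdp, entry);		/* install the huge PMD */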
@@ -634,6 +779,16 @@ static inline unsigned long pte_special(pte_t pte)
 extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 			  pte_t *ptep, pte_t orig, int fullmm);
 
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	set_pmd_at(mm, addr, pmdp, __pmd(0U));
+	return pmd;
+}
+
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int fullmm)
 {
@@ -689,6 +844,16 @@ extern void mmu_info(struct seq_file *);
 
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+				 pmd_t *pmd);
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+#endif
 
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index ef8cd1a174f1..b4c258de4443 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -157,10 +157,86 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	andn		REG2, 0x7, REG2; \
 	add		REG1, REG2, REG1;
 
-/* Do a user page table walk in MMU globals.  Leaves physical PTE
- * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
- * termination.  Physical base of page tables is in PHYS_PGD which
- * will not be modified.
+/* This macro exists only to make the PMD translator below easier
+ * to read.  It hides the ELF section switch for the sun4v code
+ * patching.
+ */
+#define OR_PTE_BIT(REG, NAME)				\
+661:	or		REG, _PAGE_##NAME##_4U, REG;	\
+	.section	.sun4v_1insn_patch, "ax";	\
+	.word		661b;				\
+	or		REG, _PAGE_##NAME##_4V, REG;	\
+	.previous;
+
+/* Load into REG the PTE value for VALID, CACHE, and SZHUGE.  */
+#define BUILD_PTE_VALID_SZHUGE_CACHE(REG)				   \
+661:	sethi		%uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG;		   \
+	.section	.sun4v_1insn_patch, "ax";			   \
+	.word		661b;						   \
+	sethi		%uhi(_PAGE_VALID), REG;				   \
+	.previous;							   \
+	sllx		REG, 32, REG;					   \
+661:	or		REG, _PAGE_CP_4U|_PAGE_CV_4U, REG;		   \
+	.section	.sun4v_1insn_patch, "ax";			   \
+	.word		661b;						   \
+	or		REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \
+	.previous;
+
+/* PMD has been loaded into REG1, interpret the value, seeing
+ * if it is a HUGE PMD or a normal one.  If it is not valid
+ * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
+ * translates to a valid PTE, branch to PTE_LABEL.
+ *
+ * We translate the PMD by hand, one bit at a time,
+ * constructing the huge PTE.
+ *
+ * So we construct the PTE in REG2 as follows:
+ *
+ * 1) Extract the PMD PFN from REG1 and place it into REG2.
+ *
+ * 2) Translate PMD protection bits in REG1 into REG2, one bit
+ *    at a time using andcc tests on REG1 and OR's into REG2.
+ *
+ *    Only two bits to be concerned with here, EXEC and WRITE.
+ *    Now REG1 is freed up and we can use it as a temporary.
+ *
+ * 3) Construct the VALID, CACHE, and page size PTE bits in
+ *    REG1, OR with REG2 to form final PTE.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+	brz,pn		REG1, FAIL_LABEL;		\
+	 andcc		REG1, PMD_ISHUGE, %g0;		\
+	be,pt		%xcc, 700f;			\
+	 and		REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2; \
+	cmp		REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED; \
+	bne,pn		%xcc, FAIL_LABEL;		\
+	 andn		REG1, PMD_HUGE_PROTBITS, REG2;	\
+	sllx		REG2, PMD_PADDR_SHIFT, REG2;	\
+	/* REG2 now holds PFN << PAGE_SHIFT */		\
+	andcc		REG1, PMD_HUGE_EXEC, %g0;	\
+	bne,a,pt	%xcc, 1f;			\
+	 OR_PTE_BIT(REG2, EXEC);			\
+1:	andcc		REG1, PMD_HUGE_WRITE, %g0;	\
+	bne,a,pt	%xcc, 1f;			\
+	 OR_PTE_BIT(REG2, W);				\
+	/* REG1 can now be clobbered, build final PTE */ \
+1:	BUILD_PTE_VALID_SZHUGE_CACHE(REG1);		\
+	ba,pt		%xcc, PTE_LABEL;		\
+	 or		REG1, REG2, REG1;		\
+700:
+#else
+#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+	brz,pn		REG1, FAIL_LABEL;		\
+	 nop;
+#endif
+
+/* Do a user page table walk in MMU globals.  Leaves final,
+ * valid, PTE value in REG1.  Jumps to FAIL_LABEL on early
+ * page table walk termination or if the PTE is not valid.
+ *
+ * Physical base of page tables is in PHYS_PGD which will not
+ * be modified.
  *
  * VADDR will not be clobbered, but REG1 and REG2 will.
  */
@@ -175,12 +251,16 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	sllx		REG1, PGD_PADDR_SHIFT, REG1; \
 	andn		REG2, 0x3, REG2; \
 	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
-	brz,pn		REG1, FAIL_LABEL; \
-	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
+	USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
+	sllx		VADDR, 64 - PMD_SHIFT, REG2; \
 	srlx		REG2, 64 - (PAGE_SHIFT - 1), REG2; \
 	sllx		REG1, PMD_PADDR_SHIFT, REG1; \
 	andn		REG2, 0x7, REG2; \
-	add		REG1, REG2, REG1;
+	add		REG1, REG2, REG1; \
+	ldxa		[REG1] ASI_PHYS_USE_EC, REG1; \
+	brgez,pn	REG1, FAIL_LABEL; \
+	 nop; \
+800:
 
 /* Lookup a OBP mapping on VADDR in the prom_trans[] table at TL>0.
  * If no entry is found, FAIL_LABEL will be branched to.  On success
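In C terms, USER_PGTABLE_CHECK_PMD_HUGE() performs roughly the following translation for the sun4u case (the _4V bit names are patched in at boot on sun4v). This is a hedged restatement of the assembly for readability, not a function the patch adds:

	static unsigned long huge_pmd_to_pte_4u(unsigned long pmd)
	{
		unsigned long pte;

		/* Step 1: PFN bits; the low 11 software bits are masked off. */
		pte = (pmd & ~PMD_HUGE_PROTBITS) << PMD_PADDR_SHIFT;

		/* Step 2: translate the EXEC and WRITE protection bits. */
		if (pmd & PMD_HUGE_EXEC)
			pte |= _PAGE_EXEC_4U;
		if (pmd & PMD_HUGE_WRITE)
			pte |= _PAGE_W_4U;

		/* Step 3: VALID, cacheability, and the huge page size. */
		pte |= _PAGE_VALID | _PAGE_SZHUGE_4U |
		       _PAGE_CP_4U | _PAGE_CV_4U;
		return pte;
	}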
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index e1fbf8c75787..bde867fd71e8 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -176,7 +176,7 @@ sun4v_tsb_miss_common:
 
 	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mov	SCRATCHPAD_UTSBREG2, %g5
 	ldxa	[%g5] ASI_SCRATCHPAD, %g5
 	cmp	%g5, -1
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index db15d123f054..d4bdc7a62375 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -49,7 +49,7 @@ tsb_miss_page_table_walk:
 	/* Before committing to a full page table walk,
 	 * check the huge page TSB.
 	 */
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 
 661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
 	nop
@@ -110,12 +110,9 @@ tsb_miss_page_table_walk:
 tsb_miss_page_table_walk_sun4v_fastpath:
 	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
-	/* Load and check PTE.  */
-	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, tsb_do_fault
-	 nop
+	/* Valid PTE is now in %g5.  */
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
 	sllx		%g7, 32, %g7
 	.section	.sun4v_2insn_patch, "ax"
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 413d29263304..2976dba1ebaf 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -465,13 +465,13 @@ good_area:
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
 #endif
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss = mm->context.huge_pte_count;
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
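The subtraction above keeps huge mappings from inflating the base TSB sizing: with HPAGE_SHIFT at 22 and PAGE_SHIFT at 13, each 4MB huge page accounts for 512 base pages of RSS, while the huge TSB is sized from huge_pte_count directly. A worked example with made-up numbers:

	unsigned long mm_rss = 10240;		/* total RSS in 8K pages, made up */
	unsigned long huge_pte_count = 4;	/* four 4MB huge mappings */

	/* HPAGE_SIZE / PAGE_SIZE == 1UL << (22 - 13) == 512 */
	mm_rss -= huge_pte_count * 512;		/* 10240 - 2048 = 8192 base pages */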
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 07e14535375c..f76f83d5ac63 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -303,53 +303,3 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
 	return NULL;
 }
-
-static void context_reload(void *__data)
-{
-	struct mm_struct *mm = __data;
-
-	if (mm == current->mm)
-		load_secondary_context(mm);
-}
-
-void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
-
-	if (likely(tp->tsb != NULL))
-		return;
-
-	tsb_grow(mm, MM_TSB_HUGE, 0);
-	tsb_context_switch(mm);
-	smp_tsb_sync(mm);
-
-	/* On UltraSPARC-III+ and later, configure the second half of
-	 * the Data-TLB for huge pages.
-	 */
-	if (tlb_type == cheetah_plus) {
-		unsigned long ctx;
-
-		spin_lock(&ctx_alloc_lock);
-		ctx = mm->context.sparc64_ctx_val;
-		ctx &= ~CTX_PGSZ_MASK;
-		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
-		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
-
-		if (ctx != mm->context.sparc64_ctx_val) {
-			/* When changing the page size fields, we
-			 * must perform a context flush so that no
-			 * stale entries match.  This flush must
-			 * occur with the original context register
-			 * settings.
-			 */
-			do_flush_tlb_mm(mm);
-
-			/* Reload the context register of all processors
-			 * also executing in this address space.
-			 */
-			mm->context.sparc64_ctx_val = ctx;
-			on_each_cpu(context_reload, mm, 0);
-		}
-		spin_unlock(&ctx_alloc_lock);
-	}
-}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 12ef4ea60c88..9e28a118e6a4 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -306,12 +306,24 @@ static void flush_dcache(unsigned long pfn)
 	}
 }
 
+/* mm->context.lock must be held */
+static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
+				    unsigned long tsb_hash_shift, unsigned long address,
+				    unsigned long tte)
+{
+	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
+	unsigned long tag;
+
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
+	tag = (address >> 22UL);
+	tsb_insert(tsb, tag, tte);
+}
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
+	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
-	struct tsb *tsb;
-	unsigned long tag, flags;
-	unsigned long tsb_index, tsb_hash_shift;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -328,7 +340,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
 		if ((tlb_type == hypervisor &&
 		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
@@ -340,11 +352,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 	}
 #endif
 
-	tsb = mm->context.tsb_block[tsb_index].tsb;
-	tsb += ((address >> tsb_hash_shift) &
-		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
-	tag = (address >> 22UL);
-	tsb_insert(tsb, tag, pte_val(pte));
+	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
+				address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
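The factored-out __update_mmu_tsb_insert() picks a TSB slot by shifting the virtual address by the page-size shift and masking with the power-of-two entry count; the tag is always the address shifted down by 22. A sketch with illustrative values (the address and entry count are made up):

	unsigned long address  = 0x70000400000UL;	/* made up */
	unsigned long nentries = 512;			/* always a power of two */

	unsigned long base_slot = (address >> 13) & (nentries - 1); /* PAGE_SHIFT */
	unsigned long huge_slot = (address >> 22) & (nentries - 1); /* HPAGE_SHIFT */
	unsigned long tag       = address >> 22;	/* stored alongside the TTE */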
@@ -2568,3 +2577,180 @@ void pgtable_free(void *table, bool is_page)
 	else
 		kmem_cache_free(pgtable_cache, table);
 }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
+{
+	if (pgprot_val(pgprot) & _PAGE_VALID)
+		pmd_val(pmd) |= PMD_HUGE_PRESENT;
+	if (tlb_type == hypervisor) {
+		if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
+			pmd_val(pmd) |= PMD_HUGE_WRITE;
+		if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
+			pmd_val(pmd) |= PMD_HUGE_EXEC;
+
+		if (!for_modify) {
+			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
+				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
+				pmd_val(pmd) |= PMD_HUGE_DIRTY;
+		}
+	} else {
+		if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
+			pmd_val(pmd) |= PMD_HUGE_WRITE;
+		if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
+			pmd_val(pmd) |= PMD_HUGE_EXEC;
+
+		if (!for_modify) {
+			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
+				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
+				pmd_val(pmd) |= PMD_HUGE_DIRTY;
+		}
+	}
+
+	return pmd;
+}
+
+pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+{
+	pmd_t pmd;
+
+	pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
+	pmd_val(pmd) |= PMD_ISHUGE;
+	pmd = pmd_set_protbits(pmd, pgprot, false);
+	return pmd;
+}
+
+pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
+			  PMD_HUGE_WRITE |
+			  PMD_HUGE_EXEC);
+	pmd = pmd_set_protbits(pmd, newprot, true);
+	return pmd;
+}
+
+pgprot_t pmd_pgprot(pmd_t entry)
+{
+	unsigned long pte = 0;
+
+	if (pmd_val(entry) & PMD_HUGE_PRESENT)
+		pte |= _PAGE_VALID;
+
+	if (tlb_type == hypervisor) {
+		if (pmd_val(entry) & PMD_HUGE_PRESENT)
+			pte |= _PAGE_PRESENT_4V;
+		if (pmd_val(entry) & PMD_HUGE_EXEC)
+			pte |= _PAGE_EXEC_4V;
+		if (pmd_val(entry) & PMD_HUGE_WRITE)
+			pte |= _PAGE_W_4V;
+		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
+			pte |= _PAGE_ACCESSED_4V;
+		if (pmd_val(entry) & PMD_HUGE_DIRTY)
+			pte |= _PAGE_MODIFIED_4V;
+		pte |= _PAGE_CP_4V|_PAGE_CV_4V;
+	} else {
+		if (pmd_val(entry) & PMD_HUGE_PRESENT)
+			pte |= _PAGE_PRESENT_4U;
+		if (pmd_val(entry) & PMD_HUGE_EXEC)
+			pte |= _PAGE_EXEC_4U;
+		if (pmd_val(entry) & PMD_HUGE_WRITE)
+			pte |= _PAGE_W_4U;
+		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
+			pte |= _PAGE_ACCESSED_4U;
+		if (pmd_val(entry) & PMD_HUGE_DIRTY)
+			pte |= _PAGE_MODIFIED_4U;
+		pte |= _PAGE_CP_4U|_PAGE_CV_4U;
+	}
+
+	return __pgprot(pte);
+}
+
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+			  pmd_t *pmd)
+{
+	unsigned long pte, flags;
+	struct mm_struct *mm;
+	pmd_t entry = *pmd;
+	pgprot_t prot;
+
+	if (!pmd_large(entry) || !pmd_young(entry))
+		return;
+
+	pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
+	pte <<= PMD_PADDR_SHIFT;
+	pte |= _PAGE_VALID;
+
+	prot = pmd_pgprot(entry);
+
+	if (tlb_type == hypervisor)
+		pgprot_val(prot) |= _PAGE_SZHUGE_4V;
+	else
+		pgprot_val(prot) |= _PAGE_SZHUGE_4U;
+
+	pte |= pgprot_val(prot);
+
+	mm = vma->vm_mm;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					addr, pte);
+
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void context_reload(void *__data)
+{
+	struct mm_struct *mm = __data;
+
+	if (mm == current->mm)
+		load_secondary_context(mm);
+}
+
+void hugetlb_setup(struct mm_struct *mm)
+{
+	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+	if (likely(tp->tsb != NULL))
+		return;
+
+	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
+	/* On UltraSPARC-III+ and later, configure the second half of
+	 * the Data-TLB for huge pages.
+	 */
+	if (tlb_type == cheetah_plus) {
+		unsigned long ctx;
+
+		spin_lock(&ctx_alloc_lock);
+		ctx = mm->context.sparc64_ctx_val;
+		ctx &= ~CTX_PGSZ_MASK;
+		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
+
+		if (ctx != mm->context.sparc64_ctx_val) {
+			/* When changing the page size fields, we
+			 * must perform a context flush so that no
+			 * stale entries match.  This flush must
+			 * occur with the original context register
+			 * settings.
+			 */
+			do_flush_tlb_mm(mm);
+
+			/* Reload the context register of all processors
+			 * also executing in this address space.
+			 */
+			mm->context.sparc64_ctx_val = ctx;
+			on_each_cpu(context_reload, mm, 0);
+		}
+		spin_unlock(&ctx_alloc_lock);
+	}
+}
+#endif
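Note that pmd_modify() clears only PRESENT, WRITE, and EXEC before rebuilding them from the new protections; ACCESSED and DIRTY survive because pmd_set_protbits() skips them when for_modify is true. A hedged sketch of the generic caller pattern this is written for (in the style of change_huge_pmd(); not code from this patch):

	pmd_t entry = pmdp_get_and_clear(mm, addr, pmdp);

	entry = pmd_modify(entry, newprot);	/* ACCESSED/DIRTY preserved */
	set_pmd_at(mm, addr, pmdp, entry);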
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b1f279cd00bf..3e8fec391fe0 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -43,16 +43,37 @@ void flush_tlb_pending(void)
 	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm)
+static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+			      bool exec)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
-	if (pte_exec(orig))
+	if (exec)
 		vaddr |= 0x1UL;
 
+	nr = tb->tlb_nr;
+
+	if (unlikely(nr != 0 && mm != tb->mm)) {
+		flush_tlb_pending();
+		nr = 0;
+	}
+
+	if (nr == 0)
+		tb->mm = mm;
+
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
+	if (nr >= TLB_BATCH_NR)
+		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
+}
+
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
+{
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -77,26 +98,91 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 no_cache_flush:
+	if (!fullmm)
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+			       pmd_t pmd, bool exec)
+{
+	unsigned long end;
+	pte_t *pte;
+
+	pte = pte_offset_map(&pmd, vaddr);
+	end = vaddr + HPAGE_SIZE;
+	while (vaddr < end) {
+		if (pte_val(*pte) & _PAGE_VALID)
+			tlb_batch_add_one(mm, vaddr, exec);
+		pte++;
+		vaddr += PAGE_SIZE;
+	}
+	pte_unmap(pte);
+}
 
-	if (fullmm) {
-		put_cpu_var(tlb_batch);
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+
+	if (mm == &init_mm)
 		return;
+
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
+		if (pmd_val(pmd) & PMD_ISHUGE)
+			mm->context.huge_pte_count++;
+		else
+			mm->context.huge_pte_count--;
+		if (mm->context.huge_pte_count == 1)
+			hugetlb_setup(mm);
 	}
 
-	nr = tb->tlb_nr;
+	if (!pmd_none(orig)) {
+		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
 
-	if (unlikely(nr != 0 && mm != tb->mm)) {
-		flush_tlb_pending();
-		nr = 0;
+		addr &= HPAGE_MASK;
+		if (pmd_val(orig) & PMD_ISHUGE)
+			tlb_batch_add_one(mm, addr, exec);
+		else
+			tlb_batch_pmd_scan(mm, addr, orig, exec);
 	}
+}
 
-	if (nr == 0)
-		tb->mm = mm;
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+{
+	struct list_head *lh = (struct list_head *) pgtable;
 
-	tb->vaddrs[nr] = vaddr;
-	tb->tlb_nr = ++nr;
-	if (nr >= TLB_BATCH_NR)
-		flush_tlb_pending();
+	assert_spin_locked(&mm->page_table_lock);
 
-	put_cpu_var(tlb_batch);
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(lh);
+	else
+		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
+	mm->pmd_huge_pte = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+{
+	struct list_head *lh;
+	pgtable_t pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	lh = (struct list_head *) pgtable;
+	if (list_empty(lh))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		list_del(lh);
+	}
+	pte_val(pgtable[0]) = 0;
+	pte_val(pgtable[1]) = 0;
+
+	return pgtable;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
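pgtable_trans_huge_deposit()/withdraw() stash the pre-allocated pte page belonging to a huge PMD by threading a list_head through the otherwise-unused pte page itself; the assert_spin_locked() calls document that callers hold mm->page_table_lock. A hedged usage sketch of the pairing as generic THP code would drive it (not code from this patch):

	/* When a huge PMD is installed, with page_table_lock held: */
	spin_lock(&mm->page_table_lock);
	pgtable_trans_huge_deposit(mm, pgtable);
	spin_unlock(&mm->page_table_lock);

	/* When the huge PMD is later split or zapped: */
	spin_lock(&mm->page_table_lock);
	pgtable = pgtable_trans_huge_withdraw(mm);
	spin_unlock(&mm->page_table_lock);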
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a35ee832baf3..7f6474347491 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -78,7 +78,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 		base = __pa(base);
 	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
@@ -93,7 +93,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
 #define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
 #endif
@@ -190,7 +190,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	case MM_TSB_BASE:
 		hp->pgsz_idx = HV_PGSZ_IDX_BASE;
 		break;
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	case MM_TSB_HUGE:
 		hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
 		break;
@@ -205,7 +205,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	case MM_TSB_BASE:
 		hp->pgsz_mask = HV_PGSZ_MASK_BASE;
 		break;
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	case MM_TSB_HUGE:
 		hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
 		break;
@@ -427,7 +427,7 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	unsigned long huge_pte_count;
 #endif
 	unsigned int i;
@@ -436,7 +436,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 	mm->context.sparc64_ctx_val = 0UL;
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	/* We reset it to zero because the fork() page copying
 	 * will re-increment the counters as the parent PTEs are
 	 * copied into the child address space.
@@ -459,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	 */
 	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (unlikely(huge_pte_count))
 		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
 #endif
