author     David S. Miller <davem@davemloft.net>   2013-09-26 16:45:15 -0400
committer  David S. Miller <davem@davemloft.net>   2013-11-13 15:33:08 -0500
commit     a7b9403f0e6d5f99139dca18be885819c8d380a1
tree       3e88ddc9250a1408eefc48a20a8fb445e19fcaa3 /arch/sparc
parent     2b77933c28f5044629bb19e8045aae65b72b939d
sparc64: Encode huge PMDs using PTE encoding.
Now that we have 64 bits for PMDs we can stop using special encodings
for the huge PMD values and just put real PTEs in there.
We allocate a _PAGE_PMD_HUGE bit to distinguish between plain PMDs and
huge ones. The bit sits in the same position in both the 4U and 4V PTE layouts.
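For reference, here is the new bit and the helper that sets it, condensed
from the pgtable_64.h hunks below (a huge PMD is simply a huge PTE with
this extra software bit OR'd in):

        #define _PAGE_PMD_HUGE  _AC(0x0100000000000000,UL) /* Huge page, same bit in 4U and 4V */

        static inline pmd_t pmd_mkhuge(pmd_t pmd)
        {
                pte_t pte = __pte(pmd_val(pmd));

                pte = pte_mkhuge(pte);
                pte_val(pte) |= _PAGE_PMD_HUGE;

                return __pmd(pte_val(pte));
        }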
We also use _PAGE_SPECIAL to indicate the splitting state, since a
huge PMD cannot also be special.
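Concretely, the splitting helpers reduce to the _PAGE_SPECIAL accessors,
as added in the pgtable_64.h hunk below:

        static inline pmd_t pmd_mksplitting(pmd_t pmd)
        {
                pte_t pte = __pte(pmd_val(pmd));

                pte = pte_mkspecial(pte);

                return __pmd(pte_val(pte));
        }

        static inline unsigned long pmd_trans_splitting(pmd_t pmd)
        {
                pte_t pte = __pte(pmd_val(pmd));

                /* Only meaningful for huge PMDs, which are never "special". */
                return pmd_trans_huge(pmd) && pte_special(pte);
        }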
All of the PMD --> PTE translation code disappears, and most of the
huge PMD bit modifications and tests just degenerate into the PTE
operations. In particular USER_PGTABLE_CHECK_PMD_HUGE becomes
trivial.
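The degenerate pattern is the same for every accessor: wrap the PMD value
in a pte_t, call the existing PTE helper, and wrap the result back up.
pmd_mkdirty() from the hunk below is representative:

        static inline pmd_t pmd_mkdirty(pmd_t pmd)
        {
                pte_t pte = __pte(pmd_val(pmd));

                pte = pte_mkdirty(pte);

                return __pmd(pte_val(pte));
        }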
As a side effect, normal PMDs don't shift the physical address around.
This also speeds up the page table walks in the TLB miss paths since
they don't have to do the shifts any more.
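With the shifts gone, a PMD (or PUD) entry now holds the physical address
of the table it points to directly, as in the pgtable_64.h hunk below; the
matching "sllx REG1, PMD_PADDR_SHIFT, REG1" and "sllx REG1,
PGD_PADDR_SHIFT, REG1" instructions drop out of the tsb.h walker macros:

        static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
        {
                unsigned long val = __pa((unsigned long) (ptep));

                pmd_val(*pmdp) = val;
        }

        #define pud_set(pudp, pmdp)     \
                (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))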
Another non-trivial aspect is that pte_modify() has to be changed
to preserve the _PAGE_PMD_HUGE bits as well as the page size field
of the pte.
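In practice that means the preserved-bits mask in pte_modify() grows by
_PAGE_PMD_HUGE and _PAGE_SZALL_4U/_PAGE_SZALL_4V, and pmd_modify() becomes
a thin wrapper around it (from the pgtable_64.h hunk below):

        #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
        {
                pte_t pte = __pte(pmd_val(pmd));

                pte = pte_modify(pte, newprot);

                return __pmd(pte_val(pte));
        }
        #endif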
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc'):

 arch/sparc/include/asm/pgtable_64.h | 181
 arch/sparc/include/asm/tsb.h        |  92
 arch/sparc/mm/gup.c                 |   9
 arch/sparc/mm/init_64.c             | 102
 arch/sparc/mm/tlb.c                 |   9
 5 files changed, 131 insertions(+), 262 deletions(-)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index eee803e10b92..8358dc144959 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -67,29 +67,6 @@
 #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
 #endif
 
-/* PMDs point to PTE tables which are 4K aligned.  */
-#define PMD_PADDR	_AC(0xfffffffe,UL)
-#define PMD_PADDR_SHIFT	_AC(11,UL)
-
-#define PMD_ISHUGE	_AC(0x00000001,UL)
-
-/* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge
- * pages, this frees up a bunch of bits in the layout that we can
- * use for the protection settings and software metadata.
- */
-#define PMD_HUGE_PADDR		_AC(0xfffff800,UL)
-#define PMD_HUGE_PROTBITS	_AC(0x000007ff,UL)
-#define PMD_HUGE_PRESENT	_AC(0x00000400,UL)
-#define PMD_HUGE_WRITE		_AC(0x00000200,UL)
-#define PMD_HUGE_DIRTY		_AC(0x00000100,UL)
-#define PMD_HUGE_ACCESSED	_AC(0x00000080,UL)
-#define PMD_HUGE_EXEC		_AC(0x00000040,UL)
-#define PMD_HUGE_SPLITTING	_AC(0x00000020,UL)
-
-/* PGDs point to PMD tables which are 8K aligned.  */
-#define PGD_PADDR	_AC(0xfffffffc,UL)
-#define PGD_PADDR_SHIFT	_AC(11,UL)
-
 #ifndef __ASSEMBLY__
 
 #include <linux/sched.h>
@@ -112,6 +89,7 @@
 #define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
 #define _PAGE_R		  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
 #define _PAGE_SPECIAL	  _AC(0x0200000000000000,UL) /* Special page         */
+#define _PAGE_PMD_HUGE	  _AC(0x0100000000000000,UL) /* Huge page            */
 
 /* Advertise support for _PAGE_SPECIAL */
 #define __HAVE_ARCH_PTE_SPECIAL
@@ -125,6 +103,7 @@
 #define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
 #define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
 #define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
+#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
 #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
 #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
@@ -155,6 +134,7 @@
 #define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
 #define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
 #define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
+#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
 #define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
 #define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
 #define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
@@ -243,16 +223,13 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot);
-#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
-
-extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
-
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 {
-	/* Do nothing, mk_pmd() does this part.  */
-	return pmd;
+	pte_t pte = pfn_pte(page_nr, pgprot);
+
+	return __pmd(pte_val(pte));
 }
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
 #endif
 
 /* This one can be done with two shifts.  */
@@ -313,14 +290,25 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 	: "=r" (mask), "=r" (tmp)
 	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
 	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
-	       _PAGE_SPECIAL),
+	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
 	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
 	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
-	       _PAGE_SPECIAL));
+	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
 	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_modify(pte, newprot);
+
+	return __pmd(pte_val(pte));
+}
+#endif
+
 static inline pte_t pgoff_to_pte(unsigned long off)
 {
 	off <<= PAGE_SHIFT;
@@ -361,7 +349,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
  */
 #define pgprot_noncached pgprot_noncached
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 static inline pte_t pte_mkhuge(pte_t pte)
 {
 	unsigned long mask;
@@ -379,6 +367,17 @@ static inline pte_t pte_mkhuge(pte_t pte)
 
 	return __pte(pte_val(pte) | mask);
 }
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkhuge(pte);
+	pte_val(pte) |= _PAGE_PMD_HUGE;
+
+	return __pmd(pte_val(pte));
+}
+#endif
 #endif
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -630,86 +629,125 @@ static inline unsigned long pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
-static inline int pmd_large(pmd_t pmd)
+static inline unsigned long pmd_large(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
-		(PMD_ISHUGE | PMD_HUGE_PRESENT);
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_young(pmd_t pmd)
+static inline unsigned long pmd_young(pmd_t pmd)
 {
-	return pmd_val(pmd) & PMD_HUGE_ACCESSED;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_young(pte);
 }
 
-static inline int pmd_write(pmd_t pmd)
+static inline unsigned long pmd_write(pmd_t pmd)
 {
-	return pmd_val(pmd) & PMD_HUGE_WRITE;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_write(pte);
 }
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-	unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR;
+	pte_t pte = __pte(pmd_val(pmd));
 
-	return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
+	return pte_pfn(pte);
 }
 
-static inline int pmd_trans_splitting(pmd_t pmd)
+static inline unsigned long pmd_trans_huge(pmd_t pmd)
 {
-	return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
-		(PMD_ISHUGE|PMD_HUGE_SPLITTING);
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_val(pte) & _PAGE_PMD_HUGE;
 }
 
-static inline int pmd_trans_huge(pmd_t pmd)
+static inline unsigned long pmd_trans_splitting(pmd_t pmd)
 {
-	return pmd_val(pmd) & PMD_ISHUGE;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pmd_trans_huge(pmd) && pte_special(pte);
 }
 
 #define has_transparent_hugepage() 1
 
 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~PMD_HUGE_ACCESSED;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkold(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~PMD_HUGE_WRITE;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_wrprotect(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
-	pmd_val(pmd) |= PMD_HUGE_DIRTY;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkdirty(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
-	pmd_val(pmd) |= PMD_HUGE_ACCESSED;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkyoung(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-	pmd_val(pmd) |= PMD_HUGE_WRITE;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkwrite(pte);
+
+	return __pmd(pte_val(pte));
 }
 
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 {
-	pmd_val(pmd) &= ~PMD_HUGE_PRESENT;
+	unsigned long mask;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PRESENT_4V;
+	else
+		mask = _PAGE_PRESENT_4U;
+
+	pmd_val(pmd) &= ~mask;
+
 	return pmd;
 }
 
 static inline pmd_t pmd_mksplitting(pmd_t pmd)
 {
-	pmd_val(pmd) |= PMD_HUGE_SPLITTING;
-	return pmd;
+	pte_t pte = __pte(pmd_val(pmd));
+
+	pte = pte_mkspecial(pte);
+
+	return __pmd(pte_val(pte));
 }
 
-extern pgprot_t pmd_pgprot(pmd_t entry);
+static inline pgprot_t pmd_pgprot(pmd_t entry)
+{
+	unsigned long val = pmd_val(entry);
+
+	return __pgprot(val);
+}
 #endif
 
 static inline int pmd_present(pmd_t pmd)
@@ -732,26 +770,25 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
 static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT;
+	unsigned long val = __pa((unsigned long) (ptep));
 
 	pmd_val(*pmdp) = val;
 }
 
 #define pud_set(pudp, pmdp)	\
-	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT))
+	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
 static inline unsigned long __pmd_page(pmd_t pmd)
 {
-	unsigned long paddr = pmd_val(pmd);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_val(pmd) & PMD_ISHUGE)
-		paddr &= PMD_HUGE_PADDR;
-#endif
-	paddr <<= PMD_PADDR_SHIFT;
-	return ((unsigned long) __va(paddr));
+	pte_t pte = __pte(pmd_val(pmd));
+	unsigned long pfn;
+
+	pfn = pte_pfn(pte);
+
+	return ((unsigned long) __va(pfn << PAGE_SHIFT));
 }
 #define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
 #define pud_page_vaddr(pud)		\
-	((unsigned long) __va((pud_val(pud)<<PGD_PADDR_SHIFT)))
+	((unsigned long) __va(pud_val(pud)))
 #define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
 #define pmd_bad(pmd)			(0)
 #define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index cc0432f15817..2230f80d9fe3 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -147,100 +147,34 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	brz,pn	REG1, FAIL_LABEL; \
 	 sllx	VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, PGD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x7, REG2; \
 	ldxa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn	REG1, FAIL_LABEL; \
 	 sllx	VADDR, 64 - PMD_SHIFT, REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, PMD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x7, REG2; \
 	add	REG1, REG2, REG1;
 
-	/* These macros exists only to make the PMD translator below
-	 * easier to read.  It hides the ELF section switch for the
-	 * sun4v code patching.
-	 */
-#define OR_PTE_BIT_1INSN(REG, NAME)			\
-661:	or		REG, _PAGE_##NAME##_4U, REG;	\
-	.section	.sun4v_1insn_patch, "ax";	\
-	.word		661b;				\
-	or		REG, _PAGE_##NAME##_4V, REG;	\
-	.previous;
-
-#define OR_PTE_BIT_2INSN(REG, TMP, NAME)		\
-661:	sethi		%hi(_PAGE_##NAME##_4U), TMP;	\
-	or		REG, TMP, REG;			\
-	.section	.sun4v_2insn_patch, "ax";	\
-	.word		661b;				\
-	mov		-1, TMP;			\
-	or		REG, _PAGE_##NAME##_4V, REG;	\
-	.previous;
-
-	/* Load into REG the PTE value for VALID, CACHE, and SZHUGE.
-	 *
-	 * We are fabricating an 8MB page using 2 4MB HW pages here.
-	 */
-#define BUILD_PTE_VALID_SZHUGE_CACHE(VADDR, PADDR_BITS, REG)	\
-	sethi		%hi(4 * 1024 * 1024), REG;		\
-	andn		PADDR_BITS, REG, PADDR_BITS;		\
-	and		VADDR, REG, REG;			\
-	or		PADDR_BITS, REG, PADDR_BITS;		\
-661:	sethi		%uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG;	\
-	.section	.sun4v_1insn_patch, "ax";		\
-	.word		661b;					\
-	sethi		%uhi(_PAGE_VALID), REG;			\
-	.previous;						\
-	sllx		REG, 32, REG;				\
-661:	or		REG, _PAGE_CP_4U|_PAGE_CV_4U, REG;	\
-	.section	.sun4v_1insn_patch, "ax";		\
-	.word		661b;					\
-	or		REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \
-	.previous;
-
 	/* PMD has been loaded into REG1, interpret the value, seeing
 	 * if it is a HUGE PMD or a normal one.  If it is not valid
 	 * then jump to FAIL_LABEL.  If it is a HUGE PMD, and it
 	 * translates to a valid PTE, branch to PTE_LABEL.
 	 *
-	 * We translate the PMD by hand, one bit at a time,
-	 * constructing the huge PTE.
-	 *
-	 * So we construct the PTE in REG2 as follows:
-	 *
-	 * 1) Extract the PMD PFN from REG1 and place it into REG2.
-	 *
-	 * 2) Translate PMD protection bits in REG1 into REG2, one bit
-	 *    at a time using andcc tests on REG1 and OR's into REG2.
-	 *
-	 *    Only two bits to be concerned with here, EXEC and WRITE.
-	 *    Now REG1 is freed up and we can use it as a temporary.
-	 *
-	 * 3) Construct the VALID, CACHE, and page size PTE bits in
-	 *    REG1, OR with REG2 to form final PTE.
+	 * We have to propagate the 4MB bit of the virtual address
+	 * because we are fabricating 8MB pages using 4MB hw pages.
 	 */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
 	brz,pn		REG1, FAIL_LABEL;		\
-	 andcc		REG1, PMD_ISHUGE, %g0;		\
-	be,pt		%xcc, 700f;			\
-	 and		REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2; \
-	cmp		REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED; \
-	bne,pn		%xcc, FAIL_LABEL;		\
-	 andn		REG1, PMD_HUGE_PROTBITS, REG2;	\
-	sllx		REG2, PMD_PADDR_SHIFT, REG2;	\
-	/* REG2 now holds PFN << PAGE_SHIFT */		\
-	andcc		REG1, PMD_HUGE_WRITE, %g0;	\
-	bne,a,pt	%xcc, 1f;			\
-	 OR_PTE_BIT_1INSN(REG2, W);			\
-1:	andcc		REG1, PMD_HUGE_EXEC, %g0;	\
-	be,pt		%xcc, 1f;			\
-	 nop;						\
-	OR_PTE_BIT_2INSN(REG2, REG1, EXEC);		\
-	/* REG1 can now be clobbered, build final PTE */	\
-1:	BUILD_PTE_VALID_SZHUGE_CACHE(VADDR, REG2, REG1);	\
-	ba,pt		%xcc, PTE_LABEL;		\
-	 or		REG1, REG2, REG1;		\
+	 sethi		%uhi(_PAGE_PMD_HUGE), REG2;	\
+	sllx		REG2, 32, REG2;			\
+	andcc		REG1, REG2, %g0;		\
+	be,pt		%xcc, 700f;			\
+	 sethi		%hi(4 * 1024 * 1024), REG2;	\
+	andn		REG1, REG2, REG1;		\
+	and		VADDR, REG2, REG2;		\
+	brlz,pt		REG1, PTE_LABEL;		\
+	 or		REG1, REG2, REG1;		\
 700:
 #else
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
@@ -265,13 +199,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	brz,pn	REG1, FAIL_LABEL; \
 	 sllx	VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, PGD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x7, REG2; \
 	ldxa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
 	sllx	VADDR, 64 - PMD_SHIFT, REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
-	sllx	REG1, PMD_PADDR_SHIFT, REG1; \
 	andn	REG2, 0x7, REG2; \
 	add	REG1, REG2, REG1; \
 	ldxa	[REG1] ASI_PHYS_USE_EC, REG1; \
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 01ee23dd724d..c4d3da68b800 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -71,13 +71,12 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 			int *nr)
 {
 	struct page *head, *page, *tail;
-	u32 mask;
 	int refs;
 
-	mask = PMD_HUGE_PRESENT;
-	if (write)
-		mask |= PMD_HUGE_WRITE;
-	if ((pmd_val(pmd) & mask) != mask)
+	if (!pmd_large(pmd))
+		return 0;
+
+	if (write && !pmd_write(pmd))
 		return 0;
 
 	refs = 0;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 828784419e25..bd6430ded69f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2602,121 +2602,21 @@ void pgtable_free(void *table, bool is_page)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
-{
-	if (pgprot_val(pgprot) & _PAGE_VALID)
-		pmd_val(pmd) |= PMD_HUGE_PRESENT;
-	if (tlb_type == hypervisor) {
-		if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
-			pmd_val(pmd) |= PMD_HUGE_WRITE;
-		if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
-			pmd_val(pmd) |= PMD_HUGE_EXEC;
-
-		if (!for_modify) {
-			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
-				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
-			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
-				pmd_val(pmd) |= PMD_HUGE_DIRTY;
-		}
-	} else {
-		if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
-			pmd_val(pmd) |= PMD_HUGE_WRITE;
-		if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
-			pmd_val(pmd) |= PMD_HUGE_EXEC;
-
-		if (!for_modify) {
-			if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
-				pmd_val(pmd) |= PMD_HUGE_ACCESSED;
-			if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
-				pmd_val(pmd) |= PMD_HUGE_DIRTY;
-		}
-	}
-
-	return pmd;
-}
-
-pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-{
-	pmd_t pmd;
-
-	pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
-	pmd_val(pmd) |= PMD_ISHUGE;
-	pmd = pmd_set_protbits(pmd, pgprot, false);
-	return pmd;
-}
-
-pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
-{
-	pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
-			  PMD_HUGE_WRITE |
-			  PMD_HUGE_EXEC);
-	pmd = pmd_set_protbits(pmd, newprot, true);
-	return pmd;
-}
-
-pgprot_t pmd_pgprot(pmd_t entry)
-{
-	unsigned long pte = 0;
-
-	if (pmd_val(entry) & PMD_HUGE_PRESENT)
-		pte |= _PAGE_VALID;
-
-	if (tlb_type == hypervisor) {
-		if (pmd_val(entry) & PMD_HUGE_PRESENT)
-			pte |= _PAGE_PRESENT_4V;
-		if (pmd_val(entry) & PMD_HUGE_EXEC)
-			pte |= _PAGE_EXEC_4V;
-		if (pmd_val(entry) & PMD_HUGE_WRITE)
-			pte |= _PAGE_W_4V;
-		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
-			pte |= _PAGE_ACCESSED_4V;
-		if (pmd_val(entry) & PMD_HUGE_DIRTY)
-			pte |= _PAGE_MODIFIED_4V;
-		pte |= _PAGE_CP_4V|_PAGE_CV_4V;
-	} else {
-		if (pmd_val(entry) & PMD_HUGE_PRESENT)
-			pte |= _PAGE_PRESENT_4U;
-		if (pmd_val(entry) & PMD_HUGE_EXEC)
-			pte |= _PAGE_EXEC_4U;
-		if (pmd_val(entry) & PMD_HUGE_WRITE)
-			pte |= _PAGE_W_4U;
-		if (pmd_val(entry) & PMD_HUGE_ACCESSED)
-			pte |= _PAGE_ACCESSED_4U;
-		if (pmd_val(entry) & PMD_HUGE_DIRTY)
-			pte |= _PAGE_MODIFIED_4U;
-		pte |= _PAGE_CP_4U|_PAGE_CV_4U;
-	}
-
-	return __pgprot(pte);
-}
-
 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 			  pmd_t *pmd)
 {
 	unsigned long pte, flags;
 	struct mm_struct *mm;
 	pmd_t entry = *pmd;
-	pgprot_t prot;
 
 	if (!pmd_large(entry) || !pmd_young(entry))
 		return;
 
-	pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
-	pte <<= PMD_PADDR_SHIFT;
-	pte |= _PAGE_VALID;
+	pte = pmd_val(entry);
 
 	/* We are fabricating 8MB pages using 4MB real hw pages.  */
 	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
 
-	prot = pmd_pgprot(entry);
-
-	if (tlb_type == hypervisor)
-		pgprot_val(prot) |= _PAGE_SZHUGE_4V;
-	else
-		pgprot_val(prot) |= _PAGE_SZHUGE_4U;
-
-	pte |= pgprot_val(prot);
-
 	mm = vma->vm_mm;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 97d1e56e9863..f1bd83019e71 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -161,8 +161,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	if (mm == &init_mm)
 		return;
 
-	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
-		if (pmd_val(pmd) & PMD_ISHUGE)
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
+		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
 			mm->context.huge_pte_count++;
 		else
 			mm->context.huge_pte_count--;
@@ -178,10 +178,11 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	}
 
 	if (!pmd_none(orig)) {
-		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
+		pte_t orig_pte = __pte(pmd_val(orig));
+		bool exec = pte_exec(orig_pte);
 
 		addr &= HPAGE_MASK;
-		if (pmd_val(orig) & PMD_ISHUGE) {
+		if (pmd_trans_huge(orig)) {
 			tlb_batch_add_one(mm, addr, exec);
 			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
 		} else {