 arch/sparc/include/asm/page_64.h    |   3
 arch/sparc/include/asm/pgtable_64.h |  55
 arch/sparc/include/asm/tsb.h        |  47
 arch/sparc/kernel/ktlb.S            | 108
 arch/sparc/kernel/vmlinux.lds.S     |   5
 arch/sparc/mm/init_64.c             | 393
 arch/sparc/mm/init_64.h             |   7
 7 files changed, 244 insertions(+), 374 deletions(-)
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 2211a8036bfa..732ba178a289 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -128,9 +128,6 @@ extern unsigned long PAGE_OFFSET;
  */
 #define MAX_PHYS_ADDRESS_BITS	47
 
-/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
- * and kpte_linear_bitmap.
- */
 #define ILOG2_4MB		22
 #define ILOG2_256MB		28
 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 31ac919920a9..a305b22ab581 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -79,22 +79,7 @@
 
 #include <linux/sched.h>
 
-extern unsigned long sparc64_valid_addr_bitmap[];
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-static inline bool __kern_addr_valid(unsigned long paddr)
-{
-	if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
-		return false;
-	return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
-}
-
-static inline bool kern_addr_valid(unsigned long addr)
-{
-	unsigned long paddr = __pa(addr);
-
-	return __kern_addr_valid(paddr);
-}
+bool kern_addr_valid(unsigned long addr);
 
 /* Entries per page directory level. */
 #define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
@@ -122,6 +107,7 @@ static inline bool kern_addr_valid(unsigned long addr)
 #define _PAGE_R		  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
 #define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
 #define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
+#define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE
 
 /* Advertise support for _PAGE_SPECIAL */
 #define __HAVE_ARCH_PTE_SPECIAL
@@ -668,26 +654,26 @@ static inline unsigned long pmd_large(pmd_t pmd)
 	return pte_val(pte) & _PAGE_PMD_HUGE;
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline unsigned long pmd_young(pmd_t pmd)
+static inline unsigned long pmd_pfn(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
 
-	return pte_young(pte);
+	return pte_pfn(pte);
 }
 
-static inline unsigned long pmd_write(pmd_t pmd)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long pmd_young(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
 
-	return pte_write(pte);
+	return pte_young(pte);
 }
 
-static inline unsigned long pmd_pfn(pmd_t pmd)
+static inline unsigned long pmd_write(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
 
-	return pte_pfn(pte);
+	return pte_write(pte);
 }
 
 static inline unsigned long pmd_trans_huge(pmd_t pmd)
@@ -781,18 +767,15 @@ static inline int pmd_present(pmd_t pmd)
 * the top bits outside of the range of any physical address size we
 * support are clear as well.  We also validate the physical address itself.
 */
-#define pmd_bad(pmd)			((pmd_val(pmd) & ~PAGE_MASK) || \
-					 !__kern_addr_valid(pmd_val(pmd)))
+#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)
 
 #define pud_none(pud)			(!pud_val(pud))
 
-#define pud_bad(pud)			((pud_val(pud) & ~PAGE_MASK) || \
-					 !__kern_addr_valid(pud_val(pud)))
+#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)
 
 #define pgd_none(pgd)			(!pgd_val(pgd))
 
-#define pgd_bad(pgd)			((pgd_val(pgd) & ~PAGE_MASK) || \
-					 !__kern_addr_valid(pgd_val(pgd)))
+#define pgd_bad(pgd)			(pgd_val(pgd) & ~PAGE_MASK)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
@@ -835,6 +818,20 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 #define pgd_present(pgd)		(pgd_val(pgd) != 0U)
 #define pgd_clear(pgdp)			(pgd_val(*(pgd)) = 0UL)
 
+static inline unsigned long pud_large(pud_t pud)
+{
+	pte_t pte = __pte(pud_val(pud));
+
+	return pte_val(pte) & _PAGE_PMD_HUGE;
+}
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+	pte_t pte = __pte(pud_val(pud));
+
+	return pte_pfn(pte);
+}
+
 /* Same in both SUN4V and SUN4U.  */
 #define pte_none(pte)			(!pte_val(pte))
 
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index a2f541905715..ecb49cfa3be9 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -133,9 +133,24 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	sub	TSB, 0x8, TSB; \
 	TSB_STORE(TSB, TAG);
 
-/* Do a kernel page table walk.  Leaves physical PTE pointer in
- * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
- * VADDR will not be clobbered, but REG2 will.
+/* Do a kernel page table walk.  Leaves valid PTE value in
+ * REG1.  Jumps to FAIL_LABEL on early page table walk
+ * termination.  VADDR will not be clobbered, but REG2 will.
+ *
+ * There are two masks we must apply to propagate bits from
+ * the virtual address into the PTE physical address field
+ * when dealing with huge pages.  This is because the page
+ * table boundaries do not match the huge page size(s) the
+ * hardware supports.
+ *
+ * In these cases we propagate the bits that are below the
+ * page table level where we saw the huge page mapping, but
+ * are still within the relevant physical bits for the huge
+ * page size in question.  So for PMD mappings (which fall on
+ * bit 23, for 8MB per PMD) we must propagate bit 22 for a
+ * 4MB huge page.  For huge PUDs (which fall on bit 33, for
+ * 8GB per PUD), we have to accommodate 256MB and 2GB huge
+ * pages.  So for those we propagate bits 32 to 28.
  */
 #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)	\
 	sethi		%hi(swapper_pg_dir), REG1; \
@@ -150,15 +165,35 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	andn		REG2, 0x7, REG2; \
 	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn		REG1, FAIL_LABEL; \
-	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
+	 sethi		%uhi(_PAGE_PUD_HUGE), REG2; \
+	brz,pn		REG1, FAIL_LABEL; \
+	 sllx		REG2, 32, REG2; \
+	andcc		REG1, REG2, %g0; \
+	sethi		%hi(0xf8000000), REG2; \
+	bne,pt		%xcc, 697f; \
+	 sllx		REG2, 1, REG2; \
+	sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
 	andn		REG2, 0x7, REG2; \
 	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+	sethi		%uhi(_PAGE_PMD_HUGE), REG2; \
 	brz,pn		REG1, FAIL_LABEL; \
-	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
+	 sllx		REG2, 32, REG2; \
+	andcc		REG1, REG2, %g0; \
+	be,pn		%xcc, 698f; \
+	 sethi		%hi(0x400000), REG2; \
+697:	brgez,pn	REG1, FAIL_LABEL; \
+	 andn		REG1, REG2, REG1; \
+	and		VADDR, REG2, REG2; \
+	ba,pt		%xcc, 699f; \
+	 or		REG1, REG2, REG1; \
+698:	sllx		VADDR, 64 - PMD_SHIFT, REG2; \
 	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
 	andn		REG2, 0x7, REG2; \
-	add		REG1, REG2, REG1;
+	ldxa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+	brgez,pn	REG1, FAIL_LABEL; \
+	 nop; \
+699:
 
 /* PMD has been loaded into REG1, interpret the value, seeing
  * if it is a HUGE PMD or a normal one.  If it is not valid
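The mask arithmetic described in the comment above is easier to follow in C. A minimal sketch of what the 697/698 paths compute, assuming a loaded huge PTE value pte and the faulting virtual address vaddr (the helper names are illustrative, not kernel code):

	/* PMD level: an 8MB region backed by 4MB pages, so propagate bit 22. */
	static unsigned long huge_pmd_paddr_bits(unsigned long pte, unsigned long vaddr)
	{
		return (pte & ~0x400000UL) | (vaddr & 0x400000UL);
	}

	/* PUD level: an 8GB region backed by 256MB or 2GB pages, so
	 * propagate bits 32..28 (0xf8000000 << 1 == 0x1f0000000).
	 */
	static unsigned long huge_pud_paddr_bits(unsigned long pte, unsigned long vaddr)
	{
		return (pte & ~0x1f0000000UL) | (vaddr & 0x1f0000000UL);
	}

The andn/and/or triple at label 697 is exactly this clear-then-merge sequence, with REG2 holding whichever of the two masks was selected.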
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 605d49204580..94a1e6648bd0 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -47,14 +47,6 @@ kvmap_itlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
 
 	TSB_LOCK_TAG(%g1, %g2, %g7)
-
-	/* Load and check PTE.  */
-	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	brgez,a,pn	%g5, kvmap_itlb_longpath
-	 TSB_STORE(%g1, %g7)
-
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
@@ -118,6 +110,12 @@ kvmap_dtlb_obp:
 	ba,pt		%xcc, kvmap_dtlb_load
 	 nop
 
+kvmap_linear_early:
+	sethi		%hi(kern_linear_pte_xor), %g7
+	ldx		[%g7 + %lo(kern_linear_pte_xor)], %g2
+	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
+	 xor		%g2, %g4, %g5
+
 	.align		32
 kvmap_dtlb_tsb4m_load:
 	TSB_LOCK_TAG(%g1, %g2, %g7)
@@ -146,105 +144,17 @@ kvmap_dtlb_4v:
 	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
 	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
 #endif
-	/* TSB entry address left in %g1, lookup linear PTE.
-	 * Must preserve %g1 and %g6 (TAG).
+	/* Linear mapping TSB lookup failed.  Fallthrough to kernel
+	 * page table based lookup.
 	 */
-kvmap_dtlb_tsb4m_miss:
-	/* Clear the PAGE_OFFSET top virtual bits, shift
-	 * down to get PFN, and make sure PFN is in range.
-	 */
-661:	sllx		%g4, 0, %g5
-	.section	.page_offset_shift_patch, "ax"
-	.word		661b
-	.previous
-
-	/* Check to see if we know about valid memory at the 4MB
-	 * chunk this physical address will reside within.
-	 */
-661:	srlx		%g5, MAX_PHYS_ADDRESS_BITS, %g2
-	.section	.page_offset_shift_patch, "ax"
-	.word		661b
-	.previous
-
-	brnz,pn		%g2, kvmap_dtlb_longpath
-	 nop
-
-	/* This unconditional branch and delay-slot nop gets patched
-	 * by the sethi sequence once the bitmap is properly setup.
-	 */
-	.globl		valid_addr_bitmap_insn
-valid_addr_bitmap_insn:
-	ba,pt		%xcc, 2f
-	 nop
-	.subsection	2
-	.globl		valid_addr_bitmap_patch
-valid_addr_bitmap_patch:
-	sethi		%hi(sparc64_valid_addr_bitmap), %g7
-	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
-	.previous
-
-661:	srlx		%g5, ILOG2_4MB, %g2
-	.section	.page_offset_shift_patch, "ax"
-	.word		661b
-	.previous
-
-	srlx		%g2, 6, %g5
-	and		%g2, 63, %g2
-	sllx		%g5, 3, %g5
-	ldx		[%g7 + %g5], %g5
-	mov		1, %g7
-	sllx		%g7, %g2, %g7
-	andcc		%g5, %g7, %g0
-	be,pn		%xcc, kvmap_dtlb_longpath
-
-2:	 sethi		%hi(kpte_linear_bitmap), %g2
-
-	/* Get the 256MB physical address index. */
-661:	sllx		%g4, 0, %g5
-	.section	.page_offset_shift_patch, "ax"
-	.word		661b
-	.previous
-
-	or		%g2, %lo(kpte_linear_bitmap), %g2
-
-661:	srlx		%g5, ILOG2_256MB, %g5
-	.section	.page_offset_shift_patch, "ax"
-	.word		661b
-	.previous
-
-	and		%g5, (32 - 1), %g7
-
-	/* Divide by 32 to get the offset into the bitmask.  */
-	srlx		%g5, 5, %g5
-	add		%g7, %g7, %g7
-	sllx		%g5, 3, %g5
-
-	/* kern_linear_pte_xor[(mask >> shift) & 3)] */
-	ldx		[%g2 + %g5], %g2
-	srlx		%g2, %g7, %g7
-	sethi		%hi(kern_linear_pte_xor), %g5
-	and		%g7, 3, %g7
-	or		%g5, %lo(kern_linear_pte_xor), %g5
-	sllx		%g7, 3, %g7
-	ldx		[%g5 + %g7], %g2
-
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
-	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
-	 xor		%g2, %g4, %g5
+	ba,a,pt		%xcc, kvmap_linear_early
 
 kvmap_dtlb_vmalloc_addr:
 	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
 
 	TSB_LOCK_TAG(%g1, %g2, %g7)
-
-	/* Load and check PTE.  */
-	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	brgez,a,pn	%g5, kvmap_dtlb_longpath
-	 TSB_STORE(%g1, %g7)
-
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* fallthrough to TLB load */
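kvmap_linear_early works because a linear-mapping PTE can be computed rather than looked up: each kern_linear_pte_xor[] entry holds, roughly, PAGE_OFFSET XORed with the desired PTE protection bits, so a single xor strips the virtual base and installs the protections in one step. A rough C equivalent of the four instructions, for a linear-map vaddr (sketch only, not the kernel's code):

	/* vaddr == PAGE_OFFSET | pa, with disjoint bit ranges, so:
	 * vaddr ^ kern_linear_pte_xor[0] == pa | pte_protection_bits
	 */
	unsigned long pte = vaddr ^ kern_linear_pte_xor[0];

The branch installed at kvmap_linear_patch routes every linear-mapping miss here at early boot; once kernel_physical_mapping_init() has built the kernel page tables it overwrites the branch with a nop, and misses fall through to the page table walk instead.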
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 932ff90fd760..0bacceb19150 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -122,11 +122,6 @@ SECTIONS
 		*(.swapper_4m_tsb_phys_patch)
 		__swapper_4m_tsb_phys_patch_end = .;
 	}
-	.page_offset_shift_patch : {
-		__page_offset_shift_patch = .;
-		*(.page_offset_shift_patch)
-		__page_offset_shift_patch_end = .;
-	}
 	.popc_3insn_patch : {
 		__popc_3insn_patch = .;
 		*(.popc_3insn_patch)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 35fcc9cb960d..848440a33125 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -75,7 +75,6 @@ unsigned long kern_linear_pte_xor[4] __read_mostly;
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
@@ -84,6 +83,7 @@ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 */
 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
 #endif
+extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
 static unsigned long cpu_pgsz_mask;
 
@@ -165,10 +165,6 @@ static void __init read_obp_memory(const char *property,
 	     cmp_p64, NULL);
 }
 
-unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
-					sizeof(unsigned long)];
-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
-
 /* Kernel physical address base and size in bytes.  */
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
@@ -1369,9 +1365,145 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
 static int pall_ents __initdata;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long max_phys_bits = 40;
+
+bool kern_addr_valid(unsigned long addr)
+{
+	unsigned long above = ((long)addr) >> max_phys_bits;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	if (above != 0 && above != -1UL)
+		return false;
+
+	if (addr >= (unsigned long) KERNBASE &&
+	    addr < (unsigned long)&_end)
+		return true;
+
+	if (addr >= PAGE_OFFSET) {
+		unsigned long pa = __pa(addr);
+
+		return pfn_valid(pa >> PAGE_SHIFT);
+	}
+
+	pgd = pgd_offset_k(addr);
+	if (pgd_none(*pgd))
+		return 0;
+
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud))
+		return 0;
+
+	if (pud_large(*pud))
+		return pfn_valid(pud_pfn(*pud));
+
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd))
+		return 0;
+
+	if (pmd_large(*pmd))
+		return pfn_valid(pmd_pfn(*pmd));
+
+	pte = pte_offset_kernel(pmd, addr);
+	if (pte_none(*pte))
+		return 0;
+
+	return pfn_valid(pte_pfn(*pte));
+}
+EXPORT_SYMBOL(kern_addr_valid);
+
+static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
+					      unsigned long vend,
+					      pud_t *pud)
+{
+	const unsigned long mask16gb = (1UL << 34) - 1UL;
+	u64 pte_val = vstart;
+
+	/* Each PUD is 8GB */
+	if ((vstart & mask16gb) ||
+	    (vend - vstart <= mask16gb)) {
+		pte_val ^= kern_linear_pte_xor[2];
+		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
+
+		return vstart + PUD_SIZE;
+	}
+
+	pte_val ^= kern_linear_pte_xor[3];
+	pte_val |= _PAGE_PUD_HUGE;
+
+	vend = vstart + mask16gb + 1UL;
+	while (vstart < vend) {
+		pud_val(*pud) = pte_val;
+
+		pte_val += PUD_SIZE;
+		vstart += PUD_SIZE;
+		pud++;
+	}
+	return vstart;
+}
+
+static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
+				   bool guard)
+{
+	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
+		return true;
+
+	return false;
+}
+
+static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
+					      unsigned long vend,
+					      pmd_t *pmd)
+{
+	const unsigned long mask256mb = (1UL << 28) - 1UL;
+	const unsigned long mask2gb = (1UL << 31) - 1UL;
+	u64 pte_val = vstart;
+
+	/* Each PMD is 8MB */
+	if ((vstart & mask256mb) ||
+	    (vend - vstart <= mask256mb)) {
+		pte_val ^= kern_linear_pte_xor[0];
+		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
+
+		return vstart + PMD_SIZE;
+	}
+
+	if ((vstart & mask2gb) ||
+	    (vend - vstart <= mask2gb)) {
+		pte_val ^= kern_linear_pte_xor[1];
+		pte_val |= _PAGE_PMD_HUGE;
+		vend = vstart + mask256mb + 1UL;
+	} else {
+		pte_val ^= kern_linear_pte_xor[2];
+		pte_val |= _PAGE_PMD_HUGE;
+		vend = vstart + mask2gb + 1UL;
+	}
+
+	while (vstart < vend) {
+		pmd_val(*pmd) = pte_val;
+
+		pte_val += PMD_SIZE;
+		vstart += PMD_SIZE;
+		pmd++;
+	}
+
+	return vstart;
+}
+
+static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
+				   bool guard)
+{
+	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
+		return true;
+
+	return false;
+}
+
 static unsigned long __ref kernel_map_range(unsigned long pstart,
-					    unsigned long pend, pgprot_t prot)
+					    unsigned long pend, pgprot_t prot,
+					    bool use_huge)
 {
 	unsigned long vstart = PAGE_OFFSET + pstart;
 	unsigned long vend = PAGE_OFFSET + pend;
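The above != 0 && above != -1UL test in the new kern_addr_valid() is a compact canonical-address check: an arithmetic right shift by max_phys_bits leaves 0 for a low-half address and -1 for a properly sign-extended high-half address, while anything else lies in the VA hole. Worked values, assuming max_phys_bits == 47 for illustration:

	(long)0x0000040000000000UL >> 47   /*  0: low half, walk the tables */
	(long)0xfffff80000000000UL >> 47   /* -1: high half, also acceptable */
	(long)0x0000800000000000UL >> 47   /*  1: inside the VA hole, rejected */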
@@ -1401,15 +1533,23 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 		if (pud_none(*pud)) {
 			pmd_t *new;
 
+			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
+				vstart = kernel_map_hugepud(vstart, vend, pud);
+				continue;
+			}
 			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 			alloc_bytes += PAGE_SIZE;
 			pud_populate(&init_mm, pud, new);
 		}
 
 		pmd = pmd_offset(pud, vstart);
-		if (!pmd_present(*pmd)) {
+		if (pmd_none(*pmd)) {
 			pte_t *new;
 
+			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
+				vstart = kernel_map_hugepmd(vstart, vend, pmd);
+				continue;
+			}
 			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
 			alloc_bytes += PAGE_SIZE;
 			pmd_populate_kernel(&init_mm, pmd, new);
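The kernel_can_map_hugepud()/kernel_can_map_hugepmd() guards gate the shortcut on natural alignment and remaining length, so kernel_map_range() only drops a huge entry where one actually fits and otherwise allocates the next table level as before. A hedged example, assuming PAGE_SHIFT of 13 so that PMD_SIZE is 8MB (the addresses are hypothetical):

	kernel_can_map_hugepmd(vstart, vstart + (8UL << 20), true);  /* true: PMD-aligned start, one full PMD left */
	kernel_can_map_hugepmd(vstart + (4UL << 20), vend, true);    /* false: 4MB offset breaks PMD alignment */
	kernel_can_map_hugepmd(vstart, vend, false);                 /* false: huge pages disabled, e.g. DEBUG_PAGEALLOC */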
@@ -1432,100 +1572,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 	return alloc_bytes;
 }
 
-extern unsigned int kvmap_linear_patch[1];
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
-static void __init kpte_set_val(unsigned long index, unsigned long val)
+static void __init flush_all_kernel_tsbs(void)
 {
-	unsigned long *ptr = kpte_linear_bitmap;
-
-	val <<= ((index % (BITS_PER_LONG / 2)) * 2);
-	ptr += (index / (BITS_PER_LONG / 2));
-
-	*ptr |= val;
-}
-
-static const unsigned long kpte_shift_min = 28;	/* 256MB */
-static const unsigned long kpte_shift_max = 34;	/* 16GB */
-static const unsigned long kpte_shift_incr = 3;
-
-static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
-					   unsigned long shift)
-{
-	unsigned long size = (1UL << shift);
-	unsigned long mask = (size - 1UL);
-	unsigned long remains = end - start;
-	unsigned long val;
-
-	if (remains < size || (start & mask))
-		return start;
-
-	/* VAL maps:
-	 *
-	 *	shift 28 --> kern_linear_pte_xor index 1
-	 *	shift 31 --> kern_linear_pte_xor index 2
-	 *	shift 34 --> kern_linear_pte_xor index 3
-	 */
-	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
-
-	remains &= ~mask;
-	if (shift != kpte_shift_max)
-		remains = size;
-
-	while (remains) {
-		unsigned long index = start >> kpte_shift_min;
+	int i;
 
-		kpte_set_val(index, val);
+	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
+		struct tsb *ent = &swapper_tsb[i];
 
-		start += 1UL << kpte_shift_min;
-		remains -= 1UL << kpte_shift_min;
+		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
 	}
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
+		struct tsb *ent = &swapper_4m_tsb[i];
 
-	return start;
-}
-
-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
-{
-	unsigned long smallest_size, smallest_mask;
-	unsigned long s;
-
-	smallest_size = (1UL << kpte_shift_min);
-	smallest_mask = (smallest_size - 1UL);
-
-	while (start < end) {
-		unsigned long orig_start = start;
-
-		for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
-			start = kpte_mark_using_shift(start, end, s);
-
-			if (start != orig_start)
-				break;
-		}
-
-		if (start == orig_start)
-			start = (start + smallest_size) & ~smallest_mask;
+		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
 	}
+#endif
 }
 
-static void __init init_kpte_bitmap(void)
-{
-	unsigned long i;
-
-	for (i = 0; i < pall_ents; i++) {
-		unsigned long phys_start, phys_end;
-
-		phys_start = pall[i].phys_addr;
-		phys_end = phys_start + pall[i].reg_size;
-
-		mark_kpte_bitmap(phys_start, phys_end);
-	}
-}
+extern unsigned int kvmap_linear_patch[1];
 
 static void __init kernel_physical_mapping_init(void)
 {
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	unsigned long i, mem_alloced = 0UL;
+	bool use_huge = true;
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	use_huge = false;
+#endif
 	for (i = 0; i < pall_ents; i++) {
 		unsigned long phys_start, phys_end;
 
@@ -1533,7 +1607,7 @@ static void __init kernel_physical_mapping_init(void)
 		phys_end = phys_start + pall[i].reg_size;
 
 		mem_alloced += kernel_map_range(phys_start, phys_end,
-						PAGE_KERNEL);
+						PAGE_KERNEL, use_huge);
 	}
 
 	printk("Allocated %ld bytes for kernel page tables.\n",
@@ -1542,8 +1616,9 @@ static void __init kernel_physical_mapping_init(void)
 	kvmap_linear_patch[0] = 0x01000000; /* nop */
 	flushi(&kvmap_linear_patch[0]);
 
+	flush_all_kernel_tsbs();
+
 	__flush_tlb_all();
-#endif
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
@@ -1553,7 +1628,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
 
 	kernel_map_range(phys_start, phys_end,
-			 (enable ? PAGE_KERNEL : __pgprot(0)));
+			 (enable ? PAGE_KERNEL : __pgprot(0)), false);
 
 	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
 			       PAGE_OFFSET + phys_end);
@@ -1581,62 +1656,11 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
 unsigned long PAGE_OFFSET;
 EXPORT_SYMBOL(PAGE_OFFSET);
 
-static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
-{
-	unsigned long final_shift;
-	unsigned int val = *insn;
-	unsigned int cnt;
-
-	/* We are patching in ilog2(max_supported_phys_address), and
-	 * we are doing so in a manner similar to a relocation addend.
-	 * That is, we are adding the shift value to whatever value
-	 * is in the shift instruction count field already.
-	 */
-	cnt = (val & 0x3f);
-	val &= ~0x3f;
-
-	/* If we are trying to shift >= 64 bits, clear the destination
-	 * register.  This can happen when phys_bits ends up being equal
-	 * to MAX_PHYS_ADDRESS_BITS.
-	 */
-	final_shift = (cnt + (64 - phys_bits));
-	if (final_shift >= 64) {
-		unsigned int rd = (val >> 25) & 0x1f;
-
-		val = 0x80100000 | (rd << 25);
-	} else {
-		val |= final_shift;
-	}
-	*insn = val;
-
-	__asm__ __volatile__("flush	%0"
-			     : /* no outputs */
-			     : "r" (insn));
-}
-
-static void __init page_offset_shift_patch(unsigned long phys_bits)
-{
-	extern unsigned int __page_offset_shift_patch;
-	extern unsigned int __page_offset_shift_patch_end;
-	unsigned int *p;
-
-	p = &__page_offset_shift_patch;
-	while (p < &__page_offset_shift_patch_end) {
-		unsigned int *insn = (unsigned int *)(unsigned long)*p;
-
-		page_offset_shift_patch_one(insn, phys_bits);
-
-		p++;
-	}
-}
-
 unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
 
 static void __init setup_page_offset(void)
 {
-	unsigned long max_phys_bits = 40;
-
 	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 		/* Cheetah/Panther support a full 64-bit virtual
 		 * address, so we can use all that our page tables
@@ -1685,8 +1709,6 @@ static void __init setup_page_offset(void)
 
 	pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
 		PAGE_OFFSET, max_phys_bits);
-
-	page_offset_shift_patch(max_phys_bits);
 }
 
 static void __init tsb_phys_patch(void)
@@ -1731,7 +1753,6 @@ static void __init tsb_phys_patch(void)
 #define NUM_KTSB_DESCR	1
 #endif
 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
-extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
 /* The swapper TSBs are loaded with a base sequence of:
 *
@@ -2077,11 +2098,9 @@ void __init paging_init(void)
 
 	pmd = swapper_low_pmd_dir + (shift / sizeof(pmd_t));
 	pud_set(&swapper_pud_dir[0], pmd);
 
 	inherit_prom_mappings();
 
-	init_kpte_bitmap();
-
 	/* Ok, we can use our TLB miss and window trap handlers safely.  */
 	setup_tba();
 
@@ -2188,70 +2207,6 @@ int page_in_phys_avail(unsigned long paddr)
 	return 0;
 }
 
-static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
-static int pavail_rescan_ents __initdata;
-
-/* Certain OBP calls, such as fetching "available" properties, can
- * claim physical memory.  So, along with initializing the valid
- * address bitmap, what we do here is refetch the physical available
- * memory list again, and make sure it provides at least as much
- * memory as 'pavail' does.
- */
-static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
-{
-	int i;
-
-	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
-
-	for (i = 0; i < pavail_ents; i++) {
-		unsigned long old_start, old_end;
-
-		old_start = pavail[i].phys_addr;
-		old_end = old_start + pavail[i].reg_size;
-		while (old_start < old_end) {
-			int n;
-
-			for (n = 0; n < pavail_rescan_ents; n++) {
-				unsigned long new_start, new_end;
-
-				new_start = pavail_rescan[n].phys_addr;
-				new_end = new_start +
-					pavail_rescan[n].reg_size;
-
-				if (new_start <= old_start &&
-				    new_end >= (old_start + PAGE_SIZE)) {
-					set_bit(old_start >> ILOG2_4MB, bitmap);
-					goto do_next_page;
-				}
-			}
-
-			prom_printf("mem_init: Lost memory in pavail\n");
-			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
-				    pavail[i].phys_addr,
-				    pavail[i].reg_size);
-			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
-				    pavail_rescan[i].phys_addr,
-				    pavail_rescan[i].reg_size);
-			prom_printf("mem_init: Cannot continue, aborting.\n");
-			prom_halt();
-
-		do_next_page:
-			old_start += PAGE_SIZE;
-		}
-	}
-}
-
-static void __init patch_tlb_miss_handler_bitmap(void)
-{
-	extern unsigned int valid_addr_bitmap_insn[];
-	extern unsigned int valid_addr_bitmap_patch[];
-
-	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
-	mb();
-	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
-	flushi(&valid_addr_bitmap_insn[0]);
-}
-
 static void __init register_page_bootmem_info(void)
 {
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -2264,18 +2219,6 @@ static void __init register_page_bootmem_info(void)
 }
 void __init mem_init(void)
 {
-	unsigned long addr, last;
-
-	addr = PAGE_OFFSET + kern_base;
-	last = PAGE_ALIGN(kern_size) + addr;
-	while (addr < last) {
-		set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
-		addr += PAGE_SIZE;
-	}
-
-	setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
-	patch_tlb_miss_handler_bitmap();
-
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
 	register_page_bootmem_info();
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index 0668b364f44d..29ff73fc96b4 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -8,15 +8,8 @@
 */
 
 #define MAX_PHYS_ADDRESS	(1UL << MAX_PHYS_ADDRESS_BITS)
-#define KPTE_BITMAP_CHUNK_SZ		(256UL * 1024UL * 1024UL)
-#define KPTE_BITMAP_BYTES	\
-	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
-#define VALID_ADDR_BITMAP_CHUNK_SZ	(4UL * 1024UL * 1024UL)
-#define VALID_ADDR_BITMAP_BYTES	\
-	((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
 
 extern unsigned long kern_linear_pte_xor[4];
-extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 extern unsigned int sparc64_highest_unlocked_tlb_ent;
 extern unsigned long sparc64_kern_pri_context;
 extern unsigned long sparc64_kern_pri_nuc_bits;
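For scale, the macros deleted here sized statically allocated arrays whose footprint the page-table-walk approach now avoids. With MAX_PHYS_ADDRESS_BITS == 47 the arithmetic works out to:

	KPTE_BITMAP_BYTES       = (2^47 / 2^28) / 4 = 2^17 = 128 KB   (two bits per 256 MB chunk)
	VALID_ADDR_BITMAP_BYTES = (2^47 / 2^22) / 8 = 2^22 =   4 MB   (one bit per 4 MB chunk)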