Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h  |  89
-rw-r--r--  arch/sparc/include/asm/tsb.h         |   3
-rw-r--r--  arch/sparc/kernel/head_64.S          |   4
-rw-r--r--  arch/sparc/kernel/ktlb.S             |   2
-rw-r--r--  arch/sparc/kernel/nmi.c              |  21
-rw-r--r--  arch/sparc/kernel/smp_64.c           |   6
-rw-r--r--  arch/sparc/kernel/sys32.S            |   2
-rw-r--r--  arch/sparc/kernel/sysfs.c            |   2
-rw-r--r--  arch/sparc/kernel/unaligned_64.c     |  12
-rw-r--r--  arch/sparc/lib/NG2memcpy.S           |   1
-rw-r--r--  arch/sparc/mm/fault_64.c             |  98
-rw-r--r--  arch/sparc/mm/gup.c                  |   2
-rw-r--r--  arch/sparc/mm/init_64.c              |  12
-rw-r--r--  arch/sparc/mm/tlb.c                  |  26
-rw-r--r--  arch/sparc/mm/tsb.c                  |  14
15 files changed, 168 insertions(+), 126 deletions(-)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 0f9e94537eee..1a49ffdf9da9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define TLBTEMP_BASE            _AC(0x0000000006000000,UL)
-#define TSBMAP_BASE             _AC(0x0000000008000000,UL)
+#define TSBMAP_8K_BASE          _AC(0x0000000008000000,UL)
+#define TSBMAP_4M_BASE          _AC(0x0000000008400000,UL)
 #define MODULES_VADDR           _AC(0x0000000010000000,UL)
 #define MODULES_LEN             _AC(0x00000000e0000000,UL)
 #define MODULES_END             _AC(0x00000000f0000000,UL)
@@ -71,6 +73,23 @@
 
 #include <linux/sched.h>
 
+extern unsigned long sparc64_valid_addr_bitmap[];
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+static inline bool __kern_addr_valid(unsigned long paddr)
+{
+        if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
+                return false;
+        return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
+}
+
+static inline bool kern_addr_valid(unsigned long addr)
+{
+        unsigned long paddr = __pa(addr);
+
+        return __kern_addr_valid(paddr);
+}
+
 /* Entries per page directory level. */
 #define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PMD    (1UL << PMD_BITS)
@@ -79,9 +98,12 @@
 /* Kernel has a separate 44bit address space. */
 #define FIRST_USER_ADDRESS      0
 
-#define pte_ERROR(e)    __builtin_trap()
-#define pmd_ERROR(e)    __builtin_trap()
-#define pgd_ERROR(e)    __builtin_trap()
+#define pmd_ERROR(e)                                                    \
+        pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",             \
+               __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
+#define pgd_ERROR(e)                                                    \
+        pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",             \
+               __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
 
 #endif /* !(__ASSEMBLY__) */
 
@@ -258,8 +280,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 {
         unsigned long mask, tmp;
 
-        /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
-         * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
+        /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
+         * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
          *
          * Even if we use negation tricks the result is still a 6
          * instruction sequence, so don't try to play fancy and just
@@ -289,10 +311,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
         "       .previous\n"
         : "=r" (mask), "=r" (tmp)
         : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
-               _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
+               _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
                _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
           "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
-               _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
+               _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
                _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
         return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -633,7 +655,7 @@ static inline unsigned long pmd_large(pmd_t pmd)
 {
         pte_t pte = __pte(pmd_val(pmd));
 
-        return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
+        return pte_val(pte) & _PAGE_PMD_HUGE;
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -719,20 +741,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
         return __pmd(pte_val(pte));
 }
 
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-        unsigned long mask;
-
-        if (tlb_type == hypervisor)
-                mask = _PAGE_PRESENT_4V;
-        else
-                mask = _PAGE_PRESENT_4U;
-
-        pmd_val(pmd) &= ~mask;
-
-        return pmd;
-}
-
 static inline pmd_t pmd_mksplitting(pmd_t pmd)
 {
         pte_t pte = __pte(pmd_val(pmd));
@@ -757,6 +765,20 @@ static inline int pmd_present(pmd_t pmd)
 
 #define pmd_none(pmd)                   (!pmd_val(pmd))
 
+/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
+ * very simple, it's just the physical address.  PTE tables are of
+ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
+ * the top bits outside of the range of any physical address size we
+ * support are clear as well.  We also validate the physical itself.
+ */
+#define pmd_bad(pmd)                    ((pmd_val(pmd) & ~PAGE_MASK) || \
+                                         !__kern_addr_valid(pmd_val(pmd)))
+
+#define pud_none(pud)                   (!pud_val(pud))
+
+#define pud_bad(pud)                    ((pud_val(pud) & ~PAGE_MASK) || \
+                                         !__kern_addr_valid(pud_val(pud)))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                        pmd_t *pmdp, pmd_t pmd);
@@ -790,10 +812,7 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 #define pud_page_vaddr(pud)             \
         ((unsigned long) __va(pud_val(pud)))
 #define pud_page(pud)                   virt_to_page((void *)pud_page_vaddr(pud))
-#define pmd_bad(pmd)                    (0)
 #define pmd_clear(pmdp)                 (pmd_val(*(pmdp)) = 0UL)
-#define pud_none(pud)                   (!pud_val(pud))
-#define pud_bad(pud)                    (0)
 #define pud_present(pud)                (pud_val(pud) != 0U)
 #define pud_clear(pudp)                 (pud_val(*(pudp)) = 0UL)
 
@@ -893,6 +912,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                  pmd_t *pmd);
 
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                            pmd_t *pmdp);
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                        pgtable_t pgtable);
@@ -919,18 +942,6 @@ extern unsigned long pte_file(pte_t);
 extern pte_t pgoff_to_pte(unsigned long);
 #define PTE_FILE_MAX_BITS       (64UL - PAGE_SHIFT - 1UL)
 
-extern unsigned long sparc64_valid_addr_bitmap[];
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-static inline bool kern_addr_valid(unsigned long addr)
-{
-        unsigned long paddr = __pa(addr);
-
-        if ((paddr >> 41UL) != 0UL)
-                return false;
-        return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
-}
-
 extern int page_in_phys_avail(unsigned long paddr);
 
 /*
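Note: the pmd_bad()/pud_bad() helpers added above exploit the fact that a non-huge sparc64 PMD or PUD is simply the physical address of the next-level table, so an entry is rejected when any sub-PAGE_SIZE bit is set or when the physical address itself fails __kern_addr_valid(). The standalone userspace program below is a sketch of the alignment and range portions of that test; PAGE_SHIFT_, MAX_PHYS_BITS_ and the sample values are made-up stand-ins rather than kernel symbols, and the valid-address bitmap lookup is omitted.

/* Illustrative sketch only: models the alignment and range checks behind
 * pmd_bad()/__kern_addr_valid() above with hypothetical constants.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_     13      /* sparc64 uses 8K base pages */
#define MAX_PHYS_BITS_  41      /* example physical address width */

static bool paddr_in_range(uint64_t paddr)
{
        /* Bits above the supported physical range mean a corrupt entry. */
        return (paddr >> MAX_PHYS_BITS_) == 0;
}

static bool pmd_points_to_pte_table(uint64_t pmd_val)
{
        /* A non-huge PMD is just the physical address of a page-sized PTE
         * table, so the sub-page bits must be clear and the address itself
         * must be plausible.
         */
        return (pmd_val & ((1UL << PAGE_SHIFT_) - 1)) == 0 &&
               paddr_in_range(pmd_val);
}

int main(void)
{
        printf("%d\n", pmd_points_to_pte_table(0x0000000004000000UL)); /* 1: looks sane */
        printf("%d\n", pmd_points_to_pte_table(0x0000000004000008UL)); /* 0: sub-page bits set */
        printf("%d\n", pmd_points_to_pte_table(0x1000000000000000UL)); /* 0: out of range */
        return 0;
}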
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 2230f80d9fe3..90916f955cac 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        andcc           REG1, REG2, %g0;                \
        be,pt           %xcc, 700f;                     \
         sethi          %hi(4 * 1024 * 1024), REG2;     \
-       andn            REG1, REG2, REG1;               \
+       brgez,pn        REG1, FAIL_LABEL;               \
+        andn           REG1, REG2, REG1;               \
        and             VADDR, REG2, REG2;              \
        brlz,pt         REG1, PTE_LABEL;                \
         or             REG1, REG2, REG1;               \
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 26b706a1867d..452f04fe8da6 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -282,8 +282,8 @@ sun4v_chip_type:
        stx     %l2, [%l4 + 0x0]
        ldx     [%sp + 2047 + 128 + 0x50], %l3  ! physaddr low
        /* 4MB align */
-       srlx    %l3, 22, %l3
-       sllx    %l3, 22, %l3
+       srlx    %l3, ILOG2_4MB, %l3
+       sllx    %l3, ILOG2_4MB, %l3
        stx     %l3, [%l4 + 0x8]
 
        /* Leave service as-is, "call-method" */
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 542e96ac4d39..605d49204580 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -277,7 +277,7 @@ kvmap_dtlb_load:
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 kvmap_vmemmap:
        sub     %g4, %g5, %g5
-       srlx    %g5, 22, %g5
+       srlx    %g5, ILOG2_4MB, %g5
        sethi   %hi(vmemmap_table), %g1
        sllx    %g5, 3, %g5
        or      %g1, %lo(vmemmap_table), %g1
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 6479256fd5a4..337094556916 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -68,27 +68,16 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
 
 static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 {
+        int this_cpu = smp_processor_id();
+
         if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
                        pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
                 return;
 
-        console_verbose();
-        bust_spinlocks(1);
-
-        printk(KERN_EMERG "%s", str);
-        printk(" on CPU%d, ip %08lx, registers:\n",
-                smp_processor_id(), regs->tpc);
-        show_regs(regs);
-        dump_stack();
-
-        bust_spinlocks(0);
-
         if (do_panic || panic_on_oops)
-                panic("Non maskable interrupt");
-
-        nmi_exit();
-        local_irq_enable();
-        do_exit(SIGBUS);
+                panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+        else
+                WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 }
 
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 9781048161ab..745a3633ce14 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -149,7 +149,7 @@ void cpu_panic(void)
 #define NUM_ROUNDS      64      /* magic value */
 #define NUM_ITERS       5       /* likewise */
 
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 static unsigned long go[SLAVE + 1];
 
 #define DEBUG_TICK_SYNC 0
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
         go[MASTER] = 0;
         membar_safe("#StoreLoad");
 
-        spin_lock_irqsave(&itc_sync_lock, flags);
+        raw_spin_lock_irqsave(&itc_sync_lock, flags);
         {
                 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                         while (!go[MASTER])
@@ -268,7 +268,7 @@ static void smp_synchronize_one_tick(int cpu)
                         membar_safe("#StoreLoad");
                 }
         }
-        spin_unlock_irqrestore(&itc_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index f7c72b6efc27..d066eb18650c 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
 SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
 SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
 SIGN1(sys32_select, compat_sys_select, %o0)
-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
+SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index a364000ca1aa..7f41d40b7e6e 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
                                     size_t count)
 {
         unsigned long val, err;
-        int ret = sscanf(buf, "%ld", &val);
+        int ret = sscanf(buf, "%lu", &val);
 
         if (ret != 1)
                 return -EINVAL;
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 3c1a7cb31579..35ab8b60d256 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
 unsigned long compute_effective_address(struct pt_regs *regs,
                                         unsigned int insn, unsigned int rd)
 {
+        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
         unsigned int rs1 = (insn >> 14) & 0x1f;
         unsigned int rs2 = insn & 0x1f;
-        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+        unsigned long addr;
 
         if (insn & 0x2000) {
                 maybe_flush_windows(rs1, 0, rd, from_kernel);
-                return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+                addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
         } else {
                 maybe_flush_windows(rs1, rs2, rd, from_kernel);
-                return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+                addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
         }
+
+        if (!from_kernel && test_thread_flag(TIF_32BIT))
+                addr &= 0xffffffff;
+
+        return addr;
 }
 
 /* This is just to make gcc think die_if_kernel does return... */
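Note: the change above makes compute_effective_address() mask the computed address to 32 bits for non-privileged TIF_32BIT tasks, so a wrapped user address is reported the same way the hardware generates it. The short sketch below models just that truncation; the register values and the helper name are invented for illustration.

/* Illustrative sketch only: models the 32-bit truncation added to
 * compute_effective_address() above.  Inputs are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t effective_address(uint64_t rs1, int64_t simm13, int task_is_32bit)
{
        uint64_t addr = rs1 + (uint64_t)simm13;

        /* A 32-bit task's address arithmetic wraps at 4GB, so mask the
         * result the way the patch does for user TIF_32BIT tasks.
         */
        if (task_is_32bit)
                addr &= 0xffffffffULL;
        return addr;
}

int main(void)
{
        /* 0xfffffff8 + 0x10 wraps to 0x8 for a 32-bit task... */
        printf("%#llx\n", (unsigned long long)effective_address(0xfffffff8ULL, 0x10, 1));
        /* ...but stays 0x100000008 for a 64-bit one. */
        printf("%#llx\n", (unsigned long long)effective_address(0xfffffff8ULL, 0x10, 0));
        return 0;
}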
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 2c20ad63ddbf..30eee6e8a81b 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -236,6 +236,7 @@ FUNC_NAME:      /* %o0=dst, %o1=src, %o2=len */
         */
        VISEntryHalf
 
+       membar          #Sync
        alignaddr       %o1, %g0, %g0
 
        add     %o1, (64 - 1), %o4
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 69bb818fdd79..4ced3fc66130 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -96,38 +96,51 @@ static unsigned int get_user_insn(unsigned long tpc)
         pte_t *ptep, pte;
         unsigned long pa;
         u32 insn = 0;
-        unsigned long pstate;
 
-        if (pgd_none(*pgdp))
-                goto outret;
+        if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
+                goto out;
         pudp = pud_offset(pgdp, tpc);
-        if (pud_none(*pudp))
-                goto outret;
-        pmdp = pmd_offset(pudp, tpc);
-        if (pmd_none(*pmdp))
-                goto outret;
+        if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
+                goto out;
 
         /* This disables preemption for us as well. */
-        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-        __asm__ __volatile__("wrpr %0, %1, %%pstate"
-                                : : "r" (pstate), "i" (PSTATE_IE));
-        ptep = pte_offset_map(pmdp, tpc);
-        pte = *ptep;
-        if (!pte_present(pte))
-                goto out;
+        local_irq_disable();
+
+        pmdp = pmd_offset(pudp, tpc);
+        if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
+                goto out_irq_enable;
 
-        pa = (pte_pfn(pte) << PAGE_SHIFT);
-        pa += (tpc & ~PAGE_MASK);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        if (pmd_trans_huge(*pmdp)) {
+                if (pmd_trans_splitting(*pmdp))
+                        goto out_irq_enable;
 
-        /* Use phys bypass so we don't pollute dtlb/dcache. */
-        __asm__ __volatile__("lduwa [%1] %2, %0"
-                             : "=r" (insn)
-                             : "r" (pa), "i" (ASI_PHYS_USE_EC));
+                pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
+                pa += tpc & ~HPAGE_MASK;
 
+                /* Use phys bypass so we don't pollute dtlb/dcache. */
+                __asm__ __volatile__("lduwa [%1] %2, %0"
+                                     : "=r" (insn)
+                                     : "r" (pa), "i" (ASI_PHYS_USE_EC));
+        } else
+#endif
+        {
+                ptep = pte_offset_map(pmdp, tpc);
+                pte = *ptep;
+                if (pte_present(pte)) {
+                        pa = (pte_pfn(pte) << PAGE_SHIFT);
+                        pa += (tpc & ~PAGE_MASK);
+
+                        /* Use phys bypass so we don't pollute dtlb/dcache. */
+                        __asm__ __volatile__("lduwa [%1] %2, %0"
+                                             : "=r" (insn)
+                                             : "r" (pa), "i" (ASI_PHYS_USE_EC));
+                }
+                pte_unmap(ptep);
+        }
+out_irq_enable:
+        local_irq_enable();
 out:
-        pte_unmap(ptep);
-        __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-outret:
         return insn;
 }
 
@@ -153,7 +166,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
 }
 
 static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
-                             unsigned int insn, int fault_code)
+                             unsigned long fault_addr, unsigned int insn,
+                             int fault_code)
 {
         unsigned long addr;
         siginfo_t info;
@@ -161,10 +175,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
         info.si_code = code;
         info.si_signo = sig;
         info.si_errno = 0;
-        if (fault_code & FAULT_CODE_ITLB)
+        if (fault_code & FAULT_CODE_ITLB) {
                 addr = regs->tpc;
-        else
-                addr = compute_effective_address(regs, insn, 0);
+        } else {
+                /* If we were able to probe the faulting instruction, use it
+                 * to compute a precise fault address.  Otherwise use the fault
+                 * time provided address which may only have page granularity.
+                 */
+                if (insn)
+                        addr = compute_effective_address(regs, insn, 0);
+                else
+                        addr = fault_addr;
+        }
         info.si_addr = (void __user *) addr;
         info.si_trapno = 0;
 
@@ -239,7 +261,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
                 /* The si_code was set to make clear whether
                  * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                  */
-                do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
+                do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
                 return;
         }
 
@@ -259,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
         show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-                                                         unsigned long addr)
-{
-        static int times;
-
-        if (times++ < 10)
-                printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-                       "reports 64-bit fault address [%lx]\n",
-                       current->comm, current->pid, addr);
-        show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
         enum ctx_state prev_state = exception_enter();
@@ -300,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                                 goto intr_or_no_mm;
                         }
                 }
-                if (unlikely((address >> 32) != 0)) {
-                        bogus_32bit_fault_address(regs, address);
+                if (unlikely((address >> 32) != 0))
                         goto intr_or_no_mm;
-                }
         }
 
         if (regs->tstate & TSTATE_PRIV) {
@@ -525,7 +533,7 @@ do_sigbus:
          * Send a sigbus, regardless of whether we were in kernel
          * or user mode.
          */
-        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
+        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
 
         /* Kernel mode? Handle exceptions or die */
         if (regs->tstate & TSTATE_PRIV)
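Note: with the fault_64.c changes above, do_fault_siginfo() now receives the raw fault address alongside the decoded instruction, and the comment in that hunk states the policy: compute a precise effective address when the faulting instruction could be read, otherwise fall back to the possibly page-granular address delivered with the fault. A tiny sketch of that selection, with invented field names and values:

/* Illustrative sketch only: the address selection described in the
 * do_fault_siginfo() comment above.  Everything here is made up.
 */
#include <stdint.h>
#include <stdio.h>

struct fault_info {
        int      is_itlb;      /* instruction-TLB fault? */
        uint64_t tpc;          /* trap program counter */
        uint32_t insn;         /* 0 if the faulting instruction could not be read */
        uint64_t decoded_ea;   /* effective address decoded from insn */
        uint64_t fault_addr;   /* address reported with the fault */
};

static uint64_t siginfo_address(const struct fault_info *f)
{
        if (f->is_itlb)
                return f->tpc;
        return f->insn ? f->decoded_ea : f->fault_addr;
}

int main(void)
{
        struct fault_info data_fault  = { 0, 0x100a2c, 0xc2004000, 0x10000a48, 0x10000000 };
        struct fault_info blind_fault = { 0, 0x100a2c, 0, 0, 0x10000000 };

        printf("%#llx\n", (unsigned long long)siginfo_address(&data_fault));  /* precise */
        printf("%#llx\n", (unsigned long long)siginfo_address(&blind_fault)); /* page-granular */
        return 0;
}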
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index c4d3da68b800..1aed0432c64b 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
         struct page *head, *page, *tail;
         int refs;
 
-        if (!pmd_large(pmd))
+        if (!(pmd_val(pmd) & _PAGE_VALID))
                 return 0;
 
         if (write && !pmd_write(pmd))
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index eafbc65c9c47..ed3c969a5f4c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -588,7 +588,7 @@ static void __init remap_kernel(void)
         int i, tlb_ent = sparc64_highest_locked_tlbent();
 
         tte_vaddr = (unsigned long) KERNBASE;
-        phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+        phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
         tte_data = kern_large_tte(phys_page);
 
         kern_locked_tte_data = tte_data;
@@ -1881,7 +1881,7 @@ void __init paging_init(void)
 
         BUILD_BUG_ON(NR_CPUS > 4096);
 
-        kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+        kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
         kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
         /* Invalidate both kernel TSBs. */
@@ -1937,7 +1937,7 @@ void __init paging_init(void)
         shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 
         real_end = (unsigned long)_end;
-        num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+        num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
         printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
                num_kernel_image_mappings);
 
@@ -2094,7 +2094,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
 
                         if (new_start <= old_start &&
                             new_end >= (old_start + PAGE_SIZE)) {
-                                set_bit(old_start >> 22, bitmap);
+                                set_bit(old_start >> ILOG2_4MB, bitmap);
                                 goto do_next_page;
                         }
                 }
@@ -2143,7 +2143,7 @@ void __init mem_init(void)
         addr = PAGE_OFFSET + kern_base;
         last = PAGE_ALIGN(kern_size) + addr;
         while (addr < last) {
-                set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+                set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
                 addr += PAGE_SIZE;
         }
 
@@ -2267,7 +2267,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
                 void *block;
 
                 if (!(*vmem_pp & _PAGE_VALID)) {
-                        block = vmemmap_alloc_block(1UL << 22, node);
+                        block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
                         if (!block)
                                 return -ENOMEM;
 
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b12cb5e72812..b89aba217e3b 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -134,7 +134,7 @@ no_cache_flush:
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
-                               pmd_t pmd, bool exec)
+                               pmd_t pmd)
 {
         unsigned long end;
         pte_t *pte;
@@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
         pte = pte_offset_map(&pmd, vaddr);
         end = vaddr + HPAGE_SIZE;
         while (vaddr < end) {
-                if (pte_val(*pte) & _PAGE_VALID)
+                if (pte_val(*pte) & _PAGE_VALID) {
+                        bool exec = pte_exec(*pte);
+
                         tlb_batch_add_one(mm, vaddr, exec);
+                }
                 pte++;
                 vaddr += PAGE_SIZE;
         }
@@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
         }
 
         if (!pmd_none(orig)) {
-                pte_t orig_pte = __pte(pmd_val(orig));
-                bool exec = pte_exec(orig_pte);
-
                 addr &= HPAGE_MASK;
                 if (pmd_trans_huge(orig)) {
+                        pte_t orig_pte = __pte(pmd_val(orig));
+                        bool exec = pte_exec(orig_pte);
+
                         tlb_batch_add_one(mm, addr, exec);
                         tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
                 } else {
-                        tlb_batch_pmd_scan(mm, addr, orig, exec);
+                        tlb_batch_pmd_scan(mm, addr, orig);
                 }
         }
 }
 
+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                     pmd_t *pmdp)
+{
+        pmd_t entry = *pmdp;
+
+        pmd_val(entry) &= ~_PAGE_VALID;
+
+        set_pmd_at(vma->vm_mm, address, pmdp, entry);
+        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+}
+
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                 pgtable_t pgtable)
 {
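Note: the pmdp_invalidate() added above follows a clear-then-flush shape: clear _PAGE_VALID in a copy of the huge PMD, write it back through set_pmd_at(), then flush the covered range. The userspace sketch below models only that ordering; the bit position, types and flush helper are hypothetical stand-ins, not the kernel's.

/* Illustrative sketch only: the clear-valid-then-flush ordering used by
 * pmdp_invalidate() above, modeled with made-up names.
 */
#include <stdint.h>
#include <stdio.h>

#define PMD_VALID_      (1ULL << 63)    /* invented bit position */

static void flush_range(uint64_t start, uint64_t len)
{
        printf("flush %#llx..%#llx\n", (unsigned long long)start,
               (unsigned long long)(start + len));
}

static void pmd_invalidate(uint64_t *pmdp, uint64_t address, uint64_t hpage_size)
{
        uint64_t entry = *pmdp;

        entry &= ~PMD_VALID_;                /* the walker no longer matches */
        *pmdp = entry;                       /* publish the not-valid entry first */
        flush_range(address, hpage_size);    /* then shoot down stale TLB entries */
}

int main(void)
{
        uint64_t pmd = PMD_VALID_ | 0x4000000ULL;

        pmd_invalidate(&pmd, 0x200000, 0x400000);
        printf("pmd is now %#llx\n", (unsigned long long)pmd);
        return 0;
}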
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f5d506fdddad..fe19b81acc09 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
         mm->context.tsb_block[tsb_idx].tsb_nentries =
                 tsb_bytes / sizeof(struct tsb);
 
-        base = TSBMAP_BASE;
+        switch (tsb_idx) {
+        case MM_TSB_BASE:
+                base = TSBMAP_8K_BASE;
+                break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+        case MM_TSB_HUGE:
+                base = TSBMAP_4M_BASE;
+                break;
+#endif
+        default:
+                BUG();
+        }
+
         tte = pgprot_val(PAGE_KERNEL_LOCKED);
         tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
         BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
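Note: the setup_tsb_params() change above pairs with the TSBMAP_8K_BASE/TSBMAP_4M_BASE split in pgtable_64.h, giving each TSB kind its own window of kernel virtual space instead of one shared base. A quick arithmetic check of the ranges quoted in that header comment (not kernel code):

/* Illustrative sketch only: verifies the window arithmetic quoted in the
 * pgtable_64.h comment above.  The constants are copied from that comment.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long tsbmap_8k_base = 0x0000000008000000UL;
        unsigned long tsbmap_4m_base = 0x0000000008400000UL;
        unsigned long old_window_end = 0x000000000a000000UL;

        /* The 8K TSB window ends exactly where the 4M TSB window begins,
         * and both fit inside the old single 0x8000000-0xa000000 range.
         */
        assert(tsbmap_4m_base - tsbmap_8k_base == 4UL * 1024 * 1024);
        assert(tsbmap_4m_base + 4UL * 1024 * 1024 <= old_window_end);

        printf("each TSB window spans %#lx bytes\n", tsbmap_4m_base - tsbmap_8k_base);
        return 0;
}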