author     Linus Torvalds <torvalds@linux-foundation.org>  2014-05-06 12:08:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-05-06 12:08:03 -0400
commit     9bd29c56ade57106501d21fd4b0936644a426cb6 (patch)
tree       39cf0349a7877fdbf91dd3609b2d1b7b6028a751 /arch
parent     30321c7b658a5661eea715b33e82a5fd3e33e180 (diff)
parent     fe866433f843b080246ce729b5e6b27b5f5d9a58 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc fixes from David Miller:
 "I've been auditing the THP support on sparc64 and found several bugs,
  hopefully most of which are fixed completely here.

  Also an RT kernel locking fix from Kirill Tkhai"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Give more detailed information in {pgd,pmd}_ERROR() and kill pte_ERROR().
  sparc64: Add basic validations to {pud,pmd}_bad().
  sparc64: Use 'ILOG2_4MB' instead of constant '22'.
  sparc64: Fix range check in kern_addr_valid().
  sparc64: Fix top-level fault handling bugs.
  sparc64: Handle 32-bit tasks properly in compute_effective_address().
  sparc64: Don't use _PAGE_PRESENT in pte_modify() mask.
  sparc64: Fix hex values in comment above pte_modify().
  sparc64: Fix bugs in get_user_pages_fast() wrt. THP.
  sparc64: Fix huge PMD invalidation.
  sparc64: Fix executable bit testing in set_pmd_at() paths.
  sparc64: Normalize NMI watchdog logging and behavior.
  sparc64: Make itc_sync_lock raw
  sparc64: Fix argument sign extension for compat_sys_futex().
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h  | 83
-rw-r--r--  arch/sparc/include/asm/tsb.h         |  3
-rw-r--r--  arch/sparc/kernel/head_64.S          |  4
-rw-r--r--  arch/sparc/kernel/ktlb.S             |  2
-rw-r--r--  arch/sparc/kernel/nmi.c              | 21
-rw-r--r--  arch/sparc/kernel/smp_64.c           |  6
-rw-r--r--  arch/sparc/kernel/sys32.S            |  2
-rw-r--r--  arch/sparc/kernel/unaligned_64.c     | 12
-rw-r--r--  arch/sparc/mm/fault_64.c             | 82
-rw-r--r--  arch/sparc/mm/gup.c                  |  2
-rw-r--r--  arch/sparc/mm/init_64.c              | 12
-rw-r--r--  arch/sparc/mm/tlb.c                  | 26
12 files changed, 148 insertions, 107 deletions
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 0f9e94537eee..fde5abaac0cc 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -71,6 +71,23 @@
 
 #include <linux/sched.h>
 
+extern unsigned long sparc64_valid_addr_bitmap[];
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+static inline bool __kern_addr_valid(unsigned long paddr)
+{
+        if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
+                return false;
+        return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
+}
+
+static inline bool kern_addr_valid(unsigned long addr)
+{
+        unsigned long paddr = __pa(addr);
+
+        return __kern_addr_valid(paddr);
+}
+
 /* Entries per page directory level. */
 #define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PMD    (1UL << PMD_BITS)
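The hunk above splits the old check in two: __kern_addr_valid() works on a physical address, rejecting anything with bits set above MAX_PHYS_ADDRESS_BITS before consulting a bitmap kept at 4MB granularity, while kern_addr_valid() simply translates the virtual address first. Below is a stand-alone user-space model of that logic, a sketch only: the constant values (47 and 22), the toy bitmap size, and the helper names are illustrative assumptions, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; sparc64 defines MAX_PHYS_ADDRESS_BITS and
 * ILOG2_4MB itself. */
#define MAX_PHYS_ADDRESS_BITS   47
#define ILOG2_4MB               22
#define BITS_PER_LONG           64

/* One bit per 4MB chunk of a toy 1GB "physical" space. */
static unsigned long valid_addr_bitmap[((1UL << 30) >> ILOG2_4MB) / BITS_PER_LONG];

static bool test_bit(unsigned long nr, const unsigned long *map)
{
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static bool kern_addr_valid_model(uint64_t paddr)
{
        if (paddr >> MAX_PHYS_ADDRESS_BITS)     /* beyond any supported RAM */
                return false;
        return test_bit(paddr >> ILOG2_4MB, valid_addr_bitmap);
}

int main(void)
{
        valid_addr_bitmap[0] = 1;       /* pretend the first 4MB chunk is RAM */

        printf("%d\n", kern_addr_valid_model(0x12345));    /* 1: inside chunk 0 */
        printf("%d\n", kern_addr_valid_model(0x400000));   /* 0: chunk 1 not set */
        printf("%d\n", kern_addr_valid_model(1ULL << 50)); /* 0: out of range */
        return 0;
}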
@@ -79,9 +96,12 @@
 /* Kernel has a separate 44bit address space. */
 #define FIRST_USER_ADDRESS     0
 
-#define pte_ERROR(e)   __builtin_trap()
-#define pmd_ERROR(e)   __builtin_trap()
-#define pgd_ERROR(e)   __builtin_trap()
+#define pmd_ERROR(e)                                                   \
+        pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",            \
+               __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
+#define pgd_ERROR(e)                                                   \
+        pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",            \
+               __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
 
 #endif /* !(__ASSEMBLY__) */
 
@@ -258,8 +278,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 {
        unsigned long mask, tmp;
 
-       /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
-        * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
+       /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
+        * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
         *
         * Even if we use negation tricks the result is still a 6
         * instruction sequence, so don't try to play fancy and just
@@ -289,10 +309,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
        "       .previous\n"
        : "=r" (mask), "=r" (tmp)
        : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
-              _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
+              _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
          "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
-              _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
+              _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
        return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -633,7 +653,7 @@ static inline unsigned long pmd_large(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
 
-       return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
+       return pte_val(pte) & _PAGE_PMD_HUGE;
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -719,20 +739,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
        return __pmd(pte_val(pte));
 }
 
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-       unsigned long mask;
-
-       if (tlb_type == hypervisor)
-               mask = _PAGE_PRESENT_4V;
-       else
-               mask = _PAGE_PRESENT_4U;
-
-       pmd_val(pmd) &= ~mask;
-
-       return pmd;
-}
-
 static inline pmd_t pmd_mksplitting(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
@@ -757,6 +763,20 @@ static inline int pmd_present(pmd_t pmd)
 
 #define pmd_none(pmd)                  (!pmd_val(pmd))
 
+/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
+ * very simple, it's just the physical address.  PTE tables are of
+ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
+ * the top bits outside of the range of any physical address size we
+ * support are clear as well.  We also validate the physical itself.
+ */
+#define pmd_bad(pmd)                   ((pmd_val(pmd) & ~PAGE_MASK) || \
+                                        !__kern_addr_valid(pmd_val(pmd)))
+
+#define pud_none(pud)                  (!pud_val(pud))
+
+#define pud_bad(pud)                   ((pud_val(pud) & ~PAGE_MASK) || \
+                                        !__kern_addr_valid(pud_val(pud)))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                        pmd_t *pmdp, pmd_t pmd);
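As the new comment says, a non-huge PMD is just the physical address of a page-sized PTE table, so pmd_bad()/pud_bad() reduce to "no stray bits below page alignment, and the address must be valid RAM". A hedged stand-alone sketch of that shape of check follows; the 8K page size and the stub validity test are assumptions standing in for the real __kern_addr_valid().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      13                      /* assumed 8K pages */
#define PAGE_MASK       (~((1UL << PAGE_SHIFT) - 1))

/* Stand-in for __kern_addr_valid(); the real one consults a RAM bitmap. */
static bool addr_valid_stub(uint64_t paddr)
{
        return paddr < (1ULL << 40);            /* toy physical limit */
}

static bool pmd_bad_model(uint64_t pmd_val)
{
        return (pmd_val & ~PAGE_MASK) ||        /* low bits must be clear */
               !addr_valid_stub(pmd_val);       /* and point at real memory */
}

int main(void)
{
        printf("%d\n", pmd_bad_model(0x40000000));      /* 0: aligned, in range */
        printf("%d\n", pmd_bad_model(0x40000008));      /* 1: stray low bits */
        printf("%d\n", pmd_bad_model(1ULL << 50));      /* 1: not valid memory */
        return 0;
}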
@@ -790,10 +810,7 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 #define pud_page_vaddr(pud)            \
        ((unsigned long) __va(pud_val(pud)))
 #define pud_page(pud)                  virt_to_page((void *)pud_page_vaddr(pud))
-#define pmd_bad(pmd)                   (0)
 #define pmd_clear(pmdp)                (pmd_val(*(pmdp)) = 0UL)
-#define pud_none(pud)                  (!pud_val(pud))
-#define pud_bad(pud)                   (0)
 #define pud_present(pud)               (pud_val(pud) != 0U)
 #define pud_clear(pudp)                (pud_val(*(pudp)) = 0UL)
 
@@ -893,6 +910,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                                  pmd_t *pmd);
 
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                            pmd_t *pmdp);
+
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                        pgtable_t pgtable);
@@ -919,18 +940,6 @@ extern unsigned long pte_file(pte_t);
 extern pte_t pgoff_to_pte(unsigned long);
 #define PTE_FILE_MAX_BITS      (64UL - PAGE_SHIFT - 1UL)
 
-extern unsigned long sparc64_valid_addr_bitmap[];
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-static inline bool kern_addr_valid(unsigned long addr)
-{
-       unsigned long paddr = __pa(addr);
-
-       if ((paddr >> 41UL) != 0UL)
-               return false;
-       return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
-}
-
 extern int page_in_phys_avail(unsigned long paddr);
 
 /*
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 2230f80d9fe3..90916f955cac 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        andcc           REG1, REG2, %g0;                \
        be,pt           %xcc, 700f;                     \
         sethi          %hi(4 * 1024 * 1024), REG2;     \
-       andn            REG1, REG2, REG1;               \
+       brgez,pn        REG1, FAIL_LABEL;               \
+        andn           REG1, REG2, REG1;               \
        and             VADDR, REG2, REG2;              \
        brlz,pt         REG1, PTE_LABEL;                \
         or             REG1, REG2, REG1;               \
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 26b706a1867d..452f04fe8da6 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -282,8 +282,8 @@ sun4v_chip_type:
        stx     %l2, [%l4 + 0x0]
        ldx     [%sp + 2047 + 128 + 0x50], %l3  ! physaddr low
        /* 4MB align */
-       srlx    %l3, 22, %l3
-       sllx    %l3, 22, %l3
+       srlx    %l3, ILOG2_4MB, %l3
+       sllx    %l3, ILOG2_4MB, %l3
        stx     %l3, [%l4 + 0x8]
 
        /* Leave service as-is, "call-method" */
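The ILOG2_4MB conversions throughout this pull are cosmetic: 4MB is 2^22, so shifting right and back left by ILOG2_4MB simply clears the low 22 bits, exactly what the srlx/sllx pair above does. A one-function sketch of the equivalence (assuming ILOG2_4MB is 22, as the name implies):

#include <assert.h>
#include <stdint.h>

#define ILOG2_4MB       22              /* log2(4 * 1024 * 1024) */

/* Round an address down to a 4MB boundary, modelling the srlx/sllx pair. */
static uint64_t align_4mb(uint64_t paddr)
{
        return (paddr >> ILOG2_4MB) << ILOG2_4MB;
}

int main(void)
{
        uint64_t p = 0x12f45678;

        assert(align_4mb(p) == (p & ~((1ULL << ILOG2_4MB) - 1)));
        assert(align_4mb(p) == 0x12c00000);
        return 0;
}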
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 542e96ac4d39..605d49204580 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -277,7 +277,7 @@ kvmap_dtlb_load:
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 kvmap_vmemmap:
        sub             %g4, %g5, %g5
-       srlx            %g5, 22, %g5
+       srlx            %g5, ILOG2_4MB, %g5
        sethi           %hi(vmemmap_table), %g1
        sllx            %g5, 3, %g5
        or              %g1, %lo(vmemmap_table), %g1
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 6479256fd5a4..337094556916 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -68,27 +68,16 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
 
 static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 {
+       int this_cpu = smp_processor_id();
+
        if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
                       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
                return;
 
-       console_verbose();
-       bust_spinlocks(1);
-
-       printk(KERN_EMERG "%s", str);
-       printk(" on CPU%d, ip %08lx, registers:\n",
-               smp_processor_id(), regs->tpc);
-       show_regs(regs);
-       dump_stack();
-
-       bust_spinlocks(0);
-
        if (do_panic || panic_on_oops)
-               panic("Non maskable interrupt");
-
-       nmi_exit();
-       local_irq_enable();
-       do_exit(SIGBUS);
+               panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+       else
+               WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 }
 
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 9781048161ab..745a3633ce14 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -149,7 +149,7 @@ void cpu_panic(void)
 #define NUM_ROUNDS     64      /* magic value */
 #define NUM_ITERS      5       /* likewise */
 
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 static unsigned long go[SLAVE + 1];
 
 #define DEBUG_TICK_SYNC        0
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
        go[MASTER] = 0;
        membar_safe("#StoreLoad");
 
-       spin_lock_irqsave(&itc_sync_lock, flags);
+       raw_spin_lock_irqsave(&itc_sync_lock, flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                        while (!go[MASTER])
@@ -268,7 +268,7 @@ static void smp_synchronize_one_tick(int cpu)
                        membar_safe("#StoreLoad");
                }
        }
-       spin_unlock_irqrestore(&itc_sync_lock, flags);
+       raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index f7c72b6efc27..d066eb18650c 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
 SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
 SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
 SIGN1(sys32_select, compat_sys_select, %o0)
-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
+SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
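The SIGN* macros in sys32.S widen selected 32-bit syscall arguments before calling the 64-bit compat handler. After the fix only %o1, futex's signed 'op' argument, is sign-extended; the remaining futex arguments are unsigned 32-bit values and are no longer widened with their sign bit. A minimal user-space model of the two widening behaviours (an illustration of the idea, not the SPARC assembly):

#include <stdint.h>
#include <stdio.h>

/* A compat syscall receives its arguments in 64-bit registers whose upper
 * halves are not trustworthy.  A signed "int" argument must be sign-extended
 * from bit 31; an unsigned u32 argument must only be zero-extended. */
static long sign_extend_arg(uint64_t reg)
{
        return (long)(int32_t)reg;      /* replicate bit 31 into bits 32..63 */
}

static unsigned long zero_extend_arg(uint64_t reg)
{
        return (uint32_t)reg;           /* clear bits 32..63 */
}

int main(void)
{
        uint64_t reg = 0xdeadbeefffffffffULL;   /* junk in the upper half */

        printf("signed int arg:    %ld\n", sign_extend_arg(reg));  /* -1 */
        printf("unsigned u32 arg:  %lu\n", zero_extend_arg(reg));  /* 4294967295 */
        return 0;
}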
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 3c1a7cb31579..35ab8b60d256 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
 unsigned long compute_effective_address(struct pt_regs *regs,
                                         unsigned int insn, unsigned int rd)
 {
+       int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
-       int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+       unsigned long addr;
 
        if (insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd, from_kernel);
-               return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+               addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd, from_kernel);
-               return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+               addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }
+
+       if (!from_kernel && test_thread_flag(TIF_32BIT))
+               addr &= 0xffffffff;
+
+       return addr;
 }
 
 /* This is just to make gcc think die_if_kernel does return... */
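For a task running in 32-bit mode the CPU only uses the low 32 bits of a computed address, so the software calculation above now truncates the same way instead of returning a full 64-bit sum. The sketch below is a self-contained model of that computation; the fake register file, the local sign_extend_imm13() reimplementation, and the function names are illustrative assumptions, with register fetching and window flushing deliberately left out.

#include <stdint.h>
#include <stdio.h>

static long sign_extend_imm13(uint32_t insn)
{
        return ((int32_t)(insn << 19)) >> 19;   /* simm13 lives in bits 12..0 */
}

static uint64_t effective_address(const uint64_t regs[32], uint32_t insn,
                                  int task_is_32bit)
{
        uint32_t rs1 = (insn >> 14) & 0x1f;
        uint32_t rs2 = insn & 0x1f;
        uint64_t addr;

        if (insn & 0x2000)                      /* i bit set: reg + simm13 */
                addr = regs[rs1] + sign_extend_imm13(insn);
        else                                    /* reg + reg */
                addr = regs[rs1] + regs[rs2];

        if (task_is_32bit)                      /* compat task: wrap to 32 bits */
                addr &= 0xffffffff;
        return addr;
}

int main(void)
{
        uint64_t regs[32] = { 0 };

        regs[1] = 0xfffffff8;                   /* near the top of a 32-bit space */
        /* i=1, rs1=1, simm13=16: 0xfffffff8 + 16 wraps to 0x8 for a 32-bit task */
        printf("%#llx\n", (unsigned long long)
               effective_address(regs, (1u << 13) | (1u << 14) | 16, 1));
        return 0;
}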
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 69bb818fdd79..a8ff0d1a3b69 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -96,38 +96,51 @@ static unsigned int get_user_insn(unsigned long tpc)
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;
-       unsigned long pstate;
 
-       if (pgd_none(*pgdp))
-               goto outret;
+       if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
+               goto out;
        pudp = pud_offset(pgdp, tpc);
-       if (pud_none(*pudp))
-               goto outret;
-       pmdp = pmd_offset(pudp, tpc);
-       if (pmd_none(*pmdp))
-               goto outret;
+       if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
+               goto out;
 
        /* This disables preemption for us as well. */
-       __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-       __asm__ __volatile__("wrpr %0, %1, %%pstate"
-                            : : "r" (pstate), "i" (PSTATE_IE));
-       ptep = pte_offset_map(pmdp, tpc);
-       pte = *ptep;
-       if (!pte_present(pte))
-               goto out;
+       local_irq_disable();
 
-       pa  = (pte_pfn(pte) << PAGE_SHIFT);
-       pa += (tpc & ~PAGE_MASK);
+       pmdp = pmd_offset(pudp, tpc);
+       if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
+               goto out_irq_enable;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       if (pmd_trans_huge(*pmdp)) {
+               if (pmd_trans_splitting(*pmdp))
+                       goto out_irq_enable;
 
-       /* Use phys bypass so we don't pollute dtlb/dcache. */
-       __asm__ __volatile__("lduwa [%1] %2, %0"
-                            : "=r" (insn)
-                            : "r" (pa), "i" (ASI_PHYS_USE_EC));
+               pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
+               pa += tpc & ~HPAGE_MASK;
 
+               /* Use phys bypass so we don't pollute dtlb/dcache. */
+               __asm__ __volatile__("lduwa [%1] %2, %0"
+                                    : "=r" (insn)
+                                    : "r" (pa), "i" (ASI_PHYS_USE_EC));
+       } else
+#endif
+       {
+               ptep = pte_offset_map(pmdp, tpc);
+               pte = *ptep;
+               if (pte_present(pte)) {
+                       pa  = (pte_pfn(pte) << PAGE_SHIFT);
+                       pa += (tpc & ~PAGE_MASK);
+
+                       /* Use phys bypass so we don't pollute dtlb/dcache. */
+                       __asm__ __volatile__("lduwa [%1] %2, %0"
+                                            : "=r" (insn)
+                                            : "r" (pa), "i" (ASI_PHYS_USE_EC));
+               }
+               pte_unmap(ptep);
+       }
+out_irq_enable:
+       local_irq_enable();
 out:
-       pte_unmap(ptep);
-       __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-outret:
        return insn;
 }
 
@@ -153,7 +166,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
 }
 
 static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
-                            unsigned int insn, int fault_code)
+                            unsigned long fault_addr, unsigned int insn,
+                            int fault_code)
 {
        unsigned long addr;
        siginfo_t info;
@@ -161,10 +175,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
        info.si_code = code;
        info.si_signo = sig;
        info.si_errno = 0;
-       if (fault_code & FAULT_CODE_ITLB)
+       if (fault_code & FAULT_CODE_ITLB) {
                addr = regs->tpc;
-       else
-               addr = compute_effective_address(regs, insn, 0);
+       } else {
+               /* If we were able to probe the faulting instruction, use it
+                * to compute a precise fault address.  Otherwise use the fault
+                * time provided address which may only have page granularity.
+                */
+               if (insn)
+                       addr = compute_effective_address(regs, insn, 0);
+               else
+                       addr = fault_addr;
+       }
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;
 
@@ -239,7 +261,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
-               do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
+               do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
                return;
        }
 
@@ -525,7 +547,7 @@ do_sigbus:
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
-       do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
+       do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
 
        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
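do_fault_siginfo() now receives the raw fault address as a fallback: when the faulting instruction could be read it still computes a precise effective address, otherwise it reports the (possibly page-granular) address supplied by the trap handler instead of decoding a zero instruction word. Reduced to a stand-alone sketch, with pick_si_addr() and decode_exact_address() as hypothetical stand-ins for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for compute_effective_address(): pretend the decoded
 * instruction yields an exact byte address. */
static uint64_t decode_exact_address(uint32_t insn)
{
        return 0x1000 + (insn & 0xfff);
}

/* Prefer the decoded instruction, fall back to the fault-time address. */
static uint64_t pick_si_addr(uint32_t insn, uint64_t fault_addr)
{
        if (insn)
                return decode_exact_address(insn);
        return fault_addr;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)pick_si_addr(0x123, 0x2000)); /* 0x1123 */
        printf("%#llx\n", (unsigned long long)pick_si_addr(0, 0x2000));     /* 0x2000 */
        return 0;
}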
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index c4d3da68b800..1aed0432c64b 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
        struct page *head, *page, *tail;
        int refs;
 
-       if (!pmd_large(pmd))
+       if (!(pmd_val(pmd) & _PAGE_VALID))
                return 0;
 
        if (write && !pmd_write(pmd))
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index eafbc65c9c47..ed3c969a5f4c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -588,7 +588,7 @@ static void __init remap_kernel(void)
        int i, tlb_ent = sparc64_highest_locked_tlbent();
 
        tte_vaddr = (unsigned long) KERNBASE;
-       phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
        tte_data = kern_large_tte(phys_page);
 
        kern_locked_tte_data = tte_data;
@@ -1881,7 +1881,7 @@ void __init paging_init(void)
 
        BUILD_BUG_ON(NR_CPUS > 4096);
 
-       kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
        kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
        /* Invalidate both kernel TSBs.  */
@@ -1937,7 +1937,7 @@ void __init paging_init(void)
        shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 
        real_end = (unsigned long)_end;
-       num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+       num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
        printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
               num_kernel_image_mappings);
 
@@ -2094,7 +2094,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
 
                        if (new_start <= old_start &&
                            new_end >= (old_start + PAGE_SIZE)) {
-                               set_bit(old_start >> 22, bitmap);
+                               set_bit(old_start >> ILOG2_4MB, bitmap);
                                goto do_next_page;
                        }
                }
@@ -2143,7 +2143,7 @@ void __init mem_init(void)
        addr = PAGE_OFFSET + kern_base;
        last = PAGE_ALIGN(kern_size) + addr;
        while (addr < last) {
-               set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+               set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
                addr += PAGE_SIZE;
        }
 
@@ -2267,7 +2267,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
                void *block;
 
                if (!(*vmem_pp & _PAGE_VALID)) {
-                       block = vmemmap_alloc_block(1UL << 22, node);
+                       block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
                        if (!block)
                                return -ENOMEM;
 
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b12cb5e72812..b89aba217e3b 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -134,7 +134,7 @@ no_cache_flush:
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
-                              pmd_t pmd, bool exec)
+                              pmd_t pmd)
 {
        unsigned long end;
        pte_t *pte;
@@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
        pte = pte_offset_map(&pmd, vaddr);
        end = vaddr + HPAGE_SIZE;
        while (vaddr < end) {
-               if (pte_val(*pte) & _PAGE_VALID)
+               if (pte_val(*pte) & _PAGE_VALID) {
+                       bool exec = pte_exec(*pte);
+
                        tlb_batch_add_one(mm, vaddr, exec);
+               }
                pte++;
                vaddr += PAGE_SIZE;
        }
@@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        }
 
        if (!pmd_none(orig)) {
-               pte_t orig_pte = __pte(pmd_val(orig));
-               bool exec = pte_exec(orig_pte);
-
                addr &= HPAGE_MASK;
                if (pmd_trans_huge(orig)) {
+                       pte_t orig_pte = __pte(pmd_val(orig));
+                       bool exec = pte_exec(orig_pte);
+
                        tlb_batch_add_one(mm, addr, exec);
                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
                } else {
-                       tlb_batch_pmd_scan(mm, addr, orig, exec);
+                       tlb_batch_pmd_scan(mm, addr, orig);
                }
        }
 }
 
+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                    pmd_t *pmdp)
+{
+       pmd_t entry = *pmdp;
+
+       pmd_val(entry) &= ~_PAGE_VALID;
+
+       set_pmd_at(vma->vm_mm, address, pmdp, entry);
+       flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+}
+
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                 pgtable_t pgtable)
 {