Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/irq_64.c        | 20
-rw-r--r--  arch/sparc/kernel/kstack.h        | 19
-rw-r--r--  arch/sparc/kernel/nmi.c           |  7
-rw-r--r--  arch/sparc/kernel/rtrap_64.S      | 12
-rw-r--r--  arch/sparc/kernel/unaligned_64.c  |  6
5 files changed, 42 insertions, 22 deletions
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 454ce3a2527..830d70a3e20 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/irq.h>
+#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -46,6 +47,7 @@
 
 #include "entry.h"
 #include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS (IMAP_INR + 1)
 
@@ -712,24 +714,6 @@ void ack_bad_irq(unsigned int virt_irq)
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
 
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-        void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-        __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-        if (orig_sp < sp ||
-            orig_sp > (sp + THREAD_SIZE)) {
-                sp += THREAD_SIZE - 192 - STACK_BIAS;
-                __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-        }
-
-        return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-        __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
 void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
         unsigned long pstate, bucket_pa;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 5247283d1c0..53dfb92e09f 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
 
 }
 
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+        void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+        __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+        if (orig_sp < sp ||
+            orig_sp > (sp + THREAD_SIZE)) {
+                sp += THREAD_SIZE - 192 - STACK_BIAS;
+                __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+        }
+
+        return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+        __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 #endif /* _KSTACK_H */
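
The two helpers added above are meant to bracket work that should run on the per-cpu hardirq stack: switch %sp over on entry (unless the current stack pointer already lies within that stack), do the work, then put the saved stack pointer back. The nmi.c hunk below is the caller this patch adds; a minimal sketch of the calling pattern looks like the following (the handler name and body are illustrative only, not part of the patch):

#include <asm/ptrace.h>   /* struct pt_regs */
#include "kstack.h"       /* set_hardirq_stack(), restore_hardirq_stack() */

static void example_handler(struct pt_regs *regs)
{
        void *orig_sp;

        /* Switch to this cpu's hardirq stack, unless %sp is already inside it. */
        orig_sp = set_hardirq_stack();

        /* ... interrupt work runs on the hardirq stack here ... */

        /* Switch back to whatever stack we came in on. */
        restore_hardirq_stack(orig_sp);
}

Both helpers move %sp directly with inline assembly, which is why they are forced inline into their caller.
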
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 75a3d1a2535..a4bd7ba74c8 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
 #include <asm/ptrace.h>
 #include <asm/pcr.h>
 
+#include "kstack.h"
+
 /* We don't have a real NMI on sparc64, but we can fake one
  * up using profiling counter overflow interrupts and interrupt
  * levels.
@@ -92,6 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
         unsigned int sum, touched = 0;
+        void *orig_sp;
 
         clear_softint(1 << irq);
 
@@ -99,6 +102,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 
         nmi_enter();
 
+        orig_sp = set_hardirq_stack();
+
         if (notify_die(DIE_NMI, "nmi", regs, 0,
                        pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
                 touched = 1;
@@ -124,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                 pcr_ops->write(pcr_enable);
         }
 
+        restore_hardirq_stack(orig_sp);
+
         nmi_exit();
 }
 
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 83f1873c6c1..090b9e9ad5e 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
          nop
         call    trace_hardirqs_on
          nop
-        wrpr    %l4, %pil
+        /* Do not actually set the %pil here.  We will do that
+         * below after we clear PSTATE_IE in the %pstate register.
+         * If we re-enable interrupts here, we can recurse down
+         * the hardirq stack potentially endlessly, causing a
+         * stack overflow.
+         *
+         * It is tempting to put this test and trace_hardirqs_on
+         * call at the 'rt_continue' label, but that will not work
+         * as that path hits unconditionally and we do not want to
+         * execute this in NMI return paths, for example.
+         */
 #endif
 rtrap_no_irq_enable:
         andcc   %l1, TSTATE_PRIV, %l3
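
The comment above replaces an early "wrpr %l4, %pil" in the CONFIG_TRACE_IRQFLAGS path. The ordering it asks for later in the return path is roughly the following (a sketch only; RTRAP_PSTATE_IRQOFF and the PIL value held in %l4 come from elsewhere in rtrap_64.S, and the exact surrounding instructions are not part of this hunk):

        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate  ! clear PSTATE_IE first, so no interrupt can be taken
        wrpr    %l4, %pil                          ! only then restore the saved %pil

Restoring %pil while PSTATE_IE is still set is what the comment identifies as the recursion hazard on the hardirq stack.
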
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index ebce43018c4..c752c4c479b 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
 }
 
 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
 {
         unsigned int tmp;
 
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
                 return 2;
         else {
                 printk("Impossible unaligned trap. insn=%08x\n", insn);
-                die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+                die_if_kernel("Byte sized unaligned access?!?!", regs);
 
                 /* GCC should never warn that control reaches the end
                  * of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
         enum direction dir = decode_direction(insn);
-        int size = decode_access_size(insn);
+        int size = decode_access_size(regs, insn);
         int orig_asi, asi;
 
         current_thread_info()->kern_una_regs = regs;