-rw-r--r--	arch/sparc/include/asm/irq_64.h		 4
-rw-r--r--	arch/sparc64/kernel/irq.c		52
-rw-r--r--	arch/sparc64/kernel/kstack.h		58
-rw-r--r--	arch/sparc64/kernel/process.c		27
-rw-r--r--	arch/sparc64/kernel/stacktrace.c	10
-rw-r--r--	arch/sparc64/kernel/traps.c		 7
-rw-r--r--	arch/sparc64/lib/mcount.S		22
-rw-r--r--	arch/sparc64/mm/init.c			11
8 files changed, 161 insertions, 30 deletions
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3473e25231d9..e3dd9303643d 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -93,4 +93,8 @@ static inline unsigned long get_softint(void)
 void __trigger_all_cpu_backtrace(void);
 #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
 
+extern void *hardirq_stack[NR_CPUS];
+extern void *softirq_stack[NR_CPUS];
+#define __ARCH_HAS_DO_SOFTIRQ
+
 #endif
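
For context (not part of this patch): the two arrays give every possible CPU its own hardirq and softirq stack base, and defining __ARCH_HAS_DO_SOFTIRQ tells the generic softirq code that the architecture supplies its own do_softirq(), the one added to arch/sparc64/kernel/irq.c below. A minimal sketch of how the generic side keys off that macro, assuming the conventional kernel/softirq.c arrangement of this era:

	/* Sketch only: the generic kernel builds this fallback do_softirq()
	 * solely when the architecture does not define __ARCH_HAS_DO_SOFTIRQ,
	 * in which case softirqs simply run on whatever stack is current.
	 */
	#ifndef __ARCH_HAS_DO_SOFTIRQ
	asmlinkage void do_softirq(void)
	{
		unsigned long flags;

		if (in_interrupt())
			return;

		local_irq_save(flags);
		if (local_softirq_pending())
			__do_softirq();
		local_irq_restore(flags);
	}
	#endif
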
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ba43d85e8dde..9b6689d9d570 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -682,10 +682,32 @@ void ack_bad_irq(unsigned int virt_irq)
 	       ino, virt_irq);
 }
 
+void *hardirq_stack[NR_CPUS];
+void *softirq_stack[NR_CPUS];
+
+static __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
+	void *orig_sp;
 
 	clear_softint(1 << irq);
 
@@ -703,6 +725,8 @@ void handler_irq(int irq, struct pt_regs *regs)
703 "i" (PSTATE_IE) 725 "i" (PSTATE_IE)
704 : "memory"); 726 : "memory");
705 727
728 orig_sp = set_hardirq_stack();
729
706 while (bucket_pa) { 730 while (bucket_pa) {
707 struct irq_desc *desc; 731 struct irq_desc *desc;
708 unsigned long next_pa; 732 unsigned long next_pa;
@@ -719,10 +743,38 @@ void handler_irq(int irq, struct pt_regs *regs)
 		bucket_pa = next_pa;
 	}
 
+	restore_hardirq_stack(orig_sp);
+
 	irq_exit();
 	set_irq_regs(old_regs);
 }
 
+void do_softirq(void)
+{
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	if (local_softirq_pending()) {
+		void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+
+		__asm__ __volatile__("mov %%sp, %0\n\t"
+				     "mov %1, %%sp"
+				     : "=&r" (orig_sp)
+				     : "r" (sp));
+		__do_softirq();
+		__asm__ __volatile__("mov %0, %%sp"
+				     : : "r" (orig_sp));
+	}
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
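
A note on the arithmetic used by set_hardirq_stack() and do_softirq() above (a sketch, not part of the patch): the new stack is entered at base + THREAD_SIZE - 192 - STACK_BIAS, near the top of the THREAD_SIZE-sized region, leaving 192 bytes (the size of a minimal sparc64 stack frame, struct sparc_stackf) for the first callee's register-window save area, and pre-biased because the sparc64 ABI keeps %sp offset from the true address by STACK_BIAS (2047).

	/* Illustrative helper, assuming kernel context: compute the initial
	 * %sp value for a freshly allocated IRQ stack.
	 */
	static unsigned long irq_stack_initial_sp(void *base)
	{
		/* Top of the region, minus one minimal frame, minus the ABI bias. */
		return (unsigned long) base + THREAD_SIZE - 192 - STACK_BIAS;
	}
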
diff --git a/arch/sparc64/kernel/kstack.h b/arch/sparc64/kernel/kstack.h
new file mode 100644
index 000000000000..43909d5680ea
--- /dev/null
+++ b/arch/sparc64/kernel/kstack.h
@@ -0,0 +1,58 @@
+#ifndef _KSTACK_H
+#define _KSTACK_H
+
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+
+/* SP must be STACK_BIAS adjusted already. */
+static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
+{
+	unsigned long base = (unsigned long) tp;
+
+	if (sp >= (base + sizeof(struct thread_info)) &&
+	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+		return true;
+
+	base = (unsigned long) hardirq_stack[tp->cpu];
+	if (sp >= base &&
+	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+		return true;
+	base = (unsigned long) softirq_stack[tp->cpu];
+	if (sp >= base &&
+	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+		return true;
+
+	return false;
+}
+
+/* Does "regs" point to a valid pt_regs trap frame? */
+static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
+{
+	unsigned long base = (unsigned long) tp;
+	unsigned long addr = (unsigned long) regs;
+
+	if (addr >= base &&
+	    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+		goto check_magic;
+
+	base = (unsigned long) hardirq_stack[tp->cpu];
+	if (addr >= base &&
+	    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+		goto check_magic;
+	base = (unsigned long) softirq_stack[tp->cpu];
+	if (addr >= base &&
+	    addr <= (base + THREAD_SIZE - sizeof(*regs)))
+		goto check_magic;
+
+	return false;
+
+check_magic:
+	if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
+		return true;
+	return false;
+
+}
+
+#endif /* _KSTACK_H */
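
The two helpers above replace the open-coded range checks in the stack walkers changed below; a minimal sketch of the intended call pattern (hypothetical walker, condensed from the loops in process.c, stacktrace.c and traps.c, assuming kernel context):

	/* Sketch: walk saved frames, accepting addresses on the thread,
	 * hardirq or softirq stack, and detecting trap frames by magic value.
	 */
	static void walk_frames(struct thread_info *tp, unsigned long ksp)
	{
		unsigned long fp = ksp + STACK_BIAS;

		do {
			struct sparc_stackf *sf;
			struct pt_regs *regs;

			if (!kstack_valid(tp, fp))
				break;	/* left all three known stacks */

			sf = (struct sparc_stackf *) fp;
			regs = (struct pt_regs *) (sf + 1);

			if (kstack_is_trap_frame(tp, regs))
				fp = regs->u_regs[UREG_I6] + STACK_BIAS;
			else
				fp = (unsigned long) sf->fp + STACK_BIAS;
		} while (1);
	}
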
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 7f5debdc5fed..15f4178592e7 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -52,6 +52,8 @@
 #include <asm/irq_regs.h>
 #include <asm/smp.h>
 
+#include "kstack.h"
+
 static void sparc64_yield(int cpu)
 {
 	if (tlb_type != hypervisor)
@@ -235,19 +237,6 @@ void show_regs(struct pt_regs *regs)
 struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
 static DEFINE_SPINLOCK(global_reg_snapshot_lock);
 
-static bool kstack_valid(struct thread_info *tp, struct reg_window *rw)
-{
-	unsigned long thread_base, fp;
-
-	thread_base = (unsigned long) tp;
-	fp = (unsigned long) rw;
-
-	if (fp < (thread_base + sizeof(struct thread_info)) ||
-	    fp >= (thread_base + THREAD_SIZE))
-		return false;
-	return true;
-}
-
 static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
 			      int this_cpu)
 {
@@ -264,11 +253,11 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
 
 		rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
-		if (kstack_valid(tp, rw)) {
+		if (kstack_valid(tp, (unsigned long) rw)) {
 			global_reg_snapshot[this_cpu].i7 = rw->ins[7];
 			rw = (struct reg_window *)
 				(rw->ins[6] + STACK_BIAS);
-			if (kstack_valid(tp, rw))
+			if (kstack_valid(tp, (unsigned long) rw))
 				global_reg_snapshot[this_cpu].rpc = rw->ins[7];
 		}
 	} else {
@@ -828,7 +817,7 @@ out:
 unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc, fp, bias = 0;
-	unsigned long thread_info_base;
+	struct thread_info *tp;
 	struct reg_window *rw;
 	unsigned long ret = 0;
 	int count = 0;
@@ -837,14 +826,12 @@ unsigned long get_wchan(struct task_struct *task)
 	    task->state == TASK_RUNNING)
 		goto out;
 
-	thread_info_base = (unsigned long) task_stack_page(task);
+	tp = task_thread_info(task);
 	bias = STACK_BIAS;
 	fp = task_thread_info(task)->ksp + bias;
 
 	do {
-		/* Bogus frame pointer? */
-		if (fp < (thread_info_base + sizeof(struct thread_info)) ||
-		    fp >= (thread_info_base + THREAD_SIZE))
+		if (!kstack_valid(tp, fp))
 			break;
 		rw = (struct reg_window *) fp;
 		pc = rw->ins[7];
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
index e9d7f0660f2e..237e7f8a40ac 100644
--- a/arch/sparc64/kernel/stacktrace.c
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -5,6 +5,8 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
+#include "kstack.h"
+
 void save_stack_trace(struct stack_trace *trace)
 {
 	unsigned long ksp, fp, thread_base;
@@ -24,17 +26,13 @@ void save_stack_trace(struct stack_trace *trace)
 		struct pt_regs *regs;
 		unsigned long pc;
 
-		/* Bogus frame pointer? */
-		if (fp < (thread_base + sizeof(struct thread_info)) ||
-		    fp > (thread_base + THREAD_SIZE - sizeof(struct sparc_stackf)))
+		if (!kstack_valid(tp, fp))
 			break;
 
 		sf = (struct sparc_stackf *) fp;
 		regs = (struct pt_regs *) (sf + 1);
 
-		if (((unsigned long)regs <=
-		     (thread_base + THREAD_SIZE - sizeof(*regs))) &&
-		    (regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
+		if (kstack_is_trap_frame(tp, regs)) {
 			if (!(regs->tstate & TSTATE_PRIV))
 				break;
 			pc = regs->tpc;
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 404e8561e2d0..3d924121c796 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -39,6 +39,7 @@
 #include <asm/prom.h>
 
 #include "entry.h"
+#include "kstack.h"
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
@@ -2115,14 +2116,12 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 		struct pt_regs *regs;
 		unsigned long pc;
 
-		/* Bogus frame pointer? */
-		if (fp < (thread_base + sizeof(struct thread_info)) ||
-		    fp >= (thread_base + THREAD_SIZE))
+		if (!kstack_valid(tp, fp))
 			break;
 		sf = (struct sparc_stackf *) fp;
 		regs = (struct pt_regs *) (sf + 1);
 
-		if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
+		if (kstack_is_trap_frame(tp, regs)) {
 			if (!(regs->tstate & TSTATE_PRIV))
 				break;
 			pc = regs->tpc;
diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
index 734caf0cec09..fad90ddb3a28 100644
--- a/arch/sparc64/lib/mcount.S
+++ b/arch/sparc64/lib/mcount.S
@@ -49,6 +49,28 @@ mcount:
 	cmp		%sp, %g3
 	bg,pt		%xcc, 1f
 	 nop
+	lduh		[%g6 + TI_CPU], %g1
+	sethi		%hi(hardirq_stack), %g3
+	or		%g3, %lo(hardirq_stack), %g3
+	sllx		%g1, 3, %g1
+	ldx		[%g3 + %g1], %g7
+	sub		%g7, STACK_BIAS, %g7
+	cmp		%sp, %g7
+	bleu,pt		%xcc, 2f
+	 sethi		%hi(THREAD_SIZE), %g3
+	add		%g7, %g3, %g7
+	cmp		%sp, %g7
+	blu,pn		%xcc, 1f
+2:	 sethi		%hi(softirq_stack), %g3
+	or		%g3, %lo(softirq_stack), %g3
+	ldx		[%g3 + %g1], %g7
+	cmp		%sp, %g7
+	bleu,pt		%xcc, 2f
+	 sethi		%hi(THREAD_SIZE), %g3
+	add		%g7, %g3, %g7
+	cmp		%sp, %g7
+	blu,pn		%xcc, 1f
+	 nop
 	/* If we are already on ovstack, don't hop onto it
 	 * again, we are already trying to output the stack overflow
 	 * message.
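
In C terms (a rough sketch, not part of the patch), the assembly added to mcount's stack-overflow check treats a stack pointer lying inside the current CPU's hardirq or softirq stack as valid instead of reporting an overflow; bias handling below is simplified relative to the assembly:

	/* Sketch: does the (biased) stack pointer fall inside one of this
	 * CPU's IRQ stacks?
	 */
	static bool sp_on_irq_stack(unsigned long sp, int cpu)
	{
		unsigned long base;

		base = (unsigned long) hardirq_stack[cpu] - STACK_BIAS;
		if (sp > base && sp < base + THREAD_SIZE)
			return true;

		base = (unsigned long) softirq_stack[cpu] - STACK_BIAS;
		if (sp > base && sp < base + THREAD_SIZE)
			return true;

		return false;
	}
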
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 4e821b3ecb03..217de3ea29e8 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -49,6 +49,7 @@
 #include <asm/sstate.h>
 #include <asm/mdesc.h>
 #include <asm/cpudata.h>
+#include <asm/irq.h>
 
 #define MAX_PHYS_ADDRESS	(1UL << 42UL)
 #define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
@@ -1771,6 +1772,16 @@ void __init paging_init(void)
 	if (tlb_type == hypervisor)
 		sun4v_mdesc_init();
 
+	/* Once the OF device tree and MDESC have been setup, we know
+	 * the list of possible cpus.  Therefore we can allocate the
+	 * IRQ stacks.
+	 */
+	for_each_possible_cpu(i) {
+		/* XXX Use node local allocations... XXX */
+		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+	}
+
 	/* Setup bootmem... */
 	last_valid_pfn = end_pfn = bootmem_init(phys_base);
 
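
One sizing note on the allocation above (not part of the patch): each IRQ stack is THREAD_SIZE bytes and THREAD_SIZE aligned, the same geometry as a normal kernel thread stack, and lmb_alloc() returns a physical address, hence the __va() conversion before the pointer is stored. The alignment means the base of whichever stack an address lives on can be recovered by masking, just as for a thread stack:

	/* Sketch, assuming kernel context: recover the stack base from any
	 * address on a THREAD_SIZE-aligned stack.
	 */
	static inline void *stack_base_of(unsigned long addr)
	{
		return (void *) (addr & ~(THREAD_SIZE - 1UL));
	}
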