Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h   |   2
-rw-r--r--  arch/sparc/include/asm/irq_64.h       |   4
-rw-r--r--  arch/sparc/include/asm/kdebug_64.h    |   2
-rw-r--r--  arch/sparc/include/asm/nmi.h          |  10
-rw-r--r--  arch/sparc/include/asm/pcr.h          |  46
-rw-r--r--  arch/sparc/include/asm/pil.h          |   1
-rw-r--r--  arch/sparc/kernel/Makefile            |   2
-rw-r--r--  arch/sparc/kernel/cpu.c               |  33
-rw-r--r--  arch/sparc/kernel/irq_64.c            |  68
-rw-r--r--  arch/sparc/kernel/kernel.h            |   1
-rw-r--r--  arch/sparc/kernel/nmi.c               | 224
-rw-r--r--  arch/sparc/kernel/pcr.c               | 153
-rw-r--r--  arch/sparc/kernel/process_64.c        |   5
-rw-r--r--  arch/sparc/kernel/setup_64.c          |   2
-rw-r--r--  arch/sparc/kernel/ttable.S            |   3
-rw-r--r--  arch/sparc/mm/fault_64.c              |  44
-rw-r--r--  arch/sparc/oprofile/init.c            | 232
17 files changed, 548 insertions(+), 284 deletions(-)
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 7da7c13d23c4..a11b89ee9ef8 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -17,7 +17,7 @@
 typedef struct {
 	/* Dcache line 1 */
 	unsigned int	__softirq_pending; /* must be 1st, see rtrap.S */
-	unsigned int	__pad0;
+	unsigned int	__nmi_count;
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	__pad;
 	unsigned int	__pad1;
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index d47d4a1955a9..1934f2cbf513 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -66,9 +66,6 @@ extern void virt_irq_free(unsigned int virt_irq);
 extern void __init init_IRQ(void);
 extern void fixup_irqs(void);
 
-extern int register_perfctr_intr(void (*handler)(struct pt_regs *));
-extern void release_perfctr_intr(void (*handler)(struct pt_regs *));
-
 static inline void set_softint(unsigned long bits)
 {
 	__asm__ __volatile__("wr %0, 0x0, %%set_softint"
@@ -98,5 +95,6 @@ void __trigger_all_cpu_backtrace(void);
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
 #define __ARCH_HAS_DO_SOFTIRQ
+#define ARCH_HAS_NMI_WATCHDOG
 
 #endif
diff --git a/arch/sparc/include/asm/kdebug_64.h b/arch/sparc/include/asm/kdebug_64.h
index f905b773235a..feb3578e12c4 100644
--- a/arch/sparc/include/asm/kdebug_64.h
+++ b/arch/sparc/include/asm/kdebug_64.h
@@ -14,6 +14,8 @@ enum die_val {
 	DIE_TRAP,
 	DIE_TRAP_TL1,
 	DIE_CALL,
+	DIE_NMI,
+	DIE_NMIWATCHDOG,
 };
 
 #endif
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
new file mode 100644
index 000000000000..fbd546dd4feb
--- /dev/null
+++ b/arch/sparc/include/asm/nmi.h
@@ -0,0 +1,10 @@
+#ifndef __NMI_H
+#define __NMI_H
+
+extern int __init nmi_init(void);
+extern void perfctr_irq(int irq, struct pt_regs *regs);
+extern void nmi_adjust_hz(unsigned int new_hz);
+
+extern int nmi_usable;
+
+#endif /* __NMI_H */
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h
new file mode 100644
index 000000000000..a2f5c61f924e
--- /dev/null
+++ b/arch/sparc/include/asm/pcr.h
@@ -0,0 +1,46 @@
+#ifndef __PCR_H
+#define __PCR_H
+
+struct pcr_ops {
+	u64 (*read)(void);
+	void (*write)(u64);
+};
+extern const struct pcr_ops *pcr_ops;
+
+extern void deferred_pcr_work_irq(int irq, struct pt_regs *regs);
+extern void schedule_deferred_pcr_work(void);
+
+#define PCR_PIC_PRIV		0x00000001 /* PIC access is privileged */
+#define PCR_STRACE		0x00000002 /* Trace supervisor events */
+#define PCR_UTRACE		0x00000004 /* Trace user events */
+#define PCR_N2_HTRACE		0x00000008 /* Trace hypervisor events */
+#define PCR_N2_TOE_OV0		0x00000010 /* Trap if PIC 0 overflows */
+#define PCR_N2_TOE_OV1		0x00000020 /* Trap if PIC 1 overflows */
+#define PCR_N2_MASK0		0x00003fc0
+#define PCR_N2_MASK0_SHIFT	6
+#define PCR_N2_SL0		0x0003c000
+#define PCR_N2_SL0_SHIFT	14
+#define PCR_N2_OV0		0x00040000
+#define PCR_N2_MASK1		0x07f80000
+#define PCR_N2_MASK1_SHIFT	19
+#define PCR_N2_SL1		0x78000000
+#define PCR_N2_SL1_SHIFT	27
+#define PCR_N2_OV1		0x80000000
+
+extern unsigned int picl_shift;
+
+/* In order to commonize as much of the implementation as
+ * possible, we use PICH as our counter.  Mostly this is
+ * to accommodate Niagara-1 which can only count insn cycles
+ * in PICH.
+ */
+static inline u64 picl_value(unsigned int nmi_hz)
+{
+	u32 delta = local_cpu_data().clock_tick / (nmi_hz << picl_shift);
+
+	return ((u64)((0 - delta) & 0xffffffff)) << 32;
+}
+
+extern u64 pcr_enable;
+
+#endif /* __PCR_H */
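The picl_value() helper above is the usual counter preload trick: PICH (the upper half of the %pic register) is seeded with the two's complement of the desired event count, so the counter overflows and raises the level-15 interrupt after exactly that many counted cycles. What follows is a minimal standalone sketch of the same arithmetic; the 1.2 GHz clock rate and the nmi_hz value of 100 are made-up example numbers, and picl_shift = 2 mirrors the Niagara-2 setting chosen later in pcr.c.

/* Standalone model of the picl_value() arithmetic; the example_ name and
 * the 1.2 GHz / 100 Hz figures are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t example_picl_value(uint64_t clock_tick, unsigned int nmi_hz,
				   unsigned int picl_shift)
{
	/* Events counted per NMI period. */
	uint32_t delta = clock_tick / (nmi_hz << picl_shift);

	/* Preload PICH with -delta so it overflows (and traps) after
	 * delta more events; PICH occupies the upper 32 bits of %pic.
	 */
	return ((uint64_t)((0 - delta) & 0xffffffff)) << 32;
}

int main(void)
{
	/* 1,200,000,000 / (100 << 2) = 3,000,000 events per overflow. */
	printf("%%pic preload = 0x%016llx\n",
	       (unsigned long long)example_picl_value(1200000000ULL, 100, 2));
	return 0;
}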
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index d573820c0ff4..32a7efe76d00 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -23,6 +23,7 @@
 #define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ			5
 #define PIL_SMP_CALL_FUNC_SNGL		6
+#define PIL_DEFERRED_PCR_WORK		7
 #define PIL_NORMAL_MAX			14
 #define PIL_NMI				15
 
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 53adcaa0348b..54742e58831c 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -52,6 +52,8 @@ obj-$(CONFIG_SPARC64) += visemul.o
 obj-$(CONFIG_SPARC64) += hvapi.o
 obj-$(CONFIG_SPARC64) += sstate.o
 obj-$(CONFIG_SPARC64) += mdesc.o
+obj-$(CONFIG_SPARC64) += pcr.o
+obj-$(CONFIG_SPARC64) += nmi.o
 
 # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
 obj-$(CONFIG_SPARC32) += devres.o
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 32d32b4824f5..d85c3dc4953a 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -26,6 +26,7 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 struct cpu_info {
 	int psr_vers;
 	const char *name;
+	const char *pmu_name;
 };
 
 struct fpu_info {
@@ -45,6 +46,9 @@ struct manufacturer_info {
 #define CPU(ver, _name) \
 { .psr_vers = ver, .name = _name }
 
+#define CPU_PMU(ver, _name, _pmu_name) \
+{ .psr_vers = ver, .name = _name, .pmu_name = _pmu_name }
+
 #define FPU(ver, _name) \
 { .fp_vers = ver, .name = _name }
 
@@ -183,10 +187,10 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
 },{
 	0x17,
 	.cpu_info = {
-		CPU(0x10, "TI UltraSparc I (SpitFire)"),
-		CPU(0x11, "TI UltraSparc II (BlackBird)"),
-		CPU(0x12, "TI UltraSparc IIi (Sabre)"),
-		CPU(0x13, "TI UltraSparc IIe (Hummingbird)"),
+		CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"),
+		CPU_PMU(0x11, "TI UltraSparc II (BlackBird)", "ultra12"),
+		CPU_PMU(0x12, "TI UltraSparc IIi (Sabre)", "ultra12"),
+		CPU_PMU(0x13, "TI UltraSparc IIe (Hummingbird)", "ultra12"),
 		CPU(-1, NULL)
 	},
 	.fpu_info = {
@@ -199,7 +203,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
 },{
 	0x22,
 	.cpu_info = {
-		CPU(0x10, "TI UltraSparc I (SpitFire)"),
+		CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"),
 		CPU(-1, NULL)
 	},
 	.fpu_info = {
@@ -209,12 +213,12 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
 },{
 	0x3e,
 	.cpu_info = {
-		CPU(0x14, "TI UltraSparc III (Cheetah)"),
-		CPU(0x15, "TI UltraSparc III+ (Cheetah+)"),
-		CPU(0x16, "TI UltraSparc IIIi (Jalapeno)"),
-		CPU(0x18, "TI UltraSparc IV (Jaguar)"),
-		CPU(0x19, "TI UltraSparc IV+ (Panther)"),
-		CPU(0x22, "TI UltraSparc IIIi+ (Serrano)"),
+		CPU_PMU(0x14, "TI UltraSparc III (Cheetah)", "ultra3"),
+		CPU_PMU(0x15, "TI UltraSparc III+ (Cheetah+)", "ultra3+"),
+		CPU_PMU(0x16, "TI UltraSparc IIIi (Jalapeno)", "ultra3i"),
+		CPU_PMU(0x18, "TI UltraSparc IV (Jaguar)", "ultra3+"),
+		CPU_PMU(0x19, "TI UltraSparc IV+ (Panther)", "ultra4+"),
+		CPU_PMU(0x22, "TI UltraSparc IIIi+ (Serrano)", "ultra3i"),
 		CPU(-1, NULL)
 	},
 	.fpu_info = {
@@ -234,6 +238,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
 
 const char *sparc_cpu_type;
 const char *sparc_fpu_type;
+const char *sparc_pmu_type;
 
 unsigned int fsr_storage;
 
@@ -244,6 +249,7 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
 
 	sparc_cpu_type = NULL;
 	sparc_fpu_type = NULL;
+	sparc_pmu_type = NULL;
 	manuf = NULL;
 
 	for (i = 0; i < ARRAY_SIZE(manufacturer_info); i++)
@@ -263,6 +269,7 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
 	{
 		if (cpu->psr_vers == psr_vers) {
 			sparc_cpu_type = cpu->name;
+			sparc_pmu_type = cpu->pmu_name;
 			sparc_fpu_type = "No FPU";
 			break;
 		}
@@ -290,6 +297,8 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
 		       psr_impl, fpu_vers);
 		sparc_fpu_type = "Unknown FPU";
 	}
+	if (sparc_pmu_type == NULL)
+		sparc_pmu_type = "Unknown PMU";
 }
 
 #ifdef CONFIG_SPARC32
@@ -315,11 +324,13 @@ static void __init sun4v_cpu_probe(void)
 	case SUN4V_CHIP_NIAGARA1:
 		sparc_cpu_type = "UltraSparc T1 (Niagara)";
 		sparc_fpu_type = "UltraSparc T1 integrated FPU";
+		sparc_pmu_type = "niagara";
 		break;
 
 	case SUN4V_CHIP_NIAGARA2:
 		sparc_cpu_type = "UltraSparc T2 (Niagara2)";
 		sparc_fpu_type = "UltraSparc T2 integrated FPU";
+		sparc_pmu_type = "niagara2";
 		break;
 
 	default:
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index cab8e0286871..e289376198eb 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -196,6 +196,11 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_putc(p, '\n');
 skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+	} else if (i == NR_IRQS) {
+		seq_printf(p, "NMI: ");
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
+		seq_printf(p, " Non-maskable interrupts\n");
 	}
 	return 0;
 }
@@ -778,69 +783,6 @@ void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-static void unhandled_perf_irq(struct pt_regs *regs)
-{
-	unsigned long pcr, pic;
-
-	read_pcr(pcr);
-	read_pic(pic);
-
-	write_pcr(0);
-
-	printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
-	       smp_processor_id());
-	printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
-	       smp_processor_id(), pcr, pic);
-}
-
-/* Almost a direct copy of the powerpc PMC code.  */
-static DEFINE_SPINLOCK(perf_irq_lock);
-static void *perf_irq_owner_caller; /* mostly for debugging */
-static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;
-
-/* Invoked from level 15 PIL handler in trap table.  */
-void perfctr_irq(int irq, struct pt_regs *regs)
-{
-	clear_softint(1 << irq);
-	perf_irq(regs);
-}
-
-int register_perfctr_intr(void (*handler)(struct pt_regs *))
-{
-	int ret;
-
-	if (!handler)
-		return -EINVAL;
-
-	spin_lock(&perf_irq_lock);
-	if (perf_irq != unhandled_perf_irq) {
-		printk(KERN_WARNING "register_perfctr_intr: "
-		       "perf IRQ busy (reserved by caller %p)\n",
-		       perf_irq_owner_caller);
-		ret = -EBUSY;
-		goto out;
-	}
-
-	perf_irq_owner_caller = __builtin_return_address(0);
-	perf_irq = handler;
-
-	ret = 0;
-out:
-	spin_unlock(&perf_irq_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(register_perfctr_intr);
-
-void release_perfctr_intr(void (*handler)(struct pt_regs *))
-{
-	spin_lock(&perf_irq_lock);
-	perf_irq_owner_caller = NULL;
-	perf_irq = unhandled_perf_irq;
-	spin_unlock(&perf_irq_lock);
-}
-EXPORT_SYMBOL_GPL(release_perfctr_intr);
-
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 81a972e8d8ea..15d8a3f645c9 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -5,6 +5,7 @@
 
 /* cpu.c */
 extern const char *sparc_cpu_type;
+extern const char *sparc_pmu_type;
 extern const char *sparc_fpu_type;
 
 extern unsigned int fsr_storage;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
new file mode 100644
index 000000000000..09f088ed4a64
--- /dev/null
+++ b/arch/sparc/kernel/nmi.c
@@ -0,0 +1,224 @@
+/* Pseudo NMI support on sparc64 systems.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ *
+ * The NMI watchdog support and infrastructure is based almost
+ * entirely upon the x86 NMI support code.
+ */
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/kernel_stat.h>
+#include <linux/slab.h>
+#include <linux/kdebug.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+
+#include <asm/ptrace.h>
+#include <asm/local.h>
+#include <asm/pcr.h>
+
+/* We don't have a real NMI on sparc64, but we can fake one
+ * up using profiling counter overflow interrupts and interrupt
+ * levels.
+ *
+ * The profile overflow interrupts at level 15, so we use
+ * level 14 as our IRQ off level.
+ */
+
+static int nmi_watchdog_active;
+static int panic_on_timeout;
+
+int nmi_usable;
+EXPORT_SYMBOL_GPL(nmi_usable);
+
+static unsigned int nmi_hz = HZ;
+
+static DEFINE_PER_CPU(unsigned int, last_irq_sum);
+static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(int, nmi_touch);
+
+void touch_nmi_watchdog(void)
+{
+	if (nmi_watchdog_active) {
+		int cpu;
+
+		for_each_present_cpu(cpu) {
+			if (per_cpu(nmi_touch, cpu) != 1)
+				per_cpu(nmi_touch, cpu) = 1;
+		}
+	}
+
+	touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
+{
+	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
+		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
+		return;
+
+	console_verbose();
+	bust_spinlocks(1);
+
+	printk(KERN_EMERG "%s", str);
+	printk(" on CPU%d, ip %08lx, registers:\n",
+	       smp_processor_id(), regs->tpc);
+	show_regs(regs);
+
+	bust_spinlocks(0);
+
+	if (do_panic || panic_on_oops)
+		panic("Non maskable interrupt");
+
+	local_irq_enable();
+	do_exit(SIGBUS);
+}
+
+notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
+{
+	unsigned int sum, touched = 0;
+	int cpu = smp_processor_id();
+
+	clear_softint(1 << irq);
+	pcr_ops->write(PCR_PIC_PRIV);
+
+	local_cpu_data().__nmi_count++;
+
+	if (notify_die(DIE_NMI, "nmi", regs, 0,
+		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
+		touched = 1;
+
+	sum = kstat_irqs_cpu(0, cpu);
+	if (__get_cpu_var(nmi_touch)) {
+		__get_cpu_var(nmi_touch) = 0;
+		touched = 1;
+	}
+	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
+		local_inc(&__get_cpu_var(alert_counter));
+		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+			die_nmi("BUG: NMI Watchdog detected LOCKUP",
+				regs, panic_on_timeout);
+	} else {
+		__get_cpu_var(last_irq_sum) = sum;
+		local_set(&__get_cpu_var(alert_counter), 0);
+	}
+	if (nmi_usable) {
+		write_pic(picl_value(nmi_hz));
+		pcr_ops->write(pcr_enable);
+	}
+}
+
+static inline unsigned int get_nmi_count(int cpu)
+{
+	return cpu_data(cpu).__nmi_count;
+}
+
+static int endflag __initdata;
+
+static __init void nmi_cpu_busy(void *data)
+{
+	local_irq_enable_in_hardirq();
+	while (endflag == 0)
+		mb();
+}
+
+static void report_broken_nmi(int cpu, int *prev_nmi_count)
+{
+	printk(KERN_CONT "\n");
+
+	printk(KERN_WARNING
+		"WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+			cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
+
+	printk(KERN_WARNING
+		"Please report this to bugzilla.kernel.org,\n");
+	printk(KERN_WARNING
+		"and attach the output of the 'dmesg' command.\n");
+
+	nmi_usable = 0;
+}
+
+static void stop_watchdog(void *unused)
+{
+	pcr_ops->write(PCR_PIC_PRIV);
+}
+
+static int __init check_nmi_watchdog(void)
+{
+	unsigned int *prev_nmi_count;
+	int cpu, err;
+
+	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
+	if (!prev_nmi_count) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	printk(KERN_INFO "Testing NMI watchdog ... ");
+
+	smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
+
+	for_each_possible_cpu(cpu)
+		prev_nmi_count[cpu] = get_nmi_count(cpu);
+	local_irq_enable();
+	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
+
+	for_each_online_cpu(cpu) {
+		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
+			report_broken_nmi(cpu, prev_nmi_count);
+	}
+	endflag = 1;
+	if (!nmi_usable) {
+		kfree(prev_nmi_count);
+		err = -ENODEV;
+		goto error;
+	}
+	printk("OK.\n");
+
+	nmi_hz = 1;
+
+	kfree(prev_nmi_count);
+	return 0;
+error:
+	on_each_cpu(stop_watchdog, NULL, 1);
+	return err;
+}
+
+static void start_watchdog(void *unused)
+{
+	pcr_ops->write(PCR_PIC_PRIV);
+	write_pic(picl_value(nmi_hz));
+
+	pcr_ops->write(pcr_enable);
+}
+
+void nmi_adjust_hz(unsigned int new_hz)
+{
+	nmi_hz = new_hz;
+	on_each_cpu(start_watchdog, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(nmi_adjust_hz);
+
+int __init nmi_init(void)
+{
+	nmi_usable = 1;
+
+	on_each_cpu(start_watchdog, NULL, 1);
+
+	return check_nmi_watchdog();
+}
+
+static int __init setup_nmi_watchdog(char *str)
+{
+	if (!strncmp(str, "panic", 5))
+		panic_on_timeout = 1;
+
+	return 0;
+}
+__setup("nmi_watchdog=", setup_nmi_watchdog);
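The lockup test in perfctr_irq() above reduces to a simple rule: if the interrupt count sampled on a CPU has not advanced for 5 * nmi_hz consecutive pseudo-NMI ticks, and nothing has touched the watchdog in the meantime, the CPU is declared stuck and die_nmi() runs. Below is a simplified single-CPU model of that rule, for illustration only; the kernel version keeps its state in per-cpu variables and a local_t counter.

/* Simplified model of the watchdog decision made in perfctr_irq(); not
 * kernel code, just the same counting rule in plain C.
 */
#include <stdbool.h>
#include <stdio.h>

struct watchdog_state {
	unsigned int last_irq_sum;
	unsigned int alert_counter;
	bool touched;
};

/* One pseudo-NMI tick: returns true when a lockup would be reported. */
static bool nmi_tick(struct watchdog_state *st, unsigned int irq_sum,
		     unsigned int nmi_hz)
{
	bool touched = st->touched;

	st->touched = false;

	if (!touched && st->last_irq_sum == irq_sum) {
		/* No interrupt progress since the previous tick. */
		if (++st->alert_counter == 5 * nmi_hz)
			return true;
	} else {
		st->last_irq_sum = irq_sum;
		st->alert_counter = 0;
	}
	return false;
}

int main(void)
{
	struct watchdog_state st = { 0, 0, false };
	unsigned int nmi_hz = 1;	/* check_nmi_watchdog() drops the rate to 1 Hz */
	int tick;

	/* Interrupt count frozen at 42: the fifth consecutive unchanged
	 * tick trips the detector.
	 */
	for (tick = 1; tick <= 6; tick++)
		if (nmi_tick(&st, 42, nmi_hz))
			printf("lockup detected on tick %d\n", tick);
	return 0;
}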
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
new file mode 100644
index 000000000000..92e0dda141a4
--- /dev/null
+++ b/arch/sparc/kernel/pcr.c
@@ -0,0 +1,153 @@
+/* pcr.c: Generic sparc64 performance counter infrastructure.
+ *
+ * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+
+#include <asm/pil.h>
+#include <asm/pcr.h>
+#include <asm/nmi.h>
+
+/* This code is shared between various users of the performance
+ * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
+ * perf_counter support layer.
+ */
+
+#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
+#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
+				 PCR_N2_TOE_OV1 | \
+				 (2 << PCR_N2_SL1_SHIFT) | \
+				 (0xff << PCR_N2_MASK1_SHIFT))
+
+u64 pcr_enable;
+unsigned int picl_shift;
+
+/* Performance counter interrupts run unmasked at PIL level 15.
+ * Therefore we can't do things like wakeups and other work
+ * that expects IRQ disabling to be adhered to in locking etc.
+ *
+ * Therefore in such situations we defer the work by signalling
+ * a lower level cpu IRQ.
+ */
+void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+{
+	clear_softint(1 << PIL_DEFERRED_PCR_WORK);
+}
+
+void schedule_deferred_pcr_work(void)
+{
+	set_softint(1 << PIL_DEFERRED_PCR_WORK);
+}
+
+const struct pcr_ops *pcr_ops;
+EXPORT_SYMBOL_GPL(pcr_ops);
+
+static u64 direct_pcr_read(void)
+{
+	u64 val;
+
+	read_pcr(val);
+	return val;
+}
+
+static void direct_pcr_write(u64 val)
+{
+	write_pcr(val);
+}
+
+static const struct pcr_ops direct_pcr_ops = {
+	.read = direct_pcr_read,
+	.write = direct_pcr_write,
+};
+
+static void n2_pcr_write(u64 val)
+{
+	unsigned long ret;
+
+	ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
+	if (ret != HV_EOK)
+		write_pcr(val);
+}
+
+static const struct pcr_ops n2_pcr_ops = {
+	.read = direct_pcr_read,
+	.write = n2_pcr_write,
+};
+
+static unsigned long perf_hsvc_group;
+static unsigned long perf_hsvc_major;
+static unsigned long perf_hsvc_minor;
+
+static int __init register_perf_hsvc(void)
+{
+	if (tlb_type == hypervisor) {
+		switch (sun4v_chip_type) {
+		case SUN4V_CHIP_NIAGARA1:
+			perf_hsvc_group = HV_GRP_NIAG_PERF;
+			break;
+
+		case SUN4V_CHIP_NIAGARA2:
+			perf_hsvc_group = HV_GRP_N2_CPU;
+			break;
+
+		default:
+			return -ENODEV;
+		}
+
+
+		perf_hsvc_major = 1;
+		perf_hsvc_minor = 0;
+		if (sun4v_hvapi_register(perf_hsvc_group,
+					 perf_hsvc_major,
+					 &perf_hsvc_minor)) {
+			printk("perfmon: Could not register hvapi.\n");
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+static void __init unregister_perf_hsvc(void)
+{
+	if (tlb_type != hypervisor)
+		return;
+	sun4v_hvapi_unregister(perf_hsvc_group);
+}
+
+int __init pcr_arch_init(void)
+{
+	int err = register_perf_hsvc();
+
+	if (err)
+		return err;
+
+	switch (tlb_type) {
+	case hypervisor:
+		pcr_ops = &n2_pcr_ops;
+		pcr_enable = PCR_N2_ENABLE;
+		picl_shift = 2;
+		break;
+
+	case cheetah:
+	case cheetah_plus:
+	case spitfire:
+		pcr_ops = &direct_pcr_ops;
+		pcr_enable = PCR_SUN4U_ENABLE;
+		break;
+
+	default:
+		err = -ENODEV;
+		goto out_unregister;
+	}
+
+	return nmi_init();
+
+out_unregister:
+	unregister_perf_hsvc();
+	return err;
+}
+
+arch_initcall(pcr_arch_init);
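For reference, the PCR_N2_ENABLE value assembled above enables privileged PIC access, supervisor and user tracing, and the overflow trap on PIC1, with the SL1 field set to 2 and the MASK1 field set to 0xff. The small standalone program below simply repeats that bit arithmetic outside the kernel (the relevant macros are copied locally so it builds on its own) and prints the resulting register value, 0x17f80027.

/* Worked expansion of PCR_N2_ENABLE; the macros are copied from the new
 * asm/pcr.h so this compiles as an ordinary userspace program.
 */
#include <stdio.h>

#define PCR_PIC_PRIV		0x00000001
#define PCR_STRACE		0x00000002
#define PCR_UTRACE		0x00000004
#define PCR_N2_TOE_OV1		0x00000020
#define PCR_N2_MASK1_SHIFT	19
#define PCR_N2_SL1_SHIFT	27

int main(void)
{
	unsigned int pcr = PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
			   PCR_N2_TOE_OV1 |
			   (2 << PCR_N2_SL1_SHIFT) |
			   (0xff << PCR_N2_MASK1_SHIFT);

	printf("PCR_N2_ENABLE = 0x%08x\n", pcr);	/* prints 0x17f80027 */
	return 0;
}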
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index cc8b5604442c..a73954b87f0a 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -29,6 +29,7 @@
 #include <linux/cpu.h>
 #include <linux/elfcore.h>
 #include <linux/sysrq.h>
+#include <linux/nmi.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -52,8 +53,10 @@
 
 static void sparc64_yield(int cpu)
 {
-	if (tlb_type != hypervisor)
+	if (tlb_type != hypervisor) {
+		touch_nmi_watchdog();
 		return;
+	}
 
 	clear_thread_flag(TIF_POLLING_NRFLAG);
 	smp_mb__after_clear_bit();
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 49d061f4ae9d..f2bcfd2967d7 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -354,6 +354,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 	seq_printf(m,
 		   "cpu\t\t: %s\n"
 		   "fpu\t\t: %s\n"
+		   "pmu\t\t: %s\n"
 		   "prom\t\t: %s\n"
 		   "type\t\t: %s\n"
 		   "ncpus probed\t: %d\n"
@@ -366,6 +367,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 		   ,
 		   sparc_cpu_type,
 		   sparc_fpu_type,
+		   sparc_pmu_type,
 		   prom_version,
 		   ((tlb_type == hypervisor) ?
 		    "sun4v" :
diff --git a/arch/sparc/kernel/ttable.S b/arch/sparc/kernel/ttable.S
index ea925503b42e..d9bdfb9d5c18 100644
--- a/arch/sparc/kernel/ttable.S
+++ b/arch/sparc/kernel/ttable.S
@@ -63,7 +63,8 @@ tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6)
 #else
 tl0_irq6:	BTRAP(0x46)
 #endif
-tl0_irq7:	BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
+tl0_irq7:	TRAP_IRQ(deferred_pcr_work_irq, 7)
+tl0_irq8:	BTRAP(0x48) BTRAP(0x49)
 tl0_irq10:	BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
 tl0_irq14:	TRAP_IRQ(timer_interrupt, 14)
 tl0_irq15:	TRAP_NMI_IRQ(perfctr_irq, 15)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index a9e474bf6385..4ab8993b0863 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/percpu.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -224,6 +225,30 @@ cannot_handle:
 	unhandled_fault (address, current, regs);
 }
 
+static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
+{
+	static int times;
+
+	if (times++ < 10)
+		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
+		       "64-bit TPC [%lx]\n",
+		       current->comm, current->pid,
+		       regs->tpc);
+	show_regs(regs);
+}
+
+static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
+					       unsigned long addr)
+{
+	static int times;
+
+	if (times++ < 10)
+		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
+		       "reports 64-bit fault address [%lx]\n",
+		       current->comm, current->pid, addr);
+	show_regs(regs);
+}
+
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
@@ -244,6 +269,19 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	    (fault_code & FAULT_CODE_DTLB))
 		BUG();
 
+	if (test_thread_flag(TIF_32BIT)) {
+		if (!(regs->tstate & TSTATE_PRIV)) {
+			if (unlikely((regs->tpc >> 32) != 0)) {
+				bogus_32bit_fault_tpc(regs);
+				goto intr_or_no_mm;
+			}
+		}
+		if (unlikely((address >> 32) != 0)) {
+			bogus_32bit_fault_address(regs, address);
+			goto intr_or_no_mm;
+		}
+	}
+
 	if (regs->tstate & TSTATE_PRIV) {
 		unsigned long tpc = regs->tpc;
 
@@ -264,12 +302,6 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto intr_or_no_mm;
 
-	if (test_thread_flag(TIF_32BIT)) {
-		if (!(regs->tstate & TSTATE_PRIV))
-			regs->tpc &= 0xffffffff;
-		address &= 0xffffffff;
-	}
-
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if ((regs->tstate & TSTATE_PRIV) &&
 		    !search_exception_tables(regs->tpc)) {
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index d6e170c074fc..d172f86439b1 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -13,217 +13,57 @@
 #include <linux/init.h>
 
 #ifdef CONFIG_SPARC64
-#include <asm/hypervisor.h>
-#include <asm/spitfire.h>
-#include <asm/cpudata.h>
-#include <asm/irq.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/kdebug.h>
+#include <asm/nmi.h>
 
-static int nmi_enabled;
-
-struct pcr_ops {
-	u64 (*read)(void);
-	void (*write)(u64);
-};
-static const struct pcr_ops *pcr_ops;
-
-static u64 direct_pcr_read(void)
-{
-	u64 val;
-
-	read_pcr(val);
-	return val;
-}
-
-static void direct_pcr_write(u64 val)
-{
-	write_pcr(val);
-}
-
-static const struct pcr_ops direct_pcr_ops = {
-	.read = direct_pcr_read,
-	.write = direct_pcr_write,
-};
-
-static void n2_pcr_write(u64 val)
+static int profile_timer_exceptions_notify(struct notifier_block *self,
+					    unsigned long val, void *data)
 {
-	unsigned long ret;
-
-	ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
-	if (val != HV_EOK)
-		write_pcr(val);
-}
-
-static const struct pcr_ops n2_pcr_ops = {
-	.read = direct_pcr_read,
-	.write = n2_pcr_write,
-};
-
-/* In order to commonize as much of the implementation as
- * possible, we use PICH as our counter.  Mostly this is
- * to accomodate Niagara-1 which can only count insn cycles
- * in PICH.
- */
-static u64 picl_value(void)
-{
-	u32 delta = local_cpu_data().clock_tick / HZ;
-
-	return ((u64)((0 - delta) & 0xffffffff)) << 32;
-}
-
-#define PCR_PIC_PRIV		0x00000001 /* PIC access is privileged */
-#define PCR_STRACE		0x00000002 /* Trace supervisor events */
-#define PCR_UTRACE		0x00000004 /* Trace user events */
-#define PCR_N2_HTRACE		0x00000008 /* Trace hypervisor events */
-#define PCR_N2_TOE_OV0		0x00000010 /* Trap if PIC 0 overflows */
-#define PCR_N2_TOE_OV1		0x00000020 /* Trap if PIC 1 overflows */
-#define PCR_N2_MASK0		0x00003fc0
-#define PCR_N2_MASK0_SHIFT	6
-#define PCR_N2_SL0		0x0003c000
-#define PCR_N2_SL0_SHIFT	14
-#define PCR_N2_OV0		0x00040000
-#define PCR_N2_MASK1		0x07f80000
-#define PCR_N2_MASK1_SHIFT	19
-#define PCR_N2_SL1		0x78000000
-#define PCR_N2_SL1_SHIFT	27
-#define PCR_N2_OV1		0x80000000
-
-#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
-#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
-				 PCR_N2_TOE_OV1 | \
-				 (2 << PCR_N2_SL1_SHIFT) | \
-				 (0xff << PCR_N2_MASK1_SHIFT))
-
-static u64 pcr_enable = PCR_SUN4U_ENABLE;
-
-static void nmi_handler(struct pt_regs *regs)
-{
-	pcr_ops->write(PCR_PIC_PRIV);
-
-	if (nmi_enabled) {
-		oprofile_add_sample(regs, 0);
-
-		write_pic(picl_value());
-		pcr_ops->write(pcr_enable);
-	}
-}
-
-/* We count "clock cycle" events in the lower 32-bit PIC.
- * Then configure it such that it overflows every HZ, and thus
- * generates a level 15 interrupt at that frequency.
- */
-static void cpu_nmi_start(void *_unused)
-{
-	pcr_ops->write(PCR_PIC_PRIV);
-	write_pic(picl_value());
-
-	pcr_ops->write(pcr_enable);
-}
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
 
-static void cpu_nmi_stop(void *_unused)
-{
-	pcr_ops->write(PCR_PIC_PRIV);
-}
-
-static int nmi_start(void)
-{
-	int err = register_perfctr_intr(nmi_handler);
-
-	if (!err) {
-		nmi_enabled = 1;
-		wmb();
-		err = on_each_cpu(cpu_nmi_start, NULL, 1);
-		if (err) {
-			nmi_enabled = 0;
-			wmb();
-			on_each_cpu(cpu_nmi_stop, NULL, 1);
-			release_perfctr_intr(nmi_handler);
-		}
+	switch (val) {
+	case DIE_NMI:
+		oprofile_add_sample(args->regs, 0);
+		ret = NOTIFY_STOP;
+		break;
+	default:
+		break;
 	}
-
-	return err;
-}
-
-static void nmi_stop(void)
-{
-	nmi_enabled = 0;
-	wmb();
-
-	on_each_cpu(cpu_nmi_stop, NULL, 1);
-	release_perfctr_intr(nmi_handler);
-	synchronize_sched();
+	return ret;
 }
 
-static unsigned long perf_hsvc_group;
-static unsigned long perf_hsvc_major;
-static unsigned long perf_hsvc_minor;
+static struct notifier_block profile_timer_exceptions_nb = {
+	.notifier_call = profile_timer_exceptions_notify,
+};
 
-static int __init register_perf_hsvc(void)
+static int timer_start(void)
 {
-	if (tlb_type == hypervisor) {
-		switch (sun4v_chip_type) {
-		case SUN4V_CHIP_NIAGARA1:
-			perf_hsvc_group = HV_GRP_NIAG_PERF;
-			break;
-
-		case SUN4V_CHIP_NIAGARA2:
-			perf_hsvc_group = HV_GRP_N2_CPU;
-			break;
-
-		default:
-			return -ENODEV;
-		}
-
-
-		perf_hsvc_major = 1;
-		perf_hsvc_minor = 0;
-		if (sun4v_hvapi_register(perf_hsvc_group,
-					 perf_hsvc_major,
-					 &perf_hsvc_minor)) {
-			printk("perfmon: Could not register N2 hvapi.\n");
-			return -ENODEV;
-		}
-	}
+	if (register_die_notifier(&profile_timer_exceptions_nb))
+		return 1;
+	nmi_adjust_hz(HZ);
 	return 0;
 }
 
-static void unregister_perf_hsvc(void)
+
+static void timer_stop(void)
 {
-	if (tlb_type != hypervisor)
-		return;
-	sun4v_hvapi_unregister(perf_hsvc_group);
+	nmi_adjust_hz(1);
+	unregister_die_notifier(&profile_timer_exceptions_nb);
+	synchronize_sched(); /* Allow already-started NMIs to complete. */
 }
 
-static int oprofile_nmi_init(struct oprofile_operations *ops)
+static int op_nmi_timer_init(struct oprofile_operations *ops)
 {
-	int err = register_perf_hsvc();
-
-	if (err)
-		return err;
-
-	switch (tlb_type) {
-	case hypervisor:
-		pcr_ops = &n2_pcr_ops;
-		pcr_enable = PCR_N2_ENABLE;
-		break;
-
-	case cheetah:
-	case cheetah_plus:
-		pcr_ops = &direct_pcr_ops;
-		break;
-
-	default:
+	if (!nmi_usable)
 		return -ENODEV;
-	}
 
-	ops->create_files = NULL;
-	ops->setup = NULL;
-	ops->shutdown = NULL;
-	ops->start = nmi_start;
-	ops->stop = nmi_stop;
+	ops->start = timer_start;
+	ops->stop = timer_stop;
 	ops->cpu_type = "timer";
-
-	printk(KERN_INFO "oprofile: Using perfctr based NMI timer interrupt.\n");
-
+	printk(KERN_INFO "oprofile: Using perfctr NMI timer interrupt.\n");
 	return 0;
 }
 #endif
@@ -233,7 +73,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	int ret = -ENODEV;
 
 #ifdef CONFIG_SPARC64
-	ret = oprofile_nmi_init(ops);
+	ret = op_nmi_timer_init(ops);
 	if (!ret)
 		return ret;
 #endif
@@ -241,10 +81,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	return ret;
 }
 
-
 void oprofile_arch_exit(void)
 {
-#ifdef CONFIG_SPARC64
-	unregister_perf_hsvc();
-#endif
 }