author     David S. Miller <davem@davemloft.net>  2009-01-30 00:22:47 -0500
committer  David S. Miller <davem@davemloft.net>  2009-01-30 03:03:53 -0500
commit     e5553a6d04421eec326a629571d696e8e745a0e4
tree       b6fe49a18135dbe27a464fb78828b2150c679689
parent     c3cf5e8cc56d272f828a66610bb78bbb727b2ce1
sparc64: Implement NMI watchdog on capable cpus.
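The pseudo-NMI drives each cpu's performance counters so that PIC overflow
raises the level-15 perfctr interrupt roughly nmi_hz times a second.  The
handler samples the cpu's timer-interrupt count; if a cpu takes 5 * nmi_hz
consecutive NMIs without that count advancing, and nothing has touched the
watchdog in the meantime, die_nmi() reports a lockup.  The old
register_perfctr_intr()/release_perfctr_intr() ownership interface is
removed; oprofile now attaches through the DIE_NMI die-notifier chain
instead.

As a usage note grounded in setup_nmi_watchdog() below: a detected lockup
normally kills the stuck task with SIGBUS, while booting with

    nmi_watchdog=panic

makes it panic the machine instead.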
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h |   2
-rw-r--r--  arch/sparc/include/asm/irq_64.h     |   4
-rw-r--r--  arch/sparc/include/asm/kdebug_64.h  |   2
-rw-r--r--  arch/sparc/include/asm/nmi.h        |  10
-rw-r--r--  arch/sparc/include/asm/pcr.h        |  16
-rw-r--r--  arch/sparc/kernel/Makefile          |   1
-rw-r--r--  arch/sparc/kernel/irq_64.c          |  68
-rw-r--r--  arch/sparc/kernel/nmi.c             | 224
-rw-r--r--  arch/sparc/kernel/pcr.c             |  17
-rw-r--r--  arch/sparc/oprofile/init.c          | 128
10 files changed, 309 insertions(+), 163 deletions(-)
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 7da7c13d23c4..a11b89ee9ef8 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -17,7 +17,7 @@
 typedef struct {
 	/* Dcache line 1 */
 	unsigned int	__softirq_pending; /* must be 1st, see rtrap.S */
-	unsigned int	__pad0;
+	unsigned int	__nmi_count;
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	__pad;
 	unsigned int	__pad1;
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index d47d4a1955a9..1934f2cbf513 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -66,9 +66,6 @@ extern void virt_irq_free(unsigned int virt_irq);
 extern void __init init_IRQ(void);
 extern void fixup_irqs(void);
 
-extern int register_perfctr_intr(void (*handler)(struct pt_regs *));
-extern void release_perfctr_intr(void (*handler)(struct pt_regs *));
-
 static inline void set_softint(unsigned long bits)
 {
 	__asm__ __volatile__("wr %0, 0x0, %%set_softint"
@@ -98,5 +95,6 @@ void __trigger_all_cpu_backtrace(void);
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
 #define __ARCH_HAS_DO_SOFTIRQ
+#define ARCH_HAS_NMI_WATCHDOG
 
 #endif
diff --git a/arch/sparc/include/asm/kdebug_64.h b/arch/sparc/include/asm/kdebug_64.h
index f905b773235a..feb3578e12c4 100644
--- a/arch/sparc/include/asm/kdebug_64.h
+++ b/arch/sparc/include/asm/kdebug_64.h
@@ -14,6 +14,8 @@ enum die_val {
 	DIE_TRAP,
 	DIE_TRAP_TL1,
 	DIE_CALL,
+	DIE_NMI,
+	DIE_NMIWATCHDOG,
 };
 
 #endif
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
new file mode 100644
index 000000000000..fbd546dd4feb
--- /dev/null
+++ b/arch/sparc/include/asm/nmi.h
@@ -0,0 +1,10 @@
+#ifndef __NMI_H
+#define __NMI_H
+
+extern int __init nmi_init(void);
+extern void perfctr_irq(int irq, struct pt_regs *regs);
+extern void nmi_adjust_hz(unsigned int new_hz);
+
+extern int nmi_usable;
+
+#endif /* __NMI_H */
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h
index 4249bb523ef1..a2f5c61f924e 100644
--- a/arch/sparc/include/asm/pcr.h
+++ b/arch/sparc/include/asm/pcr.h
@@ -27,4 +27,20 @@ extern void schedule_deferred_pcr_work(void);
 #define PCR_N2_SL1_SHIFT	27
 #define PCR_N2_OV1		0x80000000
 
+extern unsigned int picl_shift;
+
+/* In order to commonize as much of the implementation as
+ * possible, we use PICH as our counter.  Mostly this is
+ * to accommodate Niagara-1 which can only count insn cycles
+ * in PICH.
+ */
+static inline u64 picl_value(unsigned int nmi_hz)
+{
+	u32 delta = local_cpu_data().clock_tick / (nmi_hz << picl_shift);
+
+	return ((u64)((0 - delta) & 0xffffffff)) << 32;
+}
+
+extern u64 pcr_enable;
+
 #endif /* __PCR_H */
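A worked example of picl_value() may help here.  This is a sketch under
assumed numbers, not values from the patch: take a 1.2 GHz %tick clock, the
Niagara-2 picl_shift of 2 (set in pcr.c further down), and nmi_hz of 100.
Then delta is 3,000,000 and PICH, the upper half of the %pic register, is
preloaded to 2^32 - delta so that it wraps, and interrupts, after delta
more counted events:

	/* Standalone user-space sketch of the PIC preload computation;
	 * the clock rate is an assumption, not a value from the patch. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t clock_tick = 1200000000ULL;	/* assumed 1.2 GHz %tick rate */
		unsigned int nmi_hz = 100;		/* desired overflow rate */
		unsigned int picl_shift = 2;		/* Niagara-2 value from pcr_arch_init() */

		uint32_t delta = clock_tick / (nmi_hz << picl_shift);
		uint64_t pic = ((uint64_t)((0u - delta) & 0xffffffff)) << 32;

		/* PICH (upper 32 bits) starts at 2^32 - delta, so the
		 * level-15 interrupt fires after delta counted events. */
		printf("delta=%u pic=%016llx\n", delta, (unsigned long long)pic);
		return 0;
	}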
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index cb182d9c2f2b..54742e58831c 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_SPARC64)	+= hvapi.o
 obj-$(CONFIG_SPARC64)	+= sstate.o
 obj-$(CONFIG_SPARC64)	+= mdesc.o
 obj-$(CONFIG_SPARC64)	+= pcr.o
+obj-$(CONFIG_SPARC64)	+= nmi.o
 
 # sparc32 does not use GENERIC_HARDIRQS but uses the generic devres implementation
 obj-$(CONFIG_SPARC32)	+= devres.o
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index cab8e0286871..e289376198eb 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -196,6 +196,11 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_putc(p, '\n');
 skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+	} else if (i == NR_IRQS) {
+		seq_printf(p, "NMI: ");
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
+		seq_printf(p, "     Non-maskable interrupts\n");
 	}
 	return 0;
 }
@@ -778,69 +783,6 @@ void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-static void unhandled_perf_irq(struct pt_regs *regs)
-{
-	unsigned long pcr, pic;
-
-	read_pcr(pcr);
-	read_pic(pic);
-
-	write_pcr(0);
-
-	printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
-	       smp_processor_id());
-	printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
-	       smp_processor_id(), pcr, pic);
-}
-
-/* Almost a direct copy of the powerpc PMC code.  */
-static DEFINE_SPINLOCK(perf_irq_lock);
-static void *perf_irq_owner_caller; /* mostly for debugging */
-static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;
-
-/* Invoked from level 15 PIL handler in trap table.  */
-void perfctr_irq(int irq, struct pt_regs *regs)
-{
-	clear_softint(1 << irq);
-	perf_irq(regs);
-}
-
-int register_perfctr_intr(void (*handler)(struct pt_regs *))
-{
-	int ret;
-
-	if (!handler)
-		return -EINVAL;
-
-	spin_lock(&perf_irq_lock);
-	if (perf_irq != unhandled_perf_irq) {
-		printk(KERN_WARNING "register_perfctr_intr: "
-		       "perf IRQ busy (reserved by caller %p)\n",
-		       perf_irq_owner_caller);
-		ret = -EBUSY;
-		goto out;
-	}
-
-	perf_irq_owner_caller = __builtin_return_address(0);
-	perf_irq = handler;
-
-	ret = 0;
-out:
-	spin_unlock(&perf_irq_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(register_perfctr_intr);
-
-void release_perfctr_intr(void (*handler)(struct pt_regs *))
-{
-	spin_lock(&perf_irq_lock);
-	perf_irq_owner_caller = NULL;
-	perf_irq = unhandled_perf_irq;
-	spin_unlock(&perf_irq_lock);
-}
-EXPORT_SYMBOL_GPL(release_perfctr_intr);
-
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
new file mode 100644
index 000000000000..ffc690093343
--- /dev/null
+++ b/arch/sparc/kernel/nmi.c
@@ -0,0 +1,224 @@
+/* Pseudo NMI support on sparc64 systems.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ *
+ * The NMI watchdog support and infrastructure is based almost
+ * entirely upon the x86 NMI support code.
+ */
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/kernel_stat.h>
+#include <linux/slab.h>
+#include <linux/kdebug.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+
+#include <asm/ptrace.h>
+#include <asm/local.h>
+#include <asm/pcr.h>
+
+/* We don't have a real NMI on sparc64, but we can fake one
+ * up using profiling counter overflow interrupts and interrupt
+ * levels.
+ *
+ * The profile overflow interrupts at level 15, so we use
+ * level 14 as our IRQ off level.
+ */
+
+static int nmi_watchdog_active;
+static int panic_on_timeout;
+
+int nmi_usable;
+EXPORT_SYMBOL_GPL(nmi_usable);
+
+static unsigned int nmi_hz = HZ;
+
+static DEFINE_PER_CPU(unsigned int, last_irq_sum);
+static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(int, nmi_touch);
+
+void touch_nmi_watchdog(void)
+{
+	if (nmi_watchdog_active) {
+		int cpu;
+
+		for_each_present_cpu(cpu) {
+			if (per_cpu(nmi_touch, cpu) != 1)
+				per_cpu(nmi_touch, cpu) = 1;
+		}
+	}
+
+	touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
+{
+	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
+		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
+		return;
+
+	console_verbose();
+	bust_spinlocks(1);
+
+	printk(KERN_EMERG "%s", str);
+	printk(" on CPU%d, ip %08lx, registers:\n",
+	       smp_processor_id(), regs->tpc);
+	show_regs(regs);
+
+	bust_spinlocks(0);
+
+	if (do_panic || panic_on_oops)
+		panic("Non maskable interrupt");
+
+	local_irq_enable();
+	do_exit(SIGBUS);
+}
+
+notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
+{
+	unsigned int sum, touched = 0;
+	int cpu = smp_processor_id();
+
+	clear_softint(1 << irq);
+	pcr_ops->write(PCR_PIC_PRIV);
+
+	local_cpu_data().__nmi_count++;
+
+	if (notify_die(DIE_NMI, "nmi", regs, 0,
+		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
+		touched = 1;
+
+	sum = kstat_cpu(cpu).irqs[0];
+	if (__get_cpu_var(nmi_touch)) {
+		__get_cpu_var(nmi_touch) = 0;
+		touched = 1;
+	}
+	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
+		local_inc(&__get_cpu_var(alert_counter));
+		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+			die_nmi("BUG: NMI Watchdog detected LOCKUP",
+				regs, panic_on_timeout);
+	} else {
+		__get_cpu_var(last_irq_sum) = sum;
+		local_set(&__get_cpu_var(alert_counter), 0);
+	}
+	if (nmi_usable) {
+		write_pic(picl_value(nmi_hz));
+		pcr_ops->write(pcr_enable);
+	}
+}
+
+static inline unsigned int get_nmi_count(int cpu)
+{
+	return cpu_data(cpu).__nmi_count;
+}
+
+static int endflag __initdata;
+
+static __init void nmi_cpu_busy(void *data)
+{
+	local_irq_enable_in_hardirq();
+	while (endflag == 0)
+		mb();
+}
+
+static void report_broken_nmi(int cpu, int *prev_nmi_count)
+{
+	printk(KERN_CONT "\n");
+
+	printk(KERN_WARNING
+		"WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+			cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
+
+	printk(KERN_WARNING
+		"Please report this to bugzilla.kernel.org,\n");
+	printk(KERN_WARNING
+		"and attach the output of the 'dmesg' command.\n");
+
+	nmi_usable = 0;
+}
+
+static void stop_watchdog(void *unused)
+{
+	pcr_ops->write(PCR_PIC_PRIV);
+}
+
+static int __init check_nmi_watchdog(void)
+{
+	unsigned int *prev_nmi_count;
+	int cpu, err;
+
+	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
+	if (!prev_nmi_count) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	printk(KERN_INFO "Testing NMI watchdog ... ");
+
+	smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
+
+	for_each_possible_cpu(cpu)
+		prev_nmi_count[cpu] = get_nmi_count(cpu);
+	local_irq_enable();
+	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
+
+	for_each_online_cpu(cpu) {
+		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
+			report_broken_nmi(cpu, prev_nmi_count);
+	}
+	endflag = 1;
+	if (!nmi_usable) {
+		kfree(prev_nmi_count);
+		err = -ENODEV;
+		goto error;
+	}
+	printk("OK.\n");
+
+	nmi_hz = 1;
+
+	kfree(prev_nmi_count);
+	return 0;
+error:
+	on_each_cpu(stop_watchdog, NULL, 1);
+	return err;
+}
+
+static void start_watchdog(void *unused)
+{
+	pcr_ops->write(PCR_PIC_PRIV);
+	write_pic(picl_value(nmi_hz));
+
+	pcr_ops->write(pcr_enable);
+}
+
+void nmi_adjust_hz(unsigned int new_hz)
+{
+	nmi_hz = new_hz;
+	on_each_cpu(start_watchdog, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(nmi_adjust_hz);
+
+int __init nmi_init(void)
+{
+	nmi_usable = 1;
+
+	on_each_cpu(start_watchdog, NULL, 1);
+
+	return check_nmi_watchdog();
+}
+
+static int __init setup_nmi_watchdog(char *str)
+{
+	if (!strncmp(str, "panic", 5))
+		panic_on_timeout = 1;
+
+	return 0;
+}
+__setup("nmi_watchdog=", setup_nmi_watchdog);
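One consequence of the detection rule in perfctr_irq() above: code that
legitimately spins with interrupts disabled for more than a few seconds
must pet the watchdog, exactly as on x86.  A minimal usage sketch, in which
device_ready() is a hypothetical helper and the delay constant is
arbitrary:

	#include <linux/nmi.h>
	#include <linux/delay.h>

	extern int device_ready(void);	/* hypothetical hardware poll */

	static void wait_for_device(void)
	{
		while (!device_ready()) {
			/* Sets every cpu's nmi_touch flag, so the next NMI
			 * on each cpu resets its alert counter instead of
			 * counting toward the 5 * nmi_hz lockup threshold. */
			touch_nmi_watchdog();
			udelay(50);
		}
	}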
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c4f24703b165..92e0dda141a4 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -9,12 +9,22 @@
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
+#include <asm/nmi.h>
 
 /* This code is shared between various users of the performance
  * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
  * perf_counter support layer.
  */
 
+#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
+#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
+				 PCR_N2_TOE_OV1 | \
+				 (2 << PCR_N2_SL1_SHIFT) | \
+				 (0xff << PCR_N2_MASK1_SHIFT))
+
+u64 pcr_enable;
+unsigned int picl_shift;
+
 /* Performance counter interrupts run unmasked at PIL level 15.
  * Therefore we can't do things like wakeups and other work
  * that expects IRQ disabling to be adhered to in locking etc.
@@ -117,12 +127,15 @@ int __init pcr_arch_init(void)
 	switch (tlb_type) {
 	case hypervisor:
 		pcr_ops = &n2_pcr_ops;
+		pcr_enable = PCR_N2_ENABLE;
+		picl_shift = 2;
 		break;
 
-	case spitfire:
 	case cheetah:
 	case cheetah_plus:
+	case spitfire:
 		pcr_ops = &direct_pcr_ops;
+		pcr_enable = PCR_SUN4U_ENABLE;
 		break;
 
 	default:
@@ -130,7 +143,7 @@ int __init pcr_arch_init(void)
 		goto out_unregister;
 	}
 
-	return 0;
+	return nmi_init();
 
 out_unregister:
 	unregister_perf_hsvc();
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index c8877a5202b0..d172f86439b1 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -13,117 +13,57 @@
 #include <linux/init.h>
 
 #ifdef CONFIG_SPARC64
-#include <asm/hypervisor.h>
-#include <asm/spitfire.h>
-#include <asm/cpudata.h>
-#include <asm/irq.h>
-#include <asm/pcr.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/kdebug.h>
+#include <asm/nmi.h>
 
-static int nmi_enabled;
-
-/* In order to commonize as much of the implementation as
- * possible, we use PICH as our counter.  Mostly this is
- * to accommodate Niagara-1 which can only count insn cycles
- * in PICH.
- */
-static u64 picl_value(void)
-{
-	u32 delta = local_cpu_data().clock_tick / HZ;
-
-	return ((u64)((0 - delta) & 0xffffffff)) << 32;
-}
-
-#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
-#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
-				 PCR_N2_TOE_OV1 | \
-				 (2 << PCR_N2_SL1_SHIFT) | \
-				 (0xff << PCR_N2_MASK1_SHIFT))
-
-static u64 pcr_enable;
-
-static void nmi_handler(struct pt_regs *regs)
+static int profile_timer_exceptions_notify(struct notifier_block *self,
+					   unsigned long val, void *data)
 {
-	pcr_ops->write(PCR_PIC_PRIV);
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
 
-	if (nmi_enabled) {
-		oprofile_add_sample(regs, 0);
-
-		write_pic(picl_value());
-		pcr_ops->write(pcr_enable);
+	switch (val) {
+	case DIE_NMI:
+		oprofile_add_sample(args->regs, 0);
+		ret = NOTIFY_STOP;
+		break;
+	default:
+		break;
 	}
+	return ret;
 }
 
-/* We count "clock cycle" events in the lower 32-bit PIC.
- * Then configure it such that it overflows every HZ, and thus
- * generates a level 15 interrupt at that frequency.
- */
-static void cpu_nmi_start(void *_unused)
-{
-	pcr_ops->write(PCR_PIC_PRIV);
-	write_pic(picl_value());
-
-	pcr_ops->write(pcr_enable);
-}
+static struct notifier_block profile_timer_exceptions_nb = {
+	.notifier_call = profile_timer_exceptions_notify,
+};
 
-static void cpu_nmi_stop(void *_unused)
+static int timer_start(void)
 {
-	pcr_ops->write(PCR_PIC_PRIV);
+	if (register_die_notifier(&profile_timer_exceptions_nb))
+		return 1;
+	nmi_adjust_hz(HZ);
+	return 0;
 }
 
-static int nmi_start(void)
-{
-	int err = register_perfctr_intr(nmi_handler);
-
-	if (!err) {
-		nmi_enabled = 1;
-		wmb();
-		err = on_each_cpu(cpu_nmi_start, NULL, 1);
-		if (err) {
-			nmi_enabled = 0;
-			wmb();
-			on_each_cpu(cpu_nmi_stop, NULL, 1);
-			release_perfctr_intr(nmi_handler);
-		}
-	}
-
-	return err;
-}
-
-static void nmi_stop(void)
+static void timer_stop(void)
 {
-	nmi_enabled = 0;
-	wmb();
-
-	on_each_cpu(cpu_nmi_stop, NULL, 1);
-	release_perfctr_intr(nmi_handler);
-	synchronize_sched();
+	nmi_adjust_hz(1);
+	unregister_die_notifier(&profile_timer_exceptions_nb);
+	synchronize_sched(); /* Allow already-started NMIs to complete. */
 }
 
-static int oprofile_nmi_init(struct oprofile_operations *ops)
+static int op_nmi_timer_init(struct oprofile_operations *ops)
 {
-	switch (tlb_type) {
-	case hypervisor:
-		pcr_enable = PCR_N2_ENABLE;
-		break;
-
-	case cheetah:
-	case cheetah_plus:
-		pcr_enable = PCR_SUN4U_ENABLE;
-		break;
-
-	default:
+	if (!nmi_usable)
 		return -ENODEV;
-	}
 
-	ops->create_files = NULL;
-	ops->setup = NULL;
-	ops->shutdown = NULL;
-	ops->start = nmi_start;
-	ops->stop = nmi_stop;
+	ops->start = timer_start;
+	ops->stop = timer_stop;
 	ops->cpu_type = "timer";
-
-	printk(KERN_INFO "oprofile: Using perfctr based NMI timer interrupt.\n");
-
+	printk(KERN_INFO "oprofile: Using perfctr NMI timer interrupt.\n");
 	return 0;
 }
 #endif
@@ -133,7 +73,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	int ret = -ENODEV;
 
 #ifdef CONFIG_SPARC64
-	ret = oprofile_nmi_init(ops);
+	ret = op_nmi_timer_init(ops);
 	if (!ret)
 		return ret;
 #endif
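A closing note on the oprofile conversion: returning NOTIFY_STOP from the
DIE_NMI notifier is what marks the event as handled in perfctr_irq(),
setting "touched" so profiling NMIs are not mistaken for a lockup.  Any
other client could hook the same chain; a hedged sketch, with all names
outside the patch hypothetical:

	#include <linux/kdebug.h>
	#include <linux/notifier.h>

	static int my_nmi_notify(struct notifier_block *self,
				 unsigned long val, void *data)
	{
		struct die_args *args = data;

		if (val != DIE_NMI)
			return NOTIFY_DONE;
		/* ... sample or log args->regs here ... */
		return NOTIFY_STOP;	/* claim the NMI; watchdog sees it as touched */
	}

	static struct notifier_block my_nmi_nb = {
		.notifier_call = my_nmi_notify,
	};

	/* Call register_die_notifier(&my_nmi_nb) from module init,
	 * mirroring timer_start() above. */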