Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/Makefile     |   2
-rw-r--r--  arch/sparc/kernel/cpu.c        |  33
-rw-r--r--  arch/sparc/kernel/head_64.S    |  31
-rw-r--r--  arch/sparc/kernel/irq_64.c     |  68
-rw-r--r--  arch/sparc/kernel/kernel.h     |   1
-rw-r--r--  arch/sparc/kernel/nmi.c        | 225
-rw-r--r--  arch/sparc/kernel/pcr.c        | 158
-rw-r--r--  arch/sparc/kernel/process_64.c |   5
-rw-r--r--  arch/sparc/kernel/setup_64.c   |   2
-rw-r--r--  arch/sparc/kernel/ttable.S     |   3
10 files changed, 449 insertions, 79 deletions
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 53adcaa0348b..54742e58831c 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -52,6 +52,8 @@ obj-$(CONFIG_SPARC64) += visemul.o
 obj-$(CONFIG_SPARC64) += hvapi.o
 obj-$(CONFIG_SPARC64) += sstate.o
 obj-$(CONFIG_SPARC64) += mdesc.o
+obj-$(CONFIG_SPARC64) += pcr.o
+obj-$(CONFIG_SPARC64) += nmi.o
 
 # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
 obj-$(CONFIG_SPARC32) += devres.o
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 32d32b4824f5..d85c3dc4953a 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -26,6 +26,7 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 struct cpu_info {
 	int psr_vers;
 	const char *name;
+	const char *pmu_name;
 };
 
 struct fpu_info {
@@ -45,6 +46,9 @@ struct manufacturer_info {
 #define CPU(ver, _name) \
 { .psr_vers = ver, .name = _name }
 
+#define CPU_PMU(ver, _name, _pmu_name) \
+{ .psr_vers = ver, .name = _name, .pmu_name = _pmu_name }
+
 #define FPU(ver, _name) \
 { .fp_vers = ver, .name = _name }
 
@@ -183,10 +187,10 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
 },{
 	0x17,
 	.cpu_info = {
-		CPU(0x10, "TI UltraSparc I (SpitFire)"),
-		CPU(0x11, "TI UltraSparc II (BlackBird)"),
-		CPU(0x12, "TI UltraSparc IIi (Sabre)"),
-		CPU(0x13, "TI UltraSparc IIe (Hummingbird)"),
+		CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"),
+		CPU_PMU(0x11, "TI UltraSparc II (BlackBird)", "ultra12"),
+		CPU_PMU(0x12, "TI UltraSparc IIi (Sabre)", "ultra12"),
+		CPU_PMU(0x13, "TI UltraSparc IIe (Hummingbird)", "ultra12"),
 		CPU(-1, NULL)
 	},
 	.fpu_info = {
@@ -199,7 +203,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
 },{
 	0x22,
 	.cpu_info = {
-		CPU(0x10, "TI UltraSparc I (SpitFire)"),
+		CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"),
 		CPU(-1, NULL)
 	},
 	.fpu_info = {
@@ -209,12 +213,12 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
 },{
 	0x3e,
 	.cpu_info = {
-		CPU(0x14, "TI UltraSparc III (Cheetah)"),
-		CPU(0x15, "TI UltraSparc III+ (Cheetah+)"),
-		CPU(0x16, "TI UltraSparc IIIi (Jalapeno)"),
-		CPU(0x18, "TI UltraSparc IV (Jaguar)"),
-		CPU(0x19, "TI UltraSparc IV+ (Panther)"),
-		CPU(0x22, "TI UltraSparc IIIi+ (Serrano)"),
+		CPU_PMU(0x14, "TI UltraSparc III (Cheetah)", "ultra3"),
+		CPU_PMU(0x15, "TI UltraSparc III+ (Cheetah+)", "ultra3+"),
+		CPU_PMU(0x16, "TI UltraSparc IIIi (Jalapeno)", "ultra3i"),
+		CPU_PMU(0x18, "TI UltraSparc IV (Jaguar)", "ultra3+"),
+		CPU_PMU(0x19, "TI UltraSparc IV+ (Panther)", "ultra4+"),
+		CPU_PMU(0x22, "TI UltraSparc IIIi+ (Serrano)", "ultra3i"),
 		CPU(-1, NULL)
 	},
 	.fpu_info = {
@@ -234,6 +238,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
 
 const char *sparc_cpu_type;
 const char *sparc_fpu_type;
+const char *sparc_pmu_type;
 
 unsigned int fsr_storage;
 
@@ -244,6 +249,7 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
 
 	sparc_cpu_type = NULL;
 	sparc_fpu_type = NULL;
+	sparc_pmu_type = NULL;
 	manuf = NULL;
 
 	for (i = 0; i < ARRAY_SIZE(manufacturer_info); i++)
@@ -263,6 +269,7 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
 			{
 				if (cpu->psr_vers == psr_vers) {
 					sparc_cpu_type = cpu->name;
+					sparc_pmu_type = cpu->pmu_name;
 					sparc_fpu_type = "No FPU";
 					break;
 				}
@@ -290,6 +297,8 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
 		       psr_impl, fpu_vers);
 		sparc_fpu_type = "Unknown FPU";
 	}
+	if (sparc_pmu_type == NULL)
+		sparc_pmu_type = "Unknown PMU";
 }
 
 #ifdef CONFIG_SPARC32
@@ -315,11 +324,13 @@ static void __init sun4v_cpu_probe(void)
 	case SUN4V_CHIP_NIAGARA1:
 		sparc_cpu_type = "UltraSparc T1 (Niagara)";
 		sparc_fpu_type = "UltraSparc T1 integrated FPU";
+		sparc_pmu_type = "niagara";
 		break;
 
 	case SUN4V_CHIP_NIAGARA2:
 		sparc_cpu_type = "UltraSparc T2 (Niagara2)";
 		sparc_fpu_type = "UltraSparc T2 integrated FPU";
+		sparc_pmu_type = "niagara2";
 		break;
 
 	default:
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 8ffee714f932..a46c3a21e26d 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -891,10 +891,35 @@ prom_tba: .xword 0
 tlb_type:	.word	0	/* Must NOT end up in BSS */
 	.section	".fixup",#alloc,#execinstr
 
-	.globl	__ret_efault, __retl_efault
-__ret_efault:
+	.globl	__ret_efault, __retl_efault, __ret_one, __retl_one
+ENTRY(__ret_efault)
 	ret
 	 restore %g0, -EFAULT, %o0
-__retl_efault:
+ENDPROC(__ret_efault)
+
+ENTRY(__retl_efault)
 	retl
 	 mov	-EFAULT, %o0
+ENDPROC(__retl_efault)
+
+ENTRY(__retl_one)
+	retl
+	 mov	1, %o0
+ENDPROC(__retl_one)
+
+ENTRY(__ret_one_asi)
+	wr	%g0, ASI_AIUS, %asi
+	ret
+	 restore %g0, 1, %o0
+ENDPROC(__ret_one_asi)
+
+ENTRY(__retl_one_asi)
+	wr	%g0, ASI_AIUS, %asi
+	retl
+	 mov	1, %o0
+ENDPROC(__retl_one_asi)
+
+ENTRY(__retl_o1)
+	retl
+	 mov	%o1, %o0
+ENDPROC(__retl_o1)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index cab8e0286871..e289376198eb 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -196,6 +196,11 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_putc(p, '\n');
 skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+	} else if (i == NR_IRQS) {
+		seq_printf(p, "NMI: ");
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
+		seq_printf(p, " Non-maskable interrupts\n");
 	}
 	return 0;
 }
@@ -778,69 +783,6 @@ void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-static void unhandled_perf_irq(struct pt_regs *regs)
-{
-	unsigned long pcr, pic;
-
-	read_pcr(pcr);
-	read_pic(pic);
-
-	write_pcr(0);
-
-	printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
-	       smp_processor_id());
-	printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
-	       smp_processor_id(), pcr, pic);
-}
-
-/* Almost a direct copy of the powerpc PMC code.  */
-static DEFINE_SPINLOCK(perf_irq_lock);
-static void *perf_irq_owner_caller; /* mostly for debugging */
-static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;
-
-/* Invoked from level 15 PIL handler in trap table.  */
-void perfctr_irq(int irq, struct pt_regs *regs)
-{
-	clear_softint(1 << irq);
-	perf_irq(regs);
-}
-
-int register_perfctr_intr(void (*handler)(struct pt_regs *))
-{
-	int ret;
-
-	if (!handler)
-		return -EINVAL;
-
-	spin_lock(&perf_irq_lock);
-	if (perf_irq != unhandled_perf_irq) {
-		printk(KERN_WARNING "register_perfctr_intr: "
-		       "perf IRQ busy (reserved by caller %p)\n",
-		       perf_irq_owner_caller);
-		ret = -EBUSY;
-		goto out;
-	}
-
-	perf_irq_owner_caller = __builtin_return_address(0);
-	perf_irq = handler;
-
-	ret = 0;
-out:
-	spin_unlock(&perf_irq_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(register_perfctr_intr);
-
-void release_perfctr_intr(void (*handler)(struct pt_regs *))
-{
-	spin_lock(&perf_irq_lock);
-	perf_irq_owner_caller = NULL;
-	perf_irq = unhandled_perf_irq;
-	spin_unlock(&perf_irq_lock);
-}
-EXPORT_SYMBOL_GPL(release_perfctr_intr);
-
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 81a972e8d8ea..15d8a3f645c9 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -5,6 +5,7 @@
 
 /* cpu.c */
 extern const char *sparc_cpu_type;
+extern const char *sparc_pmu_type;
 extern const char *sparc_fpu_type;
 
 extern unsigned int fsr_storage;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
new file mode 100644
index 000000000000..f3577223c863
--- /dev/null
+++ b/arch/sparc/kernel/nmi.c
@@ -0,0 +1,225 @@
+/* Pseudo NMI support on sparc64 systems.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ *
+ * The NMI watchdog support and infrastructure is based almost
+ * entirely upon the x86 NMI support code.
+ */
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/kernel_stat.h>
+#include <linux/slab.h>
+#include <linux/kdebug.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+
+#include <asm/ptrace.h>
+#include <asm/local.h>
+#include <asm/pcr.h>
+
+/* We don't have a real NMI on sparc64, but we can fake one
+ * up using profiling counter overflow interrupts and interrupt
+ * levels.
+ *
+ * The profile overflow interrupts at level 15, so we use
+ * level 14 as our IRQ off level.
+ */
+
+static int nmi_watchdog_active;
+static int panic_on_timeout;
+
+int nmi_usable;
+EXPORT_SYMBOL_GPL(nmi_usable);
+
+static unsigned int nmi_hz = HZ;
+
+static DEFINE_PER_CPU(unsigned int, last_irq_sum);
+static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(int, nmi_touch);
+
+void touch_nmi_watchdog(void)
+{
+	if (nmi_watchdog_active) {
+		int cpu;
+
+		for_each_present_cpu(cpu) {
+			if (per_cpu(nmi_touch, cpu) != 1)
+				per_cpu(nmi_touch, cpu) = 1;
+		}
+	}
+
+	touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
+{
+	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
+		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
+		return;
+
+	console_verbose();
+	bust_spinlocks(1);
+
+	printk(KERN_EMERG "%s", str);
+	printk(" on CPU%d, ip %08lx, registers:\n",
+	       smp_processor_id(), regs->tpc);
+	show_regs(regs);
+	dump_stack();
+
+	bust_spinlocks(0);
+
+	if (do_panic || panic_on_oops)
+		panic("Non maskable interrupt");
+
+	local_irq_enable();
+	do_exit(SIGBUS);
+}
+
+notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
+{
+	unsigned int sum, touched = 0;
+	int cpu = smp_processor_id();
+
+	clear_softint(1 << irq);
+	pcr_ops->write(PCR_PIC_PRIV);
+
+	local_cpu_data().__nmi_count++;
+
+	if (notify_die(DIE_NMI, "nmi", regs, 0,
+		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
+		touched = 1;
+
+	sum = kstat_irqs_cpu(0, cpu);
+	if (__get_cpu_var(nmi_touch)) {
+		__get_cpu_var(nmi_touch) = 0;
+		touched = 1;
+	}
+	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
+		local_inc(&__get_cpu_var(alert_counter));
+		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+			die_nmi("BUG: NMI Watchdog detected LOCKUP",
+				regs, panic_on_timeout);
+	} else {
+		__get_cpu_var(last_irq_sum) = sum;
+		local_set(&__get_cpu_var(alert_counter), 0);
+	}
+	if (nmi_usable) {
+		write_pic(picl_value(nmi_hz));
+		pcr_ops->write(pcr_enable);
+	}
+}
+
+static inline unsigned int get_nmi_count(int cpu)
+{
+	return cpu_data(cpu).__nmi_count;
+}
+
+static int endflag __initdata;
+
+static __init void nmi_cpu_busy(void *data)
+{
+	local_irq_enable_in_hardirq();
+	while (endflag == 0)
+		mb();
+}
+
+static void report_broken_nmi(int cpu, int *prev_nmi_count)
+{
+	printk(KERN_CONT "\n");
+
+	printk(KERN_WARNING
+		"WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+			cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
+
+	printk(KERN_WARNING
+		"Please report this to bugzilla.kernel.org,\n");
+	printk(KERN_WARNING
+		"and attach the output of the 'dmesg' command.\n");
+
+	nmi_usable = 0;
+}
+
+static void stop_watchdog(void *unused)
+{
+	pcr_ops->write(PCR_PIC_PRIV);
+}
+
+static int __init check_nmi_watchdog(void)
+{
+	unsigned int *prev_nmi_count;
+	int cpu, err;
+
+	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
+	if (!prev_nmi_count) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	printk(KERN_INFO "Testing NMI watchdog ... ");
+
+	smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
+
+	for_each_possible_cpu(cpu)
+		prev_nmi_count[cpu] = get_nmi_count(cpu);
+	local_irq_enable();
+	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
+
+	for_each_online_cpu(cpu) {
+		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
+			report_broken_nmi(cpu, prev_nmi_count);
+	}
+	endflag = 1;
+	if (!nmi_usable) {
+		kfree(prev_nmi_count);
+		err = -ENODEV;
+		goto error;
+	}
+	printk("OK.\n");
+
+	nmi_hz = 1;
+
+	kfree(prev_nmi_count);
+	return 0;
+error:
+	on_each_cpu(stop_watchdog, NULL, 1);
+	return err;
+}
+
+static void start_watchdog(void *unused)
+{
+	pcr_ops->write(PCR_PIC_PRIV);
+	write_pic(picl_value(nmi_hz));
+
+	pcr_ops->write(pcr_enable);
+}
+
+void nmi_adjust_hz(unsigned int new_hz)
+{
+	nmi_hz = new_hz;
+	on_each_cpu(start_watchdog, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(nmi_adjust_hz);
+
+int __init nmi_init(void)
+{
+	nmi_usable = 1;
+
+	on_each_cpu(start_watchdog, NULL, 1);
+
+	return check_nmi_watchdog();
+}
+
+static int __init setup_nmi_watchdog(char *str)
+{
+	if (!strncmp(str, "panic", 5))
+		panic_on_timeout = 1;
+
+	return 0;
+}
+__setup("nmi_watchdog=", setup_nmi_watchdog);
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
new file mode 100644
index 000000000000..1ae8cdd7e703
--- /dev/null
+++ b/arch/sparc/kernel/pcr.c
@@ -0,0 +1,158 @@
+/* pcr.c: Generic sparc64 performance counter infrastructure.
+ *
+ * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+
+#include <asm/pil.h>
+#include <asm/pcr.h>
+#include <asm/nmi.h>
+
+/* This code is shared between various users of the performance
+ * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
+ * perf_counter support layer.
+ */
+
+#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
+#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
+				 PCR_N2_TOE_OV1 | \
+				 (2 << PCR_N2_SL1_SHIFT) | \
+				 (0xff << PCR_N2_MASK1_SHIFT))
+
+u64 pcr_enable;
+unsigned int picl_shift;
+
+/* Performance counter interrupts run unmasked at PIL level 15.
+ * Therefore we can't do things like wakeups and other work
+ * that expects IRQ disabling to be adhered to in locking etc.
+ *
+ * Therefore in such situations we defer the work by signalling
+ * a lower level cpu IRQ.
+ */
+void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+{
+	clear_softint(1 << PIL_DEFERRED_PCR_WORK);
+}
+
+void schedule_deferred_pcr_work(void)
+{
+	set_softint(1 << PIL_DEFERRED_PCR_WORK);
+}
+
+const struct pcr_ops *pcr_ops;
+EXPORT_SYMBOL_GPL(pcr_ops);
+
+static u64 direct_pcr_read(void)
+{
+	u64 val;
+
+	read_pcr(val);
+	return val;
+}
+
+static void direct_pcr_write(u64 val)
+{
+	write_pcr(val);
+}
+
+static const struct pcr_ops direct_pcr_ops = {
+	.read = direct_pcr_read,
+	.write = direct_pcr_write,
+};
+
+static void n2_pcr_write(u64 val)
+{
+	unsigned long ret;
+
+	ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
+	if (ret != HV_EOK)
+		write_pcr(val);
+}
+
+static const struct pcr_ops n2_pcr_ops = {
+	.read = direct_pcr_read,
+	.write = n2_pcr_write,
+};
+
+static unsigned long perf_hsvc_group;
+static unsigned long perf_hsvc_major;
+static unsigned long perf_hsvc_minor;
+
+static int __init register_perf_hsvc(void)
+{
+	if (tlb_type == hypervisor) {
+		switch (sun4v_chip_type) {
+		case SUN4V_CHIP_NIAGARA1:
+			perf_hsvc_group = HV_GRP_NIAG_PERF;
+			break;
+
+		case SUN4V_CHIP_NIAGARA2:
+			perf_hsvc_group = HV_GRP_N2_CPU;
+			break;
+
+		default:
+			return -ENODEV;
+		}
+
+
+		perf_hsvc_major = 1;
+		perf_hsvc_minor = 0;
+		if (sun4v_hvapi_register(perf_hsvc_group,
+					 perf_hsvc_major,
+					 &perf_hsvc_minor)) {
+			printk("perfmon: Could not register hvapi.\n");
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+static void __init unregister_perf_hsvc(void)
+{
+	if (tlb_type != hypervisor)
+		return;
+	sun4v_hvapi_unregister(perf_hsvc_group);
+}
+
+int __init pcr_arch_init(void)
+{
+	int err = register_perf_hsvc();
+
+	if (err)
+		return err;
+
+	switch (tlb_type) {
+	case hypervisor:
+		pcr_ops = &n2_pcr_ops;
+		pcr_enable = PCR_N2_ENABLE;
+		picl_shift = 2;
+		break;
+
+	case cheetah:
+	case cheetah_plus:
+		pcr_ops = &direct_pcr_ops;
+		pcr_enable = PCR_SUN4U_ENABLE;
+		break;
+
+	case spitfire:
+		/* UltraSPARC-I/II and derivatives lack a profile
+		 * counter overflow interrupt so we can't make use of
+		 * their hardware currently.
+		 */
+		/* fallthrough */
+	default:
+		err = -ENODEV;
+		goto out_unregister;
+	}
+
+	return nmi_init();
+
+out_unregister:
+	unregister_perf_hsvc();
+	return err;
+}
+
+arch_initcall(pcr_arch_init);
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index cc8b5604442c..a73954b87f0a 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -29,6 +29,7 @@
 #include <linux/cpu.h>
 #include <linux/elfcore.h>
 #include <linux/sysrq.h>
+#include <linux/nmi.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -52,8 +53,10 @@
 
 static void sparc64_yield(int cpu)
 {
-	if (tlb_type != hypervisor)
+	if (tlb_type != hypervisor) {
+		touch_nmi_watchdog();
 		return;
+	}
 
 	clear_thread_flag(TIF_POLLING_NRFLAG);
 	smp_mb__after_clear_bit();
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 49d061f4ae9d..f2bcfd2967d7 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -354,6 +354,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 	seq_printf(m,
 		   "cpu\t\t: %s\n"
 		   "fpu\t\t: %s\n"
+		   "pmu\t\t: %s\n"
 		   "prom\t\t: %s\n"
 		   "type\t\t: %s\n"
 		   "ncpus probed\t: %d\n"
@@ -366,6 +367,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 	   ,
 	   sparc_cpu_type,
 	   sparc_fpu_type,
+	   sparc_pmu_type,
 	   prom_version,
 	   ((tlb_type == hypervisor) ?
 	    "sun4v" :
diff --git a/arch/sparc/kernel/ttable.S b/arch/sparc/kernel/ttable.S
index ea925503b42e..d9bdfb9d5c18 100644
--- a/arch/sparc/kernel/ttable.S
+++ b/arch/sparc/kernel/ttable.S
@@ -63,7 +63,8 @@ tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6)
 #else
 tl0_irq6:	BTRAP(0x46)
 #endif
-tl0_irq7:	BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
+tl0_irq7:	TRAP_IRQ(deferred_pcr_work_irq, 7)
+tl0_irq8:	BTRAP(0x48) BTRAP(0x49)
 tl0_irq10:	BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
 tl0_irq14:	TRAP_IRQ(timer_interrupt, 14)
 tl0_irq15:	TRAP_NMI_IRQ(perfctr_irq, 15)