author		Max Filippov <jcmvbkbc@gmail.com>	2015-07-16 03:37:31 -0400
committer	Max Filippov <jcmvbkbc@gmail.com>	2015-08-17 00:33:39 -0400
commit		38fef73c21d117cf992fb5ec6e30630e54e13f4f (patch)
tree		ca3aae4c2e29ee81c66dcc16eb835e0b8c4e631f
parent		98e298329e7b391293da5097817437292c842dc9 (diff)
xtensa: implement fake NMI
In case perf IRQ is the highest of the medium-level IRQs, and is alone
on its level, it may be treated as NMI:
- LOCKLEVEL is defined to be one level less than EXCM level,
- IRQ masking never lowers current IRQ level,
- new fake exception cause code, EXCCAUSE_MAPPED_NMI is assigned to that
  IRQ; new second level exception handler, do_nmi, assigned to it
  handles it as NMI,
- atomic operations in configurations without s32c1i still need to mask
  all interrupts.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
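A note on the level arithmetic: the patch keeps the perf IRQ's own level
(TOPLEVEL, i.e. XCHAL_EXCM_LEVEL) unmasked by defining LOCKLEVEL one level
below it, but only when the profiling interrupt qualifies. The standalone C
sketch below mirrors the preprocessor checks added to processor.h in this
patch; the configuration values are hypothetical examples (on real hardware
they come from the variant's core-isa.h), so treat it as an illustration,
not kernel code.

#include <stdio.h>

/* Hypothetical variant configuration (normally from <variant/core-isa.h>). */
#define XCHAL_EXCM_LEVEL	3		/* highest medium level / EXCM level */
#define PROFILING_INTLEVEL	3		/* level of the perf (profiling) IRQ */
#define PROFILING_LEVEL_MASK	0x00400000	/* single bit set: perf IRQ is alone on its level */

#define IS_POW2(v)	(((v) & ((v) - 1)) == 0)

int main(void)
{
	int toplevel = XCHAL_EXCM_LEVEL;
	int locklevel;

	/* Perf IRQ may be treated as NMI only if it sits at the EXCM level,
	 * that level is above 1, and it is the only IRQ on that level. */
	if (PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL &&
	    XCHAL_EXCM_LEVEL > 1 &&
	    IS_POW2(PROFILING_LEVEL_MASK))
		locklevel = XCHAL_EXCM_LEVEL - 1;	/* IRQ masking stops below the perf IRQ */
	else
		locklevel = XCHAL_EXCM_LEVEL;

	printf("LOCKLEVEL=%d TOPLEVEL=%d XTENSA_FAKE_NMI=%d\n",
	       locklevel, toplevel, locklevel < toplevel);
	return 0;
}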
-rw-r--r--	arch/xtensa/include/asm/atomic.h	10
-rw-r--r--	arch/xtensa/include/asm/cmpxchg.h	4
-rw-r--r--	arch/xtensa/include/asm/irqflags.h	22
-rw-r--r--	arch/xtensa/include/asm/processor.h	31
-rw-r--r--	arch/xtensa/kernel/entry.S	93
-rw-r--r--	arch/xtensa/kernel/irq.c	8
-rw-r--r--	arch/xtensa/kernel/perf_event.c	6
-rw-r--r--	arch/xtensa/kernel/traps.c	26
-rw-r--r--	arch/xtensa/kernel/vectors.S	10
9 files changed, 183 insertions(+), 27 deletions(-)
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 00b7d46b35b8..ebcd1f6fc8cb 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -29,7 +29,7 @@
  *
  * Locking interrupts looks like this:
  *
- *    rsil a15, LOCKLEVEL
+ *    rsil a15, TOPLEVEL
  *    <code>
  *    wsr  a15, PS
  *    rsync
@@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v)		\
 	unsigned int vval;						\
 									\
 	__asm__ __volatile__(						\
-			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"\
+			"       rsil    a15, "__stringify(TOPLEVEL)"\n"\
 			"       l32i    %0, %2, 0\n"			\
 			"       " #op " %0, %0, %1\n"			\
 			"       s32i    %0, %2, 0\n"			\
@@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v)	\
 	unsigned int vval;						\
 									\
 	__asm__ __volatile__(						\
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"	\
+			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
 			"       l32i    %0, %2, 0\n"			\
 			"       " #op " %0, %0, %1\n"			\
 			"       s32i    %0, %2, 0\n"			\
@@ -272,7 +272,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 	unsigned int vval;
 
 	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
+			"       rsil    a15,"__stringify(TOPLEVEL)"\n"
 			"       l32i    %0, %2, 0\n"
 			"       xor     %1, %4, %3\n"
 			"       and     %0, %0, %4\n"
@@ -306,7 +306,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	unsigned int vval;
 
 	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
+			"       rsil    a15,"__stringify(TOPLEVEL)"\n"
 			"       l32i    %0, %2, 0\n"
 			"       or      %0, %0, %1\n"
 			"       s32i    %0, %2, 0\n"
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 370b26f38414..201e9009efd8 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
 	return new;
 #else
 	__asm__ __volatile__(
-			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
+			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
 			"       l32i    %0, %1, 0\n"
 			"       bne     %0, %2, 1f\n"
 			"       s32i    %3, %1, 0\n"
@@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 #else
 	unsigned long tmp;
 	__asm__ __volatile__(
-			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
+			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
 			"       l32i    %0, %1, 0\n"
 			"       s32i    %2, %1, 0\n"
 			"       wsr     a15, ps\n"
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
index ea36674c6ec5..8e090c709046 100644
--- a/arch/xtensa/include/asm/irqflags.h
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -6,6 +6,7 @@
  * for more details.
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_IRQFLAGS_H
@@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void)
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
-	asm volatile("rsil	%0, "__stringify(LOCKLEVEL)
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+	unsigned long tmp;
+
+	asm volatile("rsr	%0, ps\t\n"
+		     "extui	%1, %0, 0, 4\t\n"
+		     "bgei	%1, "__stringify(LOCKLEVEL)", 1f\t\n"
+		     "rsil	%0, "__stringify(LOCKLEVEL)"\n"
+		     "1:"
+		     : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+	asm volatile("rsr	%0, ps\t\n"
+		     "or	%0, %0, %1\t\n"
+		     "xsr	%0, ps\t\n"
+		     "rsync"
+		     : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
+	asm volatile("rsil	%0, "__stringify(LOCKLEVEL)
 		     : "=a" (flags) :: "memory");
+#endif
 	return flags;
 }
 
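The behavioural point of the new arch_local_irq_save() is that masking may
raise PS.INTLEVEL but never lower it, so code that takes a spinlock while
the fake NMI is running cannot accidentally unmask it. Below is a rough
standalone C model of the non-debug fast path, with PS modelled as a plain
word and LOCKLEVEL assumed to be 2 purely for illustration:

#include <stdio.h>

#define LOCKLEVEL		2	/* assumed: XCHAL_EXCM_LEVEL 3, fake NMI enabled */
#define PS_INTLEVEL_MASK	0xf	/* PS.INTLEVEL lives in the low 4 bits of PS */

/* Model of the rsr/or/xsr sequence: OR LOCKLEVEL into PS instead of
 * overwriting PS.INTLEVEL, so the result is never below the current level. */
static unsigned long irq_save(unsigned long *ps)
{
	unsigned long flags = *ps;

	*ps |= LOCKLEVEL;	/* a plain rsil would force INTLEVEL to exactly LOCKLEVEL */
	return flags;
}

int main(void)
{
	unsigned long ps = 0;	/* ordinary kernel context: INTLEVEL 0 */
	irq_save(&ps);
	printf("INTLEVEL 0 -> %lu\n", ps & PS_INTLEVEL_MASK);	/* raised to 2 */

	ps = 3;			/* inside the fake NMI: INTLEVEL == TOPLEVEL == 3 */
	irq_save(&ps);
	printf("INTLEVEL 3 -> %lu\n", ps & PS_INTLEVEL_MASK);	/* stays 3 */
	return 0;
}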
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index b61bdf0eea25..83e2e4bc01ba 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -1,11 +1,10 @@
 /*
- * include/asm-xtensa/processor.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_PROCESSOR_H
@@ -45,6 +44,14 @@
 #define STACK_TOP_MAX	STACK_TOP
 
 /*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+
+#define EXCCAUSE_MAPPED_NMI	62
+
+/*
  * General exception cause assigned to debug exceptions. Debug exceptions go
  * to their own vector, rather than the general exception vectors (user,
  * kernel, double); and their specific causes are reported via DEBUGCAUSE
@@ -65,10 +72,30 @@
 
 #define VALID_DOUBLE_EXCEPTION_ADDRESS	64
 
+#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
+#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
+
+#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
+#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
+
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
+
 /* LOCKLEVEL defines the interrupt level that masks all
  * general-purpose interrupts.
  */
+#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
+	defined(XCHAL_PROFILING_INTERRUPT) && \
+	PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+	XCHAL_EXCM_LEVEL > 1 && \
+	IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
+#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
+#else
 #define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
 
 /* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
  * registers
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 5703bc13a9ba..504130357597 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1,6 +1,4 @@
 /*
- * arch/xtensa/kernel/entry.S
- *
  * Low-level exception handling
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -8,6 +6,7 @@
  * for more details.
  *
  * Copyright (C) 2004 - 2008 by Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  *
  * Chris Zankel <chris@zankel.net>
  *
@@ -75,6 +74,27 @@
 #endif
 	.endm
 
+
+	.macro	irq_save flags tmp
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+	rsr	\flags, ps
+	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	bgei	\tmp, LOCKLEVEL, 99f
+	rsil	\tmp, LOCKLEVEL
+99:
+#else
+	movi	\tmp, LOCKLEVEL
+	rsr	\flags, ps
+	or	\flags, \flags, \tmp
+	xsr	\flags, ps
+	rsync
+#endif
+#else
+	rsil	\flags, LOCKLEVEL
+#endif
+	.endm
+
 /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
 
 /*
@@ -352,11 +372,11 @@ common_exception:
 
 	/* It is now save to restore the EXC_TABLE_FIXUP variable. */
 
-	rsr	a0, exccause
+	rsr	a2, exccause
 	movi	a3, 0
-	rsr	a2, excsave1
-	s32i	a0, a1, PT_EXCCAUSE
-	s32i	a3, a2, EXC_TABLE_FIXUP
+	rsr	a0, excsave1
+	s32i	a2, a1, PT_EXCCAUSE
+	s32i	a3, a0, EXC_TABLE_FIXUP
 
 	/* All unrecoverable states are saved on stack, now, and a1 is valid.
 	 * Now we can allow exceptions again. In case we've got an interrupt
@@ -367,19 +387,46 @@ common_exception:
 	 */
 
 	rsr	a3, ps
-	addi	a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
-	movi	a2, LOCKLEVEL
+	s32i	a3, a1, PT_PS		# save ps
+
+#if XTENSA_FAKE_NMI
+	/* Correct PS needs to be saved in the PT_PS:
+	 * - in case of exception or level-1 interrupt it's in the PS,
+	 *   and is already saved.
+	 * - in case of medium level interrupt it's in the excsave2.
+	 */
+	movi	a0, EXCCAUSE_MAPPED_NMI
+	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	beq	a2, a0, .Lmedium_level_irq
+	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
+	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0
+
+.Lmedium_level_irq:
+	rsr	a0, excsave2
+	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
+	bgei	a3, LOCKLEVEL, .Lexception
+
+.Llevel1_irq:
+	movi	a3, LOCKLEVEL
+
+.Lexception:
+	movi	a0, 1 << PS_WOE_BIT
+	or	a3, a3, a0
+#else
+	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
+	movi	a0, LOCKLEVEL
 	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
 					# a3 = PS.INTLEVEL
-	moveqz	a3, a2, a0		# a3 = LOCKLEVEL iff interrupt
+	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
 	movi	a2, 1 << PS_WOE_BIT
 	or	a3, a3, a2
 	rsr	a2, exccause
+#endif
+
 	/* restore return address (or 0 if return to userspace) */
 	rsr	a0, depc
-	xsr	a3, ps
-
-	s32i	a3, a1, PT_PS		# save ps
+	wsr	a3, ps
+	rsync				# PS.WOE => rsync => overflow
 
 	/* Save lbeg, lend */
 
@@ -417,8 +464,13 @@
 	.global common_exception_return
 common_exception_return:
 
+#if XTENSA_FAKE_NMI
+	l32i	a2, a1, PT_EXCCAUSE
+	movi	a3, EXCCAUSE_MAPPED_NMI
+	beq	a2, a3, .LNMIexit
+#endif
 1:
-	rsil	a2, LOCKLEVEL
+	irq_save a2, a3
 #ifdef CONFIG_TRACE_IRQFLAGS
 	movi	a4, trace_hardirqs_off
 	callx4	a4
@@ -481,6 +533,12 @@ common_exception_return:
 	j	1b
 #endif
 
+#if XTENSA_FAKE_NMI
+.LNMIexit:
+	l32i	a3, a1, PT_PS
+	_bbci.l	a3, PS_UM_BIT, 4f
+#endif
+
 5:
 #ifdef CONFIG_DEBUG_TLB_SANITY
 	l32i	a4, a1, PT_DEPC
@@ -1564,6 +1622,13 @@ ENTRY(fast_second_level_miss)
 	rfde
 
 9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	bnez	a0, 8b
+
+	/* Even more unlikely case active_mm == 0.
+	 * We can get here with NMI in the middle of context_switch that
+	 * touches vmalloc area.
+	 */
+	movi	a0, init_mm
 	j	8b
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -1867,7 +1932,7 @@ ENTRY(_switch_to)
 
 	/* Disable ints while we manipulate the stack pointer. */
 
-	rsil	a14, LOCKLEVEL
+	irq_save a14, a3
 	rsync
 
 	/* Switch CPENABLE */
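The common_exception changes above decide which PS value lands in the PT_PS
stack slot and at what interrupt level the handler runs; the control flow is
easier to follow in C than in the branchy assembly. The sketch below is a
rough model only: ps, excsave2 and pt_ps stand in for the real special
registers and stack slot, and the constants are the ones defined elsewhere
in this patch.

/* Rough C model of the XTENSA_FAKE_NMI branch of common_exception. */
static unsigned long common_exception_ps(unsigned long exccause,
					 unsigned long ps,
					 unsigned long excsave2,
					 unsigned long *pt_ps)
{
	unsigned long intlevel = ps & PS_INTLEVEL_MASK;

	*pt_ps = ps;	/* exceptions and plain level-1 IRQs: entry PS is correct */
	if (exccause == EXCCAUSE_MAPPED_NMI ||
	    (exccause == EXCCAUSE_LEVEL1_INTERRUPT && intlevel != 0)) {
		*pt_ps = excsave2;	/* came via a medium-level vector, which
					   stashed the interrupted PS there */
		if (intlevel < LOCKLEVEL)
			intlevel = LOCKLEVEL;
	} else if (exccause == EXCCAUSE_LEVEL1_INTERRUPT) {
		intlevel = LOCKLEVEL;	/* run level-1 IRQ handlers with IRQs masked */
	}
	/* new PS: chosen level, window overflow detection enabled */
	return intlevel | (1 << PS_WOE_BIT);
}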
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 32b60568ee45..91ba60563021 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -29,6 +29,7 @@
 #include <asm/platform.h>
 
 atomic_t irq_err_count;
+DECLARE_PER_CPU(unsigned long, nmi_count);
 
 asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
@@ -57,11 +58,18 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
+	unsigned cpu __maybe_unused;
 #ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
 #endif
 	seq_printf(p, "%*s: ", prec, "ERR");
 	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
+#if XTENSA_FAKE_NMI
+	seq_printf(p, "%*s:", prec, "NMI");
+	for_each_online_cpu(cpu)
+		seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
+	seq_puts(p, "   Non-maskable interrupts\n");
+#endif
 	return 0;
 }
 
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index b44df3c8198f..54f01188c29c 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -359,7 +359,7 @@ void perf_event_print_debug(void)
 	local_irq_restore(flags);
 }
 
-static irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
 {
 	irqreturn_t rc = IRQ_NONE;
 	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
@@ -436,10 +436,14 @@ static int __init xtensa_pmu_init(void)
 	int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
 
 	perf_cpu_notifier(xtensa_pmu_notifier);
+#if XTENSA_FAKE_NMI
+	enable_irq(irq);
+#else
 	ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
 			  "pmu", NULL);
 	if (ret < 0)
 		return ret;
+#endif
 
 	ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
 	if (ret)
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index a1b5bd237c71..42d441f7898b 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -62,6 +62,7 @@ extern void fast_coprocessor(void);
 
 extern void do_illegal_instruction (struct pt_regs*);
 extern void do_interrupt (struct pt_regs*);
+extern void do_nmi(struct pt_regs *);
 extern void do_unaligned_user (struct pt_regs*);
 extern void do_multihit (struct pt_regs*, unsigned long);
 extern void do_page_fault (struct pt_regs*, unsigned long);
@@ -146,6 +147,9 @@ COPROCESSOR(6),
 #if XTENSA_HAVE_COPROCESSOR(7)
 COPROCESSOR(7),
 #endif
+#if XTENSA_FAKE_NMI
+{ EXCCAUSE_MAPPED_NMI,		0,	   do_nmi },
+#endif
 { EXCCAUSE_MAPPED_DEBUG,	0,	   do_debug },
 { -1, -1, 0 }
 
@@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
 
 extern void do_IRQ(int, struct pt_regs *);
 
+#if XTENSA_FAKE_NMI
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
+
+DEFINE_PER_CPU(unsigned long, nmi_count);
+
+void do_nmi(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+
+	if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
+		trace_hardirqs_off();
+
+	old_regs = set_irq_regs(regs);
+	nmi_enter();
+	++*this_cpu_ptr(&nmi_count);
+	xtensa_pmu_irq_handler(0, NULL);
+	nmi_exit();
+	set_irq_regs(old_regs);
+}
+#endif
+
 void do_interrupt(struct pt_regs *regs)
 {
 	static const unsigned int_level_mask[] = {
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 1b397a902292..abcdb527f18a 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -627,7 +627,11 @@ ENTRY(_Level\level\()InterruptVector)
 	wsr	a0, excsave2
 	rsr	a0, epc\level
 	wsr	a0, epc1
+	.if	\level <= LOCKLEVEL
 	movi	a0, EXCCAUSE_LEVEL1_INTERRUPT
+	.else
+	movi	a0, EXCCAUSE_MAPPED_NMI
+	.endif
 	wsr	a0, exccause
 	rsr	a0, eps\level
 					# branch to user or kernel vector
@@ -682,11 +686,13 @@ ENDPROC(_WindowOverflow4)
 	.align 4
 _SimulateUserKernelVectorException:
 	addi	a0, a0, (1 << PS_EXCM_BIT)
+#if !XTENSA_FAKE_NMI
 	wsr	a0, ps
+#endif
 	bbsi.l	a0, PS_UM_BIT, 1f	# branch if user mode
-	rsr	a0, excsave2		# restore a0
+	xsr	a0, excsave2		# restore a0
 	j	_KernelExceptionVector	# simulate kernel vector exception
-1:	rsr	a0, excsave2		# restore a0
+1:	xsr	a0, excsave2		# restore a0
 	j	_UserExceptionVector	# simulate user vector exception
 #endif
 