-rw-r--r--  arch/tile/Kconfig                       |   7
-rw-r--r--  arch/tile/include/asm/irq.h             |   5
-rw-r--r--  arch/tile/include/asm/processor.h       |   2
-rw-r--r--  arch/tile/include/asm/spinlock_32.h     |   6
-rw-r--r--  arch/tile/include/asm/spinlock_64.h     |   5
-rw-r--r--  arch/tile/include/asm/stack.h           |  13
-rw-r--r--  arch/tile/include/asm/thread_info.h     |   1
-rw-r--r--  arch/tile/include/asm/traps.h           |   8
-rw-r--r--  arch/tile/include/asm/uaccess.h         |  66
-rw-r--r--  arch/tile/include/asm/word-at-a-time.h  |  36
-rw-r--r--  arch/tile/include/hv/hypervisor.h       |  60
-rw-r--r--  arch/tile/kernel/entry.S                |   7
-rw-r--r--  arch/tile/kernel/hvglue.S               |   3
-rw-r--r--  arch/tile/kernel/hvglue_trace.c         |   4
-rw-r--r--  arch/tile/kernel/intvec_64.S            |   6
-rw-r--r--  arch/tile/kernel/process.c              | 138
-rw-r--r--  arch/tile/kernel/setup.c                |   2
-rw-r--r--  arch/tile/kernel/stack.c                | 125
-rw-r--r--  arch/tile/kernel/traps.c                |  15
-rw-r--r--  arch/tile/kernel/vdso/vgettimeofday.c   |  10
-rw-r--r--  arch/tile/lib/exports.c                 |   3
-rw-r--r--  arch/tile/lib/spinlock_32.c             |  11
-rw-r--r--  arch/tile/lib/spinlock_64.c             |  11
-rw-r--r--  arch/tile/lib/usercopy_32.S             |  46
-rw-r--r--  arch/tile/lib/usercopy_64.S             |  46
-rw-r--r--  arch/tile/mm/fault.c                    |  17
-rw-r--r--  drivers/tty/hvc/hvc_tile.c              |   3
27 files changed, 402 insertions(+), 254 deletions(-)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 59cf0b911898..9def1f52d03a 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -24,11 +24,14 @@ config TILE
 	select MODULES_USE_ELF_RELA
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_SYSCALL_TRACEPOINTS
+	select USER_STACKTRACE_SUPPORT
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select HAVE_DEBUG_STACKOVERFLOW
 	select ARCH_WANT_FRAME_POINTERS
 	select HAVE_CONTEXT_TRACKING
 	select EDAC_SUPPORT
+	select GENERIC_STRNCPY_FROM_USER
+	select GENERIC_STRNLEN_USER
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT
@@ -125,8 +128,10 @@ config HVC_TILE
 	select HVC_IRQ if TILEGX
 	def_bool y
 
+# Building with ARCH=tilegx (or ARCH=tile) implies using the
+# 64-bit TILE-Gx toolchain, so force CONFIG_TILEGX on.
 config TILEGX
-	bool "Building for TILE-Gx (64-bit) processor"
+	def_bool ARCH != "tilepro"
 	select SPARSE_IRQ
 	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 	select HAVE_FUNCTION_TRACER
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 1fe86911838b..84a924034bdb 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -78,4 +78,9 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type);
 
 void setup_irq_regs(void);
 
+#ifdef __tilegx__
+void arch_trigger_all_cpu_backtrace(bool self);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_TILE_IRQ_H */
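The self-referential define above follows the usual kernel convention for optional arch hooks: generic code can test the macro with the preprocessor to see whether the architecture supplies its own implementation. A minimal sketch of a generic-side caller, assuming this convention (illustrative, not the literal <linux/nmi.h> code):

	#ifdef arch_trigger_all_cpu_backtrace
	static inline bool trigger_all_cpu_backtrace(void)
	{
		/* Arch hook exists: dump all cpus, including ourselves. */
		arch_trigger_all_cpu_backtrace(true);
		return true;
	}
	#else
	static inline bool trigger_all_cpu_backtrace(void)
	{
		return false;	/* no arch support; caller must fall back */
	}
	#endif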
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index dd4f9f17e30a..139dfdee0134 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -111,8 +111,6 @@ struct thread_struct {
 	unsigned long long interrupt_mask;
 	/* User interrupt-control 0 state */
 	unsigned long intctrl_0;
-	/* Is this task currently doing a backtrace? */
-	bool in_backtrace;
 	/* Any other miscellaneous processor state bits */
 	unsigned long proc_status;
 #if !CHIP_HAS_FIXED_INTVEC_BASE()
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index c0a77b38d39a..b14b1ba5bf9c 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -41,8 +41,12 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 	 * to claim the lock is held, since it will be momentarily
 	 * if not already. There's no need to wait for a "valid"
 	 * lock->next_ticket to become available.
+	 * Use READ_ONCE() to ensure that calling this in a loop is OK.
 	 */
-	return lock->next_ticket != lock->current_ticket;
+	int curr = READ_ONCE(lock->current_ticket);
+	int next = READ_ONCE(lock->next_ticket);
+
+	return next != curr;
 }
 
 void arch_spin_lock(arch_spinlock_t *lock);
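Without READ_ONCE(), the compiler is free to hoist the ticket loads out of a caller's polling loop and spin on a stale value forever. A minimal sketch of the polling pattern the new comment has in mind, with a hypothetical lock variable:

	/* Re-reads current_ticket/next_ticket on every iteration,
	 * because arch_spin_is_locked() now uses READ_ONCE() internally. */
	while (arch_spin_is_locked(&lock))
		cpu_relax();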
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 9a12b9c7e5d3..b9718fb4e74a 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -18,6 +18,8 @@
 #ifndef _ASM_TILE_SPINLOCK_64_H
 #define _ASM_TILE_SPINLOCK_64_H
 
+#include <linux/compiler.h>
+
 /* Shifts and masks for the various fields in "lock". */
 #define __ARCH_SPIN_CURRENT_SHIFT	17
 #define __ARCH_SPIN_NEXT_MASK		0x7fff
@@ -44,7 +46,8 @@ static inline u32 arch_spin_next(u32 val)
 /* The lock is locked if a task would have to wait to get it. */
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	u32 val = lock->lock;
+	/* Use READ_ONCE() to ensure that calling this in a loop is OK. */
+	u32 val = READ_ONCE(lock->lock);
 	return arch_spin_current(val) != arch_spin_next(val);
 }
 
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 0e9d382a2d45..c3cb42615a9f 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -58,17 +58,14 @@ extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);
 /* Advance to the next frame. */
 extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt);
 
+/* Dump just the contents of the pt_regs structure. */
+extern void tile_show_regs(struct pt_regs *);
+
 /*
  * Dump stack given complete register info. Use only from the
  * architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
  */
-extern void tile_show_stack(struct KBacktraceIterator *, int headers);
-
-/* Dump stack of current process, with registers to seed the backtrace. */
-extern void dump_stack_regs(struct pt_regs *);
-
-/* Helper method for assembly dump_stack(). */
-extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
+extern void tile_show_stack(struct KBacktraceIterator *);
 
 #endif /* _ASM_TILE_STACK_H */
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index f804c39a5e4d..dc1fb28d9636 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -42,6 +42,7 @@ struct thread_info {
 	unsigned long unalign_jit_tmp[4]; /* temp r0..r3 storage */
 	void __user *unalign_jit_base; /* unalign fixup JIT base */
 #endif
+	bool in_backtrace; /* currently doing backtrace? */
 };
 
 /*
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 4b99a1c3aab2..11c82270c1f5 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -52,6 +52,14 @@ void do_timer_interrupt(struct pt_regs *, int fault_num);
 /* kernel/messaging.c */
 void hv_message_intr(struct pt_regs *, int intnum);
 
+#define TILE_NMI_DUMP_STACK	1	/* Dump stack for sysrq+'l' */
+
+/* kernel/process.c */
+void do_nmi_dump_stack(struct pt_regs *regs);
+
+/* kernel/traps.c */
+void do_nmi(struct pt_regs *, int fault_num, unsigned long reason);
+
 /* kernel/irq.c */
 void tile_dev_intr(struct pt_regs *, int intnum);
 
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index a33276bf5ca1..0a9c4265763b 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -65,6 +65,13 @@ static inline int is_arch_mappable_range(unsigned long addr,
 #endif
 
 /*
+ * Note that using this definition ignores is_arch_mappable_range(),
+ * so on tilepro code that uses user_addr_max() is constrained not
+ * to reference the tilepro user-interrupt region.
+ */
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
+
+/*
  * Test whether a block of memory is a valid user space address.
  * Returns 0 if the range is valid, nonzero otherwise.
  */
@@ -471,62 +478,9 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 #endif
 
 
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-extern long strnlen_user_asm(const char __user *str, long n);
-static inline long __must_check strnlen_user(const char __user *str, long n)
-{
-	might_fault();
-	return strnlen_user_asm(str, n);
-}
-#define strlen_user(str) strnlen_user(str, LONG_MAX)
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
- * @dst:   Destination address, in kernel space. This buffer must be at
-          least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- * Caller must check the specified block with access_ok() before calling
- * this function.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
-static inline long __must_check __strncpy_from_user(
-	char *dst, const char __user *src, long count)
-{
-	might_fault();
-	return strncpy_from_user_asm(dst, src, count);
-}
-static inline long __must_check strncpy_from_user(
-	char *dst, const char __user *src, long count)
-{
-	if (access_ok(VERIFY_READ, src, 1))
-		return __strncpy_from_user(dst, src, count);
-	return -EFAULT;
-}
+extern long strnlen_user(const char __user *str, long n);
+extern long strlen_user(const char __user *str);
+extern long strncpy_from_user(char *dst, const char __user *src, long);
 
 /**
  * clear_user: - Zero a block of memory in user space.
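The three externs now resolve to the generic lib/strncpy_from_user.c and lib/strnlen_user.c implementations selected in the Kconfig hunk above. A hedged sketch of the calling contract, with a hypothetical user_ptr argument:

	char name[64];
	long len = strncpy_from_user(name, user_ptr, sizeof(name));

	if (len < 0)
		return len;		/* -EFAULT: bad user pointer */
	if (len == sizeof(name))
		return -ENAMETOOLONG;	/* source did not fit, no NUL added */
	/* otherwise name[] is NUL-terminated and len excludes the NUL */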
diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..9e5ce0d7b292
--- /dev/null
+++ b/arch/tile/include/asm/word-at-a-time.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <asm/byteorder.h>
+
+struct word_at_a_time { /* unused */ };
+#define WORD_AT_A_TIME_CONSTANTS {}
+
+/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+static inline unsigned long has_zero(unsigned long val, unsigned long *data,
+				     const struct word_at_a_time *c)
+{
+#ifdef __tilegx__
+	unsigned long mask = __insn_v1cmpeqi(val, 0);
+#else /* tilepro */
+	unsigned long mask = __insn_seqib(val, 0);
+#endif
+	*data = mask;
+	return mask;
+}
+
+/* These operations are both nops. */
+#define prep_zero_mask(val, data, c) (data)
+#define create_zero_mask(data) (data)
+
+/* And this operation just depends on endianness. */
+static inline long find_zero(unsigned long mask)
+{
+#ifdef __BIG_ENDIAN
+	return __builtin_clzl(mask) >> 3;
+#else
+	return __builtin_ctzl(mask) >> 3;
+#endif
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
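These hooks exist so the generic word-at-a-time string routines can scan a full register's worth of bytes per iteration. A simplified sketch of how a consumer composes them, assuming an aligned, NUL-terminated buffer; the real users are the generic strnlen_user() and strncpy_from_user() enabled by this patch, which also handle alignment and faults:

	static inline long sketch_strlen(const unsigned long *p)
	{
		const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS;
		unsigned long data;
		long len = 0;

		for (;;) {
			unsigned long v = p[len / sizeof(long)];
			if (has_zero(v, &data, &c)) {
				data = prep_zero_mask(v, data, &c);
				data = create_zero_mask(data);
				/* find_zero() gives the byte index of the NUL */
				return len + find_zero(data);
			}
			len += sizeof(long);
		}
	}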
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index e0e6af4e783b..f10b332b3b65 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -321,8 +321,11 @@
 /** hv_console_set_ipi */
 #define HV_DISPATCH_CONSOLE_SET_IPI               63
 
+/** hv_send_nmi */
+#define HV_DISPATCH_SEND_NMI                      65
+
 /** One more than the largest dispatch value */
-#define _HV_DISPATCH_END                          64
+#define _HV_DISPATCH_END                          66
 
 
 #ifndef __ASSEMBLER__
@@ -1253,6 +1256,11 @@ void hv_downcall_dispatch(void);
 #define INT_DMATLB_ACCESS_DWNCL   INT_DMA_CPL
 /** Device interrupt downcall interrupt vector */
 #define INT_DEV_INTR_DWNCL        INT_WORLD_ACCESS
+/** NMI downcall interrupt vector */
+#define INT_NMI_DWNCL             64
+
+#define HV_NMI_FLAG_FORCE         0x1  /**< Force an NMI downcall regardless of
+					  the ICS bit of the client. */
 
 #ifndef __ASSEMBLER__
 
@@ -1780,6 +1788,56 @@ int hv_dev_poll(int devhdl, __hv32 events, HV_IntArg intarg);
 int hv_dev_poll_cancel(int devhdl);
 
 
+/** NMI information */
+typedef struct
+{
+	/** Result: negative error, or HV_NMI_RESULT_xxx. */
+	int result;
+
+	/** PC from interrupted remote core (if result != HV_NMI_RESULT_FAIL_HV). */
+	HV_VirtAddr pc;
+
+} HV_NMI_Info;
+
+/** NMI issued successfully. */
+#define HV_NMI_RESULT_OK        0
+
+/** NMI not issued: remote tile running at client PL with ICS set. */
+#define HV_NMI_RESULT_FAIL_ICS  1
+
+/** NMI not issued: remote tile waiting in hypervisor. */
+#define HV_NMI_RESULT_FAIL_HV   2
+
+/** Force an NMI downcall regardless of the ICS bit of the client. */
+#define HV_NMI_FLAG_FORCE       0x1
+
+/** Send an NMI interrupt request to a particular tile.
+ *
+ *  This will cause the NMI to be issued on the remote tile regardless
+ *  of the state of the client interrupt mask. However, if the remote
+ *  tile is in the hypervisor, it will not execute the NMI, and
+ *  HV_NMI_RESULT_FAIL_HV will be returned. Similarly, if the remote
+ *  tile is in a client interrupt critical section at the time of the
+ *  NMI, it will not execute the NMI, and HV_NMI_RESULT_FAIL_ICS will
+ *  be returned. In this second case, however, if HV_NMI_FLAG_FORCE
+ *  is set in flags, then the remote tile will enter its NMI interrupt
+ *  vector regardless. Forcing the NMI vector during an interrupt
+ *  critical section will mean that the client can not safely continue
+ *  execution after handling the interrupt.
+ *
+ *  @param tile Tile to which the NMI request is sent.
+ *  @param info NMI information which is defined by and interpreted by the
+ *         supervisor, is passed to the specified tile, and is
+ *         stored in the SPR register SYSTEM_SAVE_{CLIENT_PL}_2 on the
+ *         specified tile when entering the NMI handler routine.
+ *         Typically, this parameter stores the NMI type, or an aligned
+ *         VA plus some special bits, etc.
+ *  @param flags Flags (HV_NMI_FLAG_xxx).
+ *  @return Information about the requested NMI.
+ */
+HV_NMI_Info hv_send_nmi(HV_Coord tile, unsigned long info, __hv64 flags);
+
+
 /** Scatter-gather list for preada/pwritea calls. */
 typedef struct
 #if CHIP_VA_WIDTH() <= 32
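A hedged sketch of the calling convention documented above, with a hypothetical target coordinate; the in-tree user is arch_trigger_all_cpu_backtrace() later in this patch, which bounds its retries rather than looping indefinitely:

	HV_Coord tile = { .x = 1, .y = 0 };	/* hypothetical target */
	HV_NMI_Info info;

	/* FAIL_ICS and FAIL_HV are transient, so callers typically retry
	 * (with a timeout) instead of treating them as hard errors. */
	do {
		info = hv_send_nmi(tile, TILE_NMI_DUMP_STACK, 0);
	} while (info.result == HV_NMI_RESULT_FAIL_ICS ||
		 info.result == HV_NMI_RESULT_FAIL_HV);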
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 3d9175992a20..670a3569450f 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -27,13 +27,6 @@ STD_ENTRY(current_text_addr)
 	{ move r0, lr; jrp lr }
 	STD_ENDPROC(current_text_addr)
 
-STD_ENTRY(dump_stack)
-	{ move r2, lr; lnk r1 }
-	{ move r4, r52; addli r1, r1, dump_stack - . }
-	{ move r3, sp; j _dump_stack }
-	jrp lr   /* keep backtracer happy */
-	STD_ENDPROC(dump_stack)
-
 STD_ENTRY(KBacktraceIterator_init_current)
 	{ move r2, lr; lnk r1 }
 	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
diff --git a/arch/tile/kernel/hvglue.S b/arch/tile/kernel/hvglue.S
index 2ab456622391..d78ee2ad610c 100644
--- a/arch/tile/kernel/hvglue.S
+++ b/arch/tile/kernel/hvglue.S
@@ -71,4 +71,5 @@ gensym hv_flush_all, 0x6e0, 32
 gensym hv_get_ipi_pte, 0x700, 32
 gensym hv_set_pte_super_shift, 0x720, 32
 gensym hv_console_set_ipi, 0x7e0, 32
-gensym hv_glue_internals, 0x800, 30720
+gensym hv_send_nmi, 0x820, 32
+gensym hv_glue_internals, 0x840, 30656
diff --git a/arch/tile/kernel/hvglue_trace.c b/arch/tile/kernel/hvglue_trace.c
index 85c74ad29312..add0d71395c6 100644
--- a/arch/tile/kernel/hvglue_trace.c
+++ b/arch/tile/kernel/hvglue_trace.c
@@ -75,6 +75,7 @@
 #define hv_get_ipi_pte _hv_get_ipi_pte
 #define hv_set_pte_super_shift _hv_set_pte_super_shift
 #define hv_console_set_ipi _hv_console_set_ipi
+#define hv_send_nmi _hv_send_nmi
 #include <hv/hypervisor.h>
 #undef hv_init
 #undef hv_install_context
@@ -134,6 +135,7 @@
 #undef hv_get_ipi_pte
 #undef hv_set_pte_super_shift
 #undef hv_console_set_ipi
+#undef hv_send_nmi
 
 /*
  * Provide macros based on <linux/syscalls.h> to provide a wrapper
@@ -264,3 +266,5 @@ HV_WRAP9(int, hv_flush_remote, HV_PhysAddr, cache_pa,
 	 HV_VirtAddr, tlb_va, unsigned long, tlb_length,
 	 unsigned long, tlb_pgsize, unsigned long*, tlb_cpumask,
 	 HV_Remote_ASID*, asids, int, asidcount)
+HV_WRAP3(HV_NMI_Info, hv_send_nmi, HV_Coord, tile, unsigned long, info,
+	 __hv64, flags)
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 5b67efcecabd..800b91d3f9dc 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -515,6 +515,10 @@ intvec_\vecname:
 	.ifc \c_routine, handle_perf_interrupt
 	mfspr   r2, AUX_PERF_COUNT_STS
 	.endif
+	.ifc \c_routine, do_nmi
+	mfspr   r2, SPR_SYSTEM_SAVE_K_2   /* nmi type */
+	.else
+	.endif
 	.endif
 	.endif
 	.endif
@@ -1571,3 +1575,5 @@ intrpt_start:
 
 	/* Synthetic interrupt delivered only by the simulator */
 	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint
+	/* Synthetic interrupt delivered by hv */
+	int_hand     INT_NMI_DWNCL, NMI_DWNCL, do_nmi, handle_nmi
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index b403c2e3e263..a45213781ad0 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -27,6 +27,7 @@
 #include <linux/kernel.h>
 #include <linux/tracehook.h>
 #include <linux/signal.h>
+#include <linux/delay.h>
 #include <linux/context_tracking.h>
 #include <asm/stack.h>
 #include <asm/switch_to.h>
@@ -132,7 +133,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		       (CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
 		callee_regs[0] = sp;   /* r30 = function */
 		callee_regs[1] = arg;  /* r31 = arg */
-		childregs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
 		return 0;
 	}
@@ -546,31 +546,141 @@ void exit_thread(void)
 #endif
 }
 
-void show_regs(struct pt_regs *regs)
+void tile_show_regs(struct pt_regs *regs)
 {
-	struct task_struct *tsk = validate_current();
 	int i;
-
-	if (tsk != &corrupt_current)
-		show_regs_print_info(KERN_ERR);
 #ifdef __tilegx__
 	for (i = 0; i < 17; i++)
-		pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
 		       i, regs->regs[i], i+18, regs->regs[i+18],
 		       i+36, regs->regs[i+36]);
-	pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n",
+	pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
 	       regs->regs[17], regs->regs[35], regs->tp);
-	pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
+	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
 #else
 	for (i = 0; i < 13; i++)
-		pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
+		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
 		       i, regs->regs[i], i+14, regs->regs[i+14],
 		       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
-	pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
+	pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
 	       regs->regs[13], regs->tp, regs->sp, regs->lr);
 #endif
-	pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n",
-	       regs->pc, regs->ex1, regs->faultnum);
+	pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld flags:%s%s%s%s\n",
+	       regs->pc, regs->ex1, regs->faultnum,
+	       is_compat_task() ? " compat" : "",
+	       (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
+	       !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
+	       (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	struct KBacktraceIterator kbt;
+
+	show_regs_print_info(KERN_DEFAULT);
+	tile_show_regs(regs);
+
+	KBacktraceIterator_init(&kbt, NULL, regs);
+	tile_show_stack(&kbt);
+}
+
+/* To ensure stack dump on tiles occurs one by one. */
+static DEFINE_SPINLOCK(backtrace_lock);
+/* To ensure no backtrace occurs before all of the stack dump are done. */
+static atomic_t backtrace_cpus;
+/* The cpu mask to avoid reentrance. */
+static struct cpumask backtrace_mask;
 
-	dump_stack_regs(regs);
+void do_nmi_dump_stack(struct pt_regs *regs)
+{
+	int is_idle = is_idle_task(current) && !in_interrupt();
+	int cpu;
+
+	nmi_enter();
+	cpu = smp_processor_id();
+	if (WARN_ON_ONCE(!cpumask_test_and_clear_cpu(cpu, &backtrace_mask)))
+		goto done;
+
+	spin_lock(&backtrace_lock);
+	if (is_idle)
+		pr_info("CPU: %d idle\n", cpu);
+	else
+		show_regs(regs);
+	spin_unlock(&backtrace_lock);
+	atomic_dec(&backtrace_cpus);
+done:
+	nmi_exit();
+}
+
+#ifdef __tilegx__
+void arch_trigger_all_cpu_backtrace(bool self)
+{
+	struct cpumask mask;
+	HV_Coord tile;
+	unsigned int timeout;
+	int cpu;
+	int ongoing;
+	HV_NMI_Info info[NR_CPUS];
+
+	ongoing = atomic_cmpxchg(&backtrace_cpus, 0, num_online_cpus() - 1);
+	if (ongoing != 0) {
+		pr_err("Trying to do all-cpu backtrace.\n");
+		pr_err("But another all-cpu backtrace is ongoing (%d cpus left)\n",
+		       ongoing);
+		if (self) {
+			pr_err("Reporting the stack on this cpu only.\n");
+			dump_stack();
+		}
+		return;
+	}
+
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	cpumask_copy(&backtrace_mask, &mask);
+
+	/* Backtrace for myself first. */
+	if (self)
+		dump_stack();
+
+	/* Tentatively dump stack on remote tiles via NMI. */
+	timeout = 100;
+	while (!cpumask_empty(&mask) && timeout) {
+		for_each_cpu(cpu, &mask) {
+			tile.x = cpu_x(cpu);
+			tile.y = cpu_y(cpu);
+			info[cpu] = hv_send_nmi(tile, TILE_NMI_DUMP_STACK, 0);
+			if (info[cpu].result == HV_NMI_RESULT_OK)
+				cpumask_clear_cpu(cpu, &mask);
+		}
+
+		mdelay(10);
+		timeout--;
+	}
+
+	/* Warn about cpus stuck in ICS and decrement their counts here. */
+	if (!cpumask_empty(&mask)) {
+		for_each_cpu(cpu, &mask) {
+			switch (info[cpu].result) {
+			case HV_NMI_RESULT_FAIL_ICS:
+				pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
+					cpu, info[cpu].pc);
+				break;
+			case HV_NMI_RESULT_FAIL_HV:
+				pr_warn("Skipping stack dump of cpu %d in hypervisor\n",
+					cpu);
+				break;
+			case HV_ENOSYS:
+				pr_warn("Hypervisor too old to allow remote stack dumps.\n");
+				goto skip_for_each;
+			default:  /* should not happen */
+				pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
+					cpu, info[cpu].result, info[cpu].pc);
+				break;
+			}
+		}
+skip_for_each:
+		atomic_sub(cpumask_weight(&mask), &backtrace_cpus);
+	}
 }
+#endif /* __tilegx__ */
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index d366675e4bf8..99c9ff87e018 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -71,7 +71,7 @@ static unsigned long __initdata node_percpu[MAX_NUMNODES];
  * per-CPU stack and boot info.
  */
 DEFINE_PER_CPU(unsigned long, boot_sp) =
-	(unsigned long)init_stack + THREAD_SIZE;
+	(unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA;
 
 #ifdef CONFIG_SMP
 DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index c42dce50acd8..35d34635e4f1 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/hardirq.h>
 #include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
@@ -109,7 +110,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 		if (kbt->verbose)
 			pr_err(" <%s while in user mode>\n", fault);
 	} else {
-		if (kbt->verbose)
+		if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
 			pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
 			       p->pc, p->sp, p->ex1);
 		return NULL;
@@ -119,10 +120,12 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	return p;
 }
 
-/* Is the pc pointing to a sigreturn trampoline? */
-static int is_sigreturn(unsigned long pc)
+/* Is the iterator pointing to a sigreturn trampoline? */
+static int is_sigreturn(struct KBacktraceIterator *kbt)
 {
-	return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
+	return kbt->task->mm &&
+	       (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
+			       (ulong)&__vdso_rt_sigreturn));
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
@@ -131,7 +134,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
+	if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
 	    b->sp % sizeof(long) == 0) {
 		int retval;
 		pagefault_disable();
@@ -151,11 +154,6 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 	return NULL;
 }
 
-static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
-{
-	return is_sigreturn(kbt->it.pc);
-}
-
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
@@ -178,7 +176,7 @@ static int KBacktraceIterator_next_item_inclusive(
 {
 	for (;;) {
 		do {
-			if (!KBacktraceIterator_is_sigreturn(kbt))
+			if (!is_sigreturn(kbt))
 				return KBT_ONGOING;
 		} while (backtrace_next(&kbt->it));
 
@@ -357,51 +355,50 @@ static void describe_addr(struct KBacktraceIterator *kbt,
  */
 static bool start_backtrace(void)
 {
-	if (current->thread.in_backtrace) {
+	if (current_thread_info()->in_backtrace) {
 		pr_err("Backtrace requested while in backtrace!\n");
 		return false;
 	}
-	current->thread.in_backtrace = true;
+	current_thread_info()->in_backtrace = true;
 	return true;
 }
 
 static void end_backtrace(void)
 {
-	current->thread.in_backtrace = false;
+	current_thread_info()->in_backtrace = false;
 }
 
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
  */
-void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
+void tile_show_stack(struct KBacktraceIterator *kbt)
 {
 	int i;
 	int have_mmap_sem = 0;
 
 	if (!start_backtrace())
 		return;
-	if (headers) {
-		/*
-		 * Add a blank line since if we are called from panic(),
-		 * then bust_spinlocks() spit out a space in front of us
-		 * and it will mess up our KERN_ERR.
-		 */
-		pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
-		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
-		       raw_smp_processor_id(), get_cycles());
-	}
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
 		char namebuf[KSYM_NAME_LEN+100];
 		unsigned long address = kbt->it.pc;
 
-		/* Try to acquire the mmap_sem as we pass into userspace. */
-		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+		/*
+		 * Try to acquire the mmap_sem as we pass into userspace.
+		 * If we're in an interrupt context, don't even try, since
+		 * it's not safe to call e.g. d_path() from an interrupt,
+		 * since it uses spin locks without disabling interrupts.
+		 * Note we test "kbt->task == current", not "kbt->is_current",
+		 * since we're checking that "current" will work in d_path().
+		 */
+		if (kbt->task == current && address < PAGE_OFFSET &&
+		    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
 			have_mmap_sem =
 				down_read_trylock(&kbt->task->mm->mmap_sem);
+		}
 
 		describe_addr(kbt, address, have_mmap_sem,
 			      namebuf, sizeof(namebuf));
@@ -416,24 +413,12 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	}
 	if (kbt->end == KBT_LOOP)
 		pr_err("Stack dump stopped; next frame identical to this one\n");
-	if (headers)
-		pr_err("Stack dump complete\n");
 	if (have_mmap_sem)
 		up_read(&kbt->task->mm->mmap_sem);
 	end_backtrace();
 }
 EXPORT_SYMBOL(tile_show_stack);
 
-
-/* This is called from show_regs() and _dump_stack() */
-void dump_stack_regs(struct pt_regs *regs)
-{
-	struct KBacktraceIterator kbt;
-	KBacktraceIterator_init(&kbt, NULL, regs);
-	tile_show_stack(&kbt, 1);
-}
-EXPORT_SYMBOL(dump_stack_regs);
-
 static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
 				       ulong pc, ulong lr, ulong sp, ulong r52)
 {
@@ -445,11 +430,15 @@ static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
 	return regs;
 }
 
-/* This is called from dump_stack() and just converts to pt_regs */
+/* Deprecated function currently only used by kernel_double_fault(). */
 void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
 {
+	struct KBacktraceIterator kbt;
 	struct pt_regs regs;
-	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
+
+	regs_to_pt_regs(&regs, pc, lr, sp, r52);
+	KBacktraceIterator_init(&kbt, NULL, &regs);
+	tile_show_stack(&kbt);
 }
 
 /* This is called from KBacktraceIterator_init_current() */
@@ -461,22 +450,30 @@ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
 				       regs_to_pt_regs(&regs, pc, lr, sp, r52));
 }
 
-/* This is called only from kernel/sched/core.c, with esp == NULL */
+/*
+ * Called from sched_show_task() with task != NULL, or dump_stack()
+ * with task == NULL. The esp argument is always NULL.
+ */
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	struct KBacktraceIterator kbt;
-	if (task == NULL || task == current)
+	if (task == NULL || task == current) {
 		KBacktraceIterator_init_current(&kbt);
-	else
+		KBacktraceIterator_next(&kbt); /* don't show first frame */
+	} else {
 		KBacktraceIterator_init(&kbt, task, NULL);
-	tile_show_stack(&kbt, 0);
+	}
+	tile_show_stack(&kbt);
 }
 
 #ifdef CONFIG_STACKTRACE
 
 /* Support generic Linux stack API too */
 
-void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+static void save_stack_trace_common(struct task_struct *task,
+				    struct pt_regs *regs,
+				    bool user,
+				    struct stack_trace *trace)
 {
 	struct KBacktraceIterator kbt;
 	int skip = trace->skip;
@@ -484,31 +481,57 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
 
 	if (!start_backtrace())
 		goto done;
-	if (task == NULL || task == current)
+	if (regs != NULL) {
+		KBacktraceIterator_init(&kbt, NULL, regs);
+	} else if (task == NULL || task == current) {
 		KBacktraceIterator_init_current(&kbt);
-	else
+		skip++;	/* don't show KBacktraceIterator_init_current */
+	} else {
 		KBacktraceIterator_init(&kbt, task, NULL);
+	}
 	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
 		if (skip) {
 			--skip;
 			continue;
 		}
-		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
+		if (i >= trace->max_entries ||
+		    (!user && kbt.it.pc < PAGE_OFFSET))
 			break;
 		trace->entries[i++] = kbt.it.pc;
 	}
 	end_backtrace();
 done:
+	if (i < trace->max_entries)
+		trace->entries[i++] = ULONG_MAX;
 	trace->nr_entries = i;
 }
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+	save_stack_trace_common(task, NULL, false, trace);
+}
 EXPORT_SYMBOL(save_stack_trace_tsk);
 
 void save_stack_trace(struct stack_trace *trace)
 {
-	save_stack_trace_tsk(NULL, trace);
+	save_stack_trace_common(NULL, NULL, false, trace);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	save_stack_trace_common(NULL, regs, false, trace);
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+	/* Trace user stack if we are not a kernel thread. */
+	if (current->mm)
+		save_stack_trace_common(NULL, task_pt_regs(current),
+					true, trace);
+	else if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
 #endif
 
 /* In entry.S */
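Note the ULONG_MAX sentinel that save_stack_trace_common() now appends when there is room. A sketch of a hypothetical consumer honoring it:

	unsigned long entries[16];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
	};
	unsigned int i;

	save_stack_trace(&trace);
	for (i = 0; i < trace.nr_entries; i++) {
		if (entries[i] == ULONG_MAX)
			break;			/* terminator, not a real PC */
		pr_info("%pS\n", (void *)entries[i]);
	}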
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 312fc134c1cb..0011a9ff0525 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -395,6 +395,21 @@ done:
 	exception_exit(prev_state);
 }
 
+void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
+{
+	switch (reason) {
+	case TILE_NMI_DUMP_STACK:
+		do_nmi_dump_stack(regs);
+		break;
+	default:
+		panic("Unexpected do_nmi type %ld", reason);
+		return;
+	}
+}
+
+/* Deprecated function currently only used here. */
+extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
+
 void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
 {
 	_dump_stack(dummy, pc, lr, sp, r52);
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index 8bb21eda07d8..e63310c49742 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -67,7 +67,7 @@ static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
 	u64 ns;
 
 	do {
-		count = read_seqcount_begin(&vdso->tb_seq);
+		count = raw_read_seqcount_begin(&vdso->tb_seq);
 		ts->tv_sec = vdso->wall_time_sec;
 		ns = vdso->wall_time_snsec;
 		ns += vgetsns(vdso);
@@ -86,7 +86,7 @@ static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
 	u64 ns;
 
 	do {
-		count = read_seqcount_begin(&vdso->tb_seq);
+		count = raw_read_seqcount_begin(&vdso->tb_seq);
 		ts->tv_sec = vdso->monotonic_time_sec;
 		ns = vdso->monotonic_time_snsec;
 		ns += vgetsns(vdso);
@@ -105,7 +105,7 @@ static inline int do_realtime_coarse(struct vdso_data *vdso,
 	unsigned count;
 
 	do {
-		count = read_seqcount_begin(&vdso->tb_seq);
+		count = raw_read_seqcount_begin(&vdso->tb_seq);
 		ts->tv_sec = vdso->wall_time_coarse_sec;
 		ts->tv_nsec = vdso->wall_time_coarse_nsec;
 	} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
@@ -119,7 +119,7 @@ static inline int do_monotonic_coarse(struct vdso_data *vdso,
 	unsigned count;
 
 	do {
-		count = read_seqcount_begin(&vdso->tb_seq);
+		count = raw_read_seqcount_begin(&vdso->tb_seq);
 		ts->tv_sec = vdso->monotonic_time_coarse_sec;
 		ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
 	} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
@@ -137,7 +137,7 @@ struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
 	/* The use of the timezone is obsolete, normally tz is NULL. */
 	if (unlikely(tz != NULL)) {
 		do {
-			count = read_seqcount_begin(&vdso->tz_seq);
+			count = raw_read_seqcount_begin(&vdso->tz_seq);
 			tz->tz_minuteswest = vdso->tz_minuteswest;
 			tz->tz_dsttime = vdso->tz_dsttime;
 		} while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 82733c87d67e..9d171ca4302c 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -18,8 +18,6 @@
 
 /* arch/tile/lib/usercopy.S */
 #include <linux/uaccess.h>
-EXPORT_SYMBOL(strnlen_user_asm);
-EXPORT_SYMBOL(strncpy_from_user_asm);
 EXPORT_SYMBOL(clear_user_asm);
 EXPORT_SYMBOL(flush_user_asm);
 EXPORT_SYMBOL(finv_user_asm);
@@ -28,7 +26,6 @@ EXPORT_SYMBOL(finv_user_asm);
 #include <linux/kernel.h>
 #include <asm/processor.h>
 EXPORT_SYMBOL(current_text_addr);
-EXPORT_SYMBOL(dump_stack);
 
 /* arch/tile/kernel/head.S */
 EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index b34f79aada48..88c2a53362e7 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -65,8 +65,17 @@ EXPORT_SYMBOL(arch_spin_trylock);
 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	u32 iterations = 0;
-	while (arch_spin_is_locked(lock))
+	int curr = READ_ONCE(lock->current_ticket);
+	int next = READ_ONCE(lock->next_ticket);
+
+	/* Return immediately if unlocked. */
+	if (next == curr)
+		return;
+
+	/* Wait until the current locker has released the lock. */
+	do {
 		delay_backoff(iterations++);
+	} while (READ_ONCE(lock->current_ticket) == curr);
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
 
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
index d6fb9581e980..c8d1f94ff1fe 100644
--- a/arch/tile/lib/spinlock_64.c
+++ b/arch/tile/lib/spinlock_64.c
@@ -65,8 +65,17 @@ EXPORT_SYMBOL(arch_spin_trylock);
 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	u32 iterations = 0;
-	while (arch_spin_is_locked(lock))
+	u32 val = READ_ONCE(lock->lock);
+	u32 curr = arch_spin_current(val);
+
+	/* Return immediately if unlocked. */
+	if (arch_spin_next(val) == curr)
+		return;
+
+	/* Wait until the current locker has released the lock. */
+	do {
 		delay_backoff(iterations++);
+	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
 
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S
index 1bc162224638..db93ad5fae25 100644
--- a/arch/tile/lib/usercopy_32.S
+++ b/arch/tile/lib/usercopy_32.S
@@ -20,52 +20,6 @@
 /* Access user memory, but use MMU to avoid propagating kernel exceptions. */
 
 /*
- * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
- * It returns the length, including the terminating NUL, or zero on exception.
- * If length is greater than the bound, returns one plus the bound.
- */
-STD_ENTRY(strnlen_user_asm)
-	{ bz r1, 2f; addi r3, r0, -1 }  /* bias down to include NUL */
-1:	{ lb_u r4, r0; addi r1, r1, -1 }
-	bz r4, 2f
-	{ bnzt r1, 1b; addi r0, r0, 1 }
-2:	{ sub r0, r0, r3; jrp lr }
-	STD_ENDPROC(strnlen_user_asm)
-	.pushsection .fixup,"ax"
-strnlen_user_fault:
-	{ move r0, zero; jrp lr }
-	ENDPROC(strnlen_user_fault)
-	.section __ex_table,"a"
-	.align 4
-	.word 1b, strnlen_user_fault
-	.popsection
-
-/*
- * strncpy_from_user_asm takes the kernel target pointer in r0,
- * the userspace source pointer in r1, and the length bound (including
- * the trailing NUL) in r2. On success, it returns the string length
- * (not including the trailing NUL), or -EFAULT on failure.
- */
-STD_ENTRY(strncpy_from_user_asm)
-	{ bz r2, 2f; move r3, r0 }
-1:	{ lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
-	{ sb r0, r4; addi r0, r0, 1 }
-	bz r4, 2f
-	bnzt r2, 1b
-	{ sub r0, r0, r3; jrp lr }
-2:	addi r0, r0, -1   /* don't count the trailing NUL */
-	{ sub r0, r0, r3; jrp lr }
-	STD_ENDPROC(strncpy_from_user_asm)
-	.pushsection .fixup,"ax"
-strncpy_from_user_fault:
-	{ movei r0, -EFAULT; jrp lr }
-	ENDPROC(strncpy_from_user_fault)
-	.section __ex_table,"a"
-	.align 4
-	.word 1b, strncpy_from_user_fault
-	.popsection
-
-/*
  * clear_user_asm takes the user target address in r0 and the
  * number of bytes to zero in r1.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
index b3b31a3306f8..9322dc551e91 100644
--- a/arch/tile/lib/usercopy_64.S
+++ b/arch/tile/lib/usercopy_64.S
@@ -20,52 +20,6 @@
 /* Access user memory, but use MMU to avoid propagating kernel exceptions. */
 
 /*
- * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
- * It returns the length, including the terminating NUL, or zero on exception.
- * If length is greater than the bound, returns one plus the bound.
- */
-STD_ENTRY(strnlen_user_asm)
-	{ beqz r1, 2f; addi r3, r0, -1 }  /* bias down to include NUL */
-1:	{ ld1u r4, r0; addi r1, r1, -1 }
-	beqz r4, 2f
-	{ bnezt r1, 1b; addi r0, r0, 1 }
-2:	{ sub r0, r0, r3; jrp lr }
-	STD_ENDPROC(strnlen_user_asm)
-	.pushsection .fixup,"ax"
-strnlen_user_fault:
-	{ move r0, zero; jrp lr }
-	ENDPROC(strnlen_user_fault)
-	.section __ex_table,"a"
-	.align 8
-	.quad 1b, strnlen_user_fault
-	.popsection
-
-/*
- * strncpy_from_user_asm takes the kernel target pointer in r0,
- * the userspace source pointer in r1, and the length bound (including
- * the trailing NUL) in r2. On success, it returns the string length
- * (not including the trailing NUL), or -EFAULT on failure.
- */
-STD_ENTRY(strncpy_from_user_asm)
-	{ beqz r2, 2f; move r3, r0 }
-1:	{ ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
-	{ st1 r0, r4; addi r0, r0, 1 }
-	beqz r4, 2f
-	bnezt r2, 1b
-	{ sub r0, r0, r3; jrp lr }
-2:	addi r0, r0, -1   /* don't count the trailing NUL */
-	{ sub r0, r0, r3; jrp lr }
-	STD_ENDPROC(strncpy_from_user_asm)
-	.pushsection .fixup,"ax"
-strncpy_from_user_fault:
-	{ movei r0, -EFAULT; jrp lr }
-	ENDPROC(strncpy_from_user_fault)
-	.section __ex_table,"a"
-	.align 8
-	.quad 1b, strncpy_from_user_fault
-	.popsection
-
-/*
  * clear_user_asm takes the user target address in r0 and the
  * number of bytes to zero in r1.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 3f4f58d34a92..13eac59bf16a 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -699,11 +699,10 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
  * interrupt away appropriately and return immediately. We can't do
  * page faults for user code while in kernel mode.
  */
-void do_page_fault(struct pt_regs *regs, int fault_num,
-		   unsigned long address, unsigned long write)
+static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
+				   unsigned long address, unsigned long write)
 {
 	int is_page_fault;
-	enum ctx_state prev_state = exception_enter();
 
 #ifdef CONFIG_KPROBES
 	/*
@@ -713,7 +712,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 	 */
 	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
 		       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
-		goto done;
+		return;
 #endif
 
 #ifdef __tilegx__
@@ -835,18 +834,22 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 			async->is_fault = is_page_fault;
 			async->is_write = write;
 			async->address = address;
-			goto done;
+			return;
 		}
 	}
 #endif
 
 	handle_page_fault(regs, fault_num, is_page_fault, address, write);
+}
 
-done:
+void do_page_fault(struct pt_regs *regs, int fault_num,
+		   unsigned long address, unsigned long write)
+{
+	enum ctx_state prev_state = exception_enter();
+	__do_page_fault(regs, fault_num, address, write);
 	exception_exit(prev_state);
 }
 
-
 #if CHIP_HAS_TILE_DMA()
 /*
  * This routine effectively re-issues asynchronous page faults
diff --git a/drivers/tty/hvc/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
index 3f6cd3102db5..9da1e842bbe9 100644
--- a/drivers/tty/hvc/hvc_tile.c
+++ b/drivers/tty/hvc/hvc_tile.c
@@ -51,7 +51,8 @@ int tile_console_write(const char *buf, int count)
 				  _SIM_CONTROL_OPERATOR_BITS));
 		return 0;
 	} else {
-		return hv_console_write((HV_VirtAddr)buf, count);
+		/* Translate 0 bytes written to EAGAIN for hvc_console_print. */
+		return hv_console_write((HV_VirtAddr)buf, count) ?: -EAGAIN;
 	}
 }
 
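The "?:" here is the GNU C conditional-with-omitted-middle-operand extension, common in the kernel: "x ?: y" means "x ? x : y" with x evaluated only once. An equivalent expansion, for clarity:

	int written = hv_console_write((HV_VirtAddr)buf, count);

	/* Per the comment above, a 0-byte write is reported as -EAGAIN
	 * so hvc_console_print() retries instead of giving up. */
	return written ? written : -EAGAIN;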