Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/kernel/signal.c                   |  24
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h           |  32
-rw-r--r--  arch/arm64/include/asm/mmu_context.h       |   9
-rw-r--r--  arch/arm64/include/asm/percpu.h            |  44
-rw-r--r--  arch/metag/include/asm/io.h                |   1
-rw-r--r--  arch/metag/include/asm/pgtable-bits.h      | 104
-rw-r--r--  arch/metag/include/asm/pgtable.h           |  95
-rw-r--r--  arch/parisc/include/asm/pgalloc.h          |  17
-rw-r--r--  arch/parisc/kernel/syscall_table.S         |   9
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h      |   3
-rw-r--r--  arch/powerpc/include/asm/reg.h             |   3
-rw-r--r--  arch/powerpc/kernel/cputable.c             |  20
-rw-r--r--  arch/powerpc/kernel/dbell.c                |   2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S       |   2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c               |   8
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S    |   1
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c       |  14
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c  |  44
-rw-r--r--  arch/s390/include/asm/elf.h                |   2
-rw-r--r--  arch/s390/kernel/ftrace.c                  |  61
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c            |   7
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S            |  11
-rw-r--r--  arch/sparc/include/asm/hypervisor.h        |  12
-rw-r--r--  arch/sparc/kernel/hvapi.c                  |   1
-rw-r--r--  arch/sparc/kernel/hvcalls.S                |  16
-rw-r--r--  arch/sparc/kernel/pcr.c                    |  33
-rw-r--r--  arch/sparc/kernel/perf_event.c             |  55
-rw-r--r--  arch/sparc/kernel/process_64.c             |   4
-rw-r--r--  arch/sparc/lib/memmove.S                   |  35
-rw-r--r--  arch/x86/kvm/ioapic.c                      |   4
-rw-r--r--  arch/x86/kvm/lapic.c                       |   3
-rw-r--r--  arch/x86/kvm/vmx.c                         |   7
32 files changed, 487 insertions(+), 196 deletions(-)
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 114234e83caa..edda76fae83f 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 	       sigset_t *set)
 {
 	int err;
-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	if (!err)
 		set_current_blocked(&set);
 
-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
 
 	return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
 
+	/*
+	 * Ensure that sigreturn always returns to user mode (in case the
+	 * regs saved on user stack got fudged between save and sigreturn)
+	 * Otherwise it is easy to panic the kernel with a custom
+	 * signal handler and/or restorer which clobbers the status32/ret
+	 * to return to a bogus location in kernel mode.
+	 */
+	regs->status32 |= STATUS_U_MASK;
+
 	return regs->r0;
 
 badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
 	/*
 	 * handler returns using sigreturn stub provided already by userspace
+	 * If not, nuke the process right away
 	 */
-	BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+	if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
+		return 1;
+
 	regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
 
 	/* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
-	int ret;
+	int failed;
 
 	/* Set up the stack frame */
-	ret = setup_rt_frame(ksig, oldset, regs);
+	failed = setup_rt_frame(ksig, oldset, regs);
 
-	signal_setup_done(ret, ksig, 0);
+	signal_setup_done(failed, ksig, 0);
 }
 
 void do_signal(struct pt_regs *regs)
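
The STATUS_U_MASK hunk above is the actual security fix: sigreturn reloads
saved registers from the user stack, and nothing stops a signal handler or
restorer from rewriting that saved area first, so the kernel ORs the
user-mode bit back in instead of trusting the restored status32. A minimal
user-space sketch of the same sanitize-on-restore idea (the bit position and
register layout here are illustrative, not ARC's real ones):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define STATUS_U_MASK (1u << 7)   /* illustrative bit, not ARC's layout */

    struct regs { uint32_t status32; uint32_t ret; };

    /* Restore a register image from an untrusted buffer, then force the
     * user-mode bit so a forged image can never resume in kernel mode. */
    static void restore_regs(struct regs *regs, const void *untrusted)
    {
            memcpy(regs, untrusted, sizeof(*regs));
            regs->status32 |= STATUS_U_MASK;  /* never trust the saved mode */
    }

    int main(void)
    {
            struct regs forged = { .status32 = 0 };  /* pretends kernel mode */
            struct regs live;

            restore_regs(&live, &forged);
            printf("U bit forced: %d\n", !!(live.status32 & STATUS_U_MASK));
            return 0;
    }
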
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29..d8c25b7b18fb 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	__ret; \
 })
 
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-	cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-				o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n)			\
+({								\
+	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
+	preempt_disable();					\
+	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
+	preempt_enable();					\
+	__ret;							\
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
+({									\
+	int __ret;							\
+	preempt_disable();						\
+	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
+					raw_cpu_ptr(&(ptr2)),		\
+					o1, o2, n1, n2);		\
+	preempt_enable();						\
+	__ret;								\
+})
 
 #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
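
This hunk and the percpu.h one below close the same hole: a this_cpu
operation issued from preemptible code can be migrated between computing
raw_cpu_ptr(&pcp) and performing the access, landing the cmpxchg on another
CPU's copy of the variable, so the operation is now bracketed by
preempt_disable()/preempt_enable(). A rough user-space analogue of the
wrapper shape, using C11 atomics with stubbed protect calls (the kernel's
preempt_disable() has no user-space equivalent; names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-ins for preempt_disable()/preempt_enable(); in the kernel they
     * keep the task on the current CPU for the critical section. */
    static void protect_begin(void) { }
    static void protect_end(void) { }

    /* The _protect_cmpxchg_local() shape: resolve the pointer and do the
     * compare-and-swap inside one protected section, return the value seen. */
    #define protect_cmpxchg(ptr, o, n)                          \
    ({                                                          \
            __typeof__(o) __old = (o);                          \
            protect_begin();                                    \
            atomic_compare_exchange_strong((ptr), &__old, (n)); \
            protect_end();                                      \
            __old;                                              \
    })

    int main(void)
    {
            atomic_int v = 5;
            int seen = protect_cmpxchg(&v, 5, 9);

            printf("seen=%d now=%d\n", seen, atomic_load(&v));
            return 0;
    }
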
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62..101a42bde728 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	unsigned int cpu = smp_processor_id();
 
+	/*
+	 * init_mm.pgd does not contain any user mappings and it is always
+	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+	 */
+	if (next == &init_mm) {
+		cpu_set_reserved_ttbr0();
+		return;
+	}
+
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
 		check_and_switch_context(next, tsk);
 }
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596f..4fde8c1df97f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 	return ret;
 }
 
+#define _percpu_read(pcp)						\
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable();						\
+	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
+					      sizeof(pcp));		\
+	preempt_enable();						\
+	__retval;							\
+})
+
+#define _percpu_write(pcp, val)						\
+do {									\
+	preempt_disable();						\
+	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
+		       sizeof(pcp));					\
+	preempt_enable();						\
+} while(0)								\
+
+#define _pcp_protect(operation, pcp, val)				\
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable();						\
+	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),		\
+					  (val), sizeof(pcp));		\
+	preempt_enable();						\
+	__retval;							\
+})
+
 #define _percpu_add(pcp, val) \
-	__percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+	_pcp_protect(__percpu_add, pcp, val)
 
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
 
 #define _percpu_and(pcp, val) \
-	__percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+	_pcp_protect(__percpu_and, pcp, val)
 
 #define _percpu_or(pcp, val) \
-	__percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp))	\
-	(__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
-	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+	_pcp_protect(__percpu_or, pcp, val)
 
 #define _percpu_xchg(pcp, val) (typeof(pcp)) \
-	(__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
 
 #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
 #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
index 9359e5048442..d5779b0ec573 100644
--- a/arch/metag/include/asm/io.h
+++ b/arch/metag/include/asm/io.h
@@ -2,6 +2,7 @@
 #define _ASM_METAG_IO_H
 
 #include <linux/types.h>
+#include <asm/pgtable-bits.h>
 
 #define IO_SPACE_LIMIT  0
 
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..25ba6729f496
--- /dev/null
+++ b/arch/metag/include/asm/pgtable-bits.h
@@ -0,0 +1,104 @@
+/*
+ * Meta page table definitions.
+ */
+
+#ifndef _METAG_PGTABLE_BITS_H
+#define _METAG_PGTABLE_BITS_H
+
+#include <asm/metag_mem.h>
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT		MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE		MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV		MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE	MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT	MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1	0x020
+#define _PAGE_CACHE_CTRL0	0x040
+#define _PAGE_CACHE_CTRL1	0x080
+#define _PAGE_ALWAYS_ZERO_2	0x100
+#define _PAGE_ALWAYS_ZERO_3	0x200
+#define _PAGE_ALWAYS_ZERO_4	0x400
+#define _PAGE_ALWAYS_ZERO_5	0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used.  Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED		_PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY		_PAGE_ALWAYS_ZERO_2
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL		_PAGE_PRIV
+
+/* No cacheing of this page */
+#define _PAGE_CACHE_WIN0	(MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* burst cacheing - good for data streaming */
+#define _PAGE_CACHE_WIN1	(MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2	(MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full on cacheing */
+#define _PAGE_CACHE_WIN3	(MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE		(_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK	(_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+				 _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK		(PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT		1
+#define _PAGE_SZ_4K		(0x0)
+#define _PAGE_SZ_8K		(0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K		(0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K		(0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K		(0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K		(0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K		(0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K		(0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M		(0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M		(0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M		(0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK		(0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ		(_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ		(_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ		(_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE		(_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_4M)
+#endif
+
+#endif /* _METAG_PGTABLE_BITS_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index d0604c0a8702..ffa3a3a2ecad 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -5,6 +5,7 @@
 #ifndef _METAG_PGTABLE_H
 #define _METAG_PGTABLE_H
 
+#include <asm/pgtable-bits.h>
 #include <asm-generic/pgtable-nopmd.h>
 
 /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@@ -21,100 +22,6 @@
 #endif
 
 /*
- * Definitions for MMU descriptors
- *
- * These are the hardware bits in the MMCU pte entries.
- * Derived from the Meta toolkit headers.
- */
-#define _PAGE_PRESENT		MMCU_ENTRY_VAL_BIT
-#define _PAGE_WRITE		MMCU_ENTRY_WR_BIT
-#define _PAGE_PRIV		MMCU_ENTRY_PRIV_BIT
-/* Write combine bit - this can cause writes to occur out of order */
-#define _PAGE_WR_COMBINE	MMCU_ENTRY_WRC_BIT
-/* Sys coherent bit - this bit is never used by Linux */
-#define _PAGE_SYS_COHERENT	MMCU_ENTRY_SYS_BIT
-#define _PAGE_ALWAYS_ZERO_1	0x020
-#define _PAGE_CACHE_CTRL0	0x040
-#define _PAGE_CACHE_CTRL1	0x080
-#define _PAGE_ALWAYS_ZERO_2	0x100
-#define _PAGE_ALWAYS_ZERO_3	0x200
-#define _PAGE_ALWAYS_ZERO_4	0x400
-#define _PAGE_ALWAYS_ZERO_5	0x800
-
-/* These are software bits that we stuff into the gaps in the hardware
- * pte entries that are not used.  Note, these DO get stored in the actual
- * hardware, but the hardware just does not use them.
- */
-#define _PAGE_ACCESSED		_PAGE_ALWAYS_ZERO_1
-#define _PAGE_DIRTY		_PAGE_ALWAYS_ZERO_2
-
-/* Pages owned, and protected by, the kernel. */
-#define _PAGE_KERNEL		_PAGE_PRIV
-
-/* No cacheing of this page */
-#define _PAGE_CACHE_WIN0	(MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
-/* burst cacheing - good for data streaming */
-#define _PAGE_CACHE_WIN1	(MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
-/* One cache way per thread */
-#define _PAGE_CACHE_WIN2	(MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
-/* Full on cacheing */
-#define _PAGE_CACHE_WIN3	(MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
-
-#define _PAGE_CACHEABLE		(_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
-
-/* which bits are used for cache control ... */
-#define _PAGE_CACHE_MASK	(_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
-				 _PAGE_WR_COMBINE)
-
-/* This is a mask of the bits that pte_modify is allowed to change. */
-#define _PAGE_CHG_MASK		(PAGE_MASK)
-
-#define _PAGE_SZ_SHIFT		1
-#define _PAGE_SZ_4K		(0x0)
-#define _PAGE_SZ_8K		(0x1 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_16K		(0x2 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_32K		(0x3 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_64K		(0x4 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_128K		(0x5 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_256K		(0x6 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_512K		(0x7 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_1M		(0x8 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_2M		(0x9 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_4M		(0xa << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_MASK		(0xf << _PAGE_SZ_SHIFT)
-
-#if defined(CONFIG_PAGE_SIZE_4K)
-#define _PAGE_SZ		(_PAGE_SZ_4K)
-#elif defined(CONFIG_PAGE_SIZE_8K)
-#define _PAGE_SZ		(_PAGE_SZ_8K)
-#elif defined(CONFIG_PAGE_SIZE_16K)
-#define _PAGE_SZ		(_PAGE_SZ_16K)
-#endif
-#define _PAGE_TABLE		(_PAGE_SZ | _PAGE_PRESENT)
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_8K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_16K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_32K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_64K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_128K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_256K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_512K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_1M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_2M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_4M)
-#endif
-
-/*
  * The Linux memory management assumes a three-level page table setup. On
  * Meta, we use that, but "fold" the mid level into the top-level page
  * table.
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f213f5b4c423..d17437238a2c 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	if (likely(pgd != NULL)) {
 		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 		actual_pgd += PTRS_PER_PGD;
 		/* Populate first pmd with allocated memory.  We mark it
 		 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	pgd -= PTRS_PER_PGD;
 #endif
 	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-#ifdef CONFIG_64BIT
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		/* This is the permanent pmd attached to the pgd;
-		 * cannot free it */
+		/*
+		 * This is the permanent pmd attached to the pgd;
+		 * cannot free it.
+		 * Increment the counter to compensate for the decrement
+		 * done by generic mm code.
+		 */
+		mm_inc_nr_pmds(mm);
 		return;
-#endif
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	/* preserve the gateway marker if this is the beginning of
 	 * the permanent pmd */
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 5a8997d63899..8eefb12d1d33 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -55,8 +55,8 @@
 #define ENTRY_COMP(_name_) .word sys_##_name_
 #endif
 
-	ENTRY_SAME(restart_syscall)	/* 0 */
-	ENTRY_SAME(exit)
+90:	ENTRY_SAME(restart_syscall)	/* 0 */
+91:	ENTRY_SAME(exit)
 	ENTRY_SAME(fork_wrapper)
 	ENTRY_SAME(read)
 	ENTRY_SAME(write)
@@ -439,7 +439,10 @@
 	ENTRY_SAME(bpf)
 	ENTRY_COMP(execveat)
 
-	/* Nothing yet */
+
+.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+.endif
 
 #undef ENTRY_SAME
 #undef ENTRY_DIFF
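
The .ifne block is an assemble-time assertion: (91b - 90b) is the size of
one table entry, (. - 90b) the size of the whole table, so the build now
fails whenever the table length and __NR_Linux_syscalls disagree instead of
silently indexing past the end of the table. The C analogue of this
build-time check is a _Static_assert on the table size, sketched here with
illustrative names:

    #define NR_SYSCALLS 3   /* illustrative stand-in for __NR_Linux_syscalls */

    typedef long (*syscall_fn)(void);

    static long sys_restart(void) { return 0; }
    static long sys_exit(void)    { return 0; }
    static long sys_fork(void)    { return 0; }

    static const syscall_fn table[] = { sys_restart, sys_exit, sys_fork };

    /* Fails to compile as soon as the table and the count drift apart,
     * exactly what the .ifne/.error pair does at assembly time. */
    _Static_assert(sizeof(table) / sizeof(table[0]) == NR_SYSCALLS,
                   "size of syscall table does not fit value of NR_SYSCALLS");

    int main(void) { return (int)table[0](); }
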
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 03cd858a401c..4cbe23af400a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -153,6 +153,7 @@
 #define PPC_INST_MFSPR_PVR_MASK		0xfc1fffff
 #define PPC_INST_MFTMR			0x7c0002dc
 #define PPC_INST_MSGSND			0x7c00019c
+#define PPC_INST_MSGCLR			0x7c0001dc
 #define PPC_INST_MSGSNDP		0x7c00011c
 #define PPC_INST_MTTMR			0x7c0003dc
 #define PPC_INST_NOP			0x60000000
@@ -309,6 +310,8 @@
 					___PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
 					___PPC_RB(b))
+#define PPC_MSGCLR(b)		stringify_in_c(.long PPC_INST_MSGCLR | \
+					___PPC_RB(b))
 #define PPC_MSGSNDP(b)		stringify_in_c(.long PPC_INST_MSGSNDP | \
 					___PPC_RB(b))
 #define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1c874fb533bb..af56b5c6c81a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -608,13 +608,16 @@
 #define SRR1_ISI_N_OR_G		0x10000000 /* ISI: Access is no-exec or G */
 #define SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
 #define SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
+#define SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 */
 #define SRR1_WAKESYSERR		0x00300000 /* System error */
 #define SRR1_WAKEEE		0x00200000 /* External interrupt */
 #define SRR1_WAKEMT		0x00280000 /* mtctrl */
 #define SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
 #define SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
+#define SRR1_WAKEDBELL		0x00140000 /* Privileged doorbell on P8 */
 #define SRR1_WAKETHERM		0x00100000 /* Thermal management interrupt */
 #define SRR1_WAKERESET		0x00100000 /* System reset */
+#define SRR1_WAKEHDBELL		0x000c0000 /* Hypervisor doorbell on P8 */
 #define SRR1_WAKESTATE		0x00030000 /* Powersave exit mask [46:47] */
 #define SRR1_WS_DEEPEST		0x00030000 /* Some resources not maintained,
 					    * may not be recoverable */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f337666768a7..f83046878336 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check_early	= __machine_check_early_realmode_p8,
 		.platform		= "power8",
 	},
+	{	/* Power8NVL */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x004c0000,
+		.cpu_name		= "POWER8NVL (raw)",
+		.cpu_features		= CPU_FTRS_POWER8,
+		.cpu_user_features	= COMMON_USER_POWER8,
+		.cpu_user_features2	= COMMON_USER2_POWER8,
+		.mmu_features		= MMU_FTRS_POWER8,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.cpu_setup		= __setup_cpu_power8,
+		.cpu_restore		= __restore_cpu_power8,
+		.flush_tlb		= __flush_tlb_power8,
+		.machine_check_early	= __machine_check_early_realmode_p8,
+		.platform		= "power8",
+	},
 	{	/* Power8 DD1: Does not support doorbell IPIs */
 		.pvr_mask		= 0xffffff00,
 		.pvr_value		= 0x004d0100,
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f4217819cc31..2128f3a96c32 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -17,6 +17,7 @@
 
 #include <asm/dbell.h>
 #include <asm/irq_regs.h>
+#include <asm/kvm_ppc.h>
 
 #ifdef CONFIG_SMP
 void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
 
 	may_hard_irq_enable();
 
+	kvmppc_set_host_ipi(smp_processor_id(), 0);
 	__this_cpu_inc(irq_stat.doorbell_irqs);
 
 	smp_ipi_demux();
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c2df8150bd7a..9519e6bdc6d7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,7 +1408,7 @@ machine_check_handle_early:
 	bne	9f			/* continue in V mode if we are. */
 
 5:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 	/*
 	 * We are coming from kernel context. Check if we are coming from
 	 * guest. if yes, then we can continue. We will fall through
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de4018a1bc4b..de747563d29d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
 	if (lppaca)
-		yield_count = lppaca->yield_count;
+		yield_count = be32_to_cpu(lppaca->yield_count);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 	return yield_count;
 }
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 		bool preserve_top32)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 mask;
 
+	mutex_lock(&kvm->lock);
 	spin_lock(&vc->lock);
 	/*
 	 * If ILE (interrupt little-endian) has changed, update the
 	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
 	 */
 	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
-		struct kvm *kvm = vcpu->kvm;
 		struct kvm_vcpu *vcpu;
 		int i;
 
-		mutex_lock(&kvm->lock);
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->arch.vcore != vc)
 				continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 			else
 				vcpu->arch.intr_msr &= ~MSR_LE;
 		}
-		mutex_unlock(&kvm->lock);
 	}
 
 	/*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 		mask &= 0xFFFFFFFF;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
+	mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
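
The kvmppc_set_lpcr() change is about lock ordering rather than new
protection: kvm->lock is a sleeping mutex and was previously acquired
inside spin_lock(&vc->lock), so the fix hoists it in front of the spinlock
and drops it after. The same take-the-sleeping-lock-first shape with
pthread analogues (a user-space analogy, not KVM code; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_spinlock_t vc_lock;

    static void set_lpcr_like(void)
    {
            pthread_mutex_lock(&kvm_lock);  /* blocking lock taken first */
            pthread_spin_lock(&vc_lock);    /* spin section stays short */
            /* ... update state covered by both locks ... */
            pthread_spin_unlock(&vc_lock);
            pthread_mutex_unlock(&kvm_lock);
    }

    int main(void)
    {
            pthread_spin_init(&vc_lock, PTHREAD_PROCESS_PRIVATE);
            set_lpcr_like();
            puts("ok");
            return 0;
    }
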
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bb94e6f20c81..6cbf1630cb70 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Save HEIR (HV emulation assist reg) in emul_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li	r3,KVM_INST_FETCH_FAILED
+	stw	r3,VCPU_LAST_INST(r9)
 	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne	11f
 	mfspr	r3,SPRN_HEIR
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index fc34025ef822..38a45088f633 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -33,6 +33,8 @@
 #include <asm/runlatch.h>
 #include <asm/code-patching.h>
 #include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
 
 #include "powernv.h"
 
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
 static void pnv_smp_cpu_kill_self(void)
 {
 	unsigned int cpu;
-	unsigned long srr1;
+	unsigned long srr1, wmask;
 	u32 idle_states;
 
 	/* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
 	generic_set_cpu_dead(cpu);
 	smp_wmb();
 
+	wmask = SRR1_WAKEMASK;
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		wmask = SRR1_WAKEMASK_P8;
+
 	idle_states = pnv_get_supported_cpuidle_states();
 	/* We don't want to take decrementer interrupts while we are offline,
 	 * so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
 		 * having finished executing in a KVM guest, then srr1
 		 * contains 0.
 		 */
-		if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+		if ((srr1 & wmask) == SRR1_WAKEEE) {
 			icp_native_flush_interrupt();
 			local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
 			smp_mb();
+		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
+			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
+			kvmppc_set_host_ipi(cpu, 0);
 		}
 
 		if (cpu_core_split_required())
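
POWER8 widened the wakeup-reason field in SRR1 (hence SRR1_WAKEMASK_P8,
0x003c0000 instead of 0x00380000) and added doorbell reasons, so an
offlined CPU has to decode the reason with the right mask and explicitly
clear a hypervisor doorbell with msgclr before napping again. The decode
step, extracted into runnable C using the constants this series adds to
reg.h:

    #include <stdint.h>
    #include <stdio.h>

    #define SRR1_WAKEMASK_P8 0x003c0000u  /* wakeup-reason field on POWER8 */
    #define SRR1_WAKEEE      0x00200000u  /* external interrupt */
    #define SRR1_WAKEHDBELL  0x000c0000u  /* hypervisor doorbell */

    /* Mask first, then compare against the reason codes, the way
     * pnv_smp_cpu_kill_self() now does with wmask. */
    static const char *wake_reason(uint64_t srr1)
    {
            switch (srr1 & SRR1_WAKEMASK_P8) {
            case SRR1_WAKEEE:     return "external interrupt";
            case SRR1_WAKEHDBELL: return "hypervisor doorbell";
            default:              return "other";
            }
    }

    int main(void)
    {
            printf("%s\n", wake_reason(0x000c0000));  /* hypervisor doorbell */
            return 0;
    }
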
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 90cf3dcbd9f2..8f35d525cede 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -25,10 +25,10 @@
 static struct kobject *mobility_kobj;
 
 struct update_props_workarea {
-	u32 phandle;
-	u32 state;
-	u64 reserved;
-	u32 nprops;
+	__be32 phandle;
+	__be32 state;
+	__be64 reserved;
+	__be32 nprops;
 } __packed;
 
 #define NODE_ACTION_MASK	0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
 	return rc;
 }
 
-static int delete_dt_node(u32 phandle)
+static int delete_dt_node(__be32 phandle)
 {
 	struct device_node *dn;
 
-	dn = of_find_node_by_phandle(phandle);
+	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
 	if (!dn)
 		return -ENOENT;
 
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
 	return 0;
 }
 
-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(__be32 phandle, s32 scope)
 {
 	struct update_props_workarea *upwa;
 	struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 	char *prop_data;
 	char *rtas_buf;
 	int update_properties_token;
+	u32 nprops;
 	u32 vd;
 
 	update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 	if (!rtas_buf)
 		return -ENOMEM;
 
-	dn = of_find_node_by_phandle(phandle);
+	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
 	if (!dn) {
 		kfree(rtas_buf);
 		return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 			break;
 
 		prop_data = rtas_buf + sizeof(*upwa);
+		nprops = be32_to_cpu(upwa->nprops);
 
 		/* On the first call to ibm,update-properties for a node
 		 * the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
 		 */
 		if (*prop_data == 0) {
 			prop_data++;
-			vd = *(u32 *)prop_data;
+			vd = be32_to_cpu(*(__be32 *)prop_data);
 			prop_data += vd + sizeof(vd);
-			upwa->nprops--;
+			nprops--;
 		}
 
-		for (i = 0; i < upwa->nprops; i++) {
+		for (i = 0; i < nprops; i++) {
 			char *prop_name;
 
 			prop_name = prop_data;
 			prop_data += strlen(prop_name) + 1;
-			vd = *(u32 *)prop_data;
+			vd = be32_to_cpu(*(__be32 *)prop_data);
 			prop_data += sizeof(vd);
 
 			switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
 	return 0;
 }
 
-static int add_dt_node(u32 parent_phandle, u32 drc_index)
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
 {
 	struct device_node *dn;
 	struct device_node *parent_dn;
 	int rc;
 
-	parent_dn = of_find_node_by_phandle(parent_phandle);
+	parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
 	if (!parent_dn)
 		return -ENOENT;
 
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
 int pseries_devicetree_update(s32 scope)
 {
 	char *rtas_buf;
-	u32 *data;
+	__be32 *data;
 	int update_nodes_token;
 	int rc;
 
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
 		if (rc && rc != 1)
 			break;
 
-		data = (u32 *)rtas_buf + 4;
-		while (*data & NODE_ACTION_MASK) {
+		data = (__be32 *)rtas_buf + 4;
+		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
 			int i;
-			u32 action = *data & NODE_ACTION_MASK;
-			int node_count = *data & NODE_COUNT_MASK;
+			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
 
 			data++;
 
 			for (i = 0; i < node_count; i++) {
-				u32 phandle = *data++;
-				u32 drc_index;
+				__be32 phandle = *data++;
+				__be32 drc_index;
 
 				switch (action) {
 				case DELETE_DT_NODE:
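
Every hunk in mobility.c applies one rule: RTAS fills the work area in
big-endian byte order, so fields are typed __be32/__be64 while they hold
wire-order data and are converted with be32_to_cpu() exactly once before
the CPU masks or compares them (on little-endian ppc64le the two orders
really differ; the old code also modified the RTAS buffer in place via
upwa->nprops--, which the new local nprops avoids). A user-space sketch of
the same parse, with glibc's be32toh() standing in for be32_to_cpu():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NODE_ACTION_MASK 0xff000000u
    #define NODE_COUNT_MASK  0x00ffffffu

    int main(void)
    {
            /* A big-endian word as RTAS would return it:
             * action 0x01 in the top byte, node count 2 below it. */
            const uint8_t wire[4] = { 0x01, 0x00, 0x00, 0x02 };
            uint32_t be;

            memcpy(&be, wire, sizeof(be));  /* still big-endian here */

            uint32_t host   = be32toh(be);  /* convert once, then use */
            uint32_t action = host & NODE_ACTION_MASK;
            uint32_t count  = host & NODE_COUNT_MASK;

            printf("action=0x%08x count=%u\n", action, count);
            return 0;
    }
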
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9df40b5c0ac..c9c875d9ed31 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -211,7 +211,7 @@ do {								\
 
 extern unsigned long mmap_rnd_mask;
 
-#define STACK_RND_MASK	(mmap_rnd_mask)
+#define STACK_RND_MASK	(test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
 
 #define ARCH_DLINFO							\
 do {								\
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82c19899574f..6c79f1b44fe7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -57,6 +57,44 @@
 
 unsigned long ftrace_plt;
 
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+	/* brcl 0,0 */
+	insn->opc = 0xc004;
+	insn->disp = 0;
+#else
+	/* stg r14,8(r15) */
+	insn->opc = 0xe3e0;
+	insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	if (insn->opc == BREAKPOINT_INSTRUCTION)
+		return 1;
+#endif
+	return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	insn->opc = BREAKPOINT_INSTRUCTION;
+	insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	insn->opc = BREAKPOINT_INSTRUCTION;
+	insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 		       unsigned long addr)
 {
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		return -EFAULT;
 	if (addr == MCOUNT_ADDR) {
 		/* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
-		/* We expect to see brcl 0,0 */
-		ftrace_generate_nop_insn(&orig);
-#else
-		/* We expect to see stg r14,8(r15) */
-		orig.opc = 0xe3e0;
-		orig.disp = 0xf0080024;
-#endif
+		ftrace_generate_orig_insn(&orig);
 		ftrace_generate_nop_insn(&new);
-	} else if (old.opc == BREAKPOINT_INSTRUCTION) {
+	} else if (is_kprobe_on_ftrace(&old)) {
 		/*
 		 * If we find a breakpoint instruction, a kprobe has been
 		 * placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		 * bytes of the original instruction so that the kprobes
 		 * handler can execute a nop, if it reaches this breakpoint.
 		 */
-		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-		orig.disp = KPROBE_ON_FTRACE_CALL;
-		new.disp = KPROBE_ON_FTRACE_NOP;
+		ftrace_generate_kprobe_call_insn(&orig);
+		ftrace_generate_kprobe_nop_insn(&new);
 	} else {
 		/* Replace ftrace call with a nop. */
 		ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	if (old.opc == BREAKPOINT_INSTRUCTION) {
+	if (is_kprobe_on_ftrace(&old)) {
 		/*
 		 * If we find a breakpoint instruction, a kprobe has been
 		 * placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * bytes of the original instruction so that the kprobes
 		 * handler can execute a brasl if it reaches this breakpoint.
 		 */
-		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-		orig.disp = KPROBE_ON_FTRACE_NOP;
-		new.disp = KPROBE_ON_FTRACE_CALL;
+		ftrace_generate_kprobe_nop_insn(&orig);
+		ftrace_generate_kprobe_call_insn(&new);
 	} else {
 		/* Replace nop with an ftrace call. */
 		ftrace_generate_nop_insn(&orig);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c3f8d157cb0d..e6a1578fc000 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
 static struct attribute *cpumsf_pmu_events_attr[] = {
 	CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-	CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+	NULL,
 	NULL,
 };
 
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
 		return -EINVAL;
 	}
 
-	if (si.ad)
+	if (si.ad) {
 		sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+		cpumsf_pmu_events_attr[1] =
+			CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+	}
 
 	sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
 	if (!sfdbg)
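
The attribute fix works because cpumsf_pmu_events_attr[] is a
NULL-terminated array with a spare slot: the diagnostic-sampling event is
no longer advertised unconditionally, but patched into slot 1 at init time
only when the machine actually reports the facility (si.ad). The same
conditional-registration shape in plain C:

    #include <stdbool.h>
    #include <stdio.h>

    /* NULL-terminated list with a spare slot, mirroring
     * cpumsf_pmu_events_attr[]. */
    static const char *events[] = { "SF_CYCLES_BASIC", NULL, NULL };

    static void init_pmu(bool have_diag_sampling)
    {
            /* Advertise the diagnostic event only when supported, as
             * init_cpum_sampling_pmu() now does under si.ad. */
            if (have_diag_sampling)
                    events[1] = "SF_CYCLES_BASIC_DIAG";
    }

    int main(void)
    {
            init_pmu(true);
            for (const char **e = events; *e; e++)
                    puts(*e);
            return 0;
    }
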
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 6b09fdffbd2f..ca6294645dd3 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -177,6 +177,17 @@ restart_entry:
 	lhi	%r1,1
 	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
 	sam64
+#ifdef CONFIG_SMP
+	larl	%r1,smp_cpu_mt_shift
+	icm	%r1,15,0(%r1)
+	jz	smt_done
+	llgfr	%r1,%r1
+smt_loop:
+	sigp	%r1,%r0,SIGP_SET_MULTI_THREADING
+	brc	8,smt_done			/* accepted */
+	brc	2,smt_loop			/* busy, try again */
+smt_done:
+#endif
 	larl	%r1,.Lnew_pgm_check_psw
 	lpswe	0(%r1)
 pgm_check_entry:
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 4f6725ff4c33..f5b6537306f0 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
 				   unsigned long reg_val);
 #endif
 
+
+#define	HV_FAST_M7_GET_PERFREG	0x43
+#define	HV_FAST_M7_SET_PERFREG	0x44
+
+#ifndef	__ASSEMBLY__
+unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
+				   unsigned long *reg_val);
+unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
+				   unsigned long reg_val);
+#endif
+
 /* Function numbers for HV_CORE_TRAP.  */
 #define HV_CORE_SET_VER			0x00
 #define HV_CORE_PUTCHAR			0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO			0x0108
 #define HV_GRP_SDIO_ERR			0x0109
 #define HV_GRP_REBOOT_DATA		0x0110
+#define HV_GRP_M7_PERF			0x0114
 #define HV_GRP_NIAG_PERF		0x0200
 #define HV_GRP_FIRE_PERF		0x0201
 #define HV_GRP_N2_CPU			0x0202
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 5c55145bfbf0..662500fa555f 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
 	{ .group = HV_GRP_VT_CPU,				},
 	{ .group = HV_GRP_T5_CPU,				},
 	{ .group = HV_GRP_DIAG,		.flags = FLAG_PRE_API	},
+	{ .group = HV_GRP_M7_PERF,				},
 };
 
 static DEFINE_SPINLOCK(hvapi_lock);
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index caedf8320416..afbaba52d2f1 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
 	retl
 	 nop
 ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+	mov	%o1, %o4
+	mov	HV_FAST_M7_GET_PERFREG, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o4]
+	retl
+	 nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+	mov	HV_FAST_M7_SET_PERFREG, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_m7_set_perfreg)
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7e967c8018c8..eb978c77c76a 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
 	.pcr_nmi_disable	= PCR_N4_PICNPT,
 };
 
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+	unsigned long val;
+
+	(void) sun4v_m7_get_perfreg(reg_num, &val);
+
+	return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+	(void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+	.read_pcr		= m7_pcr_read,
+	.write_pcr		= m7_pcr_write,
+	.read_pic		= n4_pic_read,
+	.write_pic		= n4_pic_write,
+	.nmi_picl_value		= n4_picl_value,
+	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
+				   PCR_N4_UTRACE | PCR_N4_TOE |
+				   (26 << PCR_N4_SL_SHIFT)),
+	.pcr_nmi_disable	= PCR_N4_PICNPT,
+};
 
 static unsigned long perf_hsvc_group;
 static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
 		perf_hsvc_group = HV_GRP_T5_CPU;
 		break;
 
+	case SUN4V_CHIP_SPARC_M7:
+		perf_hsvc_group = HV_GRP_M7_PERF;
+		break;
+
 	default:
 		return -ENODEV;
 	}
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
 		pcr_ops = &n5_pcr_ops;
 		break;
 
+	case SUN4V_CHIP_SPARC_M7:
+		pcr_ops = &m7_pcr_ops;
+		break;
+
 	default:
 		ret = -ENODEV;
 		break;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 46a5e4508752..86eebfa3b158 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
 	.num_pic_regs	= 4,
 };
 
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+	u64 pcr;
+
+	pcr = pcr_ops->read_pcr(idx);
+	/* ensure ov and ntc are reset */
+	pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+	pcr_ops->write_pic(idx, val & 0xffffffff);
+
+	pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+	.event_map	= niagara4_event_map,
+	.cache_map	= &niagara4_cache_map,
+	.max_events	= ARRAY_SIZE(niagara4_perfmon_event_map),
+	.read_pmc	= sparc_vt_read_pmc,
+	.write_pmc	= sparc_m7_write_pmc,
+	.upper_shift	= 5,
+	.lower_shift	= 5,
+	.event_mask	= 0x7ff,
+	.user_bit	= PCR_N4_UTRACE,
+	.priv_bit	= PCR_N4_STRACE,
+
+	/* We explicitly don't support hypervisor tracing. */
+	.hv_bit		= 0,
+
+	.irq_bit	= PCR_N4_TOE,
+	.upper_nop	= 0,
+	.lower_nop	= 0,
+	.flags		= 0,
+	.max_hw_events	= 4,
+	.num_pcrs	= 4,
+	.num_pic_regs	= 4,
+};
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
 static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
 		cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
 }
 
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
 /* On this PMU each PIC has its own PCR control register.  */
 static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 {
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 		struct perf_event *cp = cpuc->event[i];
 		struct hw_perf_event *hwc = &cp->hw;
 		int idx = hwc->idx;
-		u64 enc;
 
 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
 			continue;
 
-		sparc_perf_event_set_period(cp, hwc, idx);
 		cpuc->current_idx[i] = idx;
 
-		enc = perf_event_get_enc(cpuc->events[i]);
-		cpuc->pcr[idx] &= ~mask_for_index(idx);
-		if (hwc->state & PERF_HES_STOPPED)
-			cpuc->pcr[idx] |= nop_for_index(idx);
-		else
-			cpuc->pcr[idx] |= event_encoding(enc, idx);
+		sparc_pmu_start(cp, PERF_EF_RELOAD);
 	}
 out:
 	for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 	int i;
 
 	local_irq_save(flags);
-	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 		}
 	}
 
-	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
 	if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
 		sparc_pmu = &niagara4_pmu;
 		return true;
 	}
+	if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+		sparc_pmu = &sparc_m7_pmu;
+		return true;
+	}
 	return false;
 }
 
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 0be7bf978cb1..46a59643bb1c 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
 			       gp->tpc, gp->o7, gp->i7, gp->rpc);
 		}
+
+		touch_nmi_watchdog();
 	}
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
 		       (cpu == this_cpu ? '*' : ' '), cpu,
 		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
 		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+		touch_nmi_watchdog();
 	}
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index b7f6334e159f..857ad4f8905f 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -8,9 +8,11 @@
 
 	.text
 ENTRY(memmove) /* o0=dst o1=src o2=len */
-	mov		%o0, %g1
+	brz,pn		%o2, 99f
+	 mov		%o0, %g1
+
 	cmp		%o0, %o1
-	bleu,pt		%xcc, memcpy
+	bleu,pt		%xcc, 2f
 	 add		%o1, %o2, %g7
 	cmp		%g7, %o0
 	bleu,pt		%xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
 	 stb		%g7, [%o0]
 	bne,pt		%icc, 1b
 	 sub		%o0, 1, %o0
-
+99:
 	retl
 	 mov		%g1, %o0
+
+	/* We can't just call memcpy for these memmove cases.  On some
+	 * chips the memcpy uses cache initializing stores and when dst
+	 * and src are close enough, those can clobber the source data
+	 * before we've loaded it in.
+	 */
+2:	or		%o0, %o1, %g7
+	or		%o2, %g7, %g7
+	andcc		%g7, 0x7, %g0
+	bne,pn		%xcc, 4f
+	 nop
+
+3:	ldx		[%o1], %g7
+	add		%o1, 8, %o1
+	subcc		%o2, 8, %o2
+	add		%o0, 8, %o0
+	bne,pt		%icc, 3b
+	 stx		%g7, [%o0 - 0x8]
+	ba,a,pt		%xcc, 99b
+
+4:	ldub		[%o1], %g7
+	add		%o1, 1, %o1
+	subcc		%o2, 1, %o2
+	add		%o0, 1, %o0
+	bne,pt		%icc, 4b
+	 stb		%g7, [%o0 - 0x1]
+	ba,a,pt		%xcc, 99b
 ENDPROC(memmove)
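
The memmove rewrite exists because sparc64's optimized memcpy may use
cache-initializing block stores, which initialize a whole cache line before
filling it; with a forward overlap (dst slightly above src) that
initialization can wipe source bytes that have not been loaded yet, so
overlapping copies must use plain loads and stores, and a zero length must
exit before touching anything. The same decisions in C (a sketch of the
logic, not the kernel's implementation):

    #include <stdio.h>
    #include <string.h>

    /* Overlap-safe copy: backward copy when dst overlaps the tail of src,
     * plain forward copy otherwise; never a clever memcpy that could
     * clobber not-yet-read source bytes. */
    static void *overlap_safe_move(void *dst, const void *src, size_t len)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (len == 0)
                    return dst;     /* mirrors the new brz,pn %o2, 99f */

            if (d > s && d < s + len) {
                    while (len--)   /* copy high address to low */
                            d[len] = s[len];
            } else {
                    while (len--)   /* copy low address to high */
                            *d++ = *s++;
            }
            return dst;
    }

    int main(void)
    {
            char buf[16] = "abcdefgh";

            overlap_safe_move(buf + 2, buf, 8);  /* overlapping regions */
            printf("%s\n", buf + 2);             /* prints abcdefgh */
            return 0;
    }
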
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index b1947e0f3e10..46d4449772bc 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
 	int i;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
 		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
 		spin_lock(&ioapic->lock);
 
-		if (trigger_mode != IOAPIC_LEVEL_TRIG)
+		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+		    kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
 			continue;
 
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd4e34de24c7..4ee827d7bf36 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 
 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 {
-	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-	    kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+	if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
 		int trigger_mode;
 		if (apic_test_vector(vector, apic->regs + APIC_TMR))
 			trigger_mode = IOAPIC_LEVEL_TRIG;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 10a481b7674d..ae4f6d35d19c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	if (enable_ept) {
 		/* nested EPT: emulate EPT also to L1 */
 		vmx->nested.nested_vmx_secondary_ctls_high |=
-			SECONDARY_EXEC_ENABLE_EPT |
-			SECONDARY_EXEC_UNRESTRICTED_GUEST;
+			SECONDARY_EXEC_ENABLE_EPT;
 		vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
 			 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
 			 VMX_EPT_INVEPT_BIT;
@@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	} else
 		vmx->nested.nested_vmx_ept_caps = 0;
 
+	if (enable_unrestricted_guest)
+		vmx->nested.nested_vmx_secondary_ctls_high |=
+			SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
 	/* miscellaneous data */
 	rdmsr(MSR_IA32_VMX_MISC,
 		vmx->nested.nested_vmx_misc_low,