author     David S. Miller <davem@davemloft.net>   2015-03-09 23:38:02 -0400
committer  David S. Miller <davem@davemloft.net>   2015-03-09 23:38:02 -0400
commit     3cef5c5b0b56f3f90b0e9ff8d3f8dc57f464cc14 (patch)
tree       02e95f15bd8a04071a9a36f534a92a066a8ce66a /arch
parent     8ac467e837a24eb024177b4b01013d8e6764913a (diff)
parent     affb8172de395a6e1db52ed9790ca0456d8c29a9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/cadence/macb.c
Overlapping changes in macb driver, mostly fixes and cleanups
in 'net' overlapping with the integration of at91_ether into
macb in 'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
42 files changed, 264 insertions, 204 deletions
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 4e547296831d..52312cb5dbe2 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h | |||
@@ -47,9 +47,6 @@ struct thread_struct { | |||
47 | /* Forward declaration, a strange C thing */ | 47 | /* Forward declaration, a strange C thing */ |
48 | struct task_struct; | 48 | struct task_struct; |
49 | 49 | ||
50 | /* Return saved PC of a blocked thread */ | ||
51 | unsigned long thread_saved_pc(struct task_struct *t); | ||
52 | |||
53 | #define task_pt_regs(p) \ | 50 | #define task_pt_regs(p) \ |
54 | ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) | 51 | ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) |
55 | 52 | ||
@@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t); | |||
72 | #define release_segments(mm) do { } while (0) | 69 | #define release_segments(mm) do { } while (0) |
73 | 70 | ||
74 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) | 71 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) |
72 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) | ||
75 | 73 | ||
76 | /* | 74 | /* |
77 | * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. | 75 | * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. |
78 | * Look in process.c for details of kernel stack layout | 76 | * Look in process.c for details of kernel stack layout |
79 | */ | 77 | */ |
80 | #define KSTK_ESP(tsk) (tsk->thread.ksp) | 78 | #define TSK_K_ESP(tsk) (tsk->thread.ksp) |
81 | 79 | ||
82 | #define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \ | 80 | #define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \ |
83 | sizeof(struct callee_regs) + off))) | 81 | sizeof(struct callee_regs) + off))) |
84 | 82 | ||
85 | #define KSTK_BLINK(tsk) KSTK_REG(tsk, 4) | 83 | #define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) |
86 | #define KSTK_FP(tsk) KSTK_REG(tsk, 0) | 84 | #define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) |
85 | |||
86 | #define thread_saved_pc(tsk) TSK_K_BLINK(tsk) | ||
87 | 87 | ||
88 | extern void start_thread(struct pt_regs * regs, unsigned long pc, | 88 | extern void start_thread(struct pt_regs * regs, unsigned long pc, |
89 | unsigned long usp); | 89 | unsigned long usp); |
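For context on the processor.h hunk above (illustrative, not part of the patch): after this change KSTK_ESP() returns the task's user-mode stack pointer taken from the pt_regs frame at the top of the kernel stack, which is what generic consumers such as /proc expect, while the kernel-mode values saved at context switch move to the new TSK_K_* accessors and thread_saved_pc() becomes a wrapper around TSK_K_BLINK(). A minimal sketch contrasting the two, assuming only the macros shown in the hunk; the helper function itself is hypothetical:

/* Hypothetical helper, not kernel code: contrasts the two stack pointers. */
#include <linux/sched.h>
#include <linux/printk.h>
#include <asm/processor.h>

static void show_task_sps(struct task_struct *tsk)
{
	/* User-mode SP captured in pt_regs when the task entered the kernel */
	unsigned long user_sp = KSTK_ESP(tsk);

	/* Kernel-mode SP saved in thread_struct by __switch_to() */
	unsigned long kernel_sp = TSK_K_ESP(tsk);

	pr_info("%s: user sp=%#lx kernel sp=%#lx saved pc=%#lx\n",
		tsk->comm, user_sp, kernel_sp, thread_saved_pc(tsk));
}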
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h new file mode 100644 index 000000000000..b29b6064ea14 --- /dev/null +++ b/arch/arc/include/asm/stacktrace.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) | ||
3 | * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef __ASM_STACKTRACE_H | ||
11 | #define __ASM_STACKTRACE_H | ||
12 | |||
13 | #include <linux/sched.h> | ||
14 | |||
15 | /** | ||
16 | * arc_unwind_core - Unwind the kernel mode stack for an execution context | ||
17 | * @tsk: NULL for current task, specific task otherwise | ||
18 | * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC} | ||
19 | * If NULL, use pt_regs of @tsk (if !NULL) otherwise | ||
20 | * use the current values of {SP, FP, BLINK, PC} | ||
21 | * @consumer_fn: Callback invoked for each frame unwound | ||
22 | * Returns 0 to continue unwinding, -1 to stop | ||
23 | * @arg: Arg to callback | ||
24 | * | ||
25 | * Returns the address of first function in stack | ||
26 | * | ||
27 | * Semantics: | ||
28 | * - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL | ||
29 | * - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL | ||
30 | * - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL | ||
31 | */ | ||
32 | notrace noinline unsigned int arc_unwind_core( | ||
33 | struct task_struct *tsk, struct pt_regs *regs, | ||
34 | int (*consumer_fn) (unsigned int, void *), | ||
35 | void *arg); | ||
36 | |||
37 | #endif /* __ASM_STACKTRACE_H */ | ||
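As the kernel-doc above describes, arc_unwind_core() invokes a caller-supplied callback once per unwound frame and stops when the callback returns -1. A minimal sketch of such a consumer, assuming only the prototype declared above; the collector structure and function names are made up for illustration:

/* Hypothetical consumer of arc_unwind_core(); not part of this commit. */
#include <linux/sched.h>
#include <asm/stacktrace.h>

struct addr_collector {
	unsigned long *entries;
	unsigned int nr, max;
};

static int collect_entry(unsigned int address, void *arg)
{
	struct addr_collector *c = arg;

	if (c->nr >= c->max)
		return -1;		/* stop unwinding */
	c->entries[c->nr++] = address;
	return 0;			/* keep going */
}

/* Synchronous unwind of the current context: tsk == NULL, regs == NULL */
static unsigned int dump_current_addresses(unsigned long *buf, unsigned int max)
{
	struct addr_collector c = { .entries = buf, .nr = 0, .max = max };

	return arc_unwind_core(NULL, NULL, collect_entry, &c);
}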
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index fdd89715d2d3..98c00a2d4dd9 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c | |||
@@ -192,29 +192,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
192 | return 0; | 192 | return 0; |
193 | } | 193 | } |
194 | 194 | ||
195 | /* | ||
196 | * API: expected by schedular Code: If thread is sleeping where is that. | ||
197 | * What is this good for? it will be always the scheduler or ret_from_fork. | ||
198 | * So we hard code that anyways. | ||
199 | */ | ||
200 | unsigned long thread_saved_pc(struct task_struct *t) | ||
201 | { | ||
202 | struct pt_regs *regs = task_pt_regs(t); | ||
203 | unsigned long blink = 0; | ||
204 | |||
205 | /* | ||
206 | * If the thread being queried for in not itself calling this, then it | ||
207 | * implies it is not executing, which in turn implies it is sleeping, | ||
208 | * which in turn implies it got switched OUT by the schedular. | ||
209 | * In that case, it's kernel mode blink can reliably retrieved as per | ||
210 | * the picture above (right above pt_regs). | ||
211 | */ | ||
212 | if (t != current && t->state != TASK_RUNNING) | ||
213 | blink = *((unsigned int *)regs - 1); | ||
214 | |||
215 | return blink; | ||
216 | } | ||
217 | |||
218 | int elf_check_arch(const struct elf32_hdr *x) | 195 | int elf_check_arch(const struct elf32_hdr *x) |
219 | { | 196 | { |
220 | unsigned int eflags; | 197 | unsigned int eflags; |
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 9ce47cfe2303..92320d6f737c 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c | |||
@@ -43,6 +43,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk, | |||
43 | struct pt_regs *regs, | 43 | struct pt_regs *regs, |
44 | struct unwind_frame_info *frame_info) | 44 | struct unwind_frame_info *frame_info) |
45 | { | 45 | { |
46 | /* | ||
47 | * synchronous unwinding (e.g. dump_stack) | ||
48 | * - uses current values of SP and friends | ||
49 | */ | ||
46 | if (tsk == NULL && regs == NULL) { | 50 | if (tsk == NULL && regs == NULL) { |
47 | unsigned long fp, sp, blink, ret; | 51 | unsigned long fp, sp, blink, ret; |
48 | frame_info->task = current; | 52 | frame_info->task = current; |
@@ -61,12 +65,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk, | |||
61 | frame_info->regs.r63 = ret; | 65 | frame_info->regs.r63 = ret; |
62 | frame_info->call_frame = 0; | 66 | frame_info->call_frame = 0; |
63 | } else if (regs == NULL) { | 67 | } else if (regs == NULL) { |
68 | /* | ||
69 | * Asynchronous unwinding of sleeping task | ||
70 | * - Gets SP etc from task's pt_regs (saved bottom of kernel | ||
71 | * mode stack of task) | ||
72 | */ | ||
64 | 73 | ||
65 | frame_info->task = tsk; | 74 | frame_info->task = tsk; |
66 | 75 | ||
67 | frame_info->regs.r27 = KSTK_FP(tsk); | 76 | frame_info->regs.r27 = TSK_K_FP(tsk); |
68 | frame_info->regs.r28 = KSTK_ESP(tsk); | 77 | frame_info->regs.r28 = TSK_K_ESP(tsk); |
69 | frame_info->regs.r31 = KSTK_BLINK(tsk); | 78 | frame_info->regs.r31 = TSK_K_BLINK(tsk); |
70 | frame_info->regs.r63 = (unsigned int)__switch_to; | 79 | frame_info->regs.r63 = (unsigned int)__switch_to; |
71 | 80 | ||
72 | /* In the prologue of __switch_to, first FP is saved on stack | 81 | /* In the prologue of __switch_to, first FP is saved on stack |
@@ -83,6 +92,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk, | |||
83 | frame_info->call_frame = 0; | 92 | frame_info->call_frame = 0; |
84 | 93 | ||
85 | } else { | 94 | } else { |
95 | /* | ||
96 | * Asynchronous unwinding of intr/exception | ||
97 | * - Just uses the pt_regs passed | ||
98 | */ | ||
86 | frame_info->task = tsk; | 99 | frame_info->task = tsk; |
87 | 100 | ||
88 | frame_info->regs.r27 = regs->fp; | 101 | frame_info->regs.r27 = regs->fp; |
@@ -95,7 +108,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk, | |||
95 | 108 | ||
96 | #endif | 109 | #endif |
97 | 110 | ||
98 | static noinline unsigned int | 111 | notrace noinline unsigned int |
99 | arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, | 112 | arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, |
100 | int (*consumer_fn) (unsigned int, void *), void *arg) | 113 | int (*consumer_fn) (unsigned int, void *), void *arg) |
101 | { | 114 | { |
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index 7ff5b5c183bb..74db59b6f392 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/perf_event.h> | ||
15 | #include <linux/ptrace.h> | 16 | #include <linux/ptrace.h> |
16 | #include <linux/uaccess.h> | 17 | #include <linux/uaccess.h> |
17 | #include <asm/disasm.h> | 18 | #include <asm/disasm.h> |
@@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, | |||
253 | } | 254 | } |
254 | } | 255 | } |
255 | 256 | ||
257 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); | ||
256 | return 0; | 258 | return 0; |
257 | 259 | ||
258 | fault: | 260 | fault: |
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 563cb27e37f5..6a2e006cbcce 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/ptrace.h> | 14 | #include <linux/ptrace.h> |
15 | #include <linux/uaccess.h> | 15 | #include <linux/uaccess.h> |
16 | #include <linux/kdebug.h> | 16 | #include <linux/kdebug.h> |
17 | #include <linux/perf_event.h> | ||
17 | #include <asm/pgalloc.h> | 18 | #include <asm/pgalloc.h> |
18 | #include <asm/mmu.h> | 19 | #include <asm/mmu.h> |
19 | 20 | ||
@@ -139,13 +140,20 @@ good_area: | |||
139 | return; | 140 | return; |
140 | } | 141 | } |
141 | 142 | ||
143 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | ||
144 | |||
142 | if (likely(!(fault & VM_FAULT_ERROR))) { | 145 | if (likely(!(fault & VM_FAULT_ERROR))) { |
143 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | 146 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
144 | /* To avoid updating stats twice for retry case */ | 147 | /* To avoid updating stats twice for retry case */ |
145 | if (fault & VM_FAULT_MAJOR) | 148 | if (fault & VM_FAULT_MAJOR) { |
146 | tsk->maj_flt++; | 149 | tsk->maj_flt++; |
147 | else | 150 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
151 | regs, address); | ||
152 | } else { | ||
148 | tsk->min_flt++; | 153 | tsk->min_flt++; |
154 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | ||
155 | regs, address); | ||
156 | } | ||
149 | 157 | ||
150 | if (fault & VM_FAULT_RETRY) { | 158 | if (fault & VM_FAULT_RETRY) { |
151 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | 159 | flags &= ~FAULT_FLAG_ALLOW_RETRY; |
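The unaligned.c and fault.c hunks above wire ARC into the generic software perf events, so PERF_COUNT_SW_ALIGNMENT_FAULTS, PERF_COUNT_SW_PAGE_FAULTS and the MIN/MAJ page-fault variants now count on this architecture. A userspace sketch of reading one of these counters via perf_event_open(2), roughly equivalent to `perf stat -e minor-faults`; the program is illustrative and not part of the patch:

/* Illustrative userspace reader for a software perf counter. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static int open_sw_counter(uint64_t config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = config;		/* e.g. PERF_COUNT_SW_PAGE_FAULTS_MIN */

	/* pid = 0 (this task), cpu = -1 (any cpu), no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	uint64_t count;
	int fd = open_sw_counter(PERF_COUNT_SW_PAGE_FAULTS_MIN);

	if (fd < 0)
		return 1;
	/* ... do some work that touches memory ... */
	read(fd, &count, sizeof(count));
	printf("minor faults: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}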
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 37ca2a4c6f09..bf0fe99e8ca9 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
@@ -207,7 +207,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, | |||
207 | 207 | ||
208 | bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; | 208 | bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; |
209 | 209 | ||
210 | VM_BUG_ON(size & PAGE_MASK); | 210 | VM_BUG_ON(size & ~PAGE_MASK); |
211 | 211 | ||
212 | if (!need_flush && !icache_is_pipt()) | 212 | if (!need_flush && !icache_is_pipt()) |
213 | goto vipt_cache; | 213 | goto vipt_cache; |
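The one-character change above flips what appears to be an inverted assertion: PAGE_MASK keeps the page-frame bits, so `size & PAGE_MASK` is non-zero for any size of a page or more and the old VM_BUG_ON would presumably fire on valid, page-aligned sizes, whereas `size & ~PAGE_MASK` isolates the sub-page bits the assertion actually wants to reject. A standalone arithmetic sketch, assuming 4 KiB pages:

/* Illustration only: shows which bits each mask selects. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long size = 2 * PAGE_SIZE;	/* valid, page-aligned */

	printf("size & PAGE_MASK  = %#lx\n", size & PAGE_MASK);		/* 0x2000: old check trips */
	printf("size & ~PAGE_MASK = %#lx\n", size & ~PAGE_MASK);	/* 0: new check passes */
	return 0;
}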
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 07e7eb1d7ab6..5560f74f9eee 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -540,7 +540,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
540 | 540 | ||
541 | vcpu->mode = OUTSIDE_GUEST_MODE; | 541 | vcpu->mode = OUTSIDE_GUEST_MODE; |
542 | kvm_guest_exit(); | 542 | kvm_guest_exit(); |
543 | trace_kvm_exit(*vcpu_pc(vcpu)); | 543 | trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); |
544 | /* | 544 | /* |
545 | * We may have taken a host interrupt in HYP mode (ie | 545 | * We may have taken a host interrupt in HYP mode (ie |
546 | * while executing the guest). This interrupt is still | 546 | * while executing the guest). This interrupt is still |
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h index 881874b1a036..6817664b46b8 100644 --- a/arch/arm/kvm/trace.h +++ b/arch/arm/kvm/trace.h | |||
@@ -25,18 +25,22 @@ TRACE_EVENT(kvm_entry, | |||
25 | ); | 25 | ); |
26 | 26 | ||
27 | TRACE_EVENT(kvm_exit, | 27 | TRACE_EVENT(kvm_exit, |
28 | TP_PROTO(unsigned long vcpu_pc), | 28 | TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc), |
29 | TP_ARGS(vcpu_pc), | 29 | TP_ARGS(exit_reason, vcpu_pc), |
30 | 30 | ||
31 | TP_STRUCT__entry( | 31 | TP_STRUCT__entry( |
32 | __field( unsigned int, exit_reason ) | ||
32 | __field( unsigned long, vcpu_pc ) | 33 | __field( unsigned long, vcpu_pc ) |
33 | ), | 34 | ), |
34 | 35 | ||
35 | TP_fast_assign( | 36 | TP_fast_assign( |
37 | __entry->exit_reason = exit_reason; | ||
36 | __entry->vcpu_pc = vcpu_pc; | 38 | __entry->vcpu_pc = vcpu_pc; |
37 | ), | 39 | ), |
38 | 40 | ||
39 | TP_printk("PC: 0x%08lx", __entry->vcpu_pc) | 41 | TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx", |
42 | __entry->exit_reason, | ||
43 | __entry->vcpu_pc) | ||
40 | ); | 44 | ); |
41 | 45 | ||
42 | TRACE_EVENT(kvm_guest_fault, | 46 | TRACE_EVENT(kvm_guest_fault, |
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index 7d8eab857a93..f6d02e4cbcda 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/platform_data/video-pxafb.h> | 36 | #include <linux/platform_data/video-pxafb.h> |
37 | #include <mach/bitfield.h> | 37 | #include <mach/bitfield.h> |
38 | #include <linux/platform_data/mmc-pxamci.h> | 38 | #include <linux/platform_data/mmc-pxamci.h> |
39 | #include <linux/smc91x.h> | ||
39 | 40 | ||
40 | #include "generic.h" | 41 | #include "generic.h" |
41 | #include "devices.h" | 42 | #include "devices.h" |
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index 28da319d389f..eaee2c20b189 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c | |||
@@ -195,7 +195,7 @@ static struct resource smc91x_resources[] = { | |||
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct smc91x_platdata smc91x_platdata = { | 197 | struct smc91x_platdata smc91x_platdata = { |
198 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT; | 198 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, |
199 | }; | 199 | }; |
200 | 200 | ||
201 | static struct platform_device smc91x_device = { | 201 | static struct platform_device smc91x_device = { |
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 7b0cd3172354..af868d258e66 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c | |||
@@ -268,8 +268,8 @@ static int neponset_probe(struct platform_device *dev) | |||
268 | .id = 0, | 268 | .id = 0, |
269 | .res = smc91x_resources, | 269 | .res = smc91x_resources, |
270 | .num_res = ARRAY_SIZE(smc91x_resources), | 270 | .num_res = ARRAY_SIZE(smc91x_resources), |
271 | .data = &smc91c_platdata, | 271 | .data = &smc91x_platdata, |
272 | .size_data = sizeof(smc91c_platdata), | 272 | .size_data = sizeof(smc91x_platdata), |
273 | }; | 273 | }; |
274 | int ret, irq; | 274 | int ret, irq; |
275 | 275 | ||
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 696fd0fe4806..1525d7b5f1b7 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c | |||
@@ -54,7 +54,7 @@ static struct platform_device smc91x_device = { | |||
54 | .num_resources = ARRAY_SIZE(smc91x_resources), | 54 | .num_resources = ARRAY_SIZE(smc91x_resources), |
55 | .resource = smc91x_resources, | 55 | .resource = smc91x_resources, |
56 | .dev = { | 56 | .dev = { |
57 | .platform_data = &smc91c_platdata, | 57 | .platform_data = &smc91x_platdata, |
58 | }, | 58 | }, |
59 | }; | 59 | }; |
60 | 60 | ||
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index f1ad9c2ab2e9..a857794432d6 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi | |||
@@ -622,7 +622,7 @@ | |||
622 | }; | 622 | }; |
623 | 623 | ||
624 | sgenet0: ethernet@1f210000 { | 624 | sgenet0: ethernet@1f210000 { |
625 | compatible = "apm,xgene-enet"; | 625 | compatible = "apm,xgene1-sgenet"; |
626 | status = "disabled"; | 626 | status = "disabled"; |
627 | reg = <0x0 0x1f210000 0x0 0xd100>, | 627 | reg = <0x0 0x1f210000 0x0 0xd100>, |
628 | <0x0 0x1f200000 0x0 0Xc300>, | 628 | <0x0 0x1f200000 0x0 0Xc300>, |
@@ -636,7 +636,7 @@ | |||
636 | }; | 636 | }; |
637 | 637 | ||
638 | xgenet: ethernet@1f610000 { | 638 | xgenet: ethernet@1f610000 { |
639 | compatible = "apm,xgene-enet"; | 639 | compatible = "apm,xgene1-xgenet"; |
640 | status = "disabled"; | 640 | status = "disabled"; |
641 | reg = <0x0 0x1f610000 0x0 0xd100>, | 641 | reg = <0x0 0x1f610000 0x0 0xd100>, |
642 | <0x0 0x1f600000 0x0 0Xc300>, | 642 | <0x0 0x1f600000 0x0 0Xc300>, |
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index bb0ea94c4ba1..1d3ec3ddd84b 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c | |||
@@ -51,7 +51,10 @@ static int change_memory_common(unsigned long addr, int numpages, | |||
51 | WARN_ON_ONCE(1); | 51 | WARN_ON_ONCE(1); |
52 | } | 52 | } |
53 | 53 | ||
54 | if (!is_module_address(start) || !is_module_address(end - 1)) | 54 | if (start < MODULES_VADDR || start >= MODULES_END) |
55 | return -EINVAL; | ||
56 | |||
57 | if (end < MODULES_VADDR || end >= MODULES_END) | ||
55 | return -EINVAL; | 58 | return -EINVAL; |
56 | 59 | ||
57 | data.set_mask = set_mask; | 60 | data.set_mask = set_mask; |
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index bbcd82242059..b6beb0e07b1b 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c | |||
@@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | |||
216 | if (idx > current_cpu_data.tlbsize) { | 216 | if (idx > current_cpu_data.tlbsize) { |
217 | kvm_err("%s: Invalid Index: %d\n", __func__, idx); | 217 | kvm_err("%s: Invalid Index: %d\n", __func__, idx); |
218 | kvm_mips_dump_host_tlbs(); | 218 | kvm_mips_dump_host_tlbs(); |
219 | local_irq_restore(flags); | ||
219 | return -1; | 220 | return -1; |
220 | } | 221 | } |
221 | 222 | ||
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index c1388d40663b..bd6437f67dc0 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h | |||
@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit, | |||
24 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), | 24 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), |
25 | TP_ARGS(vcpu, reason), | 25 | TP_ARGS(vcpu, reason), |
26 | TP_STRUCT__entry( | 26 | TP_STRUCT__entry( |
27 | __field(struct kvm_vcpu *, vcpu) | 27 | __field(unsigned long, pc) |
28 | __field(unsigned int, reason) | 28 | __field(unsigned int, reason) |
29 | ), | 29 | ), |
30 | 30 | ||
31 | TP_fast_assign( | 31 | TP_fast_assign( |
32 | __entry->vcpu = vcpu; | 32 | __entry->pc = vcpu->arch.pc; |
33 | __entry->reason = reason; | 33 | __entry->reason = reason; |
34 | ), | 34 | ), |
35 | 35 | ||
36 | TP_printk("[%s]PC: 0x%08lx", | 36 | TP_printk("[%s]PC: 0x%08lx", |
37 | kvm_mips_exit_types_str[__entry->reason], | 37 | kvm_mips_exit_types_str[__entry->reason], |
38 | __entry->vcpu->arch.pc) | 38 | __entry->pc) |
39 | ); | 39 | ); |
40 | 40 | ||
41 | #endif /* _TRACE_KVM_H */ | 41 | #endif /* _TRACE_KVM_H */ |
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 9cfa3706a1b8..f1ea5972f6ec 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h | |||
@@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl, | |||
113 | int pci_domain_number, unsigned long pe_num); | 113 | int pci_domain_number, unsigned long pe_num); |
114 | extern int iommu_add_device(struct device *dev); | 114 | extern int iommu_add_device(struct device *dev); |
115 | extern void iommu_del_device(struct device *dev); | 115 | extern void iommu_del_device(struct device *dev); |
116 | extern int __init tce_iommu_bus_notifier_init(void); | ||
116 | #else | 117 | #else |
117 | static inline void iommu_register_group(struct iommu_table *tbl, | 118 | static inline void iommu_register_group(struct iommu_table *tbl, |
118 | int pci_domain_number, | 119 | int pci_domain_number, |
@@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev) | |||
128 | static inline void iommu_del_device(struct device *dev) | 129 | static inline void iommu_del_device(struct device *dev) |
129 | { | 130 | { |
130 | } | 131 | } |
132 | |||
133 | static inline int __init tce_iommu_bus_notifier_init(void) | ||
134 | { | ||
135 | return 0; | ||
136 | } | ||
131 | #endif /* !CONFIG_IOMMU_API */ | 137 | #endif /* !CONFIG_IOMMU_API */ |
132 | 138 | ||
133 | static inline void set_iommu_table_base_and_group(struct device *dev, | 139 | static inline void set_iommu_table_base_and_group(struct device *dev, |
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h new file mode 100644 index 000000000000..744fd54de374 --- /dev/null +++ b/arch/powerpc/include/asm/irq_work.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _ASM_POWERPC_IRQ_WORK_H | ||
2 | #define _ASM_POWERPC_IRQ_WORK_H | ||
3 | |||
4 | static inline bool arch_irq_work_has_interrupt(void) | ||
5 | { | ||
6 | return true; | ||
7 | } | ||
8 | |||
9 | #endif /* _ASM_POWERPC_IRQ_WORK_H */ | ||
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 5d3968c4d799..b054f33ab1fb 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev) | |||
1175 | } | 1175 | } |
1176 | EXPORT_SYMBOL_GPL(iommu_del_device); | 1176 | EXPORT_SYMBOL_GPL(iommu_del_device); |
1177 | 1177 | ||
1178 | static int tce_iommu_bus_notifier(struct notifier_block *nb, | ||
1179 | unsigned long action, void *data) | ||
1180 | { | ||
1181 | struct device *dev = data; | ||
1182 | |||
1183 | switch (action) { | ||
1184 | case BUS_NOTIFY_ADD_DEVICE: | ||
1185 | return iommu_add_device(dev); | ||
1186 | case BUS_NOTIFY_DEL_DEVICE: | ||
1187 | if (dev->iommu_group) | ||
1188 | iommu_del_device(dev); | ||
1189 | return 0; | ||
1190 | default: | ||
1191 | return 0; | ||
1192 | } | ||
1193 | } | ||
1194 | |||
1195 | static struct notifier_block tce_iommu_bus_nb = { | ||
1196 | .notifier_call = tce_iommu_bus_notifier, | ||
1197 | }; | ||
1198 | |||
1199 | int __init tce_iommu_bus_notifier_init(void) | ||
1200 | { | ||
1201 | bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); | ||
1202 | return 0; | ||
1203 | } | ||
1178 | #endif /* CONFIG_IOMMU_API */ | 1204 | #endif /* CONFIG_IOMMU_API */ |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 6e19afa35a15..ec9ec2058d2d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -541,8 +541,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
541 | if (smp_ops->give_timebase) | 541 | if (smp_ops->give_timebase) |
542 | smp_ops->give_timebase(); | 542 | smp_ops->give_timebase(); |
543 | 543 | ||
544 | /* Wait until cpu puts itself in the online map */ | 544 | /* Wait until cpu puts itself in the online & active maps */ |
545 | while (!cpu_online(cpu)) | 545 | while (!cpu_online(cpu) || !cpu_active(cpu)) |
546 | cpu_relax(); | 546 | cpu_relax(); |
547 | 547 | ||
548 | return 0; | 548 | return 0; |
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index e69142f4af08..54323d6b5166 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -836,30 +836,4 @@ void __init pnv_pci_init(void) | |||
836 | #endif | 836 | #endif |
837 | } | 837 | } |
838 | 838 | ||
839 | static int tce_iommu_bus_notifier(struct notifier_block *nb, | ||
840 | unsigned long action, void *data) | ||
841 | { | ||
842 | struct device *dev = data; | ||
843 | |||
844 | switch (action) { | ||
845 | case BUS_NOTIFY_ADD_DEVICE: | ||
846 | return iommu_add_device(dev); | ||
847 | case BUS_NOTIFY_DEL_DEVICE: | ||
848 | if (dev->iommu_group) | ||
849 | iommu_del_device(dev); | ||
850 | return 0; | ||
851 | default: | ||
852 | return 0; | ||
853 | } | ||
854 | } | ||
855 | |||
856 | static struct notifier_block tce_iommu_bus_nb = { | ||
857 | .notifier_call = tce_iommu_bus_notifier, | ||
858 | }; | ||
859 | |||
860 | static int __init tce_iommu_bus_notifier_init(void) | ||
861 | { | ||
862 | bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); | ||
863 | return 0; | ||
864 | } | ||
865 | machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); | 839 | machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 1d3d52dc3ff3..7803a19adb31 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str) | |||
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | __setup("multitce=", disable_multitce); | 1342 | __setup("multitce=", disable_multitce); |
1343 | |||
1344 | machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init); | ||
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d84559e31f32..f407bbf5ee94 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -515,15 +515,15 @@ struct s390_io_adapter { | |||
515 | #define S390_ARCH_FAC_MASK_SIZE_U64 \ | 515 | #define S390_ARCH_FAC_MASK_SIZE_U64 \ |
516 | (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) | 516 | (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) |
517 | 517 | ||
518 | struct s390_model_fac { | 518 | struct kvm_s390_fac { |
519 | /* facilities used in SIE context */ | 519 | /* facility list requested by guest */ |
520 | __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; | 520 | __u64 list[S390_ARCH_FAC_LIST_SIZE_U64]; |
521 | /* subset enabled by kvm */ | 521 | /* facility mask supported by kvm & hosting machine */ |
522 | __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; | 522 | __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64]; |
523 | }; | 523 | }; |
524 | 524 | ||
525 | struct kvm_s390_cpu_model { | 525 | struct kvm_s390_cpu_model { |
526 | struct s390_model_fac *fac; | 526 | struct kvm_s390_fac *fac; |
527 | struct cpuid cpu_id; | 527 | struct cpuid cpu_id; |
528 | unsigned short ibc; | 528 | unsigned short ibc; |
529 | }; | 529 | }; |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index f49b71954654..8fb3802f8fad 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
62 | { | 62 | { |
63 | int cpu = smp_processor_id(); | 63 | int cpu = smp_processor_id(); |
64 | 64 | ||
65 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | ||
65 | if (prev == next) | 66 | if (prev == next) |
66 | return; | 67 | return; |
67 | if (MACHINE_HAS_TLB_LC) | 68 | if (MACHINE_HAS_TLB_LC) |
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
73 | atomic_dec(&prev->context.attach_count); | 74 | atomic_dec(&prev->context.attach_count); |
74 | if (MACHINE_HAS_TLB_LC) | 75 | if (MACHINE_HAS_TLB_LC) |
75 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); | 76 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); |
76 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | ||
77 | } | 77 | } |
78 | 78 | ||
79 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch | 79 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 7b2ac6e44166..53eacbd4f09b 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end | |||
37 | #endif | 37 | #endif |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline void clear_page(void *page) | 40 | #define clear_page(page) memset((page), 0, PAGE_SIZE) |
41 | { | ||
42 | register unsigned long reg1 asm ("1") = 0; | ||
43 | register void *reg2 asm ("2") = page; | ||
44 | register unsigned long reg3 asm ("3") = 4096; | ||
45 | asm volatile( | ||
46 | " mvcl 2,0" | ||
47 | : "+d" (reg2), "+d" (reg3) : "d" (reg1) | ||
48 | : "memory", "cc"); | ||
49 | } | ||
50 | 41 | ||
51 | /* | 42 | /* |
52 | * copy_page uses the mvcl instruction with 0xb0 padding byte in order to | 43 | * copy_page uses the mvcl instruction with 0xb0 padding byte in order to |
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index cb2d51e779df..830066f936c8 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c | |||
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) | |||
36 | insn->offset = (entry->target - entry->code) >> 1; | 36 | insn->offset = (entry->target - entry->code) >> 1; |
37 | } | 37 | } |
38 | 38 | ||
39 | static void jump_label_bug(struct jump_entry *entry, struct insn *insn) | 39 | static void jump_label_bug(struct jump_entry *entry, struct insn *expected, |
40 | struct insn *new) | ||
40 | { | 41 | { |
41 | unsigned char *ipc = (unsigned char *)entry->code; | 42 | unsigned char *ipc = (unsigned char *)entry->code; |
42 | unsigned char *ipe = (unsigned char *)insn; | 43 | unsigned char *ipe = (unsigned char *)expected; |
44 | unsigned char *ipn = (unsigned char *)new; | ||
43 | 45 | ||
44 | pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); | 46 | pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); |
45 | pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", | 47 | pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", |
46 | ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); | 48 | ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); |
47 | pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", | 49 | pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", |
48 | ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); | 50 | ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); |
51 | pr_emerg("New: %02x %02x %02x %02x %02x %02x\n", | ||
52 | ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]); | ||
49 | panic("Corrupted kernel text"); | 53 | panic("Corrupted kernel text"); |
50 | } | 54 | } |
51 | 55 | ||
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry, | |||
69 | } | 73 | } |
70 | if (init) { | 74 | if (init) { |
71 | if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) | 75 | if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) |
72 | jump_label_bug(entry, &old); | 76 | jump_label_bug(entry, &orignop, &new); |
73 | } else { | 77 | } else { |
74 | if (memcmp((void *)entry->code, &old, sizeof(old))) | 78 | if (memcmp((void *)entry->code, &old, sizeof(old))) |
75 | jump_label_bug(entry, &old); | 79 | jump_label_bug(entry, &old, &new); |
76 | } | 80 | } |
77 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); | 81 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); |
78 | } | 82 | } |
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 36154a2f1814..2ca95862e336 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
436 | const Elf_Shdr *sechdrs, | 436 | const Elf_Shdr *sechdrs, |
437 | struct module *me) | 437 | struct module *me) |
438 | { | 438 | { |
439 | jump_label_apply_nops(me); | ||
439 | vfree(me->arch.syminfo); | 440 | vfree(me->arch.syminfo); |
440 | me->arch.syminfo = NULL; | 441 | me->arch.syminfo = NULL; |
441 | return 0; | 442 | return 0; |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 26108232fcaa..dc488e13b7e3 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | static DEFINE_PER_CPU(struct cpuid, cpu_id); | 19 | static DEFINE_PER_CPU(struct cpuid, cpu_id); |
20 | 20 | ||
21 | void cpu_relax(void) | 21 | void notrace cpu_relax(void) |
22 | { | 22 | { |
23 | if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) | 23 | if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) |
24 | asm volatile("diag 0,0,0x44"); | 24 | asm volatile("diag 0,0,0x44"); |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0c3623927563..f6579cfde2df 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
522 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, | 522 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, |
523 | sizeof(struct cpuid)); | 523 | sizeof(struct cpuid)); |
524 | kvm->arch.model.ibc = proc->ibc; | 524 | kvm->arch.model.ibc = proc->ibc; |
525 | memcpy(kvm->arch.model.fac->kvm, proc->fac_list, | 525 | memcpy(kvm->arch.model.fac->list, proc->fac_list, |
526 | S390_ARCH_FAC_LIST_SIZE_BYTE); | 526 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
527 | } else | 527 | } else |
528 | ret = -EFAULT; | 528 | ret = -EFAULT; |
@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
556 | } | 556 | } |
557 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); | 557 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); |
558 | proc->ibc = kvm->arch.model.ibc; | 558 | proc->ibc = kvm->arch.model.ibc; |
559 | memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); | 559 | memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); |
560 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) | 560 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) |
561 | ret = -EFAULT; | 561 | ret = -EFAULT; |
562 | kfree(proc); | 562 | kfree(proc); |
@@ -576,10 +576,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) | |||
576 | } | 576 | } |
577 | get_cpu_id((struct cpuid *) &mach->cpuid); | 577 | get_cpu_id((struct cpuid *) &mach->cpuid); |
578 | mach->ibc = sclp_get_ibc(); | 578 | mach->ibc = sclp_get_ibc(); |
579 | memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, | 579 | memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, |
580 | kvm_s390_fac_list_mask_size() * sizeof(u64)); | 580 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
581 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, | 581 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, |
582 | S390_ARCH_FAC_LIST_SIZE_U64); | 582 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
583 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) | 583 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) |
584 | ret = -EFAULT; | 584 | ret = -EFAULT; |
585 | kfree(mach); | 585 | kfree(mach); |
@@ -778,15 +778,18 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
778 | static int kvm_s390_query_ap_config(u8 *config) | 778 | static int kvm_s390_query_ap_config(u8 *config) |
779 | { | 779 | { |
780 | u32 fcn_code = 0x04000000UL; | 780 | u32 fcn_code = 0x04000000UL; |
781 | u32 cc; | 781 | u32 cc = 0; |
782 | 782 | ||
783 | memset(config, 0, 128); | ||
783 | asm volatile( | 784 | asm volatile( |
784 | "lgr 0,%1\n" | 785 | "lgr 0,%1\n" |
785 | "lgr 2,%2\n" | 786 | "lgr 2,%2\n" |
786 | ".long 0xb2af0000\n" /* PQAP(QCI) */ | 787 | ".long 0xb2af0000\n" /* PQAP(QCI) */ |
787 | "ipm %0\n" | 788 | "0: ipm %0\n" |
788 | "srl %0,28\n" | 789 | "srl %0,28\n" |
789 | : "=r" (cc) | 790 | "1:\n" |
791 | EX_TABLE(0b, 1b) | ||
792 | : "+r" (cc) | ||
790 | : "r" (fcn_code), "r" (config) | 793 | : "r" (fcn_code), "r" (config) |
791 | : "cc", "0", "2", "memory" | 794 | : "cc", "0", "2", "memory" |
792 | ); | 795 | ); |
@@ -839,9 +842,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm) | |||
839 | 842 | ||
840 | kvm_s390_set_crycb_format(kvm); | 843 | kvm_s390_set_crycb_format(kvm); |
841 | 844 | ||
842 | /* Disable AES/DEA protected key functions by default */ | 845 | /* Enable AES/DEA protected key functions by default */ |
843 | kvm->arch.crypto.aes_kw = 0; | 846 | kvm->arch.crypto.aes_kw = 1; |
844 | kvm->arch.crypto.dea_kw = 0; | 847 | kvm->arch.crypto.dea_kw = 1; |
848 | get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, | ||
849 | sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); | ||
850 | get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, | ||
851 | sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); | ||
845 | 852 | ||
846 | return 0; | 853 | return 0; |
847 | } | 854 | } |
@@ -886,40 +893,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
886 | /* | 893 | /* |
887 | * The architectural maximum amount of facilities is 16 kbit. To store | 894 | * The architectural maximum amount of facilities is 16 kbit. To store |
888 | * this amount, 2 kbyte of memory is required. Thus we need a full | 895 | * this amount, 2 kbyte of memory is required. Thus we need a full |
889 | * page to hold the active copy (arch.model.fac->sie) and the current | 896 | * page to hold the guest facility list (arch.model.fac->list) and the |
890 | * facilities set (arch.model.fac->kvm). Its address size has to be | 897 | * facility mask (arch.model.fac->mask). Its address size has to be |
891 | * 31 bits and word aligned. | 898 | * 31 bits and word aligned. |
892 | */ | 899 | */ |
893 | kvm->arch.model.fac = | 900 | kvm->arch.model.fac = |
894 | (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 901 | (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
895 | if (!kvm->arch.model.fac) | 902 | if (!kvm->arch.model.fac) |
896 | goto out_nofac; | 903 | goto out_nofac; |
897 | 904 | ||
898 | memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, | 905 | /* Populate the facility mask initially. */ |
899 | S390_ARCH_FAC_LIST_SIZE_U64); | 906 | memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, |
900 | 907 | S390_ARCH_FAC_LIST_SIZE_BYTE); | |
901 | /* | ||
902 | * If this KVM host runs *not* in a LPAR, relax the facility bits | ||
903 | * of the kvm facility mask by all missing facilities. This will allow | ||
904 | * to determine the right CPU model by means of the remaining facilities. | ||
905 | * Live guest migration must prohibit the migration of KVMs running in | ||
906 | * a LPAR to non LPAR hosts. | ||
907 | */ | ||
908 | if (!MACHINE_IS_LPAR) | ||
909 | for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) | ||
910 | kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; | ||
911 | |||
912 | /* | ||
913 | * Apply the kvm facility mask to limit the kvm supported/tolerated | ||
914 | * facility list. | ||
915 | */ | ||
916 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { | 908 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { |
917 | if (i < kvm_s390_fac_list_mask_size()) | 909 | if (i < kvm_s390_fac_list_mask_size()) |
918 | kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; | 910 | kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; |
919 | else | 911 | else |
920 | kvm->arch.model.fac->kvm[i] = 0UL; | 912 | kvm->arch.model.fac->mask[i] = 0UL; |
921 | } | 913 | } |
922 | 914 | ||
915 | /* Populate the facility list initially. */ | ||
916 | memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, | ||
917 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
918 | |||
923 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); | 919 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); |
924 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; | 920 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; |
925 | 921 | ||
@@ -1165,8 +1161,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1165 | 1161 | ||
1166 | mutex_lock(&vcpu->kvm->lock); | 1162 | mutex_lock(&vcpu->kvm->lock); |
1167 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; | 1163 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; |
1168 | memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, | ||
1169 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
1170 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; | 1164 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; |
1171 | mutex_unlock(&vcpu->kvm->lock); | 1165 | mutex_unlock(&vcpu->kvm->lock); |
1172 | 1166 | ||
@@ -1212,7 +1206,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1212 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; | 1206 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; |
1213 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); | 1207 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); |
1214 | } | 1208 | } |
1215 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; | 1209 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list; |
1216 | 1210 | ||
1217 | spin_lock_init(&vcpu->arch.local_int.lock); | 1211 | spin_lock_init(&vcpu->arch.local_int.lock); |
1218 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | 1212 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 985c2114d7ef..c34109aa552d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |||
128 | /* test availability of facility in a kvm intance */ | 128 | /* test availability of facility in a kvm intance */ |
129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) | 129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) |
130 | { | 130 | { |
131 | return __test_facility(nr, kvm->arch.model.fac->kvm); | 131 | return __test_facility(nr, kvm->arch.model.fac->mask) && |
132 | __test_facility(nr, kvm->arch.model.fac->list); | ||
132 | } | 133 | } |
133 | 134 | ||
134 | /* are cpu states controlled by user space */ | 135 | /* are cpu states controlled by user space */ |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index bdd9b5b17e03..351116939ea2 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) | |||
348 | * We need to shift the lower 32 facility bits (bit 0-31) from a u64 | 348 | * We need to shift the lower 32 facility bits (bit 0-31) from a u64 |
349 | * into a u32 memory representation. They will remain bits 0-31. | 349 | * into a u32 memory representation. They will remain bits 0-31. |
350 | */ | 350 | */ |
351 | fac = *vcpu->kvm->arch.model.fac->sie >> 32; | 351 | fac = *vcpu->kvm->arch.model.fac->list >> 32; |
352 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), | 352 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), |
353 | &fac, sizeof(fac)); | 353 | &fac, sizeof(fac)); |
354 | if (rc) | 354 | if (rc) |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 753a56731951..f0b85443e060 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, | |||
287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); | 287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); |
288 | return (void __iomem *) addr + offset; | 288 | return (void __iomem *) addr + offset; |
289 | } | 289 | } |
290 | EXPORT_SYMBOL_GPL(pci_iomap_range); | 290 | EXPORT_SYMBOL(pci_iomap_range); |
291 | 291 | ||
292 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 292 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
293 | { | 293 | { |
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) | |||
309 | } | 309 | } |
310 | spin_unlock(&zpci_iomap_lock); | 310 | spin_unlock(&zpci_iomap_lock); |
311 | } | 311 | } |
312 | EXPORT_SYMBOL_GPL(pci_iounmap); | 312 | EXPORT_SYMBOL(pci_iounmap); |
313 | 313 | ||
314 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, | 314 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, |
315 | int size, u32 *val) | 315 | int size, u32 *val) |
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) | |||
483 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); | 483 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); |
484 | } | 484 | } |
485 | 485 | ||
486 | static void zpci_map_resources(struct zpci_dev *zdev) | 486 | static void zpci_map_resources(struct pci_dev *pdev) |
487 | { | 487 | { |
488 | struct pci_dev *pdev = zdev->pdev; | ||
489 | resource_size_t len; | 488 | resource_size_t len; |
490 | int i; | 489 | int i; |
491 | 490 | ||
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev) | |||
499 | } | 498 | } |
500 | } | 499 | } |
501 | 500 | ||
502 | static void zpci_unmap_resources(struct zpci_dev *zdev) | 501 | static void zpci_unmap_resources(struct pci_dev *pdev) |
503 | { | 502 | { |
504 | struct pci_dev *pdev = zdev->pdev; | ||
505 | resource_size_t len; | 503 | resource_size_t len; |
506 | int i; | 504 | int i; |
507 | 505 | ||
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev) | |||
651 | 649 | ||
652 | zdev->pdev = pdev; | 650 | zdev->pdev = pdev; |
653 | pdev->dev.groups = zpci_attr_groups; | 651 | pdev->dev.groups = zpci_attr_groups; |
654 | zpci_map_resources(zdev); | 652 | zpci_map_resources(pdev); |
655 | 653 | ||
656 | for (i = 0; i < PCI_BAR_COUNT; i++) { | 654 | for (i = 0; i < PCI_BAR_COUNT; i++) { |
657 | res = &pdev->resource[i]; | 655 | res = &pdev->resource[i]; |
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev) | |||
663 | return 0; | 661 | return 0; |
664 | } | 662 | } |
665 | 663 | ||
664 | void pcibios_release_device(struct pci_dev *pdev) | ||
665 | { | ||
666 | zpci_unmap_resources(pdev); | ||
667 | } | ||
668 | |||
666 | int pcibios_enable_device(struct pci_dev *pdev, int mask) | 669 | int pcibios_enable_device(struct pci_dev *pdev, int mask) |
667 | { | 670 | { |
668 | struct zpci_dev *zdev = get_zdev(pdev); | 671 | struct zpci_dev *zdev = get_zdev(pdev); |
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) | |||
670 | zdev->pdev = pdev; | 673 | zdev->pdev = pdev; |
671 | zpci_debug_init_device(zdev); | 674 | zpci_debug_init_device(zdev); |
672 | zpci_fmb_enable_device(zdev); | 675 | zpci_fmb_enable_device(zdev); |
673 | zpci_map_resources(zdev); | ||
674 | 676 | ||
675 | return pci_enable_resources(pdev, mask); | 677 | return pci_enable_resources(pdev, mask); |
676 | } | 678 | } |
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev) | |||
679 | { | 681 | { |
680 | struct zpci_dev *zdev = get_zdev(pdev); | 682 | struct zpci_dev *zdev = get_zdev(pdev); |
681 | 683 | ||
682 | zpci_unmap_resources(zdev); | ||
683 | zpci_fmb_disable_device(zdev); | 684 | zpci_fmb_disable_device(zdev); |
684 | zpci_debug_exit_device(zdev); | 685 | zpci_debug_exit_device(zdev); |
685 | zdev->pdev = NULL; | 686 | zdev->pdev = NULL; |
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev) | |||
688 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 689 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
689 | static int zpci_restore(struct device *dev) | 690 | static int zpci_restore(struct device *dev) |
690 | { | 691 | { |
691 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 692 | struct pci_dev *pdev = to_pci_dev(dev); |
693 | struct zpci_dev *zdev = get_zdev(pdev); | ||
692 | int ret = 0; | 694 | int ret = 0; |
693 | 695 | ||
694 | if (zdev->state != ZPCI_FN_STATE_ONLINE) | 696 | if (zdev->state != ZPCI_FN_STATE_ONLINE) |
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev) | |||
698 | if (ret) | 700 | if (ret) |
699 | goto out; | 701 | goto out; |
700 | 702 | ||
701 | zpci_map_resources(zdev); | 703 | zpci_map_resources(pdev); |
702 | zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, | 704 | zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, |
703 | zdev->start_dma + zdev->iommu_size - 1, | 705 | zdev->start_dma + zdev->iommu_size - 1, |
704 | (u64) zdev->dma_table); | 706 | (u64) zdev->dma_table); |
@@ -709,12 +711,14 @@ out: | |||
709 | 711 | ||
710 | static int zpci_freeze(struct device *dev) | 712 | static int zpci_freeze(struct device *dev) |
711 | { | 713 | { |
712 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 714 | struct pci_dev *pdev = to_pci_dev(dev); |
715 | struct zpci_dev *zdev = get_zdev(pdev); | ||
713 | 716 | ||
714 | if (zdev->state != ZPCI_FN_STATE_ONLINE) | 717 | if (zdev->state != ZPCI_FN_STATE_ONLINE) |
715 | return 0; | 718 | return 0; |
716 | 719 | ||
717 | zpci_unregister_ioat(zdev, 0); | 720 | zpci_unregister_ioat(zdev, 0); |
721 | zpci_unmap_resources(pdev); | ||
718 | return clp_disable_fh(zdev); | 722 | return clp_disable_fh(zdev); |
719 | } | 723 | } |
720 | 724 | ||
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 8aa271b3d1ad..b1bb2b72302c 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c | |||
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, | |||
64 | if (copy_from_user(buf, user_buffer, length)) | 64 | if (copy_from_user(buf, user_buffer, length)) |
65 | goto out; | 65 | goto out; |
66 | 66 | ||
67 | memcpy_toio(io_addr, buf, length); | 67 | ret = zpci_memcpy_toio(io_addr, buf, length); |
68 | ret = 0; | ||
69 | out: | 68 | out: |
70 | if (buf != local_buf) | 69 | if (buf != local_buf) |
71 | kfree(buf); | 70 | kfree(buf); |
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, | |||
98 | goto out; | 97 | goto out; |
99 | io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); | 98 | io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); |
100 | 99 | ||
101 | ret = -EFAULT; | 100 | if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) { |
102 | if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) | 101 | ret = -EFAULT; |
103 | goto out; | 102 | goto out; |
104 | 103 | } | |
105 | memcpy_fromio(buf, io_addr, length); | 104 | ret = zpci_memcpy_fromio(buf, io_addr, length); |
106 | 105 | if (ret) | |
107 | if (copy_to_user(user_buffer, buf, length)) | ||
108 | goto out; | 106 | goto out; |
107 | if (copy_to_user(user_buffer, buf, length)) | ||
108 | ret = -EFAULT; | ||
109 | 109 | ||
110 | ret = 0; | ||
111 | out: | 110 | out: |
112 | if (buf != local_buf) | 111 | if (buf != local_buf) |
113 | kfree(buf); | 112 | kfree(buf); |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c2fb8a87dccb..b7d31ca55187 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -499,6 +499,7 @@ config X86_INTEL_QUARK | |||
499 | depends on X86_IO_APIC | 499 | depends on X86_IO_APIC |
500 | select IOSF_MBI | 500 | select IOSF_MBI |
501 | select INTEL_IMR | 501 | select INTEL_IMR |
502 | select COMMON_CLK | ||
502 | ---help--- | 503 | ---help--- |
503 | Select to include support for Quark X1000 SoC. | 504 | Select to include support for Quark X1000 SoC. |
504 | Say Y here if you have a Quark based system such as the Arduino | 505 | Say Y here if you have a Quark based system such as the Arduino |
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 5fa9770035dc..c9a6d68b8d62 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h | |||
@@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask) | |||
82 | if (boot_cpu_has(X86_FEATURE_XSAVES)) | 82 | if (boot_cpu_has(X86_FEATURE_XSAVES)) |
83 | asm volatile("1:"XSAVES"\n\t" | 83 | asm volatile("1:"XSAVES"\n\t" |
84 | "2:\n\t" | 84 | "2:\n\t" |
85 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 85 | xstate_fault |
86 | : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
86 | : "memory"); | 87 | : "memory"); |
87 | else | 88 | else |
88 | asm volatile("1:"XSAVE"\n\t" | 89 | asm volatile("1:"XSAVE"\n\t" |
89 | "2:\n\t" | 90 | "2:\n\t" |
90 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 91 | xstate_fault |
92 | : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
91 | : "memory"); | 93 | : "memory"); |
92 | |||
93 | asm volatile(xstate_fault | ||
94 | : "0" (0) | ||
95 | : "memory"); | ||
96 | |||
97 | return err; | 94 | return err; |
98 | } | 95 | } |
99 | 96 | ||
@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask) | |||
112 | if (boot_cpu_has(X86_FEATURE_XSAVES)) | 109 | if (boot_cpu_has(X86_FEATURE_XSAVES)) |
113 | asm volatile("1:"XRSTORS"\n\t" | 110 | asm volatile("1:"XRSTORS"\n\t" |
114 | "2:\n\t" | 111 | "2:\n\t" |
115 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 112 | xstate_fault |
113 | : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
116 | : "memory"); | 114 | : "memory"); |
117 | else | 115 | else |
118 | asm volatile("1:"XRSTOR"\n\t" | 116 | asm volatile("1:"XRSTOR"\n\t" |
119 | "2:\n\t" | 117 | "2:\n\t" |
120 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 118 | xstate_fault |
119 | : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
121 | : "memory"); | 120 | : "memory"); |
122 | |||
123 | asm volatile(xstate_fault | ||
124 | : "0" (0) | ||
125 | : "memory"); | ||
126 | |||
127 | return err; | 121 | return err; |
128 | } | 122 | } |
129 | 123 | ||
@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask) | |||
149 | */ | 143 | */ |
150 | alternative_input_2( | 144 | alternative_input_2( |
151 | "1:"XSAVE, | 145 | "1:"XSAVE, |
152 | "1:"XSAVEOPT, | 146 | XSAVEOPT, |
153 | X86_FEATURE_XSAVEOPT, | 147 | X86_FEATURE_XSAVEOPT, |
154 | "1:"XSAVES, | 148 | XSAVES, |
155 | X86_FEATURE_XSAVES, | 149 | X86_FEATURE_XSAVES, |
156 | [fx] "D" (fx), "a" (lmask), "d" (hmask) : | 150 | [fx] "D" (fx), "a" (lmask), "d" (hmask) : |
157 | "memory"); | 151 | "memory"); |
@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask) | |||
178 | */ | 172 | */ |
179 | alternative_input( | 173 | alternative_input( |
180 | "1: " XRSTOR, | 174 | "1: " XRSTOR, |
181 | "1: " XRSTORS, | 175 | XRSTORS, |
182 | X86_FEATURE_XSAVES, | 176 | X86_FEATURE_XSAVES, |
183 | "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 177 | "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) |
184 | : "memory"); | 178 | : "memory"); |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 10074ad9ebf8..1d74d161687c 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -269,11 +269,14 @@ ENTRY(ret_from_fork) | |||
269 | testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? | 269 | testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? |
270 | jz 1f | 270 | jz 1f |
271 | 271 | ||
272 | testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET | 272 | /* |
273 | jnz int_ret_from_sys_call | 273 | * By the time we get here, we have no idea whether our pt_regs, |
274 | 274 | * ti flags, and ti status came from the 64-bit SYSCALL fast path, | |
275 | RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET | 275 | * the slow path, or one of the ia32entry paths. |
276 | jmp ret_from_sys_call # go to the SYSRET fastpath | 276 | * Use int_ret_from_sys_call to return, since it can safely handle |
277 | * all of the above. | ||
278 | */ | ||
279 | jmp int_ret_from_sys_call | ||
277 | 280 | ||
278 | 1: | 281 | 1: |
279 | subq $REST_SKIP, %rsp # leave space for volatiles | 282 | subq $REST_SKIP, %rsp # leave space for volatiles |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e0b794a84c35..106c01557f2b 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4950 | goto done; | 4950 | goto done; |
4951 | } | 4951 | } |
4952 | } | 4952 | } |
4953 | ctxt->dst.orig_val = ctxt->dst.val; | 4953 | /* Copy full 64-bit value for CMPXCHG8B. */ |
4954 | ctxt->dst.orig_val64 = ctxt->dst.val64; | ||
4954 | 4955 | ||
4955 | special_insn: | 4956 | special_insn: |
4956 | 4957 | ||
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index e55b5fc344eb..bd4e34de24c7 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) | |||
1572 | apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); | 1572 | apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); |
1573 | } | 1573 | } |
1574 | apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); | 1574 | apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); |
1575 | apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm); | 1575 | apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0; |
1576 | apic->highest_isr_cache = -1; | 1576 | apic->highest_isr_cache = -1; |
1577 | update_divide_count(apic); | 1577 | update_divide_count(apic); |
1578 | atomic_set(&apic->lapic_timer.pending, 0); | 1578 | atomic_set(&apic->lapic_timer.pending, 0); |
@@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, | |||
1782 | update_divide_count(apic); | 1782 | update_divide_count(apic); |
1783 | start_apic_timer(apic); | 1783 | start_apic_timer(apic); |
1784 | apic->irr_pending = true; | 1784 | apic->irr_pending = true; |
1785 | apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ? | 1785 | apic->isr_count = kvm_x86_ops->hwapic_isr_update ? |
1786 | 1 : count_vectors(apic->regs + APIC_ISR); | 1786 | 1 : count_vectors(apic->regs + APIC_ISR); |
1787 | apic->highest_isr_cache = -1; | 1787 | apic->highest_isr_cache = -1; |
1788 | if (kvm_x86_ops->hwapic_irr_update) | 1788 | if (kvm_x86_ops->hwapic_irr_update) |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d319e0c24758..cc618c882f90 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) | |||
3649 | return; | 3649 | return; |
3650 | } | 3650 | } |
3651 | 3651 | ||
3652 | static void svm_hwapic_isr_update(struct kvm *kvm, int isr) | ||
3653 | { | ||
3654 | return; | ||
3655 | } | ||
3656 | |||
3657 | static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) | 3652 | static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) |
3658 | { | 3653 | { |
3659 | return; | 3654 | return; |
@@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = { | |||
4403 | .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, | 4398 | .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, |
4404 | .vm_has_apicv = svm_vm_has_apicv, | 4399 | .vm_has_apicv = svm_vm_has_apicv, |
4405 | .load_eoi_exitmap = svm_load_eoi_exitmap, | 4400 | .load_eoi_exitmap = svm_load_eoi_exitmap, |
4406 | .hwapic_isr_update = svm_hwapic_isr_update, | ||
4407 | .sync_pir_to_irr = svm_sync_pir_to_irr, | 4401 | .sync_pir_to_irr = svm_sync_pir_to_irr, |
4408 | 4402 | ||
4409 | .set_tss_addr = svm_set_tss_addr, | 4403 | .set_tss_addr = svm_set_tss_addr, |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 14c1a18d206a..f7b20b417a3a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -4367,6 +4367,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) | |||
4367 | return 0; | 4367 | return 0; |
4368 | } | 4368 | } |
4369 | 4369 | ||
4370 | static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) | ||
4371 | { | ||
4372 | #ifdef CONFIG_SMP | ||
4373 | if (vcpu->mode == IN_GUEST_MODE) { | ||
4374 | apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), | ||
4375 | POSTED_INTR_VECTOR); | ||
4376 | return true; | ||
4377 | } | ||
4378 | #endif | ||
4379 | return false; | ||
4380 | } | ||
4381 | |||
4370 | static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, | 4382 | static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, |
4371 | int vector) | 4383 | int vector) |
4372 | { | 4384 | { |
@@ -4375,9 +4387,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, | |||
4375 | if (is_guest_mode(vcpu) && | 4387 | if (is_guest_mode(vcpu) && |
4376 | vector == vmx->nested.posted_intr_nv) { | 4388 | vector == vmx->nested.posted_intr_nv) { |
4377 | /* the PIR and ON have been set by L1. */ | 4389 | /* the PIR and ON have been set by L1. */ |
4378 | if (vcpu->mode == IN_GUEST_MODE) | 4390 | kvm_vcpu_trigger_posted_interrupt(vcpu); |
4379 | apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), | ||
4380 | POSTED_INTR_VECTOR); | ||
4381 | /* | 4391 | /* |
4382 | * If a posted intr is not recognized by hardware, | 4392 | * If a posted intr is not recognized by hardware, |
4383 | * we will accomplish it in the next vmentry. | 4393 | * we will accomplish it in the next vmentry. |
@@ -4409,12 +4419,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) | |||
4409 | 4419 | ||
4410 | r = pi_test_and_set_on(&vmx->pi_desc); | 4420 | r = pi_test_and_set_on(&vmx->pi_desc); |
4411 | kvm_make_request(KVM_REQ_EVENT, vcpu); | 4421 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
4412 | #ifdef CONFIG_SMP | 4422 | if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu)) |
4413 | if (!r && (vcpu->mode == IN_GUEST_MODE)) | ||
4414 | apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), | ||
4415 | POSTED_INTR_VECTOR); | ||
4416 | else | ||
4417 | #endif | ||
4418 | kvm_vcpu_kick(vcpu); | 4423 | kvm_vcpu_kick(vcpu); |
4419 | } | 4424 | } |
4420 | 4425 | ||
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 6ac273832f28..e4695985f9de 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info, | |||
331 | struct list_head *list) | 331 | struct list_head *list) |
332 | { | 332 | { |
333 | int ret; | 333 | int ret; |
334 | struct resource_entry *entry; | 334 | struct resource_entry *entry, *tmp; |
335 | 335 | ||
336 | sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); | 336 | sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); |
337 | info->bridge = device; | 337 | info->bridge = device; |
@@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info, | |||
345 | dev_dbg(&device->dev, | 345 | dev_dbg(&device->dev, |
346 | "no IO and memory resources present in _CRS\n"); | 346 | "no IO and memory resources present in _CRS\n"); |
347 | else | 347 | else |
348 | resource_list_for_each_entry(entry, list) | 348 | resource_list_for_each_entry_safe(entry, tmp, list) { |
349 | entry->res->name = info->name; | 349 | if ((entry->res->flags & IORESOURCE_WINDOW) == 0 || |
350 | (entry->res->flags & IORESOURCE_DISABLED)) | ||
351 | resource_list_destroy_entry(entry); | ||
352 | else | ||
353 | entry->res->name = info->name; | ||
354 | } | ||
350 | } | 355 | } |
351 | 356 | ||
352 | struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) | 357 | struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) |
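The acpi.c hunk switches to resource_list_for_each_entry_safe() because the loop body may now call resource_list_destroy_entry(), which unlinks and frees the current entry; the plain iterator would then compute the next element from freed memory. A generic sketch of the same pattern using the ordinary list helpers, with a made-up item type to keep it self-contained:

/* Illustrative only: why the _safe iterator is needed when entries are freed. */
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	bool drop;
};

static void prune(struct list_head *head)
{
	struct item *it, *tmp;

	/* 'tmp' caches the next entry before the body runs, so freeing
	 * 'it' inside the loop cannot corrupt the traversal. */
	list_for_each_entry_safe(it, tmp, head, node) {
		if (it->drop) {
			list_del(&it->node);
			kfree(it);
		}
	}
}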