diff options
author | David S. Miller <davem@davemloft.net> | 2019-02-20 03:34:07 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2019-02-20 03:34:07 -0500 |
commit | 375ca548f7e3ac82acdd0959eddd1fa0e17c35cc (patch) | |
tree | 5360dc427e4eff7123613419ee522b7fda831de0 /arch | |
parent | 58066ac9d7f5dcde4ef08c03b7e127f0522d9ea0 (diff) | |
parent | 40e196a906d969fd10d885c692d2674b3d657006 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Two easily resolvable overlapping change conflicts, one in
TCP and one in the eBPF verifier.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
49 files changed, 404 insertions, 184 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 664e918e2624..26524b75970a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1400,6 +1400,7 @@ config NR_CPUS | |||
1400 | config HOTPLUG_CPU | 1400 | config HOTPLUG_CPU |
1401 | bool "Support for hot-pluggable CPUs" | 1401 | bool "Support for hot-pluggable CPUs" |
1402 | depends on SMP | 1402 | depends on SMP |
1403 | select GENERIC_IRQ_MIGRATION | ||
1403 | help | 1404 | help |
1404 | Say Y here to experiment with turning CPUs off and on. CPUs | 1405 | Say Y here to experiment with turning CPUs off and on. CPUs |
1405 | can be controlled through /sys/devices/system/cpu. | 1406 | can be controlled through /sys/devices/system/cpu. |
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 04758a2a87f0..67d77eee9433 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts | |||
@@ -644,6 +644,17 @@ | |||
644 | }; | 644 | }; |
645 | }; | 645 | }; |
646 | 646 | ||
647 | /* Configure pwm clock source for timers 8 & 9 */ | ||
648 | &timer8 { | ||
649 | assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>; | ||
650 | assigned-clock-parents = <&sys_clkin_ck>; | ||
651 | }; | ||
652 | |||
653 | &timer9 { | ||
654 | assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>; | ||
655 | assigned-clock-parents = <&sys_clkin_ck>; | ||
656 | }; | ||
657 | |||
647 | /* | 658 | /* |
648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for | 659 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for |
649 | * uart1 wakeirq. | 660 | * uart1 wakeirq. |
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index bc853ebeda22..61a06f6add3c 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi | |||
@@ -317,7 +317,8 @@ | |||
317 | 317 | ||
318 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { | 318 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { |
319 | pinctrl-single,pins = < | 319 | pinctrl-single,pins = < |
320 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ | 320 | /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ |
321 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) | ||
321 | >; | 322 | >; |
322 | }; | 323 | }; |
323 | 324 | ||
@@ -385,7 +386,8 @@ | |||
385 | 386 | ||
386 | palmas: palmas@48 { | 387 | palmas: palmas@48 { |
387 | compatible = "ti,palmas"; | 388 | compatible = "ti,palmas"; |
388 | interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ | 389 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ |
390 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; | ||
389 | reg = <0x48>; | 391 | reg = <0x48>; |
390 | interrupt-controller; | 392 | interrupt-controller; |
391 | #interrupt-cells = <2>; | 393 | #interrupt-cells = <2>; |
@@ -651,7 +653,8 @@ | |||
651 | pinctrl-names = "default"; | 653 | pinctrl-names = "default"; |
652 | pinctrl-0 = <&twl6040_pins>; | 654 | pinctrl-0 = <&twl6040_pins>; |
653 | 655 | ||
654 | interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ | 656 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ |
657 | interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>; | ||
655 | 658 | ||
656 | /* audpwron gpio defined in the board specific dts */ | 659 | /* audpwron gpio defined in the board specific dts */ |
657 | 660 | ||
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 5e21fb430a65..e78d3718f145 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts | |||
@@ -181,6 +181,13 @@ | |||
181 | OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ | 181 | OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ |
182 | >; | 182 | >; |
183 | }; | 183 | }; |
184 | |||
185 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { | ||
186 | pinctrl-single,pins = < | ||
187 | /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ | ||
188 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) | ||
189 | >; | ||
190 | }; | ||
184 | }; | 191 | }; |
185 | 192 | ||
186 | &omap5_pmx_core { | 193 | &omap5_pmx_core { |
@@ -414,8 +421,11 @@ | |||
414 | 421 | ||
415 | palmas: palmas@48 { | 422 | palmas: palmas@48 { |
416 | compatible = "ti,palmas"; | 423 | compatible = "ti,palmas"; |
417 | interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ | ||
418 | reg = <0x48>; | 424 | reg = <0x48>; |
425 | pinctrl-0 = <&palmas_sys_nirq_pins>; | ||
426 | pinctrl-names = "default"; | ||
427 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ | ||
428 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; | ||
419 | interrupt-controller; | 429 | interrupt-controller; |
420 | #interrupt-cells = <2>; | 430 | #interrupt-cells = <2>; |
421 | ti,system-power-controller; | 431 | ti,system-power-controller; |
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index 4acb501dd3f8..3ed49898f4b2 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi | |||
@@ -719,7 +719,6 @@ | |||
719 | pm_qos = <&qos_lcdc0>, | 719 | pm_qos = <&qos_lcdc0>, |
720 | <&qos_lcdc1>, | 720 | <&qos_lcdc1>, |
721 | <&qos_cif0>, | 721 | <&qos_cif0>, |
722 | <&qos_cif1>, | ||
723 | <&qos_ipp>, | 722 | <&qos_ipp>, |
724 | <&qos_rga>; | 723 | <&qos_rga>; |
725 | }; | 724 | }; |
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index c883fcbe93b6..46d41140df27 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #ifndef __ASSEMBLY__ | 25 | #ifndef __ASSEMBLY__ |
26 | struct irqaction; | 26 | struct irqaction; |
27 | struct pt_regs; | 27 | struct pt_regs; |
28 | extern void migrate_irqs(void); | ||
29 | 28 | ||
30 | extern void asm_do_IRQ(unsigned int, struct pt_regs *); | 29 | extern void asm_do_IRQ(unsigned int, struct pt_regs *); |
31 | void handle_IRQ(unsigned int, struct pt_regs *); | 30 | void handle_IRQ(unsigned int, struct pt_regs *); |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index ca56537b61bc..50e89869178a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -48,6 +48,7 @@ | |||
48 | #define KVM_REQ_SLEEP \ | 48 | #define KVM_REQ_SLEEP \ |
49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) | 49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) | 50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
51 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) | ||
51 | 52 | ||
52 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); | 53 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
53 | 54 | ||
@@ -147,6 +148,13 @@ struct kvm_cpu_context { | |||
147 | 148 | ||
148 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 149 | typedef struct kvm_cpu_context kvm_cpu_context_t; |
149 | 150 | ||
151 | struct vcpu_reset_state { | ||
152 | unsigned long pc; | ||
153 | unsigned long r0; | ||
154 | bool be; | ||
155 | bool reset; | ||
156 | }; | ||
157 | |||
150 | struct kvm_vcpu_arch { | 158 | struct kvm_vcpu_arch { |
151 | struct kvm_cpu_context ctxt; | 159 | struct kvm_cpu_context ctxt; |
152 | 160 | ||
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch { | |||
186 | /* Cache some mmu pages needed inside spinlock regions */ | 194 | /* Cache some mmu pages needed inside spinlock regions */ |
187 | struct kvm_mmu_memory_cache mmu_page_cache; | 195 | struct kvm_mmu_memory_cache mmu_page_cache; |
188 | 196 | ||
197 | struct vcpu_reset_state reset_state; | ||
198 | |||
189 | /* Detect first run of a vcpu */ | 199 | /* Detect first run of a vcpu */ |
190 | bool has_run_once; | 200 | bool has_run_once; |
191 | }; | 201 | }; |
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index c4b1d4fb1797..de2089501b8b 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h | |||
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) | |||
76 | #define S2_PMD_MASK PMD_MASK | 76 | #define S2_PMD_MASK PMD_MASK |
77 | #define S2_PMD_SIZE PMD_SIZE | 77 | #define S2_PMD_SIZE PMD_SIZE |
78 | 78 | ||
79 | static inline bool kvm_stage2_has_pmd(struct kvm *kvm) | ||
80 | { | ||
81 | return true; | ||
82 | } | ||
83 | |||
79 | #endif /* __ARM_S2_PGTABLE_H_ */ | 84 | #endif /* __ARM_S2_PGTABLE_H_ */ |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9908dacf9229..844861368cd5 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/smp.h> | 31 | #include <linux/smp.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/ratelimit.h> | ||
35 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
36 | #include <linux/list.h> | 35 | #include <linux/list.h> |
37 | #include <linux/kallsyms.h> | 36 | #include <linux/kallsyms.h> |
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void) | |||
109 | return nr_irqs; | 108 | return nr_irqs; |
110 | } | 109 | } |
111 | #endif | 110 | #endif |
112 | |||
113 | #ifdef CONFIG_HOTPLUG_CPU | ||
114 | static bool migrate_one_irq(struct irq_desc *desc) | ||
115 | { | ||
116 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
117 | const struct cpumask *affinity = irq_data_get_affinity_mask(d); | ||
118 | struct irq_chip *c; | ||
119 | bool ret = false; | ||
120 | |||
121 | /* | ||
122 | * If this is a per-CPU interrupt, or the affinity does not | ||
123 | * include this CPU, then we have nothing to do. | ||
124 | */ | ||
125 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | ||
126 | return false; | ||
127 | |||
128 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
129 | affinity = cpu_online_mask; | ||
130 | ret = true; | ||
131 | } | ||
132 | |||
133 | c = irq_data_get_irq_chip(d); | ||
134 | if (!c->irq_set_affinity) | ||
135 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | ||
136 | else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) | ||
137 | cpumask_copy(irq_data_get_affinity_mask(d), affinity); | ||
138 | |||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * The current CPU has been marked offline. Migrate IRQs off this CPU. | ||
144 | * If the affinity settings do not allow other CPUs, force them onto any | ||
145 | * available CPU. | ||
146 | * | ||
147 | * Note: we must iterate over all IRQs, whether they have an attached | ||
148 | * action structure or not, as we need to get chained interrupts too. | ||
149 | */ | ||
150 | void migrate_irqs(void) | ||
151 | { | ||
152 | unsigned int i; | ||
153 | struct irq_desc *desc; | ||
154 | unsigned long flags; | ||
155 | |||
156 | local_irq_save(flags); | ||
157 | |||
158 | for_each_irq_desc(i, desc) { | ||
159 | bool affinity_broken; | ||
160 | |||
161 | raw_spin_lock(&desc->lock); | ||
162 | affinity_broken = migrate_one_irq(desc); | ||
163 | raw_spin_unlock(&desc->lock); | ||
164 | |||
165 | if (affinity_broken) | ||
166 | pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", | ||
167 | i, smp_processor_id()); | ||
168 | } | ||
169 | |||
170 | local_irq_restore(flags); | ||
171 | } | ||
172 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3bf82232b1be..1d6f5ea522f4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -254,7 +254,7 @@ int __cpu_disable(void) | |||
254 | /* | 254 | /* |
255 | * OK - migrate IRQs away from this CPU | 255 | * OK - migrate IRQs away from this CPU |
256 | */ | 256 | */ |
257 | migrate_irqs(); | 257 | irq_migrate_all_off_this_cpu(); |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * Flush user cache and TLB mappings, and then remove this CPU | 260 | * Flush user cache and TLB mappings, and then remove this CPU |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 222c1635bc7a..e8bd288fd5be 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) | |||
1450 | reset_coproc_regs(vcpu, table, num); | 1450 | reset_coproc_regs(vcpu, table, num); |
1451 | 1451 | ||
1452 | for (num = 1; num < NR_CP15_REGS; num++) | 1452 | for (num = 1; num < NR_CP15_REGS; num++) |
1453 | if (vcpu_cp15(vcpu, num) == 0x42424242) | 1453 | WARN(vcpu_cp15(vcpu, num) == 0x42424242, |
1454 | panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); | 1454 | "Didn't reset vcpu_cp15(vcpu, %zi)", num); |
1455 | } | 1455 | } |
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index 5ed0c3ee33d6..e53327912adc 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/cputype.h> | 26 | #include <asm/cputype.h> |
27 | #include <asm/kvm_arm.h> | 27 | #include <asm/kvm_arm.h> |
28 | #include <asm/kvm_coproc.h> | 28 | #include <asm/kvm_coproc.h> |
29 | #include <asm/kvm_emulate.h> | ||
29 | 30 | ||
30 | #include <kvm/arm_arch_timer.h> | 31 | #include <kvm/arm_arch_timer.h> |
31 | 32 | ||
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
69 | /* Reset CP15 registers */ | 70 | /* Reset CP15 registers */ |
70 | kvm_reset_coprocs(vcpu); | 71 | kvm_reset_coprocs(vcpu); |
71 | 72 | ||
73 | /* | ||
74 | * Additional reset state handling that PSCI may have imposed on us. | ||
75 | * Must be done after all the sys_reg reset. | ||
76 | */ | ||
77 | if (READ_ONCE(vcpu->arch.reset_state.reset)) { | ||
78 | unsigned long target_pc = vcpu->arch.reset_state.pc; | ||
79 | |||
80 | /* Gracefully handle Thumb2 entry point */ | ||
81 | if (target_pc & 1) { | ||
82 | target_pc &= ~1UL; | ||
83 | vcpu_set_thumb(vcpu); | ||
84 | } | ||
85 | |||
86 | /* Propagate caller endianness */ | ||
87 | if (vcpu->arch.reset_state.be) | ||
88 | kvm_vcpu_set_be(vcpu); | ||
89 | |||
90 | *vcpu_pc(vcpu) = target_pc; | ||
91 | vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); | ||
92 | |||
93 | vcpu->arch.reset_state.reset = false; | ||
94 | } | ||
95 | |||
72 | /* Reset arch_timer context */ | 96 | /* Reset arch_timer context */ |
73 | return kvm_timer_vcpu_reset(vcpu); | 97 | return kvm_timer_vcpu_reset(vcpu); |
74 | } | 98 | } |
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index a8b291f00109..dae514c8276a 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c | |||
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, | |||
152 | mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && | 152 | mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && |
153 | (cx->mpu_logic_state == PWRDM_POWER_OFF); | 153 | (cx->mpu_logic_state == PWRDM_POWER_OFF); |
154 | 154 | ||
155 | /* Enter broadcast mode for periodic timers */ | ||
156 | tick_broadcast_enable(); | ||
157 | |||
158 | /* Enter broadcast mode for one-shot timers */ | ||
155 | tick_broadcast_enter(); | 159 | tick_broadcast_enter(); |
156 | 160 | ||
157 | /* | 161 | /* |
@@ -218,15 +222,6 @@ fail: | |||
218 | return index; | 222 | return index; |
219 | } | 223 | } |
220 | 224 | ||
221 | /* | ||
222 | * For each cpu, setup the broadcast timer because local timers | ||
223 | * stops for the states above C1. | ||
224 | */ | ||
225 | static void omap_setup_broadcast_timer(void *arg) | ||
226 | { | ||
227 | tick_broadcast_enable(); | ||
228 | } | ||
229 | |||
230 | static struct cpuidle_driver omap4_idle_driver = { | 225 | static struct cpuidle_driver omap4_idle_driver = { |
231 | .name = "omap4_idle", | 226 | .name = "omap4_idle", |
232 | .owner = THIS_MODULE, | 227 | .owner = THIS_MODULE, |
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void) | |||
319 | if (!cpu_clkdm[0] || !cpu_clkdm[1]) | 314 | if (!cpu_clkdm[0] || !cpu_clkdm[1]) |
320 | return -ENODEV; | 315 | return -ENODEV; |
321 | 316 | ||
322 | /* Configure the broadcast timer on each cpu */ | ||
323 | on_each_cpu(omap_setup_broadcast_timer, NULL, 1); | ||
324 | |||
325 | return cpuidle_register(idle_driver, cpu_online_mask); | 317 | return cpuidle_register(idle_driver, cpu_online_mask); |
326 | } | 318 | } |
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index f86b72d1d59e..1444b4b4bd9f 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) | |||
83 | u32 enable_mask, enable_shift; | 83 | u32 enable_mask, enable_shift; |
84 | u32 pipd_mask, pipd_shift; | 84 | u32 pipd_mask, pipd_shift; |
85 | u32 reg; | 85 | u32 reg; |
86 | int ret; | ||
86 | 87 | ||
87 | if (dsi_id == 0) { | 88 | if (dsi_id == 0) { |
88 | enable_mask = OMAP4_DSI1_LANEENABLE_MASK; | 89 | enable_mask = OMAP4_DSI1_LANEENABLE_MASK; |
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) | |||
98 | return -ENODEV; | 99 | return -ENODEV; |
99 | } | 100 | } |
100 | 101 | ||
101 | regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®); | 102 | ret = regmap_read(omap4_dsi_mux_syscon, |
103 | OMAP4_DSIPHY_SYSCON_OFFSET, | ||
104 | ®); | ||
105 | if (ret) | ||
106 | return ret; | ||
102 | 107 | ||
103 | reg &= ~enable_mask; | 108 | reg &= ~enable_mask; |
104 | reg &= ~pipd_mask; | 109 | reg &= ~pipd_mask; |
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index fc5fb776a710..17558be4bf0a 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |||
@@ -50,6 +50,9 @@ | |||
50 | #define OMAP4_NR_BANKS 4 | 50 | #define OMAP4_NR_BANKS 4 |
51 | #define OMAP4_NR_IRQS 128 | 51 | #define OMAP4_NR_IRQS 128 |
52 | 52 | ||
53 | #define SYS_NIRQ1_EXT_SYS_IRQ_1 7 | ||
54 | #define SYS_NIRQ2_EXT_SYS_IRQ_2 119 | ||
55 | |||
53 | static void __iomem *wakeupgen_base; | 56 | static void __iomem *wakeupgen_base; |
54 | static void __iomem *sar_base; | 57 | static void __iomem *sar_base; |
55 | static DEFINE_RAW_SPINLOCK(wakeupgen_lock); | 58 | static DEFINE_RAW_SPINLOCK(wakeupgen_lock); |
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d) | |||
153 | irq_chip_unmask_parent(d); | 156 | irq_chip_unmask_parent(d); |
154 | } | 157 | } |
155 | 158 | ||
159 | /* | ||
160 | * The sys_nirq pins bypass peripheral modules and are wired directly | ||
161 | * to MPUSS wakeupgen. They get automatically inverted for GIC. | ||
162 | */ | ||
163 | static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type) | ||
164 | { | ||
165 | bool inverted = false; | ||
166 | |||
167 | switch (type) { | ||
168 | case IRQ_TYPE_LEVEL_LOW: | ||
169 | type &= ~IRQ_TYPE_LEVEL_MASK; | ||
170 | type |= IRQ_TYPE_LEVEL_HIGH; | ||
171 | inverted = true; | ||
172 | break; | ||
173 | case IRQ_TYPE_EDGE_FALLING: | ||
174 | type &= ~IRQ_TYPE_EDGE_BOTH; | ||
175 | type |= IRQ_TYPE_EDGE_RISING; | ||
176 | inverted = true; | ||
177 | break; | ||
178 | default: | ||
179 | break; | ||
180 | } | ||
181 | |||
182 | if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 && | ||
183 | d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2) | ||
184 | pr_warn("wakeupgen: irq%li polarity inverted in dts\n", | ||
185 | d->hwirq); | ||
186 | |||
187 | return irq_chip_set_type_parent(d, type); | ||
188 | } | ||
189 | |||
156 | #ifdef CONFIG_HOTPLUG_CPU | 190 | #ifdef CONFIG_HOTPLUG_CPU |
157 | static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); | 191 | static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); |
158 | 192 | ||
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = { | |||
446 | .irq_mask = wakeupgen_mask, | 480 | .irq_mask = wakeupgen_mask, |
447 | .irq_unmask = wakeupgen_unmask, | 481 | .irq_unmask = wakeupgen_unmask, |
448 | .irq_retrigger = irq_chip_retrigger_hierarchy, | 482 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
449 | .irq_set_type = irq_chip_set_type_parent, | 483 | .irq_set_type = wakeupgen_irq_set_type, |
450 | .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, | 484 | .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, |
451 | #ifdef CONFIG_SMP | 485 | #ifdef CONFIG_SMP |
452 | .irq_set_affinity = irq_chip_set_affinity_parent, | 486 | .irq_set_affinity = irq_chip_set_affinity_parent, |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f1e2922e447c..1e3e08a1c456 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev) | |||
2390 | return; | 2390 | return; |
2391 | 2391 | ||
2392 | arm_teardown_iommu_dma_ops(dev); | 2392 | arm_teardown_iommu_dma_ops(dev); |
2393 | /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ | ||
2394 | set_dma_ops(dev, NULL); | ||
2393 | } | 2395 | } |
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index 2c118a6ab358..0dc23fc227ed 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c | |||
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or | |||
247 | } | 247 | } |
248 | 248 | ||
249 | /* Copy arch-dep-instance from template. */ | 249 | /* Copy arch-dep-instance from template. */ |
250 | memcpy(code, (unsigned char *)optprobe_template_entry, | 250 | memcpy(code, (unsigned long *)&optprobe_template_entry, |
251 | TMPL_END_IDX * sizeof(kprobe_opcode_t)); | 251 | TMPL_END_IDX * sizeof(kprobe_opcode_t)); |
252 | 252 | ||
253 | /* Adjust buffer according to instruction. */ | 253 | /* Adjust buffer according to instruction. */ |
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts index 64acccc4bfcb..f74b13aa5aa5 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts | |||
@@ -227,34 +227,34 @@ | |||
227 | 227 | ||
228 | pinctrl_usdhc1_100mhz: usdhc1-100grp { | 228 | pinctrl_usdhc1_100mhz: usdhc1-100grp { |
229 | fsl,pins = < | 229 | fsl,pins = < |
230 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 | 230 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d |
231 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 | 231 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd |
232 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 | 232 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd |
233 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 | 233 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd |
234 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 | 234 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd |
235 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 | 235 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd |
236 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 | 236 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd |
237 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 | 237 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd |
238 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 | 238 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd |
239 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 | 239 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd |
240 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 | 240 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d |
241 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 | 241 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 |
242 | >; | 242 | >; |
243 | }; | 243 | }; |
244 | 244 | ||
245 | pinctrl_usdhc1_200mhz: usdhc1-200grp { | 245 | pinctrl_usdhc1_200mhz: usdhc1-200grp { |
246 | fsl,pins = < | 246 | fsl,pins = < |
247 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 | 247 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f |
248 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 | 248 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf |
249 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 | 249 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf |
250 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 | 250 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf |
251 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 | 251 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf |
252 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 | 252 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf |
253 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 | 253 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf |
254 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 | 254 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf |
255 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 | 255 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf |
256 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 | 256 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf |
257 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 | 257 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f |
258 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 | 258 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 |
259 | >; | 259 | >; |
260 | }; | 260 | }; |
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 8e9d6d5ed7b2..b6d31499fb43 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi | |||
@@ -360,6 +360,8 @@ | |||
360 | <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, | 360 | <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, |
361 | <&clk IMX8MQ_CLK_USDHC1_ROOT>; | 361 | <&clk IMX8MQ_CLK_USDHC1_ROOT>; |
362 | clock-names = "ipg", "ahb", "per"; | 362 | clock-names = "ipg", "ahb", "per"; |
363 | assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>; | ||
364 | assigned-clock-rates = <400000000>; | ||
363 | fsl,tuning-start-tap = <20>; | 365 | fsl,tuning-start-tap = <20>; |
364 | fsl,tuning-step = <2>; | 366 | fsl,tuning-step = <2>; |
365 | bus-width = <4>; | 367 | bus-width = <4>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index bd937d68ca3b..040b36ef0dd2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | |||
@@ -40,6 +40,7 @@ | |||
40 | pinctrl-0 = <&usb30_host_drv>; | 40 | pinctrl-0 = <&usb30_host_drv>; |
41 | regulator-name = "vcc_host_5v"; | 41 | regulator-name = "vcc_host_5v"; |
42 | regulator-always-on; | 42 | regulator-always-on; |
43 | regulator-boot-on; | ||
43 | vin-supply = <&vcc_sys>; | 44 | vin-supply = <&vcc_sys>; |
44 | }; | 45 | }; |
45 | 46 | ||
@@ -51,6 +52,7 @@ | |||
51 | pinctrl-0 = <&usb20_host_drv>; | 52 | pinctrl-0 = <&usb20_host_drv>; |
52 | regulator-name = "vcc_host1_5v"; | 53 | regulator-name = "vcc_host1_5v"; |
53 | regulator-always-on; | 54 | regulator-always-on; |
55 | regulator-boot-on; | ||
54 | vin-supply = <&vcc_sys>; | 56 | vin-supply = <&vcc_sys>; |
55 | }; | 57 | }; |
56 | 58 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index 1ee0dc0d9f10..d1cf404b8708 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts | |||
@@ -22,7 +22,7 @@ | |||
22 | backlight = <&backlight>; | 22 | backlight = <&backlight>; |
23 | power-supply = <&pp3300_disp>; | 23 | power-supply = <&pp3300_disp>; |
24 | 24 | ||
25 | ports { | 25 | port { |
26 | panel_in_edp: endpoint { | 26 | panel_in_edp: endpoint { |
27 | remote-endpoint = <&edp_out_panel>; | 27 | remote-endpoint = <&edp_out_panel>; |
28 | }; | 28 | }; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 81e73103fa78..15e254a77391 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts | |||
@@ -43,7 +43,7 @@ | |||
43 | backlight = <&backlight>; | 43 | backlight = <&backlight>; |
44 | power-supply = <&pp3300_disp>; | 44 | power-supply = <&pp3300_disp>; |
45 | 45 | ||
46 | ports { | 46 | port { |
47 | panel_in_edp: endpoint { | 47 | panel_in_edp: endpoint { |
48 | remote-endpoint = <&edp_out_panel>; | 48 | remote-endpoint = <&edp_out_panel>; |
49 | }; | 49 | }; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index 0b8f1edbd746..b48a63c3efc3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts | |||
@@ -91,7 +91,7 @@ | |||
91 | pinctrl-0 = <&lcd_panel_reset>; | 91 | pinctrl-0 = <&lcd_panel_reset>; |
92 | power-supply = <&vcc3v3_s0>; | 92 | power-supply = <&vcc3v3_s0>; |
93 | 93 | ||
94 | ports { | 94 | port { |
95 | panel_in_edp: endpoint { | 95 | panel_in_edp: endpoint { |
96 | remote-endpoint = <&edp_out_panel>; | 96 | remote-endpoint = <&edp_out_panel>; |
97 | }; | 97 | }; |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..da3fc7324d68 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -48,6 +48,7 @@ | |||
48 | #define KVM_REQ_SLEEP \ | 48 | #define KVM_REQ_SLEEP \ |
49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) | 49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) | 50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
51 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) | ||
51 | 52 | ||
52 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); | 53 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
53 | 54 | ||
@@ -208,6 +209,13 @@ struct kvm_cpu_context { | |||
208 | 209 | ||
209 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 210 | typedef struct kvm_cpu_context kvm_cpu_context_t; |
210 | 211 | ||
212 | struct vcpu_reset_state { | ||
213 | unsigned long pc; | ||
214 | unsigned long r0; | ||
215 | bool be; | ||
216 | bool reset; | ||
217 | }; | ||
218 | |||
211 | struct kvm_vcpu_arch { | 219 | struct kvm_vcpu_arch { |
212 | struct kvm_cpu_context ctxt; | 220 | struct kvm_cpu_context ctxt; |
213 | 221 | ||
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch { | |||
297 | /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ | 305 | /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ |
298 | u64 vsesr_el2; | 306 | u64 vsesr_el2; |
299 | 307 | ||
308 | /* Additional reset state */ | ||
309 | struct vcpu_reset_state reset_state; | ||
310 | |||
300 | /* True when deferrable sysregs are loaded on the physical CPU, | 311 | /* True when deferrable sysregs are loaded on the physical CPU, |
301 | * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ | 312 | * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ |
302 | bool sysregs_loaded_on_cpu; | 313 | bool sysregs_loaded_on_cpu; |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e1ec947e7c0c..0c656850eeea 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x) | |||
332 | #define virt_addr_valid(kaddr) \ | 332 | #define virt_addr_valid(kaddr) \ |
333 | (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) | 333 | (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) |
334 | 334 | ||
335 | /* | ||
336 | * Given that the GIC architecture permits ITS implementations that can only be | ||
337 | * configured with a LPI table address once, GICv3 systems with many CPUs may | ||
338 | * end up reserving a lot of different regions after a kexec for their LPI | ||
339 | * tables (one per CPU), as we are forced to reuse the same memory after kexec | ||
340 | * (and thus reserve it persistently with EFI beforehand) | ||
341 | */ | ||
342 | #if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS) | ||
343 | # define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1) | ||
344 | #endif | ||
345 | |||
335 | #include <asm-generic/memory_model.h> | 346 | #include <asm-generic/memory_model.h> |
336 | 347 | ||
337 | #endif | 348 | #endif |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4b0e1231625c..d09ec76f08cf 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p) | |||
313 | arm64_memblock_init(); | 313 | arm64_memblock_init(); |
314 | 314 | ||
315 | paging_init(); | 315 | paging_init(); |
316 | efi_apply_persistent_mem_reservations(); | ||
317 | 316 | ||
318 | acpi_table_upgrade(); | 317 | acpi_table_upgrade(); |
319 | 318 | ||
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b0b1478094b4..421ebf6f7086 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <kvm/arm_psci.h> | 23 | #include <kvm/arm_psci.h> |
24 | 24 | ||
25 | #include <asm/cpufeature.h> | 25 | #include <asm/cpufeature.h> |
26 | #include <asm/kprobes.h> | ||
26 | #include <asm/kvm_asm.h> | 27 | #include <asm/kvm_asm.h> |
27 | #include <asm/kvm_emulate.h> | 28 | #include <asm/kvm_emulate.h> |
28 | #include <asm/kvm_host.h> | 29 | #include <asm/kvm_host.h> |
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) | |||
107 | 108 | ||
108 | write_sysreg(kvm_get_hyp_vector(), vbar_el1); | 109 | write_sysreg(kvm_get_hyp_vector(), vbar_el1); |
109 | } | 110 | } |
111 | NOKPROBE_SYMBOL(activate_traps_vhe); | ||
110 | 112 | ||
111 | static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) | 113 | static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) |
112 | { | 114 | { |
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void) | |||
154 | write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); | 156 | write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); |
155 | write_sysreg(vectors, vbar_el1); | 157 | write_sysreg(vectors, vbar_el1); |
156 | } | 158 | } |
159 | NOKPROBE_SYMBOL(deactivate_traps_vhe); | ||
157 | 160 | ||
158 | static void __hyp_text __deactivate_traps_nvhe(void) | 161 | static void __hyp_text __deactivate_traps_nvhe(void) |
159 | { | 162 | { |
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) | |||
513 | 516 | ||
514 | return exit_code; | 517 | return exit_code; |
515 | } | 518 | } |
519 | NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); | ||
516 | 520 | ||
517 | /* Switch to the guest for legacy non-VHE systems */ | 521 | /* Switch to the guest for legacy non-VHE systems */ |
518 | int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) | 522 | int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) |
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, | |||
620 | read_sysreg_el2(esr), read_sysreg_el2(far), | 624 | read_sysreg_el2(esr), read_sysreg_el2(far), |
621 | read_sysreg(hpfar_el2), par, vcpu); | 625 | read_sysreg(hpfar_el2), par, vcpu); |
622 | } | 626 | } |
627 | NOKPROBE_SYMBOL(__hyp_call_panic_vhe); | ||
623 | 628 | ||
624 | void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) | 629 | void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) |
625 | { | 630 | { |
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 68d6f7c3b237..b426e2cf973c 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
19 | #include <linux/kvm_host.h> | 19 | #include <linux/kvm_host.h> |
20 | 20 | ||
21 | #include <asm/kprobes.h> | ||
21 | #include <asm/kvm_asm.h> | 22 | #include <asm/kvm_asm.h> |
22 | #include <asm/kvm_emulate.h> | 23 | #include <asm/kvm_emulate.h> |
23 | #include <asm/kvm_hyp.h> | 24 | #include <asm/kvm_hyp.h> |
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) | |||
98 | { | 99 | { |
99 | __sysreg_save_common_state(ctxt); | 100 | __sysreg_save_common_state(ctxt); |
100 | } | 101 | } |
102 | NOKPROBE_SYMBOL(sysreg_save_host_state_vhe); | ||
101 | 103 | ||
102 | void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) | 104 | void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) |
103 | { | 105 | { |
104 | __sysreg_save_common_state(ctxt); | 106 | __sysreg_save_common_state(ctxt); |
105 | __sysreg_save_el2_return_state(ctxt); | 107 | __sysreg_save_el2_return_state(ctxt); |
106 | } | 108 | } |
109 | NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe); | ||
107 | 110 | ||
108 | static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) | 111 | static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) |
109 | { | 112 | { |
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) | |||
188 | { | 191 | { |
189 | __sysreg_restore_common_state(ctxt); | 192 | __sysreg_restore_common_state(ctxt); |
190 | } | 193 | } |
194 | NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe); | ||
191 | 195 | ||
192 | void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) | 196 | void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) |
193 | { | 197 | { |
194 | __sysreg_restore_common_state(ctxt); | 198 | __sysreg_restore_common_state(ctxt); |
195 | __sysreg_restore_el2_return_state(ctxt); | 199 | __sysreg_restore_el2_return_state(ctxt); |
196 | } | 200 | } |
201 | NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe); | ||
197 | 202 | ||
198 | void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) | 203 | void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) |
199 | { | 204 | { |
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index b72a3dd56204..f16a5f8ff2b4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/kvm_arm.h> | 32 | #include <asm/kvm_arm.h> |
33 | #include <asm/kvm_asm.h> | 33 | #include <asm/kvm_asm.h> |
34 | #include <asm/kvm_coproc.h> | 34 | #include <asm/kvm_coproc.h> |
35 | #include <asm/kvm_emulate.h> | ||
35 | #include <asm/kvm_mmu.h> | 36 | #include <asm/kvm_mmu.h> |
36 | 37 | ||
37 | /* Maximum phys_shift supported for any VM on this host */ | 38 | /* Maximum phys_shift supported for any VM on this host */ |
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
105 | * This function finds the right table above and sets the registers on | 106 | * This function finds the right table above and sets the registers on |
106 | * the virtual CPU struct to their architecturally defined reset | 107 | * the virtual CPU struct to their architecturally defined reset |
107 | * values. | 108 | * values. |
109 | * | ||
110 | * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT | ||
111 | * ioctl or as part of handling a request issued by another VCPU in the PSCI | ||
112 | * handling code. In the first case, the VCPU will not be loaded, and in the | ||
113 | * second case the VCPU will be loaded. Because this function operates purely | ||
114 | on the memory-backed values of system registers, we want to do a full put if | ||
115 | * we were loaded (handling a request) and load the values back at the end of | ||
116 | * the function. Otherwise we leave the state alone. In both cases, we | ||
117 | * disable preemption around the vcpu reset as we would otherwise race with | ||
118 | * preempt notifiers which also call put/load. | ||
108 | */ | 119 | */ |
109 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | 120 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) |
110 | { | 121 | { |
111 | const struct kvm_regs *cpu_reset; | 122 | const struct kvm_regs *cpu_reset; |
123 | int ret = -EINVAL; | ||
124 | bool loaded; | ||
125 | |||
126 | preempt_disable(); | ||
127 | loaded = (vcpu->cpu != -1); | ||
128 | if (loaded) | ||
129 | kvm_arch_vcpu_put(vcpu); | ||
112 | 130 | ||
113 | switch (vcpu->arch.target) { | 131 | switch (vcpu->arch.target) { |
114 | default: | 132 | default: |
115 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { | 133 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { |
116 | if (!cpu_has_32bit_el1()) | 134 | if (!cpu_has_32bit_el1()) |
117 | return -EINVAL; | 135 | goto out; |
118 | cpu_reset = &default_regs_reset32; | 136 | cpu_reset = &default_regs_reset32; |
119 | } else { | 137 | } else { |
120 | cpu_reset = &default_regs_reset; | 138 | cpu_reset = &default_regs_reset; |
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
129 | /* Reset system registers */ | 147 | /* Reset system registers */ |
130 | kvm_reset_sys_regs(vcpu); | 148 | kvm_reset_sys_regs(vcpu); |
131 | 149 | ||
150 | /* | ||
151 | * Additional reset state handling that PSCI may have imposed on us. | ||
152 | * Must be done after all the sys_reg reset. | ||
153 | */ | ||
154 | if (vcpu->arch.reset_state.reset) { | ||
155 | unsigned long target_pc = vcpu->arch.reset_state.pc; | ||
156 | |||
157 | /* Gracefully handle Thumb2 entry point */ | ||
158 | if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { | ||
159 | target_pc &= ~1UL; | ||
160 | vcpu_set_thumb(vcpu); | ||
161 | } | ||
162 | |||
163 | /* Propagate caller endianness */ | ||
164 | if (vcpu->arch.reset_state.be) | ||
165 | kvm_vcpu_set_be(vcpu); | ||
166 | |||
167 | *vcpu_pc(vcpu) = target_pc; | ||
168 | vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); | ||
169 | |||
170 | vcpu->arch.reset_state.reset = false; | ||
171 | } | ||
172 | |||
132 | /* Reset PMU */ | 173 | /* Reset PMU */ |
133 | kvm_pmu_vcpu_reset(vcpu); | 174 | kvm_pmu_vcpu_reset(vcpu); |
134 | 175 | ||
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
137 | vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; | 178 | vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; |
138 | 179 | ||
139 | /* Reset timer */ | 180 | /* Reset timer */ |
140 | return kvm_timer_vcpu_reset(vcpu); | 181 | ret = kvm_timer_vcpu_reset(vcpu); |
182 | out: | ||
183 | if (loaded) | ||
184 | kvm_arch_vcpu_load(vcpu, smp_processor_id()); | ||
185 | preempt_enable(); | ||
186 | return ret; | ||
141 | } | 187 | } |
142 | 188 | ||
143 | void kvm_set_ipa_limit(void) | 189 | void kvm_set_ipa_limit(void) |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e3e37228ae4e..c936aa40c3f4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, | |||
314 | return read_zero(vcpu, p); | 314 | return read_zero(vcpu, p); |
315 | } | 315 | } |
316 | 316 | ||
317 | static bool trap_undef(struct kvm_vcpu *vcpu, | 317 | /* |
318 | struct sys_reg_params *p, | 318 | * ARMv8.1 mandates at least a trivial LORegion implementation, where all the |
319 | const struct sys_reg_desc *r) | 319 | * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0 |
320 | * system, these registers should UNDEF. LORID_EL1 being a RO register, we | ||
321 | * treat it separately. | ||
322 | */ | ||
323 | static bool trap_loregion(struct kvm_vcpu *vcpu, | ||
324 | struct sys_reg_params *p, | ||
325 | const struct sys_reg_desc *r) | ||
320 | { | 326 | { |
321 | kvm_inject_undefined(vcpu); | 327 | u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
322 | return false; | 328 | u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1, |
329 | (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); | ||
330 | |||
331 | if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { | ||
332 | kvm_inject_undefined(vcpu); | ||
333 | return false; | ||
334 | } | ||
335 | |||
336 | if (p->is_write && sr == SYS_LORID_EL1) | ||
337 | return write_to_read_only(vcpu, p, r); | ||
338 | |||
339 | return trap_raz_wi(vcpu, p, r); | ||
323 | } | 340 | } |
324 | 341 | ||
325 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, | 342 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, |
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) | |||
1048 | if (val & ptrauth_mask) | 1065 | if (val & ptrauth_mask) |
1049 | kvm_debug("ptrauth unsupported for guests, suppressing\n"); | 1066 | kvm_debug("ptrauth unsupported for guests, suppressing\n"); |
1050 | val &= ~ptrauth_mask; | 1067 | val &= ~ptrauth_mask; |
1051 | } else if (id == SYS_ID_AA64MMFR1_EL1) { | ||
1052 | if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) | ||
1053 | kvm_debug("LORegions unsupported for guests, suppressing\n"); | ||
1054 | |||
1055 | val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); | ||
1056 | } | 1068 | } |
1057 | 1069 | ||
1058 | return val; | 1070 | return val; |
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
1338 | { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, | 1350 | { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, |
1339 | { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, | 1351 | { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, |
1340 | 1352 | ||
1341 | { SYS_DESC(SYS_LORSA_EL1), trap_undef }, | 1353 | { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, |
1342 | { SYS_DESC(SYS_LOREA_EL1), trap_undef }, | 1354 | { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, |
1343 | { SYS_DESC(SYS_LORN_EL1), trap_undef }, | 1355 | { SYS_DESC(SYS_LORN_EL1), trap_loregion }, |
1344 | { SYS_DESC(SYS_LORC_EL1), trap_undef }, | 1356 | { SYS_DESC(SYS_LORC_EL1), trap_loregion }, |
1345 | { SYS_DESC(SYS_LORID_EL1), trap_undef }, | 1357 | { SYS_DESC(SYS_LORID_EL1), trap_loregion }, |
1346 | 1358 | ||
1347 | { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, | 1359 | { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, |
1348 | { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, | 1360 | { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, |
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) | |||
2596 | table = get_target_table(vcpu->arch.target, true, &num); | 2608 | table = get_target_table(vcpu->arch.target, true, &num); |
2597 | reset_sys_reg_descs(vcpu, table, num); | 2609 | reset_sys_reg_descs(vcpu, table, num); |
2598 | 2610 | ||
2599 | for (num = 1; num < NR_SYS_REGS; num++) | 2611 | for (num = 1; num < NR_SYS_REGS; num++) { |
2600 | if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) | 2612 | if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, |
2601 | panic("Didn't reset __vcpu_sys_reg(%zi)", num); | 2613 | "Didn't reset __vcpu_sys_reg(%zi)\n", num)) |
2614 | break; | ||
2615 | } | ||
2602 | } | 2616 | } |
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index edfcbb25fd9f..dcea277c09ae 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h | |||
@@ -45,8 +45,8 @@ | |||
45 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) | 45 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) |
46 | #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) | 46 | #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) |
47 | #define pte_clear(mm, addr, ptep) set_pte((ptep), \ | 47 | #define pte_clear(mm, addr, ptep) set_pte((ptep), \ |
48 | (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) | 48 | (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0))) |
49 | #define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) | 49 | #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) |
50 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | 50 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) |
51 | #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) | 51 | #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) |
52 | #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ | 52 | #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ |
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte) | |||
241 | 241 | ||
242 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) | 242 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) |
243 | 243 | ||
244 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
245 | struct file; | ||
246 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
247 | unsigned long size, pgprot_t vma_prot); | ||
248 | |||
244 | /* | 249 | /* |
245 | * Macro to make mark a page protection value as "uncacheable". Note | 250 | * Macro to make mark a page protection value as "uncacheable". Note |
246 | * that "protection" is really a misnomer here as the protection value | 251 | * that "protection" is really a misnomer here as the protection value |
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 8f454810514f..21e0bd5293dd 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h | |||
@@ -49,7 +49,7 @@ struct thread_struct { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | #define INIT_THREAD { \ | 51 | #define INIT_THREAD { \ |
52 | .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ | 52 | .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ |
53 | .sr = DEFAULT_PSR_VALUE, \ | 53 | .sr = DEFAULT_PSR_VALUE, \ |
54 | } | 54 | } |
55 | 55 | ||
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p); | |||
95 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) | 95 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) |
96 | 96 | ||
97 | #define task_pt_regs(p) \ | 97 | #define task_pt_regs(p) \ |
98 | ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) | 98 | ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) |
99 | 99 | ||
100 | #define cpu_relax() barrier() | 100 | #define cpu_relax() barrier() |
101 | 101 | ||
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c index 659253e9989c..d67f9777cfd9 100644 --- a/arch/csky/kernel/dumpstack.c +++ b/arch/csky/kernel/dumpstack.c | |||
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack) | |||
38 | if (task) | 38 | if (task) |
39 | stack = (unsigned long *)thread_saved_fp(task); | 39 | stack = (unsigned long *)thread_saved_fp(task); |
40 | else | 40 | else |
41 | #ifdef CONFIG_STACKTRACE | ||
42 | asm volatile("mov %0, r8\n":"=r"(stack)::"memory"); | ||
43 | #else | ||
41 | stack = (unsigned long *)&stack; | 44 | stack = (unsigned long *)&stack; |
45 | #endif | ||
42 | } | 46 | } |
43 | 47 | ||
44 | show_trace(stack); | 48 | show_trace(stack); |
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 57f1afe19a52..f2f12fff36f7 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/ptrace.h> | 8 | #include <linux/ptrace.h> |
9 | #include <linux/regset.h> | 9 | #include <linux/regset.h> |
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/sched/task_stack.h> | ||
11 | #include <linux/signal.h> | 12 | #include <linux/signal.h> |
12 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
13 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target, | |||
159 | static const struct user_regset csky_regsets[] = { | 160 | static const struct user_regset csky_regsets[] = { |
160 | [REGSET_GPR] = { | 161 | [REGSET_GPR] = { |
161 | .core_note_type = NT_PRSTATUS, | 162 | .core_note_type = NT_PRSTATUS, |
162 | .n = ELF_NGREG, | 163 | .n = sizeof(struct pt_regs) / sizeof(u32), |
163 | .size = sizeof(u32), | 164 | .size = sizeof(u32), |
164 | .align = sizeof(u32), | 165 | .align = sizeof(u32), |
165 | .get = &gpr_get, | 166 | .get = &gpr_get, |
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index ddc4dd79f282..b07a534b3062 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c | |||
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
160 | { | 160 | { |
161 | unsigned long mask = 1 << cpu; | 161 | unsigned long mask = 1 << cpu; |
162 | 162 | ||
163 | secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; | 163 | secondary_stack = |
164 | (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; | ||
164 | secondary_hint = mfcr("cr31"); | 165 | secondary_hint = mfcr("cr31"); |
165 | secondary_ccr = mfcr("cr18"); | 166 | secondary_ccr = mfcr("cr18"); |
166 | 167 | ||
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c index cb7c03e5cd21..8473b6bdf512 100644 --- a/arch/csky/mm/ioremap.c +++ b/arch/csky/mm/ioremap.c | |||
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr) | |||
46 | vunmap((void *)((unsigned long)addr & PAGE_MASK)); | 46 | vunmap((void *)((unsigned long)addr & PAGE_MASK)); |
47 | } | 47 | } |
48 | EXPORT_SYMBOL(iounmap); | 48 | EXPORT_SYMBOL(iounmap); |
49 | |||
50 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
51 | unsigned long size, pgprot_t vma_prot) | ||
52 | { | ||
53 | if (!pfn_valid(pfn)) { | ||
54 | vma_prot.pgprot |= _PAGE_SO; | ||
55 | return pgprot_noncached(vma_prot); | ||
56 | } else if (file->f_flags & O_SYNC) { | ||
57 | return pgprot_noncached(vma_prot); | ||
58 | } | ||
59 | |||
60 | return vma_prot; | ||
61 | } | ||
62 | EXPORT_SYMBOL(phys_mem_access_prot); | ||
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index b16710a8a9e7..76e9bf88d3b9 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c | |||
@@ -79,8 +79,6 @@ enum reg_val_type { | |||
79 | REG_64BIT_32BIT, | 79 | REG_64BIT_32BIT, |
80 | /* 32-bit compatible, need truncation for 64-bit ops. */ | 80 | /* 32-bit compatible, need truncation for 64-bit ops. */ |
81 | REG_32BIT, | 81 | REG_32BIT, |
82 | /* 32-bit zero extended. */ | ||
83 | REG_32BIT_ZERO_EX, | ||
84 | /* 32-bit no sign/zero extension needed. */ | 82 | /* 32-bit no sign/zero extension needed. */ |
85 | REG_32BIT_POS | 83 | REG_32BIT_POS |
86 | }; | 84 | }; |
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) | |||
343 | const struct bpf_prog *prog = ctx->skf; | 341 | const struct bpf_prog *prog = ctx->skf; |
344 | int stack_adjust = ctx->stack_size; | 342 | int stack_adjust = ctx->stack_size; |
345 | int store_offset = stack_adjust - 8; | 343 | int store_offset = stack_adjust - 8; |
344 | enum reg_val_type td; | ||
346 | int r0 = MIPS_R_V0; | 345 | int r0 = MIPS_R_V0; |
347 | 346 | ||
348 | if (dest_reg == MIPS_R_RA && | 347 | if (dest_reg == MIPS_R_RA) { |
349 | get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) | ||
350 | /* Don't let zero extended value escape. */ | 348 | /* Don't let zero extended value escape. */ |
351 | emit_instr(ctx, sll, r0, r0, 0); | 349 | td = get_reg_val_type(ctx, prog->len, BPF_REG_0); |
350 | if (td == REG_64BIT) | ||
351 | emit_instr(ctx, sll, r0, r0, 0); | ||
352 | } | ||
352 | 353 | ||
353 | if (ctx->flags & EBPF_SAVE_RA) { | 354 | if (ctx->flags & EBPF_SAVE_RA) { |
354 | emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); | 355 | emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); |
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
692 | if (dst < 0) | 693 | if (dst < 0) |
693 | return dst; | 694 | return dst; |
694 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 695 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
695 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 696 | if (td == REG_64BIT) { |
696 | /* sign extend */ | 697 | /* sign extend */ |
697 | emit_instr(ctx, sll, dst, dst, 0); | 698 | emit_instr(ctx, sll, dst, dst, 0); |
698 | } | 699 | } |
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
707 | if (dst < 0) | 708 | if (dst < 0) |
708 | return dst; | 709 | return dst; |
709 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 710 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
710 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 711 | if (td == REG_64BIT) { |
711 | /* sign extend */ | 712 | /* sign extend */ |
712 | emit_instr(ctx, sll, dst, dst, 0); | 713 | emit_instr(ctx, sll, dst, dst, 0); |
713 | } | 714 | } |
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
721 | if (dst < 0) | 722 | if (dst < 0) |
722 | return dst; | 723 | return dst; |
723 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 724 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
724 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) | 725 | if (td == REG_64BIT) |
725 | /* sign extend */ | 726 | /* sign extend */ |
726 | emit_instr(ctx, sll, dst, dst, 0); | 727 | emit_instr(ctx, sll, dst, dst, 0); |
727 | if (insn->imm == 1) { | 728 | if (insn->imm == 1) { |
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
860 | if (src < 0 || dst < 0) | 861 | if (src < 0 || dst < 0) |
861 | return -EINVAL; | 862 | return -EINVAL; |
862 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 863 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
863 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 864 | if (td == REG_64BIT) { |
864 | /* sign extend */ | 865 | /* sign extend */ |
865 | emit_instr(ctx, sll, dst, dst, 0); | 866 | emit_instr(ctx, sll, dst, dst, 0); |
866 | } | 867 | } |
867 | did_move = false; | 868 | did_move = false; |
868 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); | 869 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); |
869 | if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { | 870 | if (ts == REG_64BIT) { |
870 | int tmp_reg = MIPS_R_AT; | 871 | int tmp_reg = MIPS_R_AT; |
871 | 872 | ||
872 | if (bpf_op == BPF_MOV) { | 873 | if (bpf_op == BPF_MOV) { |
@@ -1254,8 +1255,7 @@ jeq_common: | |||
1254 | if (insn->imm == 64 && td == REG_32BIT) | 1255 | if (insn->imm == 64 && td == REG_32BIT) |
1255 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | 1256 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); |
1256 | 1257 | ||
1257 | if (insn->imm != 64 && | 1258 | if (insn->imm != 64 && td == REG_64BIT) { |
1258 | (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { | ||
1259 | /* sign extend */ | 1259 | /* sign extend */ |
1260 | emit_instr(ctx, sll, dst, dst, 0); | 1260 | emit_instr(ctx, sll, dst, dst, 0); |
1261 | } | 1261 | } |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index c9bfe526ca9d..d8c8d7c9df15 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h | |||
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud) | |||
904 | 904 | ||
905 | static inline int pud_present(pud_t pud) | 905 | static inline int pud_present(pud_t pud) |
906 | { | 906 | { |
907 | return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); | 907 | return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); |
908 | } | 908 | } |
909 | 909 | ||
910 | extern struct page *pud_page(pud_t pud); | 910 | extern struct page *pud_page(pud_t pud); |
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd) | |||
951 | 951 | ||
952 | static inline int pgd_present(pgd_t pgd) | 952 | static inline int pgd_present(pgd_t pgd) |
953 | { | 953 | { |
954 | return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); | 954 | return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); |
955 | } | 955 | } |
956 | 956 | ||
957 | static inline pte_t pgd_pte(pgd_t pgd) | 957 | static inline pte_t pgd_pte(pgd_t pgd) |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 374a19712e20..b684f0294f35 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void) | |||
2278 | x86_pmu.check_microcode(); | 2278 | x86_pmu.check_microcode(); |
2279 | } | 2279 | } |
2280 | 2280 | ||
2281 | static int x86_pmu_check_period(struct perf_event *event, u64 value) | ||
2282 | { | ||
2283 | if (x86_pmu.check_period && x86_pmu.check_period(event, value)) | ||
2284 | return -EINVAL; | ||
2285 | |||
2286 | if (value && x86_pmu.limit_period) { | ||
2287 | if (x86_pmu.limit_period(event, value) > value) | ||
2288 | return -EINVAL; | ||
2289 | } | ||
2290 | |||
2291 | return 0; | ||
2292 | } | ||
2293 | |||
2281 | static struct pmu pmu = { | 2294 | static struct pmu pmu = { |
2282 | .pmu_enable = x86_pmu_enable, | 2295 | .pmu_enable = x86_pmu_enable, |
2283 | .pmu_disable = x86_pmu_disable, | 2296 | .pmu_disable = x86_pmu_disable, |
@@ -2302,6 +2315,7 @@ static struct pmu pmu = { | |||
2302 | .event_idx = x86_pmu_event_idx, | 2315 | .event_idx = x86_pmu_event_idx, |
2303 | .sched_task = x86_pmu_sched_task, | 2316 | .sched_task = x86_pmu_sched_task, |
2304 | .task_ctx_size = sizeof(struct x86_perf_task_context), | 2317 | .task_ctx_size = sizeof(struct x86_perf_task_context), |
2318 | .check_period = x86_pmu_check_period, | ||
2305 | }; | 2319 | }; |
2306 | 2320 | ||
2307 | void arch_perf_update_userpage(struct perf_event *event, | 2321 | void arch_perf_update_userpage(struct perf_event *event, |
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index daafb893449b..730978dff63f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, | |||
3587 | intel_pmu_lbr_sched_task(ctx, sched_in); | 3587 | intel_pmu_lbr_sched_task(ctx, sched_in); |
3588 | } | 3588 | } |
3589 | 3589 | ||
3590 | static int intel_pmu_check_period(struct perf_event *event, u64 value) | ||
3591 | { | ||
3592 | return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; | ||
3593 | } | ||
3594 | |||
3590 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); | 3595 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); |
3591 | 3596 | ||
3592 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); | 3597 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); |
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = { | |||
3667 | .cpu_starting = intel_pmu_cpu_starting, | 3672 | .cpu_starting = intel_pmu_cpu_starting, |
3668 | .cpu_dying = intel_pmu_cpu_dying, | 3673 | .cpu_dying = intel_pmu_cpu_dying, |
3669 | .cpu_dead = intel_pmu_cpu_dead, | 3674 | .cpu_dead = intel_pmu_cpu_dead, |
3675 | |||
3676 | .check_period = intel_pmu_check_period, | ||
3670 | }; | 3677 | }; |
3671 | 3678 | ||
3672 | static struct attribute *intel_pmu_attrs[]; | 3679 | static struct attribute *intel_pmu_attrs[]; |
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
3711 | 3718 | ||
3712 | .guest_get_msrs = intel_guest_get_msrs, | 3719 | .guest_get_msrs = intel_guest_get_msrs, |
3713 | .sched_task = intel_pmu_sched_task, | 3720 | .sched_task = intel_pmu_sched_task, |
3721 | |||
3722 | .check_period = intel_pmu_check_period, | ||
3714 | }; | 3723 | }; |
3715 | 3724 | ||
3716 | static __init void intel_clovertown_quirk(void) | 3725 | static __init void intel_clovertown_quirk(void) |
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78d7b7031bfc..d46fd6754d92 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
@@ -646,6 +646,11 @@ struct x86_pmu { | |||
646 | * Intel host/guest support (KVM) | 646 | * Intel host/guest support (KVM) |
647 | */ | 647 | */ |
648 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | 648 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); |
649 | |||
650 | /* | ||
651 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. | ||
652 | */ | ||
653 | int (*check_period) (struct perf_event *event, u64 period); | ||
649 | }; | 654 | }; |
650 | 655 | ||
651 | struct x86_perf_task_context { | 656 | struct x86_perf_task_context { |
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void) | |||
857 | 862 | ||
858 | #ifdef CONFIG_CPU_SUP_INTEL | 863 | #ifdef CONFIG_CPU_SUP_INTEL |
859 | 864 | ||
860 | static inline bool intel_pmu_has_bts(struct perf_event *event) | 865 | static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
861 | { | 866 | { |
862 | struct hw_perf_event *hwc = &event->hw; | 867 | struct hw_perf_event *hwc = &event->hw; |
863 | unsigned int hw_event, bts_event; | 868 | unsigned int hw_event, bts_event; |
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) | |||
868 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; | 873 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; |
869 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); | 874 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); |
870 | 875 | ||
871 | return hw_event == bts_event && hwc->sample_period == 1; | 876 | return hw_event == bts_event && period == 1; |
877 | } | ||
878 | |||
879 | static inline bool intel_pmu_has_bts(struct perf_event *event) | ||
880 | { | ||
881 | struct hw_perf_event *hwc = &event->hw; | ||
882 | |||
883 | return intel_pmu_has_bts_period(event, hwc->sample_period); | ||
872 | } | 884 | } |
873 | 885 | ||
874 | int intel_pmu_save_and_restart(struct perf_event *event); | 886 | int intel_pmu_save_and_restart(struct perf_event *event); |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index f65b78d32f5e..7dbbe9ffda17 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n) | |||
51 | /* | 51 | /* |
52 | * fill in the user structure for a core dump.. | 52 | * fill in the user structure for a core dump.. |
53 | */ | 53 | */ |
54 | static void dump_thread32(struct pt_regs *regs, struct user32 *dump) | 54 | static void fill_dump(struct pt_regs *regs, struct user32 *dump) |
55 | { | 55 | { |
56 | u32 fs, gs; | 56 | u32 fs, gs; |
57 | memset(dump, 0, sizeof(*dump)); | 57 | memset(dump, 0, sizeof(*dump)); |
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm) | |||
157 | fs = get_fs(); | 157 | fs = get_fs(); |
158 | set_fs(KERNEL_DS); | 158 | set_fs(KERNEL_DS); |
159 | has_dumped = 1; | 159 | has_dumped = 1; |
160 | |||
161 | fill_dump(cprm->regs, &dump); | ||
162 | |||
160 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); | 163 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); |
161 | dump.u_ar0 = offsetof(struct user32, regs); | 164 | dump.u_ar0 = offsetof(struct user32, regs); |
162 | dump.signal = cprm->siginfo->si_signo; | 165 | dump.signal = cprm->siginfo->si_signo; |
163 | dump_thread32(cprm->regs, &dump); | ||
164 | 166 | ||
165 | /* | 167 | /* |
166 | * If the size of the dump file exceeds the rlimit, then see | 168 | * If the size of the dump file exceeds the rlimit, then see |
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index d9a9993af882..9f15384c504a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h | |||
@@ -52,6 +52,8 @@ | |||
52 | 52 | ||
53 | #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 | 53 | #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 |
54 | 54 | ||
55 | #define INTEL_FAM6_ICELAKE_MOBILE 0x7E | ||
56 | |||
55 | /* "Small Core" Processors (Atom) */ | 57 | /* "Small Core" Processors (Atom) */ |
56 | 58 | ||
57 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ | 59 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ |
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index e652a7cc6186..3f697a9e3f59 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h | |||
@@ -48,7 +48,8 @@ enum { | |||
48 | BIOS_STATUS_SUCCESS = 0, | 48 | BIOS_STATUS_SUCCESS = 0, |
49 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, | 49 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, |
50 | BIOS_STATUS_EINVAL = -EINVAL, | 50 | BIOS_STATUS_EINVAL = -EINVAL, |
51 | BIOS_STATUS_UNAVAIL = -EBUSY | 51 | BIOS_STATUS_UNAVAIL = -EBUSY, |
52 | BIOS_STATUS_ABORT = -EINTR, | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | /* Address map parameters */ | 55 | /* Address map parameters */ |
@@ -167,4 +168,9 @@ extern long system_serial_number; | |||
167 | 168 | ||
168 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ | 169 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ |
169 | 170 | ||
171 | /* | ||
172 | * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details | ||
173 | */ | ||
174 | extern struct semaphore __efi_uv_runtime_lock; | ||
175 | |||
170 | #endif /* _ASM_X86_UV_BIOS_H */ | 176 | #endif /* _ASM_X86_UV_BIOS_H */ |
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d8ea4ebd79e7..d737a51a53ca 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c | |||
@@ -2473,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, | |||
2473 | (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) | 2473 | (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) |
2474 | return -EINVAL; | 2474 | return -EINVAL; |
2475 | 2475 | ||
2476 | if (!nested_cpu_has_preemption_timer(vmcs12) && | ||
2477 | nested_cpu_has_save_preemption_timer(vmcs12)) | ||
2478 | return -EINVAL; | ||
2479 | |||
2476 | if (nested_cpu_has_ept(vmcs12) && | 2480 | if (nested_cpu_has_ept(vmcs12) && |
2477 | !valid_ept_address(vcpu, vmcs12->ept_pointer)) | 2481 | !valid_ept_address(vcpu, vmcs12->ept_pointer)) |
2478 | return -EINVAL; | 2482 | return -EINVAL; |
@@ -5557,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, | |||
5557 | * secondary cpu-based controls. Do not include those that | 5561 | * secondary cpu-based controls. Do not include those that |
5558 | * depend on CPUID bits, they are added later by vmx_cpuid_update. | 5562 | * depend on CPUID bits, they are added later by vmx_cpuid_update. |
5559 | */ | 5563 | */ |
5560 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | 5564 | if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) |
5561 | msrs->secondary_ctls_low, | 5565 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, |
5562 | msrs->secondary_ctls_high); | 5566 | msrs->secondary_ctls_low, |
5567 | msrs->secondary_ctls_high); | ||
5568 | |||
5563 | msrs->secondary_ctls_low = 0; | 5569 | msrs->secondary_ctls_low = 0; |
5564 | msrs->secondary_ctls_high &= | 5570 | msrs->secondary_ctls_high &= |
5565 | SECONDARY_EXEC_DESC | | 5571 | SECONDARY_EXEC_DESC | |
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 95d618045001..30a6bcd735ec 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c | |||
@@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, | |||
863 | if (!entry_only) | 863 | if (!entry_only) |
864 | j = find_msr(&m->host, msr); | 864 | j = find_msr(&m->host, msr); |
865 | 865 | ||
866 | if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { | 866 | if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || |
867 | (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { | ||
867 | printk_once(KERN_WARNING "Not enough msr switch entries. " | 868 | printk_once(KERN_WARNING "Not enough msr switch entries. " |
868 | "Can't add msr %x\n", msr); | 869 | "Can't add msr %x\n", msr); |
869 | return; | 870 | return; |
@@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
1193 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) | 1194 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) |
1194 | return; | 1195 | return; |
1195 | 1196 | ||
1196 | /* | ||
1197 | * First handle the simple case where no cmpxchg is necessary; just | ||
1198 | * allow posting non-urgent interrupts. | ||
1199 | * | ||
1200 | * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change | ||
1201 | * PI.NDST: pi_post_block will do it for us and the wakeup_handler | ||
1202 | * expects the VCPU to be on the blocked_vcpu_list that matches | ||
1203 | * PI.NDST. | ||
1204 | */ | ||
1205 | if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || | ||
1206 | vcpu->cpu == cpu) { | ||
1207 | pi_clear_sn(pi_desc); | ||
1208 | return; | ||
1209 | } | ||
1210 | |||
1211 | /* The full case. */ | 1197 | /* The full case. */ |
1212 | do { | 1198 | do { |
1213 | old.control = new.control = pi_desc->control; | 1199 | old.control = new.control = pi_desc->control; |
@@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
1222 | new.sn = 0; | 1208 | new.sn = 0; |
1223 | } while (cmpxchg64(&pi_desc->control, old.control, | 1209 | } while (cmpxchg64(&pi_desc->control, old.control, |
1224 | new.control) != old.control); | 1210 | new.control) != old.control); |
1211 | |||
1212 | /* | ||
1213 | * Clear SN before reading the bitmap. The VT-d firmware | ||
1214 | * writes the bitmap and reads SN atomically (5.2.3 in the | ||
1215 | * spec), so it doesn't really have a memory barrier that | ||
1216 | * pairs with this, but we cannot do that and we need one. | ||
1217 | */ | ||
1218 | smp_mb__after_atomic(); | ||
1219 | |||
1220 | if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS)) | ||
1221 | pi_set_on(pi_desc); | ||
1225 | } | 1222 | } |
1226 | 1223 | ||
1227 | /* | 1224 | /* |
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 99328954c2fc..0ac0a64c7790 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h | |||
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) | |||
337 | return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); | 337 | return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); |
338 | } | 338 | } |
339 | 339 | ||
340 | static inline void pi_clear_sn(struct pi_desc *pi_desc) | 340 | static inline void pi_set_sn(struct pi_desc *pi_desc) |
341 | { | 341 | { |
342 | return clear_bit(POSTED_INTR_SN, | 342 | return set_bit(POSTED_INTR_SN, |
343 | (unsigned long *)&pi_desc->control); | 343 | (unsigned long *)&pi_desc->control); |
344 | } | 344 | } |
345 | 345 | ||
346 | static inline void pi_set_sn(struct pi_desc *pi_desc) | 346 | static inline void pi_set_on(struct pi_desc *pi_desc) |
347 | { | 347 | { |
348 | return set_bit(POSTED_INTR_SN, | 348 | set_bit(POSTED_INTR_ON, |
349 | (unsigned long *)&pi_desc->control); | 349 | (unsigned long *)&pi_desc->control); |
350 | } | 350 | } |
351 | 351 | ||
352 | static inline void pi_clear_on(struct pi_desc *pi_desc) | 352 | static inline void pi_clear_on(struct pi_desc *pi_desc) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e67ecf25e690..941f932373d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -7801,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
7801 | * 1) We should set ->mode before checking ->requests. Please see | 7801 | * 1) We should set ->mode before checking ->requests. Please see |
7802 | * the comment in kvm_vcpu_exiting_guest_mode(). | 7802 | * the comment in kvm_vcpu_exiting_guest_mode(). |
7803 | * | 7803 | * |
7804 | * 2) For APICv, we should set ->mode before checking PIR.ON. This | 7804 | * 2) For APICv, we should set ->mode before checking PID.ON. This |
7805 | * pairs with the memory barrier implicit in pi_test_and_set_on | 7805 | * pairs with the memory barrier implicit in pi_test_and_set_on |
7806 | * (see vmx_deliver_posted_interrupt). | 7806 | * (see vmx_deliver_posted_interrupt). |
7807 | * | 7807 | * |
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4a6a5a26c582..eb33432f2f24 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c | |||
@@ -29,7 +29,8 @@ | |||
29 | 29 | ||
30 | struct uv_systab *uv_systab; | 30 | struct uv_systab *uv_systab; |
31 | 31 | ||
32 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | 32 | static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
33 | u64 a4, u64 a5) | ||
33 | { | 34 | { |
34 | struct uv_systab *tab = uv_systab; | 35 | struct uv_systab *tab = uv_systab; |
35 | s64 ret; | 36 | s64 ret; |
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | |||
51 | 52 | ||
52 | return ret; | 53 | return ret; |
53 | } | 54 | } |
55 | |||
56 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | ||
57 | { | ||
58 | s64 ret; | ||
59 | |||
60 | if (down_interruptible(&__efi_uv_runtime_lock)) | ||
61 | return BIOS_STATUS_ABORT; | ||
62 | |||
63 | ret = __uv_bios_call(which, a1, a2, a3, a4, a5); | ||
64 | up(&__efi_uv_runtime_lock); | ||
65 | |||
66 | return ret; | ||
67 | } | ||
54 | EXPORT_SYMBOL_GPL(uv_bios_call); | 68 | EXPORT_SYMBOL_GPL(uv_bios_call); |
55 | 69 | ||
56 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | 70 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | |||
59 | unsigned long bios_flags; | 73 | unsigned long bios_flags; |
60 | s64 ret; | 74 | s64 ret; |
61 | 75 | ||
76 | if (down_interruptible(&__efi_uv_runtime_lock)) | ||
77 | return BIOS_STATUS_ABORT; | ||
78 | |||
62 | local_irq_save(bios_flags); | 79 | local_irq_save(bios_flags); |
63 | ret = uv_bios_call(which, a1, a2, a3, a4, a5); | 80 | ret = __uv_bios_call(which, a1, a2, a3, a4, a5); |
64 | local_irq_restore(bios_flags); | 81 | local_irq_restore(bios_flags); |
65 | 82 | ||
83 | up(&__efi_uv_runtime_lock); | ||
84 | |||
66 | return ret; | 85 | return ret; |
67 | } | 86 | } |
68 | 87 | ||