aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts11
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi9
-rw-r--r--arch/arm/boot/dts/omap5-cm-t54.dts12
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi1
-rw-r--r--arch/arm/include/asm/kvm_host.h10
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h5
-rw-r--r--arch/arm/kvm/coproc.c4
-rw-r--r--arch/arm/kvm/reset.c24
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c16
-rw-r--r--arch/arm/mach-omap2/display.c7
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c36
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts44
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts2
-rw-r--r--arch/arm64/include/asm/kvm_host.h11
-rw-r--r--arch/arm64/kvm/hyp/switch.c5
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c5
-rw-r--r--arch/arm64/kvm/reset.c50
-rw-r--r--arch/arm64/kvm/sys_regs.c50
-rw-r--r--arch/csky/include/asm/pgtable.h9
-rw-r--r--arch/csky/include/asm/processor.h4
-rw-r--r--arch/csky/kernel/dumpstack.c4
-rw-r--r--arch/csky/kernel/ptrace.c3
-rw-r--r--arch/csky/kernel/smp.c3
-rw-r--r--arch/csky/mm/ioremap.c14
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h4
-rw-r--r--arch/x86/events/core.c14
-rw-r--r--arch/x86/events/intel/core.c9
-rw-r--r--arch/x86/events/perf_event.h16
-rw-r--r--arch/x86/ia32/ia32_aout.c6
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/uv/bios.h8
-rw-r--r--arch/x86/kvm/vmx/nested.c12
-rw-r--r--arch/x86/kvm/vmx/vmx.c29
-rw-r--r--arch/x86/kvm/vmx/vmx.h10
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--arch/x86/platform/uv/bios_uv.c23
-rw-r--r--drivers/auxdisplay/ht16k33.c2
-rw-r--r--drivers/bus/ti-sysc.c6
-rw-r--r--drivers/clocksource/timer-ti-dm.c5
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c7
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c12
-rw-r--r--drivers/i2c/busses/i2c-cadence.c9
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/cap11xx.c35
-rw-r--r--drivers/input/keyboard/matrix_keypad.c2
-rw-r--r--drivers/input/keyboard/qt2160.c69
-rw-r--r--drivers/input/keyboard/st-keyscan.c4
-rw-r--r--drivers/input/misc/apanel.c24
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/misc/pwm-vibra.c19
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/mouse/elantech.c9
-rw-r--r--drivers/input/serio/ps2-gpio.c1
-rw-r--r--fs/nfs/write.c11
-rw-r--r--fs/nfsd/nfsctl.c4
-rw-r--r--include/kvm/arm_vgic.h6
-rw-r--r--include/linux/compiler_attributes.h14
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/perf_event.h5
-rw-r--r--kernel/events/core.c16
-rw-r--r--kernel/events/ring_buffer.c2
-rw-r--r--lib/crc32.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c49
-rw-r--r--net/sunrpc/debugfs.c2
-rw-r--r--net/sunrpc/xprtrdma/verbs.c3
-rw-r--r--virt/kvm/arm/arm.c10
-rw-r--r--virt/kvm/arm/mmu.c9
-rw-r--r--virt/kvm/arm/psci.c36
-rw-r--r--virt/kvm/arm/vgic/vgic-debug.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c30
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c34
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c8
-rw-r--r--virt/kvm/arm/vgic/vgic.c118
81 files changed, 735 insertions, 385 deletions
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 04758a2a87f0..67d77eee9433 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -644,6 +644,17 @@
644 }; 644 };
645}; 645};
646 646
647/* Configure pwm clock source for timers 8 & 9 */
648&timer8 {
649 assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
650 assigned-clock-parents = <&sys_clkin_ck>;
651};
652
653&timer9 {
654 assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
655 assigned-clock-parents = <&sys_clkin_ck>;
656};
657
647/* 658/*
648 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for 659 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
649 * uart1 wakeirq. 660 * uart1 wakeirq.
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index bc853ebeda22..61a06f6add3c 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -317,7 +317,8 @@
317 317
318 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { 318 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
319 pinctrl-single,pins = < 319 pinctrl-single,pins = <
320 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ 320 /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
321 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
321 >; 322 >;
322 }; 323 };
323 324
@@ -385,7 +386,8 @@
385 386
386 palmas: palmas@48 { 387 palmas: palmas@48 {
387 compatible = "ti,palmas"; 388 compatible = "ti,palmas";
388 interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ 389 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
390 interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
389 reg = <0x48>; 391 reg = <0x48>;
390 interrupt-controller; 392 interrupt-controller;
391 #interrupt-cells = <2>; 393 #interrupt-cells = <2>;
@@ -651,7 +653,8 @@
651 pinctrl-names = "default"; 653 pinctrl-names = "default";
652 pinctrl-0 = <&twl6040_pins>; 654 pinctrl-0 = <&twl6040_pins>;
653 655
654 interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ 656 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
657 interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
655 658
656 /* audpwron gpio defined in the board specific dts */ 659 /* audpwron gpio defined in the board specific dts */
657 660
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5e21fb430a65..e78d3718f145 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -181,6 +181,13 @@
181 OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ 181 OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
182 >; 182 >;
183 }; 183 };
184
185 palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
186 pinctrl-single,pins = <
187 /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
188 OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
189 >;
190 };
184}; 191};
185 192
186&omap5_pmx_core { 193&omap5_pmx_core {
@@ -414,8 +421,11 @@
414 421
415 palmas: palmas@48 { 422 palmas: palmas@48 {
416 compatible = "ti,palmas"; 423 compatible = "ti,palmas";
417 interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
418 reg = <0x48>; 424 reg = <0x48>;
425 pinctrl-0 = <&palmas_sys_nirq_pins>;
426 pinctrl-names = "default";
427 /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
428 interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
419 interrupt-controller; 429 interrupt-controller;
420 #interrupt-cells = <2>; 430 #interrupt-cells = <2>;
421 ti,system-power-controller; 431 ti,system-power-controller;
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 4acb501dd3f8..3ed49898f4b2 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -719,7 +719,6 @@
719 pm_qos = <&qos_lcdc0>, 719 pm_qos = <&qos_lcdc0>,
720 <&qos_lcdc1>, 720 <&qos_lcdc1>,
721 <&qos_cif0>, 721 <&qos_cif0>,
722 <&qos_cif1>,
723 <&qos_ipp>, 722 <&qos_ipp>,
724 <&qos_rga>; 723 <&qos_rga>;
725 }; 724 };
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index ca56537b61bc..50e89869178a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
48#define KVM_REQ_SLEEP \ 48#define KVM_REQ_SLEEP \
49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) 50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
51#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
51 52
52DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 53DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
53 54
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
147 148
148typedef struct kvm_cpu_context kvm_cpu_context_t; 149typedef struct kvm_cpu_context kvm_cpu_context_t;
149 150
151struct vcpu_reset_state {
152 unsigned long pc;
153 unsigned long r0;
154 bool be;
155 bool reset;
156};
157
150struct kvm_vcpu_arch { 158struct kvm_vcpu_arch {
151 struct kvm_cpu_context ctxt; 159 struct kvm_cpu_context ctxt;
152 160
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
186 /* Cache some mmu pages needed inside spinlock regions */ 194 /* Cache some mmu pages needed inside spinlock regions */
187 struct kvm_mmu_memory_cache mmu_page_cache; 195 struct kvm_mmu_memory_cache mmu_page_cache;
188 196
197 struct vcpu_reset_state reset_state;
198
189 /* Detect first run of a vcpu */ 199 /* Detect first run of a vcpu */
190 bool has_run_once; 200 bool has_run_once;
191}; 201};
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index c4b1d4fb1797..de2089501b8b 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
76#define S2_PMD_MASK PMD_MASK 76#define S2_PMD_MASK PMD_MASK
77#define S2_PMD_SIZE PMD_SIZE 77#define S2_PMD_SIZE PMD_SIZE
78 78
79static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
80{
81 return true;
82}
83
79#endif /* __ARM_S2_PGTABLE_H_ */ 84#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 222c1635bc7a..e8bd288fd5be 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
1450 reset_coproc_regs(vcpu, table, num); 1450 reset_coproc_regs(vcpu, table, num);
1451 1451
1452 for (num = 1; num < NR_CP15_REGS; num++) 1452 for (num = 1; num < NR_CP15_REGS; num++)
1453 if (vcpu_cp15(vcpu, num) == 0x42424242) 1453 WARN(vcpu_cp15(vcpu, num) == 0x42424242,
1454 panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); 1454 "Didn't reset vcpu_cp15(vcpu, %zi)", num);
1455} 1455}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 5ed0c3ee33d6..e53327912adc 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
26#include <asm/cputype.h> 26#include <asm/cputype.h>
27#include <asm/kvm_arm.h> 27#include <asm/kvm_arm.h>
28#include <asm/kvm_coproc.h> 28#include <asm/kvm_coproc.h>
29#include <asm/kvm_emulate.h>
29 30
30#include <kvm/arm_arch_timer.h> 31#include <kvm/arm_arch_timer.h>
31 32
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
69 /* Reset CP15 registers */ 70 /* Reset CP15 registers */
70 kvm_reset_coprocs(vcpu); 71 kvm_reset_coprocs(vcpu);
71 72
73 /*
74 * Additional reset state handling that PSCI may have imposed on us.
75 * Must be done after all the sys_reg reset.
76 */
77 if (READ_ONCE(vcpu->arch.reset_state.reset)) {
78 unsigned long target_pc = vcpu->arch.reset_state.pc;
79
80 /* Gracefully handle Thumb2 entry point */
81 if (target_pc & 1) {
82 target_pc &= ~1UL;
83 vcpu_set_thumb(vcpu);
84 }
85
86 /* Propagate caller endianness */
87 if (vcpu->arch.reset_state.be)
88 kvm_vcpu_set_be(vcpu);
89
90 *vcpu_pc(vcpu) = target_pc;
91 vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
92
93 vcpu->arch.reset_state.reset = false;
94 }
95
72 /* Reset arch_timer context */ 96 /* Reset arch_timer context */
73 return kvm_timer_vcpu_reset(vcpu); 97 return kvm_timer_vcpu_reset(vcpu);
74} 98}
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index a8b291f00109..dae514c8276a 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
152 mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && 152 mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
153 (cx->mpu_logic_state == PWRDM_POWER_OFF); 153 (cx->mpu_logic_state == PWRDM_POWER_OFF);
154 154
155 /* Enter broadcast mode for periodic timers */
156 tick_broadcast_enable();
157
158 /* Enter broadcast mode for one-shot timers */
155 tick_broadcast_enter(); 159 tick_broadcast_enter();
156 160
157 /* 161 /*
@@ -218,15 +222,6 @@ fail:
218 return index; 222 return index;
219} 223}
220 224
221/*
222 * For each cpu, setup the broadcast timer because local timers
223 * stops for the states above C1.
224 */
225static void omap_setup_broadcast_timer(void *arg)
226{
227 tick_broadcast_enable();
228}
229
230static struct cpuidle_driver omap4_idle_driver = { 225static struct cpuidle_driver omap4_idle_driver = {
231 .name = "omap4_idle", 226 .name = "omap4_idle",
232 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
319 if (!cpu_clkdm[0] || !cpu_clkdm[1]) 314 if (!cpu_clkdm[0] || !cpu_clkdm[1])
320 return -ENODEV; 315 return -ENODEV;
321 316
322 /* Configure the broadcast timer on each cpu */
323 on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
324
325 return cpuidle_register(idle_driver, cpu_online_mask); 317 return cpuidle_register(idle_driver, cpu_online_mask);
326} 318}
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index f86b72d1d59e..1444b4b4bd9f 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
83 u32 enable_mask, enable_shift; 83 u32 enable_mask, enable_shift;
84 u32 pipd_mask, pipd_shift; 84 u32 pipd_mask, pipd_shift;
85 u32 reg; 85 u32 reg;
86 int ret;
86 87
87 if (dsi_id == 0) { 88 if (dsi_id == 0) {
88 enable_mask = OMAP4_DSI1_LANEENABLE_MASK; 89 enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
98 return -ENODEV; 99 return -ENODEV;
99 } 100 }
100 101
101 regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg); 102 ret = regmap_read(omap4_dsi_mux_syscon,
103 OMAP4_DSIPHY_SYSCON_OFFSET,
104 &reg);
105 if (ret)
106 return ret;
102 107
103 reg &= ~enable_mask; 108 reg &= ~enable_mask;
104 reg &= ~pipd_mask; 109 reg &= ~pipd_mask;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index fc5fb776a710..17558be4bf0a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -50,6 +50,9 @@
50#define OMAP4_NR_BANKS 4 50#define OMAP4_NR_BANKS 4
51#define OMAP4_NR_IRQS 128 51#define OMAP4_NR_IRQS 128
52 52
53#define SYS_NIRQ1_EXT_SYS_IRQ_1 7
54#define SYS_NIRQ2_EXT_SYS_IRQ_2 119
55
53static void __iomem *wakeupgen_base; 56static void __iomem *wakeupgen_base;
54static void __iomem *sar_base; 57static void __iomem *sar_base;
55static DEFINE_RAW_SPINLOCK(wakeupgen_lock); 58static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
153 irq_chip_unmask_parent(d); 156 irq_chip_unmask_parent(d);
154} 157}
155 158
159/*
160 * The sys_nirq pins bypass peripheral modules and are wired directly
161 * to MPUSS wakeupgen. They get automatically inverted for GIC.
162 */
163static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
164{
165 bool inverted = false;
166
167 switch (type) {
168 case IRQ_TYPE_LEVEL_LOW:
169 type &= ~IRQ_TYPE_LEVEL_MASK;
170 type |= IRQ_TYPE_LEVEL_HIGH;
171 inverted = true;
172 break;
173 case IRQ_TYPE_EDGE_FALLING:
174 type &= ~IRQ_TYPE_EDGE_BOTH;
175 type |= IRQ_TYPE_EDGE_RISING;
176 inverted = true;
177 break;
178 default:
179 break;
180 }
181
182 if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
183 d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
184 pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
185 d->hwirq);
186
187 return irq_chip_set_type_parent(d, type);
188}
189
156#ifdef CONFIG_HOTPLUG_CPU 190#ifdef CONFIG_HOTPLUG_CPU
157static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); 191static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
158 192
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
446 .irq_mask = wakeupgen_mask, 480 .irq_mask = wakeupgen_mask,
447 .irq_unmask = wakeupgen_unmask, 481 .irq_unmask = wakeupgen_unmask,
448 .irq_retrigger = irq_chip_retrigger_hierarchy, 482 .irq_retrigger = irq_chip_retrigger_hierarchy,
449 .irq_set_type = irq_chip_set_type_parent, 483 .irq_set_type = wakeupgen_irq_set_type,
450 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, 484 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
451#ifdef CONFIG_SMP 485#ifdef CONFIG_SMP
452 .irq_set_affinity = irq_chip_set_affinity_parent, 486 .irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 64acccc4bfcb..f74b13aa5aa5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -227,34 +227,34 @@
227 227
228 pinctrl_usdhc1_100mhz: usdhc1-100grp { 228 pinctrl_usdhc1_100mhz: usdhc1-100grp {
229 fsl,pins = < 229 fsl,pins = <
230 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 230 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d
231 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 231 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd
232 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 232 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd
233 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 233 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd
234 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 234 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd
235 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 235 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd
236 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 236 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd
237 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 237 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd
238 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 238 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd
239 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 239 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd
240 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 240 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d
241 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 241 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
242 >; 242 >;
243 }; 243 };
244 244
245 pinctrl_usdhc1_200mhz: usdhc1-200grp { 245 pinctrl_usdhc1_200mhz: usdhc1-200grp {
246 fsl,pins = < 246 fsl,pins = <
247 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 247 MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f
248 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 248 MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf
249 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 249 MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf
250 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 250 MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf
251 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 251 MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf
252 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 252 MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf
253 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 253 MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf
254 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 254 MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf
255 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 255 MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf
256 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 256 MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf
257 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 257 MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f
258 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 258 MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
259 >; 259 >;
260 }; 260 };
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 8e9d6d5ed7b2..b6d31499fb43 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -360,6 +360,8 @@
360 <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, 360 <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
361 <&clk IMX8MQ_CLK_USDHC1_ROOT>; 361 <&clk IMX8MQ_CLK_USDHC1_ROOT>;
362 clock-names = "ipg", "ahb", "per"; 362 clock-names = "ipg", "ahb", "per";
363 assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
364 assigned-clock-rates = <400000000>;
363 fsl,tuning-start-tap = <20>; 365 fsl,tuning-start-tap = <20>;
364 fsl,tuning-step = <2>; 366 fsl,tuning-step = <2>;
365 bus-width = <4>; 367 bus-width = <4>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index bd937d68ca3b..040b36ef0dd2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -40,6 +40,7 @@
40 pinctrl-0 = <&usb30_host_drv>; 40 pinctrl-0 = <&usb30_host_drv>;
41 regulator-name = "vcc_host_5v"; 41 regulator-name = "vcc_host_5v";
42 regulator-always-on; 42 regulator-always-on;
43 regulator-boot-on;
43 vin-supply = <&vcc_sys>; 44 vin-supply = <&vcc_sys>;
44 }; 45 };
45 46
@@ -51,6 +52,7 @@
51 pinctrl-0 = <&usb20_host_drv>; 52 pinctrl-0 = <&usb20_host_drv>;
52 regulator-name = "vcc_host1_5v"; 53 regulator-name = "vcc_host1_5v";
53 regulator-always-on; 54 regulator-always-on;
55 regulator-boot-on;
54 vin-supply = <&vcc_sys>; 56 vin-supply = <&vcc_sys>;
55 }; 57 };
56 58
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 1ee0dc0d9f10..d1cf404b8708 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -22,7 +22,7 @@
22 backlight = <&backlight>; 22 backlight = <&backlight>;
23 power-supply = <&pp3300_disp>; 23 power-supply = <&pp3300_disp>;
24 24
25 ports { 25 port {
26 panel_in_edp: endpoint { 26 panel_in_edp: endpoint {
27 remote-endpoint = <&edp_out_panel>; 27 remote-endpoint = <&edp_out_panel>;
28 }; 28 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index 81e73103fa78..15e254a77391 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -43,7 +43,7 @@
43 backlight = <&backlight>; 43 backlight = <&backlight>;
44 power-supply = <&pp3300_disp>; 44 power-supply = <&pp3300_disp>;
45 45
46 ports { 46 port {
47 panel_in_edp: endpoint { 47 panel_in_edp: endpoint {
48 remote-endpoint = <&edp_out_panel>; 48 remote-endpoint = <&edp_out_panel>;
49 }; 49 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index 0b8f1edbd746..b48a63c3efc3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -91,7 +91,7 @@
91 pinctrl-0 = <&lcd_panel_reset>; 91 pinctrl-0 = <&lcd_panel_reset>;
92 power-supply = <&vcc3v3_s0>; 92 power-supply = <&vcc3v3_s0>;
93 93
94 ports { 94 port {
95 panel_in_edp: endpoint { 95 panel_in_edp: endpoint {
96 remote-endpoint = <&edp_out_panel>; 96 remote-endpoint = <&edp_out_panel>;
97 }; 97 };
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7732d0ba4e60..da3fc7324d68 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
48#define KVM_REQ_SLEEP \ 48#define KVM_REQ_SLEEP \
49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 49 KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) 50#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
51#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
51 52
52DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 53DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
53 54
@@ -208,6 +209,13 @@ struct kvm_cpu_context {
208 209
209typedef struct kvm_cpu_context kvm_cpu_context_t; 210typedef struct kvm_cpu_context kvm_cpu_context_t;
210 211
212struct vcpu_reset_state {
213 unsigned long pc;
214 unsigned long r0;
215 bool be;
216 bool reset;
217};
218
211struct kvm_vcpu_arch { 219struct kvm_vcpu_arch {
212 struct kvm_cpu_context ctxt; 220 struct kvm_cpu_context ctxt;
213 221
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch {
297 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ 305 /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
298 u64 vsesr_el2; 306 u64 vsesr_el2;
299 307
308 /* Additional reset state */
309 struct vcpu_reset_state reset_state;
310
300 /* True when deferrable sysregs are loaded on the physical CPU, 311 /* True when deferrable sysregs are loaded on the physical CPU,
301 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ 312 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
302 bool sysregs_loaded_on_cpu; 313 bool sysregs_loaded_on_cpu;
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index b0b1478094b4..421ebf6f7086 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
23#include <kvm/arm_psci.h> 23#include <kvm/arm_psci.h>
24 24
25#include <asm/cpufeature.h> 25#include <asm/cpufeature.h>
26#include <asm/kprobes.h>
26#include <asm/kvm_asm.h> 27#include <asm/kvm_asm.h>
27#include <asm/kvm_emulate.h> 28#include <asm/kvm_emulate.h>
28#include <asm/kvm_host.h> 29#include <asm/kvm_host.h>
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
107 108
108 write_sysreg(kvm_get_hyp_vector(), vbar_el1); 109 write_sysreg(kvm_get_hyp_vector(), vbar_el1);
109} 110}
111NOKPROBE_SYMBOL(activate_traps_vhe);
110 112
111static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) 113static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
112{ 114{
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void)
154 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); 156 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
155 write_sysreg(vectors, vbar_el1); 157 write_sysreg(vectors, vbar_el1);
156} 158}
159NOKPROBE_SYMBOL(deactivate_traps_vhe);
157 160
158static void __hyp_text __deactivate_traps_nvhe(void) 161static void __hyp_text __deactivate_traps_nvhe(void)
159{ 162{
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
513 516
514 return exit_code; 517 return exit_code;
515} 518}
519NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
516 520
517/* Switch to the guest for legacy non-VHE systems */ 521/* Switch to the guest for legacy non-VHE systems */
518int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) 522int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
620 read_sysreg_el2(esr), read_sysreg_el2(far), 624 read_sysreg_el2(esr), read_sysreg_el2(far),
621 read_sysreg(hpfar_el2), par, vcpu); 625 read_sysreg(hpfar_el2), par, vcpu);
622} 626}
627NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
623 628
624void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) 629void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
625{ 630{
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 68d6f7c3b237..b426e2cf973c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -18,6 +18,7 @@
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <linux/kvm_host.h> 19#include <linux/kvm_host.h>
20 20
21#include <asm/kprobes.h>
21#include <asm/kvm_asm.h> 22#include <asm/kvm_asm.h>
22#include <asm/kvm_emulate.h> 23#include <asm/kvm_emulate.h>
23#include <asm/kvm_hyp.h> 24#include <asm/kvm_hyp.h>
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
98{ 99{
99 __sysreg_save_common_state(ctxt); 100 __sysreg_save_common_state(ctxt);
100} 101}
102NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
101 103
102void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) 104void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
103{ 105{
104 __sysreg_save_common_state(ctxt); 106 __sysreg_save_common_state(ctxt);
105 __sysreg_save_el2_return_state(ctxt); 107 __sysreg_save_el2_return_state(ctxt);
106} 108}
109NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
107 110
108static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) 111static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
109{ 112{
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
188{ 191{
189 __sysreg_restore_common_state(ctxt); 192 __sysreg_restore_common_state(ctxt);
190} 193}
194NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
191 195
192void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) 196void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
193{ 197{
194 __sysreg_restore_common_state(ctxt); 198 __sysreg_restore_common_state(ctxt);
195 __sysreg_restore_el2_return_state(ctxt); 199 __sysreg_restore_el2_return_state(ctxt);
196} 200}
201NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
197 202
198void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) 203void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
199{ 204{
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b72a3dd56204..f16a5f8ff2b4 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
32#include <asm/kvm_arm.h> 32#include <asm/kvm_arm.h>
33#include <asm/kvm_asm.h> 33#include <asm/kvm_asm.h>
34#include <asm/kvm_coproc.h> 34#include <asm/kvm_coproc.h>
35#include <asm/kvm_emulate.h>
35#include <asm/kvm_mmu.h> 36#include <asm/kvm_mmu.h>
36 37
37/* Maximum phys_shift supported for any VM on this host */ 38/* Maximum phys_shift supported for any VM on this host */
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
105 * This function finds the right table above and sets the registers on 106 * This function finds the right table above and sets the registers on
106 * the virtual CPU struct to their architecturally defined reset 107 * the virtual CPU struct to their architecturally defined reset
107 * values. 108 * values.
109 *
110 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
111 * ioctl or as part of handling a request issued by another VCPU in the PSCI
112 * handling code. In the first case, the VCPU will not be loaded, and in the
113 * second case the VCPU will be loaded. Because this function operates purely
114 * on the memory-backed valus of system registers, we want to do a full put if
115 * we were loaded (handling a request) and load the values back at the end of
116 * the function. Otherwise we leave the state alone. In both cases, we
117 * disable preemption around the vcpu reset as we would otherwise race with
118 * preempt notifiers which also call put/load.
108 */ 119 */
109int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 120int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
110{ 121{
111 const struct kvm_regs *cpu_reset; 122 const struct kvm_regs *cpu_reset;
123 int ret = -EINVAL;
124 bool loaded;
125
126 preempt_disable();
127 loaded = (vcpu->cpu != -1);
128 if (loaded)
129 kvm_arch_vcpu_put(vcpu);
112 130
113 switch (vcpu->arch.target) { 131 switch (vcpu->arch.target) {
114 default: 132 default:
115 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { 133 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
116 if (!cpu_has_32bit_el1()) 134 if (!cpu_has_32bit_el1())
117 return -EINVAL; 135 goto out;
118 cpu_reset = &default_regs_reset32; 136 cpu_reset = &default_regs_reset32;
119 } else { 137 } else {
120 cpu_reset = &default_regs_reset; 138 cpu_reset = &default_regs_reset;
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
129 /* Reset system registers */ 147 /* Reset system registers */
130 kvm_reset_sys_regs(vcpu); 148 kvm_reset_sys_regs(vcpu);
131 149
150 /*
151 * Additional reset state handling that PSCI may have imposed on us.
152 * Must be done after all the sys_reg reset.
153 */
154 if (vcpu->arch.reset_state.reset) {
155 unsigned long target_pc = vcpu->arch.reset_state.pc;
156
157 /* Gracefully handle Thumb2 entry point */
158 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
159 target_pc &= ~1UL;
160 vcpu_set_thumb(vcpu);
161 }
162
163 /* Propagate caller endianness */
164 if (vcpu->arch.reset_state.be)
165 kvm_vcpu_set_be(vcpu);
166
167 *vcpu_pc(vcpu) = target_pc;
168 vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
169
170 vcpu->arch.reset_state.reset = false;
171 }
172
132 /* Reset PMU */ 173 /* Reset PMU */
133 kvm_pmu_vcpu_reset(vcpu); 174 kvm_pmu_vcpu_reset(vcpu);
134 175
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
137 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; 178 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
138 179
139 /* Reset timer */ 180 /* Reset timer */
140 return kvm_timer_vcpu_reset(vcpu); 181 ret = kvm_timer_vcpu_reset(vcpu);
182out:
183 if (loaded)
184 kvm_arch_vcpu_load(vcpu, smp_processor_id());
185 preempt_enable();
186 return ret;
141} 187}
142 188
143void kvm_set_ipa_limit(void) 189void kvm_set_ipa_limit(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e3e37228ae4e..c936aa40c3f4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
314 return read_zero(vcpu, p); 314 return read_zero(vcpu, p);
315} 315}
316 316
317static bool trap_undef(struct kvm_vcpu *vcpu, 317/*
318 struct sys_reg_params *p, 318 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
319 const struct sys_reg_desc *r) 319 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
320 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
321 * treat it separately.
322 */
323static bool trap_loregion(struct kvm_vcpu *vcpu,
324 struct sys_reg_params *p,
325 const struct sys_reg_desc *r)
320{ 326{
321 kvm_inject_undefined(vcpu); 327 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
322 return false; 328 u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
329 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
330
331 if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
332 kvm_inject_undefined(vcpu);
333 return false;
334 }
335
336 if (p->is_write && sr == SYS_LORID_EL1)
337 return write_to_read_only(vcpu, p, r);
338
339 return trap_raz_wi(vcpu, p, r);
323} 340}
324 341
325static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, 342static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
1048 if (val & ptrauth_mask) 1065 if (val & ptrauth_mask)
1049 kvm_debug("ptrauth unsupported for guests, suppressing\n"); 1066 kvm_debug("ptrauth unsupported for guests, suppressing\n");
1050 val &= ~ptrauth_mask; 1067 val &= ~ptrauth_mask;
1051 } else if (id == SYS_ID_AA64MMFR1_EL1) {
1052 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
1053 kvm_debug("LORegions unsupported for guests, suppressing\n");
1054
1055 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
1056 } 1068 }
1057 1069
1058 return val; 1070 return val;
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1338 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, 1350 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1339 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, 1351 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1340 1352
1341 { SYS_DESC(SYS_LORSA_EL1), trap_undef }, 1353 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1342 { SYS_DESC(SYS_LOREA_EL1), trap_undef }, 1354 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1343 { SYS_DESC(SYS_LORN_EL1), trap_undef }, 1355 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1344 { SYS_DESC(SYS_LORC_EL1), trap_undef }, 1356 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1345 { SYS_DESC(SYS_LORID_EL1), trap_undef }, 1357 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1346 1358
1347 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, 1359 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1348 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, 1360 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2596 table = get_target_table(vcpu->arch.target, true, &num); 2608 table = get_target_table(vcpu->arch.target, true, &num);
2597 reset_sys_reg_descs(vcpu, table, num); 2609 reset_sys_reg_descs(vcpu, table, num);
2598 2610
2599 for (num = 1; num < NR_SYS_REGS; num++) 2611 for (num = 1; num < NR_SYS_REGS; num++) {
2600 if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) 2612 if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
2601 panic("Didn't reset __vcpu_sys_reg(%zi)", num); 2613 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
2614 break;
2615 }
2602} 2616}
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index edfcbb25fd9f..dcea277c09ae 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -45,8 +45,8 @@
45 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) 45 ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
46#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) 46#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
47#define pte_clear(mm, addr, ptep) set_pte((ptep), \ 47#define pte_clear(mm, addr, ptep) set_pte((ptep), \
48 (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) 48 (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
49#define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) 49#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
50#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 50#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
51#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) 51#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
52#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ 52#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
241 241
242#define pgd_index(address) ((address) >> PGDIR_SHIFT) 242#define pgd_index(address) ((address) >> PGDIR_SHIFT)
243 243
244#define __HAVE_PHYS_MEM_ACCESS_PROT
245struct file;
246extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
247 unsigned long size, pgprot_t vma_prot);
248
244/* 249/*
245 * Macro to make mark a page protection value as "uncacheable". Note 250 * Macro to make mark a page protection value as "uncacheable". Note
246 * that "protection" is really a misnomer here as the protection value 251 * that "protection" is really a misnomer here as the protection value
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 8f454810514f..21e0bd5293dd 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -49,7 +49,7 @@ struct thread_struct {
49}; 49};
50 50
51#define INIT_THREAD { \ 51#define INIT_THREAD { \
52 .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ 52 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
53 .sr = DEFAULT_PSR_VALUE, \ 53 .sr = DEFAULT_PSR_VALUE, \
54} 54}
55 55
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p);
95#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) 95#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp)
96 96
97#define task_pt_regs(p) \ 97#define task_pt_regs(p) \
98 ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) 98 ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
99 99
100#define cpu_relax() barrier() 100#define cpu_relax() barrier()
101 101
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c
index 659253e9989c..d67f9777cfd9 100644
--- a/arch/csky/kernel/dumpstack.c
+++ b/arch/csky/kernel/dumpstack.c
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
38 if (task) 38 if (task)
39 stack = (unsigned long *)thread_saved_fp(task); 39 stack = (unsigned long *)thread_saved_fp(task);
40 else 40 else
41#ifdef CONFIG_STACKTRACE
42 asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
43#else
41 stack = (unsigned long *)&stack; 44 stack = (unsigned long *)&stack;
45#endif
42 } 46 }
43 47
44 show_trace(stack); 48 show_trace(stack);
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 57f1afe19a52..f2f12fff36f7 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -8,6 +8,7 @@
8#include <linux/ptrace.h> 8#include <linux/ptrace.h>
9#include <linux/regset.h> 9#include <linux/regset.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/sched/task_stack.h>
11#include <linux/signal.h> 12#include <linux/signal.h>
12#include <linux/smp.h> 13#include <linux/smp.h>
13#include <linux/uaccess.h> 14#include <linux/uaccess.h>
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target,
159static const struct user_regset csky_regsets[] = { 160static const struct user_regset csky_regsets[] = {
160 [REGSET_GPR] = { 161 [REGSET_GPR] = {
161 .core_note_type = NT_PRSTATUS, 162 .core_note_type = NT_PRSTATUS,
162 .n = ELF_NGREG, 163 .n = sizeof(struct pt_regs) / sizeof(u32),
163 .size = sizeof(u32), 164 .size = sizeof(u32),
164 .align = sizeof(u32), 165 .align = sizeof(u32),
165 .get = &gpr_get, 166 .get = &gpr_get,
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index ddc4dd79f282..b07a534b3062 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
160{ 160{
161 unsigned long mask = 1 << cpu; 161 unsigned long mask = 1 << cpu;
162 162
163 secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; 163 secondary_stack =
164 (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
164 secondary_hint = mfcr("cr31"); 165 secondary_hint = mfcr("cr31");
165 secondary_ccr = mfcr("cr18"); 166 secondary_ccr = mfcr("cr18");
166 167
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
index cb7c03e5cd21..8473b6bdf512 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr)
46 vunmap((void *)((unsigned long)addr & PAGE_MASK)); 46 vunmap((void *)((unsigned long)addr & PAGE_MASK));
47} 47}
48EXPORT_SYMBOL(iounmap); 48EXPORT_SYMBOL(iounmap);
49
50pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
51 unsigned long size, pgprot_t vma_prot)
52{
53 if (!pfn_valid(pfn)) {
54 vma_prot.pgprot |= _PAGE_SO;
55 return pgprot_noncached(vma_prot);
56 } else if (file->f_flags & O_SYNC) {
57 return pgprot_noncached(vma_prot);
58 }
59
60 return vma_prot;
61}
62EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index c9bfe526ca9d..d8c8d7c9df15 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud)
904 904
905static inline int pud_present(pud_t pud) 905static inline int pud_present(pud_t pud)
906{ 906{
907 return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); 907 return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
908} 908}
909 909
910extern struct page *pud_page(pud_t pud); 910extern struct page *pud_page(pud_t pud);
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd)
951 951
952static inline int pgd_present(pgd_t pgd) 952static inline int pgd_present(pgd_t pgd)
953{ 953{
954 return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); 954 return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
955} 955}
956 956
957static inline pte_t pgd_pte(pgd_t pgd) 957static inline pte_t pgd_pte(pgd_t pgd)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 374a19712e20..b684f0294f35 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
2278 x86_pmu.check_microcode(); 2278 x86_pmu.check_microcode();
2279} 2279}
2280 2280
2281static int x86_pmu_check_period(struct perf_event *event, u64 value)
2282{
2283 if (x86_pmu.check_period && x86_pmu.check_period(event, value))
2284 return -EINVAL;
2285
2286 if (value && x86_pmu.limit_period) {
2287 if (x86_pmu.limit_period(event, value) > value)
2288 return -EINVAL;
2289 }
2290
2291 return 0;
2292}
2293
2281static struct pmu pmu = { 2294static struct pmu pmu = {
2282 .pmu_enable = x86_pmu_enable, 2295 .pmu_enable = x86_pmu_enable,
2283 .pmu_disable = x86_pmu_disable, 2296 .pmu_disable = x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
2302 .event_idx = x86_pmu_event_idx, 2315 .event_idx = x86_pmu_event_idx,
2303 .sched_task = x86_pmu_sched_task, 2316 .sched_task = x86_pmu_sched_task,
2304 .task_ctx_size = sizeof(struct x86_perf_task_context), 2317 .task_ctx_size = sizeof(struct x86_perf_task_context),
2318 .check_period = x86_pmu_check_period,
2305}; 2319};
2306 2320
2307void arch_perf_update_userpage(struct perf_event *event, 2321void arch_perf_update_userpage(struct perf_event *event,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index daafb893449b..730978dff63f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
3587 intel_pmu_lbr_sched_task(ctx, sched_in); 3587 intel_pmu_lbr_sched_task(ctx, sched_in);
3588} 3588}
3589 3589
3590static int intel_pmu_check_period(struct perf_event *event, u64 value)
3591{
3592 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3593}
3594
3590PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 3595PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3591 3596
3592PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 3597PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
3667 .cpu_starting = intel_pmu_cpu_starting, 3672 .cpu_starting = intel_pmu_cpu_starting,
3668 .cpu_dying = intel_pmu_cpu_dying, 3673 .cpu_dying = intel_pmu_cpu_dying,
3669 .cpu_dead = intel_pmu_cpu_dead, 3674 .cpu_dead = intel_pmu_cpu_dead,
3675
3676 .check_period = intel_pmu_check_period,
3670}; 3677};
3671 3678
3672static struct attribute *intel_pmu_attrs[]; 3679static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {
3711 3718
3712 .guest_get_msrs = intel_guest_get_msrs, 3719 .guest_get_msrs = intel_guest_get_msrs,
3713 .sched_task = intel_pmu_sched_task, 3720 .sched_task = intel_pmu_sched_task,
3721
3722 .check_period = intel_pmu_check_period,
3714}; 3723};
3715 3724
3716static __init void intel_clovertown_quirk(void) 3725static __init void intel_clovertown_quirk(void)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 78d7b7031bfc..d46fd6754d92 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -646,6 +646,11 @@ struct x86_pmu {
646 * Intel host/guest support (KVM) 646 * Intel host/guest support (KVM)
647 */ 647 */
648 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); 648 struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
649
650 /*
651 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
652 */
653 int (*check_period) (struct perf_event *event, u64 period);
649}; 654};
650 655
651struct x86_perf_task_context { 656struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
857 862
858#ifdef CONFIG_CPU_SUP_INTEL 863#ifdef CONFIG_CPU_SUP_INTEL
859 864
860static inline bool intel_pmu_has_bts(struct perf_event *event) 865static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
861{ 866{
862 struct hw_perf_event *hwc = &event->hw; 867 struct hw_perf_event *hwc = &event->hw;
863 unsigned int hw_event, bts_event; 868 unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
868 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; 873 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
869 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 874 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
870 875
871 return hw_event == bts_event && hwc->sample_period == 1; 876 return hw_event == bts_event && period == 1;
877}
878
879static inline bool intel_pmu_has_bts(struct perf_event *event)
880{
881 struct hw_perf_event *hwc = &event->hw;
882
883 return intel_pmu_has_bts_period(event, hwc->sample_period);
872} 884}
873 885
874int intel_pmu_save_and_restart(struct perf_event *event); 886int intel_pmu_save_and_restart(struct perf_event *event);
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index f65b78d32f5e..7dbbe9ffda17 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
51/* 51/*
52 * fill in the user structure for a core dump.. 52 * fill in the user structure for a core dump..
53 */ 53 */
54static void dump_thread32(struct pt_regs *regs, struct user32 *dump) 54static void fill_dump(struct pt_regs *regs, struct user32 *dump)
55{ 55{
56 u32 fs, gs; 56 u32 fs, gs;
57 memset(dump, 0, sizeof(*dump)); 57 memset(dump, 0, sizeof(*dump));
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
157 fs = get_fs(); 157 fs = get_fs();
158 set_fs(KERNEL_DS); 158 set_fs(KERNEL_DS);
159 has_dumped = 1; 159 has_dumped = 1;
160
161 fill_dump(cprm->regs, &dump);
162
160 strncpy(dump.u_comm, current->comm, sizeof(current->comm)); 163 strncpy(dump.u_comm, current->comm, sizeof(current->comm));
161 dump.u_ar0 = offsetof(struct user32, regs); 164 dump.u_ar0 = offsetof(struct user32, regs);
162 dump.signal = cprm->siginfo->si_signo; 165 dump.signal = cprm->siginfo->si_signo;
163 dump_thread32(cprm->regs, &dump);
164 166
165 /* 167 /*
166 * If the size of the dump file exceeds the rlimit, then see 168 * If the size of the dump file exceeds the rlimit, then see
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index d9a9993af882..9f15384c504a 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -52,6 +52,8 @@
52 52
53#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 53#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
54 54
55#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
56
55/* "Small Core" Processors (Atom) */ 57/* "Small Core" Processors (Atom) */
56 58
57#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ 59#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index e652a7cc6186..3f697a9e3f59 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -48,7 +48,8 @@ enum {
48 BIOS_STATUS_SUCCESS = 0, 48 BIOS_STATUS_SUCCESS = 0,
49 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, 49 BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
50 BIOS_STATUS_EINVAL = -EINVAL, 50 BIOS_STATUS_EINVAL = -EINVAL,
51 BIOS_STATUS_UNAVAIL = -EBUSY 51 BIOS_STATUS_UNAVAIL = -EBUSY,
52 BIOS_STATUS_ABORT = -EINTR,
52}; 53};
53 54
54/* Address map parameters */ 55/* Address map parameters */
@@ -167,4 +168,9 @@ extern long system_serial_number;
167 168
168extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ 169extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
169 170
171/*
172 * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
173 */
174extern struct semaphore __efi_uv_runtime_lock;
175
170#endif /* _ASM_X86_UV_BIOS_H */ 176#endif /* _ASM_X86_UV_BIOS_H */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d8ea4ebd79e7..d737a51a53ca 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2473,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2473 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2473 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2474 return -EINVAL; 2474 return -EINVAL;
2475 2475
2476 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2477 nested_cpu_has_save_preemption_timer(vmcs12))
2478 return -EINVAL;
2479
2476 if (nested_cpu_has_ept(vmcs12) && 2480 if (nested_cpu_has_ept(vmcs12) &&
2477 !valid_ept_address(vcpu, vmcs12->ept_pointer)) 2481 !valid_ept_address(vcpu, vmcs12->ept_pointer))
2478 return -EINVAL; 2482 return -EINVAL;
@@ -5557,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5557 * secondary cpu-based controls. Do not include those that 5561 * secondary cpu-based controls. Do not include those that
5558 * depend on CPUID bits, they are added later by vmx_cpuid_update. 5562 * depend on CPUID bits, they are added later by vmx_cpuid_update.
5559 */ 5563 */
5560 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 5564 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
5561 msrs->secondary_ctls_low, 5565 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5562 msrs->secondary_ctls_high); 5566 msrs->secondary_ctls_low,
5567 msrs->secondary_ctls_high);
5568
5563 msrs->secondary_ctls_low = 0; 5569 msrs->secondary_ctls_low = 0;
5564 msrs->secondary_ctls_high &= 5570 msrs->secondary_ctls_high &=
5565 SECONDARY_EXEC_DESC | 5571 SECONDARY_EXEC_DESC |
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 95d618045001..30a6bcd735ec 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
863 if (!entry_only) 863 if (!entry_only)
864 j = find_msr(&m->host, msr); 864 j = find_msr(&m->host, msr);
865 865
866 if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { 866 if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
867 (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
867 printk_once(KERN_WARNING "Not enough msr switch entries. " 868 printk_once(KERN_WARNING "Not enough msr switch entries. "
868 "Can't add msr %x\n", msr); 869 "Can't add msr %x\n", msr);
869 return; 870 return;
@@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
1193 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) 1194 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
1194 return; 1195 return;
1195 1196
1196 /*
1197 * First handle the simple case where no cmpxchg is necessary; just
1198 * allow posting non-urgent interrupts.
1199 *
1200 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
1201 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
1202 * expects the VCPU to be on the blocked_vcpu_list that matches
1203 * PI.NDST.
1204 */
1205 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
1206 vcpu->cpu == cpu) {
1207 pi_clear_sn(pi_desc);
1208 return;
1209 }
1210
1211 /* The full case. */ 1197 /* The full case. */
1212 do { 1198 do {
1213 old.control = new.control = pi_desc->control; 1199 old.control = new.control = pi_desc->control;
@@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
1222 new.sn = 0; 1208 new.sn = 0;
1223 } while (cmpxchg64(&pi_desc->control, old.control, 1209 } while (cmpxchg64(&pi_desc->control, old.control,
1224 new.control) != old.control); 1210 new.control) != old.control);
1211
1212 /*
1213 * Clear SN before reading the bitmap. The VT-d firmware
1214 * writes the bitmap and reads SN atomically (5.2.3 in the
1215 * spec), so it doesn't really have a memory barrier that
1216 * pairs with this, but we cannot do that and we need one.
1217 */
1218 smp_mb__after_atomic();
1219
1220 if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
1221 pi_set_on(pi_desc);
1225} 1222}
1226 1223
1227/* 1224/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 99328954c2fc..0ac0a64c7790 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
337 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); 337 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
338} 338}
339 339
340static inline void pi_clear_sn(struct pi_desc *pi_desc) 340static inline void pi_set_sn(struct pi_desc *pi_desc)
341{ 341{
342 return clear_bit(POSTED_INTR_SN, 342 return set_bit(POSTED_INTR_SN,
343 (unsigned long *)&pi_desc->control); 343 (unsigned long *)&pi_desc->control);
344} 344}
345 345
346static inline void pi_set_sn(struct pi_desc *pi_desc) 346static inline void pi_set_on(struct pi_desc *pi_desc)
347{ 347{
348 return set_bit(POSTED_INTR_SN, 348 set_bit(POSTED_INTR_ON,
349 (unsigned long *)&pi_desc->control); 349 (unsigned long *)&pi_desc->control);
350} 350}
351 351
352static inline void pi_clear_on(struct pi_desc *pi_desc) 352static inline void pi_clear_on(struct pi_desc *pi_desc)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e67ecf25e690..941f932373d0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7801,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
7801 * 1) We should set ->mode before checking ->requests. Please see 7801 * 1) We should set ->mode before checking ->requests. Please see
7802 * the comment in kvm_vcpu_exiting_guest_mode(). 7802 * the comment in kvm_vcpu_exiting_guest_mode().
7803 * 7803 *
7804 * 2) For APICv, we should set ->mode before checking PIR.ON. This 7804 * 2) For APICv, we should set ->mode before checking PID.ON. This
7805 * pairs with the memory barrier implicit in pi_test_and_set_on 7805 * pairs with the memory barrier implicit in pi_test_and_set_on
7806 * (see vmx_deliver_posted_interrupt). 7806 * (see vmx_deliver_posted_interrupt).
7807 * 7807 *
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 4a6a5a26c582..eb33432f2f24 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -29,7 +29,8 @@
29 29
30struct uv_systab *uv_systab; 30struct uv_systab *uv_systab;
31 31
32s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) 32static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
33 u64 a4, u64 a5)
33{ 34{
34 struct uv_systab *tab = uv_systab; 35 struct uv_systab *tab = uv_systab;
35 s64 ret; 36 s64 ret;
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
51 52
52 return ret; 53 return ret;
53} 54}
55
56s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
57{
58 s64 ret;
59
60 if (down_interruptible(&__efi_uv_runtime_lock))
61 return BIOS_STATUS_ABORT;
62
63 ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
64 up(&__efi_uv_runtime_lock);
65
66 return ret;
67}
54EXPORT_SYMBOL_GPL(uv_bios_call); 68EXPORT_SYMBOL_GPL(uv_bios_call);
55 69
56s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, 70s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
59 unsigned long bios_flags; 73 unsigned long bios_flags;
60 s64 ret; 74 s64 ret;
61 75
76 if (down_interruptible(&__efi_uv_runtime_lock))
77 return BIOS_STATUS_ABORT;
78
62 local_irq_save(bios_flags); 79 local_irq_save(bios_flags);
63 ret = uv_bios_call(which, a1, a2, a3, a4, a5); 80 ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
64 local_irq_restore(bios_flags); 81 local_irq_restore(bios_flags);
65 82
83 up(&__efi_uv_runtime_lock);
84
66 return ret; 85 return ret;
67} 86}
68 87
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a43276c76fc6..21393ec3b9a4 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
509 struct ht16k33_priv *priv = i2c_get_clientdata(client); 509 struct ht16k33_priv *priv = i2c_get_clientdata(client);
510 struct ht16k33_fbdev *fbdev = &priv->fbdev; 510 struct ht16k33_fbdev *fbdev = &priv->fbdev;
511 511
512 cancel_delayed_work(&fbdev->work); 512 cancel_delayed_work_sync(&fbdev->work);
513 unregister_framebuffer(fbdev->info); 513 unregister_framebuffer(fbdev->info);
514 framebuffer_release(fbdev->info); 514 framebuffer_release(fbdev->info);
515 free_page((unsigned long) fbdev->buffer); 515 free_page((unsigned long) fbdev->buffer);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f94d33525771..d299ec79e4c3 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
781 SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, 781 SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
782 SYSC_QUIRK_LEGACY_IDLE), 782 SYSC_QUIRK_LEGACY_IDLE),
783 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 783 SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
784 SYSC_QUIRK_LEGACY_IDLE), 784 0),
785 /* Some timers on omap4 and later */ 785 /* Some timers on omap4 and later */
786 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, 786 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
787 SYSC_QUIRK_LEGACY_IDLE), 787 0),
788 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, 788 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
789 SYSC_QUIRK_LEGACY_IDLE), 789 0),
790 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, 790 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
791 SYSC_QUIRK_LEGACY_IDLE), 791 SYSC_QUIRK_LEGACY_IDLE),
792 /* Uarts on omap4 and later */ 792 /* Uarts on omap4 and later */
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 595124074821..c364027638e1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
154 if (IS_ERR(parent)) 154 if (IS_ERR(parent))
155 return -ENODEV; 155 return -ENODEV;
156 156
157 /* Bail out if both clocks point to fck */
158 if (clk_is_match(parent, timer->fclk))
159 return 0;
160
157 ret = clk_set_parent(timer->fclk, parent); 161 ret = clk_set_parent(timer->fclk, parent);
158 if (ret < 0) 162 if (ret < 0)
159 pr_err("%s: failed to set parent\n", __func__); 163 pr_err("%s: failed to set parent\n", __func__);
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
864 timer->pdev = pdev; 868 timer->pdev = pdev;
865 869
866 pm_runtime_enable(dev); 870 pm_runtime_enable(dev);
867 pm_runtime_irq_safe(dev);
868 871
869 if (!timer->reserved) { 872 if (!timer->reserved) {
870 ret = pm_runtime_get_sync(dev); 873 ret = pm_runtime_get_sync(dev);
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 8903b9ccfc2b..e2abfdb5cee6 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
147static DEFINE_SEMAPHORE(efi_runtime_lock); 147static DEFINE_SEMAPHORE(efi_runtime_lock);
148 148
149/* 149/*
150 * Expose the EFI runtime lock to the UV platform
151 */
152#ifdef CONFIG_X86_UV
153extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
154#endif
155
156/*
150 * Calls the appropriate efi_runtime_service() with the appropriate 157 * Calls the appropriate efi_runtime_service() with the appropriate
151 * arguments. 158 * arguments.
152 * 159 *
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ec6e69aa3a8e..d2fbb4bb4a43 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
183 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); 183 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
184} 184}
185 185
186static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev)
187{
188 i2c_dev->curr_msg = NULL;
189 i2c_dev->num_msgs = 0;
190
191 i2c_dev->msg_buf = NULL;
192 i2c_dev->msg_buf_remaining = 0;
193}
194
186/* 195/*
187 * Note about I2C_C_CLEAR on error: 196 * Note about I2C_C_CLEAR on error:
188 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in 197 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
283 292
284 time_left = wait_for_completion_timeout(&i2c_dev->completion, 293 time_left = wait_for_completion_timeout(&i2c_dev->completion,
285 adap->timeout); 294 adap->timeout);
295
296 bcm2835_i2c_finish_transfer(i2c_dev);
297
286 if (!time_left) { 298 if (!time_left) {
287 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 299 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
288 BCM2835_I2C_C_CLEAR); 300 BCM2835_I2C_C_CLEAR);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index b13605718291..d917cefc5a19 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
382 * Check for the message size against FIFO depth and set the 382 * Check for the message size against FIFO depth and set the
383 * 'hold bus' bit if it is greater than FIFO depth. 383 * 'hold bus' bit if it is greater than FIFO depth.
384 */ 384 */
385 if (id->recv_count > CDNS_I2C_FIFO_DEPTH) 385 if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
386 ctrl_reg |= CDNS_I2C_CR_HOLD; 386 ctrl_reg |= CDNS_I2C_CR_HOLD;
387 else
388 ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
387 389
388 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 390 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
389 391
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
440 * Check for the message size against FIFO depth and set the 442 * Check for the message size against FIFO depth and set the
441 * 'hold bus' bit if it is greater than FIFO depth. 443 * 'hold bus' bit if it is greater than FIFO depth.
442 */ 444 */
443 if (id->send_count > CDNS_I2C_FIFO_DEPTH) 445 if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
444 ctrl_reg |= CDNS_I2C_CR_HOLD; 446 ctrl_reg |= CDNS_I2C_CR_HOLD;
447 else
448 ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
449
445 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 450 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
446 451
447 /* Clear the interrupts in interrupt status register. */ 452 /* Clear the interrupts in interrupt status register. */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4713957b0cbb..a878351f1643 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
420 420
421config KEYBOARD_SNVS_PWRKEY 421config KEYBOARD_SNVS_PWRKEY
422 tristate "IMX SNVS Power Key Driver" 422 tristate "IMX SNVS Power Key Driver"
423 depends on SOC_IMX6SX 423 depends on SOC_IMX6SX || SOC_IMX7D
424 depends on OF 424 depends on OF
425 help 425 help
426 This is the snvs powerkey driver for the Freescale i.MX application 426 This is the snvs powerkey driver for the Freescale i.MX application
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 312916f99597..73686c2460ce 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -75,9 +75,7 @@
75struct cap11xx_led { 75struct cap11xx_led {
76 struct cap11xx_priv *priv; 76 struct cap11xx_priv *priv;
77 struct led_classdev cdev; 77 struct led_classdev cdev;
78 struct work_struct work;
79 u32 reg; 78 u32 reg;
80 enum led_brightness new_brightness;
81}; 79};
82#endif 80#endif
83 81
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
233} 231}
234 232
235#ifdef CONFIG_LEDS_CLASS 233#ifdef CONFIG_LEDS_CLASS
236static void cap11xx_led_work(struct work_struct *work) 234static int cap11xx_led_set(struct led_classdev *cdev,
235 enum led_brightness value)
237{ 236{
238 struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); 237 struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
239 struct cap11xx_priv *priv = led->priv; 238 struct cap11xx_priv *priv = led->priv;
240 int value = led->new_brightness;
241 239
242 /* 240 /*
243 * All LEDs share the same duty cycle as this is a HW limitation. 241 * All LEDs share the same duty cycle as this is a HW
244 * Brightness levels per LED are either 0 (OFF) and 1 (ON). 242 * limitation. Brightness levels per LED are either
243 * 0 (OFF) and 1 (ON).
245 */ 244 */
246 regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, 245 return regmap_update_bits(priv->regmap,
247 BIT(led->reg), value ? BIT(led->reg) : 0); 246 CAP11XX_REG_LED_OUTPUT_CONTROL,
248} 247 BIT(led->reg),
249 248 value ? BIT(led->reg) : 0);
250static void cap11xx_led_set(struct led_classdev *cdev,
251 enum led_brightness value)
252{
253 struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
254
255 if (led->new_brightness == value)
256 return;
257
258 led->new_brightness = value;
259 schedule_work(&led->work);
260} 249}
261 250
262static int cap11xx_init_leds(struct device *dev, 251static int cap11xx_init_leds(struct device *dev,
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
299 led->cdev.default_trigger = 288 led->cdev.default_trigger =
300 of_get_property(child, "linux,default-trigger", NULL); 289 of_get_property(child, "linux,default-trigger", NULL);
301 led->cdev.flags = 0; 290 led->cdev.flags = 0;
302 led->cdev.brightness_set = cap11xx_led_set; 291 led->cdev.brightness_set_blocking = cap11xx_led_set;
303 led->cdev.max_brightness = 1; 292 led->cdev.max_brightness = 1;
304 led->cdev.brightness = LED_OFF; 293 led->cdev.brightness = LED_OFF;
305 294
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
312 led->reg = reg; 301 led->reg = reg;
313 led->priv = priv; 302 led->priv = priv;
314 303
315 INIT_WORK(&led->work, cap11xx_led_work);
316
317 error = devm_led_classdev_register(dev, &led->cdev); 304 error = devm_led_classdev_register(dev, &led->cdev);
318 if (error) { 305 if (error) {
319 of_node_put(child); 306 of_node_put(child);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 403452ef00e6..3d1cb7bf5e35 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
222 keypad->stopped = true; 222 keypad->stopped = true;
223 spin_unlock_irq(&keypad->lock); 223 spin_unlock_irq(&keypad->lock);
224 224
225 flush_work(&keypad->work.work); 225 flush_delayed_work(&keypad->work);
226 /* 226 /*
227 * matrix_keypad_scan() will leave IRQs enabled; 227 * matrix_keypad_scan() will leave IRQs enabled;
228 * we should disable them now. 228 * we should disable them now.
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 43b86482dda0..d466bc07aebb 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = {
58struct qt2160_led { 58struct qt2160_led {
59 struct qt2160_data *qt2160; 59 struct qt2160_data *qt2160;
60 struct led_classdev cdev; 60 struct led_classdev cdev;
61 struct work_struct work;
62 char name[32]; 61 char name[32];
63 int id; 62 int id;
64 enum led_brightness new_brightness; 63 enum led_brightness brightness;
65}; 64};
66#endif 65#endif
67 66
@@ -74,7 +73,6 @@ struct qt2160_data {
74 u16 key_matrix; 73 u16 key_matrix;
75#ifdef CONFIG_LEDS_CLASS 74#ifdef CONFIG_LEDS_CLASS
76 struct qt2160_led leds[QT2160_NUM_LEDS_X]; 75 struct qt2160_led leds[QT2160_NUM_LEDS_X];
77 struct mutex led_lock;
78#endif 76#endif
79}; 77};
80 78
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
83 81
84#ifdef CONFIG_LEDS_CLASS 82#ifdef CONFIG_LEDS_CLASS
85 83
86static void qt2160_led_work(struct work_struct *work) 84static int qt2160_led_set(struct led_classdev *cdev,
85 enum led_brightness value)
87{ 86{
88 struct qt2160_led *led = container_of(work, struct qt2160_led, work); 87 struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
89 struct qt2160_data *qt2160 = led->qt2160; 88 struct qt2160_data *qt2160 = led->qt2160;
90 struct i2c_client *client = qt2160->client; 89 struct i2c_client *client = qt2160->client;
91 int value = led->new_brightness;
92 u32 drive, pwmen; 90 u32 drive, pwmen;
93 91
94 mutex_lock(&qt2160->led_lock); 92 if (value != led->brightness) {
95 93 drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
96 drive = qt2160_read(client, QT2160_CMD_DRIVE_X); 94 pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
97 pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); 95 if (value != LED_OFF) {
98 if (value != LED_OFF) { 96 drive |= BIT(led->id);
99 drive |= (1 << led->id); 97 pwmen |= BIT(led->id);
100 pwmen |= (1 << led->id);
101
102 } else {
103 drive &= ~(1 << led->id);
104 pwmen &= ~(1 << led->id);
105 }
106 qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
107 qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
108 98
109 /* 99 } else {
110 * Changing this register will change the brightness 100 drive &= ~BIT(led->id);
111 * of every LED in the qt2160. It's a HW limitation. 101 pwmen &= ~BIT(led->id);
112 */ 102 }
113 if (value != LED_OFF) 103 qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
114 qt2160_write(client, QT2160_CMD_PWM_DUTY, value); 104 qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
115 105
116 mutex_unlock(&qt2160->led_lock); 106 /*
117} 107 * Changing this register will change the brightness
108 * of every LED in the qt2160. It's a HW limitation.
109 */
110 if (value != LED_OFF)
111 qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
118 112
119static void qt2160_led_set(struct led_classdev *cdev, 113 led->brightness = value;
120 enum led_brightness value) 114 }
121{
122 struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
123 115
124 led->new_brightness = value; 116 return 0;
125 schedule_work(&led->work);
126} 117}
127 118
128#endif /* CONFIG_LEDS_CLASS */ 119#endif /* CONFIG_LEDS_CLASS */
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
293 int ret; 284 int ret;
294 int i; 285 int i;
295 286
296 mutex_init(&qt2160->led_lock);
297
298 for (i = 0; i < QT2160_NUM_LEDS_X; i++) { 287 for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
299 struct qt2160_led *led = &qt2160->leds[i]; 288 struct qt2160_led *led = &qt2160->leds[i];
300 289
301 snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); 290 snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
302 led->cdev.name = led->name; 291 led->cdev.name = led->name;
303 led->cdev.brightness_set = qt2160_led_set; 292 led->cdev.brightness_set_blocking = qt2160_led_set;
304 led->cdev.brightness = LED_OFF; 293 led->cdev.brightness = LED_OFF;
305 led->id = i; 294 led->id = i;
306 led->qt2160 = qt2160; 295 led->qt2160 = qt2160;
307 296
308 INIT_WORK(&led->work, qt2160_led_work);
309
310 ret = led_classdev_register(&client->dev, &led->cdev); 297 ret = led_classdev_register(&client->dev, &led->cdev);
311 if (ret < 0) 298 if (ret < 0)
312 return ret; 299 return ret;
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160)
324{ 311{
325 int i; 312 int i;
326 313
327 for (i = 0; i < QT2160_NUM_LEDS_X; i++) { 314 for (i = 0; i < QT2160_NUM_LEDS_X; i++)
328 led_classdev_unregister(&qt2160->leds[i].cdev); 315 led_classdev_unregister(&qt2160->leds[i].cdev);
329 cancel_work_sync(&qt2160->leds[i].work);
330 }
331} 316}
332 317
333#else 318#else
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index babcfb165e4f..3b85631fde91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
153 153
154 input_dev->id.bustype = BUS_HOST; 154 input_dev->id.bustype = BUS_HOST;
155 155
156 keypad_data->input_dev = input_dev;
157
156 error = keypad_matrix_key_parse_dt(keypad_data); 158 error = keypad_matrix_key_parse_dt(keypad_data);
157 if (error) 159 if (error)
158 return error; 160 return error;
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
168 170
169 input_set_drvdata(input_dev, keypad_data); 171 input_set_drvdata(input_dev, keypad_data);
170 172
171 keypad_data->input_dev = input_dev;
172
173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 keypad_data->base = devm_ioremap_resource(&pdev->dev, res); 174 keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
175 if (IS_ERR(keypad_data->base)) 175 if (IS_ERR(keypad_data->base))
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 094bddf56755..c1e66f45d552 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -22,7 +22,6 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/input-polldev.h> 23#include <linux/input-polldev.h>
24#include <linux/i2c.h> 24#include <linux/i2c.h>
25#include <linux/workqueue.h>
26#include <linux/leds.h> 25#include <linux/leds.h>
27 26
28#define APANEL_NAME "Fujitsu Application Panel" 27#define APANEL_NAME "Fujitsu Application Panel"
@@ -59,8 +58,6 @@ struct apanel {
59 struct i2c_client *client; 58 struct i2c_client *client;
60 unsigned short keymap[MAX_PANEL_KEYS]; 59 unsigned short keymap[MAX_PANEL_KEYS];
61 u16 nkeys; 60 u16 nkeys;
62 u16 led_bits;
63 struct work_struct led_work;
64 struct led_classdev mail_led; 61 struct led_classdev mail_led;
65}; 62};
66 63
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev)
109 report_key(idev, ap->keymap[i]); 106 report_key(idev, ap->keymap[i]);
110} 107}
111 108
112/* Track state changes of LED */ 109static int mail_led_set(struct led_classdev *led,
113static void led_update(struct work_struct *work)
114{
115 struct apanel *ap = container_of(work, struct apanel, led_work);
116
117 i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits);
118}
119
120static void mail_led_set(struct led_classdev *led,
121 enum led_brightness value) 110 enum led_brightness value)
122{ 111{
123 struct apanel *ap = container_of(led, struct apanel, mail_led); 112 struct apanel *ap = container_of(led, struct apanel, mail_led);
113 u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000;
124 114
125 if (value != LED_OFF) 115 return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
126 ap->led_bits |= 0x8000;
127 else
128 ap->led_bits &= ~0x8000;
129
130 schedule_work(&ap->led_work);
131} 116}
132 117
133static int apanel_remove(struct i2c_client *client) 118static int apanel_remove(struct i2c_client *client)
@@ -179,7 +164,7 @@ static struct apanel apanel = {
179 }, 164 },
180 .mail_led = { 165 .mail_led = {
181 .name = "mail:blue", 166 .name = "mail:blue",
182 .brightness_set = mail_led_set, 167 .brightness_set_blocking = mail_led_set,
183 }, 168 },
184}; 169};
185 170
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client,
235 if (err) 220 if (err)
236 goto out3; 221 goto out3;
237 222
238 INIT_WORK(&ap->led_work, led_update);
239 if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { 223 if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
240 err = led_classdev_register(&client->dev, &ap->mail_led); 224 err = led_classdev_register(&client->dev, &ap->mail_led);
241 if (err) 225 if (err)
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 1efcfdf9f8a8..dd9dd4e40827 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
481 idev->close = bma150_irq_close; 481 idev->close = bma150_irq_close;
482 input_set_drvdata(idev, bma150); 482 input_set_drvdata(idev, bma150);
483 483
484 bma150->input = idev;
485
484 error = input_register_device(idev); 486 error = input_register_device(idev);
485 if (error) { 487 if (error) {
486 input_free_device(idev); 488 input_free_device(idev);
487 return error; 489 return error;
488 } 490 }
489 491
490 bma150->input = idev;
491 return 0; 492 return 0;
492} 493}
493 494
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
510 511
511 bma150_init_input_device(bma150, ipoll_dev->input); 512 bma150_init_input_device(bma150, ipoll_dev->input);
512 513
514 bma150->input_polled = ipoll_dev;
515 bma150->input = ipoll_dev->input;
516
513 error = input_register_polled_device(ipoll_dev); 517 error = input_register_polled_device(ipoll_dev);
514 if (error) { 518 if (error) {
515 input_free_polled_device(ipoll_dev); 519 input_free_polled_device(ipoll_dev);
516 return error; 520 return error;
517 } 521 }
518 522
519 bma150->input_polled = ipoll_dev;
520 bma150->input = ipoll_dev->input;
521
522 return 0; 523 return 0;
523} 524}
524 525
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 55da191ae550..dbb6d9e1b947 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -34,6 +34,7 @@ struct pwm_vibrator {
34 struct work_struct play_work; 34 struct work_struct play_work;
35 u16 level; 35 u16 level;
36 u32 direction_duty_cycle; 36 u32 direction_duty_cycle;
37 bool vcc_on;
37}; 38};
38 39
39static int pwm_vibrator_start(struct pwm_vibrator *vibrator) 40static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
42 struct pwm_state state; 43 struct pwm_state state;
43 int err; 44 int err;
44 45
45 err = regulator_enable(vibrator->vcc); 46 if (!vibrator->vcc_on) {
46 if (err) { 47 err = regulator_enable(vibrator->vcc);
47 dev_err(pdev, "failed to enable regulator: %d", err); 48 if (err) {
48 return err; 49 dev_err(pdev, "failed to enable regulator: %d", err);
50 return err;
51 }
52 vibrator->vcc_on = true;
49 } 53 }
50 54
51 pwm_get_state(vibrator->pwm, &state); 55 pwm_get_state(vibrator->pwm, &state);
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
76 80
77static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) 81static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
78{ 82{
79 regulator_disable(vibrator->vcc);
80
81 if (vibrator->pwm_dir) 83 if (vibrator->pwm_dir)
82 pwm_disable(vibrator->pwm_dir); 84 pwm_disable(vibrator->pwm_dir);
83 pwm_disable(vibrator->pwm); 85 pwm_disable(vibrator->pwm);
86
87 if (vibrator->vcc_on) {
88 regulator_disable(vibrator->vcc);
89 vibrator->vcc_on = false;
90 }
84} 91}
85 92
86static void pwm_vibrator_play_work(struct work_struct *work) 93static void pwm_vibrator_play_work(struct work_struct *work)
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f322a1768fbb..225ae6980182 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
1336static const struct acpi_device_id elan_acpi_id[] = { 1336static const struct acpi_device_id elan_acpi_id[] = {
1337 { "ELAN0000", 0 }, 1337 { "ELAN0000", 0 },
1338 { "ELAN0100", 0 }, 1338 { "ELAN0100", 0 },
1339 { "ELAN0501", 0 },
1340 { "ELAN0600", 0 }, 1339 { "ELAN0600", 0 },
1341 { "ELAN0602", 0 }, 1340 { "ELAN0602", 0 },
1342 { "ELAN0605", 0 }, 1341 { "ELAN0605", 0 },
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1346 { "ELAN060C", 0 }, 1345 { "ELAN060C", 0 },
1347 { "ELAN0611", 0 }, 1346 { "ELAN0611", 0 },
1348 { "ELAN0612", 0 }, 1347 { "ELAN0612", 0 },
1348 { "ELAN0617", 0 },
1349 { "ELAN0618", 0 }, 1349 { "ELAN0618", 0 },
1350 { "ELAN061C", 0 }, 1350 { "ELAN061C", 0 },
1351 { "ELAN061D", 0 }, 1351 { "ELAN061D", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 9fe075c137dc..a7f8b1614559 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1119 * Asus UX31 0x361f00 20, 15, 0e clickpad 1119 * Asus UX31 0x361f00 20, 15, 0e clickpad
1120 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1120 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1121 * Avatar AVIU-145A2 0x361f00 ? clickpad 1121 * Avatar AVIU-145A2 0x361f00 ? clickpad
1122 * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
1123 * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
1122 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1124 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1123 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons 1125 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
1124 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons 1126 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
1171 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), 1173 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
1172 }, 1174 },
1173 }, 1175 },
1176 {
1177 /* Fujitsu H780 also has a middle button */
1178 .matches = {
1179 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1180 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
1181 },
1182 },
1174#endif 1183#endif
1175 { } 1184 { }
1176}; 1185};
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index c62cceb97bb1..5e8d8384aa2a 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio)
76{ 76{
77 struct ps2_gpio_data *drvdata = serio->port_data; 77 struct ps2_gpio_data *drvdata = serio->port_data;
78 78
79 flush_delayed_work(&drvdata->tx_work);
79 disable_irq(drvdata->irq); 80 disable_irq(drvdata->irq);
80} 81}
81 82
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index f12cb31a41e5..d09c9f878141 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -238,9 +238,9 @@ out:
238} 238}
239 239
240/* A writeback failed: mark the page as bad, and invalidate the page cache */ 240/* A writeback failed: mark the page as bad, and invalidate the page cache */
241static void nfs_set_pageerror(struct page *page) 241static void nfs_set_pageerror(struct address_space *mapping)
242{ 242{
243 nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); 243 nfs_zap_mapping(mapping->host, mapping);
244} 244}
245 245
246/* 246/*
@@ -994,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
994 nfs_list_remove_request(req); 994 nfs_list_remove_request(req);
995 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && 995 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
996 (hdr->good_bytes < bytes)) { 996 (hdr->good_bytes < bytes)) {
997 nfs_set_pageerror(req->wb_page); 997 nfs_set_pageerror(page_file_mapping(req->wb_page));
998 nfs_context_set_write_error(req->wb_context, hdr->error); 998 nfs_context_set_write_error(req->wb_context, hdr->error);
999 goto remove_req; 999 goto remove_req;
1000 } 1000 }
@@ -1348,7 +1348,8 @@ int nfs_updatepage(struct file *file, struct page *page,
1348 unsigned int offset, unsigned int count) 1348 unsigned int offset, unsigned int count)
1349{ 1349{
1350 struct nfs_open_context *ctx = nfs_file_open_context(file); 1350 struct nfs_open_context *ctx = nfs_file_open_context(file);
1351 struct inode *inode = page_file_mapping(page)->host; 1351 struct address_space *mapping = page_file_mapping(page);
1352 struct inode *inode = mapping->host;
1352 int status = 0; 1353 int status = 0;
1353 1354
1354 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); 1355 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -1366,7 +1367,7 @@ int nfs_updatepage(struct file *file, struct page *page,
1366 1367
1367 status = nfs_writepage_setup(ctx, page, offset, count); 1368 status = nfs_writepage_setup(ctx, page, offset, count);
1368 if (status < 0) 1369 if (status < 0)
1369 nfs_set_pageerror(page); 1370 nfs_set_pageerror(mapping);
1370 else 1371 else
1371 __set_page_dirty_nobuffers(page); 1372 __set_page_dirty_nobuffers(page);
1372out: 1373out:
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index b33f9785b756..72a7681f4046 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
1239 retval = nfsd_idmap_init(net); 1239 retval = nfsd_idmap_init(net);
1240 if (retval) 1240 if (retval)
1241 goto out_idmap_error; 1241 goto out_idmap_error;
1242 nn->nfsd4_lease = 45; /* default lease time */ 1242 nn->nfsd4_lease = 90; /* default lease time */
1243 nn->nfsd4_grace = 45; 1243 nn->nfsd4_grace = 90;
1244 nn->somebody_reclaimed = false; 1244 nn->somebody_reclaimed = false;
1245 nn->clverifier_counter = prandom_u32(); 1245 nn->clverifier_counter = prandom_u32();
1246 nn->clientid_counter = prandom_u32(); 1246 nn->clientid_counter = prandom_u32();
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4f31f96bbfab..c36c86f1ec9a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -100,7 +100,7 @@ enum vgic_irq_config {
100}; 100};
101 101
102struct vgic_irq { 102struct vgic_irq {
103 spinlock_t irq_lock; /* Protects the content of the struct */ 103 raw_spinlock_t irq_lock; /* Protects the content of the struct */
104 struct list_head lpi_list; /* Used to link all LPIs together */ 104 struct list_head lpi_list; /* Used to link all LPIs together */
105 struct list_head ap_list; 105 struct list_head ap_list;
106 106
@@ -256,7 +256,7 @@ struct vgic_dist {
256 u64 propbaser; 256 u64 propbaser;
257 257
258 /* Protects the lpi_list and the count value below. */ 258 /* Protects the lpi_list and the count value below. */
259 spinlock_t lpi_list_lock; 259 raw_spinlock_t lpi_list_lock;
260 struct list_head lpi_list_head; 260 struct list_head lpi_list_head;
261 int lpi_list_count; 261 int lpi_list_count;
262 262
@@ -307,7 +307,7 @@ struct vgic_cpu {
307 unsigned int used_lrs; 307 unsigned int used_lrs;
308 struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; 308 struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
309 309
310 spinlock_t ap_list_lock; /* Protects the ap_list */ 310 raw_spinlock_t ap_list_lock; /* Protects the ap_list */
311 311
312 /* 312 /*
313 * List of IRQs that this VCPU should consider because they are either 313 * List of IRQs that this VCPU should consider because they are either
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 19f32b0c29af..6b318efd8a74 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -34,6 +34,7 @@
34#ifndef __has_attribute 34#ifndef __has_attribute
35# define __has_attribute(x) __GCC4_has_attribute_##x 35# define __has_attribute(x) __GCC4_has_attribute_##x
36# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) 36# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
37# define __GCC4_has_attribute___copy__ 0
37# define __GCC4_has_attribute___designated_init__ 0 38# define __GCC4_has_attribute___designated_init__ 0
38# define __GCC4_has_attribute___externally_visible__ 1 39# define __GCC4_has_attribute___externally_visible__ 1
39# define __GCC4_has_attribute___noclone__ 1 40# define __GCC4_has_attribute___noclone__ 1
@@ -101,6 +102,19 @@
101#define __attribute_const__ __attribute__((__const__)) 102#define __attribute_const__ __attribute__((__const__))
102 103
103/* 104/*
105 * Optional: only supported since gcc >= 9
106 * Optional: not supported by clang
107 * Optional: not supported by icc
108 *
109 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
110 */
111#if __has_attribute(__copy__)
112# define __copy(symbol) __attribute__((__copy__(symbol)))
113#else
114# define __copy(symbol)
115#endif
116
117/*
104 * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' 118 * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
105 * attribute warnings entirely and for good") for more information. 119 * attribute warnings entirely and for good") for more information.
106 * 120 *
diff --git a/include/linux/module.h b/include/linux/module.h
index 8fa38d3e7538..f5bc4c046461 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -129,13 +129,13 @@ extern void cleanup_module(void);
129#define module_init(initfn) \ 129#define module_init(initfn) \
130 static inline initcall_t __maybe_unused __inittest(void) \ 130 static inline initcall_t __maybe_unused __inittest(void) \
131 { return initfn; } \ 131 { return initfn; } \
132 int init_module(void) __attribute__((alias(#initfn))); 132 int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
133 133
134/* This is only required if you want to be unloadable. */ 134/* This is only required if you want to be unloadable. */
135#define module_exit(exitfn) \ 135#define module_exit(exitfn) \
136 static inline exitcall_t __maybe_unused __exittest(void) \ 136 static inline exitcall_t __maybe_unused __exittest(void) \
137 { return exitfn; } \ 137 { return exitfn; } \
138 void cleanup_module(void) __attribute__((alias(#exitfn))); 138 void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
139 139
140#endif 140#endif
141 141
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1d5c551a5add..e1a051724f7e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -447,6 +447,11 @@ struct pmu {
447 * Filter events for PMU-specific reasons. 447 * Filter events for PMU-specific reasons.
448 */ 448 */
449 int (*filter_match) (struct perf_event *event); /* optional */ 449 int (*filter_match) (struct perf_event *event); /* optional */
450
451 /*
452 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
453 */
454 int (*check_period) (struct perf_event *event, u64 value); /* optional */
450}; 455};
451 456
452enum perf_addr_filter_action_t { 457enum perf_addr_filter_action_t {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e5ede6918050..26d6edab051a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
4963 } 4963 }
4964} 4964}
4965 4965
4966static int perf_event_check_period(struct perf_event *event, u64 value)
4967{
4968 return event->pmu->check_period(event, value);
4969}
4970
4966static int perf_event_period(struct perf_event *event, u64 __user *arg) 4971static int perf_event_period(struct perf_event *event, u64 __user *arg)
4967{ 4972{
4968 u64 value; 4973 u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
4979 if (event->attr.freq && value > sysctl_perf_event_sample_rate) 4984 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4980 return -EINVAL; 4985 return -EINVAL;
4981 4986
4987 if (perf_event_check_period(event, value))
4988 return -EINVAL;
4989
4982 event_function_call(event, __perf_event_period, &value); 4990 event_function_call(event, __perf_event_period, &value);
4983 4991
4984 return 0; 4992 return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
9391 return 0; 9399 return 0;
9392} 9400}
9393 9401
9402static int perf_event_nop_int(struct perf_event *event, u64 value)
9403{
9404 return 0;
9405}
9406
9394static DEFINE_PER_CPU(unsigned int, nop_txn_flags); 9407static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
9395 9408
9396static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) 9409static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
9691 pmu->pmu_disable = perf_pmu_nop_void; 9704 pmu->pmu_disable = perf_pmu_nop_void;
9692 } 9705 }
9693 9706
9707 if (!pmu->check_period)
9708 pmu->check_period = perf_event_nop_int;
9709
9694 if (!pmu->event_idx) 9710 if (!pmu->event_idx)
9695 pmu->event_idx = perf_event_idx_default; 9711 pmu->event_idx = perf_event_idx_default;
9696 9712
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 309ef5a64af5..5ab4fe3b1dcc 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
734 size = sizeof(struct ring_buffer); 734 size = sizeof(struct ring_buffer);
735 size += nr_pages * sizeof(void *); 735 size += nr_pages * sizeof(void *);
736 736
737 if (order_base_2(size) >= MAX_ORDER) 737 if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
738 goto fail; 738 goto fail;
739 739
740 rb = kzalloc(size, GFP_KERNEL); 740 rb = kzalloc(size, GFP_KERNEL);
diff --git a/lib/crc32.c b/lib/crc32.c
index 45b1d67a1767..4a20455d1f61 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
206EXPORT_SYMBOL(crc32_le); 206EXPORT_SYMBOL(crc32_le);
207EXPORT_SYMBOL(__crc32c_le); 207EXPORT_SYMBOL(__crc32c_le);
208 208
209u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); 209u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
210u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); 210u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
211 211
212/* 212/*
213 * This multiplies the polynomials x and y modulo the given modulus. 213 * This multiplies the polynomials x and y modulo the given modulus.
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index fb6656295204..507105127095 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
44 unsigned char *cksum, unsigned char *buf) 44 unsigned char *cksum, unsigned char *buf)
45{ 45{
46 struct crypto_sync_skcipher *cipher; 46 struct crypto_sync_skcipher *cipher;
47 unsigned char plain[8]; 47 unsigned char *plain;
48 s32 code; 48 s32 code;
49 49
50 dprintk("RPC: %s:\n", __func__); 50 dprintk("RPC: %s:\n", __func__);
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
52 if (IS_ERR(cipher)) 52 if (IS_ERR(cipher))
53 return PTR_ERR(cipher); 53 return PTR_ERR(cipher);
54 54
55 plain = kmalloc(8, GFP_NOFS);
56 if (!plain)
57 return -ENOMEM;
58
55 plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); 59 plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
56 plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); 60 plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
57 plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); 61 plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
67 71
68 code = krb5_encrypt(cipher, cksum, plain, buf, 8); 72 code = krb5_encrypt(cipher, cksum, plain, buf, 8);
69out: 73out:
74 kfree(plain);
70 crypto_free_sync_skcipher(cipher); 75 crypto_free_sync_skcipher(cipher);
71 return code; 76 return code;
72} 77}
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
77 u32 seqnum, 82 u32 seqnum,
78 unsigned char *cksum, unsigned char *buf) 83 unsigned char *cksum, unsigned char *buf)
79{ 84{
80 unsigned char plain[8]; 85 unsigned char *plain;
86 s32 code;
81 87
82 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) 88 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
83 return krb5_make_rc4_seq_num(kctx, direction, seqnum, 89 return krb5_make_rc4_seq_num(kctx, direction, seqnum,
84 cksum, buf); 90 cksum, buf);
85 91
92 plain = kmalloc(8, GFP_NOFS);
93 if (!plain)
94 return -ENOMEM;
95
86 plain[0] = (unsigned char) (seqnum & 0xff); 96 plain[0] = (unsigned char) (seqnum & 0xff);
87 plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); 97 plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
88 plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); 98 plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
93 plain[6] = direction; 103 plain[6] = direction;
94 plain[7] = direction; 104 plain[7] = direction;
95 105
96 return krb5_encrypt(key, cksum, plain, buf, 8); 106 code = krb5_encrypt(key, cksum, plain, buf, 8);
107 kfree(plain);
108 return code;
97} 109}
98 110
99static s32 111static s32
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
101 unsigned char *buf, int *direction, s32 *seqnum) 113 unsigned char *buf, int *direction, s32 *seqnum)
102{ 114{
103 struct crypto_sync_skcipher *cipher; 115 struct crypto_sync_skcipher *cipher;
104 unsigned char plain[8]; 116 unsigned char *plain;
105 s32 code; 117 s32 code;
106 118
107 dprintk("RPC: %s:\n", __func__); 119 dprintk("RPC: %s:\n", __func__);
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
113 if (code) 125 if (code)
114 goto out; 126 goto out;
115 127
128 plain = kmalloc(8, GFP_NOFS);
129 if (!plain) {
130 code = -ENOMEM;
131 goto out;
132 }
133
116 code = krb5_decrypt(cipher, cksum, buf, plain, 8); 134 code = krb5_decrypt(cipher, cksum, buf, plain, 8);
117 if (code) 135 if (code)
118 goto out; 136 goto out_plain;
119 137
120 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) 138 if ((plain[4] != plain[5]) || (plain[4] != plain[6])
121 || (plain[4] != plain[7])) { 139 || (plain[4] != plain[7])) {
122 code = (s32)KG_BAD_SEQ; 140 code = (s32)KG_BAD_SEQ;
123 goto out; 141 goto out_plain;
124 } 142 }
125 143
126 *direction = plain[4]; 144 *direction = plain[4];
127 145
128 *seqnum = ((plain[0] << 24) | (plain[1] << 16) | 146 *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
129 (plain[2] << 8) | (plain[3])); 147 (plain[2] << 8) | (plain[3]));
148out_plain:
149 kfree(plain);
130out: 150out:
131 crypto_free_sync_skcipher(cipher); 151 crypto_free_sync_skcipher(cipher);
132 return code; 152 return code;
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
139 int *direction, u32 *seqnum) 159 int *direction, u32 *seqnum)
140{ 160{
141 s32 code; 161 s32 code;
142 unsigned char plain[8]; 162 unsigned char *plain;
143 struct crypto_sync_skcipher *key = kctx->seq; 163 struct crypto_sync_skcipher *key = kctx->seq;
144 164
145 dprintk("RPC: krb5_get_seq_num:\n"); 165 dprintk("RPC: krb5_get_seq_num:\n");
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
147 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) 167 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
148 return krb5_get_rc4_seq_num(kctx, cksum, buf, 168 return krb5_get_rc4_seq_num(kctx, cksum, buf,
149 direction, seqnum); 169 direction, seqnum);
170 plain = kmalloc(8, GFP_NOFS);
171 if (!plain)
172 return -ENOMEM;
150 173
151 if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) 174 if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
152 return code; 175 goto out;
153 176
154 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || 177 if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
155 (plain[4] != plain[7])) 178 (plain[4] != plain[7])) {
156 return (s32)KG_BAD_SEQ; 179 code = (s32)KG_BAD_SEQ;
180 goto out;
181 }
157 182
158 *direction = plain[4]; 183 *direction = plain[4];
159 184
160 *seqnum = ((plain[0]) | 185 *seqnum = ((plain[0]) |
161 (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); 186 (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
162 187
163 return 0; 188out:
189 kfree(plain);
190 return code;
164} 191}
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index 45a033329cd4..19bb356230ed 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
146 rcu_read_lock(); 146 rcu_read_lock();
147 xprt = rcu_dereference(clnt->cl_xprt); 147 xprt = rcu_dereference(clnt->cl_xprt);
148 /* no "debugfs" dentry? Don't bother with the symlink. */ 148 /* no "debugfs" dentry? Don't bother with the symlink. */
149 if (!xprt->debugfs) { 149 if (IS_ERR_OR_NULL(xprt->debugfs)) {
150 rcu_read_unlock(); 150 rcu_read_unlock();
151 return; 151 return;
152 } 152 }
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 4994e75945b8..21113bfd4eca 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -527,7 +527,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
527 527
528 sendcq = ib_alloc_cq(ia->ri_device, NULL, 528 sendcq = ib_alloc_cq(ia->ri_device, NULL,
529 ep->rep_attr.cap.max_send_wr + 1, 529 ep->rep_attr.cap.max_send_wr + 1,
530 1, IB_POLL_WORKQUEUE); 530 ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
531 IB_POLL_WORKQUEUE);
531 if (IS_ERR(sendcq)) { 532 if (IS_ERR(sendcq)) {
532 rc = PTR_ERR(sendcq); 533 rc = PTR_ERR(sendcq);
533 goto out1; 534 goto out1;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9e350fd34504..9c486fad3f9f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
626 /* Awaken to handle a signal, request we sleep again later. */ 626 /* Awaken to handle a signal, request we sleep again later. */
627 kvm_make_request(KVM_REQ_SLEEP, vcpu); 627 kvm_make_request(KVM_REQ_SLEEP, vcpu);
628 } 628 }
629
630 /*
631 * Make sure we will observe a potential reset request if we've
632 * observed a change to the power state. Pairs with the smp_wmb() in
633 * kvm_psci_vcpu_on().
634 */
635 smp_rmb();
629} 636}
630 637
631static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) 638static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
639 if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) 646 if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
640 vcpu_req_sleep(vcpu); 647 vcpu_req_sleep(vcpu);
641 648
649 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
650 kvm_reset_vcpu(vcpu);
651
642 /* 652 /*
643 * Clear IRQ_PENDING requests that were made to guarantee 653 * Clear IRQ_PENDING requests that were made to guarantee
644 * that a VCPU sees new virtual interrupts. 654 * that a VCPU sees new virtual interrupts.
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..30251e288629 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1695 1695
1696 vma_pagesize = vma_kernel_pagesize(vma); 1696 vma_pagesize = vma_kernel_pagesize(vma);
1697 /* 1697 /*
1698 * PUD level may not exist for a VM but PMD is guaranteed to 1698 * The stage2 has a minimum of 2 level table (For arm64 see
1699 * exist. 1699 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1700 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
1701 * As for PUD huge maps, we must make sure that we have at least
1702 * 3 levels, i.e, PMD is not folded.
1700 */ 1703 */
1701 if ((vma_pagesize == PMD_SIZE || 1704 if ((vma_pagesize == PMD_SIZE ||
1702 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) && 1705 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
1703 !force_pte) { 1706 !force_pte) {
1704 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; 1707 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1705 } 1708 }
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..34d08ee63747 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
104 104
105static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) 105static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
106{ 106{
107 struct vcpu_reset_state *reset_state;
107 struct kvm *kvm = source_vcpu->kvm; 108 struct kvm *kvm = source_vcpu->kvm;
108 struct kvm_vcpu *vcpu = NULL; 109 struct kvm_vcpu *vcpu = NULL;
109 struct swait_queue_head *wq;
110 unsigned long cpu_id; 110 unsigned long cpu_id;
111 unsigned long context_id;
112 phys_addr_t target_pc;
113 111
114 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; 112 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
115 if (vcpu_mode_is_32bit(source_vcpu)) 113 if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
130 return PSCI_RET_INVALID_PARAMS; 128 return PSCI_RET_INVALID_PARAMS;
131 } 129 }
132 130
133 target_pc = smccc_get_arg2(source_vcpu); 131 reset_state = &vcpu->arch.reset_state;
134 context_id = smccc_get_arg3(source_vcpu);
135 132
136 kvm_reset_vcpu(vcpu); 133 reset_state->pc = smccc_get_arg2(source_vcpu);
137
138 /* Gracefully handle Thumb2 entry point */
139 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
140 target_pc &= ~((phys_addr_t) 1);
141 vcpu_set_thumb(vcpu);
142 }
143 134
144 /* Propagate caller endianness */ 135 /* Propagate caller endianness */
145 if (kvm_vcpu_is_be(source_vcpu)) 136 reset_state->be = kvm_vcpu_is_be(source_vcpu);
146 kvm_vcpu_set_be(vcpu);
147 137
148 *vcpu_pc(vcpu) = target_pc;
149 /* 138 /*
150 * NOTE: We always update r0 (or x0) because for PSCI v0.1 139 * NOTE: We always update r0 (or x0) because for PSCI v0.1
151 * the general puspose registers are undefined upon CPU_ON. 140 * the general puspose registers are undefined upon CPU_ON.
152 */ 141 */
153 smccc_set_retval(vcpu, context_id, 0, 0, 0); 142 reset_state->r0 = smccc_get_arg3(source_vcpu);
154 vcpu->arch.power_off = false; 143
155 smp_mb(); /* Make sure the above is visible */ 144 WRITE_ONCE(reset_state->reset, true);
145 kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
156 146
157 wq = kvm_arch_vcpu_wq(vcpu); 147 /*
158 swake_up_one(wq); 148 * Make sure the reset request is observed if the change to
149 * power_state is observed.
150 */
151 smp_wmb();
152
153 vcpu->arch.power_off = false;
154 kvm_vcpu_wake_up(vcpu);
159 155
160 return PSCI_RET_SUCCESS; 156 return PSCI_RET_SUCCESS;
161} 157}
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 07aa900bac56..1f62f2b8065d 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
251 return 0; 251 return 0;
252 } 252 }
253 253
254 spin_lock_irqsave(&irq->irq_lock, flags); 254 raw_spin_lock_irqsave(&irq->irq_lock, flags);
255 print_irq_state(s, irq, vcpu); 255 print_irq_state(s, irq, vcpu);
256 spin_unlock_irqrestore(&irq->irq_lock, flags); 256 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
257 257
258 vgic_put_irq(kvm, irq); 258 vgic_put_irq(kvm, irq);
259 return 0; 259 return 0;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index c0c0b88af1d5..3bdb31eaed64 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
64 struct vgic_dist *dist = &kvm->arch.vgic; 64 struct vgic_dist *dist = &kvm->arch.vgic;
65 65
66 INIT_LIST_HEAD(&dist->lpi_list_head); 66 INIT_LIST_HEAD(&dist->lpi_list_head);
67 spin_lock_init(&dist->lpi_list_lock); 67 raw_spin_lock_init(&dist->lpi_list_lock);
68} 68}
69 69
70/* CREATION */ 70/* CREATION */
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
171 171
172 irq->intid = i + VGIC_NR_PRIVATE_IRQS; 172 irq->intid = i + VGIC_NR_PRIVATE_IRQS;
173 INIT_LIST_HEAD(&irq->ap_list); 173 INIT_LIST_HEAD(&irq->ap_list);
174 spin_lock_init(&irq->irq_lock); 174 raw_spin_lock_init(&irq->irq_lock);
175 irq->vcpu = NULL; 175 irq->vcpu = NULL;
176 irq->target_vcpu = vcpu0; 176 irq->target_vcpu = vcpu0;
177 kref_init(&irq->refcount); 177 kref_init(&irq->refcount);
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
206 vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; 206 vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
207 207
208 INIT_LIST_HEAD(&vgic_cpu->ap_list_head); 208 INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
209 spin_lock_init(&vgic_cpu->ap_list_lock); 209 raw_spin_lock_init(&vgic_cpu->ap_list_lock);
210 210
211 /* 211 /*
212 * Enable and configure all SGIs to be edge-triggered and 212 * Enable and configure all SGIs to be edge-triggered and
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
216 struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; 216 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
217 217
218 INIT_LIST_HEAD(&irq->ap_list); 218 INIT_LIST_HEAD(&irq->ap_list);
219 spin_lock_init(&irq->irq_lock); 219 raw_spin_lock_init(&irq->irq_lock);
220 irq->intid = i; 220 irq->intid = i;
221 irq->vcpu = NULL; 221 irq->vcpu = NULL;
222 irq->target_vcpu = vcpu; 222 irq->target_vcpu = vcpu;
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
231 irq->config = VGIC_CONFIG_LEVEL; 231 irq->config = VGIC_CONFIG_LEVEL;
232 } 232 }
233 233
234 /*
235 * GICv3 can only be created via the KVM_DEVICE_CREATE API and
236 * so we always know the emulation type at this point as it's
237 * either explicitly configured as GICv3, or explicitly
238 * configured as GICv2, or not configured yet which also
239 * implies GICv2.
240 */
241 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 234 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
242 irq->group = 1; 235 irq->group = 1;
243 else 236 else
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
281{ 274{
282 struct vgic_dist *dist = &kvm->arch.vgic; 275 struct vgic_dist *dist = &kvm->arch.vgic;
283 struct kvm_vcpu *vcpu; 276 struct kvm_vcpu *vcpu;
284 int ret = 0, i; 277 int ret = 0, i, idx;
285 278
286 if (vgic_initialized(kvm)) 279 if (vgic_initialized(kvm))
287 return 0; 280 return 0;
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
298 if (ret) 291 if (ret)
299 goto out; 292 goto out;
300 293
294 /* Initialize groups on CPUs created before the VGIC type was known */
295 kvm_for_each_vcpu(idx, vcpu, kvm) {
296 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
297
298 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
299 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
300 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
301 irq->group = 1;
302 else
303 irq->group = 0;
304 }
305 }
306
301 if (vgic_has_its(kvm)) { 307 if (vgic_has_its(kvm)) {
302 ret = vgic_v4_init(kvm); 308 ret = vgic_v4_init(kvm);
303 if (ret) 309 if (ret)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index eb2a390a6c86..ab3f47745d9c 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
65 65
66 INIT_LIST_HEAD(&irq->lpi_list); 66 INIT_LIST_HEAD(&irq->lpi_list);
67 INIT_LIST_HEAD(&irq->ap_list); 67 INIT_LIST_HEAD(&irq->ap_list);
68 spin_lock_init(&irq->irq_lock); 68 raw_spin_lock_init(&irq->irq_lock);
69 69
70 irq->config = VGIC_CONFIG_EDGE; 70 irq->config = VGIC_CONFIG_EDGE;
71 kref_init(&irq->refcount); 71 kref_init(&irq->refcount);
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
73 irq->target_vcpu = vcpu; 73 irq->target_vcpu = vcpu;
74 irq->group = 1; 74 irq->group = 1;
75 75
76 spin_lock_irqsave(&dist->lpi_list_lock, flags); 76 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
77 77
78 /* 78 /*
79 * There could be a race with another vgic_add_lpi(), so we need to 79 * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
101 dist->lpi_list_count++; 101 dist->lpi_list_count++;
102 102
103out_unlock: 103out_unlock:
104 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 104 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
105 105
106 /* 106 /*
107 * We "cache" the configuration table entries in our struct vgic_irq's. 107 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
287 if (ret) 287 if (ret)
288 return ret; 288 return ret;
289 289
290 spin_lock_irqsave(&irq->irq_lock, flags); 290 raw_spin_lock_irqsave(&irq->irq_lock, flags);
291 291
292 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { 292 if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
293 irq->priority = LPI_PROP_PRIORITY(prop); 293 irq->priority = LPI_PROP_PRIORITY(prop);
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
299 } 299 }
300 } 300 }
301 301
302 spin_unlock_irqrestore(&irq->irq_lock, flags); 302 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
303 303
304 if (irq->hw) 304 if (irq->hw)
305 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); 305 return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
332 if (!intids) 332 if (!intids)
333 return -ENOMEM; 333 return -ENOMEM;
334 334
335 spin_lock_irqsave(&dist->lpi_list_lock, flags); 335 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
337 if (i == irq_count) 337 if (i == irq_count)
338 break; 338 break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
341 continue; 341 continue;
342 intids[i++] = irq->intid; 342 intids[i++] = irq->intid;
343 } 343 }
344 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 344 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
345 345
346 *intid_ptr = intids; 346 *intid_ptr = intids;
347 return i; 347 return i;
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
352 int ret = 0; 352 int ret = 0;
353 unsigned long flags; 353 unsigned long flags;
354 354
355 spin_lock_irqsave(&irq->irq_lock, flags); 355 raw_spin_lock_irqsave(&irq->irq_lock, flags);
356 irq->target_vcpu = vcpu; 356 irq->target_vcpu = vcpu;
357 spin_unlock_irqrestore(&irq->irq_lock, flags); 357 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
358 358
359 if (irq->hw) { 359 if (irq->hw) {
360 struct its_vlpi_map map; 360 struct its_vlpi_map map;
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
455 } 455 }
456 456
457 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); 457 irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
458 spin_lock_irqsave(&irq->irq_lock, flags); 458 raw_spin_lock_irqsave(&irq->irq_lock, flags);
459 irq->pending_latch = pendmask & (1U << bit_nr); 459 irq->pending_latch = pendmask & (1U << bit_nr);
460 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 460 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
461 vgic_put_irq(vcpu->kvm, irq); 461 vgic_put_irq(vcpu->kvm, irq);
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
612 return irq_set_irqchip_state(irq->host_irq, 612 return irq_set_irqchip_state(irq->host_irq,
613 IRQCHIP_STATE_PENDING, true); 613 IRQCHIP_STATE_PENDING, true);
614 614
615 spin_lock_irqsave(&irq->irq_lock, flags); 615 raw_spin_lock_irqsave(&irq->irq_lock, flags);
616 irq->pending_latch = true; 616 irq->pending_latch = true;
617 vgic_queue_irq_unlock(kvm, irq, flags); 617 vgic_queue_irq_unlock(kvm, irq, flags);
618 618
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 738b65d2d0e7..b535fffc7400 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
147 147
148 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); 148 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
149 149
150 spin_lock_irqsave(&irq->irq_lock, flags); 150 raw_spin_lock_irqsave(&irq->irq_lock, flags);
151 irq->pending_latch = true; 151 irq->pending_latch = true;
152 irq->source |= 1U << source_vcpu->vcpu_id; 152 irq->source |= 1U << source_vcpu->vcpu_id;
153 153
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
191 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); 191 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
192 int target; 192 int target;
193 193
194 spin_lock_irqsave(&irq->irq_lock, flags); 194 raw_spin_lock_irqsave(&irq->irq_lock, flags);
195 195
196 irq->targets = (val >> (i * 8)) & cpu_mask; 196 irq->targets = (val >> (i * 8)) & cpu_mask;
197 target = irq->targets ? __ffs(irq->targets) : 0; 197 target = irq->targets ? __ffs(irq->targets) : 0;
198 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); 198 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
199 199
200 spin_unlock_irqrestore(&irq->irq_lock, flags); 200 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
201 vgic_put_irq(vcpu->kvm, irq); 201 vgic_put_irq(vcpu->kvm, irq);
202 } 202 }
203} 203}
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
230 for (i = 0; i < len; i++) { 230 for (i = 0; i < len; i++) {
231 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 231 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
232 232
233 spin_lock_irqsave(&irq->irq_lock, flags); 233 raw_spin_lock_irqsave(&irq->irq_lock, flags);
234 234
235 irq->source &= ~((val >> (i * 8)) & 0xff); 235 irq->source &= ~((val >> (i * 8)) & 0xff);
236 if (!irq->source) 236 if (!irq->source)
237 irq->pending_latch = false; 237 irq->pending_latch = false;
238 238
239 spin_unlock_irqrestore(&irq->irq_lock, flags); 239 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
240 vgic_put_irq(vcpu->kvm, irq); 240 vgic_put_irq(vcpu->kvm, irq);
241 } 241 }
242} 242}
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
252 for (i = 0; i < len; i++) { 252 for (i = 0; i < len; i++) {
253 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 253 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
254 254
255 spin_lock_irqsave(&irq->irq_lock, flags); 255 raw_spin_lock_irqsave(&irq->irq_lock, flags);
256 256
257 irq->source |= (val >> (i * 8)) & 0xff; 257 irq->source |= (val >> (i * 8)) & 0xff;
258 258
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
260 irq->pending_latch = true; 260 irq->pending_latch = true;
261 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 261 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
262 } else { 262 } else {
263 spin_unlock_irqrestore(&irq->irq_lock, flags); 263 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
264 } 264 }
265 vgic_put_irq(vcpu->kvm, irq); 265 vgic_put_irq(vcpu->kvm, irq);
266 } 266 }
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index b3d1f0985117..4a12322bf7df 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
169 if (!irq) 169 if (!irq)
170 return; 170 return;
171 171
172 spin_lock_irqsave(&irq->irq_lock, flags); 172 raw_spin_lock_irqsave(&irq->irq_lock, flags);
173 173
174 /* We only care about and preserve Aff0, Aff1 and Aff2. */ 174 /* We only care about and preserve Aff0, Aff1 and Aff2. */
175 irq->mpidr = val & GENMASK(23, 0); 175 irq->mpidr = val & GENMASK(23, 0);
176 irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); 176 irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
177 177
178 spin_unlock_irqrestore(&irq->irq_lock, flags); 178 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
179 vgic_put_irq(vcpu->kvm, irq); 179 vgic_put_irq(vcpu->kvm, irq);
180} 180}
181 181
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
281 for (i = 0; i < len * 8; i++) { 281 for (i = 0; i < len * 8; i++) {
282 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 282 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
283 283
284 spin_lock_irqsave(&irq->irq_lock, flags); 284 raw_spin_lock_irqsave(&irq->irq_lock, flags);
285 if (test_bit(i, &val)) { 285 if (test_bit(i, &val)) {
286 /* 286 /*
287 * pending_latch is set irrespective of irq type 287 * pending_latch is set irrespective of irq type
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
292 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 292 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
293 } else { 293 } else {
294 irq->pending_latch = false; 294 irq->pending_latch = false;
295 spin_unlock_irqrestore(&irq->irq_lock, flags); 295 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
296 } 296 }
297 297
298 vgic_put_irq(vcpu->kvm, irq); 298 vgic_put_irq(vcpu->kvm, irq);
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
957 957
958 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); 958 irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
959 959
960 spin_lock_irqsave(&irq->irq_lock, flags); 960 raw_spin_lock_irqsave(&irq->irq_lock, flags);
961 961
962 /* 962 /*
963 * An access targetting Group0 SGIs can only generate 963 * An access targetting Group0 SGIs can only generate
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
968 irq->pending_latch = true; 968 irq->pending_latch = true;
969 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 969 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
970 } else { 970 } else {
971 spin_unlock_irqrestore(&irq->irq_lock, flags); 971 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
972 } 972 }
973 973
974 vgic_put_irq(vcpu->kvm, irq); 974 vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ceeda7e04a4d..7de42fba05b5 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
77 for (i = 0; i < len * 8; i++) { 77 for (i = 0; i < len * 8; i++) {
78 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 78 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
79 79
80 spin_lock_irqsave(&irq->irq_lock, flags); 80 raw_spin_lock_irqsave(&irq->irq_lock, flags);
81 irq->group = !!(val & BIT(i)); 81 irq->group = !!(val & BIT(i));
82 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 82 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
83 83
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
120 for_each_set_bit(i, &val, len * 8) { 120 for_each_set_bit(i, &val, len * 8) {
121 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 121 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
122 122
123 spin_lock_irqsave(&irq->irq_lock, flags); 123 raw_spin_lock_irqsave(&irq->irq_lock, flags);
124 irq->enabled = true; 124 irq->enabled = true;
125 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 125 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
126 126
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
139 for_each_set_bit(i, &val, len * 8) { 139 for_each_set_bit(i, &val, len * 8) {
140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 140 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
141 141
142 spin_lock_irqsave(&irq->irq_lock, flags); 142 raw_spin_lock_irqsave(&irq->irq_lock, flags);
143 143
144 irq->enabled = false; 144 irq->enabled = false;
145 145
146 spin_unlock_irqrestore(&irq->irq_lock, flags); 146 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
147 vgic_put_irq(vcpu->kvm, irq); 147 vgic_put_irq(vcpu->kvm, irq);
148 } 148 }
149} 149}
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
160 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 160 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
161 unsigned long flags; 161 unsigned long flags;
162 162
163 spin_lock_irqsave(&irq->irq_lock, flags); 163 raw_spin_lock_irqsave(&irq->irq_lock, flags);
164 if (irq_is_pending(irq)) 164 if (irq_is_pending(irq))
165 value |= (1U << i); 165 value |= (1U << i);
166 spin_unlock_irqrestore(&irq->irq_lock, flags); 166 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
167 167
168 vgic_put_irq(vcpu->kvm, irq); 168 vgic_put_irq(vcpu->kvm, irq);
169 } 169 }
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
215 for_each_set_bit(i, &val, len * 8) { 215 for_each_set_bit(i, &val, len * 8) {
216 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 216 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
217 217
218 spin_lock_irqsave(&irq->irq_lock, flags); 218 raw_spin_lock_irqsave(&irq->irq_lock, flags);
219 if (irq->hw) 219 if (irq->hw)
220 vgic_hw_irq_spending(vcpu, irq, is_uaccess); 220 vgic_hw_irq_spending(vcpu, irq, is_uaccess);
221 else 221 else
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
262 for_each_set_bit(i, &val, len * 8) { 262 for_each_set_bit(i, &val, len * 8) {
263 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 263 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
264 264
265 spin_lock_irqsave(&irq->irq_lock, flags); 265 raw_spin_lock_irqsave(&irq->irq_lock, flags);
266 266
267 if (irq->hw) 267 if (irq->hw)
268 vgic_hw_irq_cpending(vcpu, irq, is_uaccess); 268 vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
269 else 269 else
270 irq->pending_latch = false; 270 irq->pending_latch = false;
271 271
272 spin_unlock_irqrestore(&irq->irq_lock, flags); 272 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
273 vgic_put_irq(vcpu->kvm, irq); 273 vgic_put_irq(vcpu->kvm, irq);
274 } 274 }
275} 275}
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
311 unsigned long flags; 311 unsigned long flags;
312 struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); 312 struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
313 313
314 spin_lock_irqsave(&irq->irq_lock, flags); 314 raw_spin_lock_irqsave(&irq->irq_lock, flags);
315 315
316 if (irq->hw) { 316 if (irq->hw) {
317 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); 317 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
342 if (irq->active) 342 if (irq->active)
343 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 343 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
344 else 344 else
345 spin_unlock_irqrestore(&irq->irq_lock, flags); 345 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
346} 346}
347 347
348/* 348/*
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
485 for (i = 0; i < len; i++) { 485 for (i = 0; i < len; i++) {
486 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 486 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
487 487
488 spin_lock_irqsave(&irq->irq_lock, flags); 488 raw_spin_lock_irqsave(&irq->irq_lock, flags);
489 /* Narrow the priority range to what we actually support */ 489 /* Narrow the priority range to what we actually support */
490 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); 490 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
491 spin_unlock_irqrestore(&irq->irq_lock, flags); 491 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
492 492
493 vgic_put_irq(vcpu->kvm, irq); 493 vgic_put_irq(vcpu->kvm, irq);
494 } 494 }
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
534 continue; 534 continue;
535 535
536 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 536 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
537 spin_lock_irqsave(&irq->irq_lock, flags); 537 raw_spin_lock_irqsave(&irq->irq_lock, flags);
538 538
539 if (test_bit(i * 2 + 1, &val)) 539 if (test_bit(i * 2 + 1, &val))
540 irq->config = VGIC_CONFIG_EDGE; 540 irq->config = VGIC_CONFIG_EDGE;
541 else 541 else
542 irq->config = VGIC_CONFIG_LEVEL; 542 irq->config = VGIC_CONFIG_LEVEL;
543 543
544 spin_unlock_irqrestore(&irq->irq_lock, flags); 544 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
545 vgic_put_irq(vcpu->kvm, irq); 545 vgic_put_irq(vcpu->kvm, irq);
546 } 546 }
547} 547}
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
590 * restore irq config before line level. 590 * restore irq config before line level.
591 */ 591 */
592 new_level = !!(val & (1U << i)); 592 new_level = !!(val & (1U << i));
593 spin_lock_irqsave(&irq->irq_lock, flags); 593 raw_spin_lock_irqsave(&irq->irq_lock, flags);
594 irq->line_level = new_level; 594 irq->line_level = new_level;
595 if (new_level) 595 if (new_level)
596 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); 596 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
597 else 597 else
598 spin_unlock_irqrestore(&irq->irq_lock, flags); 598 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
599 599
600 vgic_put_irq(vcpu->kvm, irq); 600 vgic_put_irq(vcpu->kvm, irq);
601 } 601 }
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892abd7dc..d91a8938aa7c 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
84 84
85 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 85 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
86 86
87 spin_lock(&irq->irq_lock); 87 raw_spin_lock(&irq->irq_lock);
88 88
89 /* Always preserve the active bit */ 89 /* Always preserve the active bit */
90 irq->active = !!(val & GICH_LR_ACTIVE_BIT); 90 irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
127 vgic_irq_set_phys_active(irq, false); 127 vgic_irq_set_phys_active(irq, false);
128 } 128 }
129 129
130 spin_unlock(&irq->irq_lock); 130 raw_spin_unlock(&irq->irq_lock);
131 vgic_put_irq(vcpu->kvm, irq); 131 vgic_put_irq(vcpu->kvm, irq);
132 } 132 }
133 133
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd234ebe8..4ee0aeb9a905 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
76 if (!irq) /* An LPI could have been unmapped. */ 76 if (!irq) /* An LPI could have been unmapped. */
77 continue; 77 continue;
78 78
79 spin_lock(&irq->irq_lock); 79 raw_spin_lock(&irq->irq_lock);
80 80
81 /* Always preserve the active bit */ 81 /* Always preserve the active bit */
82 irq->active = !!(val & ICH_LR_ACTIVE_BIT); 82 irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
119 vgic_irq_set_phys_active(irq, false); 119 vgic_irq_set_phys_active(irq, false);
120 } 120 }
121 121
122 spin_unlock(&irq->irq_lock); 122 raw_spin_unlock(&irq->irq_lock);
123 vgic_put_irq(vcpu->kvm, irq); 123 vgic_put_irq(vcpu->kvm, irq);
124 } 124 }
125 125
@@ -347,9 +347,9 @@ retry:
347 347
348 status = val & (1 << bit_nr); 348 status = val & (1 << bit_nr);
349 349
350 spin_lock_irqsave(&irq->irq_lock, flags); 350 raw_spin_lock_irqsave(&irq->irq_lock, flags);
351 if (irq->target_vcpu != vcpu) { 351 if (irq->target_vcpu != vcpu) {
352 spin_unlock_irqrestore(&irq->irq_lock, flags); 352 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
353 goto retry; 353 goto retry;
354 } 354 }
355 irq->pending_latch = status; 355 irq->pending_latch = status;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 870b1185173b..abd9c7352677 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
54 * When taking more than one ap_list_lock at the same time, always take the 54 * When taking more than one ap_list_lock at the same time, always take the
55 * lowest numbered VCPU's ap_list_lock first, so: 55 * lowest numbered VCPU's ap_list_lock first, so:
56 * vcpuX->vcpu_id < vcpuY->vcpu_id: 56 * vcpuX->vcpu_id < vcpuY->vcpu_id:
57 * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); 57 * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
58 * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); 58 * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
59 * 59 *
60 * Since the VGIC must support injecting virtual interrupts from ISRs, we have 60 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
61 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer 61 * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
62 * spinlocks for any lock that may be taken while injecting an interrupt. 62 * spinlocks for any lock that may be taken while injecting an interrupt.
63 */ 63 */
64 64
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
72 struct vgic_irq *irq = NULL; 72 struct vgic_irq *irq = NULL;
73 unsigned long flags; 73 unsigned long flags;
74 74
75 spin_lock_irqsave(&dist->lpi_list_lock, flags); 75 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
76 76
77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 77 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
78 if (irq->intid != intid) 78 if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
88 irq = NULL; 88 irq = NULL;
89 89
90out_unlock: 90out_unlock:
91 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 91 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
92 92
93 return irq; 93 return irq;
94} 94}
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
138 if (irq->intid < VGIC_MIN_LPI) 138 if (irq->intid < VGIC_MIN_LPI)
139 return; 139 return;
140 140
141 spin_lock_irqsave(&dist->lpi_list_lock, flags); 141 raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
142 if (!kref_put(&irq->refcount, vgic_irq_release)) { 142 if (!kref_put(&irq->refcount, vgic_irq_release)) {
143 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 143 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
144 return; 144 return;
145 }; 145 };
146 146
147 list_del(&irq->lpi_list); 147 list_del(&irq->lpi_list);
148 dist->lpi_list_count--; 148 dist->lpi_list_count--;
149 spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 149 raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
150 150
151 kfree(irq); 151 kfree(irq);
152} 152}
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
244 bool penda, pendb; 244 bool penda, pendb;
245 int ret; 245 int ret;
246 246
247 spin_lock(&irqa->irq_lock); 247 raw_spin_lock(&irqa->irq_lock);
248 spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); 248 raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
249 249
250 if (irqa->active || irqb->active) { 250 if (irqa->active || irqb->active) {
251 ret = (int)irqb->active - (int)irqa->active; 251 ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
263 /* Both pending and enabled, sort by priority */ 263 /* Both pending and enabled, sort by priority */
264 ret = irqa->priority - irqb->priority; 264 ret = irqa->priority - irqb->priority;
265out: 265out:
266 spin_unlock(&irqb->irq_lock); 266 raw_spin_unlock(&irqb->irq_lock);
267 spin_unlock(&irqa->irq_lock); 267 raw_spin_unlock(&irqa->irq_lock);
268 return ret; 268 return ret;
269} 269}
270 270
@@ -325,7 +325,7 @@ retry:
325 * not need to be inserted into an ap_list and there is also 325 * not need to be inserted into an ap_list and there is also
326 * no more work for us to do. 326 * no more work for us to do.
327 */ 327 */
328 spin_unlock_irqrestore(&irq->irq_lock, flags); 328 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
329 329
330 /* 330 /*
331 * We have to kick the VCPU here, because we could be 331 * We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
347 * We must unlock the irq lock to take the ap_list_lock where 347 * We must unlock the irq lock to take the ap_list_lock where
348 * we are going to insert this new pending interrupt. 348 * we are going to insert this new pending interrupt.
349 */ 349 */
350 spin_unlock_irqrestore(&irq->irq_lock, flags); 350 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
351 351
352 /* someone can do stuff here, which we re-check below */ 352 /* someone can do stuff here, which we re-check below */
353 353
354 spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 354 raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
355 spin_lock(&irq->irq_lock); 355 raw_spin_lock(&irq->irq_lock);
356 356
357 /* 357 /*
358 * Did something change behind our backs? 358 * Did something change behind our backs?
@@ -367,10 +367,11 @@ retry:
367 */ 367 */
368 368
369 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { 369 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
370 spin_unlock(&irq->irq_lock); 370 raw_spin_unlock(&irq->irq_lock);
371 spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 371 raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
372 flags);
372 373
373 spin_lock_irqsave(&irq->irq_lock, flags); 374 raw_spin_lock_irqsave(&irq->irq_lock, flags);
374 goto retry; 375 goto retry;
375 } 376 }
376 377
@@ -382,8 +383,8 @@ retry:
382 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); 383 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
383 irq->vcpu = vcpu; 384 irq->vcpu = vcpu;
384 385
385 spin_unlock(&irq->irq_lock); 386 raw_spin_unlock(&irq->irq_lock);
386 spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); 387 raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
387 388
388 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); 389 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
389 kvm_vcpu_kick(vcpu); 390 kvm_vcpu_kick(vcpu);
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
430 if (!irq) 431 if (!irq)
431 return -EINVAL; 432 return -EINVAL;
432 433
433 spin_lock_irqsave(&irq->irq_lock, flags); 434 raw_spin_lock_irqsave(&irq->irq_lock, flags);
434 435
435 if (!vgic_validate_injection(irq, level, owner)) { 436 if (!vgic_validate_injection(irq, level, owner)) {
436 /* Nothing to see here, move along... */ 437 /* Nothing to see here, move along... */
437 spin_unlock_irqrestore(&irq->irq_lock, flags); 438 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
438 vgic_put_irq(kvm, irq); 439 vgic_put_irq(kvm, irq);
439 return 0; 440 return 0;
440 } 441 }
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
494 495
495 BUG_ON(!irq); 496 BUG_ON(!irq);
496 497
497 spin_lock_irqsave(&irq->irq_lock, flags); 498 raw_spin_lock_irqsave(&irq->irq_lock, flags);
498 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); 499 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
499 spin_unlock_irqrestore(&irq->irq_lock, flags); 500 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
500 vgic_put_irq(vcpu->kvm, irq); 501 vgic_put_irq(vcpu->kvm, irq);
501 502
502 return ret; 503 return ret;
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
519 if (!irq->hw) 520 if (!irq->hw)
520 goto out; 521 goto out;
521 522
522 spin_lock_irqsave(&irq->irq_lock, flags); 523 raw_spin_lock_irqsave(&irq->irq_lock, flags);
523 irq->active = false; 524 irq->active = false;
524 irq->pending_latch = false; 525 irq->pending_latch = false;
525 irq->line_level = false; 526 irq->line_level = false;
526 spin_unlock_irqrestore(&irq->irq_lock, flags); 527 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
527out: 528out:
528 vgic_put_irq(vcpu->kvm, irq); 529 vgic_put_irq(vcpu->kvm, irq);
529} 530}
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
539 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 540 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
540 BUG_ON(!irq); 541 BUG_ON(!irq);
541 542
542 spin_lock_irqsave(&irq->irq_lock, flags); 543 raw_spin_lock_irqsave(&irq->irq_lock, flags);
543 kvm_vgic_unmap_irq(irq); 544 kvm_vgic_unmap_irq(irq);
544 spin_unlock_irqrestore(&irq->irq_lock, flags); 545 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
545 vgic_put_irq(vcpu->kvm, irq); 546 vgic_put_irq(vcpu->kvm, irq);
546 547
547 return 0; 548 return 0;
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
571 return -EINVAL; 572 return -EINVAL;
572 573
573 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 574 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
574 spin_lock_irqsave(&irq->irq_lock, flags); 575 raw_spin_lock_irqsave(&irq->irq_lock, flags);
575 if (irq->owner && irq->owner != owner) 576 if (irq->owner && irq->owner != owner)
576 ret = -EEXIST; 577 ret = -EEXIST;
577 else 578 else
578 irq->owner = owner; 579 irq->owner = owner;
579 spin_unlock_irqrestore(&irq->irq_lock, flags); 580 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
580 581
581 return ret; 582 return ret;
582} 583}
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
597 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 598 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
598 599
599retry: 600retry:
600 spin_lock(&vgic_cpu->ap_list_lock); 601 raw_spin_lock(&vgic_cpu->ap_list_lock);
601 602
602 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { 603 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
603 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; 604 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
604 bool target_vcpu_needs_kick = false; 605 bool target_vcpu_needs_kick = false;
605 606
606 spin_lock(&irq->irq_lock); 607 raw_spin_lock(&irq->irq_lock);
607 608
608 BUG_ON(vcpu != irq->vcpu); 609 BUG_ON(vcpu != irq->vcpu);
609 610
@@ -616,7 +617,7 @@ retry:
616 */ 617 */
617 list_del(&irq->ap_list); 618 list_del(&irq->ap_list);
618 irq->vcpu = NULL; 619 irq->vcpu = NULL;
619 spin_unlock(&irq->irq_lock); 620 raw_spin_unlock(&irq->irq_lock);
620 621
621 /* 622 /*
622 * This vgic_put_irq call matches the 623 * This vgic_put_irq call matches the
@@ -631,14 +632,14 @@ retry:
631 632
632 if (target_vcpu == vcpu) { 633 if (target_vcpu == vcpu) {
633 /* We're on the right CPU */ 634 /* We're on the right CPU */
634 spin_unlock(&irq->irq_lock); 635 raw_spin_unlock(&irq->irq_lock);
635 continue; 636 continue;
636 } 637 }
637 638
638 /* This interrupt looks like it has to be migrated. */ 639 /* This interrupt looks like it has to be migrated. */
639 640
640 spin_unlock(&irq->irq_lock); 641 raw_spin_unlock(&irq->irq_lock);
641 spin_unlock(&vgic_cpu->ap_list_lock); 642 raw_spin_unlock(&vgic_cpu->ap_list_lock);
642 643
643 /* 644 /*
644 * Ensure locking order by always locking the smallest 645 * Ensure locking order by always locking the smallest
@@ -652,10 +653,10 @@ retry:
652 vcpuB = vcpu; 653 vcpuB = vcpu;
653 } 654 }
654 655
655 spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); 656 raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
656 spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, 657 raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
657 SINGLE_DEPTH_NESTING); 658 SINGLE_DEPTH_NESTING);
658 spin_lock(&irq->irq_lock); 659 raw_spin_lock(&irq->irq_lock);
659 660
660 /* 661 /*
661 * If the affinity has been preserved, move the 662 * If the affinity has been preserved, move the
@@ -675,9 +676,9 @@ retry:
675 target_vcpu_needs_kick = true; 676 target_vcpu_needs_kick = true;
676 } 677 }
677 678
678 spin_unlock(&irq->irq_lock); 679 raw_spin_unlock(&irq->irq_lock);
679 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); 680 raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
680 spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); 681 raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
681 682
682 if (target_vcpu_needs_kick) { 683 if (target_vcpu_needs_kick) {
683 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); 684 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
687 goto retry; 688 goto retry;
688 } 689 }
689 690
690 spin_unlock(&vgic_cpu->ap_list_lock); 691 raw_spin_unlock(&vgic_cpu->ap_list_lock);
691} 692}
692 693
693static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) 694static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
741 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 742 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
742 int w; 743 int w;
743 744
744 spin_lock(&irq->irq_lock); 745 raw_spin_lock(&irq->irq_lock);
745 /* GICv2 SGIs can count for more than one... */ 746 /* GICv2 SGIs can count for more than one... */
746 w = vgic_irq_get_lr_count(irq); 747 w = vgic_irq_get_lr_count(irq);
747 spin_unlock(&irq->irq_lock); 748 raw_spin_unlock(&irq->irq_lock);
748 749
749 count += w; 750 count += w;
750 *multi_sgi |= (w > 1); 751 *multi_sgi |= (w > 1);
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
770 count = 0; 771 count = 0;
771 772
772 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 773 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
773 spin_lock(&irq->irq_lock); 774 raw_spin_lock(&irq->irq_lock);
774 775
775 /* 776 /*
776 * If we have multi-SGIs in the pipeline, we need to 777 * If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
780 * the AP list has been sorted already. 781 * the AP list has been sorted already.
781 */ 782 */
782 if (multi_sgi && irq->priority > prio) { 783 if (multi_sgi && irq->priority > prio) {
783 spin_unlock(&irq->irq_lock); 784 raw_spin_unlock(&irq->irq_lock);
784 break; 785 break;
785 } 786 }
786 787
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
791 prio = irq->priority; 792 prio = irq->priority;
792 } 793 }
793 794
794 spin_unlock(&irq->irq_lock); 795 raw_spin_unlock(&irq->irq_lock);
795 796
796 if (count == kvm_vgic_global_state.nr_lr) { 797 if (count == kvm_vgic_global_state.nr_lr) {
797 if (!list_is_last(&irq->ap_list, 798 if (!list_is_last(&irq->ap_list,
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
872 873
873 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 874 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
874 875
875 spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); 876 raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
876 vgic_flush_lr_state(vcpu); 877 vgic_flush_lr_state(vcpu);
877 spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); 878 raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
878 879
879 if (can_access_vgic_from_kernel()) 880 if (can_access_vgic_from_kernel())
880 vgic_restore_state(vcpu); 881 vgic_restore_state(vcpu);
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
918 919
919 vgic_get_vmcr(vcpu, &vmcr); 920 vgic_get_vmcr(vcpu, &vmcr);
920 921
921 spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); 922 raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
922 923
923 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 924 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
924 spin_lock(&irq->irq_lock); 925 raw_spin_lock(&irq->irq_lock);
925 pending = irq_is_pending(irq) && irq->enabled && 926 pending = irq_is_pending(irq) && irq->enabled &&
926 !irq->active && 927 !irq->active &&
927 irq->priority < vmcr.pmr; 928 irq->priority < vmcr.pmr;
928 spin_unlock(&irq->irq_lock); 929 raw_spin_unlock(&irq->irq_lock);
929 930
930 if (pending) 931 if (pending)
931 break; 932 break;
932 } 933 }
933 934
934 spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); 935 raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
935 936
936 return pending; 937 return pending;
937} 938}
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
963 return false; 964 return false;
964 965
965 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 966 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
966 spin_lock_irqsave(&irq->irq_lock, flags); 967 raw_spin_lock_irqsave(&irq->irq_lock, flags);
967 map_is_active = irq->hw && irq->active; 968 map_is_active = irq->hw && irq->active;
968 spin_unlock_irqrestore(&irq->irq_lock, flags); 969 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
969 vgic_put_irq(vcpu->kvm, irq); 970 vgic_put_irq(vcpu->kvm, irq);
970 971
971 return map_is_active; 972 return map_is_active;
972} 973}
973