 142 files changed, 1175 insertions(+), 735 deletions(-)
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index fe46d4867e2d..18c1415e7bfa 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -7,7 +7,7 @@ Intro
 =====
 
 The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
-The feature is currently implemented for TCP sockets.
+The feature is currently implemented for TCP and UDP sockets.
 
 
 Opportunity and Caveats
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 664e918e2624..26524b75970a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1400,6 +1400,7 @@ config NR_CPUS
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
 	depends on SMP
+	select GENERIC_IRQ_MIGRATION
 	help
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 04758a2a87f0..67d77eee9433 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -644,6 +644,17 @@
 	};
 };
 
+/* Configure pwm clock source for timers 8 & 9 */
+&timer8 {
+	assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
+	assigned-clock-parents = <&sys_clkin_ck>;
+};
+
+&timer9 {
+	assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
+	assigned-clock-parents = <&sys_clkin_ck>;
+};
+
 /*
  * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
  * uart1 wakeirq.
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index bc853ebeda22..61a06f6add3c 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -317,7 +317,8 @@
 
 	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
 		pinctrl-single,pins = <
-			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+			/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
 		>;
 	};
 
@@ -385,7 +386,8 @@
 
 	palmas: palmas@48 {
 		compatible = "ti,palmas";
-		interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
+		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
 		reg = <0x48>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
@@ -651,7 +653,8 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&twl6040_pins>;
 
-	interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
+	/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+	interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
 
 	/* audpwron gpio defined in the board specific dts */
 
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5e21fb430a65..e78d3718f145 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -181,6 +181,13 @@
 			OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
 		>;
 	};
+
+	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+		pinctrl-single,pins = <
+			/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
+		>;
+	};
 };
 
 &omap5_pmx_core {
@@ -414,8 +421,11 @@
 
 	palmas: palmas@48 {
 		compatible = "ti,palmas";
-		interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
 		reg = <0x48>;
+		pinctrl-0 = <&palmas_sys_nirq_pins>;
+		pinctrl-names = "default";
+		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,system-power-controller;
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 4acb501dd3f8..3ed49898f4b2 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -719,7 +719,6 @@
 			pm_qos = <&qos_lcdc0>,
 				 <&qos_lcdc1>,
 				 <&qos_cif0>,
-				 <&qos_cif1>,
 				 <&qos_ipp>,
 				 <&qos_rga>;
 		};
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index c883fcbe93b6..46d41140df27 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -25,7 +25,6 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 struct pt_regs;
-extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index ca56537b61bc..50e89869178a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
 
+struct vcpu_reset_state {
+	unsigned long	pc;
+	unsigned long	r0;
+	bool		be;
+	bool		reset;
+};
+
 struct kvm_vcpu_arch {
 	struct kvm_cpu_context ctxt;
 
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
+	struct vcpu_reset_state reset_state;
+
 	/* Detect first run of a vcpu */
 	bool has_run_once;
 };
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index c4b1d4fb1797..de2089501b8b 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
 #define S2_PMD_MASK	PMD_MASK
 #define S2_PMD_SIZE	PMD_SIZE
 
+static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
+{
+	return true;
+}
+
 #endif	/* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9908dacf9229..844861368cd5 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
 	return nr_irqs;
 }
 #endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-	struct irq_chip *c;
-	bool ret = false;
-
-	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
-	 */
-	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-		return false;
-
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-		affinity = cpu_online_mask;
-		ret = true;
-	}
-
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-	return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-	unsigned int i;
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	for_each_irq_desc(i, desc) {
-		bool affinity_broken;
-
-		raw_spin_lock(&desc->lock);
-		affinity_broken = migrate_one_irq(desc);
-		raw_spin_unlock(&desc->lock);
-
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-				i, smp_processor_id());
-	}
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3bf82232b1be..1d6f5ea522f4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -254,7 +254,7 @@ int __cpu_disable(void)
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
-	migrate_irqs();
+	irq_migrate_all_off_this_cpu();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 222c1635bc7a..e8bd288fd5be 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 		reset_coproc_regs(vcpu, table, num);
 
 	for (num = 1; num < NR_CP15_REGS; num++)
-		if (vcpu_cp15(vcpu, num) == 0x42424242)
-			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
+		WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+		     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 5ed0c3ee33d6..e53327912adc 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
 #include <asm/cputype.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 
 #include <kvm/arm_arch_timer.h>
 
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset CP15 registers */
 	kvm_reset_coprocs(vcpu);
 
+	/*
+	 * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all the sys_reg reset.
+	 */
+	if (READ_ONCE(vcpu->arch.reset_state.reset)) {
+		unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+		/* Gracefully handle Thumb2 entry point */
+		if (target_pc & 1) {
+			target_pc &= ~1UL;
+			vcpu_set_thumb(vcpu);
+		}
+
+		/* Propagate caller endianness */
+		if (vcpu->arch.reset_state.be)
+			kvm_vcpu_set_be(vcpu);
+
+		*vcpu_pc(vcpu) = target_pc;
+		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+		vcpu->arch.reset_state.reset = false;
+	}
+
 	/* Reset arch_timer context */
 	return kvm_timer_vcpu_reset(vcpu);
 }
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index a8b291f00109..dae514c8276a 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
 				 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
+	/* Enter broadcast mode for periodic timers */
+	tick_broadcast_enable();
+
+	/* Enter broadcast mode for one-shot timers */
 	tick_broadcast_enter();
 
 	/*
@@ -218,15 +222,6 @@ fail:
 	return index;
 }
 
-/*
- * For each cpu, setup the broadcast timer because local timers
- * stops for the states above C1.
- */
-static void omap_setup_broadcast_timer(void *arg)
-{
-	tick_broadcast_enable();
-}
-
 static struct cpuidle_driver omap4_idle_driver = {
 	.name				= "omap4_idle",
 	.owner				= THIS_MODULE,
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
 	if (!cpu_clkdm[0] || !cpu_clkdm[1])
 		return -ENODEV;
 
-	/* Configure the broadcast timer on each cpu */
-	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
-
 	return cpuidle_register(idle_driver, cpu_online_mask);
 }
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index f86b72d1d59e..1444b4b4bd9f 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
 	u32 enable_mask, enable_shift;
 	u32 pipd_mask, pipd_shift;
 	u32 reg;
+	int ret;
 
 	if (dsi_id == 0) {
 		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
 		return -ENODEV;
 	}
 
-	regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
+	ret = regmap_read(omap4_dsi_mux_syscon,
+			  OMAP4_DSIPHY_SYSCON_OFFSET,
+			  &reg);
+	if (ret)
+		return ret;
 
 	reg &= ~enable_mask;
 	reg &= ~pipd_mask;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index fc5fb776a710..17558be4bf0a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -50,6 +50,9 @@
 #define OMAP4_NR_BANKS		4
 #define OMAP4_NR_IRQS		128
 
+#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
+#define SYS_NIRQ2_EXT_SYS_IRQ_2	119
+
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
 	irq_chip_unmask_parent(d);
 }
 
+/*
+ * The sys_nirq pins bypass peripheral modules and are wired directly
+ * to MPUSS wakeupgen. They get automatically inverted for GIC.
+ */
+static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	bool inverted = false;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_LOW:
+		type &= ~IRQ_TYPE_LEVEL_MASK;
+		type |= IRQ_TYPE_LEVEL_HIGH;
+		inverted = true;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		type &= ~IRQ_TYPE_EDGE_BOTH;
+		type |= IRQ_TYPE_EDGE_RISING;
+		inverted = true;
+		break;
+	default:
+		break;
+	}
+
+	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
+	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
+		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
+			d->hwirq);
+
+	return irq_chip_set_type_parent(d, type);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
 
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
 	.irq_mask		= wakeupgen_mask,
 	.irq_unmask		= wakeupgen_unmask,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
-	.irq_set_type		= irq_chip_set_type_parent,
+	.irq_set_type		= wakeupgen_irq_set_type,
 	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1e2922e447c..1e3e08a1c456 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev)
 		return;
 
 	arm_teardown_iommu_dma_ops(dev);
+	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
+	set_dma_ops(dev, NULL);
 }
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 2c118a6ab358..0dc23fc227ed 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 	}
 
 	/* Copy arch-dep-instance from template. */
-	memcpy(code, (unsigned char *)optprobe_template_entry,
+	memcpy(code, (unsigned long *)&optprobe_template_entry,
 	       TMPL_END_IDX * sizeof(kprobe_opcode_t));
 
 	/* Adjust buffer according to instruction. */
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 64acccc4bfcb..f74b13aa5aa5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -227,34 +227,34 @@
 
 	pinctrl_usdhc1_100mhz: usdhc1-100grp {
 		fsl,pins = <
-			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK		0x85
-			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD		0xc5
-			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0	0xc5
-			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1	0xc5
-			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2	0xc5
-			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3	0xc5
-			MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4	0xc5
-			MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5	0xc5
-			MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6	0xc5
-			MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7	0xc5
-			MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE	0x85
+			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK		0x8d
+			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD		0xcd
+			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0	0xcd
+			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1	0xcd
+			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2	0xcd
+			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3	0xcd
+			MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4	0xcd
+			MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5	0xcd
+			MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6	0xcd
+			MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7	0xcd
+			MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE	0x8d
 			MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B	0xc1
 		>;
 	};
 
 	pinctrl_usdhc1_200mhz: usdhc1-200grp {
 		fsl,pins = <
-			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK		0x87
-			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD		0xc7
-			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0	0xc7
-			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1	0xc7
-			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2	0xc7
-			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3	0xc7
-			MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4	0xc7
-			MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5	0xc7
-			MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6	0xc7
-			MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7	0xc7
-			MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE	0x87
+			MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK		0x9f
+			MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD		0xdf
+			MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0	0xdf
+			MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1	0xdf
+			MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2	0xdf
+			MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3	0xdf
+			MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4	0xdf
+			MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5	0xdf
+			MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6	0xdf
+			MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7	0xdf
+			MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE	0x9f
 			MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B	0xc1
 		>;
 	};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 8e9d6d5ed7b2..b6d31499fb43 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -360,6 +360,8 @@
 				 <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
 				 <&clk IMX8MQ_CLK_USDHC1_ROOT>;
 			clock-names = "ipg", "ahb", "per";
+			assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
+			assigned-clock-rates = <400000000>;
 			fsl,tuning-start-tap = <20>;
 			fsl,tuning-step = <2>;
 			bus-width = <4>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index bd937d68ca3b..040b36ef0dd2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -40,6 +40,7 @@
 		pinctrl-0 = <&usb30_host_drv>;
 		regulator-name = "vcc_host_5v";
 		regulator-always-on;
+		regulator-boot-on;
 		vin-supply = <&vcc_sys>;
 	};
 
@@ -51,6 +52,7 @@
 		pinctrl-0 = <&usb20_host_drv>;
 		regulator-name = "vcc_host1_5v";
 		regulator-always-on;
+		regulator-boot-on;
 		vin-supply = <&vcc_sys>;
 	};
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 1ee0dc0d9f10..d1cf404b8708 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -22,7 +22,7 @@
 		backlight = <&backlight>;
 		power-supply = <&pp3300_disp>;
 
-		ports {
+		port {
 			panel_in_edp: endpoint {
 				remote-endpoint = <&edp_out_panel>;
 			};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index 81e73103fa78..15e254a77391 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -43,7 +43,7 @@
 		backlight = <&backlight>;
 		power-supply = <&pp3300_disp>;
 
-		ports {
+		port {
 			panel_in_edp: endpoint {
 				remote-endpoint = <&edp_out_panel>;
 			};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index 0b8f1edbd746..b48a63c3efc3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -91,7 +91,7 @@
 		pinctrl-0 = <&lcd_panel_reset>;
 		power-supply = <&vcc3v3_s0>;
 
-		ports {
+		port {
 			panel_in_edp: endpoint {
 				remote-endpoint = <&edp_out_panel>;
 			};
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7732d0ba4e60..da3fc7324d68 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
 #define KVM_REQ_SLEEP \
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -208,6 +209,13 @@ struct kvm_cpu_context {
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
 
+struct vcpu_reset_state {
+	unsigned long	pc;
+	unsigned long	r0;
+	bool		be;
+	bool		reset;
+};
+
 struct kvm_vcpu_arch {
 	struct kvm_cpu_context ctxt;
 
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch {
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 
+	/* Additional reset state */
+	struct vcpu_reset_state	reset_state;
+
 	/* True when deferrable sysregs are loaded on the physical CPU,
 	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
 	bool sysregs_loaded_on_cpu;
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index e1ec947e7c0c..0c656850eeea 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define virt_addr_valid(kaddr)		\
 	(_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
 
+/*
+ * Given that the GIC architecture permits ITS implementations that can only be
+ * configured with a LPI table address once, GICv3 systems with many CPUs may
+ * end up reserving a lot of different regions after a kexec for their LPI
+ * tables (one per CPU), as we are forced to reuse the same memory after kexec
+ * (and thus reserve it persistently with EFI beforehand)
+ */
+#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
+# define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
+#endif
+
 #include <asm-generic/memory_model.h>
 
 #endif
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 4b0e1231625c..d09ec76f08cf 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p)
 	arm64_memblock_init();
 
 	paging_init();
-	efi_apply_persistent_mem_reservations();
 
 	acpi_table_upgrade();
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index b0b1478094b4..421ebf6f7086 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
 #include <kvm/arm_psci.h>
 
 #include <asm/cpufeature.h>
+#include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
 
 	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
 }
+NOKPROBE_SYMBOL(activate_traps_vhe);
 
 static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 {
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void)
 	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
 	write_sysreg(vectors, vbar_el1);
 }
+NOKPROBE_SYMBOL(deactivate_traps_vhe);
 
 static void __hyp_text __deactivate_traps_nvhe(void)
 {
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
 	return exit_code;
 }
+NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
 
 /* Switch to the guest for legacy non-VHE systems */
 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
 		      read_sysreg_el2(esr),   read_sysreg_el2(far),
 		      read_sysreg(hpfar_el2), par, vcpu);
 }
+NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
 
 void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 {
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 68d6f7c3b237..b426e2cf973c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -18,6 +18,7 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 
+#include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
 {
 	__sysreg_save_common_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
 
 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
 {
 	__sysreg_save_common_state(ctxt);
 	__sysreg_save_el2_return_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
 
 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
 {
 	__sysreg_restore_common_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
 
 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
 {
 	__sysreg_restore_common_state(ctxt);
 	__sysreg_restore_el2_return_state(ctxt);
 }
+NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
 
 void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b72a3dd56204..f16a5f8ff2b4 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 
 /* Maximum phys_shift supported for any VM on this host */
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
  * This function finds the right table above and sets the registers on
  * the virtual CPU struct to their architecturally defined reset
  * values.
+ *
+ * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
+ * ioctl or as part of handling a request issued by another VCPU in the PSCI
+ * handling code.  In the first case, the VCPU will not be loaded, and in the
+ * second case the VCPU will be loaded.  Because this function operates purely
+ * on the memory-backed values of system registers, we want to do a full put if
+ * we were loaded (handling a request) and load the values back at the end of
+ * the function.  Otherwise we leave the state alone.  In both cases, we
+ * disable preemption around the vcpu reset as we would otherwise race with
+ * preempt notifiers which also call put/load.
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
 	const struct kvm_regs *cpu_reset;
+	int ret = -EINVAL;
+	bool loaded;
+
+	preempt_disable();
+	loaded = (vcpu->cpu != -1);
+	if (loaded)
+		kvm_arch_vcpu_put(vcpu);
 
 	switch (vcpu->arch.target) {
 	default:
 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
 			if (!cpu_has_32bit_el1())
-				return -EINVAL;
+				goto out;
 			cpu_reset = &default_regs_reset32;
 		} else {
 			cpu_reset = &default_regs_reset;
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset system registers */
 	kvm_reset_sys_regs(vcpu);
 
+	/*
+	 * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all the sys_reg reset.
+	 */
+	if (vcpu->arch.reset_state.reset) {
+		unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+		/* Gracefully handle Thumb2 entry point */
+		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+			target_pc &= ~1UL;
+			vcpu_set_thumb(vcpu);
+		}
+
+		/* Propagate caller endianness */
+		if (vcpu->arch.reset_state.be)
+			kvm_vcpu_set_be(vcpu);
+
+		*vcpu_pc(vcpu) = target_pc;
+		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+		vcpu->arch.reset_state.reset = false;
+	}
+
 	/* Reset PMU */
 	kvm_pmu_vcpu_reset(vcpu);
 
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
 
 	/* Reset timer */
-	return kvm_timer_vcpu_reset(vcpu);
+	ret = kvm_timer_vcpu_reset(vcpu);
+out:
+	if (loaded)
+		kvm_arch_vcpu_load(vcpu, smp_processor_id());
+	preempt_enable();
+	return ret;
 }
 
 void kvm_set_ipa_limit(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e3e37228ae4e..c936aa40c3f4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 	return read_zero(vcpu, p);
 }
 
-static bool trap_undef(struct kvm_vcpu *vcpu,
-		       struct sys_reg_params *p,
-		       const struct sys_reg_desc *r)
+/*
+ * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
+ * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
+ * system, these registers should UNDEF. LORID_EL1 being a RO register, we
+ * treat it separately.
+ */
+static bool trap_loregion(struct kvm_vcpu *vcpu,
+			  struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
 {
-	kvm_inject_undefined(vcpu);
-	return false;
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
+			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+
+	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
+	if (p->is_write && sr == SYS_LORID_EL1)
+		return write_to_read_only(vcpu, p, r);
+
+	return trap_raz_wi(vcpu, p, r);
 }
 
 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
 		if (val & ptrauth_mask)
 			kvm_debug("ptrauth unsupported for guests, suppressing\n");
 		val &= ~ptrauth_mask;
-	} else if (id == SYS_ID_AA64MMFR1_EL1) {
-		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
-			kvm_debug("LORegions unsupported for guests, suppressing\n");
-
-		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
 	}
 
 	return val;
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
 
-	{ SYS_DESC(SYS_LORSA_EL1), trap_undef },
-	{ SYS_DESC(SYS_LOREA_EL1), trap_undef },
-	{ SYS_DESC(SYS_LORN_EL1), trap_undef },
-	{ SYS_DESC(SYS_LORC_EL1), trap_undef },
-	{ SYS_DESC(SYS_LORID_EL1), trap_undef },
+	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
+	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
+	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
+	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
+	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
 
 	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 	table = get_target_table(vcpu->arch.target, true, &num);
 	reset_sys_reg_descs(vcpu, table, num);
 
-	for (num = 1; num < NR_SYS_REGS; num++)
-		if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
-			panic("Didn't reset __vcpu_sys_reg(%zi)", num);
+	for (num = 1; num < NR_SYS_REGS; num++) {
+		if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
+			break;
+	}
 }
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index edfcbb25fd9f..dcea277c09ae 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -45,8 +45,8 @@
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
 #define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
 #define pte_clear(mm, addr, ptep)	set_pte((ptep), \
-	(((unsigned int)addr&0x80000000)?__pte(1):__pte(0)))
-#define pte_none(pte)	(!(pte_val(pte)&0xfffffffe))
+	(((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
+#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
 #define pte_pfn(x)	((unsigned long)((x).pte_low >> PAGE_SHIFT))
 #define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
 
 #define pgd_index(address)	((address) >> PGDIR_SHIFT)
 
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+
 /*
  * Macro to make mark a page protection value as "uncacheable".  Note
  * that "protection" is really a misnomer here as the protection value
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 8f454810514f..21e0bd5293dd 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -49,7 +49,7 @@ struct thread_struct {
 };
 
 #define INIT_THREAD  { \
-	.ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \
+	.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
 	.sr = DEFAULT_PSR_VALUE, \
 }
 
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->usp)
 
 #define task_pt_regs(p) \
-	((struct pt_regs *)(THREAD_SIZE + p->stack) - 1)
+	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
 #define cpu_relax() barrier()
 
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c
index 659253e9989c..d67f9777cfd9 100644
--- a/arch/csky/kernel/dumpstack.c
+++ b/arch/csky/kernel/dumpstack.c
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
 		if (task)
 			stack = (unsigned long *)thread_saved_fp(task);
 		else
+#ifdef CONFIG_STACKTRACE
+			asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
+#else
 			stack = (unsigned long *)&stack;
+#endif
 	}
 
 	show_trace(stack);
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 57f1afe19a52..f2f12fff36f7 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -8,6 +8,7 @@
 #include <linux/ptrace.h>
 #include <linux/regset.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
 #include <linux/uaccess.h>
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target,
 static const struct user_regset csky_regsets[] = {
 	[REGSET_GPR] = {
 		.core_note_type = NT_PRSTATUS,
-		.n = ELF_NGREG,
+		.n = sizeof(struct pt_regs) / sizeof(u32),
 		.size = sizeof(u32),
 		.align = sizeof(u32),
 		.get = &gpr_get,
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index ddc4dd79f282..b07a534b3062 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	unsigned long mask = 1 << cpu;
 
-	secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8;
+	secondary_stack =
+		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
 	secondary_hint = mfcr("cr31");
 	secondary_ccr  = mfcr("cr18");
 
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
index cb7c03e5cd21..8473b6bdf512 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr)
 	vunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 EXPORT_SYMBOL(iounmap);
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (!pfn_valid(pfn)) {
+		vma_prot.pgprot |= _PAGE_SO;
+		return pgprot_noncached(vma_prot);
+	} else if (file->f_flags & O_SYNC) {
+		return pgprot_noncached(vma_prot);
+	}
+
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index b16710a8a9e7..76e9bf88d3b9 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c | |||
@@ -79,8 +79,6 @@ enum reg_val_type { | |||
79 | REG_64BIT_32BIT, | 79 | REG_64BIT_32BIT, |
80 | /* 32-bit compatible, need truncation for 64-bit ops. */ | 80 | /* 32-bit compatible, need truncation for 64-bit ops. */ |
81 | REG_32BIT, | 81 | REG_32BIT, |
82 | /* 32-bit zero extended. */ | ||
83 | REG_32BIT_ZERO_EX, | ||
84 | /* 32-bit no sign/zero extension needed. */ | 82 | /* 32-bit no sign/zero extension needed. */ |
85 | REG_32BIT_POS | 83 | REG_32BIT_POS |
86 | }; | 84 | }; |
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) | |||
343 | const struct bpf_prog *prog = ctx->skf; | 341 | const struct bpf_prog *prog = ctx->skf; |
344 | int stack_adjust = ctx->stack_size; | 342 | int stack_adjust = ctx->stack_size; |
345 | int store_offset = stack_adjust - 8; | 343 | int store_offset = stack_adjust - 8; |
344 | enum reg_val_type td; | ||
346 | int r0 = MIPS_R_V0; | 345 | int r0 = MIPS_R_V0; |
347 | 346 | ||
348 | if (dest_reg == MIPS_R_RA && | 347 | if (dest_reg == MIPS_R_RA) { |
349 | get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) | ||
350 | /* Don't let zero extended value escape. */ | 348 | /* Don't let zero extended value escape. */ |
351 | emit_instr(ctx, sll, r0, r0, 0); | 349 | td = get_reg_val_type(ctx, prog->len, BPF_REG_0); |
350 | if (td == REG_64BIT) | ||
351 | emit_instr(ctx, sll, r0, r0, 0); | ||
352 | } | ||
352 | 353 | ||
353 | if (ctx->flags & EBPF_SAVE_RA) { | 354 | if (ctx->flags & EBPF_SAVE_RA) { |
354 | emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); | 355 | emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); |
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
692 | if (dst < 0) | 693 | if (dst < 0) |
693 | return dst; | 694 | return dst; |
694 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 695 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
695 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 696 | if (td == REG_64BIT) { |
696 | /* sign extend */ | 697 | /* sign extend */ |
697 | emit_instr(ctx, sll, dst, dst, 0); | 698 | emit_instr(ctx, sll, dst, dst, 0); |
698 | } | 699 | } |
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
707 | if (dst < 0) | 708 | if (dst < 0) |
708 | return dst; | 709 | return dst; |
709 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 710 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
710 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 711 | if (td == REG_64BIT) { |
711 | /* sign extend */ | 712 | /* sign extend */ |
712 | emit_instr(ctx, sll, dst, dst, 0); | 713 | emit_instr(ctx, sll, dst, dst, 0); |
713 | } | 714 | } |
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
721 | if (dst < 0) | 722 | if (dst < 0) |
722 | return dst; | 723 | return dst; |
723 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 724 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
724 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) | 725 | if (td == REG_64BIT) |
725 | /* sign extend */ | 726 | /* sign extend */ |
726 | emit_instr(ctx, sll, dst, dst, 0); | 727 | emit_instr(ctx, sll, dst, dst, 0); |
727 | if (insn->imm == 1) { | 728 | if (insn->imm == 1) { |
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
860 | if (src < 0 || dst < 0) | 861 | if (src < 0 || dst < 0) |
861 | return -EINVAL; | 862 | return -EINVAL; |
862 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 863 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
863 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 864 | if (td == REG_64BIT) { |
864 | /* sign extend */ | 865 | /* sign extend */ |
865 | emit_instr(ctx, sll, dst, dst, 0); | 866 | emit_instr(ctx, sll, dst, dst, 0); |
866 | } | 867 | } |
867 | did_move = false; | 868 | did_move = false; |
868 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); | 869 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); |
869 | if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { | 870 | if (ts == REG_64BIT) { |
870 | int tmp_reg = MIPS_R_AT; | 871 | int tmp_reg = MIPS_R_AT; |
871 | 872 | ||
872 | if (bpf_op == BPF_MOV) { | 873 | if (bpf_op == BPF_MOV) { |
@@ -1254,8 +1255,7 @@ jeq_common: | |||
1254 | if (insn->imm == 64 && td == REG_32BIT) | 1255 | if (insn->imm == 64 && td == REG_32BIT) |
1255 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | 1256 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); |
1256 | 1257 | ||
1257 | if (insn->imm != 64 && | 1258 | if (insn->imm != 64 && td == REG_64BIT) { |
1258 | (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { | ||
1259 | /* sign extend */ | 1259 | /* sign extend */ |
1260 | emit_instr(ctx, sll, dst, dst, 0); | 1260 | emit_instr(ctx, sll, dst, dst, 0); |
1261 | } | 1261 | } |
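Note on the ebpf_jit.c hunks above: with REG_32BIT_ZERO_EX dropped from the type lattice, the epilogue now sign-extends R0 whenever its tracked type is REG_64BIT. On MIPS64 the canonical in-register form of a 32-bit value is the sign-extension of its low 32 bits, and "sll r, r, 0" produces exactly that form, which is what "don't let zero extended value escape" refers to. A minimal standalone sketch of the same canonicalization in C (illustrative values, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t canonicalize32(uint64_t r)
{
        return (uint64_t)(int64_t)(int32_t)r;   /* the effect of sll r,r,0 */
}

int main(void)
{
        uint64_t zero_extended = 0x00000000deadbeefULL;

        /* Prints ffffffffdeadbeef: the sign-extended, canonical form. */
        printf("%016llx\n", (unsigned long long)canonicalize32(zero_extended));
        return 0;
}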
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index c9bfe526ca9d..d8c8d7c9df15 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h | |||
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud) | |||
904 | 904 | ||
905 | static inline int pud_present(pud_t pud) | 905 | static inline int pud_present(pud_t pud) |
906 | { | 906 | { |
907 | return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); | 907 | return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); |
908 | } | 908 | } |
909 | 909 | ||
910 | extern struct page *pud_page(pud_t pud); | 910 | extern struct page *pud_page(pud_t pud); |
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd) | |||
951 | 951 | ||
952 | static inline int pgd_present(pgd_t pgd) | 952 | static inline int pgd_present(pgd_t pgd) |
953 | { | 953 | { |
954 | return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); | 954 | return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); |
955 | } | 955 | } |
956 | 956 | ||
957 | static inline pte_t pgd_pte(pgd_t pgd) | 957 | static inline pte_t pgd_pte(pgd_t pgd) |
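Note: the added !! is a functional fix, not style. On book3s64 _PAGE_PRESENT lives in the high bits of the big-endian PTE word, so returning the raw 64-bit AND through the functions' int return type truncates to the low 32 bits and can report a present pud/pgd as absent. A standalone sketch of the truncation (the bit position is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT (1ULL << 63)       /* illustrative: a high bit, as on ppc64 */

static int present_buggy(uint64_t raw)
{
        return (raw & PAGE_PRESENT);    /* truncated to int: 0 on common LP64 targets */
}

static int present_fixed(uint64_t raw)
{
        return !!(raw & PAGE_PRESENT);  /* normalize to 0/1 before the narrowing */
}

int main(void)
{
        uint64_t raw = PAGE_PRESENT;

        printf("buggy=%d fixed=%d\n", present_buggy(raw), present_fixed(raw));
        return 0;
}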
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 374a19712e20..b684f0294f35 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void) | |||
2278 | x86_pmu.check_microcode(); | 2278 | x86_pmu.check_microcode(); |
2279 | } | 2279 | } |
2280 | 2280 | ||
2281 | static int x86_pmu_check_period(struct perf_event *event, u64 value) | ||
2282 | { | ||
2283 | if (x86_pmu.check_period && x86_pmu.check_period(event, value)) | ||
2284 | return -EINVAL; | ||
2285 | |||
2286 | if (value && x86_pmu.limit_period) { | ||
2287 | if (x86_pmu.limit_period(event, value) > value) | ||
2288 | return -EINVAL; | ||
2289 | } | ||
2290 | |||
2291 | return 0; | ||
2292 | } | ||
2293 | |||
2281 | static struct pmu pmu = { | 2294 | static struct pmu pmu = { |
2282 | .pmu_enable = x86_pmu_enable, | 2295 | .pmu_enable = x86_pmu_enable, |
2283 | .pmu_disable = x86_pmu_disable, | 2296 | .pmu_disable = x86_pmu_disable, |
@@ -2302,6 +2315,7 @@ static struct pmu pmu = { | |||
2302 | .event_idx = x86_pmu_event_idx, | 2315 | .event_idx = x86_pmu_event_idx, |
2303 | .sched_task = x86_pmu_sched_task, | 2316 | .sched_task = x86_pmu_sched_task, |
2304 | .task_ctx_size = sizeof(struct x86_perf_task_context), | 2317 | .task_ctx_size = sizeof(struct x86_perf_task_context), |
2318 | .check_period = x86_pmu_check_period, | ||
2305 | }; | 2319 | }; |
2306 | 2320 | ||
2307 | void arch_perf_update_userpage(struct perf_event *event, | 2321 | void arch_perf_update_userpage(struct perf_event *event, |
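Note: x86_pmu_check_period() uses the usual optional-hook shape: consult the vendor callback only when one is installed, and collapse any objection to -EINVAL on the PERF_EVENT_IOC_PERIOD path; it also refuses a nonzero period that limit_period would have to raise. A standalone sketch of the hook pattern (names hypothetical, not the kernel's types):

#include <errno.h>
#include <stdio.h>

struct pmu_ops {
        /* Optional vendor hook; nonzero means "this period is unusable". */
        int (*check_period)(unsigned long long value);
};

static int vendor_check_period(unsigned long long value)
{
        return value == 1;      /* e.g. a period the hardware cannot program */
}

static int generic_check_period(const struct pmu_ops *ops,
                                unsigned long long value)
{
        if (ops->check_period && ops->check_period(value))
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct pmu_ops ops = { .check_period = vendor_check_period };

        printf("%d %d\n", generic_check_period(&ops, 1),
               generic_check_period(&ops, 1000));       /* -22 then 0 */
        return 0;
}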
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index daafb893449b..730978dff63f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, | |||
3587 | intel_pmu_lbr_sched_task(ctx, sched_in); | 3587 | intel_pmu_lbr_sched_task(ctx, sched_in); |
3588 | } | 3588 | } |
3589 | 3589 | ||
3590 | static int intel_pmu_check_period(struct perf_event *event, u64 value) | ||
3591 | { | ||
3592 | return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; | ||
3593 | } | ||
3594 | |||
3590 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); | 3595 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); |
3591 | 3596 | ||
3592 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); | 3597 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); |
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = { | |||
3667 | .cpu_starting = intel_pmu_cpu_starting, | 3672 | .cpu_starting = intel_pmu_cpu_starting, |
3668 | .cpu_dying = intel_pmu_cpu_dying, | 3673 | .cpu_dying = intel_pmu_cpu_dying, |
3669 | .cpu_dead = intel_pmu_cpu_dead, | 3674 | .cpu_dead = intel_pmu_cpu_dead, |
3675 | |||
3676 | .check_period = intel_pmu_check_period, | ||
3670 | }; | 3677 | }; |
3671 | 3678 | ||
3672 | static struct attribute *intel_pmu_attrs[]; | 3679 | static struct attribute *intel_pmu_attrs[]; |
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
3711 | 3718 | ||
3712 | .guest_get_msrs = intel_guest_get_msrs, | 3719 | .guest_get_msrs = intel_guest_get_msrs, |
3713 | .sched_task = intel_pmu_sched_task, | 3720 | .sched_task = intel_pmu_sched_task, |
3721 | |||
3722 | .check_period = intel_pmu_check_period, | ||
3714 | }; | 3723 | }; |
3715 | 3724 | ||
3716 | static __init void intel_clovertown_quirk(void) | 3725 | static __init void intel_clovertown_quirk(void) |
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78d7b7031bfc..d46fd6754d92 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
@@ -646,6 +646,11 @@ struct x86_pmu { | |||
646 | * Intel host/guest support (KVM) | 646 | * Intel host/guest support (KVM) |
647 | */ | 647 | */ |
648 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | 648 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); |
649 | |||
650 | /* | ||
651 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. | ||
652 | */ | ||
653 | int (*check_period) (struct perf_event *event, u64 period); | ||
649 | }; | 654 | }; |
650 | 655 | ||
651 | struct x86_perf_task_context { | 656 | struct x86_perf_task_context { |
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void) | |||
857 | 862 | ||
858 | #ifdef CONFIG_CPU_SUP_INTEL | 863 | #ifdef CONFIG_CPU_SUP_INTEL |
859 | 864 | ||
860 | static inline bool intel_pmu_has_bts(struct perf_event *event) | 865 | static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
861 | { | 866 | { |
862 | struct hw_perf_event *hwc = &event->hw; | 867 | struct hw_perf_event *hwc = &event->hw; |
863 | unsigned int hw_event, bts_event; | 868 | unsigned int hw_event, bts_event; |
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) | |||
868 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; | 873 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; |
869 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); | 874 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); |
870 | 875 | ||
871 | return hw_event == bts_event && hwc->sample_period == 1; | 876 | return hw_event == bts_event && period == 1; |
877 | } | ||
878 | |||
879 | static inline bool intel_pmu_has_bts(struct perf_event *event) | ||
880 | { | ||
881 | struct hw_perf_event *hwc = &event->hw; | ||
882 | |||
883 | return intel_pmu_has_bts_period(event, hwc->sample_period); | ||
872 | } | 884 | } |
873 | 885 | ||
874 | int intel_pmu_save_and_restart(struct perf_event *event); | 886 | int intel_pmu_save_and_restart(struct perf_event *event); |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index f65b78d32f5e..7dbbe9ffda17 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n) | |||
51 | /* | 51 | /* |
52 | * fill in the user structure for a core dump.. | 52 | * fill in the user structure for a core dump.. |
53 | */ | 53 | */ |
54 | static void dump_thread32(struct pt_regs *regs, struct user32 *dump) | 54 | static void fill_dump(struct pt_regs *regs, struct user32 *dump) |
55 | { | 55 | { |
56 | u32 fs, gs; | 56 | u32 fs, gs; |
57 | memset(dump, 0, sizeof(*dump)); | 57 | memset(dump, 0, sizeof(*dump)); |
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm) | |||
157 | fs = get_fs(); | 157 | fs = get_fs(); |
158 | set_fs(KERNEL_DS); | 158 | set_fs(KERNEL_DS); |
159 | has_dumped = 1; | 159 | has_dumped = 1; |
160 | |||
161 | fill_dump(cprm->regs, &dump); | ||
162 | |||
160 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); | 163 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); |
161 | dump.u_ar0 = offsetof(struct user32, regs); | 164 | dump.u_ar0 = offsetof(struct user32, regs); |
162 | dump.signal = cprm->siginfo->si_signo; | 165 | dump.signal = cprm->siginfo->si_signo; |
163 | dump_thread32(cprm->regs, &dump); | ||
164 | 166 | ||
165 | /* | 167 | /* |
166 | * If the size of the dump file exceeds the rlimit, then see | 168 | * If the size of the dump file exceeds the rlimit, then see |
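Note: the reordering matters because the renamed fill_dump() begins with memset(dump, 0, sizeof(*dump)); calling it after u_comm, u_ar0 and signal were assigned silently wiped those fields from the dump header. A standalone sketch of the bug class:

#include <stdio.h>
#include <string.h>

struct dump { char comm[16]; int signal; long regs[4]; };

static void fill_dump(struct dump *d)
{
        memset(d, 0, sizeof(*d));       /* wipes everything written so far */
        d->regs[0] = 42;
}

int main(void)
{
        struct dump d;

        strncpy(d.comm, "a.out", sizeof(d.comm));
        d.signal = 11;
        fill_dump(&d);                  /* buggy order: metadata is gone */
        printf("buggy: comm='%s' signal=%d\n", d.comm, d.signal);

        fill_dump(&d);                  /* fixed order: fill first... */
        strncpy(d.comm, "a.out", sizeof(d.comm));
        d.signal = 11;                  /* ...then the metadata survives */
        printf("fixed: comm='%s' signal=%d\n", d.comm, d.signal);
        return 0;
}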
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index d9a9993af882..9f15384c504a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h | |||
@@ -52,6 +52,8 @@ | |||
52 | 52 | ||
53 | #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 | 53 | #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 |
54 | 54 | ||
55 | #define INTEL_FAM6_ICELAKE_MOBILE 0x7E | ||
56 | |||
55 | /* "Small Core" Processors (Atom) */ | 57 | /* "Small Core" Processors (Atom) */ |
56 | 58 | ||
57 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ | 59 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ |
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index e652a7cc6186..3f697a9e3f59 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h | |||
@@ -48,7 +48,8 @@ enum { | |||
48 | BIOS_STATUS_SUCCESS = 0, | 48 | BIOS_STATUS_SUCCESS = 0, |
49 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, | 49 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, |
50 | BIOS_STATUS_EINVAL = -EINVAL, | 50 | BIOS_STATUS_EINVAL = -EINVAL, |
51 | BIOS_STATUS_UNAVAIL = -EBUSY | 51 | BIOS_STATUS_UNAVAIL = -EBUSY, |
52 | BIOS_STATUS_ABORT = -EINTR, | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | /* Address map parameters */ | 55 | /* Address map parameters */ |
@@ -167,4 +168,9 @@ extern long system_serial_number; | |||
167 | 168 | ||
168 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ | 169 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ |
169 | 170 | ||
171 | /* | ||
172 | * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details | ||
173 | */ | ||
174 | extern struct semaphore __efi_uv_runtime_lock; | ||
175 | |||
170 | #endif /* _ASM_X86_UV_BIOS_H */ | 176 | #endif /* _ASM_X86_UV_BIOS_H */ |
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d8ea4ebd79e7..d737a51a53ca 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c | |||
@@ -2473,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, | |||
2473 | (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) | 2473 | (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) |
2474 | return -EINVAL; | 2474 | return -EINVAL; |
2475 | 2475 | ||
2476 | if (!nested_cpu_has_preemption_timer(vmcs12) && | ||
2477 | nested_cpu_has_save_preemption_timer(vmcs12)) | ||
2478 | return -EINVAL; | ||
2479 | |||
2476 | if (nested_cpu_has_ept(vmcs12) && | 2480 | if (nested_cpu_has_ept(vmcs12) && |
2477 | !valid_ept_address(vcpu, vmcs12->ept_pointer)) | 2481 | !valid_ept_address(vcpu, vmcs12->ept_pointer)) |
2478 | return -EINVAL; | 2482 | return -EINVAL; |
@@ -5557,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, | |||
5557 | * secondary cpu-based controls. Do not include those that | 5561 | * secondary cpu-based controls. Do not include those that |
5558 | * depend on CPUID bits, they are added later by vmx_cpuid_update. | 5562 | * depend on CPUID bits, they are added later by vmx_cpuid_update. |
5559 | */ | 5563 | */ |
5560 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | 5564 | if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) |
5561 | msrs->secondary_ctls_low, | 5565 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, |
5562 | msrs->secondary_ctls_high); | 5566 | msrs->secondary_ctls_low, |
5567 | msrs->secondary_ctls_high); | ||
5568 | |||
5563 | msrs->secondary_ctls_low = 0; | 5569 | msrs->secondary_ctls_low = 0; |
5564 | msrs->secondary_ctls_high &= | 5570 | msrs->secondary_ctls_high &= |
5565 | SECONDARY_EXEC_DESC | | 5571 | SECONDARY_EXEC_DESC | |
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 95d618045001..30a6bcd735ec 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c | |||
@@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, | |||
863 | if (!entry_only) | 863 | if (!entry_only) |
864 | j = find_msr(&m->host, msr); | 864 | j = find_msr(&m->host, msr); |
865 | 865 | ||
866 | if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { | 866 | if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || |
867 | (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { | ||
867 | printk_once(KERN_WARNING "Not enough msr switch entries. " | 868 | printk_once(KERN_WARNING "Not enough msr switch entries. " |
868 | "Can't add msr %x\n", msr); | 869 | "Can't add msr %x\n", msr); |
869 | return; | 870 | return; |
@@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
1193 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) | 1194 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) |
1194 | return; | 1195 | return; |
1195 | 1196 | ||
1196 | /* | ||
1197 | * First handle the simple case where no cmpxchg is necessary; just | ||
1198 | * allow posting non-urgent interrupts. | ||
1199 | * | ||
1200 | * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change | ||
1201 | * PI.NDST: pi_post_block will do it for us and the wakeup_handler | ||
1202 | * expects the VCPU to be on the blocked_vcpu_list that matches | ||
1203 | * PI.NDST. | ||
1204 | */ | ||
1205 | if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || | ||
1206 | vcpu->cpu == cpu) { | ||
1207 | pi_clear_sn(pi_desc); | ||
1208 | return; | ||
1209 | } | ||
1210 | |||
1211 | /* The full case. */ | 1197 | /* The full case. */ |
1212 | do { | 1198 | do { |
1213 | old.control = new.control = pi_desc->control; | 1199 | old.control = new.control = pi_desc->control; |
@@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
1222 | new.sn = 0; | 1208 | new.sn = 0; |
1223 | } while (cmpxchg64(&pi_desc->control, old.control, | 1209 | } while (cmpxchg64(&pi_desc->control, old.control, |
1224 | new.control) != old.control); | 1210 | new.control) != old.control); |
1211 | |||
1212 | /* | ||
1213 | * Clear SN before reading the bitmap. The VT-d firmware | ||
1214 | * writes the bitmap and reads SN atomically (5.2.3 in the | ||
1215 | * spec), so it doesn't really have a memory barrier that | ||
1216 | * pairs with this, but we cannot do that and we need one. | ||
1217 | */ | ||
1218 | smp_mb__after_atomic(); | ||
1219 | |||
1220 | if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS)) | ||
1221 | pi_set_on(pi_desc); | ||
1225 | } | 1222 | } |
1226 | 1223 | ||
1227 | /* | 1224 | /* |
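Note: the new tail of vmx_vcpu_pi_load() is a publish-then-recheck sequence: clear SN, force a full barrier, then rescan PIR and set ON by hand if anything was posted while SN was still set, since (per the in-line comment) the device side offers no pairing barrier. A compressed C11-atomics sketch of the ordering (the flag layout is illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define PI_ON (1u << 0)                 /* "notification outstanding" */
#define PI_SN (1u << 1)                 /* "suppress notification" */

static atomic_uint control;
static atomic_ulong pir;                /* pending-interrupt bitmap */

static void pi_load(void)
{
        atomic_fetch_and(&control, ~PI_SN);     /* stop suppressing */

        /* Full barrier: the SN clear must be ordered before the bitmap
         * read, or an interrupt posted in between could be lost. */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&pir))                  /* posted while SN was set? */
                atomic_fetch_or(&control, PI_ON);
}

int main(void)
{
        atomic_store(&control, PI_SN);
        atomic_store(&pir, 1ul << 5);   /* a vector arrived while suppressed */
        pi_load();
        printf("ON set: %d\n", !!(atomic_load(&control) & PI_ON));
        return 0;
}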
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 99328954c2fc..0ac0a64c7790 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h | |||
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) | |||
337 | return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); | 337 | return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); |
338 | } | 338 | } |
339 | 339 | ||
340 | static inline void pi_clear_sn(struct pi_desc *pi_desc) | 340 | static inline void pi_set_sn(struct pi_desc *pi_desc) |
341 | { | 341 | { |
342 | return clear_bit(POSTED_INTR_SN, | 342 | return set_bit(POSTED_INTR_SN, |
343 | (unsigned long *)&pi_desc->control); | 343 | (unsigned long *)&pi_desc->control); |
344 | } | 344 | } |
345 | 345 | ||
346 | static inline void pi_set_sn(struct pi_desc *pi_desc) | 346 | static inline void pi_set_on(struct pi_desc *pi_desc) |
347 | { | 347 | { |
348 | return set_bit(POSTED_INTR_SN, | 348 | set_bit(POSTED_INTR_ON, |
349 | (unsigned long *)&pi_desc->control); | 349 | (unsigned long *)&pi_desc->control); |
350 | } | 350 | } |
351 | 351 | ||
352 | static inline void pi_clear_on(struct pi_desc *pi_desc) | 352 | static inline void pi_clear_on(struct pi_desc *pi_desc) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e67ecf25e690..941f932373d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -7801,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
7801 | * 1) We should set ->mode before checking ->requests. Please see | 7801 | * 1) We should set ->mode before checking ->requests. Please see |
7802 | * the comment in kvm_vcpu_exiting_guest_mode(). | 7802 | * the comment in kvm_vcpu_exiting_guest_mode(). |
7803 | * | 7803 | * |
7804 | * 2) For APICv, we should set ->mode before checking PIR.ON. This | 7804 | * 2) For APICv, we should set ->mode before checking PID.ON. This |
7805 | * pairs with the memory barrier implicit in pi_test_and_set_on | 7805 | * pairs with the memory barrier implicit in pi_test_and_set_on |
7806 | * (see vmx_deliver_posted_interrupt). | 7806 | * (see vmx_deliver_posted_interrupt). |
7807 | * | 7807 | * |
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4a6a5a26c582..eb33432f2f24 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c | |||
@@ -29,7 +29,8 @@ | |||
29 | 29 | ||
30 | struct uv_systab *uv_systab; | 30 | struct uv_systab *uv_systab; |
31 | 31 | ||
32 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | 32 | static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
33 | u64 a4, u64 a5) | ||
33 | { | 34 | { |
34 | struct uv_systab *tab = uv_systab; | 35 | struct uv_systab *tab = uv_systab; |
35 | s64 ret; | 36 | s64 ret; |
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | |||
51 | 52 | ||
52 | return ret; | 53 | return ret; |
53 | } | 54 | } |
55 | |||
56 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | ||
57 | { | ||
58 | s64 ret; | ||
59 | |||
60 | if (down_interruptible(&__efi_uv_runtime_lock)) | ||
61 | return BIOS_STATUS_ABORT; | ||
62 | |||
63 | ret = __uv_bios_call(which, a1, a2, a3, a4, a5); | ||
64 | up(&__efi_uv_runtime_lock); | ||
65 | |||
66 | return ret; | ||
67 | } | ||
54 | EXPORT_SYMBOL_GPL(uv_bios_call); | 68 | EXPORT_SYMBOL_GPL(uv_bios_call); |
55 | 69 | ||
56 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | 70 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | |||
59 | unsigned long bios_flags; | 73 | unsigned long bios_flags; |
60 | s64 ret; | 74 | s64 ret; |
61 | 75 | ||
76 | if (down_interruptible(&__efi_uv_runtime_lock)) | ||
77 | return BIOS_STATUS_ABORT; | ||
78 | |||
62 | local_irq_save(bios_flags); | 79 | local_irq_save(bios_flags); |
63 | ret = uv_bios_call(which, a1, a2, a3, a4, a5); | 80 | ret = __uv_bios_call(which, a1, a2, a3, a4, a5); |
64 | local_irq_restore(bios_flags); | 81 | local_irq_restore(bios_flags); |
65 | 82 | ||
83 | up(&__efi_uv_runtime_lock); | ||
84 | |||
66 | return ret; | 85 | return ret; |
67 | } | 86 | } |
68 | 87 | ||
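Note: splitting uv_bios_call() into a locked wrapper around an unlocked __uv_bios_call() lets uv_bios_call_irqsave() take __efi_uv_runtime_lock itself and then call the helper, rather than recursing into the locking variant; a failed down_interruptible() maps to the new BIOS_STATUS_ABORT. A userspace sketch of the lock-split shape (a POSIX semaphore stands in for the kernel one; names and values are illustrative):

#include <semaphore.h>
#include <stdio.h>

static sem_t fw_lock;

static long __fw_call(int which)        /* caller must hold fw_lock */
{
        return which * 2;               /* placeholder for the firmware call */
}

static long fw_call(int which)
{
        long ret;

        if (sem_wait(&fw_lock))         /* kernel: down_interruptible() */
                return -4;              /* stands in for BIOS_STATUS_ABORT */
        ret = __fw_call(which);
        sem_post(&fw_lock);             /* kernel: up() */
        return ret;
}

int main(void)
{
        sem_init(&fw_lock, 0, 1);
        printf("%ld\n", fw_call(21));
        return 0;
}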
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 17eb09d222ff..ec78a04eb136 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) | |||
122 | 122 | ||
123 | int af_alg_release(struct socket *sock) | 123 | int af_alg_release(struct socket *sock) |
124 | { | 124 | { |
125 | if (sock->sk) | 125 | if (sock->sk) { |
126 | sock_put(sock->sk); | 126 | sock_put(sock->sk); |
127 | sock->sk = NULL; | ||
128 | } | ||
127 | return 0; | 129 | return 0; |
128 | } | 130 | } |
129 | EXPORT_SYMBOL_GPL(af_alg_release); | 131 | EXPORT_SYMBOL_GPL(af_alg_release); |
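Note: clearing sock->sk after the sock_put() makes af_alg_release() tolerate being called twice on the same socket; the second call now sees NULL instead of dropping a reference it no longer owns. A standalone sketch of the pattern:

#include <stdio.h>
#include <stdlib.h>

struct sock { int refcount; };

static void sock_put(struct sock *sk)
{
        if (--sk->refcount == 0)
                free(sk);
}

static void release(struct sock **skp)
{
        if (*skp) {
                sock_put(*skp);
                *skp = NULL;    /* a second release() is now a no-op */
        }
}

int main(void)
{
        struct sock *sk = malloc(sizeof(*sk));

        sk->refcount = 1;
        release(&sk);
        release(&sk);           /* would double-free without the NULL-ing */
        puts("ok");
        return 0;
}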
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index a43276c76fc6..21393ec3b9a4 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c | |||
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client) | |||
509 | struct ht16k33_priv *priv = i2c_get_clientdata(client); | 509 | struct ht16k33_priv *priv = i2c_get_clientdata(client); |
510 | struct ht16k33_fbdev *fbdev = &priv->fbdev; | 510 | struct ht16k33_fbdev *fbdev = &priv->fbdev; |
511 | 511 | ||
512 | cancel_delayed_work(&fbdev->work); | 512 | cancel_delayed_work_sync(&fbdev->work); |
513 | unregister_framebuffer(fbdev->info); | 513 | unregister_framebuffer(fbdev->info); |
514 | framebuffer_release(fbdev->info); | 514 | framebuffer_release(fbdev->info); |
515 | free_page((unsigned long) fbdev->buffer); | 515 | free_page((unsigned long) fbdev->buffer); |
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f94d33525771..d299ec79e4c3 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { | |||
781 | SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, | 781 | SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, |
782 | SYSC_QUIRK_LEGACY_IDLE), | 782 | SYSC_QUIRK_LEGACY_IDLE), |
783 | SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, | 783 | SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, |
784 | SYSC_QUIRK_LEGACY_IDLE), | 784 | 0), |
785 | /* Some timers on omap4 and later */ | 785 | /* Some timers on omap4 and later */ |
786 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, | 786 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, |
787 | SYSC_QUIRK_LEGACY_IDLE), | 787 | 0), |
788 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, | 788 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, |
789 | SYSC_QUIRK_LEGACY_IDLE), | 789 | 0), |
790 | SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, | 790 | SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, |
791 | SYSC_QUIRK_LEGACY_IDLE), | 791 | SYSC_QUIRK_LEGACY_IDLE), |
792 | /* Uarts on omap4 and later */ | 792 | /* Uarts on omap4 and later */ |
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 595124074821..c364027638e1 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c | |||
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer) | |||
154 | if (IS_ERR(parent)) | 154 | if (IS_ERR(parent)) |
155 | return -ENODEV; | 155 | return -ENODEV; |
156 | 156 | ||
157 | /* Bail out if both clocks point to fck */ | ||
158 | if (clk_is_match(parent, timer->fclk)) | ||
159 | return 0; | ||
160 | |||
157 | ret = clk_set_parent(timer->fclk, parent); | 161 | ret = clk_set_parent(timer->fclk, parent); |
158 | if (ret < 0) | 162 | if (ret < 0) |
159 | pr_err("%s: failed to set parent\n", __func__); | 163 | pr_err("%s: failed to set parent\n", __func__); |
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
864 | timer->pdev = pdev; | 868 | timer->pdev = pdev; |
865 | 869 | ||
866 | pm_runtime_enable(dev); | 870 | pm_runtime_enable(dev); |
867 | pm_runtime_irq_safe(dev); | ||
868 | 871 | ||
869 | if (!timer->reserved) { | 872 | if (!timer->reserved) { |
870 | ret = pm_runtime_get_sync(dev); | 873 | ret = pm_runtime_get_sync(dev); |
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 4c46ff6f2242..55b77c576c42 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c | |||
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, | |||
592 | 592 | ||
593 | early_memunmap(tbl, sizeof(*tbl)); | 593 | early_memunmap(tbl, sizeof(*tbl)); |
594 | } | 594 | } |
595 | return 0; | ||
596 | } | ||
597 | 595 | ||
598 | int __init efi_apply_persistent_mem_reservations(void) | ||
599 | { | ||
600 | if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { | 596 | if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { |
601 | unsigned long prsv = efi.mem_reserve; | 597 | unsigned long prsv = efi.mem_reserve; |
602 | 598 | ||
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index eee42d5e25ee..c037c6c5d0b7 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c | |||
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) | |||
75 | efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; | 75 | efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; |
76 | efi_status_t status; | 76 | efi_status_t status; |
77 | 77 | ||
78 | if (IS_ENABLED(CONFIG_ARM)) | ||
79 | return; | ||
80 | |||
81 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), | 78 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), |
82 | (void **)&rsv); | 79 | (void **)&rsv); |
83 | if (status != EFI_SUCCESS) { | 80 | if (status != EFI_SUCCESS) { |
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 8903b9ccfc2b..e2abfdb5cee6 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c | |||
@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) | |||
147 | static DEFINE_SEMAPHORE(efi_runtime_lock); | 147 | static DEFINE_SEMAPHORE(efi_runtime_lock); |
148 | 148 | ||
149 | /* | 149 | /* |
150 | * Expose the EFI runtime lock to the UV platform | ||
151 | */ | ||
152 | #ifdef CONFIG_X86_UV | ||
153 | extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); | ||
154 | #endif | ||
155 | |||
156 | /* | ||
150 | * Calls the appropriate efi_runtime_service() with the appropriate | 157 | * Calls the appropriate efi_runtime_service() with the appropriate |
151 | * arguments. | 158 | * arguments. |
152 | * | 159 | * |
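Note: __alias() is what lets bios_uv.c declare extern struct semaphore __efi_uv_runtime_lock without making the file-local efi_runtime_lock global: the attribute gives the static object a second linker-visible name. A minimal GCC/Clang sketch:

#include <stdio.h>

/* The "private" object stays static to this translation unit... */
static int internal_lock = 42;

/* ...but gains a second, externally visible linker name. */
extern int exported_lock __attribute__((alias("internal_lock")));

int main(void)
{
        printf("%d\n", exported_lock);  /* same storage as internal_lock */
        return 0;
}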
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index ec6e69aa3a8e..d2fbb4bb4a43 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c | |||
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev) | |||
183 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); | 183 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); |
184 | } | 184 | } |
185 | 185 | ||
186 | static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev) | ||
187 | { | ||
188 | i2c_dev->curr_msg = NULL; | ||
189 | i2c_dev->num_msgs = 0; | ||
190 | |||
191 | i2c_dev->msg_buf = NULL; | ||
192 | i2c_dev->msg_buf_remaining = 0; | ||
193 | } | ||
194 | |||
186 | /* | 195 | /* |
187 | * Note about I2C_C_CLEAR on error: | 196 | * Note about I2C_C_CLEAR on error: |
188 | * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in | 197 | * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in |
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], | |||
283 | 292 | ||
284 | time_left = wait_for_completion_timeout(&i2c_dev->completion, | 293 | time_left = wait_for_completion_timeout(&i2c_dev->completion, |
285 | adap->timeout); | 294 | adap->timeout); |
295 | |||
296 | bcm2835_i2c_finish_transfer(i2c_dev); | ||
297 | |||
286 | if (!time_left) { | 298 | if (!time_left) { |
287 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, | 299 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, |
288 | BCM2835_I2C_C_CLEAR); | 300 | BCM2835_I2C_C_CLEAR); |
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index b13605718291..d917cefc5a19 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c | |||
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) | |||
382 | * Check for the message size against FIFO depth and set the | 382 | * Check for the message size against FIFO depth and set the |
383 | * 'hold bus' bit if it is greater than FIFO depth. | 383 | * 'hold bus' bit if it is greater than FIFO depth. |
384 | */ | 384 | */ |
385 | if (id->recv_count > CDNS_I2C_FIFO_DEPTH) | 385 | if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) |
386 | ctrl_reg |= CDNS_I2C_CR_HOLD; | 386 | ctrl_reg |= CDNS_I2C_CR_HOLD; |
387 | else | ||
388 | ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; | ||
387 | 389 | ||
388 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); | 390 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); |
389 | 391 | ||
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id) | |||
440 | * Check for the message size against FIFO depth and set the | 442 | * Check for the message size against FIFO depth and set the |
441 | * 'hold bus' bit if it is greater than FIFO depth. | 443 | * 'hold bus' bit if it is greater than FIFO depth. |
442 | */ | 444 | */ |
443 | if (id->send_count > CDNS_I2C_FIFO_DEPTH) | 445 | if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) |
444 | ctrl_reg |= CDNS_I2C_CR_HOLD; | 446 | ctrl_reg |= CDNS_I2C_CR_HOLD; |
447 | else | ||
448 | ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; | ||
449 | |||
445 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); | 450 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); |
446 | 451 | ||
447 | /* Clear the interrupts in interrupt status register. */ | 452 | /* Clear the interrupts in interrupt status register. */ |
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 4713957b0cbb..a878351f1643 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig | |||
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121 | |||
420 | 420 | ||
421 | config KEYBOARD_SNVS_PWRKEY | 421 | config KEYBOARD_SNVS_PWRKEY |
422 | tristate "IMX SNVS Power Key Driver" | 422 | tristate "IMX SNVS Power Key Driver" |
423 | depends on SOC_IMX6SX | 423 | depends on SOC_IMX6SX || SOC_IMX7D |
424 | depends on OF | 424 | depends on OF |
425 | help | 425 | help |
426 | This is the snvs powerkey driver for the Freescale i.MX application | 426 | This is the snvs powerkey driver for the Freescale i.MX application |
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 312916f99597..73686c2460ce 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c | |||
@@ -75,9 +75,7 @@ | |||
75 | struct cap11xx_led { | 75 | struct cap11xx_led { |
76 | struct cap11xx_priv *priv; | 76 | struct cap11xx_priv *priv; |
77 | struct led_classdev cdev; | 77 | struct led_classdev cdev; |
78 | struct work_struct work; | ||
79 | u32 reg; | 78 | u32 reg; |
80 | enum led_brightness new_brightness; | ||
81 | }; | 79 | }; |
82 | #endif | 80 | #endif |
83 | 81 | ||
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev) | |||
233 | } | 231 | } |
234 | 232 | ||
235 | #ifdef CONFIG_LEDS_CLASS | 233 | #ifdef CONFIG_LEDS_CLASS |
236 | static void cap11xx_led_work(struct work_struct *work) | 234 | static int cap11xx_led_set(struct led_classdev *cdev, |
235 | enum led_brightness value) | ||
237 | { | 236 | { |
238 | struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); | 237 | struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); |
239 | struct cap11xx_priv *priv = led->priv; | 238 | struct cap11xx_priv *priv = led->priv; |
240 | int value = led->new_brightness; | ||
241 | 239 | ||
242 | /* | 240 | /* |
243 | * All LEDs share the same duty cycle as this is a HW limitation. | 241 | * All LEDs share the same duty cycle as this is a HW |
244 | * Brightness levels per LED are either 0 (OFF) or 1 (ON). | 242 | * limitation. Brightness levels per LED are either |
243 | * 0 (OFF) or 1 (ON). | ||
245 | */ | 244 | */ |
246 | regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, | 245 | return regmap_update_bits(priv->regmap, |
247 | BIT(led->reg), value ? BIT(led->reg) : 0); | 246 | CAP11XX_REG_LED_OUTPUT_CONTROL, |
248 | } | 247 | BIT(led->reg), |
249 | 248 | value ? BIT(led->reg) : 0); | |
250 | static void cap11xx_led_set(struct led_classdev *cdev, | ||
251 | enum led_brightness value) | ||
252 | { | ||
253 | struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); | ||
254 | |||
255 | if (led->new_brightness == value) | ||
256 | return; | ||
257 | |||
258 | led->new_brightness = value; | ||
259 | schedule_work(&led->work); | ||
260 | } | 249 | } |
261 | 250 | ||
262 | static int cap11xx_init_leds(struct device *dev, | 251 | static int cap11xx_init_leds(struct device *dev, |
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev, | |||
299 | led->cdev.default_trigger = | 288 | led->cdev.default_trigger = |
300 | of_get_property(child, "linux,default-trigger", NULL); | 289 | of_get_property(child, "linux,default-trigger", NULL); |
301 | led->cdev.flags = 0; | 290 | led->cdev.flags = 0; |
302 | led->cdev.brightness_set = cap11xx_led_set; | 291 | led->cdev.brightness_set_blocking = cap11xx_led_set; |
303 | led->cdev.max_brightness = 1; | 292 | led->cdev.max_brightness = 1; |
304 | led->cdev.brightness = LED_OFF; | 293 | led->cdev.brightness = LED_OFF; |
305 | 294 | ||
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev, | |||
312 | led->reg = reg; | 301 | led->reg = reg; |
313 | led->priv = priv; | 302 | led->priv = priv; |
314 | 303 | ||
315 | INIT_WORK(&led->work, cap11xx_led_work); | ||
316 | |||
317 | error = devm_led_classdev_register(dev, &led->cdev); | 304 | error = devm_led_classdev_register(dev, &led->cdev); |
318 | if (error) { | 305 | if (error) { |
319 | of_node_put(child); | 306 | of_node_put(child); |
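Note: cap11xx is the first of several conversions in this series (qt2160 and apanel below follow the same recipe) from a driver-private work item to brightness_set_blocking. The LED core calls blocking setters from its own workqueue, so the work_struct, the cached target brightness and the INIT_WORK/cancel bookkeeping disappear, and the setter may simply sleep in the regmap or i2c write and return its error. A userspace analogue of that division of labour (names illustrative; a pthread stands in for the core's workqueue):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct led {
        int (*set_blocking)(struct led *led, int value);        /* may sleep */
        int pending;
};

static int my_led_set(struct led *led, int value)
{
        (void)led;
        usleep(1000);           /* stands in for a blocking bus write */
        printf("brightness -> %d\n", value);
        return 0;
}

static void *core_worker(void *arg)     /* the "core" owns the deferral */
{
        struct led *led = arg;

        led->set_blocking(led, led->pending);
        return NULL;
}

int main(void)
{
        struct led led = { .set_blocking = my_led_set, .pending = 1 };
        pthread_t t;

        pthread_create(&t, NULL, core_worker, &led);
        pthread_join(t, NULL);
        return 0;
}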
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 403452ef00e6..3d1cb7bf5e35 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev) | |||
222 | keypad->stopped = true; | 222 | keypad->stopped = true; |
223 | spin_unlock_irq(&keypad->lock); | 223 | spin_unlock_irq(&keypad->lock); |
224 | 224 | ||
225 | flush_work(&keypad->work.work); | 225 | flush_delayed_work(&keypad->work); |
226 | /* | 226 | /* |
227 | * matrix_keypad_scan() will leave IRQs enabled; | 227 | * matrix_keypad_scan() will leave IRQs enabled; |
228 | * we should disable them now. | 228 | * we should disable them now. |
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c index 43b86482dda0..d466bc07aebb 100644 --- a/drivers/input/keyboard/qt2160.c +++ b/drivers/input/keyboard/qt2160.c | |||
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = { | |||
58 | struct qt2160_led { | 58 | struct qt2160_led { |
59 | struct qt2160_data *qt2160; | 59 | struct qt2160_data *qt2160; |
60 | struct led_classdev cdev; | 60 | struct led_classdev cdev; |
61 | struct work_struct work; | ||
62 | char name[32]; | 61 | char name[32]; |
63 | int id; | 62 | int id; |
64 | enum led_brightness new_brightness; | 63 | enum led_brightness brightness; |
65 | }; | 64 | }; |
66 | #endif | 65 | #endif |
67 | 66 | ||
@@ -74,7 +73,6 @@ struct qt2160_data { | |||
74 | u16 key_matrix; | 73 | u16 key_matrix; |
75 | #ifdef CONFIG_LEDS_CLASS | 74 | #ifdef CONFIG_LEDS_CLASS |
76 | struct qt2160_led leds[QT2160_NUM_LEDS_X]; | 75 | struct qt2160_led leds[QT2160_NUM_LEDS_X]; |
77 | struct mutex led_lock; | ||
78 | #endif | 76 | #endif |
79 | }; | 77 | }; |
80 | 78 | ||
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data); | |||
83 | 81 | ||
84 | #ifdef CONFIG_LEDS_CLASS | 82 | #ifdef CONFIG_LEDS_CLASS |
85 | 83 | ||
86 | static void qt2160_led_work(struct work_struct *work) | 84 | static int qt2160_led_set(struct led_classdev *cdev, |
85 | enum led_brightness value) | ||
87 | { | 86 | { |
88 | struct qt2160_led *led = container_of(work, struct qt2160_led, work); | 87 | struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); |
89 | struct qt2160_data *qt2160 = led->qt2160; | 88 | struct qt2160_data *qt2160 = led->qt2160; |
90 | struct i2c_client *client = qt2160->client; | 89 | struct i2c_client *client = qt2160->client; |
91 | int value = led->new_brightness; | ||
92 | u32 drive, pwmen; | 90 | u32 drive, pwmen; |
93 | 91 | ||
94 | mutex_lock(&qt2160->led_lock); | 92 | if (value != led->brightness) { |
95 | 93 | drive = qt2160_read(client, QT2160_CMD_DRIVE_X); | |
96 | drive = qt2160_read(client, QT2160_CMD_DRIVE_X); | 94 | pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); |
97 | pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); | 95 | if (value != LED_OFF) { |
98 | if (value != LED_OFF) { | 96 | drive |= BIT(led->id); |
99 | drive |= (1 << led->id); | 97 | pwmen |= BIT(led->id); |
100 | pwmen |= (1 << led->id); | ||
101 | |||
102 | } else { | ||
103 | drive &= ~(1 << led->id); | ||
104 | pwmen &= ~(1 << led->id); | ||
105 | } | ||
106 | qt2160_write(client, QT2160_CMD_DRIVE_X, drive); | ||
107 | qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); | ||
108 | 98 | ||
109 | /* | 99 | } else { |
110 | * Changing this register will change the brightness | 100 | drive &= ~BIT(led->id); |
111 | * of every LED in the qt2160. It's a HW limitation. | 101 | pwmen &= ~BIT(led->id); |
112 | */ | 102 | } |
113 | if (value != LED_OFF) | 103 | qt2160_write(client, QT2160_CMD_DRIVE_X, drive); |
114 | qt2160_write(client, QT2160_CMD_PWM_DUTY, value); | 104 | qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); |
115 | 105 | ||
116 | mutex_unlock(&qt2160->led_lock); | 106 | /* |
117 | } | 107 | * Changing this register will change the brightness |
108 | * of every LED in the qt2160. It's a HW limitation. | ||
109 | */ | ||
110 | if (value != LED_OFF) | ||
111 | qt2160_write(client, QT2160_CMD_PWM_DUTY, value); | ||
118 | 112 | ||
119 | static void qt2160_led_set(struct led_classdev *cdev, | 113 | led->brightness = value; |
120 | enum led_brightness value) | 114 | } |
121 | { | ||
122 | struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); | ||
123 | 115 | ||
124 | led->new_brightness = value; | 116 | return 0; |
125 | schedule_work(&led->work); | ||
126 | } | 117 | } |
127 | 118 | ||
128 | #endif /* CONFIG_LEDS_CLASS */ | 119 | #endif /* CONFIG_LEDS_CLASS */ |
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160) | |||
293 | int ret; | 284 | int ret; |
294 | int i; | 285 | int i; |
295 | 286 | ||
296 | mutex_init(&qt2160->led_lock); | ||
297 | |||
298 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { | 287 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { |
299 | struct qt2160_led *led = &qt2160->leds[i]; | 288 | struct qt2160_led *led = &qt2160->leds[i]; |
300 | 289 | ||
301 | snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); | 290 | snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); |
302 | led->cdev.name = led->name; | 291 | led->cdev.name = led->name; |
303 | led->cdev.brightness_set = qt2160_led_set; | 292 | led->cdev.brightness_set_blocking = qt2160_led_set; |
304 | led->cdev.brightness = LED_OFF; | 293 | led->cdev.brightness = LED_OFF; |
305 | led->id = i; | 294 | led->id = i; |
306 | led->qt2160 = qt2160; | 295 | led->qt2160 = qt2160; |
307 | 296 | ||
308 | INIT_WORK(&led->work, qt2160_led_work); | ||
309 | |||
310 | ret = led_classdev_register(&client->dev, &led->cdev); | 297 | ret = led_classdev_register(&client->dev, &led->cdev); |
311 | if (ret < 0) | 298 | if (ret < 0) |
312 | return ret; | 299 | return ret; |
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160) | |||
324 | { | 311 | { |
325 | int i; | 312 | int i; |
326 | 313 | ||
327 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { | 314 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) |
328 | led_classdev_unregister(&qt2160->leds[i].cdev); | 315 | led_classdev_unregister(&qt2160->leds[i].cdev); |
329 | cancel_work_sync(&qt2160->leds[i].work); | ||
330 | } | ||
331 | } | 316 | } |
332 | 317 | ||
333 | #else | 318 | #else |
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index babcfb165e4f..3b85631fde91 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c | |||
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev) | |||
153 | 153 | ||
154 | input_dev->id.bustype = BUS_HOST; | 154 | input_dev->id.bustype = BUS_HOST; |
155 | 155 | ||
156 | keypad_data->input_dev = input_dev; | ||
157 | |||
156 | error = keypad_matrix_key_parse_dt(keypad_data); | 158 | error = keypad_matrix_key_parse_dt(keypad_data); |
157 | if (error) | 159 | if (error) |
158 | return error; | 160 | return error; |
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev) | |||
168 | 170 | ||
169 | input_set_drvdata(input_dev, keypad_data); | 171 | input_set_drvdata(input_dev, keypad_data); |
170 | 172 | ||
171 | keypad_data->input_dev = input_dev; | ||
172 | |||
173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
174 | keypad_data->base = devm_ioremap_resource(&pdev->dev, res); | 174 | keypad_data->base = devm_ioremap_resource(&pdev->dev, res); |
175 | if (IS_ERR(keypad_data->base)) | 175 | if (IS_ERR(keypad_data->base)) |
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c index 094bddf56755..c1e66f45d552 100644 --- a/drivers/input/misc/apanel.c +++ b/drivers/input/misc/apanel.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/input-polldev.h> | 23 | #include <linux/input-polldev.h> |
24 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/leds.h> | 25 | #include <linux/leds.h> |
27 | 26 | ||
28 | #define APANEL_NAME "Fujitsu Application Panel" | 27 | #define APANEL_NAME "Fujitsu Application Panel" |
@@ -59,8 +58,6 @@ struct apanel { | |||
59 | struct i2c_client *client; | 58 | struct i2c_client *client; |
60 | unsigned short keymap[MAX_PANEL_KEYS]; | 59 | unsigned short keymap[MAX_PANEL_KEYS]; |
61 | u16 nkeys; | 60 | u16 nkeys; |
62 | u16 led_bits; | ||
63 | struct work_struct led_work; | ||
64 | struct led_classdev mail_led; | 61 | struct led_classdev mail_led; |
65 | }; | 62 | }; |
66 | 63 | ||
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev) | |||
109 | report_key(idev, ap->keymap[i]); | 106 | report_key(idev, ap->keymap[i]); |
110 | } | 107 | } |
111 | 108 | ||
112 | /* Track state changes of LED */ | 109 | static int mail_led_set(struct led_classdev *led, |
113 | static void led_update(struct work_struct *work) | ||
114 | { | ||
115 | struct apanel *ap = container_of(work, struct apanel, led_work); | ||
116 | |||
117 | i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits); | ||
118 | } | ||
119 | |||
120 | static void mail_led_set(struct led_classdev *led, | ||
121 | enum led_brightness value) | 110 | enum led_brightness value) |
122 | { | 111 | { |
123 | struct apanel *ap = container_of(led, struct apanel, mail_led); | 112 | struct apanel *ap = container_of(led, struct apanel, mail_led); |
113 | u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000; | ||
124 | 114 | ||
125 | if (value != LED_OFF) | 115 | return i2c_smbus_write_word_data(ap->client, 0x10, led_bits); |
126 | ap->led_bits |= 0x8000; | ||
127 | else | ||
128 | ap->led_bits &= ~0x8000; | ||
129 | |||
130 | schedule_work(&ap->led_work); | ||
131 | } | 116 | } |
132 | 117 | ||
133 | static int apanel_remove(struct i2c_client *client) | 118 | static int apanel_remove(struct i2c_client *client) |
@@ -179,7 +164,7 @@ static struct apanel apanel = { | |||
179 | }, | 164 | }, |
180 | .mail_led = { | 165 | .mail_led = { |
181 | .name = "mail:blue", | 166 | .name = "mail:blue", |
182 | .brightness_set = mail_led_set, | 167 | .brightness_set_blocking = mail_led_set, |
183 | }, | 168 | }, |
184 | }; | 169 | }; |
185 | 170 | ||
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client, | |||
235 | if (err) | 220 | if (err) |
236 | goto out3; | 221 | goto out3; |
237 | 222 | ||
238 | INIT_WORK(&ap->led_work, led_update); | ||
239 | if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { | 223 | if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { |
240 | err = led_classdev_register(&client->dev, &ap->mail_led); | 224 | err = led_classdev_register(&client->dev, &ap->mail_led); |
241 | if (err) | 225 | if (err) |
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c index 1efcfdf9f8a8..dd9dd4e40827 100644 --- a/drivers/input/misc/bma150.c +++ b/drivers/input/misc/bma150.c | |||
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150) | |||
481 | idev->close = bma150_irq_close; | 481 | idev->close = bma150_irq_close; |
482 | input_set_drvdata(idev, bma150); | 482 | input_set_drvdata(idev, bma150); |
483 | 483 | ||
484 | bma150->input = idev; | ||
485 | |||
484 | error = input_register_device(idev); | 486 | error = input_register_device(idev); |
485 | if (error) { | 487 | if (error) { |
486 | input_free_device(idev); | 488 | input_free_device(idev); |
487 | return error; | 489 | return error; |
488 | } | 490 | } |
489 | 491 | ||
490 | bma150->input = idev; | ||
491 | return 0; | 492 | return 0; |
492 | } | 493 | } |
493 | 494 | ||
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150) | |||
510 | 511 | ||
511 | bma150_init_input_device(bma150, ipoll_dev->input); | 512 | bma150_init_input_device(bma150, ipoll_dev->input); |
512 | 513 | ||
514 | bma150->input_polled = ipoll_dev; | ||
515 | bma150->input = ipoll_dev->input; | ||
516 | |||
513 | error = input_register_polled_device(ipoll_dev); | 517 | error = input_register_polled_device(ipoll_dev); |
514 | if (error) { | 518 | if (error) { |
515 | input_free_polled_device(ipoll_dev); | 519 | input_free_polled_device(ipoll_dev); |
516 | return error; | 520 | return error; |
517 | } | 521 | } |
518 | 522 | ||
519 | bma150->input_polled = ipoll_dev; | ||
520 | bma150->input = ipoll_dev->input; | ||
521 | |||
522 | return 0; | 523 | return 0; |
523 | } | 524 | } |
524 | 525 | ||
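Note: this hunk and the st-keyscan one above fix the same race: input_register_device() and input_register_polled_device() may invoke the device's callbacks before returning, so every field those callbacks dereference (bma150->input here, keypad_data->input_dev there) must be assigned first. A standalone sketch:

#include <stdio.h>

struct dev {
        struct dev *priv;               /* what the callback dereferences */
        void (*open)(struct dev *d);
};

static void register_dev(struct dev *d)
{
        d->open(d);                     /* frameworks may call back at once */
}

static void my_open(struct dev *d)
{
        /* Would print (nil) if priv were assigned after registration. */
        printf("open() sees priv=%p\n", (void *)d->priv);
}

int main(void)
{
        struct dev d = { .open = my_open };

        d.priv = &d;                    /* fixed order: assign, then register */
        register_dev(&d);
        return 0;
}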
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c index 55da191ae550..dbb6d9e1b947 100644 --- a/drivers/input/misc/pwm-vibra.c +++ b/drivers/input/misc/pwm-vibra.c | |||
@@ -34,6 +34,7 @@ struct pwm_vibrator { | |||
34 | struct work_struct play_work; | 34 | struct work_struct play_work; |
35 | u16 level; | 35 | u16 level; |
36 | u32 direction_duty_cycle; | 36 | u32 direction_duty_cycle; |
37 | bool vcc_on; | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | 40 | static int pwm_vibrator_start(struct pwm_vibrator *vibrator) |
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | |||
42 | struct pwm_state state; | 43 | struct pwm_state state; |
43 | int err; | 44 | int err; |
44 | 45 | ||
45 | err = regulator_enable(vibrator->vcc); | 46 | if (!vibrator->vcc_on) { |
46 | if (err) { | 47 | err = regulator_enable(vibrator->vcc); |
47 | dev_err(pdev, "failed to enable regulator: %d", err); | 48 | if (err) { |
48 | return err; | 49 | dev_err(pdev, "failed to enable regulator: %d", err); |
50 | return err; | ||
51 | } | ||
52 | vibrator->vcc_on = true; | ||
49 | } | 53 | } |
50 | 54 | ||
51 | pwm_get_state(vibrator->pwm, &state); | 55 | pwm_get_state(vibrator->pwm, &state); |
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | |||
76 | 80 | ||
77 | static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) | 81 | static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) |
78 | { | 82 | { |
79 | regulator_disable(vibrator->vcc); | ||
80 | |||
81 | if (vibrator->pwm_dir) | 83 | if (vibrator->pwm_dir) |
82 | pwm_disable(vibrator->pwm_dir); | 84 | pwm_disable(vibrator->pwm_dir); |
83 | pwm_disable(vibrator->pwm); | 85 | pwm_disable(vibrator->pwm); |
86 | |||
87 | if (vibrator->vcc_on) { | ||
88 | regulator_disable(vibrator->vcc); | ||
89 | vibrator->vcc_on = false; | ||
90 | } | ||
84 | } | 91 | } |
85 | 92 | ||
86 | static void pwm_vibrator_play_work(struct work_struct *work) | 93 | static void pwm_vibrator_play_work(struct work_struct *work) |
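Note: play_work can call pwm_vibrator_start() again while the motor is already running, and a stop can arrive without a matching start, so unconditional regulator_enable()/regulator_disable() pairs unbalance the supply's use count. The new vcc_on flag keeps them balanced. A standalone sketch of the guard:

#include <stdbool.h>
#include <stdio.h>

static int use_count;                   /* the regulator core's refcount */
static bool vcc_on;                     /* the driver-side guard */

static void regulator_enable(void)  { use_count++; }
static void regulator_disable(void) { use_count--; }

static void start(void)
{
        if (!vcc_on) {
                regulator_enable();
                vcc_on = true;
        }
}

static void stop(void)
{
        if (vcc_on) {
                regulator_disable();
                vcc_on = false;
        }
}

int main(void)
{
        start(); start();               /* replayed "play" requests */
        stop(); stop();                 /* and a double stop */
        printf("use_count = %d\n", use_count);  /* stays balanced at 0 */
        return 0;
}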
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index f322a1768fbb..225ae6980182 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id); | |||
1336 | static const struct acpi_device_id elan_acpi_id[] = { | 1336 | static const struct acpi_device_id elan_acpi_id[] = { |
1337 | { "ELAN0000", 0 }, | 1337 | { "ELAN0000", 0 }, |
1338 | { "ELAN0100", 0 }, | 1338 | { "ELAN0100", 0 }, |
1339 | { "ELAN0501", 0 }, | ||
1340 | { "ELAN0600", 0 }, | 1339 | { "ELAN0600", 0 }, |
1341 | { "ELAN0602", 0 }, | 1340 | { "ELAN0602", 0 }, |
1342 | { "ELAN0605", 0 }, | 1341 | { "ELAN0605", 0 }, |
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = { | |||
1346 | { "ELAN060C", 0 }, | 1345 | { "ELAN060C", 0 }, |
1347 | { "ELAN0611", 0 }, | 1346 | { "ELAN0611", 0 }, |
1348 | { "ELAN0612", 0 }, | 1347 | { "ELAN0612", 0 }, |
1348 | { "ELAN0617", 0 }, | ||
1349 | { "ELAN0618", 0 }, | 1349 | { "ELAN0618", 0 }, |
1350 | { "ELAN061C", 0 }, | 1350 | { "ELAN061C", 0 }, |
1351 | { "ELAN061D", 0 }, | 1351 | { "ELAN061D", 0 }, |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 9fe075c137dc..a7f8b1614559 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, | |||
1119 | * Asus UX31 0x361f00 20, 15, 0e clickpad | 1119 | * Asus UX31 0x361f00 20, 15, 0e clickpad |
1120 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad | 1120 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
1121 | * Avatar AVIU-145A2 0x361f00 ? clickpad | 1121 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
1122 | * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**) | ||
1123 | * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**) | ||
1122 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons | 1124 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons |
1123 | * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons | 1125 | * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons |
1124 | * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons | 1126 | * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons |
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { | |||
1171 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), | 1173 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), |
1172 | }, | 1174 | }, |
1173 | }, | 1175 | }, |
1176 | { | ||
1177 | /* Fujitsu H780 also has a middle button */ | ||
1178 | .matches = { | ||
1179 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
1180 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"), | ||
1181 | }, | ||
1182 | }, | ||
1174 | #endif | 1183 | #endif |
1175 | { } | 1184 | { } |
1176 | }; | 1185 | }; |
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c index c62cceb97bb1..5e8d8384aa2a 100644 --- a/drivers/input/serio/ps2-gpio.c +++ b/drivers/input/serio/ps2-gpio.c | |||
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio) | |||
76 | { | 76 | { |
77 | struct ps2_gpio_data *drvdata = serio->port_data; | 77 | struct ps2_gpio_data *drvdata = serio->port_data; |
78 | 78 | ||
79 | flush_delayed_work(&drvdata->tx_work); | ||
79 | disable_irq(drvdata->irq); | 80 | disable_irq(drvdata->irq); |
80 | } | 81 | } |
81 | 82 | ||
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index d713271ebf7c..a64116586b4c 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c | |||
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan) | |||
1396 | 1396 | ||
1397 | /* Clear ring flush state */ | 1397 | /* Clear ring flush state */ |
1398 | timeout = 1000; /* timeout of 1s */ | 1398 | timeout = 1000; /* timeout of 1s */ |
1399 | writel_relaxed(0x0, ring + RING_CONTROL); | 1399 | writel_relaxed(0x0, ring->regs + RING_CONTROL); |
1400 | do { | 1400 | do { |
1401 | if (!(readl_relaxed(ring + RING_FLUSH_DONE) & | 1401 | if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & |
1402 | FLUSH_DONE_MASK)) | 1402 | FLUSH_DONE_MASK)) |
1403 | break; | 1403 | break; |
1404 | mdelay(1); | 1404 | mdelay(1); |
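Note: the flexrm change fixes pointer-arithmetic scaling: ring is a struct flexrm_ring *, so ring + RING_CONTROL advances by RING_CONTROL * sizeof(*ring) into unrelated memory, while the register offset belongs on the ring's MMIO base, ring->regs. A standalone sketch of the difference (addresses illustrative; the stray arithmetic is computed only to print):

#include <stdio.h>

#define RING_CONTROL 0x0c8              /* byte offset of a ring register */

struct flexrm_ring {
        void *regs;                     /* MMIO base for this ring */
        int num;
};

int main(void)
{
        struct flexrm_ring r = { .regs = (void *)0x10000 };
        struct flexrm_ring *ring = &r;

        /* Buggy: scales by sizeof(struct flexrm_ring), not by bytes. */
        printf("ring + RING_CONTROL       = %p\n", (void *)(ring + RING_CONTROL));

        /* Fixed: a byte offset applied to the MMIO base. */
        printf("ring->regs + RING_CONTROL = %p\n",
               (void *)((char *)ring->regs + RING_CONTROL));
        return 0;
}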
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index c6a7d4582dc6..38d9df3fb199 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c | |||
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) | |||
310 | 310 | ||
311 | return ret; | 311 | return ret; |
312 | } | 312 | } |
313 | EXPORT_SYMBOL_GPL(mbox_flush); | ||
313 | 314 | ||
314 | /** | 315 | /** |
315 | * mbox_request_channel - Request a mailbox channel. | 316 | * mbox_request_channel - Request a mailbox channel. |
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0e4bbdcc614f..c76892ac4e69 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c | |||
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) | |||
344 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); | 344 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); |
345 | } | 345 | } |
346 | 346 | ||
347 | static void b53_enable_vlan(struct b53_device *dev, bool enable) | 347 | static void b53_enable_vlan(struct b53_device *dev, bool enable, |
348 | bool enable_filtering) | ||
348 | { | 349 | { |
349 | u8 mgmt, vc0, vc1, vc4 = 0, vc5; | 350 | u8 mgmt, vc0, vc1, vc4 = 0, vc5; |
350 | 351 | ||
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) | |||
369 | vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; | 370 | vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; |
370 | vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; | 371 | vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; |
371 | vc4 &= ~VC4_ING_VID_CHECK_MASK; | 372 | vc4 &= ~VC4_ING_VID_CHECK_MASK; |
372 | vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; | 373 | if (enable_filtering) { |
373 | vc5 |= VC5_DROP_VTABLE_MISS; | 374 | vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; |
375 | vc5 |= VC5_DROP_VTABLE_MISS; | ||
376 | } else { | ||
377 | vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; | ||
378 | vc5 &= ~VC5_DROP_VTABLE_MISS; | ||
379 | } | ||
374 | 380 | ||
375 | if (is5325(dev)) | 381 | if (is5325(dev)) |
376 | vc0 &= ~VC0_RESERVED_1; | 382 | vc0 &= ~VC0_RESERVED_1; |
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) | |||
420 | } | 426 | } |
421 | 427 | ||
422 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); | 428 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); |
429 | |||
430 | dev->vlan_enabled = enable; | ||
431 | dev->vlan_filtering_enabled = enable_filtering; | ||
423 | } | 432 | } |
424 | 433 | ||
425 | static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) | 434 | static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) |
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev) | |||
632 | b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); | 641 | b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); |
633 | } | 642 | } |
634 | 643 | ||
644 | static u16 b53_default_pvid(struct b53_device *dev) | ||
645 | { | ||
646 | if (is5325(dev) || is5365(dev)) | ||
647 | return 1; | ||
648 | else | ||
649 | return 0; | ||
650 | } | ||
651 | |||
635 | int b53_configure_vlan(struct dsa_switch *ds) | 652 | int b53_configure_vlan(struct dsa_switch *ds) |
636 | { | 653 | { |
637 | struct b53_device *dev = ds->priv; | 654 | struct b53_device *dev = ds->priv; |
638 | struct b53_vlan vl = { 0 }; | 655 | struct b53_vlan vl = { 0 }; |
639 | int i; | 656 | int i, def_vid; |
657 | |||
658 | def_vid = b53_default_pvid(dev); | ||
640 | 659 | ||
641 | /* clear all vlan entries */ | 660 | /* clear all vlan entries */ |
642 | if (is5325(dev) || is5365(dev)) { | 661 | if (is5325(dev) || is5365(dev)) { |
643 | for (i = 1; i < dev->num_vlans; i++) | 662 | for (i = def_vid; i < dev->num_vlans; i++) |
644 | b53_set_vlan_entry(dev, i, &vl); | 663 | b53_set_vlan_entry(dev, i, &vl); |
645 | } else { | 664 | } else { |
646 | b53_do_vlan_op(dev, VTA_CMD_CLEAR); | 665 | b53_do_vlan_op(dev, VTA_CMD_CLEAR); |
647 | } | 666 | } |
648 | 667 | ||
649 | b53_enable_vlan(dev, false); | 668 | b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); |
650 | 669 | ||
651 | b53_for_each_port(dev, i) | 670 | b53_for_each_port(dev, i) |
652 | b53_write16(dev, B53_VLAN_PAGE, | 671 | b53_write16(dev, B53_VLAN_PAGE, |
653 | B53_VLAN_PORT_DEF_TAG(i), 1); | 672 | B53_VLAN_PORT_DEF_TAG(i), def_vid); |
654 | 673 | ||
655 | if (!is5325(dev) && !is5365(dev)) | 674 | if (!is5325(dev) && !is5365(dev)) |
656 | b53_set_jumbo(dev, dev->enable_jumbo, false); | 675 | b53_set_jumbo(dev, dev->enable_jumbo, false); |
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up); | |||
1255 | 1274 | ||
1256 | int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) | 1275 | int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) |
1257 | { | 1276 | { |
1277 | struct b53_device *dev = ds->priv; | ||
1278 | struct net_device *bridge_dev; | ||
1279 | unsigned int i; | ||
1280 | u16 pvid, new_pvid; | ||
1281 | |||
1282 | /* Handle the case where multiple bridges span the same switch device | ||
1283 | * and one of them has a different setting than what is being requested, | ||
1284 | * which would break filtering semantics for any of the other | ||
1285 | * bridge devices. | ||
1286 | */ | ||
1287 | b53_for_each_port(dev, i) { | ||
1288 | bridge_dev = dsa_to_port(ds, i)->bridge_dev; | ||
1289 | if (bridge_dev && | ||
1290 | bridge_dev != dsa_to_port(ds, port)->bridge_dev && | ||
1291 | br_vlan_enabled(bridge_dev) != vlan_filtering) { | ||
1292 | netdev_err(bridge_dev, | ||
1293 | "VLAN filtering is global to the switch!\n"); | ||
1294 | return -EINVAL; | ||
1295 | } | ||
1296 | } | ||
1297 | |||
1298 | b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); | ||
1299 | new_pvid = pvid; | ||
1300 | if (dev->vlan_filtering_enabled && !vlan_filtering) { | ||
1301 | /* Filtering is currently enabled, use the default PVID since | ||
1302 | * the bridge does not expect tagging anymore | ||
1303 | */ | ||
1304 | dev->ports[port].pvid = pvid; | ||
1305 | new_pvid = b53_default_pvid(dev); | ||
1306 | } else if (!dev->vlan_filtering_enabled && vlan_filtering) { | ||
1307 | /* Filtering is currently disabled, restore the previous PVID */ | ||
1308 | new_pvid = dev->ports[port].pvid; | ||
1309 | } | ||
1310 | |||
1311 | if (pvid != new_pvid) | ||
1312 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), | ||
1313 | new_pvid); | ||
1314 | |||
1315 | b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); | ||
1316 | |||
1258 | return 0; | 1317 | return 0; |
1259 | } | 1318 | } |
1260 | EXPORT_SYMBOL(b53_vlan_filtering); | 1319 | EXPORT_SYMBOL(b53_vlan_filtering); |
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, | |||
1270 | if (vlan->vid_end > dev->num_vlans) | 1329 | if (vlan->vid_end > dev->num_vlans) |
1271 | return -ERANGE; | 1330 | return -ERANGE; |
1272 | 1331 | ||
1273 | b53_enable_vlan(dev, true); | 1332 | b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); |
1274 | 1333 | ||
1275 | return 0; | 1334 | return 0; |
1276 | } | 1335 | } |
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port, | |||
1300 | b53_fast_age_vlan(dev, vid); | 1359 | b53_fast_age_vlan(dev, vid); |
1301 | } | 1360 | } |
1302 | 1361 | ||
1303 | if (pvid) { | 1362 | if (pvid && !dsa_is_cpu_port(ds, port)) { |
1304 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), | 1363 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), |
1305 | vlan->vid_end); | 1364 | vlan->vid_end); |
1306 | b53_fast_age_vlan(dev, vid); | 1365 | b53_fast_age_vlan(dev, vid); |
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port, | |||
1326 | 1385 | ||
1327 | vl->members &= ~BIT(port); | 1386 | vl->members &= ~BIT(port); |
1328 | 1387 | ||
1329 | if (pvid == vid) { | 1388 | if (pvid == vid) |
1330 | if (is5325(dev) || is5365(dev)) | 1389 | pvid = b53_default_pvid(dev); |
1331 | pvid = 1; | ||
1332 | else | ||
1333 | pvid = 0; | ||
1334 | } | ||
1335 | 1390 | ||
1336 | if (untagged && !dsa_is_cpu_port(ds, port)) | 1391 | if (untagged && !dsa_is_cpu_port(ds, port)) |
1337 | vl->untag &= ~(BIT(port)); | 1392 | vl->untag &= ~(BIT(port)); |
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) | |||
1644 | b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); | 1699 | b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); |
1645 | dev->ports[port].vlan_ctl_mask = pvlan; | 1700 | dev->ports[port].vlan_ctl_mask = pvlan; |
1646 | 1701 | ||
1647 | if (is5325(dev) || is5365(dev)) | 1702 | pvid = b53_default_pvid(dev); |
1648 | pvid = 1; | ||
1649 | else | ||
1650 | pvid = 0; | ||
1651 | 1703 | ||
1652 | /* Make this port join all VLANs without VLAN entries */ | 1704 | /* Make this port join all VLANs without VLAN entries */ |
1653 | if (is58xx(dev)) { | 1705 | if (is58xx(dev)) { |
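The b53 changes centralise the chip-dependent default PVID (1 on 5325/5365, 0 everywhere else) in b53_default_pvid() and make b53_vlan_filtering() remember each port's PVID while filtering is off so it can be restored when filtering is re-enabled. A compilable sketch of that save/restore flow, with the register I/O reduced to an array (all names are stand-ins, not the driver's API):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct port { uint16_t pvid; };
    struct sw {
        bool is5325_or_5365;
        bool filtering_enabled;
        uint16_t hw_pvid[2];     /* stands in for B53_VLAN_PORT_DEF_TAG */
        struct port ports[2];
    };

    static uint16_t default_pvid(const struct sw *dev)
    {
        return dev->is5325_or_5365 ? 1 : 0;
    }

    static void set_filtering(struct sw *dev, int port, bool on)
    {
        uint16_t pvid = dev->hw_pvid[port], new_pvid = pvid;

        if (dev->filtering_enabled && !on) {
            dev->ports[port].pvid = pvid;      /* remember bridge PVID */
            new_pvid = default_pvid(dev);      /* no tagging expected */
        } else if (!dev->filtering_enabled && on) {
            new_pvid = dev->ports[port].pvid;  /* restore it */
        }
        if (new_pvid != pvid)
            dev->hw_pvid[port] = new_pvid;
        dev->filtering_enabled = on;
    }

    int main(void)
    {
        struct sw dev = { .filtering_enabled = true, .hw_pvid = { 100, 0 } };

        set_filtering(&dev, 0, false);
        printf("pvid after disable: %u\n", (unsigned)dev.hw_pvid[0]);
        set_filtering(&dev, 0, true);
        printf("pvid after enable:  %u\n", (unsigned)dev.hw_pvid[0]);
        return 0;
    }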
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index ec796482792d..4dc7ee38b258 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h | |||
@@ -91,6 +91,7 @@ enum { | |||
91 | struct b53_port { | 91 | struct b53_port { |
92 | u16 vlan_ctl_mask; | 92 | u16 vlan_ctl_mask; |
93 | struct ethtool_eee eee; | 93 | struct ethtool_eee eee; |
94 | u16 pvid; | ||
94 | }; | 95 | }; |
95 | 96 | ||
96 | struct b53_vlan { | 97 | struct b53_vlan { |
@@ -137,6 +138,8 @@ struct b53_device { | |||
137 | 138 | ||
138 | unsigned int num_vlans; | 139 | unsigned int num_vlans; |
139 | struct b53_vlan *vlans; | 140 | struct b53_vlan *vlans; |
141 | bool vlan_enabled; | ||
142 | bool vlan_filtering_enabled; | ||
140 | unsigned int num_ports; | 143 | unsigned int num_ports; |
141 | struct b53_port *ports; | 144 | struct b53_port *ports; |
142 | }; | 145 | }; |
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 98696a88fa1c..f91b8e77d543 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, | |||
726 | { | 726 | { |
727 | struct net_device *p = ds->ports[port].cpu_dp->master; | 727 | struct net_device *p = ds->ports[port].cpu_dp->master; |
728 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); | 728 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); |
729 | struct ethtool_wolinfo pwol; | 729 | struct ethtool_wolinfo pwol = { }; |
730 | 730 | ||
731 | /* Get the parent device WoL settings */ | 731 | /* Get the parent device WoL settings */ |
732 | p->ethtool_ops->get_wol(p, &pwol); | 732 | if (p->ethtool_ops->get_wol) |
733 | p->ethtool_ops->get_wol(p, &pwol); | ||
733 | 734 | ||
734 | /* Advertise the parent device supported settings */ | 735 | /* Advertise the parent device supported settings */ |
735 | wol->supported = pwol.supported; | 736 | wol->supported = pwol.supported; |
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, | |||
750 | struct net_device *p = ds->ports[port].cpu_dp->master; | 751 | struct net_device *p = ds->ports[port].cpu_dp->master; |
751 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); | 752 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); |
752 | s8 cpu_port = ds->ports[port].cpu_dp->index; | 753 | s8 cpu_port = ds->ports[port].cpu_dp->index; |
753 | struct ethtool_wolinfo pwol; | 754 | struct ethtool_wolinfo pwol = { }; |
754 | 755 | ||
755 | p->ethtool_ops->get_wol(p, &pwol); | 756 | if (p->ethtool_ops->get_wol) |
757 | p->ethtool_ops->get_wol(p, &pwol); | ||
756 | if (wol->wolopts & ~pwol.supported) | 758 | if (wol->wolopts & ~pwol.supported) |
757 | return -EINVAL; | 759 | return -EINVAL; |
758 | 760 | ||
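Both WoL paths now zero-initialise pwol and only call the master device's get_wol when the hook exists, so a CPU port whose master lacks WoL support advertises nothing instead of jumping through a NULL ethtool op. The guard pattern in isolation (types hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct wolinfo { unsigned supported, wolopts; };
    struct ethtool_ops { void (*get_wol)(struct wolinfo *w); };

    static void query_wol(const struct ethtool_ops *ops, struct wolinfo *w)
    {
        memset(w, 0, sizeof(*w));  /* sane answer when the op is absent */
        if (ops->get_wol)          /* the op is optional: guard it */
            ops->get_wol(w);
    }

    int main(void)
    {
        struct ethtool_ops none = { 0 };
        struct wolinfo w;

        query_wol(&none, &w);      /* safe: no NULL-pointer call */
        printf("supported=0x%x\n", w.supported);
        return 0;
    }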
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 28c9b0bdf2f6..bc3ac369cbe3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev, | |||
134 | 134 | ||
135 | priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); | 135 | priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); |
136 | reg = rxchk_readl(priv, RXCHK_CONTROL); | 136 | reg = rxchk_readl(priv, RXCHK_CONTROL); |
137 | /* Clear L2 header checks, which would prevent BPDUs | ||
138 | * from being received. | ||
139 | */ | ||
140 | reg &= ~RXCHK_L2_HDR_DIS; | ||
137 | if (priv->rx_chk_en) | 141 | if (priv->rx_chk_en) |
138 | reg |= RXCHK_EN; | 142 | reg |= RXCHK_EN; |
139 | else | 143 | else |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index b8155f5e71b4..ac55db065f16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -3128,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3128 | dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); | 3128 | dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); |
3129 | dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); | 3129 | dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); |
3130 | } | 3130 | } |
3131 | |||
3132 | put_device(&pdev->dev); | ||
3133 | |||
3131 | return 0; | 3134 | return 0; |
3132 | } | 3135 | } |
3133 | EXPORT_SYMBOL(hns_dsaf_roce_reset); | 3136 | EXPORT_SYMBOL(hns_dsaf_roce_reset); |
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2f427271a793..292a668ce88e 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2879 | 2879 | ||
2880 | ret = mv643xx_eth_shared_of_probe(pdev); | 2880 | ret = mv643xx_eth_shared_of_probe(pdev); |
2881 | if (ret) | 2881 | if (ret) |
2882 | return ret; | 2882 | goto err_put_clk; |
2883 | pd = dev_get_platdata(&pdev->dev); | 2883 | pd = dev_get_platdata(&pdev->dev); |
2884 | 2884 | ||
2885 | msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? | 2885 | msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? |
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2887 | infer_hw_params(msp); | 2887 | infer_hw_params(msp); |
2888 | 2888 | ||
2889 | return 0; | 2889 | return 0; |
2890 | |||
2891 | err_put_clk: | ||
2892 | if (!IS_ERR(msp->clk)) | ||
2893 | clk_disable_unprepare(msp->clk); | ||
2894 | return ret; | ||
2890 | } | 2895 | } |
2891 | 2896 | ||
2892 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) | 2897 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) |
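The probe fix stops returning straight out on mv643xx_eth_shared_of_probe() failure and unwinds through the new err_put_clk label instead, releasing the clock prepared earlier in the function. The goto-unwind shape in miniature (helper names invented for the example):

    #include <stdio.h>

    static int  prepare_clk(void)  { puts("clk prepared"); return 0; }
    static void release_clk(void)  { puts("clk released"); }
    static int  of_probe(void)     { return -1; /* simulate failure */ }

    static int probe(void)
    {
        int ret = prepare_clk();
        if (ret)
            return ret;            /* nothing to undo yet */

        ret = of_probe();
        if (ret)
            goto err_put_clk;      /* undo the clock, then fail */

        return 0;

    err_put_clk:
        release_clk();
        return ret;
    }

    int main(void) { return probe() ? 1 : 0; }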
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f3a5fa84860f..57727fe1501e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5073 | INIT_WORK(&hw->restart_work, sky2_restart); | 5073 | INIT_WORK(&hw->restart_work, sky2_restart); |
5074 | 5074 | ||
5075 | pci_set_drvdata(pdev, hw); | 5075 | pci_set_drvdata(pdev, hw); |
5076 | pdev->d3_delay = 200; | 5076 | pdev->d3_delay = 300; |
5077 | 5077 | ||
5078 | return 0; | 5078 | return 0; |
5079 | 5079 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6b88881b8e35..c1438ae52a11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
3360 | dev->addr_len = ETH_ALEN; | 3360 | dev->addr_len = ETH_ALEN; |
3361 | mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); | 3361 | mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); |
3362 | if (!is_valid_ether_addr(dev->dev_addr)) { | 3362 | if (!is_valid_ether_addr(dev->dev_addr)) { |
3363 | en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", | 3363 | en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", |
3364 | priv->port, dev->dev_addr); | 3364 | priv->port, dev->dev_addr); |
3365 | err = -EINVAL; | 3365 | err = -EINVAL; |
3366 | goto out; | 3366 | goto out; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index bc349d8ca08a..7ee6d747e97b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, | |||
862 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { | 862 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { |
863 | bool configure = false; | 863 | bool configure = false; |
864 | bool pfc = false; | 864 | bool pfc = false; |
865 | u16 thres_cells; | ||
866 | u16 delay_cells; | ||
865 | bool lossy; | 867 | bool lossy; |
866 | u16 thres; | ||
867 | 868 | ||
868 | for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { | 869 | for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { |
869 | if (prio_tc[j] == i) { | 870 | if (prio_tc[j] == i) { |
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, | |||
877 | continue; | 878 | continue; |
878 | 879 | ||
879 | lossy = !(pfc || pause_en); | 880 | lossy = !(pfc || pause_en); |
880 | thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); | 881 | thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); |
881 | delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, | 882 | delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, |
882 | pause_en); | 883 | pfc, pause_en); |
883 | mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); | 884 | mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells, |
885 | thres_cells, lossy); | ||
884 | } | 886 | } |
885 | 887 | ||
886 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); | 888 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index beb8e5d6401a..ded556b7bab5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c | |||
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, | |||
1688 | 1688 | ||
1689 | eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); | 1689 | eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); |
1690 | 1690 | ||
1691 | if (!ether_addr_equal(ethh->h_dest, | ||
1692 | p_hwfn->p_rdma_info->iwarp.mac_addr)) { | ||
1693 | DP_VERBOSE(p_hwfn, | ||
1694 | QED_MSG_RDMA, | ||
1695 | "Got unexpected mac %pM instead of %pM\n", | ||
1696 | ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); | ||
1697 | return -EINVAL; | ||
1698 | } | ||
1699 | |||
1691 | ether_addr_copy(remote_mac_addr, ethh->h_source); | 1700 | ether_addr_copy(remote_mac_addr, ethh->h_source); |
1692 | ether_addr_copy(local_mac_addr, ethh->h_dest); | 1701 | ether_addr_copy(local_mac_addr, ethh->h_dest); |
1693 | 1702 | ||
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2605 | struct qed_iwarp_info *iwarp_info; | 2614 | struct qed_iwarp_info *iwarp_info; |
2606 | struct qed_ll2_acquire_data data; | 2615 | struct qed_ll2_acquire_data data; |
2607 | struct qed_ll2_cbs cbs; | 2616 | struct qed_ll2_cbs cbs; |
2608 | u32 mpa_buff_size; | 2617 | u32 buff_size; |
2609 | u16 n_ooo_bufs; | 2618 | u16 n_ooo_bufs; |
2610 | int rc = 0; | 2619 | int rc = 0; |
2611 | int i; | 2620 | int i; |
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2632 | 2641 | ||
2633 | memset(&data, 0, sizeof(data)); | 2642 | memset(&data, 0, sizeof(data)); |
2634 | data.input.conn_type = QED_LL2_TYPE_IWARP; | 2643 | data.input.conn_type = QED_LL2_TYPE_IWARP; |
2635 | data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; | 2644 | data.input.mtu = params->max_mtu; |
2636 | data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; | 2645 | data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; |
2637 | data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; | 2646 | data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; |
2638 | data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ | 2647 | data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ |
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2654 | goto err; | 2663 | goto err; |
2655 | } | 2664 | } |
2656 | 2665 | ||
2666 | buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); | ||
2657 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, | 2667 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, |
2658 | QED_IWARP_LL2_SYN_RX_SIZE, | 2668 | QED_IWARP_LL2_SYN_RX_SIZE, |
2659 | QED_IWARP_MAX_SYN_PKT_SIZE, | 2669 | buff_size, |
2660 | iwarp_info->ll2_syn_handle); | 2670 | iwarp_info->ll2_syn_handle); |
2661 | if (rc) | 2671 | if (rc) |
2662 | goto err; | 2672 | goto err; |
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2710 | if (rc) | 2720 | if (rc) |
2711 | goto err; | 2721 | goto err; |
2712 | 2722 | ||
2713 | mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); | ||
2714 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, | 2723 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, |
2715 | data.input.rx_num_desc, | 2724 | data.input.rx_num_desc, |
2716 | mpa_buff_size, | 2725 | buff_size, |
2717 | iwarp_info->ll2_mpa_handle); | 2726 | iwarp_info->ll2_mpa_handle); |
2718 | if (rc) | 2727 | if (rc) |
2719 | goto err; | 2728 | goto err; |
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2726 | 2735 | ||
2727 | iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; | 2736 | iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; |
2728 | 2737 | ||
2729 | iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); | 2738 | iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); |
2730 | if (!iwarp_info->mpa_intermediate_buf) | 2739 | if (!iwarp_info->mpa_intermediate_buf) |
2731 | goto err; | 2740 | goto err; |
2732 | 2741 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d00241..7ac959038324 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h | |||
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); | |||
46 | 46 | ||
47 | #define QED_IWARP_LL2_SYN_TX_SIZE (128) | 47 | #define QED_IWARP_LL2_SYN_TX_SIZE (128) |
48 | #define QED_IWARP_LL2_SYN_RX_SIZE (256) | 48 | #define QED_IWARP_LL2_SYN_RX_SIZE (256) |
49 | #define QED_IWARP_MAX_SYN_PKT_SIZE (128) | ||
50 | 49 | ||
51 | #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) | 50 | #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) |
52 | #define QED_IWARP_MAX_OOO (16) | 51 | #define QED_IWARP_MAX_OOO (16) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 90045fffd393..7fbb6a4dbf51 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) | |||
241 | static int dwmac4_rx_check_timestamp(void *desc) | 241 | static int dwmac4_rx_check_timestamp(void *desc) |
242 | { | 242 | { |
243 | struct dma_desc *p = (struct dma_desc *)desc; | 243 | struct dma_desc *p = (struct dma_desc *)desc; |
244 | unsigned int rdes0 = le32_to_cpu(p->des0); | ||
245 | unsigned int rdes1 = le32_to_cpu(p->des1); | ||
246 | unsigned int rdes3 = le32_to_cpu(p->des3); | ||
244 | u32 own, ctxt; | 247 | u32 own, ctxt; |
245 | int ret = 1; | 248 | int ret = 1; |
246 | 249 | ||
247 | own = p->des3 & RDES3_OWN; | 250 | own = rdes3 & RDES3_OWN; |
248 | ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) | 251 | ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) |
249 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); | 252 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); |
250 | 253 | ||
251 | if (likely(!own && ctxt)) { | 254 | if (likely(!own && ctxt)) { |
252 | if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) | 255 | if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) |
253 | /* Corrupted value */ | 256 | /* Corrupted value */ |
254 | ret = -EINVAL; | 257 | ret = -EINVAL; |
255 | else | 258 | else |
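dwmac4_rx_check_timestamp() now converts each descriptor word with le32_to_cpu() into a local variable before any bit tests, which is required for correctness on big-endian CPUs and also avoids re-reading DMA-visible memory between tests. A self-contained illustration of the conversion (descriptors assumed little-endian, as on this hardware):

    #include <stdio.h>
    #include <stdint.h>

    /* Portable little-endian-to-host read of a 32-bit word. */
    static uint32_t le32_to_host(const uint8_t *p)
    {
        return (uint32_t)p[0]       | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        /* A descriptor word as it sits in DMA memory: 0x80000000 LE. */
        uint8_t des3[4] = { 0x00, 0x00, 0x00, 0x80 };
        uint32_t rdes3 = le32_to_host(des3);

        /* Bit tests are only meaningful after the conversion; on a
         * big-endian host the raw load would read 0x00000080. */
        printf("OWN bit set: %s\n", (rdes3 & 0x80000000u) ? "yes" : "no");
        return 0;
    }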
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 5d85742a2be0..3c749c327cbd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, | |||
696 | struct ethtool_eee *edata) | 696 | struct ethtool_eee *edata) |
697 | { | 697 | { |
698 | struct stmmac_priv *priv = netdev_priv(dev); | 698 | struct stmmac_priv *priv = netdev_priv(dev); |
699 | int ret; | ||
699 | 700 | ||
700 | priv->eee_enabled = edata->eee_enabled; | 701 | if (!edata->eee_enabled) { |
701 | |||
702 | if (!priv->eee_enabled) | ||
703 | stmmac_disable_eee_mode(priv); | 702 | stmmac_disable_eee_mode(priv); |
704 | else { | 703 | } else { |
705 | /* We are asking for enabling the EEE but it is safe | 704 | /* We are asking for enabling the EEE but it is safe |
706 | * to verify all by invoking the eee_init function. | 705 | * to verify all by invoking the eee_init function. |
707 | * In case of failure it will return an error. | 706 | * In case of failure it will return an error. |
708 | */ | 707 | */ |
709 | priv->eee_enabled = stmmac_eee_init(priv); | 708 | edata->eee_enabled = stmmac_eee_init(priv); |
710 | if (!priv->eee_enabled) | 709 | if (!edata->eee_enabled) |
711 | return -EOPNOTSUPP; | 710 | return -EOPNOTSUPP; |
712 | |||
713 | /* Do not change tx_lpi_timer in case of failure */ | ||
714 | priv->tx_lpi_timer = edata->tx_lpi_timer; | ||
715 | } | 711 | } |
716 | 712 | ||
717 | return phy_ethtool_set_eee(dev->phydev, edata); | 713 | ret = phy_ethtool_set_eee(dev->phydev, edata); |
714 | if (ret) | ||
715 | return ret; | ||
716 | |||
717 | priv->eee_enabled = edata->eee_enabled; | ||
718 | priv->tx_lpi_timer = edata->tx_lpi_timer; | ||
719 | return 0; | ||
718 | } | 720 | } |
719 | 721 | ||
720 | static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) | 722 | static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) |
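The reworked set_eee handler defers every write to priv until both stmmac_eee_init() and phy_ethtool_set_eee() have succeeded, so a mid-path failure can no longer leave eee_enabled or tx_lpi_timer describing a configuration the PHY never accepted. The commit-on-success shape, reduced to a sketch (names hypothetical):

    #include <stdio.h>
    #include <stdbool.h>

    struct state { bool eee_enabled; int tx_lpi_timer; };

    static int phy_set_eee(bool on) { (void)on; return -1; /* simulate failure */ }

    static int set_eee(struct state *s, bool on, int timer)
    {
        int ret = phy_set_eee(on);
        if (ret)
            return ret;           /* driver state untouched on failure */

        s->eee_enabled = on;      /* commit only after everything worked */
        s->tx_lpi_timer = timer;
        return 0;
    }

    int main(void)
    {
        struct state s = { false, 0 };

        if (set_eee(&s, true, 1000))
            printf("failed; state still says enabled=%d\n", s.eee_enabled);
        return 0;
    }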
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1f612268c998..d847f672a705 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c | |||
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device, | |||
259 | const char *name; | 259 | const char *name; |
260 | char node_name[32]; | 260 | char node_name[32]; |
261 | 261 | ||
262 | if (of_property_read_string(node, "label", &name) < 0) { | 262 | if (of_property_read_string(child, "label", &name) < 0) { |
263 | snprintf(node_name, sizeof(node_name), "%pOFn", child); | 263 | snprintf(node_name, sizeof(node_name), "%pOFn", child); |
264 | name = node_name; | 264 | name = node_name; |
265 | } | 265 | } |
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index ebf419dc7307..2d1449345959 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c | |||
@@ -35,7 +35,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) | |||
35 | u16 val = 0; | 35 | u16 val = 0; |
36 | int err; | 36 | int err; |
37 | 37 | ||
38 | err = priv->phy_drv->read_status(phydev); | 38 | if (priv->phy_drv->read_status) |
39 | err = priv->phy_drv->read_status(phydev); | ||
40 | else | ||
41 | err = genphy_read_status(phydev); | ||
39 | if (err < 0) | 42 | if (err < 0) |
40 | return err; | 43 | return err; |
41 | 44 | ||
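The converter previously assumed every attached PHY driver implements read_status; it now falls back to genphy_read_status() when the hook is missing. The optional-hook-with-generic-fallback pattern, stand-alone (types invented for the example):

    #include <stdio.h>

    struct phy;
    struct phy_driver { int (*read_status)(struct phy *p); };
    struct phy { const struct phy_driver *drv; };

    static int generic_read_status(struct phy *p) { (void)p; return 0; }

    static int read_status(struct phy *p)
    {
        if (p->drv->read_status)           /* driver-specific hook... */
            return p->drv->read_status(p);
        return generic_read_status(p);     /* ...or the generic path */
    }

    int main(void)
    {
        struct phy_driver bare = { 0 };
        struct phy p = { &bare };

        printf("status: %d\n", read_status(&p));  /* no NULL call */
        return 0;
    }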
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 735ad838e2ba..18af2f8eee96 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = { | |||
1201 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1201 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
1202 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ | 1202 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ |
1203 | {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ | 1203 | {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ |
1204 | {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ | 1204 | {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */ |
1205 | {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ | 1205 | {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */ |
1206 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ | 1206 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ |
1207 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ | 1207 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ |
1208 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ | 1208 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 60dd1ec1665f..ada6baf8847a 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -557,6 +557,7 @@ enum spd_duplex { | |||
557 | /* MAC PASSTHRU */ | 557 | /* MAC PASSTHRU */ |
558 | #define AD_MASK 0xfee0 | 558 | #define AD_MASK 0xfee0 |
559 | #define BND_MASK 0x0004 | 559 | #define BND_MASK 0x0004 |
560 | #define BD_MASK 0x0001 | ||
560 | #define EFUSE 0xcfdb | 561 | #define EFUSE 0xcfdb |
561 | #define PASS_THRU_MASK 0x1 | 562 | #define PASS_THRU_MASK 0x1 |
562 | 563 | ||
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) | |||
1176 | return -ENODEV; | 1177 | return -ENODEV; |
1177 | } | 1178 | } |
1178 | } else { | 1179 | } else { |
1179 | /* test for RTL8153-BND */ | 1180 | /* test for RTL8153-BND and RTL8153-BD */ |
1180 | ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); | 1181 | ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); |
1181 | if ((ocp_data & BND_MASK) == 0) { | 1182 | if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK)) { |
1182 | netif_dbg(tp, probe, tp->netdev, | 1183 | netif_dbg(tp, probe, tp->netdev, |
1183 | "Invalid variant for MAC pass through\n"); | 1184 | "Invalid variant for MAC pass through\n"); |
1184 | return -ENODEV; | 1185 | return -ENODEV; |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index f66e1b2f0980..3987adaaf2bd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | |||
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = { | |||
158 | .get_txpower = mt76_get_txpower, | 158 | .get_txpower = mt76_get_txpower, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | static int mt76x0u_register_device(struct mt76x02_dev *dev) | 161 | static int mt76x0u_init_hardware(struct mt76x02_dev *dev) |
162 | { | 162 | { |
163 | struct ieee80211_hw *hw = dev->mt76.hw; | ||
164 | int err; | 163 | int err; |
165 | 164 | ||
166 | err = mt76u_alloc_queues(&dev->mt76); | ||
167 | if (err < 0) | ||
168 | goto out_err; | ||
169 | |||
170 | err = mt76u_mcu_init_rx(&dev->mt76); | ||
171 | if (err < 0) | ||
172 | goto out_err; | ||
173 | |||
174 | mt76x0_chip_onoff(dev, true, true); | 165 | mt76x0_chip_onoff(dev, true, true); |
175 | if (!mt76x02_wait_for_mac(&dev->mt76)) { | 166 | |
176 | err = -ETIMEDOUT; | 167 | if (!mt76x02_wait_for_mac(&dev->mt76)) |
177 | goto out_err; | 168 | return -ETIMEDOUT; |
178 | } | ||
179 | 169 | ||
180 | err = mt76x0u_mcu_init(dev); | 170 | err = mt76x0u_mcu_init(dev); |
181 | if (err < 0) | 171 | if (err < 0) |
182 | goto out_err; | 172 | return err; |
183 | 173 | ||
184 | mt76x0_init_usb_dma(dev); | 174 | mt76x0_init_usb_dma(dev); |
185 | err = mt76x0_init_hardware(dev); | 175 | err = mt76x0_init_hardware(dev); |
186 | if (err < 0) | 176 | if (err < 0) |
187 | goto out_err; | 177 | return err; |
188 | 178 | ||
189 | mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); | 179 | mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); |
190 | mt76_wr(dev, MT_TXOP_CTRL_CFG, | 180 | mt76_wr(dev, MT_TXOP_CTRL_CFG, |
191 | FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | | 181 | FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | |
192 | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); | 182 | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); |
193 | 183 | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static int mt76x0u_register_device(struct mt76x02_dev *dev) | ||
188 | { | ||
189 | struct ieee80211_hw *hw = dev->mt76.hw; | ||
190 | int err; | ||
191 | |||
192 | err = mt76u_alloc_queues(&dev->mt76); | ||
193 | if (err < 0) | ||
194 | goto out_err; | ||
195 | |||
196 | err = mt76u_mcu_init_rx(&dev->mt76); | ||
197 | if (err < 0) | ||
198 | goto out_err; | ||
199 | |||
200 | err = mt76x0u_init_hardware(dev); | ||
201 | if (err < 0) | ||
202 | goto out_err; | ||
203 | |||
194 | err = mt76x0_register_device(dev); | 204 | err = mt76x0_register_device(dev); |
195 | if (err < 0) | 205 | if (err < 0) |
196 | goto out_err; | 206 | goto out_err; |
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, | |||
301 | 311 | ||
302 | mt76u_stop_queues(&dev->mt76); | 312 | mt76u_stop_queues(&dev->mt76); |
303 | mt76x0u_mac_stop(dev); | 313 | mt76x0u_mac_stop(dev); |
314 | clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); | ||
315 | mt76x0_chip_onoff(dev, false, false); | ||
304 | usb_kill_urb(usb->mcu.res.urb); | 316 | usb_kill_urb(usb->mcu.res.urb); |
305 | 317 | ||
306 | return 0; | 318 | return 0; |
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) | |||
328 | tasklet_enable(&usb->rx_tasklet); | 340 | tasklet_enable(&usb->rx_tasklet); |
329 | tasklet_enable(&usb->tx_tasklet); | 341 | tasklet_enable(&usb->tx_tasklet); |
330 | 342 | ||
331 | ret = mt76x0_init_hardware(dev); | 343 | ret = mt76x0u_init_hardware(dev); |
332 | if (ret) | 344 | if (ret) |
333 | goto err; | 345 | goto err; |
334 | 346 | ||
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aeeb0144bd55..8d1acc802a67 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, | |||
1785 | 1785 | ||
1786 | /* Issue Marker IOCB */ | 1786 | /* Issue Marker IOCB */ |
1787 | qla2x00_marker(vha, vha->hw->req_q_map[0], | 1787 | qla2x00_marker(vha, vha->hw->req_q_map[0], |
1788 | vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, | 1788 | vha->hw->rsp_q_map[0], fcport->loop_id, lun, |
1789 | flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); | 1789 | flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); |
1790 | } | 1790 | } |
1791 | 1791 | ||
1792 | done_free_sp: | 1792 | done_free_sp: |
1793 | sp->free(sp); | 1793 | sp->free(sp); |
1794 | sp->fcport->flags &= ~FCF_ASYNC_SENT; | 1794 | fcport->flags &= ~FCF_ASYNC_SENT; |
1795 | done: | 1795 | done: |
1796 | return rval; | 1796 | return rval; |
1797 | } | 1797 | } |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b2da8a00ec33..5464d467e23e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2951,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) | |||
2951 | if (rot == 1) { | 2951 | if (rot == 1) { |
2952 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | 2952 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
2953 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); | 2953 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); |
2954 | } else { | ||
2955 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
2956 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
2957 | } | 2954 | } |
2958 | 2955 | ||
2959 | if (sdkp->device->type == TYPE_ZBC) { | 2956 | if (sdkp->device->type == TYPE_ZBC) { |
@@ -3090,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
3090 | if (sdkp->media_present) { | 3087 | if (sdkp->media_present) { |
3091 | sd_read_capacity(sdkp, buffer); | 3088 | sd_read_capacity(sdkp, buffer); |
3092 | 3089 | ||
3090 | /* | ||
3091 | * Set the default to rotational. All non-rotational devices | ||
3092 | * support the block characteristics VPD page, which will | ||
3093 | * cause this to be updated correctly, and any device which | ||
3094 | * doesn't support it should be treated as rotational. | ||
3095 | */ | ||
3096 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
3097 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
3098 | |||
3093 | if (scsi_device_supports_vpd(sdp)) { | 3099 | if (scsi_device_supports_vpd(sdp)) { |
3094 | sd_read_block_provisioning(sdkp); | 3100 | sd_read_block_provisioning(sdkp); |
3095 | sd_read_block_limits(sdkp); | 3101 | sd_read_block_limits(sdkp); |
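sd_revalidate_disk() now forces the rotational default before the VPD pages are parsed: a device that reports a non-rotating medium in the block-characteristics page flips the queue to non-rotational, and a device without the page stays rotational instead of inheriting whatever an earlier revalidation left set. Roughly, as a sketch (queue flags compressed into booleans):

    #include <stdio.h>
    #include <stdbool.h>

    struct queue { bool nonrot; bool add_random; };

    static void revalidate(struct queue *q, bool has_vpd, bool vpd_nonrot)
    {
        /* Conservative default first: rotational. */
        q->nonrot = false;
        q->add_random = true;

        /* Refine only when the device actually reports it. */
        if (has_vpd && vpd_nonrot) {
            q->nonrot = true;
            q->add_random = false;
        }
    }

    int main(void)
    {
        struct queue q;

        revalidate(&q, false, false);  /* no VPD page: rotational */
        printf("no VPD:  nonrot=%d\n", q.nonrot);
        revalidate(&q, true, true);    /* SSD reporting via VPD */
        printf("SSD VPD: nonrot=%d\n", q.nonrot);
        return 0;
    }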
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 24a129fcdd61..a2e5dc7716e2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) | |||
1788 | 1788 | ||
1789 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, | 1789 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, |
1790 | len, iov, 64, VHOST_ACCESS_WO); | 1790 | len, iov, 64, VHOST_ACCESS_WO); |
1791 | if (ret) | 1791 | if (ret < 0) |
1792 | return ret; | 1792 | return ret; |
1793 | 1793 | ||
1794 | for (i = 0; i < ret; i++) { | 1794 | for (i = 0; i < ret; i++) { |
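translate_desc() returns the number of iovecs consumed on success, so testing "if (ret)" treated every successful translation as a failure; only negative values are errors. The count-or-negative-errno convention in a tiny demo (function body invented):

    #include <stdio.h>

    /* Returns the number of segments filled in, or a negative errno. */
    static int translate(int want)
    {
        if (want > 64)
            return -22;    /* -EINVAL: would overflow the iovec array */
        return want;       /* success: a positive count */
    }

    int main(void)
    {
        int ret = translate(3);

        /* Only negatives are errors; "if (ret)" would wrongly
         * reject this successful three-segment translation. */
        if (ret < 0) {
            printf("error %d\n", ret);
            return 1;
        }
        printf("translated into %d segments\n", ret);
        return 0;
    }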
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index 7cde3f46ad26..e996174cbfc0 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c | |||
@@ -14,13 +14,30 @@ | |||
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
16 | 16 | ||
17 | static inline bool spacetab(char c) { return c == ' ' || c == '\t'; } | ||
18 | static inline char *next_non_spacetab(char *first, const char *last) | ||
19 | { | ||
20 | for (; first <= last; first++) | ||
21 | if (!spacetab(*first)) | ||
22 | return first; | ||
23 | return NULL; | ||
24 | } | ||
25 | static inline char *next_terminator(char *first, const char *last) | ||
26 | { | ||
27 | for (; first <= last; first++) | ||
28 | if (spacetab(*first) || !*first) | ||
29 | return first; | ||
30 | return NULL; | ||
31 | } | ||
32 | |||
17 | static int load_script(struct linux_binprm *bprm) | 33 | static int load_script(struct linux_binprm *bprm) |
18 | { | 34 | { |
19 | const char *i_arg, *i_name; | 35 | const char *i_arg, *i_name; |
20 | char *cp; | 36 | char *cp, *buf_end; |
21 | struct file *file; | 37 | struct file *file; |
22 | int retval; | 38 | int retval; |
23 | 39 | ||
40 | /* Not ours to exec if we don't start with "#!". */ | ||
24 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) | 41 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) |
25 | return -ENOEXEC; | 42 | return -ENOEXEC; |
26 | 43 | ||
@@ -33,18 +50,40 @@ static int load_script(struct linux_binprm *bprm) | |||
33 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) | 50 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) |
34 | return -ENOENT; | 51 | return -ENOENT; |
35 | 52 | ||
36 | /* | 53 | /* Release since we are not mapping a binary into memory. */ |
37 | * This section does the #! interpretation. | ||
38 | * Sorta complicated, but hopefully it will work. -TYT | ||
39 | */ | ||
40 | |||
41 | allow_write_access(bprm->file); | 54 | allow_write_access(bprm->file); |
42 | fput(bprm->file); | 55 | fput(bprm->file); |
43 | bprm->file = NULL; | 56 | bprm->file = NULL; |
44 | 57 | ||
45 | bprm->buf[BINPRM_BUF_SIZE - 1] = '\0'; | 58 | /* |
46 | if ((cp = strchr(bprm->buf, '\n')) == NULL) | 59 | * This section handles parsing the #! line into separate |
47 | cp = bprm->buf+BINPRM_BUF_SIZE-1; | 60 | * interpreter path and argument strings. We must be careful |
61 | * because bprm->buf is not yet guaranteed to be NUL-terminated | ||
62 | * (though the buffer will have trailing NUL padding when the | ||
63 | * file size was smaller than the buffer size). | ||
64 | * | ||
65 | * We do not want to exec a truncated interpreter path, so either | ||
66 | * we find a newline (which indicates nothing is truncated), or | ||
67 | * we find a space/tab/NUL after the interpreter path (which | ||
68 | * itself may be preceded by spaces/tabs). Truncating the | ||
69 | * arguments is fine: the interpreter can re-read the script to | ||
70 | * parse them on its own. | ||
71 | */ | ||
72 | buf_end = bprm->buf + sizeof(bprm->buf) - 1; | ||
73 | cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n'); | ||
74 | if (!cp) { | ||
75 | cp = next_non_spacetab(bprm->buf + 2, buf_end); | ||
76 | if (!cp) | ||
77 | return -ENOEXEC; /* Entire buf is spaces/tabs */ | ||
78 | /* | ||
79 | * If there is no later space/tab/NUL we must assume the | ||
80 | * interpreter path is truncated. | ||
81 | */ | ||
82 | if (!next_terminator(cp, buf_end)) | ||
83 | return -ENOEXEC; | ||
84 | cp = buf_end; | ||
85 | } | ||
86 | /* NUL-terminate the buffer and any trailing spaces/tabs. */ | ||
48 | *cp = '\0'; | 87 | *cp = '\0'; |
49 | while (cp > bprm->buf) { | 88 | while (cp > bprm->buf) { |
50 | cp--; | 89 | cp--; |
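The rewritten load_script() refuses to execute when the interpreter path may have been cut off by the buffer window: if no newline fits, it requires some non-space after "#!" and a later space/tab/NUL terminator before accepting the line, while truncated arguments remain fine because the interpreter re-reads the script itself. A user-space sketch of the same acceptance test (buffer size and names are stand-ins):

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    #define BUF_SIZE 128   /* stands in for BINPRM_BUF_SIZE */

    static bool spacetab(char c) { return c == ' ' || c == '\t'; }

    /* True when the #! line is safe to parse: either the whole line
     * fits, or the interpreter path provably ends inside the buffer. */
    static bool shebang_ok(const char *buf, size_t len)
    {
        if (len < 2 || buf[0] != '#' || buf[1] != '!')
            return false;
        if (memchr(buf, '\n', len))
            return true;                  /* nothing was truncated */

        const char *p = buf + 2, *end = buf + len;
        while (p < end && spacetab(*p))   /* skip leading spaces/tabs */
            p++;
        if (p == end)
            return false;                 /* only whitespace: reject */
        while (p < end && *p && !spacetab(*p))
            p++;                          /* scan the interpreter path */
        return p < end;                   /* path ended before the cut */
    }

    int main(void)
    {
        const char full[] = "#!/bin/sh -e\n";
        char cut[BUF_SIZE];

        memset(cut, 'a', sizeof(cut));    /* "#!/aaa..." with no end */
        cut[0] = '#'; cut[1] = '!'; cut[2] = '/';

        printf("full line:  %d\n", shebang_ok(full, sizeof(full) - 1));
        printf("truncated:  %d\n", shebang_ok(cut, sizeof(cut)));
        return 0;
    }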
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index f12cb31a41e5..d09c9f878141 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -238,9 +238,9 @@ out: | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ | 240 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ |
241 | static void nfs_set_pageerror(struct page *page) | 241 | static void nfs_set_pageerror(struct address_space *mapping) |
242 | { | 242 | { |
243 | nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); | 243 | nfs_zap_mapping(mapping->host, mapping); |
244 | } | 244 | } |
245 | 245 | ||
246 | /* | 246 | /* |
@@ -994,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
994 | nfs_list_remove_request(req); | 994 | nfs_list_remove_request(req); |
995 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && | 995 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && |
996 | (hdr->good_bytes < bytes)) { | 996 | (hdr->good_bytes < bytes)) { |
997 | nfs_set_pageerror(req->wb_page); | 997 | nfs_set_pageerror(page_file_mapping(req->wb_page)); |
998 | nfs_context_set_write_error(req->wb_context, hdr->error); | 998 | nfs_context_set_write_error(req->wb_context, hdr->error); |
999 | goto remove_req; | 999 | goto remove_req; |
1000 | } | 1000 | } |
@@ -1348,7 +1348,8 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
1348 | unsigned int offset, unsigned int count) | 1348 | unsigned int offset, unsigned int count) |
1349 | { | 1349 | { |
1350 | struct nfs_open_context *ctx = nfs_file_open_context(file); | 1350 | struct nfs_open_context *ctx = nfs_file_open_context(file); |
1351 | struct inode *inode = page_file_mapping(page)->host; | 1351 | struct address_space *mapping = page_file_mapping(page); |
1352 | struct inode *inode = mapping->host; | ||
1352 | int status = 0; | 1353 | int status = 0; |
1353 | 1354 | ||
1354 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); | 1355 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); |
@@ -1366,7 +1367,7 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
1366 | 1367 | ||
1367 | status = nfs_writepage_setup(ctx, page, offset, count); | 1368 | status = nfs_writepage_setup(ctx, page, offset, count); |
1368 | if (status < 0) | 1369 | if (status < 0) |
1369 | nfs_set_pageerror(page); | 1370 | nfs_set_pageerror(mapping); |
1370 | else | 1371 | else |
1371 | __set_page_dirty_nobuffers(page); | 1372 | __set_page_dirty_nobuffers(page); |
1372 | out: | 1373 | out: |
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index b33f9785b756..72a7681f4046 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c | |||
@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net) | |||
1239 | retval = nfsd_idmap_init(net); | 1239 | retval = nfsd_idmap_init(net); |
1240 | if (retval) | 1240 | if (retval) |
1241 | goto out_idmap_error; | 1241 | goto out_idmap_error; |
1242 | nn->nfsd4_lease = 45; /* default lease time */ | 1242 | nn->nfsd4_lease = 90; /* default lease time */ |
1243 | nn->nfsd4_grace = 45; | 1243 | nn->nfsd4_grace = 90; |
1244 | nn->somebody_reclaimed = false; | 1244 | nn->somebody_reclaimed = false; |
1245 | nn->clverifier_counter = prandom_u32(); | 1245 | nn->clverifier_counter = prandom_u32(); |
1246 | nn->clientid_counter = prandom_u32(); | 1246 | nn->clientid_counter = prandom_u32(); |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4f31f96bbfab..c36c86f1ec9a 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -100,7 +100,7 @@ enum vgic_irq_config { | |||
100 | }; | 100 | }; |
101 | 101 | ||
102 | struct vgic_irq { | 102 | struct vgic_irq { |
103 | spinlock_t irq_lock; /* Protects the content of the struct */ | 103 | raw_spinlock_t irq_lock; /* Protects the content of the struct */ |
104 | struct list_head lpi_list; /* Used to link all LPIs together */ | 104 | struct list_head lpi_list; /* Used to link all LPIs together */ |
105 | struct list_head ap_list; | 105 | struct list_head ap_list; |
106 | 106 | ||
@@ -256,7 +256,7 @@ struct vgic_dist { | |||
256 | u64 propbaser; | 256 | u64 propbaser; |
257 | 257 | ||
258 | /* Protects the lpi_list and the count value below. */ | 258 | /* Protects the lpi_list and the count value below. */ |
259 | spinlock_t lpi_list_lock; | 259 | raw_spinlock_t lpi_list_lock; |
260 | struct list_head lpi_list_head; | 260 | struct list_head lpi_list_head; |
261 | int lpi_list_count; | 261 | int lpi_list_count; |
262 | 262 | ||
@@ -307,7 +307,7 @@ struct vgic_cpu { | |||
307 | unsigned int used_lrs; | 307 | unsigned int used_lrs; |
308 | struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; | 308 | struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; |
309 | 309 | ||
310 | spinlock_t ap_list_lock; /* Protects the ap_list */ | 310 | raw_spinlock_t ap_list_lock; /* Protects the ap_list */ |
311 | 311 | ||
312 | /* | 312 | /* |
313 | * List of IRQs that this VCPU should consider because they are either | 313 | * List of IRQs that this VCPU should consider because they are either |
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 19f32b0c29af..6b318efd8a74 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h | |||
@@ -34,6 +34,7 @@ | |||
34 | #ifndef __has_attribute | 34 | #ifndef __has_attribute |
35 | # define __has_attribute(x) __GCC4_has_attribute_##x | 35 | # define __has_attribute(x) __GCC4_has_attribute_##x |
36 | # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) | 36 | # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) |
37 | # define __GCC4_has_attribute___copy__ 0 | ||
37 | # define __GCC4_has_attribute___designated_init__ 0 | 38 | # define __GCC4_has_attribute___designated_init__ 0 |
38 | # define __GCC4_has_attribute___externally_visible__ 1 | 39 | # define __GCC4_has_attribute___externally_visible__ 1 |
39 | # define __GCC4_has_attribute___noclone__ 1 | 40 | # define __GCC4_has_attribute___noclone__ 1 |
@@ -101,6 +102,19 @@ | |||
101 | #define __attribute_const__ __attribute__((__const__)) | 102 | #define __attribute_const__ __attribute__((__const__)) |
102 | 103 | ||
103 | /* | 104 | /* |
105 | * Optional: only supported since gcc >= 9 | ||
106 | * Optional: not supported by clang | ||
107 | * Optional: not supported by icc | ||
108 | * | ||
109 | * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute | ||
110 | */ | ||
111 | #if __has_attribute(__copy__) | ||
112 | # define __copy(symbol) __attribute__((__copy__(symbol))) | ||
113 | #else | ||
114 | # define __copy(symbol) | ||
115 | #endif | ||
116 | |||
117 | /* | ||
104 | * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' | 118 | * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' |
105 | * attribute warnings entirely and for good") for more information. | 119 | * attribute warnings entirely and for good") for more information. |
106 | * | 120 | * |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 45ff763fba76..28604a8d0aa9 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -1198,8 +1198,6 @@ static inline bool efi_enabled(int feature) | |||
1198 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); | 1198 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); |
1199 | 1199 | ||
1200 | extern bool efi_is_table_address(unsigned long phys_addr); | 1200 | extern bool efi_is_table_address(unsigned long phys_addr); |
1201 | |||
1202 | extern int efi_apply_persistent_mem_reservations(void); | ||
1203 | #else | 1201 | #else |
1204 | static inline bool efi_enabled(int feature) | 1202 | static inline bool efi_enabled(int feature) |
1205 | { | 1203 | { |
@@ -1218,11 +1216,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr) | |||
1218 | { | 1216 | { |
1219 | return false; | 1217 | return false; |
1220 | } | 1218 | } |
1221 | |||
1222 | static inline int efi_apply_persistent_mem_reservations(void) | ||
1223 | { | ||
1224 | return 0; | ||
1225 | } | ||
1226 | #endif | 1219 | #endif |
1227 | 1220 | ||
1228 | extern int efi_status_to_err(efi_status_t status); | 1221 | extern int efi_status_to_err(efi_status_t status); |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 64c41cf45590..859b55b66db2 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -29,9 +29,6 @@ extern unsigned long max_pfn; | |||
29 | */ | 29 | */ |
30 | extern unsigned long long max_possible_pfn; | 30 | extern unsigned long long max_possible_pfn; |
31 | 31 | ||
32 | #define INIT_MEMBLOCK_REGIONS 128 | ||
33 | #define INIT_PHYSMEM_REGIONS 4 | ||
34 | |||
35 | /** | 32 | /** |
36 | * enum memblock_flags - definition of memory region attributes | 33 | * enum memblock_flags - definition of memory region attributes |
37 | * @MEMBLOCK_NONE: no special request | 34 | * @MEMBLOCK_NONE: no special request |
diff --git a/include/linux/module.h b/include/linux/module.h index 8fa38d3e7538..f5bc4c046461 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -129,13 +129,13 @@ extern void cleanup_module(void); | |||
129 | #define module_init(initfn) \ | 129 | #define module_init(initfn) \ |
130 | static inline initcall_t __maybe_unused __inittest(void) \ | 130 | static inline initcall_t __maybe_unused __inittest(void) \ |
131 | { return initfn; } \ | 131 | { return initfn; } \ |
132 | int init_module(void) __attribute__((alias(#initfn))); | 132 | int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); |
133 | 133 | ||
134 | /* This is only required if you want to be unloadable. */ | 134 | /* This is only required if you want to be unloadable. */ |
135 | #define module_exit(exitfn) \ | 135 | #define module_exit(exitfn) \ |
136 | static inline exitcall_t __maybe_unused __exittest(void) \ | 136 | static inline exitcall_t __maybe_unused __exittest(void) \ |
137 | { return exitfn; } \ | 137 | { return exitfn; } \ |
138 | void cleanup_module(void) __attribute__((alias(#exitfn))); | 138 | void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); |
139 | 139 | ||
140 | #endif | 140 | #endif |
141 | 141 | ||
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2b2a6dce1630..4c76fe2c8488 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #define _LINUX_NETDEV_FEATURES_H | 11 | #define _LINUX_NETDEV_FEATURES_H |
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/bitops.h> | ||
15 | #include <asm/byteorder.h> | ||
14 | 16 | ||
15 | typedef u64 netdev_features_t; | 17 | typedef u64 netdev_features_t; |
16 | 18 | ||
@@ -154,8 +156,26 @@ enum { | |||
154 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) | 156 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) |
155 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) | 157 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) |
156 | 158 | ||
157 | #define for_each_netdev_feature(mask_addr, bit) \ | 159 | /* Finds the set feature with the highest bit number in the range start-1 down to 0.
158 | for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) | 160 | */ |
161 | static inline int find_next_netdev_feature(u64 feature, unsigned long start) | ||
162 | { | ||
163 | /* like BITMAP_LAST_WORD_MASK() for u64 | ||
164 | * this sets the most significant 64 - start to 0. | ||
165 | */ | ||
166 | feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); | ||
167 | |||
168 | return fls64(feature) - 1; | ||
169 | } | ||
170 | |||
171 | /* This walks the set feature bits from the MSB to the LSB; | ||
172 | * mask_addr should be a u64 and bit an int | ||
173 | */ | ||
174 | #define for_each_netdev_feature(mask_addr, bit) \ | ||
175 | for ((bit) = find_next_netdev_feature((mask_addr), \ | ||
176 | NETDEV_FEATURE_COUNT); \ | ||
177 | (bit) >= 0; \ | ||
178 | (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) | ||
159 | 179 | ||
160 | /* Features valid for ethtool to change */ | 180 | /* Features valid for ethtool to change */ |
161 | /* = all defined minus driver/device-class-related */ | 181 | /* = all defined minus driver/device-class-related */ |
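The new iteration walks feature bits from the most significant downwards with an fls64()-based helper; the old for_each_set_bit() cast the u64 mask to unsigned long, which visits the two 32-bit halves in the wrong order on 32-bit big-endian machines. The sketch below redoes the walk with an explicitly inclusive upper bound, which makes the loop's update expression easy to reason about (FEATURE_COUNT and the helper are stand-ins, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>

    #define FEATURE_COUNT 64

    /* Highest set bit of mask at position <= start, or -1 if none:
     * a portable, inclusive version of the fls64() masking trick. */
    static int highest_feature(uint64_t mask, int start)
    {
        for (int bit = start; bit >= 0; bit--)
            if (mask & ((uint64_t)1 << bit))
                return bit;
        return -1;
    }

    int main(void)
    {
        uint64_t features = (1ULL << 40) | (1ULL << 7) | (1ULL << 0);

        /* MSB-to-LSB walk over the whole 64-bit mask; prints 40, 7, 0
         * regardless of host endianness or word size. */
        for (int bit = highest_feature(features, FEATURE_COUNT - 1);
             bit >= 0;
             bit = highest_feature(features, bit - 1))
            printf("feature bit %d\n", bit);
        return 0;
    }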
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1d5c551a5add..e1a051724f7e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -447,6 +447,11 @@ struct pmu { | |||
447 | * Filter events for PMU-specific reasons. | 447 | * Filter events for PMU-specific reasons. |
448 | */ | 448 | */ |
449 | int (*filter_match) (struct perf_event *event); /* optional */ | 449 | int (*filter_match) (struct perf_event *event); /* optional */ |
450 | |||
451 | /* | ||
452 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. | ||
453 | */ | ||
454 | int (*check_period) (struct perf_event *event, u64 value); /* optional */ | ||
450 | }; | 455 | }; |
451 | 456 | ||
452 | enum perf_addr_filter_action_t { | 457 | enum perf_addr_filter_action_t { |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a41e84f7730c..2069fb90a559 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2439,7 +2439,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, | |||
2439 | 2439 | ||
2440 | if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) | 2440 | if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) |
2441 | skb_set_transport_header(skb, keys.control.thoff); | 2441 | skb_set_transport_header(skb, keys.control.thoff); |
2442 | else | 2442 | else if (offset_hint >= 0) |
2443 | skb_set_transport_header(skb, offset_hint); | 2443 | skb_set_transport_header(skb, offset_hint); |
2444 | } | 2444 | } |
2445 | 2445 | ||
@@ -4235,6 +4235,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb) | |||
4235 | return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; | 4235 | return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; |
4236 | } | 4236 | } |
4237 | 4237 | ||
4238 | static inline bool skb_is_gso_tcp(const struct sk_buff *skb) | ||
4239 | { | ||
4240 | return skb_is_gso(skb) && | ||
4241 | skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); | ||
4242 | } | ||
4243 | |||
4238 | static inline void skb_gso_reset(struct sk_buff *skb) | 4244 | static inline void skb_gso_reset(struct sk_buff *skb) |
4239 | { | 4245 | { |
4240 | skb_shinfo(skb)->gso_size = 0; | 4246 | skb_shinfo(skb)->gso_size = 0; |
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index cb462f9ab7dd..71f2394abbf7 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -57,6 +57,15 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, | |||
57 | 57 | ||
58 | if (!skb_partial_csum_set(skb, start, off)) | 58 | if (!skb_partial_csum_set(skb, start, off)) |
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | } else { | ||
61 | /* gso packets without NEEDS_CSUM do not set transport_offset. | ||
62 | * probe and drop if does not match one of the above types. | ||
63 | */ | ||
64 | if (gso_type) { | ||
65 | skb_probe_transport_header(skb, -1); | ||
66 | if (!skb_transport_header_was_set(skb)) | ||
67 | return -EINVAL; | ||
68 | } | ||
60 | } | 69 | } |
61 | 70 | ||
62 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 71 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index d43b14535827..950ab2f28922 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c | |||
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry) | |||
44 | struct stack_map_irq_work *work; | 44 | struct stack_map_irq_work *work; |
45 | 45 | ||
46 | work = container_of(entry, struct stack_map_irq_work, irq_work); | 46 | work = container_of(entry, struct stack_map_irq_work, irq_work); |
47 | up_read(work->sem); | 47 | up_read_non_owner(work->sem); |
48 | work->sem = NULL; | 48 | work->sem = NULL; |
49 | } | 49 | } |
50 | 50 | ||
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, | |||
338 | } else { | 338 | } else { |
339 | work->sem = ¤t->mm->mmap_sem; | 339 | work->sem = ¤t->mm->mmap_sem; |
340 | irq_work_queue(&work->irq_work); | 340 | irq_work_queue(&work->irq_work); |
341 | /* | ||
342 | * The irq_work will release the mmap_sem with | ||
343 | * up_read_non_owner(). The rwsem_release() is called | ||
344 | * here to release the lock from lockdep's perspective. | ||
345 | */ | ||
346 | rwsem_release(¤t->mm->mmap_sem.dep_map, 1, _RET_IP_); | ||
341 | } | 347 | } |
342 | } | 348 | } |
343 | 349 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index e5ede6918050..26d6edab051a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event, | |||
4963 | } | 4963 | } |
4964 | } | 4964 | } |
4965 | 4965 | ||
4966 | static int perf_event_check_period(struct perf_event *event, u64 value) | ||
4967 | { | ||
4968 | return event->pmu->check_period(event, value); | ||
4969 | } | ||
4970 | |||
4966 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | 4971 | static int perf_event_period(struct perf_event *event, u64 __user *arg) |
4967 | { | 4972 | { |
4968 | u64 value; | 4973 | u64 value; |
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) | |||
4979 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) | 4984 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
4980 | return -EINVAL; | 4985 | return -EINVAL; |
4981 | 4986 | ||
4987 | if (perf_event_check_period(event, value)) | ||
4988 | return -EINVAL; | ||
4989 | |||
4982 | event_function_call(event, __perf_event_period, &value); | 4990 | event_function_call(event, __perf_event_period, &value); |
4983 | 4991 | ||
4984 | return 0; | 4992 | return 0; |
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu) | |||
9391 | return 0; | 9399 | return 0; |
9392 | } | 9400 | } |
9393 | 9401 | ||
9402 | static int perf_event_nop_int(struct perf_event *event, u64 value) | ||
9403 | { | ||
9404 | return 0; | ||
9405 | } | ||
9406 | |||
9394 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); | 9407 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); |
9395 | 9408 | ||
9396 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) | 9409 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) |
@@ -9691,6 +9704,9 @@ got_cpu_context: | |||
9691 | pmu->pmu_disable = perf_pmu_nop_void; | 9704 | pmu->pmu_disable = perf_pmu_nop_void; |
9692 | } | 9705 | } |
9693 | 9706 | ||
9707 | if (!pmu->check_period) | ||
9708 | pmu->check_period = perf_event_nop_int; | ||
9709 | |||
9694 | if (!pmu->event_idx) | 9710 | if (!pmu->event_idx) |
9695 | pmu->event_idx = perf_event_idx_default; | 9711 | pmu->event_idx = perf_event_idx_default; |
9696 | 9712 | ||
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 309ef5a64af5..5ab4fe3b1dcc 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) | |||
734 | size = sizeof(struct ring_buffer); | 734 | size = sizeof(struct ring_buffer); |
735 | size += nr_pages * sizeof(void *); | 735 | size += nr_pages * sizeof(void *); |
736 | 736 | ||
737 | if (order_base_2(size) >= MAX_ORDER) | 737 | if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER) |
738 | goto fail; | 738 | goto fail; |
739 | 739 | ||
740 | rb = kzalloc(size, GFP_KERNEL); | 740 | rb = kzalloc(size, GFP_KERNEL); |
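The old bound compared order_base_2() of a size in bytes against MAX_ORDER, which counts page orders; the two scales differ by PAGE_SHIFT, so reasonable allocations were rejected. A small userspace check of the arithmetic, assuming 4 KiB pages and a MAX_ORDER of 11:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages (assumption) */
    #define MAX_ORDER  11   /* typical default (assumption) */

    /* Rough equivalent of the kernel's order_base_2(): ceil(log2(n)). */
    static unsigned int order_base_2(unsigned long n)
    {
        unsigned int order = 0;

        while ((1UL << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long size = 4096; /* bytes for the struct + page pointers */

        /* Buggy: compares a byte order against a page order. */
        printf("old check rejects: %d\n", order_base_2(size) >= MAX_ORDER);
        /* Fixed: the byte limit is 2^(PAGE_SHIFT + MAX_ORDER). */
        printf("new check rejects: %d\n",
               order_base_2(size) >= PAGE_SHIFT + MAX_ORDER);
        return 0;
    }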
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c521b7347482..c4238b441624 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file | |||
3384 | const char tgid_space[] = " "; | 3384 | const char tgid_space[] = " "; |
3385 | const char space[] = " "; | 3385 | const char space[] = " "; |
3386 | 3386 | ||
3387 | print_event_info(buf, m); | ||
3388 | |||
3387 | seq_printf(m, "# %s _-----=> irqs-off\n", | 3389 | seq_printf(m, "# %s _-----=> irqs-off\n", |
3388 | tgid ? tgid_space : space); | 3390 | tgid ? tgid_space : space); |
3389 | seq_printf(m, "# %s / _----=> need-resched\n", | 3391 | seq_printf(m, "# %s / _----=> need-resched\n", |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d5fb09ebba8b..9eaf07f99212 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = { | |||
861 | static nokprobe_inline int | 861 | static nokprobe_inline int |
862 | fetch_store_strlen(unsigned long addr) | 862 | fetch_store_strlen(unsigned long addr) |
863 | { | 863 | { |
864 | mm_segment_t old_fs; | ||
865 | int ret, len = 0; | 864 | int ret, len = 0; |
866 | u8 c; | 865 | u8 c; |
867 | 866 | ||
868 | old_fs = get_fs(); | ||
869 | set_fs(KERNEL_DS); | ||
870 | pagefault_disable(); | ||
871 | |||
872 | do { | 867 | do { |
873 | ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); | 868 | ret = probe_mem_read(&c, (u8 *)addr + len, 1); |
874 | len++; | 869 | len++; |
875 | } while (c && ret == 0 && len < MAX_STRING_SIZE); | 870 | } while (c && ret == 0 && len < MAX_STRING_SIZE); |
876 | 871 | ||
877 | pagefault_enable(); | ||
878 | set_fs(old_fs); | ||
879 | |||
880 | return (ret < 0) ? ret : len; | 872 | return (ret < 0) ? ret : len; |
881 | } | 873 | } |
882 | 874 | ||
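fetch_store_strlen() now leans on probe_mem_read(), the fault-safe accessor, instead of flipping the address limit with set_fs() around __copy_from_user_inatomic(). The loop itself is unchanged: read one byte at a time until a NUL, an error, or the size cap. A userspace analogue of that bounded walk, with the fault-safety stubbed out:

    #include <stdio.h>

    #define MAX_STRING_SIZE 1024

    /* Stand-in for probe_mem_read(): copy one byte, report failure. In
     * the kernel this read is fault-safe; here it always succeeds.
     */
    static int probe_read_byte(unsigned char *dst, const unsigned char *src)
    {
        *dst = *src;
        return 0;
    }

    /* Mirrors fetch_store_strlen(): length including the trailing NUL,
     * bounded by MAX_STRING_SIZE.
     */
    static int bounded_strlen(const unsigned char *addr)
    {
        int ret, len = 0;
        unsigned char c;

        do {
            ret = probe_read_byte(&c, addr + len);
            len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);

        return (ret < 0) ? ret : len;
    }

    int main(void)
    {
        printf("%d\n", bounded_strlen((const unsigned char *)"hello")); /* 6 */
        return 0;
    }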
diff --git a/lib/crc32.c b/lib/crc32.c index 45b1d67a1767..4a20455d1f61 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) | |||
206 | EXPORT_SYMBOL(crc32_le); | 206 | EXPORT_SYMBOL(crc32_le); |
207 | EXPORT_SYMBOL(__crc32c_le); | 207 | EXPORT_SYMBOL(__crc32c_le); |
208 | 208 | ||
209 | u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); | 209 | u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); |
210 | u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); | 210 | u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); |
211 | 211 | ||
212 | /* | 212 | /* |
213 | * This multiplies the polynomials x and y modulo the given modulus. | 213 | * This multiplies the polynomials x and y modulo the given modulus. |
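The alias declarations gain __pure so their attributes match the functions they alias; newer compilers diagnose the mismatch. A minimal GCC/Clang demo of the attribute pairing, with hypothetical function names:

    #include <stdio.h>

    /* The real function: pure, i.e. its result depends only on its args. */
    __attribute__((pure))
    unsigned int add_mod(unsigned int a, unsigned int b)
    {
        return (a + b) % 255u;
    }

    /* The alias must repeat the target's attributes, just as the hunk
     * above adds __pure to crc32_le_base()/__crc32c_le_base().
     */
    __attribute__((pure, alias("add_mod")))
    unsigned int add_mod_base(unsigned int a, unsigned int b);

    int main(void)
    {
        printf("%u %u\n", add_mod(200, 100), add_mod_base(200, 100));
        return 0;
    }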
diff --git a/mm/memblock.c b/mm/memblock.c index 022d4cbb3618..ea31045ba704 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -26,6 +26,13 @@ | |||
26 | 26 | ||
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | 28 | ||
29 | #define INIT_MEMBLOCK_REGIONS 128 | ||
30 | #define INIT_PHYSMEM_REGIONS 4 | ||
31 | |||
32 | #ifndef INIT_MEMBLOCK_RESERVED_REGIONS | ||
33 | # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS | ||
34 | #endif | ||
35 | |||
29 | /** | 36 | /** |
30 | * DOC: memblock overview | 37 | * DOC: memblock overview |
31 | * | 38 | * |
@@ -92,7 +99,7 @@ unsigned long max_pfn; | |||
92 | unsigned long long max_possible_pfn; | 99 | unsigned long long max_possible_pfn; |
93 | 100 | ||
94 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 101 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
95 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 102 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; |
96 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | 103 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
97 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; | 104 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; |
98 | #endif | 105 | #endif |
@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = { | |||
105 | 112 | ||
106 | .reserved.regions = memblock_reserved_init_regions, | 113 | .reserved.regions = memblock_reserved_init_regions, |
107 | .reserved.cnt = 1, /* empty dummy entry */ | 114 | .reserved.cnt = 1, /* empty dummy entry */ |
108 | .reserved.max = INIT_MEMBLOCK_REGIONS, | 115 | .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, |
109 | .reserved.name = "reserved", | 116 | .reserved.name = "reserved", |
110 | 117 | ||
111 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | 118 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
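The #ifndef gives an architecture a hook to enlarge just the reserved-regions array, presumably for arches with many early reservations, while everyone else keeps the generic value. The same override pattern in miniature, with hypothetical names:

    #include <stdio.h>

    #define INIT_REGIONS 128

    /* An arch header could define INIT_RESERVED_REGIONS before this
     * point; otherwise it falls back to the generic value, as with
     * INIT_MEMBLOCK_RESERVED_REGIONS in the hunk above.
     */
    #ifndef INIT_RESERVED_REGIONS
    # define INIT_RESERVED_REGIONS INIT_REGIONS
    #endif

    static int memory_regions[INIT_REGIONS];
    static int reserved_regions[INIT_RESERVED_REGIONS];

    int main(void)
    {
        printf("memory: %zu entries, reserved: %zu entries\n",
               sizeof(memory_regions) / sizeof(memory_regions[0]),
               sizeof(reserved_regions) / sizeof(reserved_regions[0]));
        return 0;
    }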
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 46285d28e43b..7f79b78bc829 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -4675,11 +4675,11 @@ refill: | |||
4675 | /* Even if we own the page, we do not use atomic_set(). | 4675 | /* Even if we own the page, we do not use atomic_set(). |
4676 | * This would break get_page_unless_zero() users. | 4676 | * This would break get_page_unless_zero() users. |
4677 | */ | 4677 | */ |
4678 | page_ref_add(page, size); | 4678 | page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); |
4679 | 4679 | ||
4680 | /* reset page count bias and offset to start of new frag */ | 4680 | /* reset page count bias and offset to start of new frag */ |
4681 | nc->pfmemalloc = page_is_pfmemalloc(page); | 4681 | nc->pfmemalloc = page_is_pfmemalloc(page); |
4682 | nc->pagecnt_bias = size + 1; | 4682 | nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; |
4683 | nc->offset = size; | 4683 | nc->offset = size; |
4684 | } | 4684 | } |
4685 | 4685 | ||
@@ -4695,10 +4695,10 @@ refill: | |||
4695 | size = nc->size; | 4695 | size = nc->size; |
4696 | #endif | 4696 | #endif |
4697 | /* OK, page count is 0, we can safely set it */ | 4697 | /* OK, page count is 0, we can safely set it */ |
4698 | set_page_count(page, size + 1); | 4698 | set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); |
4699 | 4699 | ||
4700 | /* reset page count bias and offset to start of new frag */ | 4700 | /* reset page count bias and offset to start of new frag */ |
4701 | nc->pagecnt_bias = size + 1; | 4701 | nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; |
4702 | offset = size - fragsz; | 4702 | offset = size - fragsz; |
4703 | } | 4703 | } |
4704 | 4704 | ||
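On refill the cache can fall back to an order-0 page, so size may be smaller than PAGE_FRAG_CACHE_MAX_SIZE; biasing the refcount with the constant on both the add and the reset side keeps the two counters in lockstep no matter which page size is in play. A toy model of the accounting, assuming a 32 KiB cache and a 4 KiB fallback page:

    #include <stdio.h>

    #define PAGE_FRAG_CACHE_MAX_SIZE 32768 /* assumption: order-3, 4K pages */

    int main(void)
    {
        long refcount = 1 + PAGE_FRAG_CACHE_MAX_SIZE; /* after page_ref_add() */
        long bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;     /* nc->pagecnt_bias */
        unsigned int offset = 4096;                   /* order-0 fallback page */
        int handed_out = 0;

        while (offset >= 1) { /* carve 1-byte fragments, as a worst case */
            offset -= 1;
            bias--;           /* one bias unit consumed per fragment */
            handed_out++;
        }

        /* refcount - bias counts exactly the fragments live outside the
         * cache, independent of the actual page size.
         */
        printf("live fragments: %ld (expected %d)\n",
               refcount - bias, handed_out);
        return 0;
    }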
diff --git a/net/core/dev.c b/net/core/dev.c index ecbe419e05ab..a3d13f5e2bfc 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -8215,7 +8215,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower, | |||
8215 | netdev_features_t feature; | 8215 | netdev_features_t feature; |
8216 | int feature_bit; | 8216 | int feature_bit; |
8217 | 8217 | ||
8218 | for_each_netdev_feature(&upper_disables, feature_bit) { | 8218 | for_each_netdev_feature(upper_disables, feature_bit) { |
8219 | feature = __NETIF_F_BIT(feature_bit); | 8219 | feature = __NETIF_F_BIT(feature_bit); |
8220 | if (!(upper->wanted_features & feature) | 8220 | if (!(upper->wanted_features & feature) |
8221 | && (features & feature)) { | 8221 | && (features & feature)) { |
@@ -8235,7 +8235,7 @@ static void netdev_sync_lower_features(struct net_device *upper, | |||
8235 | netdev_features_t feature; | 8235 | netdev_features_t feature; |
8236 | int feature_bit; | 8236 | int feature_bit; |
8237 | 8237 | ||
8238 | for_each_netdev_feature(&upper_disables, feature_bit) { | 8238 | for_each_netdev_feature(upper_disables, feature_bit) { |
8239 | feature = __NETIF_F_BIT(feature_bit); | 8239 | feature = __NETIF_F_BIT(feature_bit); |
8240 | if (!(features & feature) && (lower->features & feature)) { | 8240 | if (!(features & feature) && (lower->features & feature)) { |
8241 | netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", | 8241 | netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", |
diff --git a/net/core/filter.c b/net/core/filter.c index b584cb42a803..5132c054c981 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2804,8 +2804,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) | |||
2804 | u32 off = skb_mac_header_len(skb); | 2804 | u32 off = skb_mac_header_len(skb); |
2805 | int ret; | 2805 | int ret; |
2806 | 2806 | ||
2807 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2807 | if (!skb_is_gso_tcp(skb)) |
2808 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2809 | return -ENOTSUPP; | 2808 | return -ENOTSUPP; |
2810 | 2809 | ||
2811 | ret = skb_cow(skb, len_diff); | 2810 | ret = skb_cow(skb, len_diff); |
@@ -2846,8 +2845,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) | |||
2846 | u32 off = skb_mac_header_len(skb); | 2845 | u32 off = skb_mac_header_len(skb); |
2847 | int ret; | 2846 | int ret; |
2848 | 2847 | ||
2849 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2848 | if (!skb_is_gso_tcp(skb)) |
2850 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2851 | return -ENOTSUPP; | 2849 | return -ENOTSUPP; |
2852 | 2850 | ||
2853 | ret = skb_unclone(skb, GFP_ATOMIC); | 2851 | ret = skb_unclone(skb, GFP_ATOMIC); |
@@ -2972,8 +2970,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) | |||
2972 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | 2970 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
2973 | int ret; | 2971 | int ret; |
2974 | 2972 | ||
2975 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2973 | if (!skb_is_gso_tcp(skb)) |
2976 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2977 | return -ENOTSUPP; | 2974 | return -ENOTSUPP; |
2978 | 2975 | ||
2979 | ret = skb_cow(skb, len_diff); | 2976 | ret = skb_cow(skb, len_diff); |
@@ -3002,8 +2999,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) | |||
3002 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | 2999 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
3003 | int ret; | 3000 | int ret; |
3004 | 3001 | ||
3005 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 3002 | if (!skb_is_gso_tcp(skb)) |
3006 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
3007 | return -ENOTSUPP; | 3003 | return -ENOTSUPP; |
3008 | 3004 | ||
3009 | ret = skb_unclone(skb, GFP_ATOMIC); | 3005 | ret = skb_unclone(skb, GFP_ATOMIC); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26d848484912..2415d9cb9b89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
356 | */ | 356 | */ |
357 | void *netdev_alloc_frag(unsigned int fragsz) | 357 | void *netdev_alloc_frag(unsigned int fragsz) |
358 | { | 358 | { |
359 | fragsz = SKB_DATA_ALIGN(fragsz); | ||
360 | |||
359 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC); | 361 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC); |
360 | } | 362 | } |
361 | EXPORT_SYMBOL(netdev_alloc_frag); | 363 | EXPORT_SYMBOL(netdev_alloc_frag); |
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
369 | 371 | ||
370 | void *napi_alloc_frag(unsigned int fragsz) | 372 | void *napi_alloc_frag(unsigned int fragsz) |
371 | { | 373 | { |
374 | fragsz = SKB_DATA_ALIGN(fragsz); | ||
375 | |||
372 | return __napi_alloc_frag(fragsz, GFP_ATOMIC); | 376 | return __napi_alloc_frag(fragsz, GFP_ATOMIC); |
373 | } | 377 | } |
374 | EXPORT_SYMBOL(napi_alloc_frag); | 378 | EXPORT_SYMBOL(napi_alloc_frag); |
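Rounding fragsz up in the exported wrappers guarantees every fragment handed out is cache-line aligned even when a caller passes an odd size. A quick demo of the align-up arithmetic, assuming 64-byte cache lines (SKB_DATA_ALIGN rounds to SMP_CACHE_BYTES the same way):

    #include <stdio.h>

    #define SMP_CACHE_BYTES 64 /* assumption: typical L1 line size */
    /* Same shape as SKB_DATA_ALIGN(): round up to the cache line. */
    #define DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))

    int main(void)
    {
        unsigned int sizes[] = { 1, 64, 65, 1500 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("%4u -> %4u\n", sizes[i], DATA_ALIGN(sizes[i]));
        return 0; /* 1->64, 64->64, 65->128, 1500->1536 */
    }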
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index cab6b2f2f61d..769508c75dce 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2546,6 +2546,7 @@ void tcp_write_queue_purge(struct sock *sk) | |||
2546 | sk_mem_reclaim(sk); | 2546 | sk_mem_reclaim(sk); |
2547 | tcp_clear_all_retrans_hints(tcp_sk(sk)); | 2547 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
2548 | tcp_sk(sk)->packets_out = 0; | 2548 | tcp_sk(sk)->packets_out = 0; |
2549 | inet_csk(sk)->icsk_backoff = 0; | ||
2549 | } | 2550 | } |
2550 | 2551 | ||
2551 | int tcp_disconnect(struct sock *sk, int flags) | 2552 | int tcp_disconnect(struct sock *sk, int flags) |
@@ -2596,6 +2597,7 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2596 | if (tp->write_seq == 0) | 2597 | if (tp->write_seq == 0) |
2597 | tp->write_seq = 1; | 2598 | tp->write_seq = 1; |
2598 | icsk->icsk_backoff = 0; | 2599 | icsk->icsk_backoff = 0; |
2600 | tp->snd_cwnd = 2; | ||
2599 | icsk->icsk_probes_out = 0; | 2601 | icsk->icsk_probes_out = 0; |
2600 | icsk->icsk_rto = TCP_TIMEOUT_INIT; | 2602 | icsk->icsk_rto = TCP_TIMEOUT_INIT; |
2601 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | 2603 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 662b034f1795..4010ae3644f3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
536 | if (sock_owned_by_user(sk)) | 536 | if (sock_owned_by_user(sk)) |
537 | break; | 537 | break; |
538 | 538 | ||
539 | skb = tcp_rtx_queue_head(sk); | ||
540 | if (WARN_ON_ONCE(!skb)) | ||
541 | break; | ||
542 | |||
539 | icsk->icsk_backoff--; | 543 | icsk->icsk_backoff--; |
540 | icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : | 544 | icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : |
541 | TCP_TIMEOUT_INIT; | 545 | TCP_TIMEOUT_INIT; |
542 | icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); | 546 | icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); |
543 | 547 | ||
544 | skb = tcp_rtx_queue_head(sk); | ||
545 | 548 | ||
546 | tcp_mstamp_refresh(tp); | 549 | tcp_mstamp_refresh(tp); |
547 | delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); | 550 | delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); |
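Hoisting the tcp_rtx_queue_head() fetch and adding the WARN_ON_ONCE() means the NULL case is caught before icsk_backoff is decremented and before skb is dereferenced below; previously the lookup happened after the state had already been mutated. This is the plain check-before-use reordering, visible directly in the hunk above, so no separate example is needed beyond noting that the early break leaves the socket state untouched.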
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 65a4f96dc462..bb525abd860e 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -1719,6 +1719,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[], | |||
1719 | return 0; | 1719 | return 0; |
1720 | } | 1720 | } |
1721 | 1721 | ||
1722 | static void ip6erspan_set_version(struct nlattr *data[], | ||
1723 | struct __ip6_tnl_parm *parms) | ||
1724 | { | ||
1725 | parms->erspan_ver = 1; | ||
1726 | if (data[IFLA_GRE_ERSPAN_VER]) | ||
1727 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | ||
1728 | |||
1729 | if (parms->erspan_ver == 1) { | ||
1730 | if (data[IFLA_GRE_ERSPAN_INDEX]) | ||
1731 | parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); | ||
1732 | } else if (parms->erspan_ver == 2) { | ||
1733 | if (data[IFLA_GRE_ERSPAN_DIR]) | ||
1734 | parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); | ||
1735 | if (data[IFLA_GRE_ERSPAN_HWID]) | ||
1736 | parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); | ||
1737 | } | ||
1738 | } | ||
1739 | |||
1722 | static void ip6gre_netlink_parms(struct nlattr *data[], | 1740 | static void ip6gre_netlink_parms(struct nlattr *data[], |
1723 | struct __ip6_tnl_parm *parms) | 1741 | struct __ip6_tnl_parm *parms) |
1724 | { | 1742 | { |
@@ -1767,20 +1785,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[], | |||
1767 | 1785 | ||
1768 | if (data[IFLA_GRE_COLLECT_METADATA]) | 1786 | if (data[IFLA_GRE_COLLECT_METADATA]) |
1769 | parms->collect_md = true; | 1787 | parms->collect_md = true; |
1770 | |||
1771 | parms->erspan_ver = 1; | ||
1772 | if (data[IFLA_GRE_ERSPAN_VER]) | ||
1773 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | ||
1774 | |||
1775 | if (parms->erspan_ver == 1) { | ||
1776 | if (data[IFLA_GRE_ERSPAN_INDEX]) | ||
1777 | parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); | ||
1778 | } else if (parms->erspan_ver == 2) { | ||
1779 | if (data[IFLA_GRE_ERSPAN_DIR]) | ||
1780 | parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); | ||
1781 | if (data[IFLA_GRE_ERSPAN_HWID]) | ||
1782 | parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); | ||
1783 | } | ||
1784 | } | 1788 | } |
1785 | 1789 | ||
1786 | static int ip6gre_tap_init(struct net_device *dev) | 1790 | static int ip6gre_tap_init(struct net_device *dev) |
@@ -2203,6 +2207,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, | |||
2203 | int err; | 2207 | int err; |
2204 | 2208 | ||
2205 | ip6gre_netlink_parms(data, &nt->parms); | 2209 | ip6gre_netlink_parms(data, &nt->parms); |
2210 | ip6erspan_set_version(data, &nt->parms); | ||
2206 | ign = net_generic(net, ip6gre_net_id); | 2211 | ign = net_generic(net, ip6gre_net_id); |
2207 | 2212 | ||
2208 | if (nt->parms.collect_md) { | 2213 | if (nt->parms.collect_md) { |
@@ -2248,6 +2253,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[], | |||
2248 | if (IS_ERR(t)) | 2253 | if (IS_ERR(t)) |
2249 | return PTR_ERR(t); | 2254 | return PTR_ERR(t); |
2250 | 2255 | ||
2256 | ip6erspan_set_version(data, &p); | ||
2251 | ip6gre_tunnel_unlink_md(ign, t); | 2257 | ip6gre_tunnel_unlink_md(ign, t); |
2252 | ip6gre_tunnel_unlink(ign, t); | 2258 | ip6gre_tunnel_unlink(ign, t); |
2253 | ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); | 2259 | ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d65aa019ce85..09dd1c2860fc 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
941 | BSS_CHANGED_P2P_PS | | 941 | BSS_CHANGED_P2P_PS | |
942 | BSS_CHANGED_TXPOWER; | 942 | BSS_CHANGED_TXPOWER; |
943 | int err; | 943 | int err; |
944 | int prev_beacon_int; | ||
944 | 945 | ||
945 | old = sdata_dereference(sdata->u.ap.beacon, sdata); | 946 | old = sdata_dereference(sdata->u.ap.beacon, sdata); |
946 | if (old) | 947 | if (old) |
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
963 | 964 | ||
964 | sdata->needed_rx_chains = sdata->local->rx_chains; | 965 | sdata->needed_rx_chains = sdata->local->rx_chains; |
965 | 966 | ||
967 | prev_beacon_int = sdata->vif.bss_conf.beacon_int; | ||
966 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; | 968 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; |
967 | 969 | ||
968 | if (params->he_cap) | 970 | if (params->he_cap) |
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
974 | if (!err) | 976 | if (!err) |
975 | ieee80211_vif_copy_chanctx_to_vlans(sdata, false); | 977 | ieee80211_vif_copy_chanctx_to_vlans(sdata, false); |
976 | mutex_unlock(&local->mtx); | 978 | mutex_unlock(&local->mtx); |
977 | if (err) | 979 | if (err) { |
980 | sdata->vif.bss_conf.beacon_int = prev_beacon_int; | ||
978 | return err; | 981 | return err; |
982 | } | ||
979 | 983 | ||
980 | /* | 984 | /* |
981 | * Apply control port protocol, this allows us to | 985 | * Apply control port protocol, this allows us to |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 8b26858ab4d5..574c3891c4b2 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags { | |||
70 | * @dst: mesh path destination mac address | 70 | * @dst: mesh path destination mac address |
71 | * @mpp: mesh proxy mac address | 71 | * @mpp: mesh proxy mac address |
72 | * @rhash: rhashtable list pointer | 72 | * @rhash: rhashtable list pointer |
73 | * @walk_list: linked list containing all mesh_path objects. | ||
73 | * @gate_list: list pointer for known gates list | 74 | * @gate_list: list pointer for known gates list |
74 | * @sdata: mesh subif | 75 | * @sdata: mesh subif |
75 | * @next_hop: mesh neighbor to which frames for this destination will be | 76 | * @next_hop: mesh neighbor to which frames for this destination will be |
@@ -106,6 +107,7 @@ struct mesh_path { | |||
106 | u8 dst[ETH_ALEN]; | 107 | u8 dst[ETH_ALEN]; |
107 | u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ | 108 | u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ |
108 | struct rhash_head rhash; | 109 | struct rhash_head rhash; |
110 | struct hlist_node walk_list; | ||
109 | struct hlist_node gate_list; | 111 | struct hlist_node gate_list; |
110 | struct ieee80211_sub_if_data *sdata; | 112 | struct ieee80211_sub_if_data *sdata; |
111 | struct sta_info __rcu *next_hop; | 113 | struct sta_info __rcu *next_hop; |
@@ -135,12 +137,16 @@ struct mesh_path { | |||
135 | * gate's mpath may or may not be resolved and active. | 137 | * gate's mpath may or may not be resolved and active. |
136 | * @gates_lock: protects updates to known_gates | 138 | * @gates_lock: protects updates to known_gates |
137 | * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr | 139 | * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr |
140 | * @walk_head: linked list containing all mesh_path objects | ||
141 | * @walk_lock: lock protecting walk_head | ||
138 | * @entries: number of entries in the table | 142 | * @entries: number of entries in the table |
139 | */ | 143 | */ |
140 | struct mesh_table { | 144 | struct mesh_table { |
141 | struct hlist_head known_gates; | 145 | struct hlist_head known_gates; |
142 | spinlock_t gates_lock; | 146 | spinlock_t gates_lock; |
143 | struct rhashtable rhead; | 147 | struct rhashtable rhead; |
148 | struct hlist_head walk_head; | ||
149 | spinlock_t walk_lock; | ||
144 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ | 150 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ |
145 | }; | 151 | }; |
146 | 152 | ||
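The table grows a plain hlist beside the rhashtable: lookups keep using the hash, while walkers iterate walk_head under RCU or walk_lock instead of the restartable rhashtable iterator, which can return -EAGAIN mid-walk. The mesh_pathtbl.c hunks below do the conversion; a kernel-style sketch of the dual-index shape, not standalone:

    /* Every entry lives in the rhashtable for lookup and on an hlist for
     * iteration; updates to both are serialized by walk_lock.
     */
    static int table_insert(struct mesh_table *tbl, struct mesh_path *mpath)
    {
        int ret;

        spin_lock_bh(&tbl->walk_lock);
        ret = rhashtable_lookup_insert_fast(&tbl->rhead, &mpath->rhash,
                                            mesh_rht_params);
        if (!ret)
            hlist_add_head_rcu(&mpath->walk_list, &tbl->walk_head);
        spin_unlock_bh(&tbl->walk_lock);
        return ret;
    }

    static void table_walk(struct mesh_table *tbl)
    {
        struct mesh_path *mpath;

        /* Readers only need RCU; no -EAGAIN restarts as with
         * rhashtable_walk_next().
         */
        rcu_read_lock();
        hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list)
            ; /* inspect mpath */
        rcu_read_unlock();
    }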
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index a5125624a76d..88a6d5e18ccc 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void) | |||
59 | return NULL; | 59 | return NULL; |
60 | 60 | ||
61 | INIT_HLIST_HEAD(&newtbl->known_gates); | 61 | INIT_HLIST_HEAD(&newtbl->known_gates); |
62 | INIT_HLIST_HEAD(&newtbl->walk_head); | ||
62 | atomic_set(&newtbl->entries, 0); | 63 | atomic_set(&newtbl->entries, 0); |
63 | spin_lock_init(&newtbl->gates_lock); | 64 | spin_lock_init(&newtbl->gates_lock); |
65 | spin_lock_init(&newtbl->walk_lock); | ||
64 | 66 | ||
65 | return newtbl; | 67 | return newtbl; |
66 | } | 68 | } |
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) | |||
249 | static struct mesh_path * | 251 | static struct mesh_path * |
250 | __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) | 252 | __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) |
251 | { | 253 | { |
252 | int i = 0, ret; | 254 | int i = 0; |
253 | struct mesh_path *mpath = NULL; | 255 | struct mesh_path *mpath; |
254 | struct rhashtable_iter iter; | ||
255 | |||
256 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
257 | if (ret) | ||
258 | return NULL; | ||
259 | |||
260 | rhashtable_walk_start(&iter); | ||
261 | 256 | ||
262 | while ((mpath = rhashtable_walk_next(&iter))) { | 257 | hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { |
263 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
264 | continue; | ||
265 | if (IS_ERR(mpath)) | ||
266 | break; | ||
267 | if (i++ == idx) | 258 | if (i++ == idx) |
268 | break; | 259 | break; |
269 | } | 260 | } |
270 | rhashtable_walk_stop(&iter); | ||
271 | rhashtable_walk_exit(&iter); | ||
272 | 261 | ||
273 | if (IS_ERR(mpath) || !mpath) | 262 | if (!mpath) |
274 | return NULL; | 263 | return NULL; |
275 | 264 | ||
276 | if (mpath_expired(mpath)) { | 265 | if (mpath_expired(mpath)) { |
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
432 | return ERR_PTR(-ENOMEM); | 421 | return ERR_PTR(-ENOMEM); |
433 | 422 | ||
434 | tbl = sdata->u.mesh.mesh_paths; | 423 | tbl = sdata->u.mesh.mesh_paths; |
424 | spin_lock_bh(&tbl->walk_lock); | ||
435 | do { | 425 | do { |
436 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, | 426 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, |
437 | &new_mpath->rhash, | 427 | &new_mpath->rhash, |
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
441 | mpath = rhashtable_lookup_fast(&tbl->rhead, | 431 | mpath = rhashtable_lookup_fast(&tbl->rhead, |
442 | dst, | 432 | dst, |
443 | mesh_rht_params); | 433 | mesh_rht_params); |
444 | 434 | else if (!ret) | |
435 | hlist_add_head(&new_mpath->walk_list, &tbl->walk_head); | ||
445 | } while (unlikely(ret == -EEXIST && !mpath)); | 436 | } while (unlikely(ret == -EEXIST && !mpath)); |
437 | spin_unlock_bh(&tbl->walk_lock); | ||
446 | 438 | ||
447 | if (ret && ret != -EEXIST) | 439 | if (ret) { |
448 | return ERR_PTR(ret); | ||
449 | |||
450 | /* At this point either new_mpath was added, or we found a | ||
451 | * matching entry already in the table; in the latter case | ||
452 | * free the unnecessary new entry. | ||
453 | */ | ||
454 | if (ret == -EEXIST) { | ||
455 | kfree(new_mpath); | 440 | kfree(new_mpath); |
441 | |||
442 | if (ret != -EEXIST) | ||
443 | return ERR_PTR(ret); | ||
444 | |||
456 | new_mpath = mpath; | 445 | new_mpath = mpath; |
457 | } | 446 | } |
447 | |||
458 | sdata->u.mesh.mesh_paths_generation++; | 448 | sdata->u.mesh.mesh_paths_generation++; |
459 | return new_mpath; | 449 | return new_mpath; |
460 | } | 450 | } |
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
480 | 470 | ||
481 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); | 471 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); |
482 | tbl = sdata->u.mesh.mpp_paths; | 472 | tbl = sdata->u.mesh.mpp_paths; |
473 | |||
474 | spin_lock_bh(&tbl->walk_lock); | ||
483 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, | 475 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, |
484 | &new_mpath->rhash, | 476 | &new_mpath->rhash, |
485 | mesh_rht_params); | 477 | mesh_rht_params); |
478 | if (!ret) | ||
479 | hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head); | ||
480 | spin_unlock_bh(&tbl->walk_lock); | ||
481 | |||
482 | if (ret) | ||
483 | kfree(new_mpath); | ||
486 | 484 | ||
487 | sdata->u.mesh.mpp_paths_generation++; | 485 | sdata->u.mesh.mpp_paths_generation++; |
488 | return ret; | 486 | return ret; |
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta) | |||
503 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; | 501 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; |
504 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 502 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
505 | struct mesh_path *mpath; | 503 | struct mesh_path *mpath; |
506 | struct rhashtable_iter iter; | ||
507 | int ret; | ||
508 | |||
509 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
510 | if (ret) | ||
511 | return; | ||
512 | 504 | ||
513 | rhashtable_walk_start(&iter); | 505 | rcu_read_lock(); |
514 | 506 | hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { | |
515 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
516 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
517 | continue; | ||
518 | if (IS_ERR(mpath)) | ||
519 | break; | ||
520 | if (rcu_access_pointer(mpath->next_hop) == sta && | 507 | if (rcu_access_pointer(mpath->next_hop) == sta && |
521 | mpath->flags & MESH_PATH_ACTIVE && | 508 | mpath->flags & MESH_PATH_ACTIVE && |
522 | !(mpath->flags & MESH_PATH_FIXED)) { | 509 | !(mpath->flags & MESH_PATH_FIXED)) { |
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
530 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); | 517 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); |
531 | } | 518 | } |
532 | } | 519 | } |
533 | rhashtable_walk_stop(&iter); | 520 | rcu_read_unlock(); |
534 | rhashtable_walk_exit(&iter); | ||
535 | } | 521 | } |
536 | 522 | ||
537 | static void mesh_path_free_rcu(struct mesh_table *tbl, | 523 | static void mesh_path_free_rcu(struct mesh_table *tbl, |
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, | |||
551 | 537 | ||
552 | static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) | 538 | static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) |
553 | { | 539 | { |
540 | hlist_del_rcu(&mpath->walk_list); | ||
554 | rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); | 541 | rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); |
555 | mesh_path_free_rcu(tbl, mpath); | 542 | mesh_path_free_rcu(tbl, mpath); |
556 | } | 543 | } |
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
571 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 558 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
572 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; | 559 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; |
573 | struct mesh_path *mpath; | 560 | struct mesh_path *mpath; |
574 | struct rhashtable_iter iter; | 561 | struct hlist_node *n; |
575 | int ret; | ||
576 | |||
577 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
578 | if (ret) | ||
579 | return; | ||
580 | |||
581 | rhashtable_walk_start(&iter); | ||
582 | |||
583 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
584 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
585 | continue; | ||
586 | if (IS_ERR(mpath)) | ||
587 | break; | ||
588 | 562 | ||
563 | spin_lock_bh(&tbl->walk_lock); | ||
564 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { | ||
589 | if (rcu_access_pointer(mpath->next_hop) == sta) | 565 | if (rcu_access_pointer(mpath->next_hop) == sta) |
590 | __mesh_path_del(tbl, mpath); | 566 | __mesh_path_del(tbl, mpath); |
591 | } | 567 | } |
592 | 568 | spin_unlock_bh(&tbl->walk_lock); | |
593 | rhashtable_walk_stop(&iter); | ||
594 | rhashtable_walk_exit(&iter); | ||
595 | } | 569 | } |
596 | 570 | ||
597 | static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | 571 | static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, |
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | |||
599 | { | 573 | { |
600 | struct mesh_table *tbl = sdata->u.mesh.mpp_paths; | 574 | struct mesh_table *tbl = sdata->u.mesh.mpp_paths; |
601 | struct mesh_path *mpath; | 575 | struct mesh_path *mpath; |
602 | struct rhashtable_iter iter; | 576 | struct hlist_node *n; |
603 | int ret; | ||
604 | |||
605 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
606 | if (ret) | ||
607 | return; | ||
608 | |||
609 | rhashtable_walk_start(&iter); | ||
610 | |||
611 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
612 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
613 | continue; | ||
614 | if (IS_ERR(mpath)) | ||
615 | break; | ||
616 | 577 | ||
578 | spin_lock_bh(&tbl->walk_lock); | ||
579 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { | ||
617 | if (ether_addr_equal(mpath->mpp, proxy)) | 580 | if (ether_addr_equal(mpath->mpp, proxy)) |
618 | __mesh_path_del(tbl, mpath); | 581 | __mesh_path_del(tbl, mpath); |
619 | } | 582 | } |
620 | 583 | spin_unlock_bh(&tbl->walk_lock); | |
621 | rhashtable_walk_stop(&iter); | ||
622 | rhashtable_walk_exit(&iter); | ||
623 | } | 584 | } |
624 | 585 | ||
625 | static void table_flush_by_iface(struct mesh_table *tbl) | 586 | static void table_flush_by_iface(struct mesh_table *tbl) |
626 | { | 587 | { |
627 | struct mesh_path *mpath; | 588 | struct mesh_path *mpath; |
628 | struct rhashtable_iter iter; | 589 | struct hlist_node *n; |
629 | int ret; | ||
630 | |||
631 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
632 | if (ret) | ||
633 | return; | ||
634 | |||
635 | rhashtable_walk_start(&iter); | ||
636 | 590 | ||
637 | while ((mpath = rhashtable_walk_next(&iter))) { | 591 | spin_lock_bh(&tbl->walk_lock); |
638 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 592 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { |
639 | continue; | ||
640 | if (IS_ERR(mpath)) | ||
641 | break; | ||
642 | __mesh_path_del(tbl, mpath); | 593 | __mesh_path_del(tbl, mpath); |
643 | } | 594 | } |
644 | 595 | spin_unlock_bh(&tbl->walk_lock); | |
645 | rhashtable_walk_stop(&iter); | ||
646 | rhashtable_walk_exit(&iter); | ||
647 | } | 596 | } |
648 | 597 | ||
649 | /** | 598 | /** |
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl, | |||
675 | { | 624 | { |
676 | struct mesh_path *mpath; | 625 | struct mesh_path *mpath; |
677 | 626 | ||
678 | rcu_read_lock(); | 627 | spin_lock_bh(&tbl->walk_lock); |
679 | mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); | 628 | mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); |
680 | if (!mpath) { | 629 | if (!mpath) { |
681 | rcu_read_unlock(); | 630 | spin_unlock_bh(&tbl->walk_lock); |
682 | return -ENXIO; | 631 | return -ENXIO; |
683 | } | 632 | } |
684 | 633 | ||
685 | __mesh_path_del(tbl, mpath); | 634 | __mesh_path_del(tbl, mpath); |
686 | rcu_read_unlock(); | 635 | spin_unlock_bh(&tbl->walk_lock); |
687 | return 0; | 636 | return 0; |
688 | } | 637 | } |
689 | 638 | ||
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, | |||
854 | struct mesh_table *tbl) | 803 | struct mesh_table *tbl) |
855 | { | 804 | { |
856 | struct mesh_path *mpath; | 805 | struct mesh_path *mpath; |
857 | struct rhashtable_iter iter; | 806 | struct hlist_node *n; |
858 | int ret; | ||
859 | 807 | ||
860 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); | 808 | spin_lock_bh(&tbl->walk_lock); |
861 | if (ret) | 809 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { |
862 | return; | ||
863 | |||
864 | rhashtable_walk_start(&iter); | ||
865 | |||
866 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
867 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
868 | continue; | ||
869 | if (IS_ERR(mpath)) | ||
870 | break; | ||
871 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && | 810 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && |
872 | (!(mpath->flags & MESH_PATH_FIXED)) && | 811 | (!(mpath->flags & MESH_PATH_FIXED)) && |
873 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) | 812 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) |
874 | __mesh_path_del(tbl, mpath); | 813 | __mesh_path_del(tbl, mpath); |
875 | } | 814 | } |
876 | 815 | spin_unlock_bh(&tbl->walk_lock); | |
877 | rhashtable_walk_stop(&iter); | ||
878 | rhashtable_walk_exit(&iter); | ||
879 | } | 816 | } |
880 | 817 | ||
881 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | 818 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 3c36480559f9..2a3d4e27cf3b 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, | |||
896 | { | 896 | { |
897 | struct ip_vs_dest *dest; | 897 | struct ip_vs_dest *dest; |
898 | unsigned int atype, i; | 898 | unsigned int atype, i; |
899 | int ret = 0; | ||
900 | 899 | ||
901 | EnterFunction(2); | 900 | EnterFunction(2); |
902 | 901 | ||
903 | #ifdef CONFIG_IP_VS_IPV6 | 902 | #ifdef CONFIG_IP_VS_IPV6 |
904 | if (udest->af == AF_INET6) { | 903 | if (udest->af == AF_INET6) { |
904 | int ret; | ||
905 | |||
905 | atype = ipv6_addr_type(&udest->addr.in6); | 906 | atype = ipv6_addr_type(&udest->addr.in6); |
906 | if ((!(atype & IPV6_ADDR_UNICAST) || | 907 | if ((!(atype & IPV6_ADDR_UNICAST) || |
907 | atype & IPV6_ADDR_LINKLOCAL) && | 908 | atype & IPV6_ADDR_LINKLOCAL) && |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index de3f1e2acae0..e1a88ba2249e 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -328,6 +328,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx) | |||
328 | int err; | 328 | int err; |
329 | 329 | ||
330 | list_for_each_entry(rule, &ctx->chain->rules, list) { | 330 | list_for_each_entry(rule, &ctx->chain->rules, list) { |
331 | if (!nft_is_active_next(ctx->net, rule)) | ||
332 | continue; | ||
333 | |||
331 | err = nft_delrule(ctx, rule); | 334 | err = nft_delrule(ctx, rule); |
332 | if (err < 0) | 335 | if (err < 0) |
333 | return err; | 336 | return err; |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index fb6656295204..507105127095 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c | |||
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
44 | unsigned char *cksum, unsigned char *buf) | 44 | unsigned char *cksum, unsigned char *buf) |
45 | { | 45 | { |
46 | struct crypto_sync_skcipher *cipher; | 46 | struct crypto_sync_skcipher *cipher; |
47 | unsigned char plain[8]; | 47 | unsigned char *plain; |
48 | s32 code; | 48 | s32 code; |
49 | 49 | ||
50 | dprintk("RPC: %s:\n", __func__); | 50 | dprintk("RPC: %s:\n", __func__); |
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
52 | if (IS_ERR(cipher)) | 52 | if (IS_ERR(cipher)) |
53 | return PTR_ERR(cipher); | 53 | return PTR_ERR(cipher); |
54 | 54 | ||
55 | plain = kmalloc(8, GFP_NOFS); | ||
56 | if (!plain) | ||
57 | return -ENOMEM; | ||
58 | |||
55 | plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); | 59 | plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); |
56 | plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); | 60 | plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); |
57 | plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); | 61 | plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); |
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
67 | 71 | ||
68 | code = krb5_encrypt(cipher, cksum, plain, buf, 8); | 72 | code = krb5_encrypt(cipher, cksum, plain, buf, 8); |
69 | out: | 73 | out: |
74 | kfree(plain); | ||
70 | crypto_free_sync_skcipher(cipher); | 75 | crypto_free_sync_skcipher(cipher); |
71 | return code; | 76 | return code; |
72 | } | 77 | } |
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx, | |||
77 | u32 seqnum, | 82 | u32 seqnum, |
78 | unsigned char *cksum, unsigned char *buf) | 83 | unsigned char *cksum, unsigned char *buf) |
79 | { | 84 | { |
80 | unsigned char plain[8]; | 85 | unsigned char *plain; |
86 | s32 code; | ||
81 | 87 | ||
82 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) | 88 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) |
83 | return krb5_make_rc4_seq_num(kctx, direction, seqnum, | 89 | return krb5_make_rc4_seq_num(kctx, direction, seqnum, |
84 | cksum, buf); | 90 | cksum, buf); |
85 | 91 | ||
92 | plain = kmalloc(8, GFP_NOFS); | ||
93 | if (!plain) | ||
94 | return -ENOMEM; | ||
95 | |||
86 | plain[0] = (unsigned char) (seqnum & 0xff); | 96 | plain[0] = (unsigned char) (seqnum & 0xff); |
87 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); | 97 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); |
88 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); | 98 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); |
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx, | |||
93 | plain[6] = direction; | 103 | plain[6] = direction; |
94 | plain[7] = direction; | 104 | plain[7] = direction; |
95 | 105 | ||
96 | return krb5_encrypt(key, cksum, plain, buf, 8); | 106 | code = krb5_encrypt(key, cksum, plain, buf, 8); |
107 | kfree(plain); | ||
108 | return code; | ||
97 | } | 109 | } |
98 | 110 | ||
99 | static s32 | 111 | static s32 |
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, | |||
101 | unsigned char *buf, int *direction, s32 *seqnum) | 113 | unsigned char *buf, int *direction, s32 *seqnum) |
102 | { | 114 | { |
103 | struct crypto_sync_skcipher *cipher; | 115 | struct crypto_sync_skcipher *cipher; |
104 | unsigned char plain[8]; | 116 | unsigned char *plain; |
105 | s32 code; | 117 | s32 code; |
106 | 118 | ||
107 | dprintk("RPC: %s:\n", __func__); | 119 | dprintk("RPC: %s:\n", __func__); |
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, | |||
113 | if (code) | 125 | if (code) |
114 | goto out; | 126 | goto out; |
115 | 127 | ||
128 | plain = kmalloc(8, GFP_NOFS); | ||
129 | if (!plain) { | ||
130 | code = -ENOMEM; | ||
131 | goto out; | ||
132 | } | ||
133 | |||
116 | code = krb5_decrypt(cipher, cksum, buf, plain, 8); | 134 | code = krb5_decrypt(cipher, cksum, buf, plain, 8); |
117 | if (code) | 135 | if (code) |
118 | goto out; | 136 | goto out_plain; |
119 | 137 | ||
120 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) | 138 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) |
121 | || (plain[4] != plain[7])) { | 139 | || (plain[4] != plain[7])) { |
122 | code = (s32)KG_BAD_SEQ; | 140 | code = (s32)KG_BAD_SEQ; |
123 | goto out; | 141 | goto out_plain; |
124 | } | 142 | } |
125 | 143 | ||
126 | *direction = plain[4]; | 144 | *direction = plain[4]; |
127 | 145 | ||
128 | *seqnum = ((plain[0] << 24) | (plain[1] << 16) | | 146 | *seqnum = ((plain[0] << 24) | (plain[1] << 16) | |
129 | (plain[2] << 8) | (plain[3])); | 147 | (plain[2] << 8) | (plain[3])); |
148 | out_plain: | ||
149 | kfree(plain); | ||
130 | out: | 150 | out: |
131 | crypto_free_sync_skcipher(cipher); | 151 | crypto_free_sync_skcipher(cipher); |
132 | return code; | 152 | return code; |
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx, | |||
139 | int *direction, u32 *seqnum) | 159 | int *direction, u32 *seqnum) |
140 | { | 160 | { |
141 | s32 code; | 161 | s32 code; |
142 | unsigned char plain[8]; | 162 | unsigned char *plain; |
143 | struct crypto_sync_skcipher *key = kctx->seq; | 163 | struct crypto_sync_skcipher *key = kctx->seq; |
144 | 164 | ||
145 | dprintk("RPC: krb5_get_seq_num:\n"); | 165 | dprintk("RPC: krb5_get_seq_num:\n"); |
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx, | |||
147 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) | 167 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) |
148 | return krb5_get_rc4_seq_num(kctx, cksum, buf, | 168 | return krb5_get_rc4_seq_num(kctx, cksum, buf, |
149 | direction, seqnum); | 169 | direction, seqnum); |
170 | plain = kmalloc(8, GFP_NOFS); | ||
171 | if (!plain) | ||
172 | return -ENOMEM; | ||
150 | 173 | ||
151 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) | 174 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) |
152 | return code; | 175 | goto out; |
153 | 176 | ||
154 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || | 177 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || |
155 | (plain[4] != plain[7])) | 178 | (plain[4] != plain[7])) { |
156 | return (s32)KG_BAD_SEQ; | 179 | code = (s32)KG_BAD_SEQ; |
180 | goto out; | ||
181 | } | ||
157 | 182 | ||
158 | *direction = plain[4]; | 183 | *direction = plain[4]; |
159 | 184 | ||
160 | *seqnum = ((plain[0]) | | 185 | *seqnum = ((plain[0]) | |
161 | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); | 186 | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); |
162 | 187 | ||
163 | return 0; | 188 | out: |
189 | kfree(plain); | ||
190 | return code; | ||
164 | } | 191 | } |
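Every 8-byte scratch buffer moves off the stack because the crypto layer builds scatterlists over these buffers, and with CONFIG_VMAP_STACK a stack address may not be in the linear map, so it cannot be translated page-wise. A kernel-style sketch of the resulting rule, mirroring krb5_make_seq_num() above and not standalone:

    static int encrypt_block(struct crypto_sync_skcipher *tfm,
                             unsigned char *cksum, unsigned char *out)
    {
        unsigned char *plain;
        int code;

        plain = kmalloc(8, GFP_NOFS); /* sg-safe scratch, never the stack */
        if (!plain)
            return -ENOMEM;

        memset(plain, 0, 8);          /* ... fill in the seqnum bytes ... */
        code = krb5_encrypt(tfm, cksum, plain, out, 8);

        kfree(plain);
        return code;
    }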
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index 45a033329cd4..19bb356230ed 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c | |||
@@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | |||
146 | rcu_read_lock(); | 146 | rcu_read_lock(); |
147 | xprt = rcu_dereference(clnt->cl_xprt); | 147 | xprt = rcu_dereference(clnt->cl_xprt); |
148 | /* no "debugfs" dentry? Don't bother with the symlink. */ | 148 | /* no "debugfs" dentry? Don't bother with the symlink. */ |
149 | if (!xprt->debugfs) { | 149 | if (IS_ERR_OR_NULL(xprt->debugfs)) { |
150 | rcu_read_unlock(); | 150 | rcu_read_unlock(); |
151 | return; | 151 | return; |
152 | } | 152 | } |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 4994e75945b8..21113bfd4eca 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -527,7 +527,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
527 | 527 | ||
528 | sendcq = ib_alloc_cq(ia->ri_device, NULL, | 528 | sendcq = ib_alloc_cq(ia->ri_device, NULL, |
529 | ep->rep_attr.cap.max_send_wr + 1, | 529 | ep->rep_attr.cap.max_send_wr + 1, |
530 | 1, IB_POLL_WORKQUEUE); | 530 | ia->ri_device->num_comp_vectors > 1 ? 1 : 0, |
531 | IB_POLL_WORKQUEUE); | ||
531 | if (IS_ERR(sendcq)) { | 532 | if (IS_ERR(sendcq)) { |
532 | rc = PTR_ERR(sendcq); | 533 | rc = PTR_ERR(sendcq); |
533 | goto out1; | 534 | goto out1; |
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 5ab236c5c9a5..77520eacee8f 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
@@ -129,9 +129,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, | |||
129 | return 0; | 129 | return 0; |
130 | 130 | ||
131 | err_unreg_umem: | 131 | err_unreg_umem: |
132 | xdp_clear_umem_at_qid(dev, queue_id); | ||
133 | if (!force_zc) | 132 | if (!force_zc) |
134 | err = 0; /* fallback to copy mode */ | 133 | err = 0; /* fallback to copy mode */ |
134 | if (err) | ||
135 | xdp_clear_umem_at_qid(dev, queue_id); | ||
135 | out_rtnl_unlock: | 136 | out_rtnl_unlock: |
136 | rtnl_unlock(); | 137 | rtnl_unlock(); |
137 | return err; | 138 | return err; |
@@ -265,10 +266,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem) | |||
265 | if (!umem->pgs) | 266 | if (!umem->pgs) |
266 | return -ENOMEM; | 267 | return -ENOMEM; |
267 | 268 | ||
268 | down_write(¤t->mm->mmap_sem); | 269 | down_read(¤t->mm->mmap_sem); |
269 | npgs = get_user_pages(umem->address, umem->npgs, | 270 | npgs = get_user_pages_longterm(umem->address, umem->npgs, |
270 | gup_flags, &umem->pgs[0], NULL); | 271 | gup_flags, &umem->pgs[0], NULL); |
271 | up_write(¤t->mm->mmap_sem); | 272 | up_read(¤t->mm->mmap_sem); |
272 | 273 | ||
273 | if (npgs != umem->npgs) { | 274 | if (npgs != umem->npgs) { |
274 | if (npgs >= 0) { | 275 | if (npgs >= 0) { |
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 949d3bbccb2f..41731c9bb26f 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c | |||
@@ -669,6 +669,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, | |||
669 | if (!umem) | 669 | if (!umem) |
670 | return -EINVAL; | 670 | return -EINVAL; |
671 | 671 | ||
672 | /* Matches the smp_wmb() in XDP_UMEM_REG */ | ||
673 | smp_rmb(); | ||
672 | if (offset == XDP_UMEM_PGOFF_FILL_RING) | 674 | if (offset == XDP_UMEM_PGOFF_FILL_RING) |
673 | q = READ_ONCE(umem->fq); | 675 | q = READ_ONCE(umem->fq); |
674 | else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) | 676 | else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) |
@@ -678,6 +680,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, | |||
678 | if (!q) | 680 | if (!q) |
679 | return -EINVAL; | 681 | return -EINVAL; |
680 | 682 | ||
683 | /* Matches the smp_wmb() in xsk_init_queue */ | ||
684 | smp_rmb(); | ||
681 | qpg = virt_to_head_page(q->ring); | 685 | qpg = virt_to_head_page(q->ring); |
682 | if (size > (PAGE_SIZE << compound_order(qpg))) | 686 | if (size > (PAGE_SIZE << compound_order(qpg))) |
683 | return -EINVAL; | 687 | return -EINVAL; |
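Both smp_rmb() calls pair with an smp_wmb() on the publishing side: the writer initializes the object and then stores the pointer; the reader loads the pointer and then issues the read barrier before dereferencing the fields. A userspace analogue with C11 fences (hypothetical structure names; requires C11 threads):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    struct queue { int ring_size; };

    static struct queue q_storage;
    static _Atomic(struct queue *) q_ptr;

    static int producer(void *arg)
    {
        (void)arg;
        q_storage.ring_size = 2048;                /* initialize first */
        atomic_thread_fence(memory_order_release); /* like smp_wmb() */
        atomic_store_explicit(&q_ptr, &q_storage, memory_order_relaxed);
        return 0;
    }

    static int consumer(void *arg)
    {
        struct queue *q;

        (void)arg;
        while (!(q = atomic_load_explicit(&q_ptr, memory_order_relaxed)))
            ;                                      /* wait for publish */
        atomic_thread_fence(memory_order_acquire); /* like smp_rmb() */
        printf("ring_size = %d\n", q->ring_size);  /* guaranteed 2048 */
        return 0;
    }

    int main(void)
    {
        thrd_t a, b;

        thrd_create(&a, producer, NULL);
        thrd_create(&b, consumer, NULL);
        thrd_join(a, NULL);
        thrd_join(b, NULL);
        return 0;
    }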
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 9e350fd34504..9c486fad3f9f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu) | |||
626 | /* Awaken to handle a signal, request we sleep again later. */ | 626 | /* Awaken to handle a signal, request we sleep again later. */ |
627 | kvm_make_request(KVM_REQ_SLEEP, vcpu); | 627 | kvm_make_request(KVM_REQ_SLEEP, vcpu); |
628 | } | 628 | } |
629 | |||
630 | /* | ||
631 | * Make sure we will observe a potential reset request if we've | ||
632 | * observed a change to the power state. Pairs with the smp_wmb() in | ||
633 | * kvm_psci_vcpu_on(). | ||
634 | */ | ||
635 | smp_rmb(); | ||
629 | } | 636 | } |
630 | 637 | ||
631 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) | 638 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) |
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) | |||
639 | if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) | 646 | if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) |
640 | vcpu_req_sleep(vcpu); | 647 | vcpu_req_sleep(vcpu); |
641 | 648 | ||
649 | if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) | ||
650 | kvm_reset_vcpu(vcpu); | ||
651 | |||
642 | /* | 652 | /* |
643 | * Clear IRQ_PENDING requests that were made to guarantee | 653 | * Clear IRQ_PENDING requests that were made to guarantee |
644 | * that a VCPU sees new virtual interrupts. | 654 | * that a VCPU sees new virtual interrupts. |
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index fbdf3ac2f001..30251e288629 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c | |||
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
1695 | 1695 | ||
1696 | vma_pagesize = vma_kernel_pagesize(vma); | 1696 | vma_pagesize = vma_kernel_pagesize(vma); |
1697 | /* | 1697 | /* |
1698 | * PUD level may not exist for a VM but PMD is guaranteed to | 1698 | * Stage2 page tables have a minimum of 2 levels (for arm64 see |
1699 | * exist. | 1699 | * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can |
1700 | * use PMD_SIZE huge mappings (even when the PMD is folded into PGD). | ||
1701 | * As for PUD huge maps, we must make sure that we have at least | ||
1702 | * 3 levels, i.e., the PMD is not folded. | ||
1700 | */ | 1703 | */ |
1701 | if ((vma_pagesize == PMD_SIZE || | 1704 | if ((vma_pagesize == PMD_SIZE || |
1702 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) && | 1705 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) && |
1703 | !force_pte) { | 1706 | !force_pte) { |
1704 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; | 1707 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; |
1705 | } | 1708 | } |
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 9b73d3ad918a..34d08ee63747 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c | |||
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) | |||
104 | 104 | ||
105 | static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | 105 | static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) |
106 | { | 106 | { |
107 | struct vcpu_reset_state *reset_state; | ||
107 | struct kvm *kvm = source_vcpu->kvm; | 108 | struct kvm *kvm = source_vcpu->kvm; |
108 | struct kvm_vcpu *vcpu = NULL; | 109 | struct kvm_vcpu *vcpu = NULL; |
109 | struct swait_queue_head *wq; | ||
110 | unsigned long cpu_id; | 110 | unsigned long cpu_id; |
111 | unsigned long context_id; | ||
112 | phys_addr_t target_pc; | ||
113 | 111 | ||
114 | cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; | 112 | cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; |
115 | if (vcpu_mode_is_32bit(source_vcpu)) | 113 | if (vcpu_mode_is_32bit(source_vcpu)) |
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
130 | return PSCI_RET_INVALID_PARAMS; | 128 | return PSCI_RET_INVALID_PARAMS; |
131 | } | 129 | } |
132 | 130 | ||
133 | target_pc = smccc_get_arg2(source_vcpu); | 131 | reset_state = &vcpu->arch.reset_state; |
134 | context_id = smccc_get_arg3(source_vcpu); | ||
135 | 132 | ||
136 | kvm_reset_vcpu(vcpu); | 133 | reset_state->pc = smccc_get_arg2(source_vcpu); |
137 | |||
138 | /* Gracefully handle Thumb2 entry point */ | ||
139 | if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { | ||
140 | target_pc &= ~((phys_addr_t) 1); | ||
141 | vcpu_set_thumb(vcpu); | ||
142 | } | ||
143 | 134 | ||
144 | /* Propagate caller endianness */ | 135 | /* Propagate caller endianness */ |
145 | if (kvm_vcpu_is_be(source_vcpu)) | 136 | reset_state->be = kvm_vcpu_is_be(source_vcpu); |
146 | kvm_vcpu_set_be(vcpu); | ||
147 | 137 | ||
148 | *vcpu_pc(vcpu) = target_pc; | ||
149 | /* | 138 | /* |
150 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 | 139 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 |
151 | * the general purpose registers are undefined upon CPU_ON. | 140 | * the general purpose registers are undefined upon CPU_ON. |
152 | */ | 141 | */ |
153 | smccc_set_retval(vcpu, context_id, 0, 0, 0); | 142 | reset_state->r0 = smccc_get_arg3(source_vcpu); |
154 | vcpu->arch.power_off = false; | 143 | |
155 | smp_mb(); /* Make sure the above is visible */ | 144 | WRITE_ONCE(reset_state->reset, true); |
145 | kvm_make_request(KVM_REQ_VCPU_RESET, vcpu); | ||
156 | 146 | ||
157 | wq = kvm_arch_vcpu_wq(vcpu); | 147 | /* |
158 | swake_up_one(wq); | 148 | * Make sure the reset request is observed if the change to |
149 | * power_state is observed. | ||
150 | */ | ||
151 | smp_wmb(); | ||
152 | |||
153 | vcpu->arch.power_off = false; | ||
154 | kvm_vcpu_wake_up(vcpu); | ||
159 | 155 | ||
160 | return PSCI_RET_SUCCESS; | 156 | return PSCI_RET_SUCCESS; |
161 | } | 157 | } |
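CPU_ON no longer resets the target vcpu from the source vcpu's context; it records the requested entry state, publishes it, and raises KVM_REQ_VCPU_RESET, which the target handles in its own run loop (see the arm.c hunk above). A kernel-style sketch of that handshake, not standalone:

    /* The source vcpu publishes reset_state, then raises a request; the
     * target consumes it from its own run loop, so no vcpu ever mutates
     * another vcpu's registers directly.
     */
    static void request_reset(struct kvm_vcpu *target, unsigned long pc,
                              unsigned long r0, bool be)
    {
        struct vcpu_reset_state *rs = &target->arch.reset_state;

        rs->pc = pc;
        rs->r0 = r0;
        rs->be = be;
        WRITE_ONCE(rs->reset, true);
        kvm_make_request(KVM_REQ_VCPU_RESET, target);

        smp_wmb(); /* publish the request before flipping power state */
        target->arch.power_off = false;
        kvm_vcpu_wake_up(target);
    }

    static void handle_requests(struct kvm_vcpu *vcpu)
    {
        if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
            kvm_reset_vcpu(vcpu); /* applies reset_state on this thread */
    }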
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c index 07aa900bac56..1f62f2b8065d 100644 --- a/virt/kvm/arm/vgic/vgic-debug.c +++ b/virt/kvm/arm/vgic/vgic-debug.c | |||
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v) | |||
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | spin_lock_irqsave(&irq->irq_lock, flags); | 254 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
255 | print_irq_state(s, irq, vcpu); | 255 | print_irq_state(s, irq, vcpu); |
256 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 256 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
257 | 257 | ||
258 | vgic_put_irq(kvm, irq); | 258 | vgic_put_irq(kvm, irq); |
259 | return 0; | 259 | return 0; |
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index c0c0b88af1d5..3bdb31eaed64 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c | |||
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm) | |||
64 | struct vgic_dist *dist = &kvm->arch.vgic; | 64 | struct vgic_dist *dist = &kvm->arch.vgic; |
65 | 65 | ||
66 | INIT_LIST_HEAD(&dist->lpi_list_head); | 66 | INIT_LIST_HEAD(&dist->lpi_list_head); |
67 | spin_lock_init(&dist->lpi_list_lock); | 67 | raw_spin_lock_init(&dist->lpi_list_lock); |
68 | } | 68 | } |
69 | 69 | ||
70 | /* CREATION */ | 70 | /* CREATION */ |
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) | |||
171 | 171 | ||
172 | irq->intid = i + VGIC_NR_PRIVATE_IRQS; | 172 | irq->intid = i + VGIC_NR_PRIVATE_IRQS; |
173 | INIT_LIST_HEAD(&irq->ap_list); | 173 | INIT_LIST_HEAD(&irq->ap_list); |
174 | spin_lock_init(&irq->irq_lock); | 174 | raw_spin_lock_init(&irq->irq_lock); |
175 | irq->vcpu = NULL; | 175 | irq->vcpu = NULL; |
176 | irq->target_vcpu = vcpu0; | 176 | irq->target_vcpu = vcpu0; |
177 | kref_init(&irq->refcount); | 177 | kref_init(&irq->refcount); |
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
206 | vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; | 206 | vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; |
207 | 207 | ||
208 | INIT_LIST_HEAD(&vgic_cpu->ap_list_head); | 208 | INIT_LIST_HEAD(&vgic_cpu->ap_list_head); |
209 | spin_lock_init(&vgic_cpu->ap_list_lock); | 209 | raw_spin_lock_init(&vgic_cpu->ap_list_lock); |
210 | 210 | ||
211 | /* | 211 | /* |
212 | * Enable and configure all SGIs to be edge-triggered and | 212 | * Enable and configure all SGIs to be edge-triggered and |
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
216 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; | 216 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; |
217 | 217 | ||
218 | INIT_LIST_HEAD(&irq->ap_list); | 218 | INIT_LIST_HEAD(&irq->ap_list); |
219 | spin_lock_init(&irq->irq_lock); | 219 | raw_spin_lock_init(&irq->irq_lock); |
220 | irq->intid = i; | 220 | irq->intid = i; |
221 | irq->vcpu = NULL; | 221 | irq->vcpu = NULL; |
222 | irq->target_vcpu = vcpu; | 222 | irq->target_vcpu = vcpu; |
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
231 | irq->config = VGIC_CONFIG_LEVEL; | 231 | irq->config = VGIC_CONFIG_LEVEL; |
232 | } | 232 | } |
233 | 233 | ||
234 | /* | ||
235 | * GICv3 can only be created via the KVM_DEVICE_CREATE API and | ||
236 | * so we always know the emulation type at this point as it's | ||
237 | * either explicitly configured as GICv3, or explicitly | ||
238 | * configured as GICv2, or not configured yet which also | ||
239 | * implies GICv2. | ||
240 | */ | ||
241 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) | 234 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) |
242 | irq->group = 1; | 235 | irq->group = 1; |
243 | else | 236 | else |
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm) | |||
281 | { | 274 | { |
282 | struct vgic_dist *dist = &kvm->arch.vgic; | 275 | struct vgic_dist *dist = &kvm->arch.vgic; |
283 | struct kvm_vcpu *vcpu; | 276 | struct kvm_vcpu *vcpu; |
284 | int ret = 0, i; | 277 | int ret = 0, i, idx; |
285 | 278 | ||
286 | if (vgic_initialized(kvm)) | 279 | if (vgic_initialized(kvm)) |
287 | return 0; | 280 | return 0; |
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm) | |||
298 | if (ret) | 291 | if (ret) |
299 | goto out; | 292 | goto out; |
300 | 293 | ||
294 | /* Initialize groups on CPUs created before the VGIC type was known */ | ||
295 | kvm_for_each_vcpu(idx, vcpu, kvm) { | ||
296 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
297 | |||
298 | for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { | ||
299 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; | ||
300 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) | ||
301 | irq->group = 1; | ||
302 | else | ||
303 | irq->group = 0; | ||
304 | } | ||
305 | } | ||
306 | |||
301 | if (vgic_has_its(kvm)) { | 307 | if (vgic_has_its(kvm)) { |
302 | ret = vgic_v4_init(kvm); | 308 | ret = vgic_v4_init(kvm); |
303 | if (ret) | 309 | if (ret) |
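The vgic-init.c change drops the assumption that the emulation model is known when a VCPU is initialized, and instead re-derives the group of every private IRQ in vgic_init(), once dist->vgic_model is settled. The underlying rule is small; a hypothetical helper capturing it:

    #include <linux/kvm_host.h>

    /* Hypothetical helper: group 1 for a GICv3 model, group 0 otherwise. */
    static inline u32 demo_irq_default_group(u32 vgic_model)
    {
            return vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ? 1 : 0;
    }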
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index eb2a390a6c86..ab3f47745d9c 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c | |||
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
65 | 65 | ||
66 | INIT_LIST_HEAD(&irq->lpi_list); | 66 | INIT_LIST_HEAD(&irq->lpi_list); |
67 | INIT_LIST_HEAD(&irq->ap_list); | 67 | INIT_LIST_HEAD(&irq->ap_list); |
68 | spin_lock_init(&irq->irq_lock); | 68 | raw_spin_lock_init(&irq->irq_lock); |
69 | 69 | ||
70 | irq->config = VGIC_CONFIG_EDGE; | 70 | irq->config = VGIC_CONFIG_EDGE; |
71 | kref_init(&irq->refcount); | 71 | kref_init(&irq->refcount); |
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
73 | irq->target_vcpu = vcpu; | 73 | irq->target_vcpu = vcpu; |
74 | irq->group = 1; | 74 | irq->group = 1; |
75 | 75 | ||
76 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 76 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * There could be a race with another vgic_add_lpi(), so we need to | 79 | * There could be a race with another vgic_add_lpi(), so we need to |
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
101 | dist->lpi_list_count++; | 101 | dist->lpi_list_count++; |
102 | 102 | ||
103 | out_unlock: | 103 | out_unlock: |
104 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 104 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * We "cache" the configuration table entries in our struct vgic_irq's. | 107 | * We "cache" the configuration table entries in our struct vgic_irq's. |
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
287 | if (ret) | 287 | if (ret) |
288 | return ret; | 288 | return ret; |
289 | 289 | ||
290 | spin_lock_irqsave(&irq->irq_lock, flags); | 290 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
291 | 291 | ||
292 | if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { | 292 | if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { |
293 | irq->priority = LPI_PROP_PRIORITY(prop); | 293 | irq->priority = LPI_PROP_PRIORITY(prop); |
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
299 | } | 299 | } |
300 | } | 300 | } |
301 | 301 | ||
302 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 302 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
303 | 303 | ||
304 | if (irq->hw) | 304 | if (irq->hw) |
305 | return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); | 305 | return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); |
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
332 | if (!intids) | 332 | if (!intids) |
333 | return -ENOMEM; | 333 | return -ENOMEM; |
334 | 334 | ||
335 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 335 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
336 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 336 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
337 | if (i == irq_count) | 337 | if (i == irq_count) |
338 | break; | 338 | break; |
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
341 | continue; | 341 | continue; |
342 | intids[i++] = irq->intid; | 342 | intids[i++] = irq->intid; |
343 | } | 343 | } |
344 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 344 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
345 | 345 | ||
346 | *intid_ptr = intids; | 346 | *intid_ptr = intids; |
347 | return i; | 347 | return i; |
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) | |||
352 | int ret = 0; | 352 | int ret = 0; |
353 | unsigned long flags; | 353 | unsigned long flags; |
354 | 354 | ||
355 | spin_lock_irqsave(&irq->irq_lock, flags); | 355 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
356 | irq->target_vcpu = vcpu; | 356 | irq->target_vcpu = vcpu; |
357 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 357 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
358 | 358 | ||
359 | if (irq->hw) { | 359 | if (irq->hw) { |
360 | struct its_vlpi_map map; | 360 | struct its_vlpi_map map; |
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) | |||
455 | } | 455 | } |
456 | 456 | ||
457 | irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); | 457 | irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); |
458 | spin_lock_irqsave(&irq->irq_lock, flags); | 458 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
459 | irq->pending_latch = pendmask & (1U << bit_nr); | 459 | irq->pending_latch = pendmask & (1U << bit_nr); |
460 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 460 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
461 | vgic_put_irq(vcpu->kvm, irq); | 461 | vgic_put_irq(vcpu->kvm, irq); |
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, | |||
612 | return irq_set_irqchip_state(irq->host_irq, | 612 | return irq_set_irqchip_state(irq->host_irq, |
613 | IRQCHIP_STATE_PENDING, true); | 613 | IRQCHIP_STATE_PENDING, true); |
614 | 614 | ||
615 | spin_lock_irqsave(&irq->irq_lock, flags); | 615 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
616 | irq->pending_latch = true; | 616 | irq->pending_latch = true; |
617 | vgic_queue_irq_unlock(kvm, irq, flags); | 617 | vgic_queue_irq_unlock(kvm, irq, flags); |
618 | 618 | ||
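vgic_add_lpi() (first hunk of this file) allocates the new LPI outside the lock and then re-checks the list for a concurrent insertion under lpi_list_lock, per the race comment above. A generic sketch of that allocate-then-recheck idiom with hypothetical demo_* types:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_lpi {
            u32                     intid;
            struct list_head        node;
    };

    static LIST_HEAD(demo_lpi_list);
    static DEFINE_RAW_SPINLOCK(demo_lpi_lock);

    /* Allocate outside the lock; re-check for a racing insert inside it. */
    static struct demo_lpi *demo_add_lpi(u32 intid)
    {
            struct demo_lpi *new, *it;
            unsigned long flags;

            new = kzalloc(sizeof(*new), GFP_KERNEL);
            if (!new)
                    return NULL;
            new->intid = intid;

            raw_spin_lock_irqsave(&demo_lpi_lock, flags);
            list_for_each_entry(it, &demo_lpi_list, node) {
                    if (it->intid != intid)
                            continue;
                    /* Lost the race: keep the entry that got there first. */
                    raw_spin_unlock_irqrestore(&demo_lpi_lock, flags);
                    kfree(new);
                    return it;
            }
            list_add_tail(&new->node, &demo_lpi_list);
            raw_spin_unlock_irqrestore(&demo_lpi_lock, flags);

            return new;
    }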
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 738b65d2d0e7..b535fffc7400 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c | |||
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, | |||
147 | 147 | ||
148 | irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); | 148 | irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); |
149 | 149 | ||
150 | spin_lock_irqsave(&irq->irq_lock, flags); | 150 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
151 | irq->pending_latch = true; | 151 | irq->pending_latch = true; |
152 | irq->source |= 1U << source_vcpu->vcpu_id; | 152 | irq->source |= 1U << source_vcpu->vcpu_id; |
153 | 153 | ||
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, | |||
191 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); | 191 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); |
192 | int target; | 192 | int target; |
193 | 193 | ||
194 | spin_lock_irqsave(&irq->irq_lock, flags); | 194 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
195 | 195 | ||
196 | irq->targets = (val >> (i * 8)) & cpu_mask; | 196 | irq->targets = (val >> (i * 8)) & cpu_mask; |
197 | target = irq->targets ? __ffs(irq->targets) : 0; | 197 | target = irq->targets ? __ffs(irq->targets) : 0; |
198 | irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); | 198 | irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); |
199 | 199 | ||
200 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 200 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
201 | vgic_put_irq(vcpu->kvm, irq); | 201 | vgic_put_irq(vcpu->kvm, irq); |
202 | } | 202 | } |
203 | } | 203 | } |
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu, | |||
230 | for (i = 0; i < len; i++) { | 230 | for (i = 0; i < len; i++) { |
231 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 231 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
232 | 232 | ||
233 | spin_lock_irqsave(&irq->irq_lock, flags); | 233 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
234 | 234 | ||
235 | irq->source &= ~((val >> (i * 8)) & 0xff); | 235 | irq->source &= ~((val >> (i * 8)) & 0xff); |
236 | if (!irq->source) | 236 | if (!irq->source) |
237 | irq->pending_latch = false; | 237 | irq->pending_latch = false; |
238 | 238 | ||
239 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 239 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
240 | vgic_put_irq(vcpu->kvm, irq); | 240 | vgic_put_irq(vcpu->kvm, irq); |
241 | } | 241 | } |
242 | } | 242 | } |
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, | |||
252 | for (i = 0; i < len; i++) { | 252 | for (i = 0; i < len; i++) { |
253 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 253 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
254 | 254 | ||
255 | spin_lock_irqsave(&irq->irq_lock, flags); | 255 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
256 | 256 | ||
257 | irq->source |= (val >> (i * 8)) & 0xff; | 257 | irq->source |= (val >> (i * 8)) & 0xff; |
258 | 258 | ||
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, | |||
260 | irq->pending_latch = true; | 260 | irq->pending_latch = true; |
261 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 261 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
262 | } else { | 262 | } else { |
263 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 263 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
264 | } | 264 | } |
265 | vgic_put_irq(vcpu->kvm, irq); | 265 | vgic_put_irq(vcpu->kvm, irq); |
266 | } | 266 | } |
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index b3d1f0985117..4a12322bf7df 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c | |||
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, | |||
169 | if (!irq) | 169 | if (!irq) |
170 | return; | 170 | return; |
171 | 171 | ||
172 | spin_lock_irqsave(&irq->irq_lock, flags); | 172 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
173 | 173 | ||
174 | /* We only care about and preserve Aff0, Aff1 and Aff2. */ | 174 | /* We only care about and preserve Aff0, Aff1 and Aff2. */ |
175 | irq->mpidr = val & GENMASK(23, 0); | 175 | irq->mpidr = val & GENMASK(23, 0); |
176 | irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); | 176 | irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); |
177 | 177 | ||
178 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 178 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
179 | vgic_put_irq(vcpu->kvm, irq); | 179 | vgic_put_irq(vcpu->kvm, irq); |
180 | } | 180 | } |
181 | 181 | ||
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, | |||
281 | for (i = 0; i < len * 8; i++) { | 281 | for (i = 0; i < len * 8; i++) { |
282 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 282 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
283 | 283 | ||
284 | spin_lock_irqsave(&irq->irq_lock, flags); | 284 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
285 | if (test_bit(i, &val)) { | 285 | if (test_bit(i, &val)) { |
286 | /* | 286 | /* |
287 | * pending_latch is set irrespective of irq type | 287 | * pending_latch is set irrespective of irq type |
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, | |||
292 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 292 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
293 | } else { | 293 | } else { |
294 | irq->pending_latch = false; | 294 | irq->pending_latch = false; |
295 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 295 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
296 | } | 296 | } |
297 | 297 | ||
298 | vgic_put_irq(vcpu->kvm, irq); | 298 | vgic_put_irq(vcpu->kvm, irq); |
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) | |||
957 | 957 | ||
958 | irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); | 958 | irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); |
959 | 959 | ||
960 | spin_lock_irqsave(&irq->irq_lock, flags); | 960 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
961 | 961 | ||
962 | /* | 962 | /* |
963 | * An access targeting Group0 SGIs can only generate | 963 | * An access targeting Group0 SGIs can only generate |
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) | |||
968 | irq->pending_latch = true; | 968 | irq->pending_latch = true; |
969 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 969 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
970 | } else { | 970 | } else { |
971 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 971 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
972 | } | 972 | } |
973 | 973 | ||
974 | vgic_put_irq(vcpu->kvm, irq); | 974 | vgic_put_irq(vcpu->kvm, irq); |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index ceeda7e04a4d..7de42fba05b5 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr, | |||
77 | for (i = 0; i < len * 8; i++) { | 77 | for (i = 0; i < len * 8; i++) { |
78 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 78 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
79 | 79 | ||
80 | spin_lock_irqsave(&irq->irq_lock, flags); | 80 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
81 | irq->group = !!(val & BIT(i)); | 81 | irq->group = !!(val & BIT(i)); |
82 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 82 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
83 | 83 | ||
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, | |||
120 | for_each_set_bit(i, &val, len * 8) { | 120 | for_each_set_bit(i, &val, len * 8) { |
121 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 121 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
122 | 122 | ||
123 | spin_lock_irqsave(&irq->irq_lock, flags); | 123 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
124 | irq->enabled = true; | 124 | irq->enabled = true; |
125 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 125 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
126 | 126 | ||
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, | |||
139 | for_each_set_bit(i, &val, len * 8) { | 139 | for_each_set_bit(i, &val, len * 8) { |
140 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 140 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
141 | 141 | ||
142 | spin_lock_irqsave(&irq->irq_lock, flags); | 142 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
143 | 143 | ||
144 | irq->enabled = false; | 144 | irq->enabled = false; |
145 | 145 | ||
146 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 146 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
147 | vgic_put_irq(vcpu->kvm, irq); | 147 | vgic_put_irq(vcpu->kvm, irq); |
148 | } | 148 | } |
149 | } | 149 | } |
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, | |||
160 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 160 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
161 | unsigned long flags; | 161 | unsigned long flags; |
162 | 162 | ||
163 | spin_lock_irqsave(&irq->irq_lock, flags); | 163 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
164 | if (irq_is_pending(irq)) | 164 | if (irq_is_pending(irq)) |
165 | value |= (1U << i); | 165 | value |= (1U << i); |
166 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 166 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
167 | 167 | ||
168 | vgic_put_irq(vcpu->kvm, irq); | 168 | vgic_put_irq(vcpu->kvm, irq); |
169 | } | 169 | } |
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, | |||
215 | for_each_set_bit(i, &val, len * 8) { | 215 | for_each_set_bit(i, &val, len * 8) { |
216 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 216 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
217 | 217 | ||
218 | spin_lock_irqsave(&irq->irq_lock, flags); | 218 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
219 | if (irq->hw) | 219 | if (irq->hw) |
220 | vgic_hw_irq_spending(vcpu, irq, is_uaccess); | 220 | vgic_hw_irq_spending(vcpu, irq, is_uaccess); |
221 | else | 221 | else |
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, | |||
262 | for_each_set_bit(i, &val, len * 8) { | 262 | for_each_set_bit(i, &val, len * 8) { |
263 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 263 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
264 | 264 | ||
265 | spin_lock_irqsave(&irq->irq_lock, flags); | 265 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
266 | 266 | ||
267 | if (irq->hw) | 267 | if (irq->hw) |
268 | vgic_hw_irq_cpending(vcpu, irq, is_uaccess); | 268 | vgic_hw_irq_cpending(vcpu, irq, is_uaccess); |
269 | else | 269 | else |
270 | irq->pending_latch = false; | 270 | irq->pending_latch = false; |
271 | 271 | ||
272 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 272 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
273 | vgic_put_irq(vcpu->kvm, irq); | 273 | vgic_put_irq(vcpu->kvm, irq); |
274 | } | 274 | } |
275 | } | 275 | } |
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
311 | unsigned long flags; | 311 | unsigned long flags; |
312 | struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); | 312 | struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); |
313 | 313 | ||
314 | spin_lock_irqsave(&irq->irq_lock, flags); | 314 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
315 | 315 | ||
316 | if (irq->hw) { | 316 | if (irq->hw) { |
317 | vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); | 317 | vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); |
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
342 | if (irq->active) | 342 | if (irq->active) |
343 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 343 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
344 | else | 344 | else |
345 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 345 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
346 | } | 346 | } |
347 | 347 | ||
348 | /* | 348 | /* |
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, | |||
485 | for (i = 0; i < len; i++) { | 485 | for (i = 0; i < len; i++) { |
486 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 486 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
487 | 487 | ||
488 | spin_lock_irqsave(&irq->irq_lock, flags); | 488 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
489 | /* Narrow the priority range to what we actually support */ | 489 | /* Narrow the priority range to what we actually support */ |
490 | irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); | 490 | irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); |
491 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 491 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
492 | 492 | ||
493 | vgic_put_irq(vcpu->kvm, irq); | 493 | vgic_put_irq(vcpu->kvm, irq); |
494 | } | 494 | } |
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, | |||
534 | continue; | 534 | continue; |
535 | 535 | ||
536 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 536 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
537 | spin_lock_irqsave(&irq->irq_lock, flags); | 537 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
538 | 538 | ||
539 | if (test_bit(i * 2 + 1, &val)) | 539 | if (test_bit(i * 2 + 1, &val)) |
540 | irq->config = VGIC_CONFIG_EDGE; | 540 | irq->config = VGIC_CONFIG_EDGE; |
541 | else | 541 | else |
542 | irq->config = VGIC_CONFIG_LEVEL; | 542 | irq->config = VGIC_CONFIG_LEVEL; |
543 | 543 | ||
544 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 544 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
545 | vgic_put_irq(vcpu->kvm, irq); | 545 | vgic_put_irq(vcpu->kvm, irq); |
546 | } | 546 | } |
547 | } | 547 | } |
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, | |||
590 | * restore irq config before line level. | 590 | * restore irq config before line level. |
591 | */ | 591 | */ |
592 | new_level = !!(val & (1U << i)); | 592 | new_level = !!(val & (1U << i)); |
593 | spin_lock_irqsave(&irq->irq_lock, flags); | 593 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
594 | irq->line_level = new_level; | 594 | irq->line_level = new_level; |
595 | if (new_level) | 595 | if (new_level) |
596 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 596 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
597 | else | 597 | else |
598 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 598 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
599 | 599 | ||
600 | vgic_put_irq(vcpu->kvm, irq); | 600 | vgic_put_irq(vcpu->kvm, irq); |
601 | } | 601 | } |
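Many of the handlers above share one shape: take irq_lock with irqsave, then either hand the lock to vgic_queue_irq_unlock(), which releases it on every path, or unlock explicitly when there is nothing to queue. A sketch of that conditional lock handoff (illustrative names):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* The callee releases the lock on every path; callers must not unlock. */
    static void demo_queue_unlock(raw_spinlock_t *lock, unsigned long flags)
            __releases(lock)
    {
            /* ... queue the interrupt, possibly taking further locks ... */
            raw_spin_unlock_irqrestore(lock, flags);
    }

    static void demo_set_pending(raw_spinlock_t *lock, bool *latch, bool level)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(lock, flags);
            if (level) {
                    *latch = true;
                    demo_queue_unlock(lock, flags); /* released inside */
            } else {
                    raw_spin_unlock_irqrestore(lock, flags);
            }
    }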
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 69b892abd7dc..d91a8938aa7c 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
84 | 84 | ||
85 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); | 85 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); |
86 | 86 | ||
87 | spin_lock(&irq->irq_lock); | 87 | raw_spin_lock(&irq->irq_lock); |
88 | 88 | ||
89 | /* Always preserve the active bit */ | 89 | /* Always preserve the active bit */ |
90 | irq->active = !!(val & GICH_LR_ACTIVE_BIT); | 90 | irq->active = !!(val & GICH_LR_ACTIVE_BIT); |
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
127 | vgic_irq_set_phys_active(irq, false); | 127 | vgic_irq_set_phys_active(irq, false); |
128 | } | 128 | } |
129 | 129 | ||
130 | spin_unlock(&irq->irq_lock); | 130 | raw_spin_unlock(&irq->irq_lock); |
131 | vgic_put_irq(vcpu->kvm, irq); | 131 | vgic_put_irq(vcpu->kvm, irq); |
132 | } | 132 | } |
133 | 133 | ||
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 9c0dd234ebe8..4ee0aeb9a905 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
76 | if (!irq) /* An LPI could have been unmapped. */ | 76 | if (!irq) /* An LPI could have been unmapped. */ |
77 | continue; | 77 | continue; |
78 | 78 | ||
79 | spin_lock(&irq->irq_lock); | 79 | raw_spin_lock(&irq->irq_lock); |
80 | 80 | ||
81 | /* Always preserve the active bit */ | 81 | /* Always preserve the active bit */ |
82 | irq->active = !!(val & ICH_LR_ACTIVE_BIT); | 82 | irq->active = !!(val & ICH_LR_ACTIVE_BIT); |
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
119 | vgic_irq_set_phys_active(irq, false); | 119 | vgic_irq_set_phys_active(irq, false); |
120 | } | 120 | } |
121 | 121 | ||
122 | spin_unlock(&irq->irq_lock); | 122 | raw_spin_unlock(&irq->irq_lock); |
123 | vgic_put_irq(vcpu->kvm, irq); | 123 | vgic_put_irq(vcpu->kvm, irq); |
124 | } | 124 | } |
125 | 125 | ||
@@ -347,9 +347,9 @@ retry: | |||
347 | 347 | ||
348 | status = val & (1 << bit_nr); | 348 | status = val & (1 << bit_nr); |
349 | 349 | ||
350 | spin_lock_irqsave(&irq->irq_lock, flags); | 350 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
351 | if (irq->target_vcpu != vcpu) { | 351 | if (irq->target_vcpu != vcpu) { |
352 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 352 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
353 | goto retry; | 353 | goto retry; |
354 | } | 354 | } |
355 | irq->pending_latch = status; | 355 | irq->pending_latch = status; |
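This last vgic-v3 hunk sits in a retry loop: the pending latch is written only if the IRQ still targets the VCPU whose pending table was read; otherwise the lock is dropped and the lookup restarts. A compact sketch of the check-under-lock-and-retry idiom (hypothetical demo_* types):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_vcpu;

    struct demo_irq {
            raw_spinlock_t          lock;
            struct demo_vcpu        *target_vcpu;
            bool                    pending_latch;
    };

    /* Work derived for one target is valid only if the target is unchanged. */
    static void demo_set_latch(struct demo_irq *irq, bool status)
    {
            struct demo_vcpu *vcpu;
            unsigned long flags;

    retry:
            vcpu = READ_ONCE(irq->target_vcpu);     /* snapshot, unlocked */
            /* ... per-VCPU work (e.g. reading its pending table) ... */
            raw_spin_lock_irqsave(&irq->lock, flags);
            if (irq->target_vcpu != vcpu) {
                    /* Affinity moved under us: redo the per-VCPU work. */
                    raw_spin_unlock_irqrestore(&irq->lock, flags);
                    goto retry;
            }
            irq->pending_latch = status;
            raw_spin_unlock_irqrestore(&irq->lock, flags);
    }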
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 870b1185173b..abd9c7352677 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { | |||
54 | * When taking more than one ap_list_lock at the same time, always take the | 54 | * When taking more than one ap_list_lock at the same time, always take the |
55 | * lowest numbered VCPU's ap_list_lock first, so: | 55 | * lowest numbered VCPU's ap_list_lock first, so: |
56 | * vcpuX->vcpu_id < vcpuY->vcpu_id: | 56 | * vcpuX->vcpu_id < vcpuY->vcpu_id: |
57 | * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); | 57 | * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); |
58 | * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); | 58 | * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); |
59 | * | 59 | * |
60 | * Since the VGIC must support injecting virtual interrupts from ISRs, we have | 60 | * Since the VGIC must support injecting virtual interrupts from ISRs, we have |
61 | * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer | 61 | * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer |
62 | * spinlocks for any lock that may be taken while injecting an interrupt. | 62 | * spinlocks for any lock that may be taken while injecting an interrupt. |
63 | */ | 63 | */ |
64 | 64 | ||
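The comment block above pins a global acquisition order for pairs of ap_list_locks: lowest vcpu_id first, with the inner lock annotated for lockdep since both belong to the same lock class. A sketch of how such a pair can be taken safely (illustrative, not the patch's exact code):

    #include <linux/spinlock.h>

    /* Take two same-class per-VCPU locks in a stable global order. */
    static void demo_lock_pair(raw_spinlock_t *a, int a_id,
                               raw_spinlock_t *b, int b_id)
    {
            raw_spinlock_t *first  = a_id < b_id ? a : b;
            raw_spinlock_t *second = a_id < b_id ? b : a;

            raw_spin_lock(first);
            /* Same lock class taken twice: tell lockdep it is deliberate. */
            raw_spin_lock_nested(second, SINGLE_DEPTH_NESTING);
    }

vgic_prune_ap_list() below follows exactly this order when migrating an interrupt between two VCPUs' ap_lists.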
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
72 | struct vgic_irq *irq = NULL; | 72 | struct vgic_irq *irq = NULL; |
73 | unsigned long flags; | 73 | unsigned long flags; |
74 | 74 | ||
75 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 75 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
76 | 76 | ||
77 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 77 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
78 | if (irq->intid != intid) | 78 | if (irq->intid != intid) |
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
88 | irq = NULL; | 88 | irq = NULL; |
89 | 89 | ||
90 | out_unlock: | 90 | out_unlock: |
91 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 91 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
92 | 92 | ||
93 | return irq; | 93 | return irq; |
94 | } | 94 | } |
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) | |||
138 | if (irq->intid < VGIC_MIN_LPI) | 138 | if (irq->intid < VGIC_MIN_LPI) |
139 | return; | 139 | return; |
140 | 140 | ||
141 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 141 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
142 | if (!kref_put(&irq->refcount, vgic_irq_release)) { | 142 | if (!kref_put(&irq->refcount, vgic_irq_release)) { |
143 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 143 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
144 | return; | 144 | return; |
145 | } | 145 | } |
146 | 146 | ||
147 | list_del(&irq->lpi_list); | 147 | list_del(&irq->lpi_list); |
148 | dist->lpi_list_count--; | 148 | dist->lpi_list_count--; |
149 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 149 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
150 | 150 | ||
151 | kfree(irq); | 151 | kfree(irq); |
152 | } | 152 | } |
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
244 | bool penda, pendb; | 244 | bool penda, pendb; |
245 | int ret; | 245 | int ret; |
246 | 246 | ||
247 | spin_lock(&irqa->irq_lock); | 247 | raw_spin_lock(&irqa->irq_lock); |
248 | spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); | 248 | raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); |
249 | 249 | ||
250 | if (irqa->active || irqb->active) { | 250 | if (irqa->active || irqb->active) { |
251 | ret = (int)irqb->active - (int)irqa->active; | 251 | ret = (int)irqb->active - (int)irqa->active; |
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
263 | /* Both pending and enabled, sort by priority */ | 263 | /* Both pending and enabled, sort by priority */ |
264 | ret = irqa->priority - irqb->priority; | 264 | ret = irqa->priority - irqb->priority; |
265 | out: | 265 | out: |
266 | spin_unlock(&irqb->irq_lock); | 266 | raw_spin_unlock(&irqb->irq_lock); |
267 | spin_unlock(&irqa->irq_lock); | 267 | raw_spin_unlock(&irqa->irq_lock); |
268 | return ret; | 268 | return ret; |
269 | } | 269 | } |
270 | 270 | ||
@@ -325,7 +325,7 @@ retry: | |||
325 | * not need to be inserted into an ap_list and there is also | 325 | * not need to be inserted into an ap_list and there is also |
326 | * no more work for us to do. | 326 | * no more work for us to do. |
327 | */ | 327 | */ |
328 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 328 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
329 | 329 | ||
330 | /* | 330 | /* |
331 | * We have to kick the VCPU here, because we could be | 331 | * We have to kick the VCPU here, because we could be |
@@ -347,12 +347,12 @@ retry: | |||
347 | * We must unlock the irq lock to take the ap_list_lock where | 347 | * We must unlock the irq lock to take the ap_list_lock where |
348 | * we are going to insert this new pending interrupt. | 348 | * we are going to insert this new pending interrupt. |
349 | */ | 349 | */ |
350 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 350 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
351 | 351 | ||
352 | /* someone can do stuff here, which we re-check below */ | 352 | /* someone can do stuff here, which we re-check below */ |
353 | 353 | ||
354 | spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 354 | raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
355 | spin_lock(&irq->irq_lock); | 355 | raw_spin_lock(&irq->irq_lock); |
356 | 356 | ||
357 | /* | 357 | /* |
358 | * Did something change behind our backs? | 358 | * Did something change behind our backs? |
@@ -367,10 +367,11 @@ retry: | |||
367 | */ | 367 | */ |
368 | 368 | ||
369 | if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { | 369 | if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { |
370 | spin_unlock(&irq->irq_lock); | 370 | raw_spin_unlock(&irq->irq_lock); |
371 | spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 371 | raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, |
372 | flags); | ||
372 | 373 | ||
373 | spin_lock_irqsave(&irq->irq_lock, flags); | 374 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
374 | goto retry; | 375 | goto retry; |
375 | } | 376 | } |
376 | 377 | ||
@@ -382,8 +383,8 @@ retry: | |||
382 | list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); | 383 | list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); |
383 | irq->vcpu = vcpu; | 384 | irq->vcpu = vcpu; |
384 | 385 | ||
385 | spin_unlock(&irq->irq_lock); | 386 | raw_spin_unlock(&irq->irq_lock); |
386 | spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 387 | raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
387 | 388 | ||
388 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); | 389 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); |
389 | kvm_vcpu_kick(vcpu); | 390 | kvm_vcpu_kick(vcpu); |
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, | |||
430 | if (!irq) | 431 | if (!irq) |
431 | return -EINVAL; | 432 | return -EINVAL; |
432 | 433 | ||
433 | spin_lock_irqsave(&irq->irq_lock, flags); | 434 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
434 | 435 | ||
435 | if (!vgic_validate_injection(irq, level, owner)) { | 436 | if (!vgic_validate_injection(irq, level, owner)) { |
436 | /* Nothing to see here, move along... */ | 437 | /* Nothing to see here, move along... */ |
437 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 438 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
438 | vgic_put_irq(kvm, irq); | 439 | vgic_put_irq(kvm, irq); |
439 | return 0; | 440 | return 0; |
440 | } | 441 | } |
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, | |||
494 | 495 | ||
495 | BUG_ON(!irq); | 496 | BUG_ON(!irq); |
496 | 497 | ||
497 | spin_lock_irqsave(&irq->irq_lock, flags); | 498 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
498 | ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); | 499 | ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); |
499 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 500 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
500 | vgic_put_irq(vcpu->kvm, irq); | 501 | vgic_put_irq(vcpu->kvm, irq); |
501 | 502 | ||
502 | return ret; | 503 | return ret; |
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid) | |||
519 | if (!irq->hw) | 520 | if (!irq->hw) |
520 | goto out; | 521 | goto out; |
521 | 522 | ||
522 | spin_lock_irqsave(&irq->irq_lock, flags); | 523 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
523 | irq->active = false; | 524 | irq->active = false; |
524 | irq->pending_latch = false; | 525 | irq->pending_latch = false; |
525 | irq->line_level = false; | 526 | irq->line_level = false; |
526 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 527 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
527 | out: | 528 | out: |
528 | vgic_put_irq(vcpu->kvm, irq); | 529 | vgic_put_irq(vcpu->kvm, irq); |
529 | } | 530 | } |
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) | |||
539 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); | 540 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); |
540 | BUG_ON(!irq); | 541 | BUG_ON(!irq); |
541 | 542 | ||
542 | spin_lock_irqsave(&irq->irq_lock, flags); | 543 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
543 | kvm_vgic_unmap_irq(irq); | 544 | kvm_vgic_unmap_irq(irq); |
544 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 545 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
545 | vgic_put_irq(vcpu->kvm, irq); | 546 | vgic_put_irq(vcpu->kvm, irq); |
546 | 547 | ||
547 | return 0; | 548 | return 0; |
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner) | |||
571 | return -EINVAL; | 572 | return -EINVAL; |
572 | 573 | ||
573 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); | 574 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); |
574 | spin_lock_irqsave(&irq->irq_lock, flags); | 575 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
575 | if (irq->owner && irq->owner != owner) | 576 | if (irq->owner && irq->owner != owner) |
576 | ret = -EEXIST; | 577 | ret = -EEXIST; |
577 | else | 578 | else |
578 | irq->owner = owner; | 579 | irq->owner = owner; |
579 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 580 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
580 | 581 | ||
581 | return ret; | 582 | return ret; |
582 | } | 583 | } |
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) | |||
597 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); | 598 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); |
598 | 599 | ||
599 | retry: | 600 | retry: |
600 | spin_lock(&vgic_cpu->ap_list_lock); | 601 | raw_spin_lock(&vgic_cpu->ap_list_lock); |
601 | 602 | ||
602 | list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { | 603 | list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { |
603 | struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; | 604 | struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; |
604 | bool target_vcpu_needs_kick = false; | 605 | bool target_vcpu_needs_kick = false; |
605 | 606 | ||
606 | spin_lock(&irq->irq_lock); | 607 | raw_spin_lock(&irq->irq_lock); |
607 | 608 | ||
608 | BUG_ON(vcpu != irq->vcpu); | 609 | BUG_ON(vcpu != irq->vcpu); |
609 | 610 | ||
@@ -616,7 +617,7 @@ retry: | |||
616 | */ | 617 | */ |
617 | list_del(&irq->ap_list); | 618 | list_del(&irq->ap_list); |
618 | irq->vcpu = NULL; | 619 | irq->vcpu = NULL; |
619 | spin_unlock(&irq->irq_lock); | 620 | raw_spin_unlock(&irq->irq_lock); |
620 | 621 | ||
621 | /* | 622 | /* |
622 | * This vgic_put_irq call matches the | 623 | * This vgic_put_irq call matches the |
@@ -631,14 +632,14 @@ retry: | |||
631 | 632 | ||
632 | if (target_vcpu == vcpu) { | 633 | if (target_vcpu == vcpu) { |
633 | /* We're on the right CPU */ | 634 | /* We're on the right CPU */ |
634 | spin_unlock(&irq->irq_lock); | 635 | raw_spin_unlock(&irq->irq_lock); |
635 | continue; | 636 | continue; |
636 | } | 637 | } |
637 | 638 | ||
638 | /* This interrupt looks like it has to be migrated. */ | 639 | /* This interrupt looks like it has to be migrated. */ |
639 | 640 | ||
640 | spin_unlock(&irq->irq_lock); | 641 | raw_spin_unlock(&irq->irq_lock); |
641 | spin_unlock(&vgic_cpu->ap_list_lock); | 642 | raw_spin_unlock(&vgic_cpu->ap_list_lock); |
642 | 643 | ||
643 | /* | 644 | /* |
644 | * Ensure locking order by always locking the smallest | 645 | * Ensure locking order by always locking the smallest |
@@ -652,10 +653,10 @@ retry: | |||
652 | vcpuB = vcpu; | 653 | vcpuB = vcpu; |
653 | } | 654 | } |
654 | 655 | ||
655 | spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); | 656 | raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); |
656 | spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, | 657 | raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, |
657 | SINGLE_DEPTH_NESTING); | 658 | SINGLE_DEPTH_NESTING); |
658 | spin_lock(&irq->irq_lock); | 659 | raw_spin_lock(&irq->irq_lock); |
659 | 660 | ||
660 | /* | 661 | /* |
661 | * If the affinity has been preserved, move the | 662 | * If the affinity has been preserved, move the |
@@ -675,9 +676,9 @@ retry: | |||
675 | target_vcpu_needs_kick = true; | 676 | target_vcpu_needs_kick = true; |
676 | } | 677 | } |
677 | 678 | ||
678 | spin_unlock(&irq->irq_lock); | 679 | raw_spin_unlock(&irq->irq_lock); |
679 | spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); | 680 | raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); |
680 | spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); | 681 | raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); |
681 | 682 | ||
682 | if (target_vcpu_needs_kick) { | 683 | if (target_vcpu_needs_kick) { |
683 | kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); | 684 | kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); |
@@ -687,7 +688,7 @@ retry: | |||
687 | goto retry; | 688 | goto retry; |
688 | } | 689 | } |
689 | 690 | ||
690 | spin_unlock(&vgic_cpu->ap_list_lock); | 691 | raw_spin_unlock(&vgic_cpu->ap_list_lock); |
691 | } | 692 | } |
692 | 693 | ||
693 | static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) | 694 | static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) |
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu, | |||
741 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 742 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
742 | int w; | 743 | int w; |
743 | 744 | ||
744 | spin_lock(&irq->irq_lock); | 745 | raw_spin_lock(&irq->irq_lock); |
745 | /* GICv2 SGIs can count for more than one... */ | 746 | /* GICv2 SGIs can count for more than one... */ |
746 | w = vgic_irq_get_lr_count(irq); | 747 | w = vgic_irq_get_lr_count(irq); |
747 | spin_unlock(&irq->irq_lock); | 748 | raw_spin_unlock(&irq->irq_lock); |
748 | 749 | ||
749 | count += w; | 750 | count += w; |
750 | *multi_sgi |= (w > 1); | 751 | *multi_sgi |= (w > 1); |
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
770 | count = 0; | 771 | count = 0; |
771 | 772 | ||
772 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 773 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
773 | spin_lock(&irq->irq_lock); | 774 | raw_spin_lock(&irq->irq_lock); |
774 | 775 | ||
775 | /* | 776 | /* |
776 | * If we have multi-SGIs in the pipeline, we need to | 777 | * If we have multi-SGIs in the pipeline, we need to |
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
780 | * the AP list has been sorted already. | 781 | * the AP list has been sorted already. |
781 | */ | 782 | */ |
782 | if (multi_sgi && irq->priority > prio) { | 783 | if (multi_sgi && irq->priority > prio) { |
783 | spin_unlock(&irq->irq_lock); | 784 | raw_spin_unlock(&irq->irq_lock); |
784 | break; | 785 | break; |
785 | } | 786 | } |
786 | 787 | ||
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
791 | prio = irq->priority; | 792 | prio = irq->priority; |
792 | } | 793 | } |
793 | 794 | ||
794 | spin_unlock(&irq->irq_lock); | 795 | raw_spin_unlock(&irq->irq_lock); |
795 | 796 | ||
796 | if (count == kvm_vgic_global_state.nr_lr) { | 797 | if (count == kvm_vgic_global_state.nr_lr) { |
797 | if (!list_is_last(&irq->ap_list, | 798 | if (!list_is_last(&irq->ap_list, |
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
872 | 873 | ||
873 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); | 874 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); |
874 | 875 | ||
875 | spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); | 876 | raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); |
876 | vgic_flush_lr_state(vcpu); | 877 | vgic_flush_lr_state(vcpu); |
877 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | 878 | raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); |
878 | 879 | ||
879 | if (can_access_vgic_from_kernel()) | 880 | if (can_access_vgic_from_kernel()) |
880 | vgic_restore_state(vcpu); | 881 | vgic_restore_state(vcpu); |
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
918 | 919 | ||
919 | vgic_get_vmcr(vcpu, &vmcr); | 920 | vgic_get_vmcr(vcpu, &vmcr); |
920 | 921 | ||
921 | spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); | 922 | raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); |
922 | 923 | ||
923 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 924 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
924 | spin_lock(&irq->irq_lock); | 925 | raw_spin_lock(&irq->irq_lock); |
925 | pending = irq_is_pending(irq) && irq->enabled && | 926 | pending = irq_is_pending(irq) && irq->enabled && |
926 | !irq->active && | 927 | !irq->active && |
927 | irq->priority < vmcr.pmr; | 928 | irq->priority < vmcr.pmr; |
928 | spin_unlock(&irq->irq_lock); | 929 | raw_spin_unlock(&irq->irq_lock); |
929 | 930 | ||
930 | if (pending) | 931 | if (pending) |
931 | break; | 932 | break; |
932 | } | 933 | } |
933 | 934 | ||
934 | spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); | 935 | raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); |
935 | 936 | ||
936 | return pending; | 937 | return pending; |
937 | } | 938 | } |
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid) | |||
963 | return false; | 964 | return false; |
964 | 965 | ||
965 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); | 966 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); |
966 | spin_lock_irqsave(&irq->irq_lock, flags); | 967 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
967 | map_is_active = irq->hw && irq->active; | 968 | map_is_active = irq->hw && irq->active; |
968 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 969 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
969 | vgic_put_irq(vcpu->kvm, irq); | 970 | vgic_put_irq(vcpu->kvm, irq); |
970 | 971 | ||
971 | return map_is_active; | 972 | return map_is_active; |
972 | } | 973 | } |
973 | |||