diff options
340 files changed, 2993 insertions, 1972 deletions
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst index fe46d4867e2d..18c1415e7bfa 100644 --- a/Documentation/networking/msg_zerocopy.rst +++ b/Documentation/networking/msg_zerocopy.rst | |||
@@ -7,7 +7,7 @@ Intro | |||
7 | ===== | 7 | ===== |
8 | 8 | ||
9 | The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. | 9 | The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. |
10 | The feature is currently implemented for TCP sockets. | 10 | The feature is currently implemented for TCP and UDP sockets. |
11 | 11 | ||
12 | 12 | ||
13 | Opportunity and Caveats | 13 | Opportunity and Caveats |
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt index 355c6d8ef8ad..b203d1334822 100644 --- a/Documentation/networking/operstates.txt +++ b/Documentation/networking/operstates.txt | |||
@@ -22,8 +22,9 @@ and changeable from userspace under certain rules. | |||
22 | 2. Querying from userspace | 22 | 2. Querying from userspace |
23 | 23 | ||
24 | Both admin and operational state can be queried via the netlink | 24 | Both admin and operational state can be queried via the netlink |
25 | operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK | 25 | operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK |
26 | to be notified of updates. This is important for setting from userspace. | 26 | to be notified of updates while the interface is admin up. This is |
27 | important for setting from userspace. | ||
27 | 28 | ||
28 | These values contain interface state: | 29 | These values contain interface state: |
29 | 30 | ||
@@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to | |||
101 | complete. Corresponding functions are netif_dormant_on() to set the | 102 | complete. Corresponding functions are netif_dormant_on() to set the |
102 | flag, netif_dormant_off() to clear it and netif_dormant() to query. | 103 | flag, netif_dormant_off() to clear it and netif_dormant() to query. |
103 | 104 | ||
104 | On device allocation, networking core sets the flags equivalent to | 105 | On device allocation, both flags __LINK_STATE_NOCARRIER and |
105 | netif_carrier_ok() and !netif_dormant(). | 106 | __LINK_STATE_DORMANT are cleared, so the effective state is equivalent |
107 | to netif_carrier_ok() and !netif_dormant(). | ||
106 | 108 | ||
107 | 109 | ||
108 | Whenever the driver CHANGES one of these flags, a workqueue event is | 110 | Whenever the driver CHANGES one of these flags, a workqueue event is |
@@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the | |||
133 | driver. Afterwards, the userspace application can set IFLA_OPERSTATE | 135 | driver. Afterwards, the userspace application can set IFLA_OPERSTATE |
134 | to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set | 136 | to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set |
135 | netif_carrier_off() or netif_dormant_on(). Changes made by userspace | 137 | netif_carrier_off() or netif_dormant_on(). Changes made by userspace |
136 | are multicasted on the netlink group RTMGRP_LINK. | 138 | are multicasted on the netlink group RTNLGRP_LINK. |
137 | 139 | ||
138 | So basically a 802.1X supplicant interacts with the kernel like this: | 140 | So basically a 802.1X supplicant interacts with the kernel like this: |
139 | 141 | ||
140 | -subscribe to RTMGRP_LINK | 142 | -subscribe to RTNLGRP_LINK |
141 | -set IFLA_LINKMODE to 1 via RTM_SETLINK | 143 | -set IFLA_LINKMODE to 1 via RTM_SETLINK |
142 | -query RTM_GETLINK once to get initial state | 144 | -query RTM_GETLINK once to get initial state |
143 | -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until | 145 | -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until |
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 58649bd4fcfc..ebc679bcb2dc 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt | |||
@@ -80,7 +80,9 @@ nonzero when shrink_dcache_pages() has been called and the | |||
80 | dcache isn't pruned yet. | 80 | dcache isn't pruned yet. |
81 | 81 | ||
82 | nr_negative shows the number of unused dentries that are also | 82 | nr_negative shows the number of unused dentries that are also |
83 | negative dentries which do not mapped to actual files. | 83 | negative dentries which do not map to any files. Instead, |
84 | they help speeding up rejection of non-existing files provided | ||
85 | by the users. | ||
84 | 86 | ||
85 | ============================================================== | 87 | ============================================================== |
86 | 88 | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 9919840d54cd..41ce5f4ad838 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -6151,7 +6151,7 @@ FREESCALE SOC SOUND DRIVERS | |||
6151 | M: Timur Tabi <timur@kernel.org> | 6151 | M: Timur Tabi <timur@kernel.org> |
6152 | M: Nicolin Chen <nicoleotsuka@gmail.com> | 6152 | M: Nicolin Chen <nicoleotsuka@gmail.com> |
6153 | M: Xiubo Li <Xiubo.Lee@gmail.com> | 6153 | M: Xiubo Li <Xiubo.Lee@gmail.com> |
6154 | R: Fabio Estevam <fabio.estevam@nxp.com> | 6154 | R: Fabio Estevam <festevam@gmail.com> |
6155 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 6155 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
6156 | L: linuxppc-dev@lists.ozlabs.org | 6156 | L: linuxppc-dev@lists.ozlabs.org |
6157 | S: Maintained | 6157 | S: Maintained |
@@ -10898,7 +10898,7 @@ F: include/linux/nvmem-consumer.h | |||
10898 | F: include/linux/nvmem-provider.h | 10898 | F: include/linux/nvmem-provider.h |
10899 | 10899 | ||
10900 | NXP SGTL5000 DRIVER | 10900 | NXP SGTL5000 DRIVER |
10901 | M: Fabio Estevam <fabio.estevam@nxp.com> | 10901 | M: Fabio Estevam <festevam@gmail.com> |
10902 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 10902 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
10903 | S: Maintained | 10903 | S: Maintained |
10904 | F: Documentation/devicetree/bindings/sound/sgtl5000.txt | 10904 | F: Documentation/devicetree/bindings/sound/sgtl5000.txt |
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 5 | 2 | VERSION = 5 |
3 | PATCHLEVEL = 0 | 3 | PATCHLEVEL = 0 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc6 | 5 | EXTRAVERSION = -rc7 |
6 | NAME = Shy Crocodile | 6 | NAME = Shy Crocodile |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h index 4d17cacd1462..432402c8e47f 100644 --- a/arch/alpha/include/asm/irq.h +++ b/arch/alpha/include/asm/irq.h | |||
@@ -56,15 +56,15 @@ | |||
56 | 56 | ||
57 | #elif defined(CONFIG_ALPHA_DP264) || \ | 57 | #elif defined(CONFIG_ALPHA_DP264) || \ |
58 | defined(CONFIG_ALPHA_LYNX) || \ | 58 | defined(CONFIG_ALPHA_LYNX) || \ |
59 | defined(CONFIG_ALPHA_SHARK) || \ | 59 | defined(CONFIG_ALPHA_SHARK) |
60 | defined(CONFIG_ALPHA_EIGER) | ||
61 | # define NR_IRQS 64 | 60 | # define NR_IRQS 64 |
62 | 61 | ||
63 | #elif defined(CONFIG_ALPHA_TITAN) | 62 | #elif defined(CONFIG_ALPHA_TITAN) |
64 | #define NR_IRQS 80 | 63 | #define NR_IRQS 80 |
65 | 64 | ||
66 | #elif defined(CONFIG_ALPHA_RAWHIDE) || \ | 65 | #elif defined(CONFIG_ALPHA_RAWHIDE) || \ |
67 | defined(CONFIG_ALPHA_TAKARA) | 66 | defined(CONFIG_ALPHA_TAKARA) || \ |
67 | defined(CONFIG_ALPHA_EIGER) | ||
68 | # define NR_IRQS 128 | 68 | # define NR_IRQS 128 |
69 | 69 | ||
70 | #elif defined(CONFIG_ALPHA_WILDFIRE) | 70 | #elif defined(CONFIG_ALPHA_WILDFIRE) |
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index d73dc473fbb9..188fc9256baf 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c | |||
@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm) | |||
78 | /* Macro for exception fixup code to access integer registers. */ | 78 | /* Macro for exception fixup code to access integer registers. */ |
79 | #define dpf_reg(r) \ | 79 | #define dpf_reg(r) \ |
80 | (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ | 80 | (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ |
81 | (r) <= 18 ? (r)+8 : (r)-10]) | 81 | (r) <= 18 ? (r)+10 : (r)-10]) |
82 | 82 | ||
83 | asmlinkage void | 83 | asmlinkage void |
84 | do_page_fault(unsigned long address, unsigned long mmcsr, | 84 | do_page_fault(unsigned long address, unsigned long mmcsr, |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 664e918e2624..26524b75970a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1400,6 +1400,7 @@ config NR_CPUS | |||
1400 | config HOTPLUG_CPU | 1400 | config HOTPLUG_CPU |
1401 | bool "Support for hot-pluggable CPUs" | 1401 | bool "Support for hot-pluggable CPUs" |
1402 | depends on SMP | 1402 | depends on SMP |
1403 | select GENERIC_IRQ_MIGRATION | ||
1403 | help | 1404 | help |
1404 | Say Y here to experiment with turning CPUs off and on. CPUs | 1405 | Say Y here to experiment with turning CPUs off and on. CPUs |
1405 | can be controlled through /sys/devices/system/cpu. | 1406 | can be controlled through /sys/devices/system/cpu. |
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 04758a2a87f0..67d77eee9433 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts | |||
@@ -644,6 +644,17 @@ | |||
644 | }; | 644 | }; |
645 | }; | 645 | }; |
646 | 646 | ||
647 | /* Configure pwm clock source for timers 8 & 9 */ | ||
648 | &timer8 { | ||
649 | assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>; | ||
650 | assigned-clock-parents = <&sys_clkin_ck>; | ||
651 | }; | ||
652 | |||
653 | &timer9 { | ||
654 | assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>; | ||
655 | assigned-clock-parents = <&sys_clkin_ck>; | ||
656 | }; | ||
657 | |||
647 | /* | 658 | /* |
648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for | 659 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for |
649 | * uart1 wakeirq. | 660 | * uart1 wakeirq. |
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index bc853ebeda22..61a06f6add3c 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi | |||
@@ -317,7 +317,8 @@ | |||
317 | 317 | ||
318 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { | 318 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { |
319 | pinctrl-single,pins = < | 319 | pinctrl-single,pins = < |
320 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ | 320 | /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ |
321 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) | ||
321 | >; | 322 | >; |
322 | }; | 323 | }; |
323 | 324 | ||
@@ -385,7 +386,8 @@ | |||
385 | 386 | ||
386 | palmas: palmas@48 { | 387 | palmas: palmas@48 { |
387 | compatible = "ti,palmas"; | 388 | compatible = "ti,palmas"; |
388 | interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ | 389 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ |
390 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; | ||
389 | reg = <0x48>; | 391 | reg = <0x48>; |
390 | interrupt-controller; | 392 | interrupt-controller; |
391 | #interrupt-cells = <2>; | 393 | #interrupt-cells = <2>; |
@@ -651,7 +653,8 @@ | |||
651 | pinctrl-names = "default"; | 653 | pinctrl-names = "default"; |
652 | pinctrl-0 = <&twl6040_pins>; | 654 | pinctrl-0 = <&twl6040_pins>; |
653 | 655 | ||
654 | interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ | 656 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ |
657 | interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>; | ||
655 | 658 | ||
656 | /* audpwron gpio defined in the board specific dts */ | 659 | /* audpwron gpio defined in the board specific dts */ |
657 | 660 | ||
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 5e21fb430a65..e78d3718f145 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts | |||
@@ -181,6 +181,13 @@ | |||
181 | OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ | 181 | OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ |
182 | >; | 182 | >; |
183 | }; | 183 | }; |
184 | |||
185 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { | ||
186 | pinctrl-single,pins = < | ||
187 | /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ | ||
188 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) | ||
189 | >; | ||
190 | }; | ||
184 | }; | 191 | }; |
185 | 192 | ||
186 | &omap5_pmx_core { | 193 | &omap5_pmx_core { |
@@ -414,8 +421,11 @@ | |||
414 | 421 | ||
415 | palmas: palmas@48 { | 422 | palmas: palmas@48 { |
416 | compatible = "ti,palmas"; | 423 | compatible = "ti,palmas"; |
417 | interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ | ||
418 | reg = <0x48>; | 424 | reg = <0x48>; |
425 | pinctrl-0 = <&palmas_sys_nirq_pins>; | ||
426 | pinctrl-names = "default"; | ||
427 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ | ||
428 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; | ||
419 | interrupt-controller; | 429 | interrupt-controller; |
420 | #interrupt-cells = <2>; | 430 | #interrupt-cells = <2>; |
421 | ti,system-power-controller; | 431 | ti,system-power-controller; |
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index 4acb501dd3f8..3ed49898f4b2 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi | |||
@@ -719,7 +719,6 @@ | |||
719 | pm_qos = <&qos_lcdc0>, | 719 | pm_qos = <&qos_lcdc0>, |
720 | <&qos_lcdc1>, | 720 | <&qos_lcdc1>, |
721 | <&qos_cif0>, | 721 | <&qos_cif0>, |
722 | <&qos_cif1>, | ||
723 | <&qos_ipp>, | 722 | <&qos_ipp>, |
724 | <&qos_rga>; | 723 | <&qos_rga>; |
725 | }; | 724 | }; |
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index c883fcbe93b6..46d41140df27 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #ifndef __ASSEMBLY__ | 25 | #ifndef __ASSEMBLY__ |
26 | struct irqaction; | 26 | struct irqaction; |
27 | struct pt_regs; | 27 | struct pt_regs; |
28 | extern void migrate_irqs(void); | ||
29 | 28 | ||
30 | extern void asm_do_IRQ(unsigned int, struct pt_regs *); | 29 | extern void asm_do_IRQ(unsigned int, struct pt_regs *); |
31 | void handle_IRQ(unsigned int, struct pt_regs *); | 30 | void handle_IRQ(unsigned int, struct pt_regs *); |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index ca56537b61bc..50e89869178a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -48,6 +48,7 @@ | |||
48 | #define KVM_REQ_SLEEP \ | 48 | #define KVM_REQ_SLEEP \ |
49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) | 49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) | 50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
51 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) | ||
51 | 52 | ||
52 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); | 53 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
53 | 54 | ||
@@ -147,6 +148,13 @@ struct kvm_cpu_context { | |||
147 | 148 | ||
148 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 149 | typedef struct kvm_cpu_context kvm_cpu_context_t; |
149 | 150 | ||
151 | struct vcpu_reset_state { | ||
152 | unsigned long pc; | ||
153 | unsigned long r0; | ||
154 | bool be; | ||
155 | bool reset; | ||
156 | }; | ||
157 | |||
150 | struct kvm_vcpu_arch { | 158 | struct kvm_vcpu_arch { |
151 | struct kvm_cpu_context ctxt; | 159 | struct kvm_cpu_context ctxt; |
152 | 160 | ||
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch { | |||
186 | /* Cache some mmu pages needed inside spinlock regions */ | 194 | /* Cache some mmu pages needed inside spinlock regions */ |
187 | struct kvm_mmu_memory_cache mmu_page_cache; | 195 | struct kvm_mmu_memory_cache mmu_page_cache; |
188 | 196 | ||
197 | struct vcpu_reset_state reset_state; | ||
198 | |||
189 | /* Detect first run of a vcpu */ | 199 | /* Detect first run of a vcpu */ |
190 | bool has_run_once; | 200 | bool has_run_once; |
191 | }; | 201 | }; |
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index c4b1d4fb1797..de2089501b8b 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h | |||
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) | |||
76 | #define S2_PMD_MASK PMD_MASK | 76 | #define S2_PMD_MASK PMD_MASK |
77 | #define S2_PMD_SIZE PMD_SIZE | 77 | #define S2_PMD_SIZE PMD_SIZE |
78 | 78 | ||
79 | static inline bool kvm_stage2_has_pmd(struct kvm *kvm) | ||
80 | { | ||
81 | return true; | ||
82 | } | ||
83 | |||
79 | #endif /* __ARM_S2_PGTABLE_H_ */ | 84 | #endif /* __ARM_S2_PGTABLE_H_ */ |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9908dacf9229..844861368cd5 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/smp.h> | 31 | #include <linux/smp.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/ratelimit.h> | ||
35 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
36 | #include <linux/list.h> | 35 | #include <linux/list.h> |
37 | #include <linux/kallsyms.h> | 36 | #include <linux/kallsyms.h> |
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void) | |||
109 | return nr_irqs; | 108 | return nr_irqs; |
110 | } | 109 | } |
111 | #endif | 110 | #endif |
112 | |||
113 | #ifdef CONFIG_HOTPLUG_CPU | ||
114 | static bool migrate_one_irq(struct irq_desc *desc) | ||
115 | { | ||
116 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
117 | const struct cpumask *affinity = irq_data_get_affinity_mask(d); | ||
118 | struct irq_chip *c; | ||
119 | bool ret = false; | ||
120 | |||
121 | /* | ||
122 | * If this is a per-CPU interrupt, or the affinity does not | ||
123 | * include this CPU, then we have nothing to do. | ||
124 | */ | ||
125 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | ||
126 | return false; | ||
127 | |||
128 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
129 | affinity = cpu_online_mask; | ||
130 | ret = true; | ||
131 | } | ||
132 | |||
133 | c = irq_data_get_irq_chip(d); | ||
134 | if (!c->irq_set_affinity) | ||
135 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | ||
136 | else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) | ||
137 | cpumask_copy(irq_data_get_affinity_mask(d), affinity); | ||
138 | |||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * The current CPU has been marked offline. Migrate IRQs off this CPU. | ||
144 | * If the affinity settings do not allow other CPUs, force them onto any | ||
145 | * available CPU. | ||
146 | * | ||
147 | * Note: we must iterate over all IRQs, whether they have an attached | ||
148 | * action structure or not, as we need to get chained interrupts too. | ||
149 | */ | ||
150 | void migrate_irqs(void) | ||
151 | { | ||
152 | unsigned int i; | ||
153 | struct irq_desc *desc; | ||
154 | unsigned long flags; | ||
155 | |||
156 | local_irq_save(flags); | ||
157 | |||
158 | for_each_irq_desc(i, desc) { | ||
159 | bool affinity_broken; | ||
160 | |||
161 | raw_spin_lock(&desc->lock); | ||
162 | affinity_broken = migrate_one_irq(desc); | ||
163 | raw_spin_unlock(&desc->lock); | ||
164 | |||
165 | if (affinity_broken) | ||
166 | pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", | ||
167 | i, smp_processor_id()); | ||
168 | } | ||
169 | |||
170 | local_irq_restore(flags); | ||
171 | } | ||
172 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3bf82232b1be..1d6f5ea522f4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -254,7 +254,7 @@ int __cpu_disable(void) | |||
254 | /* | 254 | /* |
255 | * OK - migrate IRQs away from this CPU | 255 | * OK - migrate IRQs away from this CPU |
256 | */ | 256 | */ |
257 | migrate_irqs(); | 257 | irq_migrate_all_off_this_cpu(); |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * Flush user cache and TLB mappings, and then remove this CPU | 260 | * Flush user cache and TLB mappings, and then remove this CPU |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 222c1635bc7a..e8bd288fd5be 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) | |||
1450 | reset_coproc_regs(vcpu, table, num); | 1450 | reset_coproc_regs(vcpu, table, num); |
1451 | 1451 | ||
1452 | for (num = 1; num < NR_CP15_REGS; num++) | 1452 | for (num = 1; num < NR_CP15_REGS; num++) |
1453 | if (vcpu_cp15(vcpu, num) == 0x42424242) | 1453 | WARN(vcpu_cp15(vcpu, num) == 0x42424242, |
1454 | panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); | 1454 | "Didn't reset vcpu_cp15(vcpu, %zi)", num); |
1455 | } | 1455 | } |
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index 5ed0c3ee33d6..e53327912adc 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/cputype.h> | 26 | #include <asm/cputype.h> |
27 | #include <asm/kvm_arm.h> | 27 | #include <asm/kvm_arm.h> |
28 | #include <asm/kvm_coproc.h> | 28 | #include <asm/kvm_coproc.h> |
29 | #include <asm/kvm_emulate.h> | ||
29 | 30 | ||
30 | #include <kvm/arm_arch_timer.h> | 31 | #include <kvm/arm_arch_timer.h> |
31 | 32 | ||
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
69 | /* Reset CP15 registers */ | 70 | /* Reset CP15 registers */ |
70 | kvm_reset_coprocs(vcpu); | 71 | kvm_reset_coprocs(vcpu); |
71 | 72 | ||
73 | /* | ||
74 | * Additional reset state handling that PSCI may have imposed on us. | ||
75 | * Must be done after all the sys_reg reset. | ||
76 | */ | ||
77 | if (READ_ONCE(vcpu->arch.reset_state.reset)) { | ||
78 | unsigned long target_pc = vcpu->arch.reset_state.pc; | ||
79 | |||
80 | /* Gracefully handle Thumb2 entry point */ | ||
81 | if (target_pc & 1) { | ||
82 | target_pc &= ~1UL; | ||
83 | vcpu_set_thumb(vcpu); | ||
84 | } | ||
85 | |||
86 | /* Propagate caller endianness */ | ||
87 | if (vcpu->arch.reset_state.be) | ||
88 | kvm_vcpu_set_be(vcpu); | ||
89 | |||
90 | *vcpu_pc(vcpu) = target_pc; | ||
91 | vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); | ||
92 | |||
93 | vcpu->arch.reset_state.reset = false; | ||
94 | } | ||
95 | |||
72 | /* Reset arch_timer context */ | 96 | /* Reset arch_timer context */ |
73 | return kvm_timer_vcpu_reset(vcpu); | 97 | return kvm_timer_vcpu_reset(vcpu); |
74 | } | 98 | } |
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index a8b291f00109..dae514c8276a 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c | |||
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, | |||
152 | mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && | 152 | mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && |
153 | (cx->mpu_logic_state == PWRDM_POWER_OFF); | 153 | (cx->mpu_logic_state == PWRDM_POWER_OFF); |
154 | 154 | ||
155 | /* Enter broadcast mode for periodic timers */ | ||
156 | tick_broadcast_enable(); | ||
157 | |||
158 | /* Enter broadcast mode for one-shot timers */ | ||
155 | tick_broadcast_enter(); | 159 | tick_broadcast_enter(); |
156 | 160 | ||
157 | /* | 161 | /* |
@@ -218,15 +222,6 @@ fail: | |||
218 | return index; | 222 | return index; |
219 | } | 223 | } |
220 | 224 | ||
221 | /* | ||
222 | * For each cpu, setup the broadcast timer because local timers | ||
223 | * stops for the states above C1. | ||
224 | */ | ||
225 | static void omap_setup_broadcast_timer(void *arg) | ||
226 | { | ||
227 | tick_broadcast_enable(); | ||
228 | } | ||
229 | |||
230 | static struct cpuidle_driver omap4_idle_driver = { | 225 | static struct cpuidle_driver omap4_idle_driver = { |
231 | .name = "omap4_idle", | 226 | .name = "omap4_idle", |
232 | .owner = THIS_MODULE, | 227 | .owner = THIS_MODULE, |
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void) | |||
319 | if (!cpu_clkdm[0] || !cpu_clkdm[1]) | 314 | if (!cpu_clkdm[0] || !cpu_clkdm[1]) |
320 | return -ENODEV; | 315 | return -ENODEV; |
321 | 316 | ||
322 | /* Configure the broadcast timer on each cpu */ | ||
323 | on_each_cpu(omap_setup_broadcast_timer, NULL, 1); | ||
324 | |||
325 | return cpuidle_register(idle_driver, cpu_online_mask); | 317 | return cpuidle_register(idle_driver, cpu_online_mask); |
326 | } | 318 | } |
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index f86b72d1d59e..1444b4b4bd9f 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) | |||
83 | u32 enable_mask, enable_shift; | 83 | u32 enable_mask, enable_shift; |
84 | u32 pipd_mask, pipd_shift; | 84 | u32 pipd_mask, pipd_shift; |
85 | u32 reg; | 85 | u32 reg; |
86 | int ret; | ||
86 | 87 | ||
87 | if (dsi_id == 0) { | 88 | if (dsi_id == 0) { |
88 | enable_mask = OMAP4_DSI1_LANEENABLE_MASK; | 89 | enable_mask = OMAP4_DSI1_LANEENABLE_MASK; |
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) | |||
98 | return -ENODEV; | 99 | return -ENODEV; |
99 | } | 100 | } |
100 | 101 | ||
101 | regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®); | 102 | ret = regmap_read(omap4_dsi_mux_syscon, |
103 | OMAP4_DSIPHY_SYSCON_OFFSET, | ||
104 | ®); | ||
105 | if (ret) | ||
106 | return ret; | ||
102 | 107 | ||
103 | reg &= ~enable_mask; | 108 | reg &= ~enable_mask; |
104 | reg &= ~pipd_mask; | 109 | reg &= ~pipd_mask; |
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index fc5fb776a710..17558be4bf0a 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |||
@@ -50,6 +50,9 @@ | |||
50 | #define OMAP4_NR_BANKS 4 | 50 | #define OMAP4_NR_BANKS 4 |
51 | #define OMAP4_NR_IRQS 128 | 51 | #define OMAP4_NR_IRQS 128 |
52 | 52 | ||
53 | #define SYS_NIRQ1_EXT_SYS_IRQ_1 7 | ||
54 | #define SYS_NIRQ2_EXT_SYS_IRQ_2 119 | ||
55 | |||
53 | static void __iomem *wakeupgen_base; | 56 | static void __iomem *wakeupgen_base; |
54 | static void __iomem *sar_base; | 57 | static void __iomem *sar_base; |
55 | static DEFINE_RAW_SPINLOCK(wakeupgen_lock); | 58 | static DEFINE_RAW_SPINLOCK(wakeupgen_lock); |
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d) | |||
153 | irq_chip_unmask_parent(d); | 156 | irq_chip_unmask_parent(d); |
154 | } | 157 | } |
155 | 158 | ||
159 | /* | ||
160 | * The sys_nirq pins bypass peripheral modules and are wired directly | ||
161 | * to MPUSS wakeupgen. They get automatically inverted for GIC. | ||
162 | */ | ||
163 | static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type) | ||
164 | { | ||
165 | bool inverted = false; | ||
166 | |||
167 | switch (type) { | ||
168 | case IRQ_TYPE_LEVEL_LOW: | ||
169 | type &= ~IRQ_TYPE_LEVEL_MASK; | ||
170 | type |= IRQ_TYPE_LEVEL_HIGH; | ||
171 | inverted = true; | ||
172 | break; | ||
173 | case IRQ_TYPE_EDGE_FALLING: | ||
174 | type &= ~IRQ_TYPE_EDGE_BOTH; | ||
175 | type |= IRQ_TYPE_EDGE_RISING; | ||
176 | inverted = true; | ||
177 | break; | ||
178 | default: | ||
179 | break; | ||
180 | } | ||
181 | |||
182 | if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 && | ||
183 | d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2) | ||
184 | pr_warn("wakeupgen: irq%li polarity inverted in dts\n", | ||
185 | d->hwirq); | ||
186 | |||
187 | return irq_chip_set_type_parent(d, type); | ||
188 | } | ||
189 | |||
156 | #ifdef CONFIG_HOTPLUG_CPU | 190 | #ifdef CONFIG_HOTPLUG_CPU |
157 | static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); | 191 | static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); |
158 | 192 | ||
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = { | |||
446 | .irq_mask = wakeupgen_mask, | 480 | .irq_mask = wakeupgen_mask, |
447 | .irq_unmask = wakeupgen_unmask, | 481 | .irq_unmask = wakeupgen_unmask, |
448 | .irq_retrigger = irq_chip_retrigger_hierarchy, | 482 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
449 | .irq_set_type = irq_chip_set_type_parent, | 483 | .irq_set_type = wakeupgen_irq_set_type, |
450 | .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, | 484 | .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, |
451 | #ifdef CONFIG_SMP | 485 | #ifdef CONFIG_SMP |
452 | .irq_set_affinity = irq_chip_set_affinity_parent, | 486 | .irq_set_affinity = irq_chip_set_affinity_parent, |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f1e2922e447c..1e3e08a1c456 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev) | |||
2390 | return; | 2390 | return; |
2391 | 2391 | ||
2392 | arm_teardown_iommu_dma_ops(dev); | 2392 | arm_teardown_iommu_dma_ops(dev); |
2393 | /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ | ||
2394 | set_dma_ops(dev, NULL); | ||
2393 | } | 2395 | } |
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index 2c118a6ab358..0dc23fc227ed 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c | |||
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or | |||
247 | } | 247 | } |
248 | 248 | ||
249 | /* Copy arch-dep-instance from template. */ | 249 | /* Copy arch-dep-instance from template. */ |
250 | memcpy(code, (unsigned char *)optprobe_template_entry, | 250 | memcpy(code, (unsigned long *)&optprobe_template_entry, |
251 | TMPL_END_IDX * sizeof(kprobe_opcode_t)); | 251 | TMPL_END_IDX * sizeof(kprobe_opcode_t)); |
252 | 252 | ||
253 | /* Adjust buffer according to instruction. */ | 253 | /* Adjust buffer according to instruction. */ |
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts index 64acccc4bfcb..f74b13aa5aa5 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts | |||
@@ -227,34 +227,34 @@ | |||
227 | 227 | ||
228 | pinctrl_usdhc1_100mhz: usdhc1-100grp { | 228 | pinctrl_usdhc1_100mhz: usdhc1-100grp { |
229 | fsl,pins = < | 229 | fsl,pins = < |
230 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 | 230 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d |
231 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 | 231 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd |
232 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 | 232 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd |
233 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 | 233 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd |
234 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 | 234 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd |
235 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 | 235 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd |
236 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 | 236 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd |
237 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 | 237 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd |
238 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 | 238 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd |
239 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 | 239 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd |
240 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 | 240 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d |
241 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 | 241 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 |
242 | >; | 242 | >; |
243 | }; | 243 | }; |
244 | 244 | ||
245 | pinctrl_usdhc1_200mhz: usdhc1-200grp { | 245 | pinctrl_usdhc1_200mhz: usdhc1-200grp { |
246 | fsl,pins = < | 246 | fsl,pins = < |
247 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 | 247 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f |
248 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 | 248 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf |
249 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 | 249 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf |
250 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 | 250 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf |
251 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 | 251 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf |
252 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 | 252 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf |
253 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 | 253 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf |
254 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 | 254 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf |
255 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 | 255 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf |
256 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 | 256 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf |
257 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 | 257 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f |
258 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 | 258 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 |
259 | >; | 259 | >; |
260 | }; | 260 | }; |
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 8e9d6d5ed7b2..b6d31499fb43 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi | |||
@@ -360,6 +360,8 @@ | |||
360 | <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, | 360 | <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, |
361 | <&clk IMX8MQ_CLK_USDHC1_ROOT>; | 361 | <&clk IMX8MQ_CLK_USDHC1_ROOT>; |
362 | clock-names = "ipg", "ahb", "per"; | 362 | clock-names = "ipg", "ahb", "per"; |
363 | assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>; | ||
364 | assigned-clock-rates = <400000000>; | ||
363 | fsl,tuning-start-tap = <20>; | 365 | fsl,tuning-start-tap = <20>; |
364 | fsl,tuning-step = <2>; | 366 | fsl,tuning-step = <2>; |
365 | bus-width = <4>; | 367 | bus-width = <4>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index bd937d68ca3b..040b36ef0dd2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | |||
@@ -40,6 +40,7 @@ | |||
40 | pinctrl-0 = <&usb30_host_drv>; | 40 | pinctrl-0 = <&usb30_host_drv>; |
41 | regulator-name = "vcc_host_5v"; | 41 | regulator-name = "vcc_host_5v"; |
42 | regulator-always-on; | 42 | regulator-always-on; |
43 | regulator-boot-on; | ||
43 | vin-supply = <&vcc_sys>; | 44 | vin-supply = <&vcc_sys>; |
44 | }; | 45 | }; |
45 | 46 | ||
@@ -51,6 +52,7 @@ | |||
51 | pinctrl-0 = <&usb20_host_drv>; | 52 | pinctrl-0 = <&usb20_host_drv>; |
52 | regulator-name = "vcc_host1_5v"; | 53 | regulator-name = "vcc_host1_5v"; |
53 | regulator-always-on; | 54 | regulator-always-on; |
55 | regulator-boot-on; | ||
54 | vin-supply = <&vcc_sys>; | 56 | vin-supply = <&vcc_sys>; |
55 | }; | 57 | }; |
56 | 58 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index 1ee0dc0d9f10..d1cf404b8708 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts | |||
@@ -22,7 +22,7 @@ | |||
22 | backlight = <&backlight>; | 22 | backlight = <&backlight>; |
23 | power-supply = <&pp3300_disp>; | 23 | power-supply = <&pp3300_disp>; |
24 | 24 | ||
25 | ports { | 25 | port { |
26 | panel_in_edp: endpoint { | 26 | panel_in_edp: endpoint { |
27 | remote-endpoint = <&edp_out_panel>; | 27 | remote-endpoint = <&edp_out_panel>; |
28 | }; | 28 | }; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 81e73103fa78..15e254a77391 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts | |||
@@ -43,7 +43,7 @@ | |||
43 | backlight = <&backlight>; | 43 | backlight = <&backlight>; |
44 | power-supply = <&pp3300_disp>; | 44 | power-supply = <&pp3300_disp>; |
45 | 45 | ||
46 | ports { | 46 | port { |
47 | panel_in_edp: endpoint { | 47 | panel_in_edp: endpoint { |
48 | remote-endpoint = <&edp_out_panel>; | 48 | remote-endpoint = <&edp_out_panel>; |
49 | }; | 49 | }; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index 0b8f1edbd746..b48a63c3efc3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts | |||
@@ -91,7 +91,7 @@ | |||
91 | pinctrl-0 = <&lcd_panel_reset>; | 91 | pinctrl-0 = <&lcd_panel_reset>; |
92 | power-supply = <&vcc3v3_s0>; | 92 | power-supply = <&vcc3v3_s0>; |
93 | 93 | ||
94 | ports { | 94 | port { |
95 | panel_in_edp: endpoint { | 95 | panel_in_edp: endpoint { |
96 | remote-endpoint = <&edp_out_panel>; | 96 | remote-endpoint = <&edp_out_panel>; |
97 | }; | 97 | }; |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..da3fc7324d68 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -48,6 +48,7 @@ | |||
48 | #define KVM_REQ_SLEEP \ | 48 | #define KVM_REQ_SLEEP \ |
49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) | 49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) | 50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
51 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) | ||
51 | 52 | ||
52 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); | 53 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
53 | 54 | ||
@@ -208,6 +209,13 @@ struct kvm_cpu_context { | |||
208 | 209 | ||
209 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 210 | typedef struct kvm_cpu_context kvm_cpu_context_t; |
210 | 211 | ||
212 | struct vcpu_reset_state { | ||
213 | unsigned long pc; | ||
214 | unsigned long r0; | ||
215 | bool be; | ||
216 | bool reset; | ||
217 | }; | ||
218 | |||
211 | struct kvm_vcpu_arch { | 219 | struct kvm_vcpu_arch { |
212 | struct kvm_cpu_context ctxt; | 220 | struct kvm_cpu_context ctxt; |
213 | 221 | ||
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch { | |||
297 | /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ | 305 | /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ |
298 | u64 vsesr_el2; | 306 | u64 vsesr_el2; |
299 | 307 | ||
308 | /* Additional reset state */ | ||
309 | struct vcpu_reset_state reset_state; | ||
310 | |||
300 | /* True when deferrable sysregs are loaded on the physical CPU, | 311 | /* True when deferrable sysregs are loaded on the physical CPU, |
301 | * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ | 312 | * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ |
302 | bool sysregs_loaded_on_cpu; | 313 | bool sysregs_loaded_on_cpu; |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e1ec947e7c0c..0c656850eeea 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x) | |||
332 | #define virt_addr_valid(kaddr) \ | 332 | #define virt_addr_valid(kaddr) \ |
333 | (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) | 333 | (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) |
334 | 334 | ||
335 | /* | ||
336 | * Given that the GIC architecture permits ITS implementations that can only be | ||
337 | * configured with a LPI table address once, GICv3 systems with many CPUs may | ||
338 | * end up reserving a lot of different regions after a kexec for their LPI | ||
339 | * tables (one per CPU), as we are forced to reuse the same memory after kexec | ||
340 | * (and thus reserve it persistently with EFI beforehand) | ||
341 | */ | ||
342 | #if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS) | ||
343 | # define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1) | ||
344 | #endif | ||
345 | |||
335 | #include <asm-generic/memory_model.h> | 346 | #include <asm-generic/memory_model.h> |
336 | 347 | ||
337 | #endif | 348 | #endif |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4b0e1231625c..d09ec76f08cf 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p) | |||
313 | arm64_memblock_init(); | 313 | arm64_memblock_init(); |
314 | 314 | ||
315 | paging_init(); | 315 | paging_init(); |
316 | efi_apply_persistent_mem_reservations(); | ||
317 | 316 | ||
318 | acpi_table_upgrade(); | 317 | acpi_table_upgrade(); |
319 | 318 | ||
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b0b1478094b4..421ebf6f7086 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <kvm/arm_psci.h> | 23 | #include <kvm/arm_psci.h> |
24 | 24 | ||
25 | #include <asm/cpufeature.h> | 25 | #include <asm/cpufeature.h> |
26 | #include <asm/kprobes.h> | ||
26 | #include <asm/kvm_asm.h> | 27 | #include <asm/kvm_asm.h> |
27 | #include <asm/kvm_emulate.h> | 28 | #include <asm/kvm_emulate.h> |
28 | #include <asm/kvm_host.h> | 29 | #include <asm/kvm_host.h> |
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) | |||
107 | 108 | ||
108 | write_sysreg(kvm_get_hyp_vector(), vbar_el1); | 109 | write_sysreg(kvm_get_hyp_vector(), vbar_el1); |
109 | } | 110 | } |
111 | NOKPROBE_SYMBOL(activate_traps_vhe); | ||
110 | 112 | ||
111 | static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) | 113 | static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) |
112 | { | 114 | { |
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void) | |||
154 | write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); | 156 | write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); |
155 | write_sysreg(vectors, vbar_el1); | 157 | write_sysreg(vectors, vbar_el1); |
156 | } | 158 | } |
159 | NOKPROBE_SYMBOL(deactivate_traps_vhe); | ||
157 | 160 | ||
158 | static void __hyp_text __deactivate_traps_nvhe(void) | 161 | static void __hyp_text __deactivate_traps_nvhe(void) |
159 | { | 162 | { |
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) | |||
513 | 516 | ||
514 | return exit_code; | 517 | return exit_code; |
515 | } | 518 | } |
519 | NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); | ||
516 | 520 | ||
517 | /* Switch to the guest for legacy non-VHE systems */ | 521 | /* Switch to the guest for legacy non-VHE systems */ |
518 | int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) | 522 | int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) |
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, | |||
620 | read_sysreg_el2(esr), read_sysreg_el2(far), | 624 | read_sysreg_el2(esr), read_sysreg_el2(far), |
621 | read_sysreg(hpfar_el2), par, vcpu); | 625 | read_sysreg(hpfar_el2), par, vcpu); |
622 | } | 626 | } |
627 | NOKPROBE_SYMBOL(__hyp_call_panic_vhe); | ||
623 | 628 | ||
624 | void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) | 629 | void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) |
625 | { | 630 | { |
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 68d6f7c3b237..b426e2cf973c 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
19 | #include <linux/kvm_host.h> | 19 | #include <linux/kvm_host.h> |
20 | 20 | ||
21 | #include <asm/kprobes.h> | ||
21 | #include <asm/kvm_asm.h> | 22 | #include <asm/kvm_asm.h> |
22 | #include <asm/kvm_emulate.h> | 23 | #include <asm/kvm_emulate.h> |
23 | #include <asm/kvm_hyp.h> | 24 | #include <asm/kvm_hyp.h> |
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) | |||
98 | { | 99 | { |
99 | __sysreg_save_common_state(ctxt); | 100 | __sysreg_save_common_state(ctxt); |
100 | } | 101 | } |
102 | NOKPROBE_SYMBOL(sysreg_save_host_state_vhe); | ||
101 | 103 | ||
102 | void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) | 104 | void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) |
103 | { | 105 | { |
104 | __sysreg_save_common_state(ctxt); | 106 | __sysreg_save_common_state(ctxt); |
105 | __sysreg_save_el2_return_state(ctxt); | 107 | __sysreg_save_el2_return_state(ctxt); |
106 | } | 108 | } |
109 | NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe); | ||
107 | 110 | ||
108 | static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) | 111 | static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) |
109 | { | 112 | { |
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) | |||
188 | { | 191 | { |
189 | __sysreg_restore_common_state(ctxt); | 192 | __sysreg_restore_common_state(ctxt); |
190 | } | 193 | } |
194 | NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe); | ||
191 | 195 | ||
192 | void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) | 196 | void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) |
193 | { | 197 | { |
194 | __sysreg_restore_common_state(ctxt); | 198 | __sysreg_restore_common_state(ctxt); |
195 | __sysreg_restore_el2_return_state(ctxt); | 199 | __sysreg_restore_el2_return_state(ctxt); |
196 | } | 200 | } |
201 | NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe); | ||
197 | 202 | ||
198 | void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) | 203 | void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) |
199 | { | 204 | { |
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index b72a3dd56204..f16a5f8ff2b4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/kvm_arm.h> | 32 | #include <asm/kvm_arm.h> |
33 | #include <asm/kvm_asm.h> | 33 | #include <asm/kvm_asm.h> |
34 | #include <asm/kvm_coproc.h> | 34 | #include <asm/kvm_coproc.h> |
35 | #include <asm/kvm_emulate.h> | ||
35 | #include <asm/kvm_mmu.h> | 36 | #include <asm/kvm_mmu.h> |
36 | 37 | ||
37 | /* Maximum phys_shift supported for any VM on this host */ | 38 | /* Maximum phys_shift supported for any VM on this host */ |
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
105 | * This function finds the right table above and sets the registers on | 106 | * This function finds the right table above and sets the registers on |
106 | * the virtual CPU struct to their architecturally defined reset | 107 | * the virtual CPU struct to their architecturally defined reset |
107 | * values. | 108 | * values. |
109 | * | ||
110 | * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT | ||
111 | * ioctl or as part of handling a request issued by another VCPU in the PSCI | ||
112 | * handling code. In the first case, the VCPU will not be loaded, and in the | ||
113 | * second case the VCPU will be loaded. Because this function operates purely | ||
114 | * on the memory-backed valus of system registers, we want to do a full put if | ||
115 | * we were loaded (handling a request) and load the values back at the end of | ||
116 | * the function. Otherwise we leave the state alone. In both cases, we | ||
117 | * disable preemption around the vcpu reset as we would otherwise race with | ||
118 | * preempt notifiers which also call put/load. | ||
108 | */ | 119 | */ |
109 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | 120 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) |
110 | { | 121 | { |
111 | const struct kvm_regs *cpu_reset; | 122 | const struct kvm_regs *cpu_reset; |
123 | int ret = -EINVAL; | ||
124 | bool loaded; | ||
125 | |||
126 | preempt_disable(); | ||
127 | loaded = (vcpu->cpu != -1); | ||
128 | if (loaded) | ||
129 | kvm_arch_vcpu_put(vcpu); | ||
112 | 130 | ||
113 | switch (vcpu->arch.target) { | 131 | switch (vcpu->arch.target) { |
114 | default: | 132 | default: |
115 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { | 133 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { |
116 | if (!cpu_has_32bit_el1()) | 134 | if (!cpu_has_32bit_el1()) |
117 | return -EINVAL; | 135 | goto out; |
118 | cpu_reset = &default_regs_reset32; | 136 | cpu_reset = &default_regs_reset32; |
119 | } else { | 137 | } else { |
120 | cpu_reset = &default_regs_reset; | 138 | cpu_reset = &default_regs_reset; |
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
129 | /* Reset system registers */ | 147 | /* Reset system registers */ |
130 | kvm_reset_sys_regs(vcpu); | 148 | kvm_reset_sys_regs(vcpu); |
131 | 149 | ||
150 | /* | ||
151 | * Additional reset state handling that PSCI may have imposed on us. | ||
152 | * Must be done after all the sys_reg reset. | ||
153 | */ | ||
154 | if (vcpu->arch.reset_state.reset) { | ||
155 | unsigned long target_pc = vcpu->arch.reset_state.pc; | ||
156 | |||
157 | /* Gracefully handle Thumb2 entry point */ | ||
158 | if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { | ||
159 | target_pc &= ~1UL; | ||
160 | vcpu_set_thumb(vcpu); | ||
161 | } | ||
162 | |||
163 | /* Propagate caller endianness */ | ||
164 | if (vcpu->arch.reset_state.be) | ||
165 | kvm_vcpu_set_be(vcpu); | ||
166 | |||
167 | *vcpu_pc(vcpu) = target_pc; | ||
168 | vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); | ||
169 | |||
170 | vcpu->arch.reset_state.reset = false; | ||
171 | } | ||
172 | |||
132 | /* Reset PMU */ | 173 | /* Reset PMU */ |
133 | kvm_pmu_vcpu_reset(vcpu); | 174 | kvm_pmu_vcpu_reset(vcpu); |
134 | 175 | ||
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
137 | vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; | 178 | vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; |
138 | 179 | ||
139 | /* Reset timer */ | 180 | /* Reset timer */ |
140 | return kvm_timer_vcpu_reset(vcpu); | 181 | ret = kvm_timer_vcpu_reset(vcpu); |
182 | out: | ||
183 | if (loaded) | ||
184 | kvm_arch_vcpu_load(vcpu, smp_processor_id()); | ||
185 | preempt_enable(); | ||
186 | return ret; | ||
141 | } | 187 | } |
142 | 188 | ||
143 | void kvm_set_ipa_limit(void) | 189 | void kvm_set_ipa_limit(void) |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e3e37228ae4e..c936aa40c3f4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, | |||
314 | return read_zero(vcpu, p); | 314 | return read_zero(vcpu, p); |
315 | } | 315 | } |
316 | 316 | ||
317 | static bool trap_undef(struct kvm_vcpu *vcpu, | 317 | /* |
318 | struct sys_reg_params *p, | 318 | * ARMv8.1 mandates at least a trivial LORegion implementation, where all the |
319 | const struct sys_reg_desc *r) | 319 | * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0 |
320 | * system, these registers should UNDEF. LORID_EL1 being a RO register, we | ||
321 | * treat it separately. | ||
322 | */ | ||
323 | static bool trap_loregion(struct kvm_vcpu *vcpu, | ||
324 | struct sys_reg_params *p, | ||
325 | const struct sys_reg_desc *r) | ||
320 | { | 326 | { |
321 | kvm_inject_undefined(vcpu); | 327 | u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
322 | return false; | 328 | u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1, |
329 | (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); | ||
330 | |||
331 | if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { | ||
332 | kvm_inject_undefined(vcpu); | ||
333 | return false; | ||
334 | } | ||
335 | |||
336 | if (p->is_write && sr == SYS_LORID_EL1) | ||
337 | return write_to_read_only(vcpu, p, r); | ||
338 | |||
339 | return trap_raz_wi(vcpu, p, r); | ||
323 | } | 340 | } |
324 | 341 | ||
325 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, | 342 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, |
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) | |||
1048 | if (val & ptrauth_mask) | 1065 | if (val & ptrauth_mask) |
1049 | kvm_debug("ptrauth unsupported for guests, suppressing\n"); | 1066 | kvm_debug("ptrauth unsupported for guests, suppressing\n"); |
1050 | val &= ~ptrauth_mask; | 1067 | val &= ~ptrauth_mask; |
1051 | } else if (id == SYS_ID_AA64MMFR1_EL1) { | ||
1052 | if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) | ||
1053 | kvm_debug("LORegions unsupported for guests, suppressing\n"); | ||
1054 | |||
1055 | val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); | ||
1056 | } | 1068 | } |
1057 | 1069 | ||
1058 | return val; | 1070 | return val; |
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
1338 | { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, | 1350 | { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, |
1339 | { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, | 1351 | { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, |
1340 | 1352 | ||
1341 | { SYS_DESC(SYS_LORSA_EL1), trap_undef }, | 1353 | { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, |
1342 | { SYS_DESC(SYS_LOREA_EL1), trap_undef }, | 1354 | { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, |
1343 | { SYS_DESC(SYS_LORN_EL1), trap_undef }, | 1355 | { SYS_DESC(SYS_LORN_EL1), trap_loregion }, |
1344 | { SYS_DESC(SYS_LORC_EL1), trap_undef }, | 1356 | { SYS_DESC(SYS_LORC_EL1), trap_loregion }, |
1345 | { SYS_DESC(SYS_LORID_EL1), trap_undef }, | 1357 | { SYS_DESC(SYS_LORID_EL1), trap_loregion }, |
1346 | 1358 | ||
1347 | { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, | 1359 | { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, |
1348 | { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, | 1360 | { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, |
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) | |||
2596 | table = get_target_table(vcpu->arch.target, true, &num); | 2608 | table = get_target_table(vcpu->arch.target, true, &num); |
2597 | reset_sys_reg_descs(vcpu, table, num); | 2609 | reset_sys_reg_descs(vcpu, table, num); |
2598 | 2610 | ||
2599 | for (num = 1; num < NR_SYS_REGS; num++) | 2611 | for (num = 1; num < NR_SYS_REGS; num++) { |
2600 | if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) | 2612 | if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, |
2601 | panic("Didn't reset __vcpu_sys_reg(%zi)", num); | 2613 | "Didn't reset __vcpu_sys_reg(%zi)\n", num)) |
2614 | break; | ||
2615 | } | ||
2602 | } | 2616 | } |
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index edfcbb25fd9f..dcea277c09ae 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h | |||
@@ -45,8 +45,8 @@ | |||
45 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) | 45 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) |
46 | #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) | 46 | #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) |
47 | #define pte_clear(mm, addr, ptep) set_pte((ptep), \ | 47 | #define pte_clear(mm, addr, ptep) set_pte((ptep), \ |
48 | (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) | 48 | (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0))) |
49 | #define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) | 49 | #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) |
50 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | 50 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) |
51 | #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) | 51 | #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) |
52 | #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ | 52 | #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ |
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte) | |||
241 | 241 | ||
242 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) | 242 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) |
243 | 243 | ||
244 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
245 | struct file; | ||
246 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
247 | unsigned long size, pgprot_t vma_prot); | ||
248 | |||
244 | /* | 249 | /* |
245 | * Macro to make mark a page protection value as "uncacheable". Note | 250 | * Macro to make mark a page protection value as "uncacheable". Note |
246 | * that "protection" is really a misnomer here as the protection value | 251 | * that "protection" is really a misnomer here as the protection value |
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 8f454810514f..21e0bd5293dd 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h | |||
@@ -49,7 +49,7 @@ struct thread_struct { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | #define INIT_THREAD { \ | 51 | #define INIT_THREAD { \ |
52 | .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ | 52 | .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ |
53 | .sr = DEFAULT_PSR_VALUE, \ | 53 | .sr = DEFAULT_PSR_VALUE, \ |
54 | } | 54 | } |
55 | 55 | ||
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p); | |||
95 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) | 95 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) |
96 | 96 | ||
97 | #define task_pt_regs(p) \ | 97 | #define task_pt_regs(p) \ |
98 | ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) | 98 | ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) |
99 | 99 | ||
100 | #define cpu_relax() barrier() | 100 | #define cpu_relax() barrier() |
101 | 101 | ||
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c index 659253e9989c..d67f9777cfd9 100644 --- a/arch/csky/kernel/dumpstack.c +++ b/arch/csky/kernel/dumpstack.c | |||
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack) | |||
38 | if (task) | 38 | if (task) |
39 | stack = (unsigned long *)thread_saved_fp(task); | 39 | stack = (unsigned long *)thread_saved_fp(task); |
40 | else | 40 | else |
41 | #ifdef CONFIG_STACKTRACE | ||
42 | asm volatile("mov %0, r8\n":"=r"(stack)::"memory"); | ||
43 | #else | ||
41 | stack = (unsigned long *)&stack; | 44 | stack = (unsigned long *)&stack; |
45 | #endif | ||
42 | } | 46 | } |
43 | 47 | ||
44 | show_trace(stack); | 48 | show_trace(stack); |
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 57f1afe19a52..f2f12fff36f7 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/ptrace.h> | 8 | #include <linux/ptrace.h> |
9 | #include <linux/regset.h> | 9 | #include <linux/regset.h> |
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/sched/task_stack.h> | ||
11 | #include <linux/signal.h> | 12 | #include <linux/signal.h> |
12 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
13 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target, | |||
159 | static const struct user_regset csky_regsets[] = { | 160 | static const struct user_regset csky_regsets[] = { |
160 | [REGSET_GPR] = { | 161 | [REGSET_GPR] = { |
161 | .core_note_type = NT_PRSTATUS, | 162 | .core_note_type = NT_PRSTATUS, |
162 | .n = ELF_NGREG, | 163 | .n = sizeof(struct pt_regs) / sizeof(u32), |
163 | .size = sizeof(u32), | 164 | .size = sizeof(u32), |
164 | .align = sizeof(u32), | 165 | .align = sizeof(u32), |
165 | .get = &gpr_get, | 166 | .get = &gpr_get, |
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index ddc4dd79f282..b07a534b3062 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c | |||
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
160 | { | 160 | { |
161 | unsigned long mask = 1 << cpu; | 161 | unsigned long mask = 1 << cpu; |
162 | 162 | ||
163 | secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; | 163 | secondary_stack = |
164 | (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; | ||
164 | secondary_hint = mfcr("cr31"); | 165 | secondary_hint = mfcr("cr31"); |
165 | secondary_ccr = mfcr("cr18"); | 166 | secondary_ccr = mfcr("cr18"); |
166 | 167 | ||
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c index cb7c03e5cd21..8473b6bdf512 100644 --- a/arch/csky/mm/ioremap.c +++ b/arch/csky/mm/ioremap.c | |||
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr) | |||
46 | vunmap((void *)((unsigned long)addr & PAGE_MASK)); | 46 | vunmap((void *)((unsigned long)addr & PAGE_MASK)); |
47 | } | 47 | } |
48 | EXPORT_SYMBOL(iounmap); | 48 | EXPORT_SYMBOL(iounmap); |
49 | |||
50 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
51 | unsigned long size, pgprot_t vma_prot) | ||
52 | { | ||
53 | if (!pfn_valid(pfn)) { | ||
54 | vma_prot.pgprot |= _PAGE_SO; | ||
55 | return pgprot_noncached(vma_prot); | ||
56 | } else if (file->f_flags & O_SYNC) { | ||
57 | return pgprot_noncached(vma_prot); | ||
58 | } | ||
59 | |||
60 | return vma_prot; | ||
61 | } | ||
62 | EXPORT_SYMBOL(phys_mem_access_prot); | ||
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index b16710a8a9e7..76e9bf88d3b9 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c | |||
@@ -79,8 +79,6 @@ enum reg_val_type { | |||
79 | REG_64BIT_32BIT, | 79 | REG_64BIT_32BIT, |
80 | /* 32-bit compatible, need truncation for 64-bit ops. */ | 80 | /* 32-bit compatible, need truncation for 64-bit ops. */ |
81 | REG_32BIT, | 81 | REG_32BIT, |
82 | /* 32-bit zero extended. */ | ||
83 | REG_32BIT_ZERO_EX, | ||
84 | /* 32-bit no sign/zero extension needed. */ | 82 | /* 32-bit no sign/zero extension needed. */ |
85 | REG_32BIT_POS | 83 | REG_32BIT_POS |
86 | }; | 84 | }; |
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) | |||
343 | const struct bpf_prog *prog = ctx->skf; | 341 | const struct bpf_prog *prog = ctx->skf; |
344 | int stack_adjust = ctx->stack_size; | 342 | int stack_adjust = ctx->stack_size; |
345 | int store_offset = stack_adjust - 8; | 343 | int store_offset = stack_adjust - 8; |
344 | enum reg_val_type td; | ||
346 | int r0 = MIPS_R_V0; | 345 | int r0 = MIPS_R_V0; |
347 | 346 | ||
348 | if (dest_reg == MIPS_R_RA && | 347 | if (dest_reg == MIPS_R_RA) { |
349 | get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) | ||
350 | /* Don't let zero extended value escape. */ | 348 | /* Don't let zero extended value escape. */ |
351 | emit_instr(ctx, sll, r0, r0, 0); | 349 | td = get_reg_val_type(ctx, prog->len, BPF_REG_0); |
350 | if (td == REG_64BIT) | ||
351 | emit_instr(ctx, sll, r0, r0, 0); | ||
352 | } | ||
352 | 353 | ||
353 | if (ctx->flags & EBPF_SAVE_RA) { | 354 | if (ctx->flags & EBPF_SAVE_RA) { |
354 | emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); | 355 | emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); |
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
692 | if (dst < 0) | 693 | if (dst < 0) |
693 | return dst; | 694 | return dst; |
694 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 695 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
695 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 696 | if (td == REG_64BIT) { |
696 | /* sign extend */ | 697 | /* sign extend */ |
697 | emit_instr(ctx, sll, dst, dst, 0); | 698 | emit_instr(ctx, sll, dst, dst, 0); |
698 | } | 699 | } |
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
707 | if (dst < 0) | 708 | if (dst < 0) |
708 | return dst; | 709 | return dst; |
709 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 710 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
710 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 711 | if (td == REG_64BIT) { |
711 | /* sign extend */ | 712 | /* sign extend */ |
712 | emit_instr(ctx, sll, dst, dst, 0); | 713 | emit_instr(ctx, sll, dst, dst, 0); |
713 | } | 714 | } |
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
721 | if (dst < 0) | 722 | if (dst < 0) |
722 | return dst; | 723 | return dst; |
723 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 724 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
724 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) | 725 | if (td == REG_64BIT) |
725 | /* sign extend */ | 726 | /* sign extend */ |
726 | emit_instr(ctx, sll, dst, dst, 0); | 727 | emit_instr(ctx, sll, dst, dst, 0); |
727 | if (insn->imm == 1) { | 728 | if (insn->imm == 1) { |
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
860 | if (src < 0 || dst < 0) | 861 | if (src < 0 || dst < 0) |
861 | return -EINVAL; | 862 | return -EINVAL; |
862 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 863 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
863 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 864 | if (td == REG_64BIT) { |
864 | /* sign extend */ | 865 | /* sign extend */ |
865 | emit_instr(ctx, sll, dst, dst, 0); | 866 | emit_instr(ctx, sll, dst, dst, 0); |
866 | } | 867 | } |
867 | did_move = false; | 868 | did_move = false; |
868 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); | 869 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); |
869 | if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { | 870 | if (ts == REG_64BIT) { |
870 | int tmp_reg = MIPS_R_AT; | 871 | int tmp_reg = MIPS_R_AT; |
871 | 872 | ||
872 | if (bpf_op == BPF_MOV) { | 873 | if (bpf_op == BPF_MOV) { |
@@ -1254,8 +1255,7 @@ jeq_common: | |||
1254 | if (insn->imm == 64 && td == REG_32BIT) | 1255 | if (insn->imm == 64 && td == REG_32BIT) |
1255 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | 1256 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); |
1256 | 1257 | ||
1257 | if (insn->imm != 64 && | 1258 | if (insn->imm != 64 && td == REG_64BIT) { |
1258 | (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { | ||
1259 | /* sign extend */ | 1259 | /* sign extend */ |
1260 | emit_instr(ctx, sll, dst, dst, 0); | 1260 | emit_instr(ctx, sll, dst, dst, 0); |
1261 | } | 1261 | } |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index c9bfe526ca9d..d8c8d7c9df15 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h | |||
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud) | |||
904 | 904 | ||
905 | static inline int pud_present(pud_t pud) | 905 | static inline int pud_present(pud_t pud) |
906 | { | 906 | { |
907 | return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); | 907 | return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); |
908 | } | 908 | } |
909 | 909 | ||
910 | extern struct page *pud_page(pud_t pud); | 910 | extern struct page *pud_page(pud_t pud); |
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd) | |||
951 | 951 | ||
952 | static inline int pgd_present(pgd_t pgd) | 952 | static inline int pgd_present(pgd_t pgd) |
953 | { | 953 | { |
954 | return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); | 954 | return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); |
955 | } | 955 | } |
956 | 956 | ||
957 | static inline pte_t pgd_pte(pgd_t pgd) | 957 | static inline pte_t pgd_pte(pgd_t pgd) |
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 2fa2942be221..470755cb7558 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h | |||
@@ -35,6 +35,12 @@ | |||
35 | #define _PAGE_SPECIAL _PAGE_SOFT | 35 | #define _PAGE_SPECIAL _PAGE_SOFT |
36 | #define _PAGE_TABLE _PAGE_PRESENT | 36 | #define _PAGE_TABLE _PAGE_PRESENT |
37 | 37 | ||
38 | /* | ||
39 | * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to | ||
40 | * distinguish them from swapped out pages | ||
41 | */ | ||
42 | #define _PAGE_PROT_NONE _PAGE_READ | ||
43 | |||
38 | #define _PAGE_PFN_SHIFT 10 | 44 | #define _PAGE_PFN_SHIFT 10 |
39 | 45 | ||
40 | /* Set of bits to preserve across pte_modify() */ | 46 | /* Set of bits to preserve across pte_modify() */ |
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 16301966d65b..a8179a8c1491 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h | |||
@@ -44,7 +44,7 @@ | |||
44 | /* Page protection bits */ | 44 | /* Page protection bits */ |
45 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) | 45 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) |
46 | 46 | ||
47 | #define PAGE_NONE __pgprot(0) | 47 | #define PAGE_NONE __pgprot(_PAGE_PROT_NONE) |
48 | #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) | 48 | #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) |
49 | #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) | 49 | #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) |
50 | #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) | 50 | #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) |
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
98 | 98 | ||
99 | static inline int pmd_present(pmd_t pmd) | 99 | static inline int pmd_present(pmd_t pmd) |
100 | { | 100 | { |
101 | return (pmd_val(pmd) & _PAGE_PRESENT); | 101 | return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); |
102 | } | 102 | } |
103 | 103 | ||
104 | static inline int pmd_none(pmd_t pmd) | 104 | static inline int pmd_none(pmd_t pmd) |
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) | |||
178 | 178 | ||
179 | static inline int pte_present(pte_t pte) | 179 | static inline int pte_present(pte_t pte) |
180 | { | 180 | { |
181 | return (pte_val(pte) & _PAGE_PRESENT); | 181 | return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); |
182 | } | 182 | } |
183 | 183 | ||
184 | static inline int pte_none(pte_t pte) | 184 | static inline int pte_none(pte_t pte) |
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, | |||
380 | * | 380 | * |
381 | * Format of swap PTE: | 381 | * Format of swap PTE: |
382 | * bit 0: _PAGE_PRESENT (zero) | 382 | * bit 0: _PAGE_PRESENT (zero) |
383 | * bit 1: reserved for future use (zero) | 383 | * bit 1: _PAGE_PROT_NONE (zero) |
384 | * bits 2 to 6: swap type | 384 | * bits 2 to 6: swap type |
385 | * bits 7 to XLEN-1: swap offset | 385 | * bits 7 to XLEN-1: swap offset |
386 | */ | 386 | */ |
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 1e1395d63dab..65df1dfdc303 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S | |||
@@ -18,8 +18,6 @@ | |||
18 | #include <asm/cache.h> | 18 | #include <asm/cache.h> |
19 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
20 | 20 | ||
21 | #define MAX_BYTES_PER_LONG 0x10 | ||
22 | |||
23 | OUTPUT_ARCH(riscv) | 21 | OUTPUT_ARCH(riscv) |
24 | ENTRY(_start) | 22 | ENTRY(_start) |
25 | 23 | ||
@@ -76,6 +74,8 @@ SECTIONS | |||
76 | *(.sbss*) | 74 | *(.sbss*) |
77 | } | 75 | } |
78 | 76 | ||
77 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) | ||
78 | |||
79 | EXCEPTION_TABLE(0x10) | 79 | EXCEPTION_TABLE(0x10) |
80 | NOTES | 80 | NOTES |
81 | 81 | ||
@@ -83,10 +83,6 @@ SECTIONS | |||
83 | *(.rel.dyn*) | 83 | *(.rel.dyn*) |
84 | } | 84 | } |
85 | 85 | ||
86 | BSS_SECTION(MAX_BYTES_PER_LONG, | ||
87 | MAX_BYTES_PER_LONG, | ||
88 | MAX_BYTES_PER_LONG) | ||
89 | |||
90 | _end = .; | 86 | _end = .; |
91 | 87 | ||
92 | STABS_DEBUG | 88 | STABS_DEBUG |
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index 537f97fde37f..b6796e616812 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S | |||
@@ -30,10 +30,10 @@ | |||
30 | .section .text | 30 | .section .text |
31 | ENTRY(swsusp_arch_suspend) | 31 | ENTRY(swsusp_arch_suspend) |
32 | lg %r1,__LC_NODAT_STACK | 32 | lg %r1,__LC_NODAT_STACK |
33 | aghi %r1,-STACK_FRAME_OVERHEAD | ||
34 | stmg %r6,%r15,__SF_GPRS(%r1) | 33 | stmg %r6,%r15,__SF_GPRS(%r1) |
34 | aghi %r1,-STACK_FRAME_OVERHEAD | ||
35 | stg %r15,__SF_BACKCHAIN(%r1) | 35 | stg %r15,__SF_BACKCHAIN(%r1) |
36 | lgr %r1,%r15 | 36 | lgr %r15,%r1 |
37 | 37 | ||
38 | /* Store FPU registers */ | 38 | /* Store FPU registers */ |
39 | brasl %r14,save_fpu_regs | 39 | brasl %r14,save_fpu_regs |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index a966d7bfac57..4266a4de3160 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq) | |||
382 | if (ai == -1UL) | 382 | if (ai == -1UL) |
383 | break; | 383 | break; |
384 | inc_irq_stat(IRQIO_MSI); | 384 | inc_irq_stat(IRQIO_MSI); |
385 | airq_iv_lock(aibv, ai); | ||
385 | generic_handle_irq(airq_iv_get_data(aibv, ai)); | 386 | generic_handle_irq(airq_iv_get_data(aibv, ai)); |
387 | airq_iv_unlock(aibv, ai); | ||
386 | } | 388 | } |
387 | } | 389 | } |
388 | } | 390 | } |
@@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
408 | zdev->aisb = aisb; | 410 | zdev->aisb = aisb; |
409 | 411 | ||
410 | /* Create adapter interrupt vector */ | 412 | /* Create adapter interrupt vector */ |
411 | zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA); | 413 | zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); |
412 | if (!zdev->aibv) | 414 | if (!zdev->aibv) |
413 | return -ENOMEM; | 415 | return -ENOMEM; |
414 | 416 | ||
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 374a19712e20..b684f0294f35 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void) | |||
2278 | x86_pmu.check_microcode(); | 2278 | x86_pmu.check_microcode(); |
2279 | } | 2279 | } |
2280 | 2280 | ||
2281 | static int x86_pmu_check_period(struct perf_event *event, u64 value) | ||
2282 | { | ||
2283 | if (x86_pmu.check_period && x86_pmu.check_period(event, value)) | ||
2284 | return -EINVAL; | ||
2285 | |||
2286 | if (value && x86_pmu.limit_period) { | ||
2287 | if (x86_pmu.limit_period(event, value) > value) | ||
2288 | return -EINVAL; | ||
2289 | } | ||
2290 | |||
2291 | return 0; | ||
2292 | } | ||
2293 | |||
2281 | static struct pmu pmu = { | 2294 | static struct pmu pmu = { |
2282 | .pmu_enable = x86_pmu_enable, | 2295 | .pmu_enable = x86_pmu_enable, |
2283 | .pmu_disable = x86_pmu_disable, | 2296 | .pmu_disable = x86_pmu_disable, |
@@ -2302,6 +2315,7 @@ static struct pmu pmu = { | |||
2302 | .event_idx = x86_pmu_event_idx, | 2315 | .event_idx = x86_pmu_event_idx, |
2303 | .sched_task = x86_pmu_sched_task, | 2316 | .sched_task = x86_pmu_sched_task, |
2304 | .task_ctx_size = sizeof(struct x86_perf_task_context), | 2317 | .task_ctx_size = sizeof(struct x86_perf_task_context), |
2318 | .check_period = x86_pmu_check_period, | ||
2305 | }; | 2319 | }; |
2306 | 2320 | ||
2307 | void arch_perf_update_userpage(struct perf_event *event, | 2321 | void arch_perf_update_userpage(struct perf_event *event, |
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index daafb893449b..730978dff63f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, | |||
3587 | intel_pmu_lbr_sched_task(ctx, sched_in); | 3587 | intel_pmu_lbr_sched_task(ctx, sched_in); |
3588 | } | 3588 | } |
3589 | 3589 | ||
3590 | static int intel_pmu_check_period(struct perf_event *event, u64 value) | ||
3591 | { | ||
3592 | return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; | ||
3593 | } | ||
3594 | |||
3590 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); | 3595 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); |
3591 | 3596 | ||
3592 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); | 3597 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); |
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = { | |||
3667 | .cpu_starting = intel_pmu_cpu_starting, | 3672 | .cpu_starting = intel_pmu_cpu_starting, |
3668 | .cpu_dying = intel_pmu_cpu_dying, | 3673 | .cpu_dying = intel_pmu_cpu_dying, |
3669 | .cpu_dead = intel_pmu_cpu_dead, | 3674 | .cpu_dead = intel_pmu_cpu_dead, |
3675 | |||
3676 | .check_period = intel_pmu_check_period, | ||
3670 | }; | 3677 | }; |
3671 | 3678 | ||
3672 | static struct attribute *intel_pmu_attrs[]; | 3679 | static struct attribute *intel_pmu_attrs[]; |
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
3711 | 3718 | ||
3712 | .guest_get_msrs = intel_guest_get_msrs, | 3719 | .guest_get_msrs = intel_guest_get_msrs, |
3713 | .sched_task = intel_pmu_sched_task, | 3720 | .sched_task = intel_pmu_sched_task, |
3721 | |||
3722 | .check_period = intel_pmu_check_period, | ||
3714 | }; | 3723 | }; |
3715 | 3724 | ||
3716 | static __init void intel_clovertown_quirk(void) | 3725 | static __init void intel_clovertown_quirk(void) |
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78d7b7031bfc..d46fd6754d92 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
@@ -646,6 +646,11 @@ struct x86_pmu { | |||
646 | * Intel host/guest support (KVM) | 646 | * Intel host/guest support (KVM) |
647 | */ | 647 | */ |
648 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | 648 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); |
649 | |||
650 | /* | ||
651 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. | ||
652 | */ | ||
653 | int (*check_period) (struct perf_event *event, u64 period); | ||
649 | }; | 654 | }; |
650 | 655 | ||
651 | struct x86_perf_task_context { | 656 | struct x86_perf_task_context { |
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void) | |||
857 | 862 | ||
858 | #ifdef CONFIG_CPU_SUP_INTEL | 863 | #ifdef CONFIG_CPU_SUP_INTEL |
859 | 864 | ||
860 | static inline bool intel_pmu_has_bts(struct perf_event *event) | 865 | static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
861 | { | 866 | { |
862 | struct hw_perf_event *hwc = &event->hw; | 867 | struct hw_perf_event *hwc = &event->hw; |
863 | unsigned int hw_event, bts_event; | 868 | unsigned int hw_event, bts_event; |
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) | |||
868 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; | 873 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; |
869 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); | 874 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); |
870 | 875 | ||
871 | return hw_event == bts_event && hwc->sample_period == 1; | 876 | return hw_event == bts_event && period == 1; |
877 | } | ||
878 | |||
879 | static inline bool intel_pmu_has_bts(struct perf_event *event) | ||
880 | { | ||
881 | struct hw_perf_event *hwc = &event->hw; | ||
882 | |||
883 | return intel_pmu_has_bts_period(event, hwc->sample_period); | ||
872 | } | 884 | } |
873 | 885 | ||
874 | int intel_pmu_save_and_restart(struct perf_event *event); | 886 | int intel_pmu_save_and_restart(struct perf_event *event); |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index f65b78d32f5e..7dbbe9ffda17 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n) | |||
51 | /* | 51 | /* |
52 | * fill in the user structure for a core dump.. | 52 | * fill in the user structure for a core dump.. |
53 | */ | 53 | */ |
54 | static void dump_thread32(struct pt_regs *regs, struct user32 *dump) | 54 | static void fill_dump(struct pt_regs *regs, struct user32 *dump) |
55 | { | 55 | { |
56 | u32 fs, gs; | 56 | u32 fs, gs; |
57 | memset(dump, 0, sizeof(*dump)); | 57 | memset(dump, 0, sizeof(*dump)); |
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm) | |||
157 | fs = get_fs(); | 157 | fs = get_fs(); |
158 | set_fs(KERNEL_DS); | 158 | set_fs(KERNEL_DS); |
159 | has_dumped = 1; | 159 | has_dumped = 1; |
160 | |||
161 | fill_dump(cprm->regs, &dump); | ||
162 | |||
160 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); | 163 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); |
161 | dump.u_ar0 = offsetof(struct user32, regs); | 164 | dump.u_ar0 = offsetof(struct user32, regs); |
162 | dump.signal = cprm->siginfo->si_signo; | 165 | dump.signal = cprm->siginfo->si_signo; |
163 | dump_thread32(cprm->regs, &dump); | ||
164 | 166 | ||
165 | /* | 167 | /* |
166 | * If the size of the dump file exceeds the rlimit, then see | 168 | * If the size of the dump file exceeds the rlimit, then see |
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index d9a9993af882..9f15384c504a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h | |||
@@ -52,6 +52,8 @@ | |||
52 | 52 | ||
53 | #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 | 53 | #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 |
54 | 54 | ||
55 | #define INTEL_FAM6_ICELAKE_MOBILE 0x7E | ||
56 | |||
55 | /* "Small Core" Processors (Atom) */ | 57 | /* "Small Core" Processors (Atom) */ |
56 | 58 | ||
57 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ | 59 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ |
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index e652a7cc6186..3f697a9e3f59 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h | |||
@@ -48,7 +48,8 @@ enum { | |||
48 | BIOS_STATUS_SUCCESS = 0, | 48 | BIOS_STATUS_SUCCESS = 0, |
49 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, | 49 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, |
50 | BIOS_STATUS_EINVAL = -EINVAL, | 50 | BIOS_STATUS_EINVAL = -EINVAL, |
51 | BIOS_STATUS_UNAVAIL = -EBUSY | 51 | BIOS_STATUS_UNAVAIL = -EBUSY, |
52 | BIOS_STATUS_ABORT = -EINTR, | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | /* Address map parameters */ | 55 | /* Address map parameters */ |
@@ -167,4 +168,9 @@ extern long system_serial_number; | |||
167 | 168 | ||
168 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ | 169 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ |
169 | 170 | ||
171 | /* | ||
172 | * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details | ||
173 | */ | ||
174 | extern struct semaphore __efi_uv_runtime_lock; | ||
175 | |||
170 | #endif /* _ASM_X86_UV_BIOS_H */ | 176 | #endif /* _ASM_X86_UV_BIOS_H */ |
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d8ea4ebd79e7..d737a51a53ca 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c | |||
@@ -2473,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, | |||
2473 | (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) | 2473 | (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) |
2474 | return -EINVAL; | 2474 | return -EINVAL; |
2475 | 2475 | ||
2476 | if (!nested_cpu_has_preemption_timer(vmcs12) && | ||
2477 | nested_cpu_has_save_preemption_timer(vmcs12)) | ||
2478 | return -EINVAL; | ||
2479 | |||
2476 | if (nested_cpu_has_ept(vmcs12) && | 2480 | if (nested_cpu_has_ept(vmcs12) && |
2477 | !valid_ept_address(vcpu, vmcs12->ept_pointer)) | 2481 | !valid_ept_address(vcpu, vmcs12->ept_pointer)) |
2478 | return -EINVAL; | 2482 | return -EINVAL; |
@@ -5557,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, | |||
5557 | * secondary cpu-based controls. Do not include those that | 5561 | * secondary cpu-based controls. Do not include those that |
5558 | * depend on CPUID bits, they are added later by vmx_cpuid_update. | 5562 | * depend on CPUID bits, they are added later by vmx_cpuid_update. |
5559 | */ | 5563 | */ |
5560 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | 5564 | if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) |
5561 | msrs->secondary_ctls_low, | 5565 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, |
5562 | msrs->secondary_ctls_high); | 5566 | msrs->secondary_ctls_low, |
5567 | msrs->secondary_ctls_high); | ||
5568 | |||
5563 | msrs->secondary_ctls_low = 0; | 5569 | msrs->secondary_ctls_low = 0; |
5564 | msrs->secondary_ctls_high &= | 5570 | msrs->secondary_ctls_high &= |
5565 | SECONDARY_EXEC_DESC | | 5571 | SECONDARY_EXEC_DESC | |
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 95d618045001..30a6bcd735ec 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c | |||
@@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, | |||
863 | if (!entry_only) | 863 | if (!entry_only) |
864 | j = find_msr(&m->host, msr); | 864 | j = find_msr(&m->host, msr); |
865 | 865 | ||
866 | if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { | 866 | if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || |
867 | (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { | ||
867 | printk_once(KERN_WARNING "Not enough msr switch entries. " | 868 | printk_once(KERN_WARNING "Not enough msr switch entries. " |
868 | "Can't add msr %x\n", msr); | 869 | "Can't add msr %x\n", msr); |
869 | return; | 870 | return; |
@@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
1193 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) | 1194 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) |
1194 | return; | 1195 | return; |
1195 | 1196 | ||
1196 | /* | ||
1197 | * First handle the simple case where no cmpxchg is necessary; just | ||
1198 | * allow posting non-urgent interrupts. | ||
1199 | * | ||
1200 | * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change | ||
1201 | * PI.NDST: pi_post_block will do it for us and the wakeup_handler | ||
1202 | * expects the VCPU to be on the blocked_vcpu_list that matches | ||
1203 | * PI.NDST. | ||
1204 | */ | ||
1205 | if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || | ||
1206 | vcpu->cpu == cpu) { | ||
1207 | pi_clear_sn(pi_desc); | ||
1208 | return; | ||
1209 | } | ||
1210 | |||
1211 | /* The full case. */ | 1197 | /* The full case. */ |
1212 | do { | 1198 | do { |
1213 | old.control = new.control = pi_desc->control; | 1199 | old.control = new.control = pi_desc->control; |
@@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
1222 | new.sn = 0; | 1208 | new.sn = 0; |
1223 | } while (cmpxchg64(&pi_desc->control, old.control, | 1209 | } while (cmpxchg64(&pi_desc->control, old.control, |
1224 | new.control) != old.control); | 1210 | new.control) != old.control); |
1211 | |||
1212 | /* | ||
1213 | * Clear SN before reading the bitmap. The VT-d firmware | ||
1214 | * writes the bitmap and reads SN atomically (5.2.3 in the | ||
1215 | * spec), so it doesn't really have a memory barrier that | ||
1216 | * pairs with this, but we cannot do that and we need one. | ||
1217 | */ | ||
1218 | smp_mb__after_atomic(); | ||
1219 | |||
1220 | if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS)) | ||
1221 | pi_set_on(pi_desc); | ||
1225 | } | 1222 | } |
1226 | 1223 | ||
1227 | /* | 1224 | /* |
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 99328954c2fc..0ac0a64c7790 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h | |||
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) | |||
337 | return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); | 337 | return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); |
338 | } | 338 | } |
339 | 339 | ||
340 | static inline void pi_clear_sn(struct pi_desc *pi_desc) | 340 | static inline void pi_set_sn(struct pi_desc *pi_desc) |
341 | { | 341 | { |
342 | return clear_bit(POSTED_INTR_SN, | 342 | return set_bit(POSTED_INTR_SN, |
343 | (unsigned long *)&pi_desc->control); | 343 | (unsigned long *)&pi_desc->control); |
344 | } | 344 | } |
345 | 345 | ||
346 | static inline void pi_set_sn(struct pi_desc *pi_desc) | 346 | static inline void pi_set_on(struct pi_desc *pi_desc) |
347 | { | 347 | { |
348 | return set_bit(POSTED_INTR_SN, | 348 | set_bit(POSTED_INTR_ON, |
349 | (unsigned long *)&pi_desc->control); | 349 | (unsigned long *)&pi_desc->control); |
350 | } | 350 | } |
351 | 351 | ||
352 | static inline void pi_clear_on(struct pi_desc *pi_desc) | 352 | static inline void pi_clear_on(struct pi_desc *pi_desc) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e67ecf25e690..941f932373d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -7801,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
7801 | * 1) We should set ->mode before checking ->requests. Please see | 7801 | * 1) We should set ->mode before checking ->requests. Please see |
7802 | * the comment in kvm_vcpu_exiting_guest_mode(). | 7802 | * the comment in kvm_vcpu_exiting_guest_mode(). |
7803 | * | 7803 | * |
7804 | * 2) For APICv, we should set ->mode before checking PIR.ON. This | 7804 | * 2) For APICv, we should set ->mode before checking PID.ON. This |
7805 | * pairs with the memory barrier implicit in pi_test_and_set_on | 7805 | * pairs with the memory barrier implicit in pi_test_and_set_on |
7806 | * (see vmx_deliver_posted_interrupt). | 7806 | * (see vmx_deliver_posted_interrupt). |
7807 | * | 7807 | * |
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4a6a5a26c582..eb33432f2f24 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c | |||
@@ -29,7 +29,8 @@ | |||
29 | 29 | ||
30 | struct uv_systab *uv_systab; | 30 | struct uv_systab *uv_systab; |
31 | 31 | ||
32 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | 32 | static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
33 | u64 a4, u64 a5) | ||
33 | { | 34 | { |
34 | struct uv_systab *tab = uv_systab; | 35 | struct uv_systab *tab = uv_systab; |
35 | s64 ret; | 36 | s64 ret; |
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | |||
51 | 52 | ||
52 | return ret; | 53 | return ret; |
53 | } | 54 | } |
55 | |||
56 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | ||
57 | { | ||
58 | s64 ret; | ||
59 | |||
60 | if (down_interruptible(&__efi_uv_runtime_lock)) | ||
61 | return BIOS_STATUS_ABORT; | ||
62 | |||
63 | ret = __uv_bios_call(which, a1, a2, a3, a4, a5); | ||
64 | up(&__efi_uv_runtime_lock); | ||
65 | |||
66 | return ret; | ||
67 | } | ||
54 | EXPORT_SYMBOL_GPL(uv_bios_call); | 68 | EXPORT_SYMBOL_GPL(uv_bios_call); |
55 | 69 | ||
56 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | 70 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | |||
59 | unsigned long bios_flags; | 73 | unsigned long bios_flags; |
60 | s64 ret; | 74 | s64 ret; |
61 | 75 | ||
76 | if (down_interruptible(&__efi_uv_runtime_lock)) | ||
77 | return BIOS_STATUS_ABORT; | ||
78 | |||
62 | local_irq_save(bios_flags); | 79 | local_irq_save(bios_flags); |
63 | ret = uv_bios_call(which, a1, a2, a3, a4, a5); | 80 | ret = __uv_bios_call(which, a1, a2, a3, a4, a5); |
64 | local_irq_restore(bios_flags); | 81 | local_irq_restore(bios_flags); |
65 | 82 | ||
83 | up(&__efi_uv_runtime_lock); | ||
84 | |||
66 | return ret; | 85 | return ret; |
67 | } | 86 | } |
68 | 87 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 8f5b533764ca..9437a5eb07cf 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct *work) | |||
737 | spin_unlock_irq(&q->requeue_lock); | 737 | spin_unlock_irq(&q->requeue_lock); |
738 | 738 | ||
739 | list_for_each_entry_safe(rq, next, &rq_list, queuelist) { | 739 | list_for_each_entry_safe(rq, next, &rq_list, queuelist) { |
740 | if (!(rq->rq_flags & RQF_SOFTBARRIER)) | 740 | if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) |
741 | continue; | 741 | continue; |
742 | 742 | ||
743 | rq->rq_flags &= ~RQF_SOFTBARRIER; | 743 | rq->rq_flags &= ~RQF_SOFTBARRIER; |
744 | list_del_init(&rq->queuelist); | 744 | list_del_init(&rq->queuelist); |
745 | blk_mq_sched_insert_request(rq, true, false, false); | 745 | /* |
746 | * If RQF_DONTPREP, rq has contained some driver specific | ||
747 | * data, so insert it to hctx dispatch list to avoid any | ||
748 | * merge. | ||
749 | */ | ||
750 | if (rq->rq_flags & RQF_DONTPREP) | ||
751 | blk_mq_request_bypass_insert(rq, false); | ||
752 | else | ||
753 | blk_mq_sched_insert_request(rq, true, false, false); | ||
746 | } | 754 | } |
747 | 755 | ||
748 | while (!list_empty(&rq_list)) { | 756 | while (!list_empty(&rq_list)) { |
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 17eb09d222ff..ec78a04eb136 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) | |||
122 | 122 | ||
123 | int af_alg_release(struct socket *sock) | 123 | int af_alg_release(struct socket *sock) |
124 | { | 124 | { |
125 | if (sock->sk) | 125 | if (sock->sk) { |
126 | sock_put(sock->sk); | 126 | sock_put(sock->sk); |
127 | sock->sk = NULL; | ||
128 | } | ||
127 | return 0; | 129 | return 0; |
128 | } | 130 | } |
129 | EXPORT_SYMBOL_GPL(af_alg_release); | 131 | EXPORT_SYMBOL_GPL(af_alg_release); |
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index a43276c76fc6..21393ec3b9a4 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c | |||
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client) | |||
509 | struct ht16k33_priv *priv = i2c_get_clientdata(client); | 509 | struct ht16k33_priv *priv = i2c_get_clientdata(client); |
510 | struct ht16k33_fbdev *fbdev = &priv->fbdev; | 510 | struct ht16k33_fbdev *fbdev = &priv->fbdev; |
511 | 511 | ||
512 | cancel_delayed_work(&fbdev->work); | 512 | cancel_delayed_work_sync(&fbdev->work); |
513 | unregister_framebuffer(fbdev->info); | 513 | unregister_framebuffer(fbdev->info); |
514 | framebuffer_release(fbdev->info); | 514 | framebuffer_release(fbdev->info); |
515 | free_page((unsigned long) fbdev->buffer); | 515 | free_page((unsigned long) fbdev->buffer); |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 6f2856c6d0f2..55481b40df9a 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk, | |||
4075 | 4075 | ||
4076 | if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { | 4076 | if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { |
4077 | if (lock_fdc(drive)) | 4077 | if (lock_fdc(drive)) |
4078 | return -EINTR; | 4078 | return 0; |
4079 | poll_drive(false, 0); | 4079 | poll_drive(false, 0); |
4080 | process_fd_request(); | 4080 | process_fd_request(); |
4081 | } | 4081 | } |
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f94d33525771..d299ec79e4c3 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { | |||
781 | SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, | 781 | SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, |
782 | SYSC_QUIRK_LEGACY_IDLE), | 782 | SYSC_QUIRK_LEGACY_IDLE), |
783 | SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, | 783 | SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, |
784 | SYSC_QUIRK_LEGACY_IDLE), | 784 | 0), |
785 | /* Some timers on omap4 and later */ | 785 | /* Some timers on omap4 and later */ |
786 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, | 786 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, |
787 | SYSC_QUIRK_LEGACY_IDLE), | 787 | 0), |
788 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, | 788 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, |
789 | SYSC_QUIRK_LEGACY_IDLE), | 789 | 0), |
790 | SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, | 790 | SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, |
791 | SYSC_QUIRK_LEGACY_IDLE), | 791 | SYSC_QUIRK_LEGACY_IDLE), |
792 | /* Uarts on omap4 and later */ | 792 | /* Uarts on omap4 and later */ |
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 595124074821..c364027638e1 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c | |||
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer) | |||
154 | if (IS_ERR(parent)) | 154 | if (IS_ERR(parent)) |
155 | return -ENODEV; | 155 | return -ENODEV; |
156 | 156 | ||
157 | /* Bail out if both clocks point to fck */ | ||
158 | if (clk_is_match(parent, timer->fclk)) | ||
159 | return 0; | ||
160 | |||
157 | ret = clk_set_parent(timer->fclk, parent); | 161 | ret = clk_set_parent(timer->fclk, parent); |
158 | if (ret < 0) | 162 | if (ret < 0) |
159 | pr_err("%s: failed to set parent\n", __func__); | 163 | pr_err("%s: failed to set parent\n", __func__); |
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
864 | timer->pdev = pdev; | 868 | timer->pdev = pdev; |
865 | 869 | ||
866 | pm_runtime_enable(dev); | 870 | pm_runtime_enable(dev); |
867 | pm_runtime_irq_safe(dev); | ||
868 | 871 | ||
869 | if (!timer->reserved) { | 872 | if (!timer->reserved) { |
870 | ret = pm_runtime_get_sync(dev); | 873 | ret = pm_runtime_get_sync(dev); |
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 8ada308d72ee..b0125ad65825 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c | |||
@@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
380 | rc = cc_ivgen_init(new_drvdata); | 380 | rc = cc_ivgen_init(new_drvdata); |
381 | if (rc) { | 381 | if (rc) { |
382 | dev_err(dev, "cc_ivgen_init failed\n"); | 382 | dev_err(dev, "cc_ivgen_init failed\n"); |
383 | goto post_power_mgr_err; | 383 | goto post_buf_mgr_err; |
384 | } | 384 | } |
385 | 385 | ||
386 | /* Allocate crypto algs */ | 386 | /* Allocate crypto algs */ |
@@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
403 | goto post_hash_err; | 403 | goto post_hash_err; |
404 | } | 404 | } |
405 | 405 | ||
406 | /* All set, we can allow autosuspend */ | ||
407 | cc_pm_go(new_drvdata); | ||
408 | |||
406 | /* If we got here and FIPS mode is enabled | 409 | /* If we got here and FIPS mode is enabled |
407 | * it means all FIPS test passed, so let TEE | 410 | * it means all FIPS test passed, so let TEE |
408 | * know we're good. | 411 | * know we're good. |
@@ -417,8 +420,6 @@ post_cipher_err: | |||
417 | cc_cipher_free(new_drvdata); | 420 | cc_cipher_free(new_drvdata); |
418 | post_ivgen_err: | 421 | post_ivgen_err: |
419 | cc_ivgen_fini(new_drvdata); | 422 | cc_ivgen_fini(new_drvdata); |
420 | post_power_mgr_err: | ||
421 | cc_pm_fini(new_drvdata); | ||
422 | post_buf_mgr_err: | 423 | post_buf_mgr_err: |
423 | cc_buffer_mgr_fini(new_drvdata); | 424 | cc_buffer_mgr_fini(new_drvdata); |
424 | post_req_mgr_err: | 425 | post_req_mgr_err: |
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d990f472e89f..6ff7e75ad90e 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c | |||
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev) | |||
100 | 100 | ||
101 | int cc_pm_init(struct cc_drvdata *drvdata) | 101 | int cc_pm_init(struct cc_drvdata *drvdata) |
102 | { | 102 | { |
103 | int rc = 0; | ||
104 | struct device *dev = drvdata_to_dev(drvdata); | 103 | struct device *dev = drvdata_to_dev(drvdata); |
105 | 104 | ||
106 | /* must be before the enabling to avoid resdundent suspending */ | 105 | /* must be before the enabling to avoid resdundent suspending */ |
107 | pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); | 106 | pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); |
108 | pm_runtime_use_autosuspend(dev); | 107 | pm_runtime_use_autosuspend(dev); |
109 | /* activate the PM module */ | 108 | /* activate the PM module */ |
110 | rc = pm_runtime_set_active(dev); | 109 | return pm_runtime_set_active(dev); |
111 | if (rc) | 110 | } |
112 | return rc; | ||
113 | /* enable the PM module*/ | ||
114 | pm_runtime_enable(dev); | ||
115 | 111 | ||
116 | return rc; | 112 | /* enable the PM module*/ |
113 | void cc_pm_go(struct cc_drvdata *drvdata) | ||
114 | { | ||
115 | pm_runtime_enable(drvdata_to_dev(drvdata)); | ||
117 | } | 116 | } |
118 | 117 | ||
119 | void cc_pm_fini(struct cc_drvdata *drvdata) | 118 | void cc_pm_fini(struct cc_drvdata *drvdata) |
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 020a5403c58b..f62624357020 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h | |||
@@ -16,6 +16,7 @@ | |||
16 | extern const struct dev_pm_ops ccree_pm; | 16 | extern const struct dev_pm_ops ccree_pm; |
17 | 17 | ||
18 | int cc_pm_init(struct cc_drvdata *drvdata); | 18 | int cc_pm_init(struct cc_drvdata *drvdata); |
19 | void cc_pm_go(struct cc_drvdata *drvdata); | ||
19 | void cc_pm_fini(struct cc_drvdata *drvdata); | 20 | void cc_pm_fini(struct cc_drvdata *drvdata); |
20 | int cc_pm_suspend(struct device *dev); | 21 | int cc_pm_suspend(struct device *dev); |
21 | int cc_pm_resume(struct device *dev); | 22 | int cc_pm_resume(struct device *dev); |
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) | |||
29 | return 0; | 30 | return 0; |
30 | } | 31 | } |
31 | 32 | ||
33 | static void cc_pm_go(struct cc_drvdata *drvdata) {} | ||
34 | |||
32 | static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} | 35 | static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} |
33 | 36 | ||
34 | static inline int cc_pm_suspend(struct device *dev) | 37 | static inline int cc_pm_suspend(struct device *dev) |
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 4c46ff6f2242..55b77c576c42 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c | |||
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, | |||
592 | 592 | ||
593 | early_memunmap(tbl, sizeof(*tbl)); | 593 | early_memunmap(tbl, sizeof(*tbl)); |
594 | } | 594 | } |
595 | return 0; | ||
596 | } | ||
597 | 595 | ||
598 | int __init efi_apply_persistent_mem_reservations(void) | ||
599 | { | ||
600 | if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { | 596 | if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { |
601 | unsigned long prsv = efi.mem_reserve; | 597 | unsigned long prsv = efi.mem_reserve; |
602 | 598 | ||
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index eee42d5e25ee..c037c6c5d0b7 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c | |||
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) | |||
75 | efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; | 75 | efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; |
76 | efi_status_t status; | 76 | efi_status_t status; |
77 | 77 | ||
78 | if (IS_ENABLED(CONFIG_ARM)) | ||
79 | return; | ||
80 | |||
81 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), | 78 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), |
82 | (void **)&rsv); | 79 | (void **)&rsv); |
83 | if (status != EFI_SUCCESS) { | 80 | if (status != EFI_SUCCESS) { |
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 8903b9ccfc2b..e2abfdb5cee6 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c | |||
@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) | |||
147 | static DEFINE_SEMAPHORE(efi_runtime_lock); | 147 | static DEFINE_SEMAPHORE(efi_runtime_lock); |
148 | 148 | ||
149 | /* | 149 | /* |
150 | * Expose the EFI runtime lock to the UV platform | ||
151 | */ | ||
152 | #ifdef CONFIG_X86_UV | ||
153 | extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); | ||
154 | #endif | ||
155 | |||
156 | /* | ||
150 | * Calls the appropriate efi_runtime_service() with the appropriate | 157 | * Calls the appropriate efi_runtime_service() with the appropriate |
151 | * arguments. | 158 | * arguments. |
152 | * | 159 | * |
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c index 00e954f22bc9..74401e0adb29 100644 --- a/drivers/gpio/gpio-mt7621.c +++ b/drivers/gpio/gpio-mt7621.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #define GPIO_REG_EDGE 0xA0 | 30 | #define GPIO_REG_EDGE 0xA0 |
31 | 31 | ||
32 | struct mtk_gc { | 32 | struct mtk_gc { |
33 | struct irq_chip irq_chip; | ||
33 | struct gpio_chip chip; | 34 | struct gpio_chip chip; |
34 | spinlock_t lock; | 35 | spinlock_t lock; |
35 | int bank; | 36 | int bank; |
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type) | |||
189 | return 0; | 190 | return 0; |
190 | } | 191 | } |
191 | 192 | ||
192 | static struct irq_chip mediatek_gpio_irq_chip = { | ||
193 | .irq_unmask = mediatek_gpio_irq_unmask, | ||
194 | .irq_mask = mediatek_gpio_irq_mask, | ||
195 | .irq_mask_ack = mediatek_gpio_irq_mask, | ||
196 | .irq_set_type = mediatek_gpio_irq_type, | ||
197 | }; | ||
198 | |||
199 | static int | 193 | static int |
200 | mediatek_gpio_xlate(struct gpio_chip *chip, | 194 | mediatek_gpio_xlate(struct gpio_chip *chip, |
201 | const struct of_phandle_args *spec, u32 *flags) | 195 | const struct of_phandle_args *spec, u32 *flags) |
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev, | |||
254 | return ret; | 248 | return ret; |
255 | } | 249 | } |
256 | 250 | ||
251 | rg->irq_chip.name = dev_name(dev); | ||
252 | rg->irq_chip.parent_device = dev; | ||
253 | rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; | ||
254 | rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; | ||
255 | rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask; | ||
256 | rg->irq_chip.irq_set_type = mediatek_gpio_irq_type; | ||
257 | |||
257 | if (mtk->gpio_irq) { | 258 | if (mtk->gpio_irq) { |
258 | /* | 259 | /* |
259 | * Manually request the irq here instead of passing | 260 | * Manually request the irq here instead of passing |
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev, | |||
270 | return ret; | 271 | return ret; |
271 | } | 272 | } |
272 | 273 | ||
273 | ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip, | 274 | ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip, |
274 | 0, handle_simple_irq, IRQ_TYPE_NONE); | 275 | 0, handle_simple_irq, IRQ_TYPE_NONE); |
275 | if (ret) { | 276 | if (ret) { |
276 | dev_err(dev, "failed to add gpiochip_irqchip\n"); | 277 | dev_err(dev, "failed to add gpiochip_irqchip\n"); |
277 | return ret; | 278 | return ret; |
278 | } | 279 | } |
279 | 280 | ||
280 | gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip, | 281 | gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip, |
281 | mtk->gpio_irq, NULL); | 282 | mtk->gpio_irq, NULL); |
282 | } | 283 | } |
283 | 284 | ||
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev) | |||
310 | mtk->gpio_irq = irq_of_parse_and_map(np, 0); | 311 | mtk->gpio_irq = irq_of_parse_and_map(np, 0); |
311 | mtk->dev = dev; | 312 | mtk->dev = dev; |
312 | platform_set_drvdata(pdev, mtk); | 313 | platform_set_drvdata(pdev, mtk); |
313 | mediatek_gpio_irq_chip.name = dev_name(dev); | ||
314 | 314 | ||
315 | for (i = 0; i < MTK_BANK_CNT; i++) { | 315 | for (i = 0; i < MTK_BANK_CNT; i++) { |
316 | ret = mediatek_gpio_bank_probe(dev, np, i); | 316 | ret = mediatek_gpio_bank_probe(dev, np, i); |
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index e9600b556f39..bcc6be4a5cb2 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c | |||
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void) | |||
245 | { | 245 | { |
246 | switch (gpio_type) { | 246 | switch (gpio_type) { |
247 | case PXA3XX_GPIO: | 247 | case PXA3XX_GPIO: |
248 | case MMP2_GPIO: | ||
248 | return false; | 249 | return false; |
249 | 250 | ||
250 | default: | 251 | default: |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8fab0d637ee5..3a9b48b227ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | |||
@@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle) | |||
90 | adev->psp.sos_fw = NULL; | 90 | adev->psp.sos_fw = NULL; |
91 | release_firmware(adev->psp.asd_fw); | 91 | release_firmware(adev->psp.asd_fw); |
92 | adev->psp.asd_fw = NULL; | 92 | adev->psp.asd_fw = NULL; |
93 | release_firmware(adev->psp.ta_fw); | 93 | if (adev->psp.ta_fw) { |
94 | adev->psp.ta_fw = NULL; | 94 | release_firmware(adev->psp.ta_fw); |
95 | adev->psp.ta_fw = NULL; | ||
96 | } | ||
95 | return 0; | 97 | return 0; |
96 | } | 98 | } |
97 | 99 | ||
@@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp) | |||
435 | struct ta_xgmi_shared_memory *xgmi_cmd; | 437 | struct ta_xgmi_shared_memory *xgmi_cmd; |
436 | int ret; | 438 | int ret; |
437 | 439 | ||
440 | if (!psp->adev->psp.ta_fw) | ||
441 | return -ENOENT; | ||
442 | |||
438 | if (!psp->xgmi_context.initialized) { | 443 | if (!psp->xgmi_context.initialized) { |
439 | ret = psp_xgmi_init_shared_buf(psp); | 444 | ret = psp_xgmi_init_shared_buf(psp); |
440 | if (ret) | 445 | if (ret) |
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 0c6e7f9b143f..189fcb004579 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | |||
@@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) | |||
152 | 152 | ||
153 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); | 153 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); |
154 | err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); | 154 | err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); |
155 | if (err) | 155 | if (err) { |
156 | goto out2; | 156 | release_firmware(adev->psp.ta_fw); |
157 | 157 | adev->psp.ta_fw = NULL; | |
158 | err = amdgpu_ucode_validate(adev->psp.ta_fw); | 158 | dev_info(adev->dev, |
159 | if (err) | 159 | "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); |
160 | goto out2; | 160 | } else { |
161 | 161 | err = amdgpu_ucode_validate(adev->psp.ta_fw); | |
162 | ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; | 162 | if (err) |
163 | adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); | 163 | goto out2; |
164 | adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); | 164 | |
165 | adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + | 165 | ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; |
166 | le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); | 166 | adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); |
167 | adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); | ||
168 | adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + | ||
169 | le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); | ||
170 | } | ||
167 | 171 | ||
168 | return 0; | 172 | return 0; |
169 | 173 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 9a7ac58eb18e..ddd75a4d8ba5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | |||
@@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us | |||
671 | return bytes_from_user; | 671 | return bytes_from_user; |
672 | } | 672 | } |
673 | 673 | ||
674 | /* | ||
675 | * Returns the min and max vrr vfreq through the connector's debugfs file. | ||
676 | * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range | ||
677 | */ | ||
678 | static int vrr_range_show(struct seq_file *m, void *data) | ||
679 | { | ||
680 | struct drm_connector *connector = m->private; | ||
681 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | ||
682 | |||
683 | if (connector->status != connector_status_connected) | ||
684 | return -ENODEV; | ||
685 | |||
686 | seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq); | ||
687 | seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq); | ||
688 | |||
689 | return 0; | ||
690 | } | ||
691 | DEFINE_SHOW_ATTRIBUTE(vrr_range); | ||
692 | |||
674 | static const struct file_operations dp_link_settings_debugfs_fops = { | 693 | static const struct file_operations dp_link_settings_debugfs_fops = { |
675 | .owner = THIS_MODULE, | 694 | .owner = THIS_MODULE, |
676 | .read = dp_link_settings_read, | 695 | .read = dp_link_settings_read, |
@@ -697,7 +716,8 @@ static const struct { | |||
697 | } dp_debugfs_entries[] = { | 716 | } dp_debugfs_entries[] = { |
698 | {"link_settings", &dp_link_settings_debugfs_fops}, | 717 | {"link_settings", &dp_link_settings_debugfs_fops}, |
699 | {"phy_settings", &dp_phy_settings_debugfs_fop}, | 718 | {"phy_settings", &dp_phy_settings_debugfs_fop}, |
700 | {"test_pattern", &dp_phy_test_pattern_fops} | 719 | {"test_pattern", &dp_phy_test_pattern_fops}, |
720 | {"vrr_range", &vrr_range_fops} | ||
701 | }; | 721 | }; |
702 | 722 | ||
703 | int connector_debugfs_init(struct amdgpu_dm_connector *connector) | 723 | int connector_debugfs_init(struct amdgpu_dm_connector *connector) |
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 99cba8ea5d82..5df1256618cc 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c | |||
@@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, | |||
528 | 528 | ||
529 | object_count = cl->object_count; | 529 | object_count = cl->object_count; |
530 | 530 | ||
531 | object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); | 531 | object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), |
532 | array_size(object_count, sizeof(__u32))); | ||
532 | if (IS_ERR(object_ids)) | 533 | if (IS_ERR(object_ids)) |
533 | return PTR_ERR(object_ids); | 534 | return PTR_ERR(object_ids); |
534 | 535 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 216f52b744a6..c882ea94172c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1824 | return 0; | 1824 | return 0; |
1825 | } | 1825 | } |
1826 | 1826 | ||
1827 | static inline bool | ||
1828 | __vma_matches(struct vm_area_struct *vma, struct file *filp, | ||
1829 | unsigned long addr, unsigned long size) | ||
1830 | { | ||
1831 | if (vma->vm_file != filp) | ||
1832 | return false; | ||
1833 | |||
1834 | return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; | ||
1835 | } | ||
1836 | |||
1827 | /** | 1837 | /** |
1828 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address | 1838 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address |
1829 | * it is mapped to. | 1839 | * it is mapped to. |
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1882 | return -EINTR; | 1892 | return -EINTR; |
1883 | } | 1893 | } |
1884 | vma = find_vma(mm, addr); | 1894 | vma = find_vma(mm, addr); |
1885 | if (vma) | 1895 | if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) |
1886 | vma->vm_page_prot = | 1896 | vma->vm_page_prot = |
1887 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | 1897 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
1888 | else | 1898 | else |
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..017fc602a10e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c | |||
@@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event) | |||
594 | * Update the bitmask of enabled events and increment | 594 | * Update the bitmask of enabled events and increment |
595 | * the event reference counter. | 595 | * the event reference counter. |
596 | */ | 596 | */ |
597 | GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); | 597 | BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); |
598 | GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); | ||
598 | GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); | 599 | GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); |
599 | i915->pmu.enable |= BIT_ULL(bit); | 600 | i915->pmu.enable |= BIT_ULL(bit); |
600 | i915->pmu.enable_count[bit]++; | 601 | i915->pmu.enable_count[bit]++; |
@@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event) | |||
615 | engine = intel_engine_lookup_user(i915, | 616 | engine = intel_engine_lookup_user(i915, |
616 | engine_event_class(event), | 617 | engine_event_class(event), |
617 | engine_event_instance(event)); | 618 | engine_event_instance(event)); |
618 | GEM_BUG_ON(!engine); | ||
619 | engine->pmu.enable |= BIT(sample); | ||
620 | 619 | ||
621 | GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); | 620 | BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != |
621 | I915_ENGINE_SAMPLE_COUNT); | ||
622 | BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != | ||
623 | I915_ENGINE_SAMPLE_COUNT); | ||
624 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); | ||
625 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); | ||
622 | GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); | 626 | GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); |
627 | |||
628 | engine->pmu.enable |= BIT(sample); | ||
623 | engine->pmu.enable_count[sample]++; | 629 | engine->pmu.enable_count[sample]++; |
624 | } | 630 | } |
625 | 631 | ||
@@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event) | |||
649 | engine = intel_engine_lookup_user(i915, | 655 | engine = intel_engine_lookup_user(i915, |
650 | engine_event_class(event), | 656 | engine_event_class(event), |
651 | engine_event_instance(event)); | 657 | engine_event_instance(event)); |
652 | GEM_BUG_ON(!engine); | 658 | |
653 | GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); | 659 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); |
660 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); | ||
654 | GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); | 661 | GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); |
662 | |||
655 | /* | 663 | /* |
656 | * Decrement the reference count and clear the enabled | 664 | * Decrement the reference count and clear the enabled |
657 | * bitmask when the last listener on an event goes away. | 665 | * bitmask when the last listener on an event goes away. |
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event) | |||
660 | engine->pmu.enable &= ~BIT(sample); | 668 | engine->pmu.enable &= ~BIT(sample); |
661 | } | 669 | } |
662 | 670 | ||
663 | GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); | 671 | GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); |
664 | GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); | 672 | GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); |
665 | /* | 673 | /* |
666 | * Decrement the reference count and clear the enabled | 674 | * Decrement the reference count and clear the enabled |
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 7f164ca3db12..b3728c5f13e7 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h | |||
@@ -31,6 +31,8 @@ enum { | |||
31 | ((1 << I915_PMU_SAMPLE_BITS) + \ | 31 | ((1 << I915_PMU_SAMPLE_BITS) + \ |
32 | (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) | 32 | (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) |
33 | 33 | ||
34 | #define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) | ||
35 | |||
34 | struct i915_pmu_sample { | 36 | struct i915_pmu_sample { |
35 | u64 cur; | 37 | u64 cur; |
36 | }; | 38 | }; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0a7d60509ca7..067054cf4a86 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1790,7 +1790,7 @@ enum i915_power_well_id { | |||
1790 | #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 | 1790 | #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 |
1791 | #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 | 1791 | #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 |
1792 | #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 | 1792 | #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 |
1793 | #define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ | 1793 | #define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \ |
1794 | _CNL_PORT_TX_AE_GRP_OFFSET, \ | 1794 | _CNL_PORT_TX_AE_GRP_OFFSET, \ |
1795 | _CNL_PORT_TX_B_GRP_OFFSET, \ | 1795 | _CNL_PORT_TX_B_GRP_OFFSET, \ |
1796 | _CNL_PORT_TX_B_GRP_OFFSET, \ | 1796 | _CNL_PORT_TX_B_GRP_OFFSET, \ |
@@ -1798,7 +1798,7 @@ enum i915_power_well_id { | |||
1798 | _CNL_PORT_TX_AE_GRP_OFFSET, \ | 1798 | _CNL_PORT_TX_AE_GRP_OFFSET, \ |
1799 | _CNL_PORT_TX_F_GRP_OFFSET) + \ | 1799 | _CNL_PORT_TX_F_GRP_OFFSET) + \ |
1800 | 4 * (dw)) | 1800 | 4 * (dw)) |
1801 | #define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ | 1801 | #define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \ |
1802 | _CNL_PORT_TX_AE_LN0_OFFSET, \ | 1802 | _CNL_PORT_TX_AE_LN0_OFFSET, \ |
1803 | _CNL_PORT_TX_B_LN0_OFFSET, \ | 1803 | _CNL_PORT_TX_B_LN0_OFFSET, \ |
1804 | _CNL_PORT_TX_B_LN0_OFFSET, \ | 1804 | _CNL_PORT_TX_B_LN0_OFFSET, \ |
@@ -1834,9 +1834,9 @@ enum i915_power_well_id { | |||
1834 | 1834 | ||
1835 | #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 | 1835 | #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 |
1836 | #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 | 1836 | #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 |
1837 | #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) | 1837 | #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) |
1838 | #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) | 1838 | #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) |
1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ | 1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ |
1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ | 1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ |
1841 | _CNL_PORT_TX_DW4_LN0_AE))) | 1841 | _CNL_PORT_TX_DW4_LN0_AE))) |
1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) | 1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) |
@@ -1864,8 +1864,12 @@ enum i915_power_well_id { | |||
1864 | #define RTERM_SELECT(x) ((x) << 3) | 1864 | #define RTERM_SELECT(x) ((x) << 3) |
1865 | #define RTERM_SELECT_MASK (0x7 << 3) | 1865 | #define RTERM_SELECT_MASK (0x7 << 3) |
1866 | 1866 | ||
1867 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) | 1867 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) |
1868 | #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) | 1868 | #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) |
1869 | #define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) | ||
1870 | #define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) | ||
1871 | #define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) | ||
1872 | #define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) | ||
1869 | #define N_SCALAR(x) ((x) << 24) | 1873 | #define N_SCALAR(x) ((x) << 24) |
1870 | #define N_SCALAR_MASK (0x7F << 24) | 1874 | #define N_SCALAR_MASK (0x7F << 24) |
1871 | 1875 | ||
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 4079050f9d6c..7edce1b7b348 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { | |||
494 | { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ | 494 | { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ |
495 | }; | 495 | }; |
496 | 496 | ||
497 | struct icl_combo_phy_ddi_buf_trans { | 497 | /* icl_combo_phy_ddi_translations */ |
498 | u32 dw2_swing_select; | 498 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { |
499 | u32 dw2_swing_scalar; | 499 | /* NT mV Trans mV db */ |
500 | u32 dw4_scaling; | 500 | { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
501 | }; | 501 | { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ |
502 | 502 | { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ | |
503 | /* Voltage Swing Programming for VccIO 0.85V for DP */ | 503 | { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ |
504 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { | 504 | { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ |
505 | /* Voltage mV db */ | 505 | { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ |
506 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | 506 | { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ |
507 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | 507 | { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ |
508 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | 508 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ |
509 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | 509 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ |
510 | { 0xB, 0x70, 0x0018 }, /* 600 0.0 */ | ||
511 | { 0xB, 0x70, 0x3015 }, /* 600 3.5 */ | ||
512 | { 0xB, 0x70, 0x6012 }, /* 600 6.0 */ | ||
513 | { 0x5, 0x00, 0x0018 }, /* 800 0.0 */ | ||
514 | { 0x5, 0x00, 0x3015 }, /* 800 3.5 */ | ||
515 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
516 | }; | ||
517 | |||
518 | /* FIXME - After table is updated in Bspec */ | ||
519 | /* Voltage Swing Programming for VccIO 0.85V for eDP */ | ||
520 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = { | ||
521 | /* Voltage mV db */ | ||
522 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | ||
523 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | ||
524 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | ||
525 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | ||
526 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | ||
527 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | ||
528 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | ||
529 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | ||
530 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
531 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
532 | }; | ||
533 | |||
534 | /* Voltage Swing Programming for VccIO 0.95V for DP */ | ||
535 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = { | ||
536 | /* Voltage mV db */ | ||
537 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | ||
538 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | ||
539 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | ||
540 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | ||
541 | { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ | ||
542 | { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ | ||
543 | { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ | ||
544 | { 0x5, 0x76, 0x0018 }, /* 800 0.0 */ | ||
545 | { 0x5, 0x76, 0x3015 }, /* 800 3.5 */ | ||
546 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
547 | }; | 510 | }; |
548 | 511 | ||
549 | /* FIXME - After table is updated in Bspec */ | 512 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { |
550 | /* Voltage Swing Programming for VccIO 0.95V for eDP */ | 513 | /* NT mV Trans mV db */ |
551 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { | 514 | { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ |
552 | /* Voltage mV db */ | 515 | { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ |
553 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | 516 | { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ |
554 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | 517 | { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ |
555 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | 518 | { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ |
556 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | 519 | { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ |
557 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | 520 | { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ |
558 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | 521 | { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ |
559 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | 522 | { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ |
560 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | 523 | { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
561 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
562 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
563 | }; | 524 | }; |
564 | 525 | ||
565 | /* Voltage Swing Programming for VccIO 1.05V for DP */ | 526 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { |
566 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { | 527 | /* NT mV Trans mV db */ |
567 | /* Voltage mV db */ | 528 | { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
568 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | 529 | { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ |
569 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | 530 | { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ |
570 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | 531 | { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ |
571 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | 532 | { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ |
572 | { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ | 533 | { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ |
573 | { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ | 534 | { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ |
574 | { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ | 535 | { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ |
575 | { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ | 536 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ |
576 | { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ | 537 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ |
577 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
578 | }; | 538 | }; |
579 | 539 | ||
580 | /* FIXME - After table is updated in Bspec */ | 540 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { |
581 | /* Voltage Swing Programming for VccIO 1.05V for eDP */ | 541 | /* NT mV Trans mV db */ |
582 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { | 542 | { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ |
583 | /* Voltage mV db */ | 543 | { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ |
584 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | 544 | { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ |
585 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | 545 | { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ |
586 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | 546 | { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ |
587 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | 547 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ |
588 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | 548 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ |
589 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | ||
590 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | ||
591 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | ||
592 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
593 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
594 | }; | 549 | }; |
595 | 550 | ||
596 | struct icl_mg_phy_ddi_buf_trans { | 551 | struct icl_mg_phy_ddi_buf_trans { |
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) | |||
871 | } | 826 | } |
872 | } | 827 | } |
873 | 828 | ||
874 | static const struct icl_combo_phy_ddi_buf_trans * | 829 | static const struct cnl_ddi_buf_trans * |
875 | icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, | 830 | icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, |
876 | int type, int *n_entries) | 831 | int type, int rate, int *n_entries) |
877 | { | 832 | { |
878 | u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; | 833 | if (type == INTEL_OUTPUT_HDMI) { |
879 | 834 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); | |
880 | if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { | 835 | return icl_combo_phy_ddi_translations_hdmi; |
881 | switch (voltage) { | 836 | } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { |
882 | case VOLTAGE_INFO_0_85V: | 837 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); |
883 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); | 838 | return icl_combo_phy_ddi_translations_edp_hbr3; |
884 | return icl_combo_phy_ddi_translations_edp_0_85V; | 839 | } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { |
885 | case VOLTAGE_INFO_0_95V: | 840 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); |
886 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); | 841 | return icl_combo_phy_ddi_translations_edp_hbr2; |
887 | return icl_combo_phy_ddi_translations_edp_0_95V; | ||
888 | case VOLTAGE_INFO_1_05V: | ||
889 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V); | ||
890 | return icl_combo_phy_ddi_translations_edp_1_05V; | ||
891 | default: | ||
892 | MISSING_CASE(voltage); | ||
893 | return NULL; | ||
894 | } | ||
895 | } else { | ||
896 | switch (voltage) { | ||
897 | case VOLTAGE_INFO_0_85V: | ||
898 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V); | ||
899 | return icl_combo_phy_ddi_translations_dp_hdmi_0_85V; | ||
900 | case VOLTAGE_INFO_0_95V: | ||
901 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V); | ||
902 | return icl_combo_phy_ddi_translations_dp_hdmi_0_95V; | ||
903 | case VOLTAGE_INFO_1_05V: | ||
904 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V); | ||
905 | return icl_combo_phy_ddi_translations_dp_hdmi_1_05V; | ||
906 | default: | ||
907 | MISSING_CASE(voltage); | ||
908 | return NULL; | ||
909 | } | ||
910 | } | 842 | } |
843 | |||
844 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); | ||
845 | return icl_combo_phy_ddi_translations_dp_hbr2; | ||
911 | } | 846 | } |
912 | 847 | ||
913 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) | 848 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) |
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por | |||
918 | 853 | ||
919 | if (IS_ICELAKE(dev_priv)) { | 854 | if (IS_ICELAKE(dev_priv)) { |
920 | if (intel_port_is_combophy(dev_priv, port)) | 855 | if (intel_port_is_combophy(dev_priv, port)) |
921 | icl_get_combo_buf_trans(dev_priv, port, | 856 | icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, |
922 | INTEL_OUTPUT_HDMI, &n_entries); | 857 | 0, &n_entries); |
923 | else | 858 | else |
924 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); | 859 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); |
925 | default_entry = n_entries - 1; | 860 | default_entry = n_entries - 1; |
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2275 | u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) | 2210 | u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) |
2276 | { | 2211 | { |
2277 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2212 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2213 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | ||
2278 | enum port port = encoder->port; | 2214 | enum port port = encoder->port; |
2279 | int n_entries; | 2215 | int n_entries; |
2280 | 2216 | ||
2281 | if (IS_ICELAKE(dev_priv)) { | 2217 | if (IS_ICELAKE(dev_priv)) { |
2282 | if (intel_port_is_combophy(dev_priv, port)) | 2218 | if (intel_port_is_combophy(dev_priv, port)) |
2283 | icl_get_combo_buf_trans(dev_priv, port, encoder->type, | 2219 | icl_get_combo_buf_trans(dev_priv, port, encoder->type, |
2284 | &n_entries); | 2220 | intel_dp->link_rate, &n_entries); |
2285 | else | 2221 | else |
2286 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); | 2222 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); |
2287 | } else if (IS_CANNONLAKE(dev_priv)) { | 2223 | } else if (IS_CANNONLAKE(dev_priv)) { |
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2462 | } | 2398 | } |
2463 | 2399 | ||
2464 | static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | 2400 | static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, |
2465 | u32 level, enum port port, int type) | 2401 | u32 level, enum port port, int type, |
2402 | int rate) | ||
2466 | { | 2403 | { |
2467 | const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; | 2404 | const struct cnl_ddi_buf_trans *ddi_translations = NULL; |
2468 | u32 n_entries, val; | 2405 | u32 n_entries, val; |
2469 | int ln; | 2406 | int ln; |
2470 | 2407 | ||
2471 | ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, | 2408 | ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, |
2472 | &n_entries); | 2409 | rate, &n_entries); |
2473 | if (!ddi_translations) | 2410 | if (!ddi_translations) |
2474 | return; | 2411 | return; |
2475 | 2412 | ||
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | |||
2478 | level = n_entries - 1; | 2415 | level = n_entries - 1; |
2479 | } | 2416 | } |
2480 | 2417 | ||
2481 | /* Set PORT_TX_DW5 Rterm Sel to 110b. */ | 2418 | /* Set PORT_TX_DW5 */ |
2482 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | 2419 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); |
2483 | val &= ~RTERM_SELECT_MASK; | 2420 | val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | |
2421 | TAP2_DISABLE | TAP3_DISABLE); | ||
2422 | val |= SCALING_MODE_SEL(0x2); | ||
2484 | val |= RTERM_SELECT(0x6); | 2423 | val |= RTERM_SELECT(0x6); |
2485 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2424 | val |= TAP3_DISABLE; |
2486 | |||
2487 | /* Program PORT_TX_DW5 */ | ||
2488 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | ||
2489 | /* Set DisableTap2 and DisableTap3 if MIPI DSI | ||
2490 | * Clear DisableTap2 and DisableTap3 for all other Ports | ||
2491 | */ | ||
2492 | if (type == INTEL_OUTPUT_DSI) { | ||
2493 | val |= TAP2_DISABLE; | ||
2494 | val |= TAP3_DISABLE; | ||
2495 | } else { | ||
2496 | val &= ~TAP2_DISABLE; | ||
2497 | val &= ~TAP3_DISABLE; | ||
2498 | } | ||
2499 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2425 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); |
2500 | 2426 | ||
2501 | /* Program PORT_TX_DW2 */ | 2427 | /* Program PORT_TX_DW2 */ |
2502 | val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); | 2428 | val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); |
2503 | val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | | 2429 | val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | |
2504 | RCOMP_SCALAR_MASK); | 2430 | RCOMP_SCALAR_MASK); |
2505 | val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); | 2431 | val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); |
2506 | val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); | 2432 | val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); |
2507 | /* Program Rcomp scalar for every table entry */ | 2433 | /* Program Rcomp scalar for every table entry */ |
2508 | val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); | 2434 | val |= RCOMP_SCALAR(0x98); |
2509 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); | 2435 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); |
2510 | 2436 | ||
2511 | /* Program PORT_TX_DW4 */ | 2437 | /* Program PORT_TX_DW4 */ |
@@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | |||
2514 | val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); | 2440 | val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); |
2515 | val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | | 2441 | val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | |
2516 | CURSOR_COEFF_MASK); | 2442 | CURSOR_COEFF_MASK); |
2517 | val |= ddi_translations[level].dw4_scaling; | 2443 | val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); |
2444 | val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); | ||
2445 | val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); | ||
2518 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); | 2446 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); |
2519 | } | 2447 | } |
2448 | |||
2449 | /* Program PORT_TX_DW7 */ | ||
2450 | val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); | ||
2451 | val &= ~N_SCALAR_MASK; | ||
2452 | val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); | ||
2453 | I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); | ||
2520 | } | 2454 | } |
2521 | 2455 | ||
2522 | static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, | 2456 | static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, |
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2581 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2515 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); |
2582 | 2516 | ||
2583 | /* 5. Program swing and de-emphasis */ | 2517 | /* 5. Program swing and de-emphasis */ |
2584 | icl_ddi_combo_vswing_program(dev_priv, level, port, type); | 2518 | icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); |
2585 | 2519 | ||
2586 | /* 6. Set training enable to trigger update */ | 2520 | /* 6. Set training enable to trigger update */ |
2587 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | 2521 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fdd2cbc56fa3..22a74608c6e4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) | |||
304 | static int icl_max_source_rate(struct intel_dp *intel_dp) | 304 | static int icl_max_source_rate(struct intel_dp *intel_dp) |
305 | { | 305 | { |
306 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 306 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
307 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
307 | enum port port = dig_port->base.port; | 308 | enum port port = dig_port->base.port; |
308 | 309 | ||
309 | if (port == PORT_B) | 310 | if (intel_port_is_combophy(dev_priv, port) && |
311 | !intel_dp_is_edp(intel_dp)) | ||
310 | return 540000; | 312 | return 540000; |
311 | 313 | ||
312 | return 810000; | 314 | return 810000; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f94a04b4ad87..e9ddeaf05a14 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -209,6 +209,16 @@ struct intel_fbdev { | |||
209 | unsigned long vma_flags; | 209 | unsigned long vma_flags; |
210 | async_cookie_t cookie; | 210 | async_cookie_t cookie; |
211 | int preferred_bpp; | 211 | int preferred_bpp; |
212 | |||
213 | /* Whether or not fbdev hpd processing is temporarily suspended */ | ||
214 | bool hpd_suspended : 1; | ||
215 | /* Set when a hotplug was received while HPD processing was | ||
216 | * suspended | ||
217 | */ | ||
218 | bool hpd_waiting : 1; | ||
219 | |||
220 | /* Protects hpd_suspended */ | ||
221 | struct mutex hpd_lock; | ||
212 | }; | 222 | }; |
213 | 223 | ||
214 | struct intel_encoder { | 224 | struct intel_encoder { |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb5bb5b32a60..7f365ac0b549 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -679,6 +679,7 @@ int intel_fbdev_init(struct drm_device *dev) | |||
679 | if (ifbdev == NULL) | 679 | if (ifbdev == NULL) |
680 | return -ENOMEM; | 680 | return -ENOMEM; |
681 | 681 | ||
682 | mutex_init(&ifbdev->hpd_lock); | ||
682 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); | 683 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); |
683 | 684 | ||
684 | if (!intel_fbdev_init_bios(dev, ifbdev)) | 685 | if (!intel_fbdev_init_bios(dev, ifbdev)) |
@@ -752,6 +753,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) | |||
752 | intel_fbdev_destroy(ifbdev); | 753 | intel_fbdev_destroy(ifbdev); |
753 | } | 754 | } |
754 | 755 | ||
756 | /* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD | ||
757 | * processing, fbdev will perform a full connector reprobe if a hotplug event | ||
758 | * was received while HPD was suspended. | ||
759 | */ | ||
760 | static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) | ||
761 | { | ||
762 | bool send_hpd = false; | ||
763 | |||
764 | mutex_lock(&ifbdev->hpd_lock); | ||
765 | ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; | ||
766 | send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; | ||
767 | ifbdev->hpd_waiting = false; | ||
768 | mutex_unlock(&ifbdev->hpd_lock); | ||
769 | |||
770 | if (send_hpd) { | ||
771 | DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); | ||
772 | drm_fb_helper_hotplug_event(&ifbdev->helper); | ||
773 | } | ||
774 | } | ||
775 | |||
755 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) | 776 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) |
756 | { | 777 | { |
757 | struct drm_i915_private *dev_priv = to_i915(dev); | 778 | struct drm_i915_private *dev_priv = to_i915(dev); |
@@ -773,6 +794,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
773 | */ | 794 | */ |
774 | if (state != FBINFO_STATE_RUNNING) | 795 | if (state != FBINFO_STATE_RUNNING) |
775 | flush_work(&dev_priv->fbdev_suspend_work); | 796 | flush_work(&dev_priv->fbdev_suspend_work); |
797 | |||
776 | console_lock(); | 798 | console_lock(); |
777 | } else { | 799 | } else { |
778 | /* | 800 | /* |
@@ -800,17 +822,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
800 | 822 | ||
801 | drm_fb_helper_set_suspend(&ifbdev->helper, state); | 823 | drm_fb_helper_set_suspend(&ifbdev->helper, state); |
802 | console_unlock(); | 824 | console_unlock(); |
825 | |||
826 | intel_fbdev_hpd_set_suspend(ifbdev, state); | ||
803 | } | 827 | } |
804 | 828 | ||
805 | void intel_fbdev_output_poll_changed(struct drm_device *dev) | 829 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
806 | { | 830 | { |
807 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; | 831 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; |
832 | bool send_hpd; | ||
808 | 833 | ||
809 | if (!ifbdev) | 834 | if (!ifbdev) |
810 | return; | 835 | return; |
811 | 836 | ||
812 | intel_fbdev_sync(ifbdev); | 837 | intel_fbdev_sync(ifbdev); |
813 | if (ifbdev->vma || ifbdev->helper.deferred_setup) | 838 | |
839 | mutex_lock(&ifbdev->hpd_lock); | ||
840 | send_hpd = !ifbdev->hpd_suspended; | ||
841 | ifbdev->hpd_waiting = true; | ||
842 | mutex_unlock(&ifbdev->hpd_lock); | ||
843 | |||
844 | if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) | ||
814 | drm_fb_helper_hotplug_event(&ifbdev->helper); | 845 | drm_fb_helper_hotplug_event(&ifbdev->helper); |
815 | } | 846 | } |
816 | 847 | ||
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8f106d9ecf8..3ac20153705a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -55,7 +55,12 @@ | |||
55 | struct opregion_header { | 55 | struct opregion_header { |
56 | u8 signature[16]; | 56 | u8 signature[16]; |
57 | u32 size; | 57 | u32 size; |
58 | u32 opregion_ver; | 58 | struct { |
59 | u8 rsvd; | ||
60 | u8 revision; | ||
61 | u8 minor; | ||
62 | u8 major; | ||
63 | } __packed over; | ||
59 | u8 bios_ver[32]; | 64 | u8 bios_ver[32]; |
60 | u8 vbios_ver[16]; | 65 | u8 vbios_ver[16]; |
61 | u8 driver_ver[16]; | 66 | u8 driver_ver[16]; |
@@ -119,7 +124,8 @@ struct opregion_asle { | |||
119 | u64 fdss; | 124 | u64 fdss; |
120 | u32 fdsp; | 125 | u32 fdsp; |
121 | u32 stat; | 126 | u32 stat; |
122 | u64 rvda; /* Physical address of raw vbt data */ | 127 | u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) |
128 | * address of raw VBT data. */ | ||
123 | u32 rvds; /* Size of raw vbt data */ | 129 | u32 rvds; /* Size of raw vbt data */ |
124 | u8 rsvd[58]; | 130 | u8 rsvd[58]; |
125 | } __packed; | 131 | } __packed; |
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
925 | opregion->header = base; | 931 | opregion->header = base; |
926 | opregion->lid_state = base + ACPI_CLID; | 932 | opregion->lid_state = base + ACPI_CLID; |
927 | 933 | ||
934 | DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", | ||
935 | opregion->header->over.major, | ||
936 | opregion->header->over.minor, | ||
937 | opregion->header->over.revision); | ||
938 | |||
928 | mboxes = opregion->header->mboxes; | 939 | mboxes = opregion->header->mboxes; |
929 | if (mboxes & MBOX_ACPI) { | 940 | if (mboxes & MBOX_ACPI) { |
930 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 941 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
953 | if (dmi_check_system(intel_no_opregion_vbt)) | 964 | if (dmi_check_system(intel_no_opregion_vbt)) |
954 | goto out; | 965 | goto out; |
955 | 966 | ||
956 | if (opregion->header->opregion_ver >= 2 && opregion->asle && | 967 | if (opregion->header->over.major >= 2 && opregion->asle && |
957 | opregion->asle->rvda && opregion->asle->rvds) { | 968 | opregion->asle->rvda && opregion->asle->rvds) { |
958 | opregion->rvda = memremap(opregion->asle->rvda, | 969 | resource_size_t rvda = opregion->asle->rvda; |
959 | opregion->asle->rvds, | 970 | |
971 | /* | ||
972 | * opregion 2.0: rvda is the physical VBT address. | ||
973 | * | ||
974 | * opregion 2.1+: rvda is unsigned, relative offset from | ||
975 | * opregion base, and should never point within opregion. | ||
976 | */ | ||
977 | if (opregion->header->over.major > 2 || | ||
978 | opregion->header->over.minor >= 1) { | ||
979 | WARN_ON(rvda < OPREGION_SIZE); | ||
980 | |||
981 | rvda += asls; | ||
982 | } | ||
983 | |||
984 | opregion->rvda = memremap(rvda, opregion->asle->rvds, | ||
960 | MEMREMAP_WB); | 985 | MEMREMAP_WB); |
986 | |||
961 | vbt = opregion->rvda; | 987 | vbt = opregion->rvda; |
962 | vbt_size = opregion->asle->rvds; | 988 | vbt_size = opregion->asle->rvds; |
963 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | 989 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { |
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
967 | goto out; | 993 | goto out; |
968 | } else { | 994 | } else { |
969 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); | 995 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); |
996 | memunmap(opregion->rvda); | ||
997 | opregion->rvda = NULL; | ||
970 | } | 998 | } |
971 | } | 999 | } |
972 | 1000 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 72edaa7ff411..a1a7cc29fdd1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -415,16 +415,17 @@ struct intel_engine_cs { | |||
415 | /** | 415 | /** |
416 | * @enable_count: Reference count for the enabled samplers. | 416 | * @enable_count: Reference count for the enabled samplers. |
417 | * | 417 | * |
418 | * Index number corresponds to the bit number from @enable. | 418 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. |
419 | */ | 419 | */ |
420 | unsigned int enable_count[I915_PMU_SAMPLE_BITS]; | 420 | unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; |
421 | /** | 421 | /** |
422 | * @sample: Counter values for sampling events. | 422 | * @sample: Counter values for sampling events. |
423 | * | 423 | * |
424 | * Our internal timer stores the current counters in this field. | 424 | * Our internal timer stores the current counters in this field. |
425 | * | ||
426 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. | ||
425 | */ | 427 | */ |
426 | #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) | 428 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; |
427 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; | ||
428 | } pmu; | 429 | } pmu; |
429 | 430 | ||
430 | /* | 431 | /* |
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 2c5bbe317353..e31e263cf86b 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
@@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
643 | int bus_format; | 643 | int bus_format; |
644 | 644 | ||
645 | ret = of_property_read_u32(child, "reg", &i); | 645 | ret = of_property_read_u32(child, "reg", &i); |
646 | if (ret || i < 0 || i > 1) | 646 | if (ret || i < 0 || i > 1) { |
647 | return -EINVAL; | 647 | ret = -EINVAL; |
648 | goto free_child; | ||
649 | } | ||
648 | 650 | ||
649 | if (!of_device_is_available(child)) | 651 | if (!of_device_is_available(child)) |
650 | continue; | 652 | continue; |
@@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
657 | channel = &imx_ldb->channel[i]; | 659 | channel = &imx_ldb->channel[i]; |
658 | channel->ldb = imx_ldb; | 660 | channel->ldb = imx_ldb; |
659 | channel->chno = i; | 661 | channel->chno = i; |
660 | channel->child = child; | ||
661 | 662 | ||
662 | /* | 663 | /* |
663 | * The output port is port@4 with an external 4-port mux or | 664 | * The output port is port@4 with an external 4-port mux or |
@@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
667 | imx_ldb->lvds_mux ? 4 : 2, 0, | 668 | imx_ldb->lvds_mux ? 4 : 2, 0, |
668 | &channel->panel, &channel->bridge); | 669 | &channel->panel, &channel->bridge); |
669 | if (ret && ret != -ENODEV) | 670 | if (ret && ret != -ENODEV) |
670 | return ret; | 671 | goto free_child; |
671 | 672 | ||
672 | /* panel ddc only if there is no bridge */ | 673 | /* panel ddc only if there is no bridge */ |
673 | if (!channel->bridge) { | 674 | if (!channel->bridge) { |
674 | ret = imx_ldb_panel_ddc(dev, channel, child); | 675 | ret = imx_ldb_panel_ddc(dev, channel, child); |
675 | if (ret) | 676 | if (ret) |
676 | return ret; | 677 | goto free_child; |
677 | } | 678 | } |
678 | 679 | ||
679 | bus_format = of_get_bus_format(dev, child); | 680 | bus_format = of_get_bus_format(dev, child); |
@@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
689 | if (bus_format < 0) { | 690 | if (bus_format < 0) { |
690 | dev_err(dev, "could not determine data mapping: %d\n", | 691 | dev_err(dev, "could not determine data mapping: %d\n", |
691 | bus_format); | 692 | bus_format); |
692 | return bus_format; | 693 | ret = bus_format; |
694 | goto free_child; | ||
693 | } | 695 | } |
694 | channel->bus_format = bus_format; | 696 | channel->bus_format = bus_format; |
697 | channel->child = child; | ||
695 | 698 | ||
696 | ret = imx_ldb_register(drm, channel); | 699 | ret = imx_ldb_register(drm, channel); |
697 | if (ret) | 700 | if (ret) { |
698 | return ret; | 701 | channel->child = NULL; |
702 | goto free_child; | ||
703 | } | ||
699 | } | 704 | } |
700 | 705 | ||
701 | dev_set_drvdata(dev, imx_ldb); | 706 | dev_set_drvdata(dev, imx_ldb); |
702 | 707 | ||
703 | return 0; | 708 | return 0; |
709 | |||
710 | free_child: | ||
711 | of_node_put(child); | ||
712 | return ret; | ||
704 | } | 713 | } |
705 | 714 | ||
706 | static void imx_ldb_unbind(struct device *dev, struct device *master, | 715 | static void imx_ldb_unbind(struct device *dev, struct device *master, |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index c390924de93d..21e964f6ab5c 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
@@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, | |||
370 | if (ret) | 370 | if (ret) |
371 | return ret; | 371 | return ret; |
372 | 372 | ||
373 | /* CRTC should be enabled */ | 373 | /* nothing to check when disabling or disabled */ |
374 | if (!crtc_state->enable) | 374 | if (!crtc_state->enable) |
375 | return -EINVAL; | 375 | return 0; |
376 | 376 | ||
377 | switch (plane->type) { | 377 | switch (plane->type) { |
378 | case DRM_PLANE_TYPE_PRIMARY: | 378 | case DRM_PLANE_TYPE_PRIMARY: |
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 4463d3826ecb..e2942c9a11a7 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c | |||
@@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) | |||
440 | 440 | ||
441 | while ((entity->dependency = | 441 | while ((entity->dependency = |
442 | sched->ops->dependency(sched_job, entity))) { | 442 | sched->ops->dependency(sched_job, entity))) { |
443 | trace_drm_sched_job_wait_dep(sched_job, entity->dependency); | ||
443 | 444 | ||
444 | if (drm_sched_entity_add_dependency_cb(entity)) { | 445 | if (drm_sched_entity_add_dependency_cb(entity)) |
445 | |||
446 | trace_drm_sched_job_wait_dep(sched_job, | ||
447 | entity->dependency); | ||
448 | return NULL; | 446 | return NULL; |
449 | } | ||
450 | } | 447 | } |
451 | 448 | ||
452 | /* skip jobs from entity that marked guilty */ | 449 | /* skip jobs from entity that marked guilty */ |
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c index 9d9e8146db90..d7b409a3c0f8 100644 --- a/drivers/gpu/drm/vkms/vkms_crc.c +++ b/drivers/gpu/drm/vkms/vkms_crc.c | |||
@@ -1,4 +1,5 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | |||
2 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
3 | #include <linux/crc32.h> | 4 | #include <linux/crc32.h> |
4 | #include <drm/drm_atomic.h> | 5 | #include <drm/drm_atomic.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 177bbcb38306..eb56ee893761 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_atomic_helper.h> | 4 | #include <drm/drm_atomic_helper.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 83087877565c..7dcbecb5fac2 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c | |||
@@ -1,9 +1,4 @@ | |||
1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | 2 | ||
8 | /** | 3 | /** |
9 | * DOC: vkms (Virtual Kernel Modesetting) | 4 | * DOC: vkms (Virtual Kernel Modesetting) |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index e4469cd3d254..81f1cfbeb936 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h | |||
@@ -1,3 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
2 | |||
1 | #ifndef _VKMS_DRV_H_ | 3 | #ifndef _VKMS_DRV_H_ |
2 | #define _VKMS_DRV_H_ | 4 | #define _VKMS_DRV_H_ |
3 | 5 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 80311daed47a..138b0bb325cf 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include <linux/shmem_fs.h> | 3 | #include <linux/shmem_fs.h> |
10 | 4 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 271a0eb9042c..4173e4f48334 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_crtc_helper.h> | 4 | #include <drm/drm_crtc_helper.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 418817600ad1..0e67d2d42f0c 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_plane_helper.h> | 4 | #include <drm/drm_plane_helper.h> |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 474b00e19697..0a7d4395d427 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = { | |||
898 | .cpmem_ofs = 0x1f000000, | 898 | .cpmem_ofs = 0x1f000000, |
899 | .srm_ofs = 0x1f040000, | 899 | .srm_ofs = 0x1f040000, |
900 | .tpm_ofs = 0x1f060000, | 900 | .tpm_ofs = 0x1f060000, |
901 | .csi0_ofs = 0x1f030000, | 901 | .csi0_ofs = 0x1e030000, |
902 | .csi1_ofs = 0x1f038000, | 902 | .csi1_ofs = 0x1e038000, |
903 | .ic_ofs = 0x1e020000, | 903 | .ic_ofs = 0x1e020000, |
904 | .disp0_ofs = 0x1e040000, | 904 | .disp0_ofs = 0x1e040000, |
905 | .disp1_ofs = 0x1e048000, | 905 | .disp1_ofs = 0x1e048000, |
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = { | |||
914 | .cpmem_ofs = 0x07000000, | 914 | .cpmem_ofs = 0x07000000, |
915 | .srm_ofs = 0x07040000, | 915 | .srm_ofs = 0x07040000, |
916 | .tpm_ofs = 0x07060000, | 916 | .tpm_ofs = 0x07060000, |
917 | .csi0_ofs = 0x07030000, | 917 | .csi0_ofs = 0x06030000, |
918 | .csi1_ofs = 0x07038000, | 918 | .csi1_ofs = 0x06038000, |
919 | .ic_ofs = 0x06020000, | 919 | .ic_ofs = 0x06020000, |
920 | .disp0_ofs = 0x06040000, | 920 | .disp0_ofs = 0x06040000, |
921 | .disp1_ofs = 0x06048000, | 921 | .disp1_ofs = 0x06048000, |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index 2f8db9d62551..4a28f3fbb0a2 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
@@ -106,6 +106,7 @@ struct ipu_pre { | |||
106 | void *buffer_virt; | 106 | void *buffer_virt; |
107 | bool in_use; | 107 | bool in_use; |
108 | unsigned int safe_window_end; | 108 | unsigned int safe_window_end; |
109 | unsigned int last_bufaddr; | ||
109 | }; | 110 | }; |
110 | 111 | ||
111 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 112 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
@@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
185 | 186 | ||
186 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 187 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
187 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 188 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
189 | pre->last_bufaddr = bufaddr; | ||
188 | 190 | ||
189 | val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | | 191 | val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | |
190 | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | | 192 | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | |
@@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | |||
242 | unsigned short current_yblock; | 244 | unsigned short current_yblock; |
243 | u32 val; | 245 | u32 val; |
244 | 246 | ||
247 | if (bufaddr == pre->last_bufaddr) | ||
248 | return; | ||
249 | |||
245 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 250 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
251 | pre->last_bufaddr = bufaddr; | ||
246 | 252 | ||
247 | do { | 253 | do { |
248 | if (time_after(jiffies, timeout)) { | 254 | if (time_after(jiffies, timeout)) { |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 4adec4ab7d06..59ee01f3d022 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data) | |||
3594 | fan5pin |= cr1b & BIT(5); | 3594 | fan5pin |= cr1b & BIT(5); |
3595 | fan5pin |= creb & BIT(5); | 3595 | fan5pin |= creb & BIT(5); |
3596 | 3596 | ||
3597 | fan6pin = creb & BIT(3); | 3597 | fan6pin = !dsw_en && (cr2d & BIT(1)); |
3598 | fan6pin |= creb & BIT(3); | ||
3598 | 3599 | ||
3599 | pwm5pin |= cr2d & BIT(7); | 3600 | pwm5pin |= cr2d & BIT(7); |
3600 | pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); | 3601 | pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); |
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index ec6e69aa3a8e..d2fbb4bb4a43 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c | |||
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev) | |||
183 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); | 183 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); |
184 | } | 184 | } |
185 | 185 | ||
186 | static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev) | ||
187 | { | ||
188 | i2c_dev->curr_msg = NULL; | ||
189 | i2c_dev->num_msgs = 0; | ||
190 | |||
191 | i2c_dev->msg_buf = NULL; | ||
192 | i2c_dev->msg_buf_remaining = 0; | ||
193 | } | ||
194 | |||
186 | /* | 195 | /* |
187 | * Note about I2C_C_CLEAR on error: | 196 | * Note about I2C_C_CLEAR on error: |
188 | * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in | 197 | * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in |
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], | |||
283 | 292 | ||
284 | time_left = wait_for_completion_timeout(&i2c_dev->completion, | 293 | time_left = wait_for_completion_timeout(&i2c_dev->completion, |
285 | adap->timeout); | 294 | adap->timeout); |
295 | |||
296 | bcm2835_i2c_finish_transfer(i2c_dev); | ||
297 | |||
286 | if (!time_left) { | 298 | if (!time_left) { |
287 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, | 299 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, |
288 | BCM2835_I2C_C_CLEAR); | 300 | BCM2835_I2C_C_CLEAR); |
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index b13605718291..d917cefc5a19 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c | |||
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) | |||
382 | * Check for the message size against FIFO depth and set the | 382 | * Check for the message size against FIFO depth and set the |
383 | * 'hold bus' bit if it is greater than FIFO depth. | 383 | * 'hold bus' bit if it is greater than FIFO depth. |
384 | */ | 384 | */ |
385 | if (id->recv_count > CDNS_I2C_FIFO_DEPTH) | 385 | if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) |
386 | ctrl_reg |= CDNS_I2C_CR_HOLD; | 386 | ctrl_reg |= CDNS_I2C_CR_HOLD; |
387 | else | ||
388 | ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; | ||
387 | 389 | ||
388 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); | 390 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); |
389 | 391 | ||
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id) | |||
440 | * Check for the message size against FIFO depth and set the | 442 | * Check for the message size against FIFO depth and set the |
441 | * 'hold bus' bit if it is greater than FIFO depth. | 443 | * 'hold bus' bit if it is greater than FIFO depth. |
442 | */ | 444 | */ |
443 | if (id->send_count > CDNS_I2C_FIFO_DEPTH) | 445 | if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) |
444 | ctrl_reg |= CDNS_I2C_CR_HOLD; | 446 | ctrl_reg |= CDNS_I2C_CR_HOLD; |
447 | else | ||
448 | ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; | ||
449 | |||
445 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); | 450 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); |
446 | 451 | ||
447 | /* Clear the interrupts in interrupt status register. */ | 452 | /* Clear the interrupts in interrupt status register. */ |
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 4713957b0cbb..a878351f1643 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig | |||
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121 | |||
420 | 420 | ||
421 | config KEYBOARD_SNVS_PWRKEY | 421 | config KEYBOARD_SNVS_PWRKEY |
422 | tristate "IMX SNVS Power Key Driver" | 422 | tristate "IMX SNVS Power Key Driver" |
423 | depends on SOC_IMX6SX | 423 | depends on SOC_IMX6SX || SOC_IMX7D |
424 | depends on OF | 424 | depends on OF |
425 | help | 425 | help |
426 | This is the snvs powerkey driver for the Freescale i.MX application | 426 | This is the snvs powerkey driver for the Freescale i.MX application |
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 312916f99597..73686c2460ce 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c | |||
@@ -75,9 +75,7 @@ | |||
75 | struct cap11xx_led { | 75 | struct cap11xx_led { |
76 | struct cap11xx_priv *priv; | 76 | struct cap11xx_priv *priv; |
77 | struct led_classdev cdev; | 77 | struct led_classdev cdev; |
78 | struct work_struct work; | ||
79 | u32 reg; | 78 | u32 reg; |
80 | enum led_brightness new_brightness; | ||
81 | }; | 79 | }; |
82 | #endif | 80 | #endif |
83 | 81 | ||
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev) | |||
233 | } | 231 | } |
234 | 232 | ||
235 | #ifdef CONFIG_LEDS_CLASS | 233 | #ifdef CONFIG_LEDS_CLASS |
236 | static void cap11xx_led_work(struct work_struct *work) | 234 | static int cap11xx_led_set(struct led_classdev *cdev, |
235 | enum led_brightness value) | ||
237 | { | 236 | { |
238 | struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); | 237 | struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); |
239 | struct cap11xx_priv *priv = led->priv; | 238 | struct cap11xx_priv *priv = led->priv; |
240 | int value = led->new_brightness; | ||
241 | 239 | ||
242 | /* | 240 | /* |
243 | * All LEDs share the same duty cycle as this is a HW limitation. | 241 | * All LEDs share the same duty cycle as this is a HW |
244 | * Brightness levels per LED are either 0 (OFF) and 1 (ON). | 242 | * limitation. Brightness levels per LED are either |
243 | * 0 (OFF) and 1 (ON). | ||
245 | */ | 244 | */ |
246 | regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, | 245 | return regmap_update_bits(priv->regmap, |
247 | BIT(led->reg), value ? BIT(led->reg) : 0); | 246 | CAP11XX_REG_LED_OUTPUT_CONTROL, |
248 | } | 247 | BIT(led->reg), |
249 | 248 | value ? BIT(led->reg) : 0); | |
250 | static void cap11xx_led_set(struct led_classdev *cdev, | ||
251 | enum led_brightness value) | ||
252 | { | ||
253 | struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); | ||
254 | |||
255 | if (led->new_brightness == value) | ||
256 | return; | ||
257 | |||
258 | led->new_brightness = value; | ||
259 | schedule_work(&led->work); | ||
260 | } | 249 | } |
261 | 250 | ||
262 | static int cap11xx_init_leds(struct device *dev, | 251 | static int cap11xx_init_leds(struct device *dev, |
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev, | |||
299 | led->cdev.default_trigger = | 288 | led->cdev.default_trigger = |
300 | of_get_property(child, "linux,default-trigger", NULL); | 289 | of_get_property(child, "linux,default-trigger", NULL); |
301 | led->cdev.flags = 0; | 290 | led->cdev.flags = 0; |
302 | led->cdev.brightness_set = cap11xx_led_set; | 291 | led->cdev.brightness_set_blocking = cap11xx_led_set; |
303 | led->cdev.max_brightness = 1; | 292 | led->cdev.max_brightness = 1; |
304 | led->cdev.brightness = LED_OFF; | 293 | led->cdev.brightness = LED_OFF; |
305 | 294 | ||
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev, | |||
312 | led->reg = reg; | 301 | led->reg = reg; |
313 | led->priv = priv; | 302 | led->priv = priv; |
314 | 303 | ||
315 | INIT_WORK(&led->work, cap11xx_led_work); | ||
316 | |||
317 | error = devm_led_classdev_register(dev, &led->cdev); | 304 | error = devm_led_classdev_register(dev, &led->cdev); |
318 | if (error) { | 305 | if (error) { |
319 | of_node_put(child); | 306 | of_node_put(child); |
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 403452ef00e6..3d1cb7bf5e35 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev) | |||
222 | keypad->stopped = true; | 222 | keypad->stopped = true; |
223 | spin_unlock_irq(&keypad->lock); | 223 | spin_unlock_irq(&keypad->lock); |
224 | 224 | ||
225 | flush_work(&keypad->work.work); | 225 | flush_delayed_work(&keypad->work); |
226 | /* | 226 | /* |
227 | * matrix_keypad_scan() will leave IRQs enabled; | 227 | * matrix_keypad_scan() will leave IRQs enabled; |
228 | * we should disable them now. | 228 | * we should disable them now. |
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c index 43b86482dda0..d466bc07aebb 100644 --- a/drivers/input/keyboard/qt2160.c +++ b/drivers/input/keyboard/qt2160.c | |||
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = { | |||
58 | struct qt2160_led { | 58 | struct qt2160_led { |
59 | struct qt2160_data *qt2160; | 59 | struct qt2160_data *qt2160; |
60 | struct led_classdev cdev; | 60 | struct led_classdev cdev; |
61 | struct work_struct work; | ||
62 | char name[32]; | 61 | char name[32]; |
63 | int id; | 62 | int id; |
64 | enum led_brightness new_brightness; | 63 | enum led_brightness brightness; |
65 | }; | 64 | }; |
66 | #endif | 65 | #endif |
67 | 66 | ||
@@ -74,7 +73,6 @@ struct qt2160_data { | |||
74 | u16 key_matrix; | 73 | u16 key_matrix; |
75 | #ifdef CONFIG_LEDS_CLASS | 74 | #ifdef CONFIG_LEDS_CLASS |
76 | struct qt2160_led leds[QT2160_NUM_LEDS_X]; | 75 | struct qt2160_led leds[QT2160_NUM_LEDS_X]; |
77 | struct mutex led_lock; | ||
78 | #endif | 76 | #endif |
79 | }; | 77 | }; |
80 | 78 | ||
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data); | |||
83 | 81 | ||
84 | #ifdef CONFIG_LEDS_CLASS | 82 | #ifdef CONFIG_LEDS_CLASS |
85 | 83 | ||
86 | static void qt2160_led_work(struct work_struct *work) | 84 | static int qt2160_led_set(struct led_classdev *cdev, |
85 | enum led_brightness value) | ||
87 | { | 86 | { |
88 | struct qt2160_led *led = container_of(work, struct qt2160_led, work); | 87 | struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); |
89 | struct qt2160_data *qt2160 = led->qt2160; | 88 | struct qt2160_data *qt2160 = led->qt2160; |
90 | struct i2c_client *client = qt2160->client; | 89 | struct i2c_client *client = qt2160->client; |
91 | int value = led->new_brightness; | ||
92 | u32 drive, pwmen; | 90 | u32 drive, pwmen; |
93 | 91 | ||
94 | mutex_lock(&qt2160->led_lock); | 92 | if (value != led->brightness) { |
95 | 93 | drive = qt2160_read(client, QT2160_CMD_DRIVE_X); | |
96 | drive = qt2160_read(client, QT2160_CMD_DRIVE_X); | 94 | pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); |
97 | pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); | 95 | if (value != LED_OFF) { |
98 | if (value != LED_OFF) { | 96 | drive |= BIT(led->id); |
99 | drive |= (1 << led->id); | 97 | pwmen |= BIT(led->id); |
100 | pwmen |= (1 << led->id); | ||
101 | |||
102 | } else { | ||
103 | drive &= ~(1 << led->id); | ||
104 | pwmen &= ~(1 << led->id); | ||
105 | } | ||
106 | qt2160_write(client, QT2160_CMD_DRIVE_X, drive); | ||
107 | qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); | ||
108 | 98 | ||
109 | /* | 99 | } else { |
110 | * Changing this register will change the brightness | 100 | drive &= ~BIT(led->id); |
111 | * of every LED in the qt2160. It's a HW limitation. | 101 | pwmen &= ~BIT(led->id); |
112 | */ | 102 | } |
113 | if (value != LED_OFF) | 103 | qt2160_write(client, QT2160_CMD_DRIVE_X, drive); |
114 | qt2160_write(client, QT2160_CMD_PWM_DUTY, value); | 104 | qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); |
115 | 105 | ||
116 | mutex_unlock(&qt2160->led_lock); | 106 | /* |
117 | } | 107 | * Changing this register will change the brightness |
108 | * of every LED in the qt2160. It's a HW limitation. | ||
109 | */ | ||
110 | if (value != LED_OFF) | ||
111 | qt2160_write(client, QT2160_CMD_PWM_DUTY, value); | ||
118 | 112 | ||
119 | static void qt2160_led_set(struct led_classdev *cdev, | 113 | led->brightness = value; |
120 | enum led_brightness value) | 114 | } |
121 | { | ||
122 | struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); | ||
123 | 115 | ||
124 | led->new_brightness = value; | 116 | return 0; |
125 | schedule_work(&led->work); | ||
126 | } | 117 | } |
127 | 118 | ||
128 | #endif /* CONFIG_LEDS_CLASS */ | 119 | #endif /* CONFIG_LEDS_CLASS */ |
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160) | |||
293 | int ret; | 284 | int ret; |
294 | int i; | 285 | int i; |
295 | 286 | ||
296 | mutex_init(&qt2160->led_lock); | ||
297 | |||
298 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { | 287 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { |
299 | struct qt2160_led *led = &qt2160->leds[i]; | 288 | struct qt2160_led *led = &qt2160->leds[i]; |
300 | 289 | ||
301 | snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); | 290 | snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); |
302 | led->cdev.name = led->name; | 291 | led->cdev.name = led->name; |
303 | led->cdev.brightness_set = qt2160_led_set; | 292 | led->cdev.brightness_set_blocking = qt2160_led_set; |
304 | led->cdev.brightness = LED_OFF; | 293 | led->cdev.brightness = LED_OFF; |
305 | led->id = i; | 294 | led->id = i; |
306 | led->qt2160 = qt2160; | 295 | led->qt2160 = qt2160; |
307 | 296 | ||
308 | INIT_WORK(&led->work, qt2160_led_work); | ||
309 | |||
310 | ret = led_classdev_register(&client->dev, &led->cdev); | 297 | ret = led_classdev_register(&client->dev, &led->cdev); |
311 | if (ret < 0) | 298 | if (ret < 0) |
312 | return ret; | 299 | return ret; |
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160) | |||
324 | { | 311 | { |
325 | int i; | 312 | int i; |
326 | 313 | ||
327 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { | 314 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) |
328 | led_classdev_unregister(&qt2160->leds[i].cdev); | 315 | led_classdev_unregister(&qt2160->leds[i].cdev); |
329 | cancel_work_sync(&qt2160->leds[i].work); | ||
330 | } | ||
331 | } | 316 | } |
332 | 317 | ||
333 | #else | 318 | #else |
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index babcfb165e4f..3b85631fde91 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c | |||
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev) | |||
153 | 153 | ||
154 | input_dev->id.bustype = BUS_HOST; | 154 | input_dev->id.bustype = BUS_HOST; |
155 | 155 | ||
156 | keypad_data->input_dev = input_dev; | ||
157 | |||
156 | error = keypad_matrix_key_parse_dt(keypad_data); | 158 | error = keypad_matrix_key_parse_dt(keypad_data); |
157 | if (error) | 159 | if (error) |
158 | return error; | 160 | return error; |
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev) | |||
168 | 170 | ||
169 | input_set_drvdata(input_dev, keypad_data); | 171 | input_set_drvdata(input_dev, keypad_data); |
170 | 172 | ||
171 | keypad_data->input_dev = input_dev; | ||
172 | |||
173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
174 | keypad_data->base = devm_ioremap_resource(&pdev->dev, res); | 174 | keypad_data->base = devm_ioremap_resource(&pdev->dev, res); |
175 | if (IS_ERR(keypad_data->base)) | 175 | if (IS_ERR(keypad_data->base)) |
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c index 094bddf56755..c1e66f45d552 100644 --- a/drivers/input/misc/apanel.c +++ b/drivers/input/misc/apanel.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/input-polldev.h> | 23 | #include <linux/input-polldev.h> |
24 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/leds.h> | 25 | #include <linux/leds.h> |
27 | 26 | ||
28 | #define APANEL_NAME "Fujitsu Application Panel" | 27 | #define APANEL_NAME "Fujitsu Application Panel" |
@@ -59,8 +58,6 @@ struct apanel { | |||
59 | struct i2c_client *client; | 58 | struct i2c_client *client; |
60 | unsigned short keymap[MAX_PANEL_KEYS]; | 59 | unsigned short keymap[MAX_PANEL_KEYS]; |
61 | u16 nkeys; | 60 | u16 nkeys; |
62 | u16 led_bits; | ||
63 | struct work_struct led_work; | ||
64 | struct led_classdev mail_led; | 61 | struct led_classdev mail_led; |
65 | }; | 62 | }; |
66 | 63 | ||
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev) | |||
109 | report_key(idev, ap->keymap[i]); | 106 | report_key(idev, ap->keymap[i]); |
110 | } | 107 | } |
111 | 108 | ||
112 | /* Track state changes of LED */ | 109 | static int mail_led_set(struct led_classdev *led, |
113 | static void led_update(struct work_struct *work) | ||
114 | { | ||
115 | struct apanel *ap = container_of(work, struct apanel, led_work); | ||
116 | |||
117 | i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits); | ||
118 | } | ||
119 | |||
120 | static void mail_led_set(struct led_classdev *led, | ||
121 | enum led_brightness value) | 110 | enum led_brightness value) |
122 | { | 111 | { |
123 | struct apanel *ap = container_of(led, struct apanel, mail_led); | 112 | struct apanel *ap = container_of(led, struct apanel, mail_led); |
113 | u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000; | ||
124 | 114 | ||
125 | if (value != LED_OFF) | 115 | return i2c_smbus_write_word_data(ap->client, 0x10, led_bits); |
126 | ap->led_bits |= 0x8000; | ||
127 | else | ||
128 | ap->led_bits &= ~0x8000; | ||
129 | |||
130 | schedule_work(&ap->led_work); | ||
131 | } | 116 | } |
132 | 117 | ||
133 | static int apanel_remove(struct i2c_client *client) | 118 | static int apanel_remove(struct i2c_client *client) |
@@ -179,7 +164,7 @@ static struct apanel apanel = { | |||
179 | }, | 164 | }, |
180 | .mail_led = { | 165 | .mail_led = { |
181 | .name = "mail:blue", | 166 | .name = "mail:blue", |
182 | .brightness_set = mail_led_set, | 167 | .brightness_set_blocking = mail_led_set, |
183 | }, | 168 | }, |
184 | }; | 169 | }; |
185 | 170 | ||
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client, | |||
235 | if (err) | 220 | if (err) |
236 | goto out3; | 221 | goto out3; |
237 | 222 | ||
238 | INIT_WORK(&ap->led_work, led_update); | ||
239 | if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { | 223 | if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { |
240 | err = led_classdev_register(&client->dev, &ap->mail_led); | 224 | err = led_classdev_register(&client->dev, &ap->mail_led); |
241 | if (err) | 225 | if (err) |
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c index 1efcfdf9f8a8..dd9dd4e40827 100644 --- a/drivers/input/misc/bma150.c +++ b/drivers/input/misc/bma150.c | |||
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150) | |||
481 | idev->close = bma150_irq_close; | 481 | idev->close = bma150_irq_close; |
482 | input_set_drvdata(idev, bma150); | 482 | input_set_drvdata(idev, bma150); |
483 | 483 | ||
484 | bma150->input = idev; | ||
485 | |||
484 | error = input_register_device(idev); | 486 | error = input_register_device(idev); |
485 | if (error) { | 487 | if (error) { |
486 | input_free_device(idev); | 488 | input_free_device(idev); |
487 | return error; | 489 | return error; |
488 | } | 490 | } |
489 | 491 | ||
490 | bma150->input = idev; | ||
491 | return 0; | 492 | return 0; |
492 | } | 493 | } |
493 | 494 | ||
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150) | |||
510 | 511 | ||
511 | bma150_init_input_device(bma150, ipoll_dev->input); | 512 | bma150_init_input_device(bma150, ipoll_dev->input); |
512 | 513 | ||
514 | bma150->input_polled = ipoll_dev; | ||
515 | bma150->input = ipoll_dev->input; | ||
516 | |||
513 | error = input_register_polled_device(ipoll_dev); | 517 | error = input_register_polled_device(ipoll_dev); |
514 | if (error) { | 518 | if (error) { |
515 | input_free_polled_device(ipoll_dev); | 519 | input_free_polled_device(ipoll_dev); |
516 | return error; | 520 | return error; |
517 | } | 521 | } |
518 | 522 | ||
519 | bma150->input_polled = ipoll_dev; | ||
520 | bma150->input = ipoll_dev->input; | ||
521 | |||
522 | return 0; | 523 | return 0; |
523 | } | 524 | } |
524 | 525 | ||
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c index 55da191ae550..dbb6d9e1b947 100644 --- a/drivers/input/misc/pwm-vibra.c +++ b/drivers/input/misc/pwm-vibra.c | |||
@@ -34,6 +34,7 @@ struct pwm_vibrator { | |||
34 | struct work_struct play_work; | 34 | struct work_struct play_work; |
35 | u16 level; | 35 | u16 level; |
36 | u32 direction_duty_cycle; | 36 | u32 direction_duty_cycle; |
37 | bool vcc_on; | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | 40 | static int pwm_vibrator_start(struct pwm_vibrator *vibrator) |
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | |||
42 | struct pwm_state state; | 43 | struct pwm_state state; |
43 | int err; | 44 | int err; |
44 | 45 | ||
45 | err = regulator_enable(vibrator->vcc); | 46 | if (!vibrator->vcc_on) { |
46 | if (err) { | 47 | err = regulator_enable(vibrator->vcc); |
47 | dev_err(pdev, "failed to enable regulator: %d", err); | 48 | if (err) { |
48 | return err; | 49 | dev_err(pdev, "failed to enable regulator: %d", err); |
50 | return err; | ||
51 | } | ||
52 | vibrator->vcc_on = true; | ||
49 | } | 53 | } |
50 | 54 | ||
51 | pwm_get_state(vibrator->pwm, &state); | 55 | pwm_get_state(vibrator->pwm, &state); |
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | |||
76 | 80 | ||
77 | static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) | 81 | static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) |
78 | { | 82 | { |
79 | regulator_disable(vibrator->vcc); | ||
80 | |||
81 | if (vibrator->pwm_dir) | 83 | if (vibrator->pwm_dir) |
82 | pwm_disable(vibrator->pwm_dir); | 84 | pwm_disable(vibrator->pwm_dir); |
83 | pwm_disable(vibrator->pwm); | 85 | pwm_disable(vibrator->pwm); |
86 | |||
87 | if (vibrator->vcc_on) { | ||
88 | regulator_disable(vibrator->vcc); | ||
89 | vibrator->vcc_on = false; | ||
90 | } | ||
84 | } | 91 | } |
85 | 92 | ||
86 | static void pwm_vibrator_play_work(struct work_struct *work) | 93 | static void pwm_vibrator_play_work(struct work_struct *work) |
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index f322a1768fbb..225ae6980182 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id); | |||
1336 | static const struct acpi_device_id elan_acpi_id[] = { | 1336 | static const struct acpi_device_id elan_acpi_id[] = { |
1337 | { "ELAN0000", 0 }, | 1337 | { "ELAN0000", 0 }, |
1338 | { "ELAN0100", 0 }, | 1338 | { "ELAN0100", 0 }, |
1339 | { "ELAN0501", 0 }, | ||
1340 | { "ELAN0600", 0 }, | 1339 | { "ELAN0600", 0 }, |
1341 | { "ELAN0602", 0 }, | 1340 | { "ELAN0602", 0 }, |
1342 | { "ELAN0605", 0 }, | 1341 | { "ELAN0605", 0 }, |
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = { | |||
1346 | { "ELAN060C", 0 }, | 1345 | { "ELAN060C", 0 }, |
1347 | { "ELAN0611", 0 }, | 1346 | { "ELAN0611", 0 }, |
1348 | { "ELAN0612", 0 }, | 1347 | { "ELAN0612", 0 }, |
1348 | { "ELAN0617", 0 }, | ||
1349 | { "ELAN0618", 0 }, | 1349 | { "ELAN0618", 0 }, |
1350 | { "ELAN061C", 0 }, | 1350 | { "ELAN061C", 0 }, |
1351 | { "ELAN061D", 0 }, | 1351 | { "ELAN061D", 0 }, |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 9fe075c137dc..a7f8b1614559 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, | |||
1119 | * Asus UX31 0x361f00 20, 15, 0e clickpad | 1119 | * Asus UX31 0x361f00 20, 15, 0e clickpad |
1120 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad | 1120 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
1121 | * Avatar AVIU-145A2 0x361f00 ? clickpad | 1121 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
1122 | * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**) | ||
1123 | * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**) | ||
1122 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons | 1124 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons |
1123 | * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons | 1125 | * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons |
1124 | * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons | 1126 | * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons |
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { | |||
1171 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), | 1173 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), |
1172 | }, | 1174 | }, |
1173 | }, | 1175 | }, |
1176 | { | ||
1177 | /* Fujitsu H780 also has a middle button */ | ||
1178 | .matches = { | ||
1179 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
1180 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"), | ||
1181 | }, | ||
1182 | }, | ||
1174 | #endif | 1183 | #endif |
1175 | { } | 1184 | { } |
1176 | }; | 1185 | }; |
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c index c62cceb97bb1..5e8d8384aa2a 100644 --- a/drivers/input/serio/ps2-gpio.c +++ b/drivers/input/serio/ps2-gpio.c | |||
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio) | |||
76 | { | 76 | { |
77 | struct ps2_gpio_data *drvdata = serio->port_data; | 77 | struct ps2_gpio_data *drvdata = serio->port_data; |
78 | 78 | ||
79 | flush_delayed_work(&drvdata->tx_work); | ||
79 | disable_irq(drvdata->irq); | 80 | disable_irq(drvdata->irq); |
80 | } | 81 | } |
81 | 82 | ||
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index d713271ebf7c..a64116586b4c 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c | |||
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan) | |||
1396 | 1396 | ||
1397 | /* Clear ring flush state */ | 1397 | /* Clear ring flush state */ |
1398 | timeout = 1000; /* timeout of 1s */ | 1398 | timeout = 1000; /* timeout of 1s */ |
1399 | writel_relaxed(0x0, ring + RING_CONTROL); | 1399 | writel_relaxed(0x0, ring->regs + RING_CONTROL); |
1400 | do { | 1400 | do { |
1401 | if (!(readl_relaxed(ring + RING_FLUSH_DONE) & | 1401 | if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & |
1402 | FLUSH_DONE_MASK)) | 1402 | FLUSH_DONE_MASK)) |
1403 | break; | 1403 | break; |
1404 | mdelay(1); | 1404 | mdelay(1); |
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index c6a7d4582dc6..38d9df3fb199 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c | |||
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) | |||
310 | 310 | ||
311 | return ret; | 311 | return ret; |
312 | } | 312 | } |
313 | EXPORT_SYMBOL_GPL(mbox_flush); | ||
313 | 314 | ||
314 | /** | 315 | /** |
315 | * mbox_request_channel - Request a mailbox channel. | 316 | * mbox_request_channel - Request a mailbox channel. |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 47d4e0d30bf0..dd538e6b2748 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) | |||
932 | if (IS_ERR(bip)) | 932 | if (IS_ERR(bip)) |
933 | return PTR_ERR(bip); | 933 | return PTR_ERR(bip); |
934 | 934 | ||
935 | tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); | 935 | tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); |
936 | 936 | ||
937 | bip->bip_iter.bi_size = tag_len; | 937 | bip->bip_iter.bi_size = tag_len; |
938 | bip->bip_iter.bi_sector = io->cc->start + io->sector; | 938 | bip->bip_iter.bi_sector = io->cc->start + io->sector; |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ca8af21bf644..e83b63608262 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -257,6 +257,7 @@ struct pool { | |||
257 | 257 | ||
258 | spinlock_t lock; | 258 | spinlock_t lock; |
259 | struct bio_list deferred_flush_bios; | 259 | struct bio_list deferred_flush_bios; |
260 | struct bio_list deferred_flush_completions; | ||
260 | struct list_head prepared_mappings; | 261 | struct list_head prepared_mappings; |
261 | struct list_head prepared_discards; | 262 | struct list_head prepared_discards; |
262 | struct list_head prepared_discards_pt2; | 263 | struct list_head prepared_discards_pt2; |
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) | |||
956 | mempool_free(m, &m->tc->pool->mapping_pool); | 957 | mempool_free(m, &m->tc->pool->mapping_pool); |
957 | } | 958 | } |
958 | 959 | ||
960 | static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) | ||
961 | { | ||
962 | struct pool *pool = tc->pool; | ||
963 | unsigned long flags; | ||
964 | |||
965 | /* | ||
966 | * If the bio has the REQ_FUA flag set we must commit the metadata | ||
967 | * before signaling its completion. | ||
968 | */ | ||
969 | if (!bio_triggers_commit(tc, bio)) { | ||
970 | bio_endio(bio); | ||
971 | return; | ||
972 | } | ||
973 | |||
974 | /* | ||
975 | * Complete bio with an error if earlier I/O caused changes to the | ||
976 | * metadata that can't be committed, e.g, due to I/O errors on the | ||
977 | * metadata device. | ||
978 | */ | ||
979 | if (dm_thin_aborted_changes(tc->td)) { | ||
980 | bio_io_error(bio); | ||
981 | return; | ||
982 | } | ||
983 | |||
984 | /* | ||
985 | * Batch together any bios that trigger commits and then issue a | ||
986 | * single commit for them in process_deferred_bios(). | ||
987 | */ | ||
988 | spin_lock_irqsave(&pool->lock, flags); | ||
989 | bio_list_add(&pool->deferred_flush_completions, bio); | ||
990 | spin_unlock_irqrestore(&pool->lock, flags); | ||
991 | } | ||
992 | |||
959 | static void process_prepared_mapping(struct dm_thin_new_mapping *m) | 993 | static void process_prepared_mapping(struct dm_thin_new_mapping *m) |
960 | { | 994 | { |
961 | struct thin_c *tc = m->tc; | 995 | struct thin_c *tc = m->tc; |
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
988 | */ | 1022 | */ |
989 | if (bio) { | 1023 | if (bio) { |
990 | inc_remap_and_issue_cell(tc, m->cell, m->data_block); | 1024 | inc_remap_and_issue_cell(tc, m->cell, m->data_block); |
991 | bio_endio(bio); | 1025 | complete_overwrite_bio(tc, bio); |
992 | } else { | 1026 | } else { |
993 | inc_all_io_entry(tc->pool, m->cell->holder); | 1027 | inc_all_io_entry(tc->pool, m->cell->holder); |
994 | remap_and_issue(tc, m->cell->holder, m->data_block); | 1028 | remap_and_issue(tc, m->cell->holder, m->data_block); |
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool) | |||
2317 | { | 2351 | { |
2318 | unsigned long flags; | 2352 | unsigned long flags; |
2319 | struct bio *bio; | 2353 | struct bio *bio; |
2320 | struct bio_list bios; | 2354 | struct bio_list bios, bio_completions; |
2321 | struct thin_c *tc; | 2355 | struct thin_c *tc; |
2322 | 2356 | ||
2323 | tc = get_first_thin(pool); | 2357 | tc = get_first_thin(pool); |
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool) | |||
2328 | } | 2362 | } |
2329 | 2363 | ||
2330 | /* | 2364 | /* |
2331 | * If there are any deferred flush bios, we must commit | 2365 | * If there are any deferred flush bios, we must commit the metadata |
2332 | * the metadata before issuing them. | 2366 | * before issuing them or signaling their completion. |
2333 | */ | 2367 | */ |
2334 | bio_list_init(&bios); | 2368 | bio_list_init(&bios); |
2369 | bio_list_init(&bio_completions); | ||
2370 | |||
2335 | spin_lock_irqsave(&pool->lock, flags); | 2371 | spin_lock_irqsave(&pool->lock, flags); |
2336 | bio_list_merge(&bios, &pool->deferred_flush_bios); | 2372 | bio_list_merge(&bios, &pool->deferred_flush_bios); |
2337 | bio_list_init(&pool->deferred_flush_bios); | 2373 | bio_list_init(&pool->deferred_flush_bios); |
2374 | |||
2375 | bio_list_merge(&bio_completions, &pool->deferred_flush_completions); | ||
2376 | bio_list_init(&pool->deferred_flush_completions); | ||
2338 | spin_unlock_irqrestore(&pool->lock, flags); | 2377 | spin_unlock_irqrestore(&pool->lock, flags); |
2339 | 2378 | ||
2340 | if (bio_list_empty(&bios) && | 2379 | if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && |
2341 | !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) | 2380 | !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) |
2342 | return; | 2381 | return; |
2343 | 2382 | ||
2344 | if (commit(pool)) { | 2383 | if (commit(pool)) { |
2384 | bio_list_merge(&bios, &bio_completions); | ||
2385 | |||
2345 | while ((bio = bio_list_pop(&bios))) | 2386 | while ((bio = bio_list_pop(&bios))) |
2346 | bio_io_error(bio); | 2387 | bio_io_error(bio); |
2347 | return; | 2388 | return; |
2348 | } | 2389 | } |
2349 | pool->last_commit_jiffies = jiffies; | 2390 | pool->last_commit_jiffies = jiffies; |
2350 | 2391 | ||
2392 | while ((bio = bio_list_pop(&bio_completions))) | ||
2393 | bio_endio(bio); | ||
2394 | |||
2351 | while ((bio = bio_list_pop(&bios))) | 2395 | while ((bio = bio_list_pop(&bios))) |
2352 | generic_make_request(bio); | 2396 | generic_make_request(bio); |
2353 | } | 2397 | } |
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
2954 | INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); | 2998 | INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); |
2955 | spin_lock_init(&pool->lock); | 2999 | spin_lock_init(&pool->lock); |
2956 | bio_list_init(&pool->deferred_flush_bios); | 3000 | bio_list_init(&pool->deferred_flush_bios); |
3001 | bio_list_init(&pool->deferred_flush_completions); | ||
2957 | INIT_LIST_HEAD(&pool->prepared_mappings); | 3002 | INIT_LIST_HEAD(&pool->prepared_mappings); |
2958 | INIT_LIST_HEAD(&pool->prepared_discards); | 3003 | INIT_LIST_HEAD(&pool->prepared_discards); |
2959 | INIT_LIST_HEAD(&pool->prepared_discards_pt2); | 3004 | INIT_LIST_HEAD(&pool->prepared_discards_pt2); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1d54109071cc..fa47249fa3e4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio) | |||
1863 | reschedule_retry(r1_bio); | 1863 | reschedule_retry(r1_bio); |
1864 | } | 1864 | } |
1865 | 1865 | ||
1866 | static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) | ||
1867 | { | ||
1868 | sector_t sync_blocks = 0; | ||
1869 | sector_t s = r1_bio->sector; | ||
1870 | long sectors_to_go = r1_bio->sectors; | ||
1871 | |||
1872 | /* make sure these bits don't get cleared. */ | ||
1873 | do { | ||
1874 | md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); | ||
1875 | s += sync_blocks; | ||
1876 | sectors_to_go -= sync_blocks; | ||
1877 | } while (sectors_to_go > 0); | ||
1878 | } | ||
1879 | |||
1866 | static void end_sync_write(struct bio *bio) | 1880 | static void end_sync_write(struct bio *bio) |
1867 | { | 1881 | { |
1868 | int uptodate = !bio->bi_status; | 1882 | int uptodate = !bio->bi_status; |
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio) | |||
1874 | struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; | 1888 | struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; |
1875 | 1889 | ||
1876 | if (!uptodate) { | 1890 | if (!uptodate) { |
1877 | sector_t sync_blocks = 0; | 1891 | abort_sync_write(mddev, r1_bio); |
1878 | sector_t s = r1_bio->sector; | ||
1879 | long sectors_to_go = r1_bio->sectors; | ||
1880 | /* make sure these bits doesn't get cleared. */ | ||
1881 | do { | ||
1882 | md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); | ||
1883 | s += sync_blocks; | ||
1884 | sectors_to_go -= sync_blocks; | ||
1885 | } while (sectors_to_go > 0); | ||
1886 | set_bit(WriteErrorSeen, &rdev->flags); | 1892 | set_bit(WriteErrorSeen, &rdev->flags); |
1887 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) | 1893 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) |
1888 | set_bit(MD_RECOVERY_NEEDED, & | 1894 | set_bit(MD_RECOVERY_NEEDED, & |
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) | |||
2172 | (i == r1_bio->read_disk || | 2178 | (i == r1_bio->read_disk || |
2173 | !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) | 2179 | !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) |
2174 | continue; | 2180 | continue; |
2175 | if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) | 2181 | if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { |
2182 | abort_sync_write(mddev, r1_bio); | ||
2176 | continue; | 2183 | continue; |
2184 | } | ||
2177 | 2185 | ||
2178 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); | 2186 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); |
2179 | if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) | 2187 | if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index aef1185f383d..14f3fdb8c6bb 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
@@ -2112,7 +2112,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq) | |||
2112 | if (waiting) | 2112 | if (waiting) |
2113 | wake_up(&mq->wait); | 2113 | wake_up(&mq->wait); |
2114 | else | 2114 | else |
2115 | kblockd_schedule_work(&mq->complete_work); | 2115 | queue_work(mq->card->complete_wq, &mq->complete_work); |
2116 | 2116 | ||
2117 | return; | 2117 | return; |
2118 | } | 2118 | } |
@@ -2924,6 +2924,13 @@ static int mmc_blk_probe(struct mmc_card *card) | |||
2924 | 2924 | ||
2925 | mmc_fixup_device(card, mmc_blk_fixups); | 2925 | mmc_fixup_device(card, mmc_blk_fixups); |
2926 | 2926 | ||
2927 | card->complete_wq = alloc_workqueue("mmc_complete", | ||
2928 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | ||
2929 | if (unlikely(!card->complete_wq)) { | ||
2930 | pr_err("Failed to create mmc completion workqueue"); | ||
2931 | return -ENOMEM; | ||
2932 | } | ||
2933 | |||
2927 | md = mmc_blk_alloc(card); | 2934 | md = mmc_blk_alloc(card); |
2928 | if (IS_ERR(md)) | 2935 | if (IS_ERR(md)) |
2929 | return PTR_ERR(md); | 2936 | return PTR_ERR(md); |
@@ -2987,6 +2994,7 @@ static void mmc_blk_remove(struct mmc_card *card) | |||
2987 | pm_runtime_put_noidle(&card->dev); | 2994 | pm_runtime_put_noidle(&card->dev); |
2988 | mmc_blk_remove_req(md); | 2995 | mmc_blk_remove_req(md); |
2989 | dev_set_drvdata(&card->dev, NULL); | 2996 | dev_set_drvdata(&card->dev, NULL); |
2997 | destroy_workqueue(card->complete_wq); | ||
2990 | } | 2998 | } |
2991 | 2999 | ||
2992 | static int _mmc_blk_suspend(struct mmc_card *card) | 3000 | static int _mmc_blk_suspend(struct mmc_card *card) |
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index f19ec60bcbdc..2eba507790e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
@@ -1338,7 +1338,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | |||
1338 | host->regs + SD_EMMC_IRQ_EN); | 1338 | host->regs + SD_EMMC_IRQ_EN); |
1339 | 1339 | ||
1340 | ret = request_threaded_irq(host->irq, meson_mmc_irq, | 1340 | ret = request_threaded_irq(host->irq, meson_mmc_irq, |
1341 | meson_mmc_irq_thread, IRQF_SHARED, NULL, host); | 1341 | meson_mmc_irq_thread, IRQF_SHARED, |
1342 | dev_name(&pdev->dev), host); | ||
1342 | if (ret) | 1343 | if (ret) |
1343 | goto err_init_clk; | 1344 | goto err_init_clk; |
1344 | 1345 | ||
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 279e326e397e..70fadc976795 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
@@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev) | |||
1399 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | | 1399 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | |
1400 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; | 1400 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; |
1401 | 1401 | ||
1402 | if (host->cfg->clk_delays || host->use_new_timings) | 1402 | /* |
1403 | * Some H5 devices do not have signal traces precise enough to | ||
1404 | * use HS DDR mode for their eMMC chips. | ||
1405 | * | ||
1406 | * We still enable HS DDR modes for all the other controller | ||
1407 | * variants that support them. | ||
1408 | */ | ||
1409 | if ((host->cfg->clk_delays || host->use_new_timings) && | ||
1410 | !of_device_is_compatible(pdev->dev.of_node, | ||
1411 | "allwinner,sun50i-h5-emmc")) | ||
1403 | mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; | 1412 | mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; |
1404 | 1413 | ||
1405 | ret = mmc_of_parse(mmc); | 1414 | ret = mmc_of_parse(mmc); |
1406 | if (ret) | 1415 | if (ret) |
1407 | goto error_free_dma; | 1416 | goto error_free_dma; |
1408 | 1417 | ||
1418 | /* | ||
1419 | * If we don't support delay chains in the SoC, we can't use any | ||
1420 | * of the higher speed modes. Mask them out in case the device | ||
1421 | * tree specifies the properties for them, which gets added to | ||
1422 | * the caps by mmc_of_parse() above. | ||
1423 | */ | ||
1424 | if (!(host->cfg->clk_delays || host->use_new_timings)) { | ||
1425 | mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | | ||
1426 | MMC_CAP_1_2V_DDR | MMC_CAP_UHS); | ||
1427 | mmc->caps2 &= ~MMC_CAP2_HS200; | ||
1428 | } | ||
1429 | |||
1430 | /* TODO: This driver doesn't support HS400 mode yet */ | ||
1431 | mmc->caps2 &= ~MMC_CAP2_HS400; | ||
1432 | |||
1409 | ret = sunxi_mmc_init_host(host); | 1433 | ret = sunxi_mmc_init_host(host); |
1410 | if (ret) | 1434 | if (ret) |
1411 | goto error_free_dma; | 1435 | goto error_free_dma; |
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 22f753e555ac..83f88b8b5d9f 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c | |||
@@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev, | |||
212 | * Going to have to check what details I need to set and how to | 212 | * Going to have to check what details I need to set and how to |
213 | * get them | 213 | * get them |
214 | */ | 214 | */ |
215 | mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node); | 215 | mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); |
216 | mtd->type = MTD_NORFLASH; | 216 | mtd->type = MTD_NORFLASH; |
217 | mtd->flags = MTD_WRITEABLE; | 217 | mtd->flags = MTD_WRITEABLE; |
218 | mtd->size = size; | 218 | mtd->size = size; |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 999b705769a8..3ef01baef9b6 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -507,6 +507,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) | |||
507 | { | 507 | { |
508 | struct nvmem_config config = {}; | 508 | struct nvmem_config config = {}; |
509 | 509 | ||
510 | config.id = -1; | ||
510 | config.dev = &mtd->dev; | 511 | config.dev = &mtd->dev; |
511 | config.name = mtd->name; | 512 | config.name = mtd->name; |
512 | config.owner = THIS_MODULE; | 513 | config.owner = THIS_MODULE; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index edb1c023a753..21bf8ac78380 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -197,9 +197,9 @@ config VXLAN | |||
197 | 197 | ||
198 | config GENEVE | 198 | config GENEVE |
199 | tristate "Generic Network Virtualization Encapsulation" | 199 | tristate "Generic Network Virtualization Encapsulation" |
200 | depends on INET && NET_UDP_TUNNEL | 200 | depends on INET |
201 | depends on IPV6 || !IPV6 | 201 | depends on IPV6 || !IPV6 |
202 | select NET_IP_TUNNEL | 202 | select NET_UDP_TUNNEL |
203 | select GRO_CELLS | 203 | select GRO_CELLS |
204 | ---help--- | 204 | ---help--- |
205 | This allows one to create geneve virtual interfaces that provide | 205 | This allows one to create geneve virtual interfaces that provide |
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0e4bbdcc614f..c76892ac4e69 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c | |||
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) | |||
344 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); | 344 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); |
345 | } | 345 | } |
346 | 346 | ||
347 | static void b53_enable_vlan(struct b53_device *dev, bool enable) | 347 | static void b53_enable_vlan(struct b53_device *dev, bool enable, |
348 | bool enable_filtering) | ||
348 | { | 349 | { |
349 | u8 mgmt, vc0, vc1, vc4 = 0, vc5; | 350 | u8 mgmt, vc0, vc1, vc4 = 0, vc5; |
350 | 351 | ||
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) | |||
369 | vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; | 370 | vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; |
370 | vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; | 371 | vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; |
371 | vc4 &= ~VC4_ING_VID_CHECK_MASK; | 372 | vc4 &= ~VC4_ING_VID_CHECK_MASK; |
372 | vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; | 373 | if (enable_filtering) { |
373 | vc5 |= VC5_DROP_VTABLE_MISS; | 374 | vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; |
375 | vc5 |= VC5_DROP_VTABLE_MISS; | ||
376 | } else { | ||
377 | vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; | ||
378 | vc5 &= ~VC5_DROP_VTABLE_MISS; | ||
379 | } | ||
374 | 380 | ||
375 | if (is5325(dev)) | 381 | if (is5325(dev)) |
376 | vc0 &= ~VC0_RESERVED_1; | 382 | vc0 &= ~VC0_RESERVED_1; |
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) | |||
420 | } | 426 | } |
421 | 427 | ||
422 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); | 428 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); |
429 | |||
430 | dev->vlan_enabled = enable; | ||
431 | dev->vlan_filtering_enabled = enable_filtering; | ||
423 | } | 432 | } |
424 | 433 | ||
425 | static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) | 434 | static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) |
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev) | |||
632 | b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); | 641 | b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); |
633 | } | 642 | } |
634 | 643 | ||
644 | static u16 b53_default_pvid(struct b53_device *dev) | ||
645 | { | ||
646 | if (is5325(dev) || is5365(dev)) | ||
647 | return 1; | ||
648 | else | ||
649 | return 0; | ||
650 | } | ||
651 | |||
635 | int b53_configure_vlan(struct dsa_switch *ds) | 652 | int b53_configure_vlan(struct dsa_switch *ds) |
636 | { | 653 | { |
637 | struct b53_device *dev = ds->priv; | 654 | struct b53_device *dev = ds->priv; |
638 | struct b53_vlan vl = { 0 }; | 655 | struct b53_vlan vl = { 0 }; |
639 | int i; | 656 | int i, def_vid; |
657 | |||
658 | def_vid = b53_default_pvid(dev); | ||
640 | 659 | ||
641 | /* clear all vlan entries */ | 660 | /* clear all vlan entries */ |
642 | if (is5325(dev) || is5365(dev)) { | 661 | if (is5325(dev) || is5365(dev)) { |
643 | for (i = 1; i < dev->num_vlans; i++) | 662 | for (i = def_vid; i < dev->num_vlans; i++) |
644 | b53_set_vlan_entry(dev, i, &vl); | 663 | b53_set_vlan_entry(dev, i, &vl); |
645 | } else { | 664 | } else { |
646 | b53_do_vlan_op(dev, VTA_CMD_CLEAR); | 665 | b53_do_vlan_op(dev, VTA_CMD_CLEAR); |
647 | } | 666 | } |
648 | 667 | ||
649 | b53_enable_vlan(dev, false); | 668 | b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); |
650 | 669 | ||
651 | b53_for_each_port(dev, i) | 670 | b53_for_each_port(dev, i) |
652 | b53_write16(dev, B53_VLAN_PAGE, | 671 | b53_write16(dev, B53_VLAN_PAGE, |
653 | B53_VLAN_PORT_DEF_TAG(i), 1); | 672 | B53_VLAN_PORT_DEF_TAG(i), def_vid); |
654 | 673 | ||
655 | if (!is5325(dev) && !is5365(dev)) | 674 | if (!is5325(dev) && !is5365(dev)) |
656 | b53_set_jumbo(dev, dev->enable_jumbo, false); | 675 | b53_set_jumbo(dev, dev->enable_jumbo, false); |
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up); | |||
1255 | 1274 | ||
1256 | int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) | 1275 | int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) |
1257 | { | 1276 | { |
1277 | struct b53_device *dev = ds->priv; | ||
1278 | struct net_device *bridge_dev; | ||
1279 | unsigned int i; | ||
1280 | u16 pvid, new_pvid; | ||
1281 | |||
1282 | /* Handle the case were multiple bridges span the same switch device | ||
1283 | * and one of them has a different setting than what is being requested | ||
1284 | * which would be breaking filtering semantics for any of the other | ||
1285 | * bridge devices. | ||
1286 | */ | ||
1287 | b53_for_each_port(dev, i) { | ||
1288 | bridge_dev = dsa_to_port(ds, i)->bridge_dev; | ||
1289 | if (bridge_dev && | ||
1290 | bridge_dev != dsa_to_port(ds, port)->bridge_dev && | ||
1291 | br_vlan_enabled(bridge_dev) != vlan_filtering) { | ||
1292 | netdev_err(bridge_dev, | ||
1293 | "VLAN filtering is global to the switch!\n"); | ||
1294 | return -EINVAL; | ||
1295 | } | ||
1296 | } | ||
1297 | |||
1298 | b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); | ||
1299 | new_pvid = pvid; | ||
1300 | if (dev->vlan_filtering_enabled && !vlan_filtering) { | ||
1301 | /* Filtering is currently enabled, use the default PVID since | ||
1302 | * the bridge does not expect tagging anymore | ||
1303 | */ | ||
1304 | dev->ports[port].pvid = pvid; | ||
1305 | new_pvid = b53_default_pvid(dev); | ||
1306 | } else if (!dev->vlan_filtering_enabled && vlan_filtering) { | ||
1307 | /* Filtering is currently disabled, restore the previous PVID */ | ||
1308 | new_pvid = dev->ports[port].pvid; | ||
1309 | } | ||
1310 | |||
1311 | if (pvid != new_pvid) | ||
1312 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), | ||
1313 | new_pvid); | ||
1314 | |||
1315 | b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); | ||
1316 | |||
1258 | return 0; | 1317 | return 0; |
1259 | } | 1318 | } |
1260 | EXPORT_SYMBOL(b53_vlan_filtering); | 1319 | EXPORT_SYMBOL(b53_vlan_filtering); |
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, | |||
1270 | if (vlan->vid_end > dev->num_vlans) | 1329 | if (vlan->vid_end > dev->num_vlans) |
1271 | return -ERANGE; | 1330 | return -ERANGE; |
1272 | 1331 | ||
1273 | b53_enable_vlan(dev, true); | 1332 | b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); |
1274 | 1333 | ||
1275 | return 0; | 1334 | return 0; |
1276 | } | 1335 | } |
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port, | |||
1300 | b53_fast_age_vlan(dev, vid); | 1359 | b53_fast_age_vlan(dev, vid); |
1301 | } | 1360 | } |
1302 | 1361 | ||
1303 | if (pvid) { | 1362 | if (pvid && !dsa_is_cpu_port(ds, port)) { |
1304 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), | 1363 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), |
1305 | vlan->vid_end); | 1364 | vlan->vid_end); |
1306 | b53_fast_age_vlan(dev, vid); | 1365 | b53_fast_age_vlan(dev, vid); |
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port, | |||
1326 | 1385 | ||
1327 | vl->members &= ~BIT(port); | 1386 | vl->members &= ~BIT(port); |
1328 | 1387 | ||
1329 | if (pvid == vid) { | 1388 | if (pvid == vid) |
1330 | if (is5325(dev) || is5365(dev)) | 1389 | pvid = b53_default_pvid(dev); |
1331 | pvid = 1; | ||
1332 | else | ||
1333 | pvid = 0; | ||
1334 | } | ||
1335 | 1390 | ||
1336 | if (untagged && !dsa_is_cpu_port(ds, port)) | 1391 | if (untagged && !dsa_is_cpu_port(ds, port)) |
1337 | vl->untag &= ~(BIT(port)); | 1392 | vl->untag &= ~(BIT(port)); |
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) | |||
1644 | b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); | 1699 | b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); |
1645 | dev->ports[port].vlan_ctl_mask = pvlan; | 1700 | dev->ports[port].vlan_ctl_mask = pvlan; |
1646 | 1701 | ||
1647 | if (is5325(dev) || is5365(dev)) | 1702 | pvid = b53_default_pvid(dev); |
1648 | pvid = 1; | ||
1649 | else | ||
1650 | pvid = 0; | ||
1651 | 1703 | ||
1652 | /* Make this port join all VLANs without VLAN entries */ | 1704 | /* Make this port join all VLANs without VLAN entries */ |
1653 | if (is58xx(dev)) { | 1705 | if (is58xx(dev)) { |
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index ec796482792d..4dc7ee38b258 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h | |||
@@ -91,6 +91,7 @@ enum { | |||
91 | struct b53_port { | 91 | struct b53_port { |
92 | u16 vlan_ctl_mask; | 92 | u16 vlan_ctl_mask; |
93 | struct ethtool_eee eee; | 93 | struct ethtool_eee eee; |
94 | u16 pvid; | ||
94 | }; | 95 | }; |
95 | 96 | ||
96 | struct b53_vlan { | 97 | struct b53_vlan { |
@@ -137,6 +138,8 @@ struct b53_device { | |||
137 | 138 | ||
138 | unsigned int num_vlans; | 139 | unsigned int num_vlans; |
139 | struct b53_vlan *vlans; | 140 | struct b53_vlan *vlans; |
141 | bool vlan_enabled; | ||
142 | bool vlan_filtering_enabled; | ||
140 | unsigned int num_ports; | 143 | unsigned int num_ports; |
141 | struct b53_port *ports; | 144 | struct b53_port *ports; |
142 | }; | 145 | }; |
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 361fbde76654..14138d423cf1 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
@@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) | |||
690 | * port, the other ones have already been disabled during | 690 | * port, the other ones have already been disabled during |
691 | * bcm_sf2_sw_setup | 691 | * bcm_sf2_sw_setup |
692 | */ | 692 | */ |
693 | for (port = 0; port < DSA_MAX_PORTS; port++) { | 693 | for (port = 0; port < ds->num_ports; port++) { |
694 | if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) | 694 | if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) |
695 | bcm_sf2_port_disable(ds, port, NULL); | 695 | bcm_sf2_port_disable(ds, port, NULL); |
696 | } | 696 | } |
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, | |||
726 | { | 726 | { |
727 | struct net_device *p = ds->ports[port].cpu_dp->master; | 727 | struct net_device *p = ds->ports[port].cpu_dp->master; |
728 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); | 728 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); |
729 | struct ethtool_wolinfo pwol; | 729 | struct ethtool_wolinfo pwol = { }; |
730 | 730 | ||
731 | /* Get the parent device WoL settings */ | 731 | /* Get the parent device WoL settings */ |
732 | p->ethtool_ops->get_wol(p, &pwol); | 732 | if (p->ethtool_ops->get_wol) |
733 | p->ethtool_ops->get_wol(p, &pwol); | ||
733 | 734 | ||
734 | /* Advertise the parent device supported settings */ | 735 | /* Advertise the parent device supported settings */ |
735 | wol->supported = pwol.supported; | 736 | wol->supported = pwol.supported; |
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, | |||
750 | struct net_device *p = ds->ports[port].cpu_dp->master; | 751 | struct net_device *p = ds->ports[port].cpu_dp->master; |
751 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); | 752 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); |
752 | s8 cpu_port = ds->ports[port].cpu_dp->index; | 753 | s8 cpu_port = ds->ports[port].cpu_dp->index; |
753 | struct ethtool_wolinfo pwol; | 754 | struct ethtool_wolinfo pwol = { }; |
754 | 755 | ||
755 | p->ethtool_ops->get_wol(p, &pwol); | 756 | if (p->ethtool_ops->get_wol) |
757 | p->ethtool_ops->get_wol(p, &pwol); | ||
756 | if (wol->wolopts & ~pwol.supported) | 758 | if (wol->wolopts & ~pwol.supported) |
757 | return -EINVAL; | 759 | return -EINVAL; |
758 | 760 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8dca2c949e73..12fd7ce3f1ff 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) | |||
261 | unsigned int sub_irq; | 261 | unsigned int sub_irq; |
262 | unsigned int n; | 262 | unsigned int n; |
263 | u16 reg; | 263 | u16 reg; |
264 | u16 ctl1; | ||
264 | int err; | 265 | int err; |
265 | 266 | ||
266 | mutex_lock(&chip->reg_lock); | 267 | mutex_lock(&chip->reg_lock); |
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) | |||
270 | if (err) | 271 | if (err) |
271 | goto out; | 272 | goto out; |
272 | 273 | ||
273 | for (n = 0; n < chip->g1_irq.nirqs; ++n) { | 274 | do { |
274 | if (reg & (1 << n)) { | 275 | for (n = 0; n < chip->g1_irq.nirqs; ++n) { |
275 | sub_irq = irq_find_mapping(chip->g1_irq.domain, n); | 276 | if (reg & (1 << n)) { |
276 | handle_nested_irq(sub_irq); | 277 | sub_irq = irq_find_mapping(chip->g1_irq.domain, |
277 | ++nhandled; | 278 | n); |
279 | handle_nested_irq(sub_irq); | ||
280 | ++nhandled; | ||
281 | } | ||
278 | } | 282 | } |
279 | } | 283 | |
284 | mutex_lock(&chip->reg_lock); | ||
285 | err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); | ||
286 | if (err) | ||
287 | goto unlock; | ||
288 | err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®); | ||
289 | unlock: | ||
290 | mutex_unlock(&chip->reg_lock); | ||
291 | if (err) | ||
292 | goto out; | ||
293 | ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); | ||
294 | } while (reg & ctl1); | ||
295 | |||
280 | out: | 296 | out: |
281 | return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); | 297 | return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); |
282 | } | 298 | } |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a70bb1bb90e7..a6eacf2099c3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter) | |||
2663 | goto err_device_destroy; | 2663 | goto err_device_destroy; |
2664 | } | 2664 | } |
2665 | 2665 | ||
2666 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | ||
2667 | /* Make sure we don't have a race with AENQ Links state handler */ | ||
2668 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | ||
2669 | netif_carrier_on(adapter->netdev); | ||
2670 | |||
2671 | rc = ena_enable_msix_and_set_admin_interrupts(adapter, | 2666 | rc = ena_enable_msix_and_set_admin_interrupts(adapter, |
2672 | adapter->num_queues); | 2667 | adapter->num_queues); |
2673 | if (rc) { | 2668 | if (rc) { |
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter) | |||
2684 | } | 2679 | } |
2685 | 2680 | ||
2686 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | 2681 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
2682 | |||
2683 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | ||
2684 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | ||
2685 | netif_carrier_on(adapter->netdev); | ||
2686 | |||
2687 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); | 2687 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
2688 | dev_err(&pdev->dev, | 2688 | dev_err(&pdev->dev, |
2689 | "Device reset completed successfully, Driver info: %s\n", | 2689 | "Device reset completed successfully, Driver info: %s\n", |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index dc8b6173d8d8..63870072cbbd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h | |||
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | #define DRV_MODULE_VER_MAJOR 2 | 46 | #define DRV_MODULE_VER_MAJOR 2 |
47 | #define DRV_MODULE_VER_MINOR 0 | 47 | #define DRV_MODULE_VER_MINOR 0 |
48 | #define DRV_MODULE_VER_SUBMINOR 2 | 48 | #define DRV_MODULE_VER_SUBMINOR 3 |
49 | 49 | ||
50 | #define DRV_MODULE_NAME "ena" | 50 | #define DRV_MODULE_NAME "ena" |
51 | #ifndef DRV_MODULE_VERSION | 51 | #ifndef DRV_MODULE_VERSION |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 28c9b0bdf2f6..bc3ac369cbe3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev, | |||
134 | 134 | ||
135 | priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); | 135 | priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); |
136 | reg = rxchk_readl(priv, RXCHK_CONTROL); | 136 | reg = rxchk_readl(priv, RXCHK_CONTROL); |
137 | /* Clear L2 header checks, which would prevent BPDUs | ||
138 | * from being received. | ||
139 | */ | ||
140 | reg &= ~RXCHK_L2_HDR_DIS; | ||
137 | if (priv->rx_chk_en) | 141 | if (priv->rx_chk_en) |
138 | reg |= RXCHK_EN; | 142 | reg |= RXCHK_EN; |
139 | else | 143 | else |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2370dc204202..697c2427f2b7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) | |||
2098 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 2098 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
2099 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ | 2099 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ |
2100 | defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) | 2100 | defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) |
2101 | static __u32 fec_enet_register_version = 2; | ||
2101 | static u32 fec_enet_register_offset[] = { | 2102 | static u32 fec_enet_register_offset[] = { |
2102 | FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, | 2103 | FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, |
2103 | FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, | 2104 | FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, |
@@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = { | |||
2128 | IEEE_R_FDXFC, IEEE_R_OCTETS_OK | 2129 | IEEE_R_FDXFC, IEEE_R_OCTETS_OK |
2129 | }; | 2130 | }; |
2130 | #else | 2131 | #else |
2132 | static __u32 fec_enet_register_version = 1; | ||
2131 | static u32 fec_enet_register_offset[] = { | 2133 | static u32 fec_enet_register_offset[] = { |
2132 | FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, | 2134 | FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, |
2133 | FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, | 2135 | FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, |
@@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev, | |||
2149 | u32 *buf = (u32 *)regbuf; | 2151 | u32 *buf = (u32 *)regbuf; |
2150 | u32 i, off; | 2152 | u32 i, off; |
2151 | 2153 | ||
2154 | regs->version = fec_enet_register_version; | ||
2155 | |||
2152 | memset(buf, 0, regs->len); | 2156 | memset(buf, 0, regs->len); |
2153 | 2157 | ||
2154 | for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { | 2158 | for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 3b9e74be5fbd..ac55db065f16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3081 | dsaf_dev = dev_get_drvdata(&pdev->dev); | 3081 | dsaf_dev = dev_get_drvdata(&pdev->dev); |
3082 | if (!dsaf_dev) { | 3082 | if (!dsaf_dev) { |
3083 | dev_err(&pdev->dev, "dsaf_dev is NULL\n"); | 3083 | dev_err(&pdev->dev, "dsaf_dev is NULL\n"); |
3084 | put_device(&pdev->dev); | ||
3084 | return -ENODEV; | 3085 | return -ENODEV; |
3085 | } | 3086 | } |
3086 | 3087 | ||
@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3088 | if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { | 3089 | if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { |
3089 | dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", | 3090 | dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", |
3090 | dsaf_dev->ae_dev.name); | 3091 | dsaf_dev->ae_dev.name); |
3092 | put_device(&pdev->dev); | ||
3091 | return -ENODEV; | 3093 | return -ENODEV; |
3092 | } | 3094 | } |
3093 | 3095 | ||
@@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3126 | dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); | 3128 | dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); |
3127 | dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); | 3129 | dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); |
3128 | } | 3130 | } |
3131 | |||
3132 | put_device(&pdev->dev); | ||
3133 | |||
3129 | return 0; | 3134 | return 0; |
3130 | } | 3135 | } |
3131 | EXPORT_SYMBOL(hns_dsaf_roce_reset); | 3136 | EXPORT_SYMBOL(hns_dsaf_roce_reset); |
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2f427271a793..292a668ce88e 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2879 | 2879 | ||
2880 | ret = mv643xx_eth_shared_of_probe(pdev); | 2880 | ret = mv643xx_eth_shared_of_probe(pdev); |
2881 | if (ret) | 2881 | if (ret) |
2882 | return ret; | 2882 | goto err_put_clk; |
2883 | pd = dev_get_platdata(&pdev->dev); | 2883 | pd = dev_get_platdata(&pdev->dev); |
2884 | 2884 | ||
2885 | msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? | 2885 | msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? |
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2887 | infer_hw_params(msp); | 2887 | infer_hw_params(msp); |
2888 | 2888 | ||
2889 | return 0; | 2889 | return 0; |
2890 | |||
2891 | err_put_clk: | ||
2892 | if (!IS_ERR(msp->clk)) | ||
2893 | clk_disable_unprepare(msp->clk); | ||
2894 | return ret; | ||
2890 | } | 2895 | } |
2891 | 2896 | ||
2892 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) | 2897 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) |
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f3a5fa84860f..57727fe1501e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5073 | INIT_WORK(&hw->restart_work, sky2_restart); | 5073 | INIT_WORK(&hw->restart_work, sky2_restart); |
5074 | 5074 | ||
5075 | pci_set_drvdata(pdev, hw); | 5075 | pci_set_drvdata(pdev, hw); |
5076 | pdev->d3_delay = 200; | 5076 | pdev->d3_delay = 300; |
5077 | 5077 | ||
5078 | return 0; | 5078 | return 0; |
5079 | 5079 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6b88881b8e35..c1438ae52a11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
3360 | dev->addr_len = ETH_ALEN; | 3360 | dev->addr_len = ETH_ALEN; |
3361 | mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); | 3361 | mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); |
3362 | if (!is_valid_ether_addr(dev->dev_addr)) { | 3362 | if (!is_valid_ether_addr(dev->dev_addr)) { |
3363 | en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", | 3363 | en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", |
3364 | priv->port, dev->dev_addr); | 3364 | priv->port, dev->dev_addr); |
3365 | err = -EINVAL; | 3365 | err = -EINVAL; |
3366 | goto out; | 3366 | goto out; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9a0881cb7f51..6c01314e87b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, | |||
617 | } | 617 | } |
618 | #endif | 618 | #endif |
619 | 619 | ||
620 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) | ||
621 | |||
620 | /* We reach this function only after checking that any of | 622 | /* We reach this function only after checking that any of |
621 | * the (IPv4 | IPv6) bits are set in cqe->status. | 623 | * the (IPv4 | IPv6) bits are set in cqe->status. |
622 | */ | 624 | */ |
@@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, | |||
624 | netdev_features_t dev_features) | 626 | netdev_features_t dev_features) |
625 | { | 627 | { |
626 | __wsum hw_checksum = 0; | 628 | __wsum hw_checksum = 0; |
629 | void *hdr; | ||
630 | |||
631 | /* CQE csum doesn't cover padding octets in short ethernet | ||
632 | * frames. And the pad field is appended prior to calculating | ||
633 | * and appending the FCS field. | ||
634 | * | ||
635 | * Detecting these padded frames requires to verify and parse | ||
636 | * IP headers, so we simply force all those small frames to skip | ||
637 | * checksum complete. | ||
638 | */ | ||
639 | if (short_frame(skb->len)) | ||
640 | return -EINVAL; | ||
627 | 641 | ||
628 | void *hdr = (u8 *)va + sizeof(struct ethhdr); | 642 | hdr = (u8 *)va + sizeof(struct ethhdr); |
629 | |||
630 | hw_checksum = csum_unfold((__force __sum16)cqe->checksum); | 643 | hw_checksum = csum_unfold((__force __sum16)cqe->checksum); |
631 | 644 | ||
632 | if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && | 645 | if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && |
@@ -819,6 +832,11 @@ xdp_drop_no_cnt: | |||
819 | skb_record_rx_queue(skb, cq_ring); | 832 | skb_record_rx_queue(skb, cq_ring); |
820 | 833 | ||
821 | if (likely(dev->features & NETIF_F_RXCSUM)) { | 834 | if (likely(dev->features & NETIF_F_RXCSUM)) { |
835 | /* TODO: For IP non TCP/UDP packets when csum complete is | ||
836 | * not an option (not supported or any other reason) we can | ||
837 | * actually check cqe IPOK status bit and report | ||
838 | * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE | ||
839 | */ | ||
822 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | | 840 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | |
823 | MLX4_CQE_STATUS_UDP)) && | 841 | MLX4_CQE_STATUS_UDP)) && |
824 | (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && | 842 | (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3e0fa8a8077b..e267ff93e8a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -1583,6 +1583,24 @@ no_trig: | |||
1583 | spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); | 1583 | spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); |
1584 | } | 1584 | } |
1585 | 1585 | ||
1586 | void mlx5_cmd_flush(struct mlx5_core_dev *dev) | ||
1587 | { | ||
1588 | struct mlx5_cmd *cmd = &dev->cmd; | ||
1589 | int i; | ||
1590 | |||
1591 | for (i = 0; i < cmd->max_reg_cmds; i++) | ||
1592 | while (down_trylock(&cmd->sem)) | ||
1593 | mlx5_cmd_trigger_completions(dev); | ||
1594 | |||
1595 | while (down_trylock(&cmd->pages_sem)) | ||
1596 | mlx5_cmd_trigger_completions(dev); | ||
1597 | |||
1598 | /* Unlock cmdif */ | ||
1599 | up(&cmd->pages_sem); | ||
1600 | for (i = 0; i < cmd->max_reg_cmds; i++) | ||
1601 | up(&cmd->sem); | ||
1602 | } | ||
1603 | |||
1586 | static int status_to_err(u8 status) | 1604 | static int status_to_err(u8 status) |
1587 | { | 1605 | { |
1588 | return status ? -1 : 0; /* TBD more meaningful codes */ | 1606 | return status ? -1 : 0; /* TBD more meaningful codes */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8fa8fdd30b85..448a92561567 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -657,6 +657,7 @@ struct mlx5e_channel_stats { | |||
657 | enum { | 657 | enum { |
658 | MLX5E_STATE_OPENED, | 658 | MLX5E_STATE_OPENED, |
659 | MLX5E_STATE_DESTROYING, | 659 | MLX5E_STATE_DESTROYING, |
660 | MLX5E_STATE_XDP_TX_ENABLED, | ||
660 | }; | 661 | }; |
661 | 662 | ||
662 | struct mlx5e_rqt { | 663 | struct mlx5e_rqt { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 3740177eed09..03b2a9f9c589 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | |||
@@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |||
365 | int sq_num; | 365 | int sq_num; |
366 | int i; | 366 | int i; |
367 | 367 | ||
368 | if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) | 368 | /* this flag is sufficient, no need to test internal sq state */ |
369 | if (unlikely(!mlx5e_xdp_tx_is_enabled(priv))) | ||
369 | return -ENETDOWN; | 370 | return -ENETDOWN; |
370 | 371 | ||
371 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) | 372 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
@@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |||
378 | 379 | ||
379 | sq = &priv->channels.c[sq_num]->xdpsq; | 380 | sq = &priv->channels.c[sq_num]->xdpsq; |
380 | 381 | ||
381 | if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) | ||
382 | return -ENETDOWN; | ||
383 | |||
384 | for (i = 0; i < n; i++) { | 382 | for (i = 0; i < n; i++) { |
385 | struct xdp_frame *xdpf = frames[i]; | 383 | struct xdp_frame *xdpf = frames[i]; |
386 | struct mlx5e_xdp_info xdpi; | 384 | struct mlx5e_xdp_info xdpi; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 3a67cb3cd179..ee27a7c8cd87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | |||
@@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq); | |||
50 | int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | 50 | int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, |
51 | u32 flags); | 51 | u32 flags); |
52 | 52 | ||
53 | static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) | ||
54 | { | ||
55 | set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
56 | } | ||
57 | |||
58 | static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) | ||
59 | { | ||
60 | clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
61 | /* let other device's napi(s) see our new state */ | ||
62 | synchronize_rcu(); | ||
63 | } | ||
64 | |||
65 | static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) | ||
66 | { | ||
67 | return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
68 | } | ||
69 | |||
53 | static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) | 70 | static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) |
54 | { | 71 | { |
55 | if (sq->doorbell_cseg) { | 72 | if (sq->doorbell_cseg) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3bbccead2f63..47233b9a4f81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -354,9 +354,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
354 | 354 | ||
355 | new_channels.params = priv->channels.params; | 355 | new_channels.params = priv->channels.params; |
356 | new_channels.params.num_channels = count; | 356 | new_channels.params.num_channels = count; |
357 | if (!netif_is_rxfh_configured(priv->netdev)) | ||
358 | mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, | ||
359 | MLX5E_INDIR_RQT_SIZE, count); | ||
360 | 357 | ||
361 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 358 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
362 | priv->channels.params = new_channels.params; | 359 | priv->channels.params = new_channels.params; |
@@ -372,6 +369,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
372 | if (arfs_enabled) | 369 | if (arfs_enabled) |
373 | mlx5e_arfs_disable(priv); | 370 | mlx5e_arfs_disable(priv); |
374 | 371 | ||
372 | if (!netif_is_rxfh_configured(priv->netdev)) | ||
373 | mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, | ||
374 | MLX5E_INDIR_RQT_SIZE, count); | ||
375 | |||
375 | /* Switch to new channels, set new parameters and close old ones */ | 376 | /* Switch to new channels, set new parameters and close old ones */ |
376 | mlx5e_switch_priv_channels(priv, &new_channels, NULL); | 377 | mlx5e_switch_priv_channels(priv, &new_channels, NULL); |
377 | 378 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 01819e5c9975..93e50ccd44c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -2938,6 +2938,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) | |||
2938 | 2938 | ||
2939 | mlx5e_build_tx2sq_maps(priv); | 2939 | mlx5e_build_tx2sq_maps(priv); |
2940 | mlx5e_activate_channels(&priv->channels); | 2940 | mlx5e_activate_channels(&priv->channels); |
2941 | mlx5e_xdp_tx_enable(priv); | ||
2941 | netif_tx_start_all_queues(priv->netdev); | 2942 | netif_tx_start_all_queues(priv->netdev); |
2942 | 2943 | ||
2943 | if (mlx5e_is_vport_rep(priv)) | 2944 | if (mlx5e_is_vport_rep(priv)) |
@@ -2959,6 +2960,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) | |||
2959 | */ | 2960 | */ |
2960 | netif_tx_stop_all_queues(priv->netdev); | 2961 | netif_tx_stop_all_queues(priv->netdev); |
2961 | netif_tx_disable(priv->netdev); | 2962 | netif_tx_disable(priv->netdev); |
2963 | mlx5e_xdp_tx_disable(priv); | ||
2962 | mlx5e_deactivate_channels(&priv->channels); | 2964 | mlx5e_deactivate_channels(&priv->channels); |
2963 | } | 2965 | } |
2964 | 2966 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index fbc42b7252a9..503035469d2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c | |||
@@ -211,11 +211,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data | |||
211 | enum port_module_event_status_type module_status; | 211 | enum port_module_event_status_type module_status; |
212 | enum port_module_event_error_type error_type; | 212 | enum port_module_event_error_type error_type; |
213 | struct mlx5_eqe_port_module *module_event_eqe; | 213 | struct mlx5_eqe_port_module *module_event_eqe; |
214 | const char *status_str, *error_str; | 214 | const char *status_str; |
215 | u8 module_num; | 215 | u8 module_num; |
216 | 216 | ||
217 | module_event_eqe = &eqe->data.port_module; | 217 | module_event_eqe = &eqe->data.port_module; |
218 | module_num = module_event_eqe->module; | ||
219 | module_status = module_event_eqe->module_status & | 218 | module_status = module_event_eqe->module_status & |
220 | PORT_MODULE_EVENT_MODULE_STATUS_MASK; | 219 | PORT_MODULE_EVENT_MODULE_STATUS_MASK; |
221 | error_type = module_event_eqe->error_type & | 220 | error_type = module_event_eqe->error_type & |
@@ -223,25 +222,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data | |||
223 | 222 | ||
224 | if (module_status < MLX5_MODULE_STATUS_NUM) | 223 | if (module_status < MLX5_MODULE_STATUS_NUM) |
225 | events->pme_stats.status_counters[module_status]++; | 224 | events->pme_stats.status_counters[module_status]++; |
226 | status_str = mlx5_pme_status_to_string(module_status); | ||
227 | 225 | ||
228 | if (module_status == MLX5_MODULE_STATUS_ERROR) { | 226 | if (module_status == MLX5_MODULE_STATUS_ERROR) |
229 | if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) | 227 | if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) |
230 | events->pme_stats.error_counters[error_type]++; | 228 | events->pme_stats.error_counters[error_type]++; |
231 | error_str = mlx5_pme_error_to_string(error_type); | ||
232 | } | ||
233 | 229 | ||
234 | if (!printk_ratelimit()) | 230 | if (!printk_ratelimit()) |
235 | return NOTIFY_OK; | 231 | return NOTIFY_OK; |
236 | 232 | ||
237 | if (module_status == MLX5_MODULE_STATUS_ERROR) | 233 | module_num = module_event_eqe->module; |
234 | status_str = mlx5_pme_status_to_string(module_status); | ||
235 | if (module_status == MLX5_MODULE_STATUS_ERROR) { | ||
236 | const char *error_str = mlx5_pme_error_to_string(error_type); | ||
237 | |||
238 | mlx5_core_err(events->dev, | 238 | mlx5_core_err(events->dev, |
239 | "Port module event[error]: module %u, %s, %s\n", | 239 | "Port module event[error]: module %u, %s, %s\n", |
240 | module_num, status_str, error_str); | 240 | module_num, status_str, error_str); |
241 | else | 241 | } else { |
242 | mlx5_core_info(events->dev, | 242 | mlx5_core_info(events->dev, |
243 | "Port module event: module %u, %s\n", | 243 | "Port module event: module %u, %s\n", |
244 | module_num, status_str); | 244 | module_num, status_str); |
245 | } | ||
245 | 246 | ||
246 | return NOTIFY_OK; | 247 | return NOTIFY_OK; |
247 | } | 248 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 196c07383082..cb9fa3430c53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) | |||
103 | mlx5_core_err(dev, "start\n"); | 103 | mlx5_core_err(dev, "start\n"); |
104 | if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { | 104 | if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { |
105 | dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; | 105 | dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; |
106 | mlx5_cmd_trigger_completions(dev); | 106 | mlx5_cmd_flush(dev); |
107 | } | 107 | } |
108 | 108 | ||
109 | mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); | 109 | mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 5300b0b6d836..4fdac020b795 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
@@ -126,6 +126,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, | |||
126 | struct ptp_system_timestamp *sts); | 126 | struct ptp_system_timestamp *sts); |
127 | 127 | ||
128 | void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); | 128 | void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); |
129 | void mlx5_cmd_flush(struct mlx5_core_dev *dev); | ||
129 | int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); | 130 | int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); |
130 | void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); | 131 | void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); |
131 | 132 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 32519c93df17..b65e274b02e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, | |||
862 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { | 862 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { |
863 | bool configure = false; | 863 | bool configure = false; |
864 | bool pfc = false; | 864 | bool pfc = false; |
865 | u16 thres_cells; | ||
866 | u16 delay_cells; | ||
865 | bool lossy; | 867 | bool lossy; |
866 | u16 thres; | ||
867 | 868 | ||
868 | for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { | 869 | for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { |
869 | if (prio_tc[j] == i) { | 870 | if (prio_tc[j] == i) { |
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, | |||
877 | continue; | 878 | continue; |
878 | 879 | ||
879 | lossy = !(pfc || pause_en); | 880 | lossy = !(pfc || pause_en); |
880 | thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); | 881 | thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); |
881 | delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, | 882 | delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, |
882 | pause_en); | 883 | pfc, pause_en); |
883 | mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); | 884 | mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells, |
885 | thres_cells, lossy); | ||
884 | } | 886 | } |
885 | 887 | ||
886 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); | 888 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index beb8e5d6401a..ded556b7bab5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c | |||
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, | |||
1688 | 1688 | ||
1689 | eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); | 1689 | eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); |
1690 | 1690 | ||
1691 | if (!ether_addr_equal(ethh->h_dest, | ||
1692 | p_hwfn->p_rdma_info->iwarp.mac_addr)) { | ||
1693 | DP_VERBOSE(p_hwfn, | ||
1694 | QED_MSG_RDMA, | ||
1695 | "Got unexpected mac %pM instead of %pM\n", | ||
1696 | ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); | ||
1697 | return -EINVAL; | ||
1698 | } | ||
1699 | |||
1691 | ether_addr_copy(remote_mac_addr, ethh->h_source); | 1700 | ether_addr_copy(remote_mac_addr, ethh->h_source); |
1692 | ether_addr_copy(local_mac_addr, ethh->h_dest); | 1701 | ether_addr_copy(local_mac_addr, ethh->h_dest); |
1693 | 1702 | ||
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2605 | struct qed_iwarp_info *iwarp_info; | 2614 | struct qed_iwarp_info *iwarp_info; |
2606 | struct qed_ll2_acquire_data data; | 2615 | struct qed_ll2_acquire_data data; |
2607 | struct qed_ll2_cbs cbs; | 2616 | struct qed_ll2_cbs cbs; |
2608 | u32 mpa_buff_size; | 2617 | u32 buff_size; |
2609 | u16 n_ooo_bufs; | 2618 | u16 n_ooo_bufs; |
2610 | int rc = 0; | 2619 | int rc = 0; |
2611 | int i; | 2620 | int i; |
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2632 | 2641 | ||
2633 | memset(&data, 0, sizeof(data)); | 2642 | memset(&data, 0, sizeof(data)); |
2634 | data.input.conn_type = QED_LL2_TYPE_IWARP; | 2643 | data.input.conn_type = QED_LL2_TYPE_IWARP; |
2635 | data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; | 2644 | data.input.mtu = params->max_mtu; |
2636 | data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; | 2645 | data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; |
2637 | data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; | 2646 | data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; |
2638 | data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ | 2647 | data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ |
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2654 | goto err; | 2663 | goto err; |
2655 | } | 2664 | } |
2656 | 2665 | ||
2666 | buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); | ||
2657 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, | 2667 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, |
2658 | QED_IWARP_LL2_SYN_RX_SIZE, | 2668 | QED_IWARP_LL2_SYN_RX_SIZE, |
2659 | QED_IWARP_MAX_SYN_PKT_SIZE, | 2669 | buff_size, |
2660 | iwarp_info->ll2_syn_handle); | 2670 | iwarp_info->ll2_syn_handle); |
2661 | if (rc) | 2671 | if (rc) |
2662 | goto err; | 2672 | goto err; |
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2710 | if (rc) | 2720 | if (rc) |
2711 | goto err; | 2721 | goto err; |
2712 | 2722 | ||
2713 | mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); | ||
2714 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, | 2723 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, |
2715 | data.input.rx_num_desc, | 2724 | data.input.rx_num_desc, |
2716 | mpa_buff_size, | 2725 | buff_size, |
2717 | iwarp_info->ll2_mpa_handle); | 2726 | iwarp_info->ll2_mpa_handle); |
2718 | if (rc) | 2727 | if (rc) |
2719 | goto err; | 2728 | goto err; |
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2726 | 2735 | ||
2727 | iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; | 2736 | iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; |
2728 | 2737 | ||
2729 | iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); | 2738 | iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); |
2730 | if (!iwarp_info->mpa_intermediate_buf) | 2739 | if (!iwarp_info->mpa_intermediate_buf) |
2731 | goto err; | 2740 | goto err; |
2732 | 2741 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d00241..7ac959038324 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h | |||
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); | |||
46 | 46 | ||
47 | #define QED_IWARP_LL2_SYN_TX_SIZE (128) | 47 | #define QED_IWARP_LL2_SYN_TX_SIZE (128) |
48 | #define QED_IWARP_LL2_SYN_RX_SIZE (256) | 48 | #define QED_IWARP_LL2_SYN_RX_SIZE (256) |
49 | #define QED_IWARP_MAX_SYN_PKT_SIZE (128) | ||
50 | 49 | ||
51 | #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) | 50 | #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) |
52 | #define QED_IWARP_MAX_OOO (16) | 51 | #define QED_IWARP_MAX_OOO (16) |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index abb94c543aa2..6e36b88ca7c9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -1286,11 +1286,13 @@ static u16 rtl_get_events(struct rtl8169_private *tp) | |||
1286 | static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) | 1286 | static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) |
1287 | { | 1287 | { |
1288 | RTL_W16(tp, IntrStatus, bits); | 1288 | RTL_W16(tp, IntrStatus, bits); |
1289 | mmiowb(); | ||
1289 | } | 1290 | } |
1290 | 1291 | ||
1291 | static void rtl_irq_disable(struct rtl8169_private *tp) | 1292 | static void rtl_irq_disable(struct rtl8169_private *tp) |
1292 | { | 1293 | { |
1293 | RTL_W16(tp, IntrMask, 0); | 1294 | RTL_W16(tp, IntrMask, 0); |
1295 | mmiowb(); | ||
1294 | } | 1296 | } |
1295 | 1297 | ||
1296 | #define RTL_EVENT_NAPI_RX (RxOK | RxErr) | 1298 | #define RTL_EVENT_NAPI_RX (RxOK | RxErr) |
@@ -6072,7 +6074,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6072 | struct device *d = tp_to_dev(tp); | 6074 | struct device *d = tp_to_dev(tp); |
6073 | dma_addr_t mapping; | 6075 | dma_addr_t mapping; |
6074 | u32 opts[2], len; | 6076 | u32 opts[2], len; |
6075 | bool stop_queue; | ||
6076 | int frags; | 6077 | int frags; |
6077 | 6078 | ||
6078 | if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { | 6079 | if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { |
@@ -6114,6 +6115,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6114 | 6115 | ||
6115 | txd->opts2 = cpu_to_le32(opts[1]); | 6116 | txd->opts2 = cpu_to_le32(opts[1]); |
6116 | 6117 | ||
6118 | netdev_sent_queue(dev, skb->len); | ||
6119 | |||
6117 | skb_tx_timestamp(skb); | 6120 | skb_tx_timestamp(skb); |
6118 | 6121 | ||
6119 | /* Force memory writes to complete before releasing descriptor */ | 6122 | /* Force memory writes to complete before releasing descriptor */ |
@@ -6126,14 +6129,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6126 | 6129 | ||
6127 | tp->cur_tx += frags + 1; | 6130 | tp->cur_tx += frags + 1; |
6128 | 6131 | ||
6129 | stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); | 6132 | RTL_W8(tp, TxPoll, NPQ); |
6130 | if (unlikely(stop_queue)) | ||
6131 | netif_stop_queue(dev); | ||
6132 | 6133 | ||
6133 | if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) | 6134 | mmiowb(); |
6134 | RTL_W8(tp, TxPoll, NPQ); | ||
6135 | 6135 | ||
6136 | if (unlikely(stop_queue)) { | 6136 | if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { |
6137 | /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must | ||
6138 | * not miss a ring update when it notices a stopped queue. | ||
6139 | */ | ||
6140 | smp_wmb(); | ||
6141 | netif_stop_queue(dev); | ||
6137 | /* Sync with rtl_tx: | 6142 | /* Sync with rtl_tx: |
6138 | * - publish queue status and cur_tx ring index (write barrier) | 6143 | * - publish queue status and cur_tx ring index (write barrier) |
6139 | * - refresh dirty_tx ring index (read barrier). | 6144 | * - refresh dirty_tx ring index (read barrier). |
@@ -6483,7 +6488,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) | |||
6483 | 6488 | ||
6484 | if (work_done < budget) { | 6489 | if (work_done < budget) { |
6485 | napi_complete_done(napi, work_done); | 6490 | napi_complete_done(napi, work_done); |
6491 | |||
6486 | rtl_irq_enable(tp); | 6492 | rtl_irq_enable(tp); |
6493 | mmiowb(); | ||
6487 | } | 6494 | } |
6488 | 6495 | ||
6489 | return work_done; | 6496 | return work_done; |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 2f2bda68d861..c08034154a9a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -6115,7 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |||
6115 | static int efx_ef10_mtd_probe(struct efx_nic *efx) | 6115 | static int efx_ef10_mtd_probe(struct efx_nic *efx) |
6116 | { | 6116 | { |
6117 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); | 6117 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); |
6118 | DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); | 6118 | DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; |
6119 | struct efx_mcdi_mtd_partition *parts; | 6119 | struct efx_mcdi_mtd_partition *parts; |
6120 | size_t outlen, n_parts_total, i, n_parts; | 6120 | size_t outlen, n_parts_total, i, n_parts; |
6121 | unsigned int type; | 6121 | unsigned int type; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 20299f6f65fc..736e29635b77 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) | |||
241 | static int dwmac4_rx_check_timestamp(void *desc) | 241 | static int dwmac4_rx_check_timestamp(void *desc) |
242 | { | 242 | { |
243 | struct dma_desc *p = (struct dma_desc *)desc; | 243 | struct dma_desc *p = (struct dma_desc *)desc; |
244 | unsigned int rdes0 = le32_to_cpu(p->des0); | ||
245 | unsigned int rdes1 = le32_to_cpu(p->des1); | ||
246 | unsigned int rdes3 = le32_to_cpu(p->des3); | ||
244 | u32 own, ctxt; | 247 | u32 own, ctxt; |
245 | int ret = 1; | 248 | int ret = 1; |
246 | 249 | ||
247 | own = p->des3 & RDES3_OWN; | 250 | own = rdes3 & RDES3_OWN; |
248 | ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) | 251 | ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) |
249 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); | 252 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); |
250 | 253 | ||
251 | if (likely(!own && ctxt)) { | 254 | if (likely(!own && ctxt)) { |
252 | if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) | 255 | if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) |
253 | /* Corrupted value */ | 256 | /* Corrupted value */ |
254 | ret = -EINVAL; | 257 | ret = -EINVAL; |
255 | else | 258 | else |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 5d85742a2be0..3c749c327cbd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, | |||
696 | struct ethtool_eee *edata) | 696 | struct ethtool_eee *edata) |
697 | { | 697 | { |
698 | struct stmmac_priv *priv = netdev_priv(dev); | 698 | struct stmmac_priv *priv = netdev_priv(dev); |
699 | int ret; | ||
699 | 700 | ||
700 | priv->eee_enabled = edata->eee_enabled; | 701 | if (!edata->eee_enabled) { |
701 | |||
702 | if (!priv->eee_enabled) | ||
703 | stmmac_disable_eee_mode(priv); | 702 | stmmac_disable_eee_mode(priv); |
704 | else { | 703 | } else { |
705 | /* We are asking for enabling the EEE but it is safe | 704 | /* We are asking for enabling the EEE but it is safe |
706 | * to verify all by invoking the eee_init function. | 705 | * to verify all by invoking the eee_init function. |
707 | * In case of failure it will return an error. | 706 | * In case of failure it will return an error. |
708 | */ | 707 | */ |
709 | priv->eee_enabled = stmmac_eee_init(priv); | 708 | edata->eee_enabled = stmmac_eee_init(priv); |
710 | if (!priv->eee_enabled) | 709 | if (!edata->eee_enabled) |
711 | return -EOPNOTSUPP; | 710 | return -EOPNOTSUPP; |
712 | |||
713 | /* Do not change tx_lpi_timer in case of failure */ | ||
714 | priv->tx_lpi_timer = edata->tx_lpi_timer; | ||
715 | } | 711 | } |
716 | 712 | ||
717 | return phy_ethtool_set_eee(dev->phydev, edata); | 713 | ret = phy_ethtool_set_eee(dev->phydev, edata); |
714 | if (ret) | ||
715 | return ret; | ||
716 | |||
717 | priv->eee_enabled = edata->eee_enabled; | ||
718 | priv->tx_lpi_timer = edata->tx_lpi_timer; | ||
719 | return 0; | ||
718 | } | 720 | } |
719 | 721 | ||
720 | static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) | 722 | static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) |
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1f612268c998..d847f672a705 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c | |||
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device, | |||
259 | const char *name; | 259 | const char *name; |
260 | char node_name[32]; | 260 | char node_name[32]; |
261 | 261 | ||
262 | if (of_property_read_string(node, "label", &name) < 0) { | 262 | if (of_property_read_string(child, "label", &name) < 0) { |
263 | snprintf(node_name, sizeof(node_name), "%pOFn", child); | 263 | snprintf(node_name, sizeof(node_name), "%pOFn", child); |
264 | name = node_name; | 264 | name = node_name; |
265 | } | 265 | } |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 189cd2048c3a..c5675df5fc6f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -553,7 +553,7 @@ int phy_start_aneg(struct phy_device *phydev) | |||
553 | if (err < 0) | 553 | if (err < 0) |
554 | goto out_unlock; | 554 | goto out_unlock; |
555 | 555 | ||
556 | if (__phy_is_started(phydev)) { | 556 | if (phy_is_started(phydev)) { |
557 | if (phydev->autoneg == AUTONEG_ENABLE) { | 557 | if (phydev->autoneg == AUTONEG_ENABLE) { |
558 | err = phy_check_link_status(phydev); | 558 | err = phy_check_link_status(phydev); |
559 | } else { | 559 | } else { |
@@ -709,7 +709,7 @@ void phy_stop_machine(struct phy_device *phydev) | |||
709 | cancel_delayed_work_sync(&phydev->state_queue); | 709 | cancel_delayed_work_sync(&phydev->state_queue); |
710 | 710 | ||
711 | mutex_lock(&phydev->lock); | 711 | mutex_lock(&phydev->lock); |
712 | if (__phy_is_started(phydev)) | 712 | if (phy_is_started(phydev)) |
713 | phydev->state = PHY_UP; | 713 | phydev->state = PHY_UP; |
714 | mutex_unlock(&phydev->lock); | 714 | mutex_unlock(&phydev->lock); |
715 | } | 715 | } |
@@ -762,9 +762,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) | |||
762 | { | 762 | { |
763 | struct phy_device *phydev = phy_dat; | 763 | struct phy_device *phydev = phy_dat; |
764 | 764 | ||
765 | if (!phy_is_started(phydev)) | ||
766 | return IRQ_NONE; /* It can't be ours. */ | ||
767 | |||
768 | if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) | 765 | if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) |
769 | return IRQ_NONE; | 766 | return IRQ_NONE; |
770 | 767 | ||
@@ -842,15 +839,14 @@ EXPORT_SYMBOL(phy_stop_interrupts); | |||
842 | */ | 839 | */ |
843 | void phy_stop(struct phy_device *phydev) | 840 | void phy_stop(struct phy_device *phydev) |
844 | { | 841 | { |
845 | mutex_lock(&phydev->lock); | 842 | if (!phy_is_started(phydev)) { |
846 | |||
847 | if (!__phy_is_started(phydev)) { | ||
848 | WARN(1, "called from state %s\n", | 843 | WARN(1, "called from state %s\n", |
849 | phy_state_to_str(phydev->state)); | 844 | phy_state_to_str(phydev->state)); |
850 | mutex_unlock(&phydev->lock); | ||
851 | return; | 845 | return; |
852 | } | 846 | } |
853 | 847 | ||
848 | mutex_lock(&phydev->lock); | ||
849 | |||
854 | if (phy_interrupt_is_valid(phydev)) | 850 | if (phy_interrupt_is_valid(phydev)) |
855 | phy_disable_interrupts(phydev); | 851 | phy_disable_interrupts(phydev); |
856 | 852 | ||
@@ -989,8 +985,10 @@ void phy_state_machine(struct work_struct *work) | |||
989 | * state machine would be pointless and possibly error prone when | 985 | * state machine would be pointless and possibly error prone when |
990 | * called from phy_disconnect() synchronously. | 986 | * called from phy_disconnect() synchronously. |
991 | */ | 987 | */ |
988 | mutex_lock(&phydev->lock); | ||
992 | if (phy_polling_mode(phydev) && phy_is_started(phydev)) | 989 | if (phy_polling_mode(phydev) && phy_is_started(phydev)) |
993 | phy_queue_state_machine(phydev, PHY_STATE_TIME); | 990 | phy_queue_state_machine(phydev, PHY_STATE_TIME); |
991 | mutex_unlock(&phydev->lock); | ||
994 | } | 992 | } |
995 | 993 | ||
996 | /** | 994 | /** |
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index e7becc7379d7..938803237d7f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c | |||
@@ -474,6 +474,17 @@ static void phylink_run_resolve(struct phylink *pl) | |||
474 | queue_work(system_power_efficient_wq, &pl->resolve); | 474 | queue_work(system_power_efficient_wq, &pl->resolve); |
475 | } | 475 | } |
476 | 476 | ||
477 | static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) | ||
478 | { | ||
479 | unsigned long state = pl->phylink_disable_state; | ||
480 | |||
481 | set_bit(bit, &pl->phylink_disable_state); | ||
482 | if (state == 0) { | ||
483 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
484 | flush_work(&pl->resolve); | ||
485 | } | ||
486 | } | ||
487 | |||
477 | static void phylink_fixed_poll(struct timer_list *t) | 488 | static void phylink_fixed_poll(struct timer_list *t) |
478 | { | 489 | { |
479 | struct phylink *pl = container_of(t, struct phylink, link_poll); | 490 | struct phylink *pl = container_of(t, struct phylink, link_poll); |
@@ -924,9 +935,7 @@ void phylink_stop(struct phylink *pl) | |||
924 | if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) | 935 | if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) |
925 | del_timer_sync(&pl->link_poll); | 936 | del_timer_sync(&pl->link_poll); |
926 | 937 | ||
927 | set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); | 938 | phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED); |
928 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
929 | flush_work(&pl->resolve); | ||
930 | } | 939 | } |
931 | EXPORT_SYMBOL_GPL(phylink_stop); | 940 | EXPORT_SYMBOL_GPL(phylink_stop); |
932 | 941 | ||
@@ -1632,9 +1641,7 @@ static void phylink_sfp_link_down(void *upstream) | |||
1632 | 1641 | ||
1633 | ASSERT_RTNL(); | 1642 | ASSERT_RTNL(); |
1634 | 1643 | ||
1635 | set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); | 1644 | phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); |
1636 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
1637 | flush_work(&pl->resolve); | ||
1638 | } | 1645 | } |
1639 | 1646 | ||
1640 | static void phylink_sfp_link_up(void *upstream) | 1647 | static void phylink_sfp_link_up(void *upstream) |
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index ad9db652874d..fef701bfad62 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus) | |||
347 | return ret; | 347 | return ret; |
348 | } | 348 | } |
349 | } | 349 | } |
350 | bus->socket_ops->attach(bus->sfp); | ||
350 | if (bus->started) | 351 | if (bus->started) |
351 | bus->socket_ops->start(bus->sfp); | 352 | bus->socket_ops->start(bus->sfp); |
352 | bus->netdev->sfp_bus = bus; | 353 | bus->netdev->sfp_bus = bus; |
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) | |||
362 | if (bus->registered) { | 363 | if (bus->registered) { |
363 | if (bus->started) | 364 | if (bus->started) |
364 | bus->socket_ops->stop(bus->sfp); | 365 | bus->socket_ops->stop(bus->sfp); |
366 | bus->socket_ops->detach(bus->sfp); | ||
365 | if (bus->phydev && ops && ops->disconnect_phy) | 367 | if (bus->phydev && ops && ops->disconnect_phy) |
366 | ops->disconnect_phy(bus->upstream); | 368 | ops->disconnect_phy(bus->upstream); |
367 | } | 369 | } |
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index fd8bb998ae52..68c8fbf099f8 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c | |||
@@ -184,6 +184,7 @@ struct sfp { | |||
184 | 184 | ||
185 | struct gpio_desc *gpio[GPIO_MAX]; | 185 | struct gpio_desc *gpio[GPIO_MAX]; |
186 | 186 | ||
187 | bool attached; | ||
187 | unsigned int state; | 188 | unsigned int state; |
188 | struct delayed_work poll; | 189 | struct delayed_work poll; |
189 | struct delayed_work timeout; | 190 | struct delayed_work timeout; |
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) | |||
1475 | */ | 1476 | */ |
1476 | switch (sfp->sm_mod_state) { | 1477 | switch (sfp->sm_mod_state) { |
1477 | default: | 1478 | default: |
1478 | if (event == SFP_E_INSERT) { | 1479 | if (event == SFP_E_INSERT && sfp->attached) { |
1479 | sfp_module_tx_disable(sfp); | 1480 | sfp_module_tx_disable(sfp); |
1480 | sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); | 1481 | sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); |
1481 | } | 1482 | } |
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) | |||
1607 | mutex_unlock(&sfp->sm_mutex); | 1608 | mutex_unlock(&sfp->sm_mutex); |
1608 | } | 1609 | } |
1609 | 1610 | ||
1611 | static void sfp_attach(struct sfp *sfp) | ||
1612 | { | ||
1613 | sfp->attached = true; | ||
1614 | if (sfp->state & SFP_F_PRESENT) | ||
1615 | sfp_sm_event(sfp, SFP_E_INSERT); | ||
1616 | } | ||
1617 | |||
1618 | static void sfp_detach(struct sfp *sfp) | ||
1619 | { | ||
1620 | sfp->attached = false; | ||
1621 | sfp_sm_event(sfp, SFP_E_REMOVE); | ||
1622 | } | ||
1623 | |||
1610 | static void sfp_start(struct sfp *sfp) | 1624 | static void sfp_start(struct sfp *sfp) |
1611 | { | 1625 | { |
1612 | sfp_sm_event(sfp, SFP_E_DEV_UP); | 1626 | sfp_sm_event(sfp, SFP_E_DEV_UP); |
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, | |||
1667 | } | 1681 | } |
1668 | 1682 | ||
1669 | static const struct sfp_socket_ops sfp_module_ops = { | 1683 | static const struct sfp_socket_ops sfp_module_ops = { |
1684 | .attach = sfp_attach, | ||
1685 | .detach = sfp_detach, | ||
1670 | .start = sfp_start, | 1686 | .start = sfp_start, |
1671 | .stop = sfp_stop, | 1687 | .stop = sfp_stop, |
1672 | .module_info = sfp_module_info, | 1688 | .module_info = sfp_module_info, |
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev) | |||
1834 | dev_info(sfp->dev, "Host maximum power %u.%uW\n", | 1850 | dev_info(sfp->dev, "Host maximum power %u.%uW\n", |
1835 | sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); | 1851 | sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); |
1836 | 1852 | ||
1837 | sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); | ||
1838 | if (!sfp->sfp_bus) | ||
1839 | return -ENOMEM; | ||
1840 | |||
1841 | /* Get the initial state, and always signal TX disable, | 1853 | /* Get the initial state, and always signal TX disable, |
1842 | * since the network interface will not be up. | 1854 | * since the network interface will not be up. |
1843 | */ | 1855 | */ |
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev) | |||
1848 | sfp->state |= SFP_F_RATE_SELECT; | 1860 | sfp->state |= SFP_F_RATE_SELECT; |
1849 | sfp_set_state(sfp, sfp->state); | 1861 | sfp_set_state(sfp, sfp->state); |
1850 | sfp_module_tx_disable(sfp); | 1862 | sfp_module_tx_disable(sfp); |
1851 | rtnl_lock(); | ||
1852 | if (sfp->state & SFP_F_PRESENT) | ||
1853 | sfp_sm_event(sfp, SFP_E_INSERT); | ||
1854 | rtnl_unlock(); | ||
1855 | 1863 | ||
1856 | for (i = 0; i < GPIO_MAX; i++) { | 1864 | for (i = 0; i < GPIO_MAX; i++) { |
1857 | if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) | 1865 | if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) |
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev) | |||
1884 | dev_warn(sfp->dev, | 1892 | dev_warn(sfp->dev, |
1885 | "No tx_disable pin: SFP modules will always be emitting.\n"); | 1893 | "No tx_disable pin: SFP modules will always be emitting.\n"); |
1886 | 1894 | ||
1895 | sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); | ||
1896 | if (!sfp->sfp_bus) | ||
1897 | return -ENOMEM; | ||
1898 | |||
1887 | return 0; | 1899 | return 0; |
1888 | } | 1900 | } |
1889 | 1901 | ||
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 31b0acf337e2..64f54b0bbd8c 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h | |||
@@ -7,6 +7,8 @@ | |||
7 | struct sfp; | 7 | struct sfp; |
8 | 8 | ||
9 | struct sfp_socket_ops { | 9 | struct sfp_socket_ops { |
10 | void (*attach)(struct sfp *sfp); | ||
11 | void (*detach)(struct sfp *sfp); | ||
10 | void (*start)(struct sfp *sfp); | 12 | void (*start)(struct sfp *sfp); |
11 | void (*stop)(struct sfp *sfp); | 13 | void (*stop)(struct sfp *sfp); |
12 | int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); | 14 | int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); |
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 74a8782313cf..bd6084e315de 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c | |||
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) | |||
44 | u16 val = 0; | 44 | u16 val = 0; |
45 | int err; | 45 | int err; |
46 | 46 | ||
47 | err = priv->phy_drv->read_status(phydev); | 47 | if (priv->phy_drv->read_status) |
48 | err = priv->phy_drv->read_status(phydev); | ||
49 | else | ||
50 | err = genphy_read_status(phydev); | ||
48 | if (err < 0) | 51 | if (err < 0) |
49 | return err; | 52 | return err; |
50 | 53 | ||
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index afd9d25d1992..958f1cf67282 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team, | |||
256 | } | 256 | } |
257 | } | 257 | } |
258 | 258 | ||
259 | static bool __team_option_inst_tmp_find(const struct list_head *opts, | ||
260 | const struct team_option_inst *needle) | ||
261 | { | ||
262 | struct team_option_inst *opt_inst; | ||
263 | |||
264 | list_for_each_entry(opt_inst, opts, tmp_list) | ||
265 | if (opt_inst == needle) | ||
266 | return true; | ||
267 | return false; | ||
268 | } | ||
269 | |||
270 | static int __team_options_register(struct team *team, | 259 | static int __team_options_register(struct team *team, |
271 | const struct team_option *option, | 260 | const struct team_option *option, |
272 | size_t option_count) | 261 | size_t option_count) |
@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2460 | int err = 0; | 2449 | int err = 0; |
2461 | int i; | 2450 | int i; |
2462 | struct nlattr *nl_option; | 2451 | struct nlattr *nl_option; |
2463 | LIST_HEAD(opt_inst_list); | ||
2464 | 2452 | ||
2465 | rtnl_lock(); | 2453 | rtnl_lock(); |
2466 | 2454 | ||
@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2480 | struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; | 2468 | struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; |
2481 | struct nlattr *attr; | 2469 | struct nlattr *attr; |
2482 | struct nlattr *attr_data; | 2470 | struct nlattr *attr_data; |
2471 | LIST_HEAD(opt_inst_list); | ||
2483 | enum team_option_type opt_type; | 2472 | enum team_option_type opt_type; |
2484 | int opt_port_ifindex = 0; /* != 0 for per-port options */ | 2473 | int opt_port_ifindex = 0; /* != 0 for per-port options */ |
2485 | u32 opt_array_index = 0; | 2474 | u32 opt_array_index = 0; |
@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2584 | if (err) | 2573 | if (err) |
2585 | goto team_put; | 2574 | goto team_put; |
2586 | opt_inst->changed = true; | 2575 | opt_inst->changed = true; |
2587 | |||
2588 | /* dumb/evil user-space can send us duplicate opt, | ||
2589 | * keep only the last one | ||
2590 | */ | ||
2591 | if (__team_option_inst_tmp_find(&opt_inst_list, | ||
2592 | opt_inst)) | ||
2593 | continue; | ||
2594 | |||
2595 | list_add(&opt_inst->tmp_list, &opt_inst_list); | 2576 | list_add(&opt_inst->tmp_list, &opt_inst_list); |
2596 | } | 2577 | } |
2597 | if (!opt_found) { | 2578 | if (!opt_found) { |
2598 | err = -ENOENT; | 2579 | err = -ENOENT; |
2599 | goto team_put; | 2580 | goto team_put; |
2600 | } | 2581 | } |
2601 | } | ||
2602 | 2582 | ||
2603 | err = team_nl_send_event_options_get(team, &opt_inst_list); | 2583 | err = team_nl_send_event_options_get(team, &opt_inst_list); |
2584 | if (err) | ||
2585 | break; | ||
2586 | } | ||
2604 | 2587 | ||
2605 | team_put: | 2588 | team_put: |
2606 | team_nl_team_put(team); | 2589 | team_nl_team_put(team); |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 735ad838e2ba..18af2f8eee96 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = { | |||
1201 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1201 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
1202 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ | 1202 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ |
1203 | {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ | 1203 | {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ |
1204 | {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ | 1204 | {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */ |
1205 | {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ | 1205 | {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */ |
1206 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ | 1206 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ |
1207 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ | 1207 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ |
1208 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ | 1208 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 60dd1ec1665f..ada6baf8847a 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -557,6 +557,7 @@ enum spd_duplex { | |||
557 | /* MAC PASSTHRU */ | 557 | /* MAC PASSTHRU */ |
558 | #define AD_MASK 0xfee0 | 558 | #define AD_MASK 0xfee0 |
559 | #define BND_MASK 0x0004 | 559 | #define BND_MASK 0x0004 |
560 | #define BD_MASK 0x0001 | ||
560 | #define EFUSE 0xcfdb | 561 | #define EFUSE 0xcfdb |
561 | #define PASS_THRU_MASK 0x1 | 562 | #define PASS_THRU_MASK 0x1 |
562 | 563 | ||
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) | |||
1176 | return -ENODEV; | 1177 | return -ENODEV; |
1177 | } | 1178 | } |
1178 | } else { | 1179 | } else { |
1179 | /* test for RTL8153-BND */ | 1180 | /* test for RTL8153-BND and RTL8153-BD */ |
1180 | ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); | 1181 | ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); |
1181 | if ((ocp_data & BND_MASK) == 0) { | 1182 | if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK)) { |
1182 | netif_dbg(tp, probe, tp->netdev, | 1183 | netif_dbg(tp, probe, tp->netdev, |
1183 | "Invalid variant for MAC pass through\n"); | 1184 | "Invalid variant for MAC pass through\n"); |
1184 | return -ENODEV; | 1185 | return -ENODEV; |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5209ee9aac47..2aae11feff0c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2219,7 +2219,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2219 | struct pcpu_sw_netstats *tx_stats, *rx_stats; | 2219 | struct pcpu_sw_netstats *tx_stats, *rx_stats; |
2220 | union vxlan_addr loopback; | 2220 | union vxlan_addr loopback; |
2221 | union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; | 2221 | union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; |
2222 | struct net_device *dev = skb->dev; | 2222 | struct net_device *dev; |
2223 | int len = skb->len; | 2223 | int len = skb->len; |
2224 | 2224 | ||
2225 | tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); | 2225 | tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); |
@@ -2239,9 +2239,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2239 | #endif | 2239 | #endif |
2240 | } | 2240 | } |
2241 | 2241 | ||
2242 | rcu_read_lock(); | ||
2243 | dev = skb->dev; | ||
2244 | if (unlikely(!(dev->flags & IFF_UP))) { | ||
2245 | kfree_skb(skb); | ||
2246 | goto drop; | ||
2247 | } | ||
2248 | |||
2242 | if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) | 2249 | if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) |
2243 | vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, | 2250 | vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); |
2244 | vni); | ||
2245 | 2251 | ||
2246 | u64_stats_update_begin(&tx_stats->syncp); | 2252 | u64_stats_update_begin(&tx_stats->syncp); |
2247 | tx_stats->tx_packets++; | 2253 | tx_stats->tx_packets++; |
@@ -2254,8 +2260,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2254 | rx_stats->rx_bytes += len; | 2260 | rx_stats->rx_bytes += len; |
2255 | u64_stats_update_end(&rx_stats->syncp); | 2261 | u64_stats_update_end(&rx_stats->syncp); |
2256 | } else { | 2262 | } else { |
2263 | drop: | ||
2257 | dev->stats.rx_dropped++; | 2264 | dev->stats.rx_dropped++; |
2258 | } | 2265 | } |
2266 | rcu_read_unlock(); | ||
2259 | } | 2267 | } |
2260 | 2268 | ||
2261 | static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, | 2269 | static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0e6b43bb4678..a5ea3ba495a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | |||
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = { | |||
158 | .get_txpower = mt76x02_get_txpower, | 158 | .get_txpower = mt76x02_get_txpower, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | static int mt76x0u_register_device(struct mt76x02_dev *dev) | 161 | static int mt76x0u_init_hardware(struct mt76x02_dev *dev) |
162 | { | 162 | { |
163 | struct ieee80211_hw *hw = dev->mt76.hw; | ||
164 | int err; | 163 | int err; |
165 | 164 | ||
166 | err = mt76u_alloc_queues(&dev->mt76); | ||
167 | if (err < 0) | ||
168 | goto out_err; | ||
169 | |||
170 | err = mt76u_mcu_init_rx(&dev->mt76); | ||
171 | if (err < 0) | ||
172 | goto out_err; | ||
173 | |||
174 | mt76x0_chip_onoff(dev, true, true); | 165 | mt76x0_chip_onoff(dev, true, true); |
175 | if (!mt76x02_wait_for_mac(&dev->mt76)) { | 166 | |
176 | err = -ETIMEDOUT; | 167 | if (!mt76x02_wait_for_mac(&dev->mt76)) |
177 | goto out_err; | 168 | return -ETIMEDOUT; |
178 | } | ||
179 | 169 | ||
180 | err = mt76x0u_mcu_init(dev); | 170 | err = mt76x0u_mcu_init(dev); |
181 | if (err < 0) | 171 | if (err < 0) |
182 | goto out_err; | 172 | return err; |
183 | 173 | ||
184 | mt76x0_init_usb_dma(dev); | 174 | mt76x0_init_usb_dma(dev); |
185 | err = mt76x0_init_hardware(dev); | 175 | err = mt76x0_init_hardware(dev); |
186 | if (err < 0) | 176 | if (err < 0) |
187 | goto out_err; | 177 | return err; |
188 | 178 | ||
189 | mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); | 179 | mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); |
190 | mt76_wr(dev, MT_TXOP_CTRL_CFG, | 180 | mt76_wr(dev, MT_TXOP_CTRL_CFG, |
191 | FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | | 181 | FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | |
192 | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); | 182 | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); |
193 | 183 | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static int mt76x0u_register_device(struct mt76x02_dev *dev) | ||
188 | { | ||
189 | struct ieee80211_hw *hw = dev->mt76.hw; | ||
190 | int err; | ||
191 | |||
192 | err = mt76u_alloc_queues(&dev->mt76); | ||
193 | if (err < 0) | ||
194 | goto out_err; | ||
195 | |||
196 | err = mt76u_mcu_init_rx(&dev->mt76); | ||
197 | if (err < 0) | ||
198 | goto out_err; | ||
199 | |||
200 | err = mt76x0u_init_hardware(dev); | ||
201 | if (err < 0) | ||
202 | goto out_err; | ||
203 | |||
194 | err = mt76x0_register_device(dev); | 204 | err = mt76x0_register_device(dev); |
195 | if (err < 0) | 205 | if (err < 0) |
196 | goto out_err; | 206 | goto out_err; |
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, | |||
301 | 311 | ||
302 | mt76u_stop_queues(&dev->mt76); | 312 | mt76u_stop_queues(&dev->mt76); |
303 | mt76x0u_mac_stop(dev); | 313 | mt76x0u_mac_stop(dev); |
314 | clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); | ||
315 | mt76x0_chip_onoff(dev, false, false); | ||
304 | usb_kill_urb(usb->mcu.res.urb); | 316 | usb_kill_urb(usb->mcu.res.urb); |
305 | 317 | ||
306 | return 0; | 318 | return 0; |
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) | |||
328 | tasklet_enable(&usb->rx_tasklet); | 340 | tasklet_enable(&usb->rx_tasklet); |
329 | tasklet_enable(&usb->tx_tasklet); | 341 | tasklet_enable(&usb->tx_tasklet); |
330 | 342 | ||
331 | ret = mt76x0_init_hardware(dev); | 343 | ret = mt76x0u_init_hardware(dev); |
332 | if (ret) | 344 | if (ret) |
333 | goto err; | 345 | goto err; |
334 | 346 | ||
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 022ea1ee63f8..7fee665ec45e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -2560,15 +2560,15 @@ static void nvme_reset_work(struct work_struct *work) | |||
2560 | mutex_lock(&dev->shutdown_lock); | 2560 | mutex_lock(&dev->shutdown_lock); |
2561 | result = nvme_pci_enable(dev); | 2561 | result = nvme_pci_enable(dev); |
2562 | if (result) | 2562 | if (result) |
2563 | goto out; | 2563 | goto out_unlock; |
2564 | 2564 | ||
2565 | result = nvme_pci_configure_admin_queue(dev); | 2565 | result = nvme_pci_configure_admin_queue(dev); |
2566 | if (result) | 2566 | if (result) |
2567 | goto out; | 2567 | goto out_unlock; |
2568 | 2568 | ||
2569 | result = nvme_alloc_admin_tags(dev); | 2569 | result = nvme_alloc_admin_tags(dev); |
2570 | if (result) | 2570 | if (result) |
2571 | goto out; | 2571 | goto out_unlock; |
2572 | 2572 | ||
2573 | /* | 2573 | /* |
2574 | * Limit the max command size to prevent iod->sg allocations going | 2574 | * Limit the max command size to prevent iod->sg allocations going |
@@ -2651,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work) | |||
2651 | nvme_start_ctrl(&dev->ctrl); | 2651 | nvme_start_ctrl(&dev->ctrl); |
2652 | return; | 2652 | return; |
2653 | 2653 | ||
2654 | out_unlock: | ||
2655 | mutex_unlock(&dev->shutdown_lock); | ||
2654 | out: | 2656 | out: |
2655 | nvme_remove_dead_ctrl(dev, result); | 2657 | nvme_remove_dead_ctrl(dev, result); |
2656 | } | 2658 | } |
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index c69ca95b1ad5..0f140a802137 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c | |||
@@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = { | |||
693 | 693 | ||
694 | static const char * const sdxc_a_groups[] = { | 694 | static const char * const sdxc_a_groups[] = { |
695 | "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", | 695 | "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", |
696 | "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" | 696 | "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a" |
697 | }; | 697 | }; |
698 | 698 | ||
699 | static const char * const pcm_a_groups[] = { | 699 | static const char * const pcm_a_groups[] = { |
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c index 7aae52a09ff0..4ffd56ff809e 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs404.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c | |||
@@ -79,7 +79,7 @@ enum { | |||
79 | .intr_cfg_reg = 0, \ | 79 | .intr_cfg_reg = 0, \ |
80 | .intr_status_reg = 0, \ | 80 | .intr_status_reg = 0, \ |
81 | .intr_target_reg = 0, \ | 81 | .intr_target_reg = 0, \ |
82 | .tile = NORTH, \ | 82 | .tile = SOUTH, \ |
83 | .mux_bit = -1, \ | 83 | .mux_bit = -1, \ |
84 | .pull_bit = pull, \ | 84 | .pull_bit = pull, \ |
85 | .drv_bit = drv, \ | 85 | .drv_bit = drv, \ |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 4e7b55a14b1a..6e294b4d3635 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) | |||
4469 | usrparm.psf_data &= 0x7fffffffULL; | 4469 | usrparm.psf_data &= 0x7fffffffULL; |
4470 | usrparm.rssd_result &= 0x7fffffffULL; | 4470 | usrparm.rssd_result &= 0x7fffffffULL; |
4471 | } | 4471 | } |
4472 | /* at least 2 bytes are accessed and should be allocated */ | ||
4473 | if (usrparm.psf_data_len < 2) { | ||
4474 | DBF_DEV_EVENT(DBF_WARNING, device, | ||
4475 | "Symmetrix ioctl invalid data length %d", | ||
4476 | usrparm.psf_data_len); | ||
4477 | rc = -EINVAL; | ||
4478 | goto out; | ||
4479 | } | ||
4472 | /* alloc I/O data area */ | 4480 | /* alloc I/O data area */ |
4473 | psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); | 4481 | psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); |
4474 | rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); | 4482 | rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 48ea0004a56d..5a699746c357 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr) | |||
248 | static inline int ap_test_config_card_id(unsigned int id) | 248 | static inline int ap_test_config_card_id(unsigned int id) |
249 | { | 249 | { |
250 | if (!ap_configuration) /* QCI not supported */ | 250 | if (!ap_configuration) /* QCI not supported */ |
251 | return 1; | 251 | /* only ids 0...3F may be probed */ |
252 | return id < 0x40 ? 1 : 0; | ||
252 | return ap_test_config(ap_configuration->apm, id); | 253 | return ap_test_config(ap_configuration->apm, id); |
253 | } | 254 | } |
254 | 255 | ||
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aeeb0144bd55..8d1acc802a67 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, | |||
1785 | 1785 | ||
1786 | /* Issue Marker IOCB */ | 1786 | /* Issue Marker IOCB */ |
1787 | qla2x00_marker(vha, vha->hw->req_q_map[0], | 1787 | qla2x00_marker(vha, vha->hw->req_q_map[0], |
1788 | vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, | 1788 | vha->hw->rsp_q_map[0], fcport->loop_id, lun, |
1789 | flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); | 1789 | flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); |
1790 | } | 1790 | } |
1791 | 1791 | ||
1792 | done_free_sp: | 1792 | done_free_sp: |
1793 | sp->free(sp); | 1793 | sp->free(sp); |
1794 | sp->fcport->flags &= ~FCF_ASYNC_SENT; | 1794 | fcport->flags &= ~FCF_ASYNC_SENT; |
1795 | done: | 1795 | done: |
1796 | return rval; | 1796 | return rval; |
1797 | } | 1797 | } |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b2da8a00ec33..5464d467e23e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2951,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) | |||
2951 | if (rot == 1) { | 2951 | if (rot == 1) { |
2952 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | 2952 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
2953 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); | 2953 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); |
2954 | } else { | ||
2955 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
2956 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
2957 | } | 2954 | } |
2958 | 2955 | ||
2959 | if (sdkp->device->type == TYPE_ZBC) { | 2956 | if (sdkp->device->type == TYPE_ZBC) { |
@@ -3090,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
3090 | if (sdkp->media_present) { | 3087 | if (sdkp->media_present) { |
3091 | sd_read_capacity(sdkp, buffer); | 3088 | sd_read_capacity(sdkp, buffer); |
3092 | 3089 | ||
3090 | /* | ||
3091 | * set the default to rotational. All non-rotational devices | ||
3092 | * support the block characteristics VPD page, which will | ||
3093 | * cause this to be updated correctly and any device which | ||
3094 | * doesn't support it should be treated as rotational. | ||
3095 | */ | ||
3096 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
3097 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
3098 | |||
3093 | if (scsi_device_supports_vpd(sdp)) { | 3099 | if (scsi_device_supports_vpd(sdp)) { |
3094 | sd_read_block_provisioning(sdkp); | 3100 | sd_read_block_provisioning(sdkp); |
3095 | sd_read_block_limits(sdkp); | 3101 | sd_read_block_limits(sdkp); |
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index dfd23245f778..6fff16113628 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) | |||
774 | 774 | ||
775 | cdev = __cpufreq_cooling_register(np, policy, capacitance); | 775 | cdev = __cpufreq_cooling_register(np, policy, capacitance); |
776 | if (IS_ERR(cdev)) { | 776 | if (IS_ERR(cdev)) { |
777 | pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", | 777 | pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n", |
778 | policy->cpu, PTR_ERR(cdev)); | 778 | policy->cpu, PTR_ERR(cdev)); |
779 | cdev = NULL; | 779 | cdev = NULL; |
780 | } | 780 | } |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 4bfdb4a1e47d..2df059cc07e2 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
@@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) | |||
867 | 867 | ||
868 | ret = of_property_read_u32(np, "polling-delay-passive", &prop); | 868 | ret = of_property_read_u32(np, "polling-delay-passive", &prop); |
869 | if (ret < 0) { | 869 | if (ret < 0) { |
870 | pr_err("missing polling-delay-passive property\n"); | 870 | pr_err("%pOFn: missing polling-delay-passive property\n", np); |
871 | goto free_tz; | 871 | goto free_tz; |
872 | } | 872 | } |
873 | tz->passive_delay = prop; | 873 | tz->passive_delay = prop; |
874 | 874 | ||
875 | ret = of_property_read_u32(np, "polling-delay", &prop); | 875 | ret = of_property_read_u32(np, "polling-delay", &prop); |
876 | if (ret < 0) { | 876 | if (ret < 0) { |
877 | pr_err("missing polling-delay property\n"); | 877 | pr_err("%pOFn: missing polling-delay property\n", np); |
878 | goto free_tz; | 878 | goto free_tz; |
879 | } | 879 | } |
880 | tz->polling_delay = prop; | 880 | tz->polling_delay = prop; |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 24a129fcdd61..a2e5dc7716e2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) | |||
1788 | 1788 | ||
1789 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, | 1789 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, |
1790 | len, iov, 64, VHOST_ACCESS_WO); | 1790 | len, iov, 64, VHOST_ACCESS_WO); |
1791 | if (ret) | 1791 | if (ret < 0) |
1792 | return ret; | 1792 | return ret; |
1793 | 1793 | ||
1794 | for (i = 0; i < ret; i++) { | 1794 | for (i = 0; i < ret; i++) { |
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index d0078cbb718b..e996174cbfc0 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c | |||
@@ -14,13 +14,30 @@ | |||
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
16 | 16 | ||
17 | static inline bool spacetab(char c) { return c == ' ' || c == '\t'; } | ||
18 | static inline char *next_non_spacetab(char *first, const char *last) | ||
19 | { | ||
20 | for (; first <= last; first++) | ||
21 | if (!spacetab(*first)) | ||
22 | return first; | ||
23 | return NULL; | ||
24 | } | ||
25 | static inline char *next_terminator(char *first, const char *last) | ||
26 | { | ||
27 | for (; first <= last; first++) | ||
28 | if (spacetab(*first) || !*first) | ||
29 | return first; | ||
30 | return NULL; | ||
31 | } | ||
32 | |||
17 | static int load_script(struct linux_binprm *bprm) | 33 | static int load_script(struct linux_binprm *bprm) |
18 | { | 34 | { |
19 | const char *i_arg, *i_name; | 35 | const char *i_arg, *i_name; |
20 | char *cp; | 36 | char *cp, *buf_end; |
21 | struct file *file; | 37 | struct file *file; |
22 | int retval; | 38 | int retval; |
23 | 39 | ||
40 | /* Not ours to exec if we don't start with "#!". */ | ||
24 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) | 41 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) |
25 | return -ENOEXEC; | 42 | return -ENOEXEC; |
26 | 43 | ||
@@ -33,23 +50,41 @@ static int load_script(struct linux_binprm *bprm) | |||
33 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) | 50 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) |
34 | return -ENOENT; | 51 | return -ENOENT; |
35 | 52 | ||
36 | /* | 53 | /* Release since we are not mapping a binary into memory. */ |
37 | * This section does the #! interpretation. | ||
38 | * Sorta complicated, but hopefully it will work. -TYT | ||
39 | */ | ||
40 | |||
41 | allow_write_access(bprm->file); | 54 | allow_write_access(bprm->file); |
42 | fput(bprm->file); | 55 | fput(bprm->file); |
43 | bprm->file = NULL; | 56 | bprm->file = NULL; |
44 | 57 | ||
45 | for (cp = bprm->buf+2;; cp++) { | 58 | /* |
46 | if (cp >= bprm->buf + BINPRM_BUF_SIZE) | 59 | * This section handles parsing the #! line into separate |
60 | * interpreter path and argument strings. We must be careful | ||
61 | * because bprm->buf is not yet guaranteed to be NUL-terminated | ||
62 | * (though the buffer will have trailing NUL padding when the | ||
63 | * file size was smaller than the buffer size). | ||
64 | * | ||
65 | * We do not want to exec a truncated interpreter path, so either | ||
66 | * we find a newline (which indicates nothing is truncated), or | ||
67 | * we find a space/tab/NUL after the interpreter path (which | ||
68 | * itself may be preceded by spaces/tabs). Truncating the | ||
69 | * arguments is fine: the interpreter can re-read the script to | ||
70 | * parse them on its own. | ||
71 | */ | ||
72 | buf_end = bprm->buf + sizeof(bprm->buf) - 1; | ||
73 | cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n'); | ||
74 | if (!cp) { | ||
75 | cp = next_non_spacetab(bprm->buf + 2, buf_end); | ||
76 | if (!cp) | ||
77 | return -ENOEXEC; /* Entire buf is spaces/tabs */ | ||
78 | /* | ||
79 | * If there is no later space/tab/NUL we must assume the | ||
80 | * interpreter path is truncated. | ||
81 | */ | ||
82 | if (!next_terminator(cp, buf_end)) | ||
47 | return -ENOEXEC; | 83 | return -ENOEXEC; |
48 | if (!*cp || (*cp == '\n')) | 84 | cp = buf_end; |
49 | break; | ||
50 | } | 85 | } |
86 | /* NUL-terminate the buffer and any trailing spaces/tabs. */ | ||
51 | *cp = '\0'; | 87 | *cp = '\0'; |
52 | |||
53 | while (cp > bprm->buf) { | 88 | while (cp > bprm->buf) { |
54 | cp--; | 89 | cp--; |
55 | if ((*cp == ' ') || (*cp == '\t')) | 90 | if ((*cp == ' ') || (*cp == '\t')) |
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 712f00995390..5508baa11bb6 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c | |||
@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
116 | goto out; | 116 | goto out; |
117 | } | 117 | } |
118 | 118 | ||
119 | ret = file_write_and_wait_range(file, start, end); | ||
120 | if (ret) | ||
121 | return ret; | ||
122 | |||
123 | if (!journal) { | 119 | if (!journal) { |
124 | struct writeback_control wbc = { | 120 | ret = __generic_file_fsync(file, start, end, datasync); |
125 | .sync_mode = WB_SYNC_ALL | ||
126 | }; | ||
127 | |||
128 | ret = ext4_write_inode(inode, &wbc); | ||
129 | if (!ret) | 121 | if (!ret) |
130 | ret = ext4_sync_parent(inode); | 122 | ret = ext4_sync_parent(inode); |
131 | if (test_opt(inode->i_sb, BARRIER)) | 123 | if (test_opt(inode->i_sb, BARRIER)) |
@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
133 | goto out; | 125 | goto out; |
134 | } | 126 | } |
135 | 127 | ||
128 | ret = file_write_and_wait_range(file, start, end); | ||
129 | if (ret) | ||
130 | return ret; | ||
136 | /* | 131 | /* |
137 | * data=writeback,ordered: | 132 | * data=writeback,ordered: |
138 | * The caller's filemap_fdatawrite()/wait will sync the data. | 133 | * The caller's filemap_fdatawrite()/wait will sync the data. |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index f15b4c57c4bd..78510ab91835 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include "util.h" | 28 | #include "util.h" |
29 | #include "trans.h" | 29 | #include "trans.h" |
30 | #include "dir.h" | 30 | #include "dir.h" |
31 | #include "lops.h" | ||
32 | 31 | ||
33 | struct workqueue_struct *gfs2_freeze_wq; | 32 | struct workqueue_struct *gfs2_freeze_wq; |
34 | 33 | ||
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 5bfaf381921a..b8830fda51e8 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -733,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, | |||
733 | lh->lh_crc = cpu_to_be32(crc); | 733 | lh->lh_crc = cpu_to_be32(crc); |
734 | 734 | ||
735 | gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); | 735 | gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); |
736 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags); | 736 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, op_flags); |
737 | log_flush_wait(sdp); | 737 | log_flush_wait(sdp); |
738 | } | 738 | } |
739 | 739 | ||
@@ -810,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) | |||
810 | 810 | ||
811 | gfs2_ordered_write(sdp); | 811 | gfs2_ordered_write(sdp); |
812 | lops_before_commit(sdp, tr); | 812 | lops_before_commit(sdp, tr); |
813 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE); | 813 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, 0); |
814 | 814 | ||
815 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { | 815 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { |
816 | log_flush_wait(sdp); | 816 | log_flush_wait(sdp); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 94dcab655bc0..2295042bc625 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c | |||
@@ -17,9 +17,7 @@ | |||
17 | #include <linux/bio.h> | 17 | #include <linux/bio.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/list_sort.h> | 19 | #include <linux/list_sort.h> |
20 | #include <linux/blkdev.h> | ||
21 | 20 | ||
22 | #include "bmap.h" | ||
23 | #include "dir.h" | 21 | #include "dir.h" |
24 | #include "gfs2.h" | 22 | #include "gfs2.h" |
25 | #include "incore.h" | 23 | #include "incore.h" |
@@ -195,6 +193,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, | |||
195 | /** | 193 | /** |
196 | * gfs2_end_log_write - end of i/o to the log | 194 | * gfs2_end_log_write - end of i/o to the log |
197 | * @bio: The bio | 195 | * @bio: The bio |
196 | * @error: Status of i/o request | ||
198 | * | 197 | * |
199 | * Each bio_vec contains either data from the pagecache or data | 198 | * Each bio_vec contains either data from the pagecache or data |
200 | * relating to the log itself. Here we iterate over the bio_vec | 199 | * relating to the log itself. Here we iterate over the bio_vec |
@@ -231,19 +230,20 @@ static void gfs2_end_log_write(struct bio *bio) | |||
231 | /** | 230 | /** |
232 | * gfs2_log_submit_bio - Submit any pending log bio | 231 | * gfs2_log_submit_bio - Submit any pending log bio |
233 | * @biop: Address of the bio pointer | 232 | * @biop: Address of the bio pointer |
234 | * @opf: REQ_OP | op_flags | 233 | * @op: REQ_OP |
234 | * @op_flags: req_flag_bits | ||
235 | * | 235 | * |
236 | * Submit any pending part-built or full bio to the block device. If | 236 | * Submit any pending part-built or full bio to the block device. If |
237 | * there is no pending bio, then this is a no-op. | 237 | * there is no pending bio, then this is a no-op. |
238 | */ | 238 | */ |
239 | 239 | ||
240 | void gfs2_log_submit_bio(struct bio **biop, int opf) | 240 | void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags) |
241 | { | 241 | { |
242 | struct bio *bio = *biop; | 242 | struct bio *bio = *biop; |
243 | if (bio) { | 243 | if (bio) { |
244 | struct gfs2_sbd *sdp = bio->bi_private; | 244 | struct gfs2_sbd *sdp = bio->bi_private; |
245 | atomic_inc(&sdp->sd_log_in_flight); | 245 | atomic_inc(&sdp->sd_log_in_flight); |
246 | bio->bi_opf = opf; | 246 | bio_set_op_attrs(bio, op, op_flags); |
247 | submit_bio(bio); | 247 | submit_bio(bio); |
248 | *biop = NULL; | 248 | *biop = NULL; |
249 | } | 249 | } |
@@ -304,7 +304,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, | |||
304 | nblk >>= sdp->sd_fsb2bb_shift; | 304 | nblk >>= sdp->sd_fsb2bb_shift; |
305 | if (blkno == nblk && !flush) | 305 | if (blkno == nblk && !flush) |
306 | return bio; | 306 | return bio; |
307 | gfs2_log_submit_bio(biop, op); | 307 | gfs2_log_submit_bio(biop, op, 0); |
308 | } | 308 | } |
309 | 309 | ||
310 | *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); | 310 | *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); |
@@ -375,184 +375,6 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) | |||
375 | gfs2_log_bmap(sdp)); | 375 | gfs2_log_bmap(sdp)); |
376 | } | 376 | } |
377 | 377 | ||
378 | /** | ||
379 | * gfs2_end_log_read - end I/O callback for reads from the log | ||
380 | * @bio: The bio | ||
381 | * | ||
382 | * Simply unlock the pages in the bio. The main thread will wait on them and | ||
383 | * process them in order as necessary. | ||
384 | */ | ||
385 | |||
386 | static void gfs2_end_log_read(struct bio *bio) | ||
387 | { | ||
388 | struct page *page; | ||
389 | struct bio_vec *bvec; | ||
390 | int i; | ||
391 | |||
392 | bio_for_each_segment_all(bvec, bio, i) { | ||
393 | page = bvec->bv_page; | ||
394 | if (bio->bi_status) { | ||
395 | int err = blk_status_to_errno(bio->bi_status); | ||
396 | |||
397 | SetPageError(page); | ||
398 | mapping_set_error(page->mapping, err); | ||
399 | } | ||
400 | unlock_page(page); | ||
401 | } | ||
402 | |||
403 | bio_put(bio); | ||
404 | } | ||
405 | |||
406 | /** | ||
407 | * gfs2_jhead_pg_srch - Look for the journal head in a given page. | ||
408 | * @jd: The journal descriptor | ||
409 | * @page: The page to look in | ||
410 | * | ||
411 | * Returns: 1 if found, 0 otherwise. | ||
412 | */ | ||
413 | |||
414 | static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, | ||
415 | struct gfs2_log_header_host *head, | ||
416 | struct page *page) | ||
417 | { | ||
418 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | ||
419 | struct gfs2_log_header_host uninitialized_var(lh); | ||
420 | void *kaddr = kmap_atomic(page); | ||
421 | unsigned int offset; | ||
422 | bool ret = false; | ||
423 | |||
424 | for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) { | ||
425 | if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) { | ||
426 | if (lh.lh_sequence > head->lh_sequence) | ||
427 | *head = lh; | ||
428 | else { | ||
429 | ret = true; | ||
430 | break; | ||
431 | } | ||
432 | } | ||
433 | } | ||
434 | kunmap_atomic(kaddr); | ||
435 | return ret; | ||
436 | } | ||
437 | |||
438 | /** | ||
439 | * gfs2_jhead_process_page - Search/cleanup a page | ||
440 | * @jd: The journal descriptor | ||
441 | * @index: Index of the page to look into | ||
442 | * @done: If set, perform only cleanup, else search and set if found. | ||
443 | * | ||
444 | * Find the page with 'index' in the journal's mapping. Search the page for | ||
445 | * the journal head if requested (cleanup == false). Release refs on the | ||
446 | * page so the page cache can reclaim it (put_page() twice). We grabbed a | ||
447 | * reference on this page two times, first when we did a find_or_create_page() | ||
448 | * to obtain the page to add it to the bio and second when we do a | ||
449 | * find_get_page() here to get the page to wait on while I/O on it is being | ||
450 | * completed. | ||
451 | * This function is also used to free up a page we might've grabbed but not | ||
452 | * used. Maybe we added it to a bio, but not submitted it for I/O. Or we | ||
453 | * submitted the I/O, but we already found the jhead so we only need to drop | ||
454 | * our references to the page. | ||
455 | */ | ||
456 | |||
457 | static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, | ||
458 | struct gfs2_log_header_host *head, | ||
459 | bool *done) | ||
460 | { | ||
461 | struct page *page; | ||
462 | |||
463 | page = find_get_page(jd->jd_inode->i_mapping, index); | ||
464 | wait_on_page_locked(page); | ||
465 | |||
466 | if (PageError(page)) | ||
467 | *done = true; | ||
468 | |||
469 | if (!*done) | ||
470 | *done = gfs2_jhead_pg_srch(jd, head, page); | ||
471 | |||
472 | put_page(page); /* Once for find_get_page */ | ||
473 | put_page(page); /* Once more for find_or_create_page */ | ||
474 | } | ||
475 | |||
476 | /** | ||
477 | * gfs2_find_jhead - find the head of a log | ||
478 | * @jd: The journal descriptor | ||
479 | * @head: The log descriptor for the head of the log is returned here | ||
480 | * | ||
481 | * Do a search of a journal by reading it in large chunks using bios and find | ||
482 | * the valid log entry with the highest sequence number. (i.e. the log head) | ||
483 | * | ||
484 | * Returns: 0 on success, errno otherwise | ||
485 | */ | ||
486 | |||
487 | int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
488 | { | ||
489 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | ||
490 | struct address_space *mapping = jd->jd_inode->i_mapping; | ||
491 | struct gfs2_journal_extent *je; | ||
492 | u32 block, read_idx = 0, submit_idx = 0, index = 0; | ||
493 | int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; | ||
494 | int blocks_per_page = 1 << shift, sz, ret = 0; | ||
495 | struct bio *bio = NULL; | ||
496 | struct page *page; | ||
497 | bool done = false; | ||
498 | errseq_t since; | ||
499 | |||
500 | memset(head, 0, sizeof(*head)); | ||
501 | if (list_empty(&jd->extent_list)) | ||
502 | gfs2_map_journal_extents(sdp, jd); | ||
503 | |||
504 | since = filemap_sample_wb_err(mapping); | ||
505 | list_for_each_entry(je, &jd->extent_list, list) { | ||
506 | for (block = 0; block < je->blocks; block += blocks_per_page) { | ||
507 | index = (je->lblock + block) >> shift; | ||
508 | |||
509 | page = find_or_create_page(mapping, index, GFP_NOFS); | ||
510 | if (!page) { | ||
511 | ret = -ENOMEM; | ||
512 | done = true; | ||
513 | goto out; | ||
514 | } | ||
515 | |||
516 | if (bio) { | ||
517 | sz = bio_add_page(bio, page, PAGE_SIZE, 0); | ||
518 | if (sz == PAGE_SIZE) | ||
519 | goto page_added; | ||
520 | submit_idx = index; | ||
521 | submit_bio(bio); | ||
522 | bio = NULL; | ||
523 | } | ||
524 | |||
525 | bio = gfs2_log_alloc_bio(sdp, | ||
526 | je->dblock + (index << shift), | ||
527 | gfs2_end_log_read); | ||
528 | bio->bi_opf = REQ_OP_READ; | ||
529 | sz = bio_add_page(bio, page, PAGE_SIZE, 0); | ||
530 | gfs2_assert_warn(sdp, sz == PAGE_SIZE); | ||
531 | |||
532 | page_added: | ||
533 | if (submit_idx <= read_idx + BIO_MAX_PAGES) { | ||
534 | /* Keep at least one bio in flight */ | ||
535 | continue; | ||
536 | } | ||
537 | |||
538 | gfs2_jhead_process_page(jd, read_idx++, head, &done); | ||
539 | if (done) | ||
540 | goto out; /* found */ | ||
541 | } | ||
542 | } | ||
543 | |||
544 | out: | ||
545 | if (bio) | ||
546 | submit_bio(bio); | ||
547 | while (read_idx <= index) | ||
548 | gfs2_jhead_process_page(jd, read_idx++, head, &done); | ||
549 | |||
550 | if (!ret) | ||
551 | ret = filemap_check_wb_err(mapping, since); | ||
552 | |||
553 | return ret; | ||
554 | } | ||
555 | |||
556 | static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, | 378 | static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, |
557 | u32 ld_length, u32 ld_data1) | 379 | u32 ld_length, u32 ld_data1) |
558 | { | 380 | { |
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 331160fc568b..711c4d89c063 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h | |||
@@ -30,10 +30,8 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp); | |||
30 | extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, | 30 | extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, |
31 | unsigned size, unsigned offset, u64 blkno); | 31 | unsigned size, unsigned offset, u64 blkno); |
32 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); | 32 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); |
33 | extern void gfs2_log_submit_bio(struct bio **biop, int opf); | 33 | extern void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags); |
34 | extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); | 34 | extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); |
35 | extern int gfs2_find_jhead(struct gfs2_jdesc *jd, | ||
36 | struct gfs2_log_header_host *head); | ||
37 | 35 | ||
38 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) | 36 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) |
39 | { | 37 | { |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 1179763f6370..b041cb8ae383 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include "dir.h" | 41 | #include "dir.h" |
42 | #include "meta_io.h" | 42 | #include "meta_io.h" |
43 | #include "trace_gfs2.h" | 43 | #include "trace_gfs2.h" |
44 | #include "lops.h" | ||
45 | 44 | ||
46 | #define DO 0 | 45 | #define DO 0 |
47 | #define UNDO 1 | 46 | #define UNDO 1 |
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 7389e445a7a7..2dac43065382 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c | |||
@@ -182,6 +182,129 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, | |||
182 | } | 182 | } |
183 | 183 | ||
184 | /** | 184 | /** |
185 | * find_good_lh - find a good log header | ||
186 | * @jd: the journal | ||
187 | * @blk: the segment to start searching from | ||
188 | * @lh: the log header to fill in | ||
189 | * @forward: if true search forward in the log, else search backward | ||
190 | * | ||
191 | * Call get_log_header() to get a log header for a segment, but if the | ||
192 | * segment is bad, either scan forward or backward until we find a good one. | ||
193 | * | ||
194 | * Returns: errno | ||
195 | */ | ||
196 | |||
197 | static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, | ||
198 | struct gfs2_log_header_host *head) | ||
199 | { | ||
200 | unsigned int orig_blk = *blk; | ||
201 | int error; | ||
202 | |||
203 | for (;;) { | ||
204 | error = get_log_header(jd, *blk, head); | ||
205 | if (error <= 0) | ||
206 | return error; | ||
207 | |||
208 | if (++*blk == jd->jd_blocks) | ||
209 | *blk = 0; | ||
210 | |||
211 | if (*blk == orig_blk) { | ||
212 | gfs2_consist_inode(GFS2_I(jd->jd_inode)); | ||
213 | return -EIO; | ||
214 | } | ||
215 | } | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * jhead_scan - make sure we've found the head of the log | ||
220 | * @jd: the journal | ||
221 | * @head: this is filled in with the log descriptor of the head | ||
222 | * | ||
223 | * At this point, seg and lh should be either the head of the log or just | ||
224 | * before. Scan forward until we find the head. | ||
225 | * | ||
226 | * Returns: errno | ||
227 | */ | ||
228 | |||
229 | static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
230 | { | ||
231 | unsigned int blk = head->lh_blkno; | ||
232 | struct gfs2_log_header_host lh; | ||
233 | int error; | ||
234 | |||
235 | for (;;) { | ||
236 | if (++blk == jd->jd_blocks) | ||
237 | blk = 0; | ||
238 | |||
239 | error = get_log_header(jd, blk, &lh); | ||
240 | if (error < 0) | ||
241 | return error; | ||
242 | if (error == 1) | ||
243 | continue; | ||
244 | |||
245 | if (lh.lh_sequence == head->lh_sequence) { | ||
246 | gfs2_consist_inode(GFS2_I(jd->jd_inode)); | ||
247 | return -EIO; | ||
248 | } | ||
249 | if (lh.lh_sequence < head->lh_sequence) | ||
250 | break; | ||
251 | |||
252 | *head = lh; | ||
253 | } | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * gfs2_find_jhead - find the head of a log | ||
260 | * @jd: the journal | ||
261 | * @head: the log descriptor for the head of the log is returned here | ||
262 | * | ||
263 | * Do a binary search of a journal and find the valid log entry with the | ||
264 | * highest sequence number. (i.e. the log head) | ||
265 | * | ||
266 | * Returns: errno | ||
267 | */ | ||
268 | |||
269 | int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
270 | { | ||
271 | struct gfs2_log_header_host lh_1, lh_m; | ||
272 | u32 blk_1, blk_2, blk_m; | ||
273 | int error; | ||
274 | |||
275 | blk_1 = 0; | ||
276 | blk_2 = jd->jd_blocks - 1; | ||
277 | |||
278 | for (;;) { | ||
279 | blk_m = (blk_1 + blk_2) / 2; | ||
280 | |||
281 | error = find_good_lh(jd, &blk_1, &lh_1); | ||
282 | if (error) | ||
283 | return error; | ||
284 | |||
285 | error = find_good_lh(jd, &blk_m, &lh_m); | ||
286 | if (error) | ||
287 | return error; | ||
288 | |||
289 | if (blk_1 == blk_m || blk_m == blk_2) | ||
290 | break; | ||
291 | |||
292 | if (lh_1.lh_sequence <= lh_m.lh_sequence) | ||
293 | blk_1 = blk_m; | ||
294 | else | ||
295 | blk_2 = blk_m; | ||
296 | } | ||
297 | |||
298 | error = jhead_scan(jd, &lh_1); | ||
299 | if (error) | ||
300 | return error; | ||
301 | |||
302 | *head = lh_1; | ||
303 | |||
304 | return error; | ||
305 | } | ||
306 | |||
307 | /** | ||
185 | * foreach_descriptor - go through the active part of the log | 308 | * foreach_descriptor - go through the active part of the log |
186 | * @jd: the journal | 309 | * @jd: the journal |
187 | * @start: the first log header in the active region | 310 | * @start: the first log header in the active region |
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index 99575ab81202..11d81248be85 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h | |||
@@ -27,6 +27,8 @@ extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where) | |||
27 | extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); | 27 | extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); |
28 | extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); | 28 | extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); |
29 | 29 | ||
30 | extern int gfs2_find_jhead(struct gfs2_jdesc *jd, | ||
31 | struct gfs2_log_header_host *head); | ||
30 | extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); | 32 | extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); |
31 | extern void gfs2_recover_func(struct work_struct *work); | 33 | extern void gfs2_recover_func(struct work_struct *work); |
32 | extern int __get_log_header(struct gfs2_sbd *sdp, | 34 | extern int __get_log_header(struct gfs2_sbd *sdp, |
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index d4b11c903971..ca71163ff7cf 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include "util.h" | 45 | #include "util.h" |
46 | #include "sys.h" | 46 | #include "sys.h" |
47 | #include "xattr.h" | 47 | #include "xattr.h" |
48 | #include "lops.h" | ||
49 | 48 | ||
50 | #define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) | 49 | #define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) |
51 | 50 | ||
diff --git a/fs/inode.c b/fs/inode.c index 0cd47fe0dbe5..73432e64f874 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item, | |||
730 | return LRU_REMOVED; | 730 | return LRU_REMOVED; |
731 | } | 731 | } |
732 | 732 | ||
733 | /* | 733 | /* recently referenced inodes get one more pass */ |
734 | * Recently referenced inodes and inodes with many attached pages | 734 | if (inode->i_state & I_REFERENCED) { |
735 | * get one more pass. | ||
736 | */ | ||
737 | if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) { | ||
738 | inode->i_state &= ~I_REFERENCED; | 735 | inode->i_state &= ~I_REFERENCED; |
739 | spin_unlock(&inode->i_lock); | 736 | spin_unlock(&inode->i_lock); |
740 | return LRU_ROTATE; | 737 | return LRU_ROTATE; |
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 885363ca8569..2f6b447cdd82 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -229,6 +229,8 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation | |||
229 | spin_lock(&delegation->lock); | 229 | spin_lock(&delegation->lock); |
230 | if (delegation->inode != NULL) | 230 | if (delegation->inode != NULL) |
231 | inode = igrab(delegation->inode); | 231 | inode = igrab(delegation->inode); |
232 | if (!inode) | ||
233 | set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags); | ||
232 | spin_unlock(&delegation->lock); | 234 | spin_unlock(&delegation->lock); |
233 | return inode; | 235 | return inode; |
234 | } | 236 | } |
@@ -681,7 +683,7 @@ void nfs_expire_all_delegations(struct nfs_client *clp) | |||
681 | 683 | ||
682 | /** | 684 | /** |
683 | * nfs_super_return_all_delegations - return delegations for one superblock | 685 | * nfs_super_return_all_delegations - return delegations for one superblock |
684 | * @sb: sb to process | 686 | * @server: pointer to nfs_server to process |
685 | * | 687 | * |
686 | */ | 688 | */ |
687 | void nfs_server_return_all_delegations(struct nfs_server *server) | 689 | void nfs_server_return_all_delegations(struct nfs_server *server) |
@@ -944,10 +946,11 @@ restart: | |||
944 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { | 946 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { |
945 | list_for_each_entry_rcu(delegation, &server->delegations, | 947 | list_for_each_entry_rcu(delegation, &server->delegations, |
946 | super_list) { | 948 | super_list) { |
947 | if (test_bit(NFS_DELEGATION_RETURNING, | 949 | if (test_bit(NFS_DELEGATION_INODE_FREEING, |
948 | &delegation->flags)) | 950 | &delegation->flags) || |
949 | continue; | 951 | test_bit(NFS_DELEGATION_RETURNING, |
950 | if (test_bit(NFS_DELEGATION_NEED_RECLAIM, | 952 | &delegation->flags) || |
953 | test_bit(NFS_DELEGATION_NEED_RECLAIM, | ||
951 | &delegation->flags) == 0) | 954 | &delegation->flags) == 0) |
952 | continue; | 955 | continue; |
953 | if (!nfs_sb_active(server->super)) | 956 | if (!nfs_sb_active(server->super)) |
@@ -1053,10 +1056,11 @@ restart: | |||
1053 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { | 1056 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { |
1054 | list_for_each_entry_rcu(delegation, &server->delegations, | 1057 | list_for_each_entry_rcu(delegation, &server->delegations, |
1055 | super_list) { | 1058 | super_list) { |
1056 | if (test_bit(NFS_DELEGATION_RETURNING, | 1059 | if (test_bit(NFS_DELEGATION_INODE_FREEING, |
1057 | &delegation->flags)) | 1060 | &delegation->flags) || |
1058 | continue; | 1061 | test_bit(NFS_DELEGATION_RETURNING, |
1059 | if (test_bit(NFS_DELEGATION_TEST_EXPIRED, | 1062 | &delegation->flags) || |
1063 | test_bit(NFS_DELEGATION_TEST_EXPIRED, | ||
1060 | &delegation->flags) == 0) | 1064 | &delegation->flags) == 0) |
1061 | continue; | 1065 | continue; |
1062 | if (!nfs_sb_active(server->super)) | 1066 | if (!nfs_sb_active(server->super)) |
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index dcbf3394ba0e..35b4b02c1ae0 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h | |||
@@ -34,6 +34,7 @@ enum { | |||
34 | NFS_DELEGATION_RETURNING, | 34 | NFS_DELEGATION_RETURNING, |
35 | NFS_DELEGATION_REVOKED, | 35 | NFS_DELEGATION_REVOKED, |
36 | NFS_DELEGATION_TEST_EXPIRED, | 36 | NFS_DELEGATION_TEST_EXPIRED, |
37 | NFS_DELEGATION_INODE_FREEING, | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred, | 40 | int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred, |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 6bf4471850c8..a71d0b42d160 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -139,12 +139,19 @@ struct nfs_cache_array { | |||
139 | struct nfs_cache_array_entry array[0]; | 139 | struct nfs_cache_array_entry array[0]; |
140 | }; | 140 | }; |
141 | 141 | ||
142 | struct readdirvec { | ||
143 | unsigned long nr; | ||
144 | unsigned long index; | ||
145 | struct page *pages[NFS_MAX_READDIR_RAPAGES]; | ||
146 | }; | ||
147 | |||
142 | typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool); | 148 | typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool); |
143 | typedef struct { | 149 | typedef struct { |
144 | struct file *file; | 150 | struct file *file; |
145 | struct page *page; | 151 | struct page *page; |
146 | struct dir_context *ctx; | 152 | struct dir_context *ctx; |
147 | unsigned long page_index; | 153 | unsigned long page_index; |
154 | struct readdirvec pvec; | ||
148 | u64 *dir_cookie; | 155 | u64 *dir_cookie; |
149 | u64 last_cookie; | 156 | u64 last_cookie; |
150 | loff_t current_index; | 157 | loff_t current_index; |
@@ -524,6 +531,10 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en | |||
524 | struct nfs_cache_array *array; | 531 | struct nfs_cache_array *array; |
525 | unsigned int count = 0; | 532 | unsigned int count = 0; |
526 | int status; | 533 | int status; |
534 | int max_rapages = NFS_MAX_READDIR_RAPAGES; | ||
535 | |||
536 | desc->pvec.index = desc->page_index; | ||
537 | desc->pvec.nr = 0; | ||
527 | 538 | ||
528 | scratch = alloc_page(GFP_KERNEL); | 539 | scratch = alloc_page(GFP_KERNEL); |
529 | if (scratch == NULL) | 540 | if (scratch == NULL) |
@@ -548,20 +559,40 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en | |||
548 | if (desc->plus) | 559 | if (desc->plus) |
549 | nfs_prime_dcache(file_dentry(desc->file), entry); | 560 | nfs_prime_dcache(file_dentry(desc->file), entry); |
550 | 561 | ||
551 | status = nfs_readdir_add_to_array(entry, page); | 562 | status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]); |
563 | if (status == -ENOSPC) { | ||
564 | desc->pvec.nr++; | ||
565 | if (desc->pvec.nr == max_rapages) | ||
566 | break; | ||
567 | status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]); | ||
568 | } | ||
552 | if (status != 0) | 569 | if (status != 0) |
553 | break; | 570 | break; |
554 | } while (!entry->eof); | 571 | } while (!entry->eof); |
555 | 572 | ||
573 | /* | ||
574 | * page and desc->pvec.pages[0] are valid, don't need to check | ||
575 | * whether or not to be NULL. | ||
576 | */ | ||
577 | copy_highpage(page, desc->pvec.pages[0]); | ||
578 | |||
556 | out_nopages: | 579 | out_nopages: |
557 | if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) { | 580 | if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) { |
558 | array = kmap(page); | 581 | array = kmap_atomic(desc->pvec.pages[desc->pvec.nr]); |
559 | array->eof_index = array->size; | 582 | array->eof_index = array->size; |
560 | status = 0; | 583 | status = 0; |
561 | kunmap(page); | 584 | kunmap_atomic(array); |
562 | } | 585 | } |
563 | 586 | ||
564 | put_page(scratch); | 587 | put_page(scratch); |
588 | |||
589 | /* | ||
590 | * desc->pvec.nr > 0 means at least one page was completely filled, | ||
591 | * we should return -ENOSPC. Otherwise function | ||
592 | * nfs_readdir_xdr_to_array will enter infinite loop. | ||
593 | */ | ||
594 | if (desc->pvec.nr > 0) | ||
595 | return -ENOSPC; | ||
565 | return status; | 596 | return status; |
566 | } | 597 | } |
567 | 598 | ||
@@ -574,8 +605,8 @@ void nfs_readdir_free_pages(struct page **pages, unsigned int npages) | |||
574 | } | 605 | } |
575 | 606 | ||
576 | /* | 607 | /* |
577 | * nfs_readdir_large_page will allocate pages that must be freed with a call | 608 | * nfs_readdir_alloc_pages() will allocate pages that must be freed with a call |
578 | * to nfs_readdir_free_pagearray | 609 | * to nfs_readdir_free_pages() |
579 | */ | 610 | */ |
580 | static | 611 | static |
581 | int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages) | 612 | int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages) |
@@ -595,6 +626,24 @@ out_freepages: | |||
595 | return -ENOMEM; | 626 | return -ENOMEM; |
596 | } | 627 | } |
597 | 628 | ||
629 | /* | ||
630 | * nfs_readdir_rapages_init initialize rapages by nfs_cache_array structure. | ||
631 | */ | ||
632 | static | ||
633 | void nfs_readdir_rapages_init(nfs_readdir_descriptor_t *desc) | ||
634 | { | ||
635 | struct nfs_cache_array *array; | ||
636 | int max_rapages = NFS_MAX_READDIR_RAPAGES; | ||
637 | int index; | ||
638 | |||
639 | for (index = 0; index < max_rapages; index++) { | ||
640 | array = kmap_atomic(desc->pvec.pages[index]); | ||
641 | memset(array, 0, sizeof(struct nfs_cache_array)); | ||
642 | array->eof_index = -1; | ||
643 | kunmap_atomic(array); | ||
644 | } | ||
645 | } | ||
646 | |||
598 | static | 647 | static |
599 | int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode) | 648 | int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode) |
600 | { | 649 | { |
@@ -605,6 +654,12 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, | |||
605 | int status = -ENOMEM; | 654 | int status = -ENOMEM; |
606 | unsigned int array_size = ARRAY_SIZE(pages); | 655 | unsigned int array_size = ARRAY_SIZE(pages); |
607 | 656 | ||
657 | /* | ||
658 | * This means we hit readdir rdpages miss, the preallocated rdpages | ||
659 | * are useless, the preallocate rdpages should be reinitialized. | ||
660 | */ | ||
661 | nfs_readdir_rapages_init(desc); | ||
662 | |||
608 | entry.prev_cookie = 0; | 663 | entry.prev_cookie = 0; |
609 | entry.cookie = desc->last_cookie; | 664 | entry.cookie = desc->last_cookie; |
610 | entry.eof = 0; | 665 | entry.eof = 0; |
@@ -664,9 +719,24 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page) | |||
664 | struct inode *inode = file_inode(desc->file); | 719 | struct inode *inode = file_inode(desc->file); |
665 | int ret; | 720 | int ret; |
666 | 721 | ||
667 | ret = nfs_readdir_xdr_to_array(desc, page, inode); | 722 | /* |
668 | if (ret < 0) | 723 | * If desc->page_index in range desc->pvec.index and |
669 | goto error; | 724 | * desc->pvec.index + desc->pvec.nr, we get readdir cache hit. |
725 | */ | ||
726 | if (desc->page_index >= desc->pvec.index && | ||
727 | desc->page_index < (desc->pvec.index + desc->pvec.nr)) { | ||
728 | /* | ||
729 | * page and desc->pvec.pages[x] are valid, don't need to check | ||
730 | * whether or not to be NULL. | ||
731 | */ | ||
732 | copy_highpage(page, desc->pvec.pages[desc->page_index - desc->pvec.index]); | ||
733 | ret = 0; | ||
734 | } else { | ||
735 | ret = nfs_readdir_xdr_to_array(desc, page, inode); | ||
736 | if (ret < 0) | ||
737 | goto error; | ||
738 | } | ||
739 | |||
670 | SetPageUptodate(page); | 740 | SetPageUptodate(page); |
671 | 741 | ||
672 | if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) { | 742 | if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) { |
@@ -831,6 +901,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) | |||
831 | *desc = &my_desc; | 901 | *desc = &my_desc; |
832 | struct nfs_open_dir_context *dir_ctx = file->private_data; | 902 | struct nfs_open_dir_context *dir_ctx = file->private_data; |
833 | int res = 0; | 903 | int res = 0; |
904 | int max_rapages = NFS_MAX_READDIR_RAPAGES; | ||
834 | 905 | ||
835 | dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n", | 906 | dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n", |
836 | file, (long long)ctx->pos); | 907 | file, (long long)ctx->pos); |
@@ -850,6 +921,12 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) | |||
850 | desc->decode = NFS_PROTO(inode)->decode_dirent; | 921 | desc->decode = NFS_PROTO(inode)->decode_dirent; |
851 | desc->plus = nfs_use_readdirplus(inode, ctx); | 922 | desc->plus = nfs_use_readdirplus(inode, ctx); |
852 | 923 | ||
924 | res = nfs_readdir_alloc_pages(desc->pvec.pages, max_rapages); | ||
925 | if (res < 0) | ||
926 | return -ENOMEM; | ||
927 | |||
928 | nfs_readdir_rapages_init(desc); | ||
929 | |||
853 | if (ctx->pos == 0 || nfs_attribute_cache_expired(inode)) | 930 | if (ctx->pos == 0 || nfs_attribute_cache_expired(inode)) |
854 | res = nfs_revalidate_mapping(inode, file->f_mapping); | 931 | res = nfs_revalidate_mapping(inode, file->f_mapping); |
855 | if (res < 0) | 932 | if (res < 0) |
@@ -885,6 +962,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) | |||
885 | break; | 962 | break; |
886 | } while (!desc->eof); | 963 | } while (!desc->eof); |
887 | out: | 964 | out: |
965 | nfs_readdir_free_pages(desc->pvec.pages, max_rapages); | ||
888 | if (res > 0) | 966 | if (res > 0) |
889 | res = 0; | 967 | res = 0; |
890 | dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res); | 968 | dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res); |
@@ -945,7 +1023,7 @@ static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end, | |||
945 | 1023 | ||
946 | /** | 1024 | /** |
947 | * nfs_force_lookup_revalidate - Mark the directory as having changed | 1025 | * nfs_force_lookup_revalidate - Mark the directory as having changed |
948 | * @dir - pointer to directory inode | 1026 | * @dir: pointer to directory inode |
949 | * | 1027 | * |
950 | * This forces the revalidation code in nfs_lookup_revalidate() to do a | 1028 | * This forces the revalidation code in nfs_lookup_revalidate() to do a |
951 | * full lookup on all child dentries of 'dir' whenever a change occurs | 1029 | * full lookup on all child dentries of 'dir' whenever a change occurs |
@@ -1649,7 +1727,7 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, | |||
1649 | reval_dentry: | 1727 | reval_dentry: |
1650 | if (flags & LOOKUP_RCU) | 1728 | if (flags & LOOKUP_RCU) |
1651 | return -ECHILD; | 1729 | return -ECHILD; |
1652 | return nfs_lookup_revalidate_dentry(dir, dentry, inode);; | 1730 | return nfs_lookup_revalidate_dentry(dir, dentry, inode); |
1653 | 1731 | ||
1654 | full_reval: | 1732 | full_reval: |
1655 | return nfs_do_lookup_revalidate(dir, dentry, flags); | 1733 | return nfs_do_lookup_revalidate(dir, dentry, flags); |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 33824a0a57bf..0fd811ac08b5 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -428,7 +428,7 @@ out_put: | |||
428 | hdr->release(hdr); | 428 | hdr->release(hdr); |
429 | } | 429 | } |
430 | 430 | ||
431 | static void nfs_read_sync_pgio_error(struct list_head *head) | 431 | static void nfs_read_sync_pgio_error(struct list_head *head, int error) |
432 | { | 432 | { |
433 | struct nfs_page *req; | 433 | struct nfs_page *req; |
434 | 434 | ||
@@ -664,8 +664,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) | |||
664 | 664 | ||
665 | list_for_each_entry_safe(req, tmp, &reqs, wb_list) { | 665 | list_for_each_entry_safe(req, tmp, &reqs, wb_list) { |
666 | if (!nfs_pageio_add_request(&desc, req)) { | 666 | if (!nfs_pageio_add_request(&desc, req)) { |
667 | nfs_list_remove_request(req); | 667 | nfs_list_move_request(req, &failed); |
668 | nfs_list_add_request(req, &failed); | ||
669 | spin_lock(&cinfo.inode->i_lock); | 668 | spin_lock(&cinfo.inode->i_lock); |
670 | dreq->flags = 0; | 669 | dreq->flags = 0; |
671 | if (desc.pg_error < 0) | 670 | if (desc.pg_error < 0) |
@@ -821,7 +820,7 @@ out_put: | |||
821 | hdr->release(hdr); | 820 | hdr->release(hdr); |
822 | } | 821 | } |
823 | 822 | ||
824 | static void nfs_write_sync_pgio_error(struct list_head *head) | 823 | static void nfs_write_sync_pgio_error(struct list_head *head, int error) |
825 | { | 824 | { |
826 | struct nfs_page *req; | 825 | struct nfs_page *req; |
827 | 826 | ||
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 29553fdba8af..4899b85f9b3c 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -89,8 +89,8 @@ EXPORT_SYMBOL_GPL(nfs_file_release); | |||
89 | 89 | ||
90 | /** | 90 | /** |
91 | * nfs_revalidate_size - Revalidate the file size | 91 | * nfs_revalidate_size - Revalidate the file size |
92 | * @inode - pointer to inode struct | 92 | * @inode: pointer to inode struct |
93 | * @file - pointer to struct file | 93 | * @filp: pointer to struct file |
94 | * | 94 | * |
95 | * Revalidates the file length. This is basically a wrapper around | 95 | * Revalidates the file length. This is basically a wrapper around |
96 | * nfs_revalidate_inode() that takes into account the fact that we may | 96 | * nfs_revalidate_inode() that takes into account the fact that we may |
@@ -276,6 +276,12 @@ EXPORT_SYMBOL_GPL(nfs_file_fsync); | |||
276 | * then a modify/write/read cycle when writing to a page in the | 276 | * then a modify/write/read cycle when writing to a page in the |
277 | * page cache. | 277 | * page cache. |
278 | * | 278 | * |
279 | * Some pNFS layout drivers can only read/write at a certain block | ||
280 | * granularity like all block devices and therefore we must perform | ||
281 | * read/modify/write whenever a page hasn't read yet and the data | ||
282 | * to be written there is not aligned to a block boundary and/or | ||
283 | * smaller than the block size. | ||
284 | * | ||
279 | * The modify/write/read cycle may occur if a page is read before | 285 | * The modify/write/read cycle may occur if a page is read before |
280 | * being completely filled by the writer. In this situation, the | 286 | * being completely filled by the writer. In this situation, the |
281 | * page must be completely written to stable storage on the server | 287 | * page must be completely written to stable storage on the server |
@@ -291,26 +297,32 @@ EXPORT_SYMBOL_GPL(nfs_file_fsync); | |||
291 | * and that the new data won't completely replace the old data in | 297 | * and that the new data won't completely replace the old data in |
292 | * that range of the file. | 298 | * that range of the file. |
293 | */ | 299 | */ |
294 | static int nfs_want_read_modify_write(struct file *file, struct page *page, | 300 | static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len) |
295 | loff_t pos, unsigned len) | ||
296 | { | 301 | { |
297 | unsigned int pglen = nfs_page_length(page); | 302 | unsigned int pglen = nfs_page_length(page); |
298 | unsigned int offset = pos & (PAGE_SIZE - 1); | 303 | unsigned int offset = pos & (PAGE_SIZE - 1); |
299 | unsigned int end = offset + len; | 304 | unsigned int end = offset + len; |
300 | 305 | ||
301 | if (pnfs_ld_read_whole_page(file->f_mapping->host)) { | 306 | return !pglen || (end >= pglen && !offset); |
302 | if (!PageUptodate(page)) | 307 | } |
303 | return 1; | ||
304 | return 0; | ||
305 | } | ||
306 | 308 | ||
307 | if ((file->f_mode & FMODE_READ) && /* open for read? */ | 309 | static bool nfs_want_read_modify_write(struct file *file, struct page *page, |
308 | !PageUptodate(page) && /* Uptodate? */ | 310 | loff_t pos, unsigned int len) |
309 | !PagePrivate(page) && /* i/o request already? */ | 311 | { |
310 | pglen && /* valid bytes of file? */ | 312 | /* |
311 | (end < pglen || offset)) /* replace all valid bytes? */ | 313 | * Up-to-date pages, those with ongoing or full-page write |
312 | return 1; | 314 | * don't need read/modify/write |
313 | return 0; | 315 | */ |
316 | if (PageUptodate(page) || PagePrivate(page) || | ||
317 | nfs_full_page_write(page, pos, len)) | ||
318 | return false; | ||
319 | |||
320 | if (pnfs_ld_read_whole_page(file->f_mapping->host)) | ||
321 | return true; | ||
322 | /* Open for reading too? */ | ||
323 | if (file->f_mode & FMODE_READ) | ||
324 | return true; | ||
325 | return false; | ||
314 | } | 326 | } |
315 | 327 | ||
316 | /* | 328 | /* |
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index 11766a74216d..ca7a6203b3cb 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c | |||
@@ -483,9 +483,15 @@ ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx, | |||
483 | } | 483 | } |
484 | 484 | ||
485 | /** | 485 | /** |
486 | * Find or create a DS rpc client with th MDS server rpc client auth flavor | 486 | * nfs4_ff_find_or_create_ds_client - Find or create a DS rpc client |
487 | * in the nfs_client cl_ds_clients list. | 487 | * @lseg: pointer to layout segment |
488 | */ | 488 | * @ds_idx: mirror index |
489 | * @ds_clp: nfs_client for the DS | ||
490 | * @inode: pointer to inode | ||
491 | * | ||
492 | * Find or create a DS rpc client with th MDS server rpc client auth flavor | ||
493 | * in the nfs_client cl_ds_clients list. | ||
494 | */ | ||
489 | struct rpc_clnt * | 495 | struct rpc_clnt * |
490 | nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx, | 496 | nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx, |
491 | struct nfs_client *ds_clp, struct inode *inode) | 497 | struct nfs_client *ds_clp, struct inode *inode) |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 094775ea0781..414a90d48493 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -143,6 +143,7 @@ EXPORT_SYMBOL_GPL(nfs_sync_inode); | |||
143 | 143 | ||
144 | /** | 144 | /** |
145 | * nfs_sync_mapping - helper to flush all mmapped dirty data to disk | 145 | * nfs_sync_mapping - helper to flush all mmapped dirty data to disk |
146 | * @mapping: pointer to struct address_space | ||
146 | */ | 147 | */ |
147 | int nfs_sync_mapping(struct address_space *mapping) | 148 | int nfs_sync_mapping(struct address_space *mapping) |
148 | { | 149 | { |
@@ -1184,8 +1185,8 @@ int nfs_attribute_cache_expired(struct inode *inode) | |||
1184 | 1185 | ||
1185 | /** | 1186 | /** |
1186 | * nfs_revalidate_inode - Revalidate the inode attributes | 1187 | * nfs_revalidate_inode - Revalidate the inode attributes |
1187 | * @server - pointer to nfs_server struct | 1188 | * @server: pointer to nfs_server struct |
1188 | * @inode - pointer to inode struct | 1189 | * @inode: pointer to inode struct |
1189 | * | 1190 | * |
1190 | * Updates inode attribute information by retrieving the data from the server. | 1191 | * Updates inode attribute information by retrieving the data from the server. |
1191 | */ | 1192 | */ |
@@ -1255,8 +1256,8 @@ out: | |||
1255 | 1256 | ||
1256 | /** | 1257 | /** |
1257 | * nfs_revalidate_mapping - Revalidate the pagecache | 1258 | * nfs_revalidate_mapping - Revalidate the pagecache |
1258 | * @inode - pointer to host inode | 1259 | * @inode: pointer to host inode |
1259 | * @mapping - pointer to mapping | 1260 | * @mapping: pointer to mapping |
1260 | */ | 1261 | */ |
1261 | int nfs_revalidate_mapping(struct inode *inode, | 1262 | int nfs_revalidate_mapping(struct inode *inode, |
1262 | struct address_space *mapping) | 1263 | struct address_space *mapping) |
@@ -1371,8 +1372,8 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1371 | 1372 | ||
1372 | /** | 1373 | /** |
1373 | * nfs_check_inode_attributes - verify consistency of the inode attribute cache | 1374 | * nfs_check_inode_attributes - verify consistency of the inode attribute cache |
1374 | * @inode - pointer to inode | 1375 | * @inode: pointer to inode |
1375 | * @fattr - updated attributes | 1376 | * @fattr: updated attributes |
1376 | * | 1377 | * |
1377 | * Verifies the attribute cache. If we have just changed the attributes, | 1378 | * Verifies the attribute cache. If we have just changed the attributes, |
1378 | * so that fattr carries weak cache consistency data, then it may | 1379 | * so that fattr carries weak cache consistency data, then it may |
@@ -1572,8 +1573,8 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle); | |||
1572 | 1573 | ||
1573 | /** | 1574 | /** |
1574 | * nfs_inode_attrs_need_update - check if the inode attributes need updating | 1575 | * nfs_inode_attrs_need_update - check if the inode attributes need updating |
1575 | * @inode - pointer to inode | 1576 | * @inode: pointer to inode |
1576 | * @fattr - attributes | 1577 | * @fattr: attributes |
1577 | * | 1578 | * |
1578 | * Attempt to divine whether or not an RPC call reply carrying stale | 1579 | * Attempt to divine whether or not an RPC call reply carrying stale |
1579 | * attributes got scheduled after another call carrying updated ones. | 1580 | * attributes got scheduled after another call carrying updated ones. |
@@ -1614,8 +1615,8 @@ static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr | |||
1614 | 1615 | ||
1615 | /** | 1616 | /** |
1616 | * nfs_refresh_inode - try to update the inode attribute cache | 1617 | * nfs_refresh_inode - try to update the inode attribute cache |
1617 | * @inode - pointer to inode | 1618 | * @inode: pointer to inode |
1618 | * @fattr - updated attributes | 1619 | * @fattr: updated attributes |
1619 | * | 1620 | * |
1620 | * Check that an RPC call that returned attributes has not overlapped with | 1621 | * Check that an RPC call that returned attributes has not overlapped with |
1621 | * other recent updates of the inode metadata, then decide whether it is | 1622 | * other recent updates of the inode metadata, then decide whether it is |
@@ -1649,8 +1650,8 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, | |||
1649 | 1650 | ||
1650 | /** | 1651 | /** |
1651 | * nfs_post_op_update_inode - try to update the inode attribute cache | 1652 | * nfs_post_op_update_inode - try to update the inode attribute cache |
1652 | * @inode - pointer to inode | 1653 | * @inode: pointer to inode |
1653 | * @fattr - updated attributes | 1654 | * @fattr: updated attributes |
1654 | * | 1655 | * |
1655 | * After an operation that has changed the inode metadata, mark the | 1656 | * After an operation that has changed the inode metadata, mark the |
1656 | * attribute cache as being invalid, then try to update it. | 1657 | * attribute cache as being invalid, then try to update it. |
@@ -1679,8 +1680,8 @@ EXPORT_SYMBOL_GPL(nfs_post_op_update_inode); | |||
1679 | 1680 | ||
1680 | /** | 1681 | /** |
1681 | * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache | 1682 | * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache |
1682 | * @inode - pointer to inode | 1683 | * @inode: pointer to inode |
1683 | * @fattr - updated attributes | 1684 | * @fattr: updated attributes |
1684 | * | 1685 | * |
1685 | * After an operation that has changed the inode metadata, mark the | 1686 | * After an operation that has changed the inode metadata, mark the |
1686 | * attribute cache as being invalid, then try to update it. Fake up | 1687 | * attribute cache as being invalid, then try to update it. Fake up |
@@ -1731,8 +1732,8 @@ out_noforce: | |||
1731 | 1732 | ||
1732 | /** | 1733 | /** |
1733 | * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache | 1734 | * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache |
1734 | * @inode - pointer to inode | 1735 | * @inode: pointer to inode |
1735 | * @fattr - updated attributes | 1736 | * @fattr: updated attributes |
1736 | * | 1737 | * |
1737 | * After an operation that has changed the inode metadata, mark the | 1738 | * After an operation that has changed the inode metadata, mark the |
1738 | * attribute cache as being invalid, then try to update it. Fake up | 1739 | * attribute cache as being invalid, then try to update it. Fake up |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b1e577302518..c7cf23ae6597 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -69,7 +69,8 @@ struct nfs_clone_mount { | |||
69 | * Maximum number of pages that readdir can use for creating | 69 | * Maximum number of pages that readdir can use for creating |
70 | * a vmapped array of pages. | 70 | * a vmapped array of pages. |
71 | */ | 71 | */ |
72 | #define NFS_MAX_READDIR_PAGES 8 | 72 | #define NFS_MAX_READDIR_PAGES 64 |
73 | #define NFS_MAX_READDIR_RAPAGES 8 | ||
73 | 74 | ||
74 | struct nfs_client_initdata { | 75 | struct nfs_client_initdata { |
75 | unsigned long init_flags; | 76 | unsigned long init_flags; |
@@ -755,6 +756,7 @@ static inline bool nfs_error_is_fatal(int err) | |||
755 | { | 756 | { |
756 | switch (err) { | 757 | switch (err) { |
757 | case -ERESTARTSYS: | 758 | case -ERESTARTSYS: |
759 | case -EINTR: | ||
758 | case -EACCES: | 760 | case -EACCES: |
759 | case -EDQUOT: | 761 | case -EDQUOT: |
760 | case -EFBIG: | 762 | case -EFBIG: |
@@ -763,6 +765,7 @@ static inline bool nfs_error_is_fatal(int err) | |||
763 | case -EROFS: | 765 | case -EROFS: |
764 | case -ESTALE: | 766 | case -ESTALE: |
765 | case -E2BIG: | 767 | case -E2BIG: |
768 | case -ENOMEM: | ||
766 | return true; | 769 | return true; |
767 | default: | 770 | default: |
768 | return false; | 771 | return false; |
diff --git a/fs/nfs/io.c b/fs/nfs/io.c index 9034b4926909..5088fda9b453 100644 --- a/fs/nfs/io.c +++ b/fs/nfs/io.c | |||
@@ -25,7 +25,7 @@ static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode) | |||
25 | 25 | ||
26 | /** | 26 | /** |
27 | * nfs_start_io_read - declare the file is being used for buffered reads | 27 | * nfs_start_io_read - declare the file is being used for buffered reads |
28 | * @inode - file inode | 28 | * @inode: file inode |
29 | * | 29 | * |
30 | * Declare that a buffered read operation is about to start, and ensure | 30 | * Declare that a buffered read operation is about to start, and ensure |
31 | * that we block all direct I/O. | 31 | * that we block all direct I/O. |
@@ -56,7 +56,7 @@ nfs_start_io_read(struct inode *inode) | |||
56 | 56 | ||
57 | /** | 57 | /** |
58 | * nfs_end_io_read - declare that the buffered read operation is done | 58 | * nfs_end_io_read - declare that the buffered read operation is done |
59 | * @inode - file inode | 59 | * @inode: file inode |
60 | * | 60 | * |
61 | * Declare that a buffered read operation is done, and release the shared | 61 | * Declare that a buffered read operation is done, and release the shared |
62 | * lock on inode->i_rwsem. | 62 | * lock on inode->i_rwsem. |
@@ -69,7 +69,7 @@ nfs_end_io_read(struct inode *inode) | |||
69 | 69 | ||
70 | /** | 70 | /** |
71 | * nfs_start_io_write - declare the file is being used for buffered writes | 71 | * nfs_start_io_write - declare the file is being used for buffered writes |
72 | * @inode - file inode | 72 | * @inode: file inode |
73 | * | 73 | * |
74 | * Declare that a buffered read operation is about to start, and ensure | 74 | * Declare that a buffered read operation is about to start, and ensure |
75 | * that we block all direct I/O. | 75 | * that we block all direct I/O. |
@@ -83,7 +83,7 @@ nfs_start_io_write(struct inode *inode) | |||
83 | 83 | ||
84 | /** | 84 | /** |
85 | * nfs_end_io_write - declare that the buffered write operation is done | 85 | * nfs_end_io_write - declare that the buffered write operation is done |
86 | * @inode - file inode | 86 | * @inode: file inode |
87 | * | 87 | * |
88 | * Declare that a buffered write operation is done, and release the | 88 | * Declare that a buffered write operation is done, and release the |
89 | * lock on inode->i_rwsem. | 89 | * lock on inode->i_rwsem. |
@@ -105,7 +105,7 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode) | |||
105 | 105 | ||
106 | /** | 106 | /** |
107 | * nfs_end_io_direct - declare the file is being used for direct i/o | 107 | * nfs_end_io_direct - declare the file is being used for direct i/o |
108 | * @inode - file inode | 108 | * @inode: file inode |
109 | * | 109 | * |
110 | * Declare that a direct I/O operation is about to start, and ensure | 110 | * Declare that a direct I/O operation is about to start, and ensure |
111 | * that we block all buffered I/O. | 111 | * that we block all buffered I/O. |
@@ -136,7 +136,7 @@ nfs_start_io_direct(struct inode *inode) | |||
136 | 136 | ||
137 | /** | 137 | /** |
138 | * nfs_end_io_direct - declare that the direct i/o operation is done | 138 | * nfs_end_io_direct - declare that the direct i/o operation is done |
139 | * @inode - file inode | 139 | * @inode: file inode |
140 | * | 140 | * |
141 | * Declare that a direct I/O operation is done, and release the shared | 141 | * Declare that a direct I/O operation is done, and release the shared |
142 | * lock on inode->i_rwsem. | 142 | * lock on inode->i_rwsem. |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index e5686be67be8..15f099a24c29 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -221,10 +221,10 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server, | |||
221 | 221 | ||
222 | /** | 222 | /** |
223 | * nfs_do_submount - set up mountpoint when crossing a filesystem boundary | 223 | * nfs_do_submount - set up mountpoint when crossing a filesystem boundary |
224 | * @dentry - parent directory | 224 | * @dentry: parent directory |
225 | * @fh - filehandle for new root dentry | 225 | * @fh: filehandle for new root dentry |
226 | * @fattr - attributes for new root inode | 226 | * @fattr: attributes for new root inode |
227 | * @authflavor - security flavor to use when performing the mount | 227 | * @authflavor: security flavor to use when performing the mount |
228 | * | 228 | * |
229 | */ | 229 | */ |
230 | struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh, | 230 | struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh, |
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 9fce18548f7e..c5c3fc6e6c60 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c | |||
@@ -222,8 +222,6 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, | |||
222 | switch (status) { | 222 | switch (status) { |
223 | case 0: | 223 | case 0: |
224 | status = nfs_refresh_inode(inode, fattr); | 224 | status = nfs_refresh_inode(inode, fattr); |
225 | set_cached_acl(inode, ACL_TYPE_ACCESS, acl); | ||
226 | set_cached_acl(inode, ACL_TYPE_DEFAULT, dfacl); | ||
227 | break; | 225 | break; |
228 | case -EPFNOSUPPORT: | 226 | case -EPFNOSUPPORT: |
229 | case -EPROTONOSUPPORT: | 227 | case -EPROTONOSUPPORT: |
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 2548405da1f7..1339ede979af 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -42,7 +42,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion) | |||
42 | } | 42 | } |
43 | 43 | ||
44 | #ifdef CONFIG_NFS_V4_1 | 44 | #ifdef CONFIG_NFS_V4_1 |
45 | /** | 45 | /* |
46 | * Per auth flavor data server rpc clients | 46 | * Per auth flavor data server rpc clients |
47 | */ | 47 | */ |
48 | struct nfs4_ds_server { | 48 | struct nfs4_ds_server { |
@@ -51,7 +51,9 @@ struct nfs4_ds_server { | |||
51 | }; | 51 | }; |
52 | 52 | ||
53 | /** | 53 | /** |
54 | * Common lookup case for DS I/O | 54 | * nfs4_find_ds_client - Common lookup case for DS I/O |
55 | * @ds_clp: pointer to the DS's nfs_client | ||
56 | * @flavor: rpc auth flavour to match | ||
55 | */ | 57 | */ |
56 | static struct nfs4_ds_server * | 58 | static struct nfs4_ds_server * |
57 | nfs4_find_ds_client(struct nfs_client *ds_clp, rpc_authflavor_t flavor) | 59 | nfs4_find_ds_client(struct nfs_client *ds_clp, rpc_authflavor_t flavor) |
@@ -118,9 +120,13 @@ nfs4_free_ds_server(struct nfs4_ds_server *dss) | |||
118 | } | 120 | } |
119 | 121 | ||
120 | /** | 122 | /** |
121 | * Find or create a DS rpc client with th MDS server rpc client auth flavor | 123 | * nfs4_find_or_create_ds_client - Find or create a DS rpc client |
122 | * in the nfs_client cl_ds_clients list. | 124 | * @ds_clp: pointer to the DS's nfs_client |
123 | */ | 125 | * @inode: pointer to the inode |
126 | * | ||
127 | * Find or create a DS rpc client with th MDS server rpc client auth flavor | ||
128 | * in the nfs_client cl_ds_clients list. | ||
129 | */ | ||
124 | struct rpc_clnt * | 130 | struct rpc_clnt * |
125 | nfs4_find_or_create_ds_client(struct nfs_client *ds_clp, struct inode *inode) | 131 | nfs4_find_or_create_ds_client(struct nfs_client *ds_clp, struct inode *inode) |
126 | { | 132 | { |
@@ -145,7 +151,6 @@ static void | |||
145 | nfs4_shutdown_ds_clients(struct nfs_client *clp) | 151 | nfs4_shutdown_ds_clients(struct nfs_client *clp) |
146 | { | 152 | { |
147 | struct nfs4_ds_server *dss; | 153 | struct nfs4_ds_server *dss; |
148 | LIST_HEAD(shutdown_list); | ||
149 | 154 | ||
150 | while (!list_empty(&clp->cl_ds_clients)) { | 155 | while (!list_empty(&clp->cl_ds_clients)) { |
151 | dss = list_entry(clp->cl_ds_clients.next, | 156 | dss = list_entry(clp->cl_ds_clients.next, |
@@ -284,7 +289,7 @@ static int nfs4_init_callback(struct nfs_client *clp) | |||
284 | 289 | ||
285 | /** | 290 | /** |
286 | * nfs40_init_client - nfs_client initialization tasks for NFSv4.0 | 291 | * nfs40_init_client - nfs_client initialization tasks for NFSv4.0 |
287 | * @clp - nfs_client to initialize | 292 | * @clp: nfs_client to initialize |
288 | * | 293 | * |
289 | * Returns zero on success, or a negative errno if some error occurred. | 294 | * Returns zero on success, or a negative errno if some error occurred. |
290 | */ | 295 | */ |
@@ -312,7 +317,7 @@ int nfs40_init_client(struct nfs_client *clp) | |||
312 | 317 | ||
313 | /** | 318 | /** |
314 | * nfs41_init_client - nfs_client initialization tasks for NFSv4.1+ | 319 | * nfs41_init_client - nfs_client initialization tasks for NFSv4.1+ |
315 | * @clp - nfs_client to initialize | 320 | * @clp: nfs_client to initialize |
316 | * | 321 | * |
317 | * Returns zero on success, or a negative errno if some error occurred. | 322 | * Returns zero on success, or a negative errno if some error occurred. |
318 | */ | 323 | */ |
@@ -360,9 +365,7 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp) | |||
360 | * nfs4_init_client - Initialise an NFS4 client record | 365 | * nfs4_init_client - Initialise an NFS4 client record |
361 | * | 366 | * |
362 | * @clp: nfs_client to initialise | 367 | * @clp: nfs_client to initialise |
363 | * @timeparms: timeout parameters for underlying RPC transport | 368 | * @cl_init: pointer to nfs_client_initdata |
364 | * @ip_addr: callback IP address in presentation format | ||
365 | * @authflavor: authentication flavor for underlying RPC transport | ||
366 | * | 369 | * |
367 | * Returns pointer to an NFS client, or an ERR_PTR value. | 370 | * Returns pointer to an NFS client, or an ERR_PTR value. |
368 | */ | 371 | */ |
@@ -649,13 +652,13 @@ nfs4_check_server_scope(struct nfs41_server_scope *s1, | |||
649 | 652 | ||
650 | /** | 653 | /** |
651 | * nfs4_detect_session_trunking - Checks for session trunking. | 654 | * nfs4_detect_session_trunking - Checks for session trunking. |
652 | * | ||
653 | * Called after a successful EXCHANGE_ID on a multi-addr connection. | ||
654 | * Upon success, add the transport. | ||
655 | * | ||
656 | * @clp: original mount nfs_client | 655 | * @clp: original mount nfs_client |
657 | * @res: result structure from an exchange_id using the original mount | 656 | * @res: result structure from an exchange_id using the original mount |
658 | * nfs_client with a new multi_addr transport | 657 | * nfs_client with a new multi_addr transport |
658 | * @xprt: pointer to the transport to add. | ||
659 | * | ||
660 | * Called after a successful EXCHANGE_ID on a multi-addr connection. | ||
661 | * Upon success, add the transport. | ||
659 | * | 662 | * |
660 | * Returns zero on success, otherwise -EINVAL | 663 | * Returns zero on success, otherwise -EINVAL |
661 | * | 664 | * |
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 3f23b6840547..bf34ddaa2ad7 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/keyctl.h> | 44 | #include <linux/keyctl.h> |
45 | #include <linux/key-type.h> | 45 | #include <linux/key-type.h> |
46 | #include <keys/user-type.h> | 46 | #include <keys/user-type.h> |
47 | #include <keys/request_key_auth-type.h> | ||
47 | #include <linux/module.h> | 48 | #include <linux/module.h> |
48 | 49 | ||
49 | #include "internal.h" | 50 | #include "internal.h" |
@@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy; | |||
59 | struct idmap_legacy_upcalldata { | 60 | struct idmap_legacy_upcalldata { |
60 | struct rpc_pipe_msg pipe_msg; | 61 | struct rpc_pipe_msg pipe_msg; |
61 | struct idmap_msg idmap_msg; | 62 | struct idmap_msg idmap_msg; |
62 | struct key_construction *key_cons; | 63 | struct key *authkey; |
63 | struct idmap *idmap; | 64 | struct idmap *idmap; |
64 | }; | 65 | }; |
65 | 66 | ||
@@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = { | |||
384 | { Opt_find_err, NULL } | 385 | { Opt_find_err, NULL } |
385 | }; | 386 | }; |
386 | 387 | ||
387 | static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); | 388 | static int nfs_idmap_legacy_upcall(struct key *, void *); |
388 | static ssize_t idmap_pipe_downcall(struct file *, const char __user *, | 389 | static ssize_t idmap_pipe_downcall(struct file *, const char __user *, |
389 | size_t); | 390 | size_t); |
390 | static void idmap_release_pipe(struct inode *); | 391 | static void idmap_release_pipe(struct inode *); |
@@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap, | |||
549 | static void | 550 | static void |
550 | nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) | 551 | nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) |
551 | { | 552 | { |
552 | struct key_construction *cons = idmap->idmap_upcall_data->key_cons; | 553 | struct key *authkey = idmap->idmap_upcall_data->authkey; |
553 | 554 | ||
554 | kfree(idmap->idmap_upcall_data); | 555 | kfree(idmap->idmap_upcall_data); |
555 | idmap->idmap_upcall_data = NULL; | 556 | idmap->idmap_upcall_data = NULL; |
556 | complete_request_key(cons, ret); | 557 | complete_request_key(authkey, ret); |
558 | key_put(authkey); | ||
557 | } | 559 | } |
558 | 560 | ||
559 | static void | 561 | static void |
@@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret) | |||
563 | nfs_idmap_complete_pipe_upcall_locked(idmap, ret); | 565 | nfs_idmap_complete_pipe_upcall_locked(idmap, ret); |
564 | } | 566 | } |
565 | 567 | ||
566 | static int nfs_idmap_legacy_upcall(struct key_construction *cons, | 568 | static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux) |
567 | const char *op, | ||
568 | void *aux) | ||
569 | { | 569 | { |
570 | struct idmap_legacy_upcalldata *data; | 570 | struct idmap_legacy_upcalldata *data; |
571 | struct request_key_auth *rka = get_request_key_auth(authkey); | ||
571 | struct rpc_pipe_msg *msg; | 572 | struct rpc_pipe_msg *msg; |
572 | struct idmap_msg *im; | 573 | struct idmap_msg *im; |
573 | struct idmap *idmap = (struct idmap *)aux; | 574 | struct idmap *idmap = (struct idmap *)aux; |
574 | struct key *key = cons->key; | 575 | struct key *key = rka->target_key; |
575 | int ret = -ENOKEY; | 576 | int ret = -ENOKEY; |
576 | 577 | ||
577 | if (!aux) | 578 | if (!aux) |
@@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, | |||
586 | msg = &data->pipe_msg; | 587 | msg = &data->pipe_msg; |
587 | im = &data->idmap_msg; | 588 | im = &data->idmap_msg; |
588 | data->idmap = idmap; | 589 | data->idmap = idmap; |
589 | data->key_cons = cons; | 590 | data->authkey = key_get(authkey); |
590 | 591 | ||
591 | ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); | 592 | ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); |
592 | if (ret < 0) | 593 | if (ret < 0) |
@@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, | |||
604 | out2: | 605 | out2: |
605 | kfree(data); | 606 | kfree(data); |
606 | out1: | 607 | out1: |
607 | complete_request_key(cons, ret); | 608 | complete_request_key(authkey, ret); |
608 | return ret; | 609 | return ret; |
609 | } | 610 | } |
610 | 611 | ||
@@ -651,9 +652,10 @@ out: | |||
651 | static ssize_t | 652 | static ssize_t |
652 | idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | 653 | idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) |
653 | { | 654 | { |
655 | struct request_key_auth *rka; | ||
654 | struct rpc_inode *rpci = RPC_I(file_inode(filp)); | 656 | struct rpc_inode *rpci = RPC_I(file_inode(filp)); |
655 | struct idmap *idmap = (struct idmap *)rpci->private; | 657 | struct idmap *idmap = (struct idmap *)rpci->private; |
656 | struct key_construction *cons; | 658 | struct key *authkey; |
657 | struct idmap_msg im; | 659 | struct idmap_msg im; |
658 | size_t namelen_in; | 660 | size_t namelen_in; |
659 | int ret = -ENOKEY; | 661 | int ret = -ENOKEY; |
@@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
665 | if (idmap->idmap_upcall_data == NULL) | 667 | if (idmap->idmap_upcall_data == NULL) |
666 | goto out_noupcall; | 668 | goto out_noupcall; |
667 | 669 | ||
668 | cons = idmap->idmap_upcall_data->key_cons; | 670 | authkey = idmap->idmap_upcall_data->authkey; |
671 | rka = get_request_key_auth(authkey); | ||
669 | 672 | ||
670 | if (mlen != sizeof(im)) { | 673 | if (mlen != sizeof(im)) { |
671 | ret = -ENOSPC; | 674 | ret = -ENOSPC; |
@@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
690 | 693 | ||
691 | ret = nfs_idmap_read_and_verify_message(&im, | 694 | ret = nfs_idmap_read_and_verify_message(&im, |
692 | &idmap->idmap_upcall_data->idmap_msg, | 695 | &idmap->idmap_upcall_data->idmap_msg, |
693 | cons->key, cons->authkey); | 696 | rka->target_key, authkey); |
694 | if (ret >= 0) { | 697 | if (ret >= 0) { |
695 | key_set_timeout(cons->key, nfs_idmap_cache_timeout); | 698 | key_set_timeout(rka->target_key, nfs_idmap_cache_timeout); |
696 | ret = mlen; | 699 | ret = mlen; |
697 | } | 700 | } |
698 | 701 | ||
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 24f06dcc2b08..2e460c33ae48 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c | |||
@@ -137,6 +137,7 @@ static size_t nfs_parse_server_name(char *string, size_t len, | |||
137 | 137 | ||
138 | /** | 138 | /** |
139 | * nfs_find_best_sec - Find a security mechanism supported locally | 139 | * nfs_find_best_sec - Find a security mechanism supported locally |
140 | * @clnt: pointer to rpc_clnt | ||
140 | * @server: NFS server struct | 141 | * @server: NFS server struct |
141 | * @flavors: List of security tuples returned by SECINFO procedure | 142 | * @flavors: List of security tuples returned by SECINFO procedure |
142 | * | 143 | * |
@@ -288,8 +289,8 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata, | |||
288 | 289 | ||
289 | /** | 290 | /** |
290 | * nfs_follow_referral - set up mountpoint when hitting a referral on moved error | 291 | * nfs_follow_referral - set up mountpoint when hitting a referral on moved error |
291 | * @dentry - parent directory | 292 | * @dentry: parent directory |
292 | * @locations - array of NFSv4 server location information | 293 | * @locations: array of NFSv4 server location information |
293 | * | 294 | * |
294 | */ | 295 | */ |
295 | static struct vfsmount *nfs_follow_referral(struct dentry *dentry, | 296 | static struct vfsmount *nfs_follow_referral(struct dentry *dentry, |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 557a5d636183..77c6e2d3f3fc 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -730,13 +730,25 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) | |||
730 | res->sr_slot = NULL; | 730 | res->sr_slot = NULL; |
731 | } | 731 | } |
732 | 732 | ||
733 | static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot, | ||
734 | u32 seqnr) | ||
735 | { | ||
736 | if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0) | ||
737 | slot->seq_nr_highest_sent = seqnr; | ||
738 | } | ||
739 | static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, | ||
740 | u32 seqnr) | ||
741 | { | ||
742 | slot->seq_nr_highest_sent = seqnr; | ||
743 | slot->seq_nr_last_acked = seqnr; | ||
744 | } | ||
745 | |||
733 | static int nfs41_sequence_process(struct rpc_task *task, | 746 | static int nfs41_sequence_process(struct rpc_task *task, |
734 | struct nfs4_sequence_res *res) | 747 | struct nfs4_sequence_res *res) |
735 | { | 748 | { |
736 | struct nfs4_session *session; | 749 | struct nfs4_session *session; |
737 | struct nfs4_slot *slot = res->sr_slot; | 750 | struct nfs4_slot *slot = res->sr_slot; |
738 | struct nfs_client *clp; | 751 | struct nfs_client *clp; |
739 | bool interrupted = false; | ||
740 | int ret = 1; | 752 | int ret = 1; |
741 | 753 | ||
742 | if (slot == NULL) | 754 | if (slot == NULL) |
@@ -747,16 +759,12 @@ static int nfs41_sequence_process(struct rpc_task *task, | |||
747 | 759 | ||
748 | session = slot->table->session; | 760 | session = slot->table->session; |
749 | 761 | ||
750 | if (slot->interrupted) { | ||
751 | if (res->sr_status != -NFS4ERR_DELAY) | ||
752 | slot->interrupted = 0; | ||
753 | interrupted = true; | ||
754 | } | ||
755 | |||
756 | trace_nfs4_sequence_done(session, res); | 762 | trace_nfs4_sequence_done(session, res); |
757 | /* Check the SEQUENCE operation status */ | 763 | /* Check the SEQUENCE operation status */ |
758 | switch (res->sr_status) { | 764 | switch (res->sr_status) { |
759 | case 0: | 765 | case 0: |
766 | /* Mark this sequence number as having been acked */ | ||
767 | nfs4_slot_sequence_acked(slot, slot->seq_nr); | ||
760 | /* Update the slot's sequence and clientid lease timer */ | 768 | /* Update the slot's sequence and clientid lease timer */ |
761 | slot->seq_done = 1; | 769 | slot->seq_done = 1; |
762 | clp = session->clp; | 770 | clp = session->clp; |
@@ -771,9 +779,9 @@ static int nfs41_sequence_process(struct rpc_task *task, | |||
771 | * sr_status remains 1 if an RPC level error occurred. | 779 | * sr_status remains 1 if an RPC level error occurred. |
772 | * The server may or may not have processed the sequence | 780 | * The server may or may not have processed the sequence |
773 | * operation.. | 781 | * operation.. |
774 | * Mark the slot as having hosted an interrupted RPC call. | ||
775 | */ | 782 | */ |
776 | slot->interrupted = 1; | 783 | nfs4_slot_sequence_record_sent(slot, slot->seq_nr); |
784 | slot->seq_done = 1; | ||
777 | goto out; | 785 | goto out; |
778 | case -NFS4ERR_DELAY: | 786 | case -NFS4ERR_DELAY: |
779 | /* The server detected a resend of the RPC call and | 787 | /* The server detected a resend of the RPC call and |
@@ -784,6 +792,7 @@ static int nfs41_sequence_process(struct rpc_task *task, | |||
784 | __func__, | 792 | __func__, |
785 | slot->slot_nr, | 793 | slot->slot_nr, |
786 | slot->seq_nr); | 794 | slot->seq_nr); |
795 | nfs4_slot_sequence_acked(slot, slot->seq_nr); | ||
787 | goto out_retry; | 796 | goto out_retry; |
788 | case -NFS4ERR_RETRY_UNCACHED_REP: | 797 | case -NFS4ERR_RETRY_UNCACHED_REP: |
789 | case -NFS4ERR_SEQ_FALSE_RETRY: | 798 | case -NFS4ERR_SEQ_FALSE_RETRY: |
@@ -791,6 +800,7 @@ static int nfs41_sequence_process(struct rpc_task *task, | |||
791 | * The server thinks we tried to replay a request. | 800 | * The server thinks we tried to replay a request. |
792 | * Retry the call after bumping the sequence ID. | 801 | * Retry the call after bumping the sequence ID. |
793 | */ | 802 | */ |
803 | nfs4_slot_sequence_acked(slot, slot->seq_nr); | ||
794 | goto retry_new_seq; | 804 | goto retry_new_seq; |
795 | case -NFS4ERR_BADSLOT: | 805 | case -NFS4ERR_BADSLOT: |
796 | /* | 806 | /* |
@@ -801,21 +811,28 @@ static int nfs41_sequence_process(struct rpc_task *task, | |||
801 | goto session_recover; | 811 | goto session_recover; |
802 | goto retry_nowait; | 812 | goto retry_nowait; |
803 | case -NFS4ERR_SEQ_MISORDERED: | 813 | case -NFS4ERR_SEQ_MISORDERED: |
814 | nfs4_slot_sequence_record_sent(slot, slot->seq_nr); | ||
804 | /* | 815 | /* |
805 | * Was the last operation on this sequence interrupted? | 816 | * Were one or more calls using this slot interrupted? |
806 | * If so, retry after bumping the sequence number. | 817 | * If the server never received the request, then our |
807 | */ | 818 | * transmitted slot sequence number may be too high. |
808 | if (interrupted) | ||
809 | goto retry_new_seq; | ||
810 | /* | ||
811 | * Could this slot have been previously retired? | ||
812 | * If so, then the server may be expecting seq_nr = 1! | ||
813 | */ | 819 | */ |
814 | if (slot->seq_nr != 1) { | 820 | if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) { |
815 | slot->seq_nr = 1; | 821 | slot->seq_nr--; |
816 | goto retry_nowait; | 822 | goto retry_nowait; |
817 | } | 823 | } |
818 | goto session_recover; | 824 | /* |
825 | * RFC5661: | ||
826 | * A retry might be sent while the original request is | ||
827 | * still in progress on the replier. The replier SHOULD | ||
828 | * deal with the issue by returning NFS4ERR_DELAY as the | ||
829 | * reply to SEQUENCE or CB_SEQUENCE operation, but | ||
830 | * implementations MAY return NFS4ERR_SEQ_MISORDERED. | ||
831 | * | ||
832 | * Restart the search after a delay. | ||
833 | */ | ||
834 | slot->seq_nr = slot->seq_nr_highest_sent; | ||
835 | goto out_retry; | ||
819 | default: | 836 | default: |
820 | /* Just update the slot sequence no. */ | 837 | /* Just update the slot sequence no. */ |
821 | slot->seq_done = 1; | 838 | slot->seq_done = 1; |
@@ -906,17 +923,6 @@ static const struct rpc_call_ops nfs41_call_sync_ops = { | |||
906 | .rpc_call_done = nfs41_call_sync_done, | 923 | .rpc_call_done = nfs41_call_sync_done, |
907 | }; | 924 | }; |
908 | 925 | ||
909 | static void | ||
910 | nfs4_sequence_process_interrupted(struct nfs_client *client, | ||
911 | struct nfs4_slot *slot, const struct cred *cred) | ||
912 | { | ||
913 | struct rpc_task *task; | ||
914 | |||
915 | task = _nfs41_proc_sequence(client, cred, slot, true); | ||
916 | if (!IS_ERR(task)) | ||
917 | rpc_put_task_async(task); | ||
918 | } | ||
919 | |||
920 | #else /* !CONFIG_NFS_V4_1 */ | 926 | #else /* !CONFIG_NFS_V4_1 */ |
921 | 927 | ||
922 | static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) | 928 | static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) |
@@ -937,14 +943,6 @@ int nfs4_sequence_done(struct rpc_task *task, | |||
937 | } | 943 | } |
938 | EXPORT_SYMBOL_GPL(nfs4_sequence_done); | 944 | EXPORT_SYMBOL_GPL(nfs4_sequence_done); |
939 | 945 | ||
940 | static void | ||
941 | nfs4_sequence_process_interrupted(struct nfs_client *client, | ||
942 | struct nfs4_slot *slot, const struct cred *cred) | ||
943 | { | ||
944 | WARN_ON_ONCE(1); | ||
945 | slot->interrupted = 0; | ||
946 | } | ||
947 | |||
948 | #endif /* !CONFIG_NFS_V4_1 */ | 946 | #endif /* !CONFIG_NFS_V4_1 */ |
949 | 947 | ||
950 | static | 948 | static |
@@ -982,26 +980,19 @@ int nfs4_setup_sequence(struct nfs_client *client, | |||
982 | task->tk_timeout = 0; | 980 | task->tk_timeout = 0; |
983 | } | 981 | } |
984 | 982 | ||
985 | for (;;) { | 983 | spin_lock(&tbl->slot_tbl_lock); |
986 | spin_lock(&tbl->slot_tbl_lock); | 984 | /* The state manager will wait until the slot table is empty */ |
987 | /* The state manager will wait until the slot table is empty */ | 985 | if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) |
988 | if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) | 986 | goto out_sleep; |
989 | goto out_sleep; | ||
990 | |||
991 | slot = nfs4_alloc_slot(tbl); | ||
992 | if (IS_ERR(slot)) { | ||
993 | /* Try again in 1/4 second */ | ||
994 | if (slot == ERR_PTR(-ENOMEM)) | ||
995 | task->tk_timeout = HZ >> 2; | ||
996 | goto out_sleep; | ||
997 | } | ||
998 | spin_unlock(&tbl->slot_tbl_lock); | ||
999 | 987 | ||
1000 | if (likely(!slot->interrupted)) | 988 | slot = nfs4_alloc_slot(tbl); |
1001 | break; | 989 | if (IS_ERR(slot)) { |
1002 | nfs4_sequence_process_interrupted(client, | 990 | /* Try again in 1/4 second */ |
1003 | slot, task->tk_msg.rpc_cred); | 991 | if (slot == ERR_PTR(-ENOMEM)) |
992 | task->tk_timeout = HZ >> 2; | ||
993 | goto out_sleep; | ||
1004 | } | 994 | } |
995 | spin_unlock(&tbl->slot_tbl_lock); | ||
1005 | 996 | ||
1006 | nfs4_sequence_attach_slot(args, res, slot); | 997 | nfs4_sequence_attach_slot(args, res, slot); |
1007 | 998 | ||
@@ -1555,6 +1546,10 @@ static void nfs_clear_open_stateid(struct nfs4_state *state, | |||
1555 | 1546 | ||
1556 | static void nfs_set_open_stateid_locked(struct nfs4_state *state, | 1547 | static void nfs_set_open_stateid_locked(struct nfs4_state *state, |
1557 | const nfs4_stateid *stateid, nfs4_stateid *freeme) | 1548 | const nfs4_stateid *stateid, nfs4_stateid *freeme) |
1549 | __must_hold(&state->owner->so_lock) | ||
1550 | __must_hold(&state->seqlock) | ||
1551 | __must_hold(RCU) | ||
1552 | |||
1558 | { | 1553 | { |
1559 | DEFINE_WAIT(wait); | 1554 | DEFINE_WAIT(wait); |
1560 | int status = 0; | 1555 | int status = 0; |
@@ -5963,7 +5958,7 @@ out: | |||
5963 | /** | 5958 | /** |
5964 | * nfs4_proc_setclientid_confirm - Confirm client ID | 5959 | * nfs4_proc_setclientid_confirm - Confirm client ID |
5965 | * @clp: state data structure | 5960 | * @clp: state data structure |
5966 | * @res: result of a previous SETCLIENTID | 5961 | * @arg: result of a previous SETCLIENTID |
5967 | * @cred: credential to use for this call | 5962 | * @cred: credential to use for this call |
5968 | * | 5963 | * |
5969 | * Returns zero, a negative errno, or a negative NFS4ERR status code. | 5964 | * Returns zero, a negative errno, or a negative NFS4ERR status code. |
@@ -7527,7 +7522,7 @@ int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred) | |||
7527 | return status; | 7522 | return status; |
7528 | } | 7523 | } |
7529 | 7524 | ||
7530 | /** | 7525 | /* |
7531 | * If 'use_integrity' is true and the state managment nfs_client | 7526 | * If 'use_integrity' is true and the state managment nfs_client |
7532 | * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient | 7527 | * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient |
7533 | * and the machine credential as per RFC3530bis and RFC5661 Security | 7528 | * and the machine credential as per RFC3530bis and RFC5661 Security |
@@ -9219,7 +9214,7 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) | |||
9219 | return status; | 9214 | return status; |
9220 | } | 9215 | } |
9221 | 9216 | ||
9222 | /** | 9217 | /* |
9223 | * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if | 9218 | * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if |
9224 | * possible) as per RFC3530bis and RFC5661 Security Considerations sections | 9219 | * possible) as per RFC3530bis and RFC5661 Security Considerations sections |
9225 | */ | 9220 | */ |
@@ -9484,7 +9479,7 @@ static const struct rpc_call_ops nfs41_free_stateid_ops = { | |||
9484 | * @server: server / transport on which to perform the operation | 9479 | * @server: server / transport on which to perform the operation |
9485 | * @stateid: state ID to release | 9480 | * @stateid: state ID to release |
9486 | * @cred: credential | 9481 | * @cred: credential |
9487 | * @is_recovery: set to true if this call needs to be privileged | 9482 | * @privileged: set to true if this call needs to be privileged |
9488 | * | 9483 | * |
9489 | * Note: this function is always asynchronous. | 9484 | * Note: this function is always asynchronous. |
9490 | */ | 9485 | */ |
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c index a5489d70a724..bcb532def9e2 100644 --- a/fs/nfs/nfs4session.c +++ b/fs/nfs/nfs4session.c | |||
@@ -55,7 +55,7 @@ static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize) | |||
55 | 55 | ||
56 | /** | 56 | /** |
57 | * nfs4_slot_tbl_drain_complete - wake waiters when drain is complete | 57 | * nfs4_slot_tbl_drain_complete - wake waiters when drain is complete |
58 | * @tbl - controlling slot table | 58 | * @tbl: controlling slot table |
59 | * | 59 | * |
60 | */ | 60 | */ |
61 | void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl) | 61 | void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl) |
@@ -110,6 +110,8 @@ static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl, | |||
110 | slot->table = tbl; | 110 | slot->table = tbl; |
111 | slot->slot_nr = slotid; | 111 | slot->slot_nr = slotid; |
112 | slot->seq_nr = seq_init; | 112 | slot->seq_nr = seq_init; |
113 | slot->seq_nr_highest_sent = seq_init; | ||
114 | slot->seq_nr_last_acked = seq_init - 1; | ||
113 | } | 115 | } |
114 | return slot; | 116 | return slot; |
115 | } | 117 | } |
@@ -276,7 +278,8 @@ static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl, | |||
276 | p = &tbl->slots; | 278 | p = &tbl->slots; |
277 | while (*p) { | 279 | while (*p) { |
278 | (*p)->seq_nr = ivalue; | 280 | (*p)->seq_nr = ivalue; |
279 | (*p)->interrupted = 0; | 281 | (*p)->seq_nr_highest_sent = ivalue; |
282 | (*p)->seq_nr_last_acked = ivalue - 1; | ||
280 | p = &(*p)->next; | 283 | p = &(*p)->next; |
281 | } | 284 | } |
282 | tbl->highest_used_slotid = NFS4_NO_SLOT; | 285 | tbl->highest_used_slotid = NFS4_NO_SLOT; |
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index 3c550f297561..230509b77121 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h | |||
@@ -23,8 +23,9 @@ struct nfs4_slot { | |||
23 | unsigned long generation; | 23 | unsigned long generation; |
24 | u32 slot_nr; | 24 | u32 slot_nr; |
25 | u32 seq_nr; | 25 | u32 seq_nr; |
26 | unsigned int interrupted : 1, | 26 | u32 seq_nr_last_acked; |
27 | privileged : 1, | 27 | u32 seq_nr_highest_sent; |
28 | unsigned int privileged : 1, | ||
28 | seq_done : 1; | 29 | seq_done : 1; |
29 | }; | 30 | }; |
30 | 31 | ||
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 02488b50534a..3de36479ed7a 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -563,6 +563,7 @@ static void nfs4_gc_state_owners(struct nfs_server *server) | |||
563 | * nfs4_get_state_owner - Look up a state owner given a credential | 563 | * nfs4_get_state_owner - Look up a state owner given a credential |
564 | * @server: nfs_server to search | 564 | * @server: nfs_server to search |
565 | * @cred: RPC credential to match | 565 | * @cred: RPC credential to match |
566 | * @gfp_flags: allocation mode | ||
566 | * | 567 | * |
567 | * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL. | 568 | * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL. |
568 | */ | 569 | */ |
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index e54d899c1848..e9f39fa5964b 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
@@ -350,7 +350,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page, | |||
350 | 350 | ||
351 | /** | 351 | /** |
352 | * nfs_unlock_request - Unlock request and wake up sleepers. | 352 | * nfs_unlock_request - Unlock request and wake up sleepers. |
353 | * @req: | 353 | * @req: pointer to request |
354 | */ | 354 | */ |
355 | void nfs_unlock_request(struct nfs_page *req) | 355 | void nfs_unlock_request(struct nfs_page *req) |
356 | { | 356 | { |
@@ -368,7 +368,7 @@ void nfs_unlock_request(struct nfs_page *req) | |||
368 | 368 | ||
369 | /** | 369 | /** |
370 | * nfs_unlock_and_release_request - Unlock request and release the nfs_page | 370 | * nfs_unlock_and_release_request - Unlock request and release the nfs_page |
371 | * @req: | 371 | * @req: pointer to request |
372 | */ | 372 | */ |
373 | void nfs_unlock_and_release_request(struct nfs_page *req) | 373 | void nfs_unlock_and_release_request(struct nfs_page *req) |
374 | { | 374 | { |
@@ -531,7 +531,6 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free); | |||
531 | * nfs_pgio_rpcsetup - Set up arguments for a pageio call | 531 | * nfs_pgio_rpcsetup - Set up arguments for a pageio call |
532 | * @hdr: The pageio hdr | 532 | * @hdr: The pageio hdr |
533 | * @count: Number of bytes to read | 533 | * @count: Number of bytes to read |
534 | * @offset: Initial offset | ||
535 | * @how: How to commit data (writes only) | 534 | * @how: How to commit data (writes only) |
536 | * @cinfo: Commit information for the call (writes only) | 535 | * @cinfo: Commit information for the call (writes only) |
537 | */ | 536 | */ |
@@ -634,7 +633,6 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio); | |||
634 | 633 | ||
635 | /** | 634 | /** |
636 | * nfs_pgio_error - Clean up from a pageio error | 635 | * nfs_pgio_error - Clean up from a pageio error |
637 | * @desc: IO descriptor | ||
638 | * @hdr: pageio header | 636 | * @hdr: pageio header |
639 | */ | 637 | */ |
640 | static void nfs_pgio_error(struct nfs_pgio_header *hdr) | 638 | static void nfs_pgio_error(struct nfs_pgio_header *hdr) |
@@ -768,8 +766,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, | |||
768 | pageused = 0; | 766 | pageused = 0; |
769 | while (!list_empty(head)) { | 767 | while (!list_empty(head)) { |
770 | req = nfs_list_entry(head->next); | 768 | req = nfs_list_entry(head->next); |
771 | nfs_list_remove_request(req); | 769 | nfs_list_move_request(req, &hdr->pages); |
772 | nfs_list_add_request(req, &hdr->pages); | ||
773 | 770 | ||
774 | if (!last_page || last_page != req->wb_page) { | 771 | if (!last_page || last_page != req->wb_page) { |
775 | pageused++; | 772 | pageused++; |
@@ -893,6 +890,7 @@ static bool nfs_match_lock_context(const struct nfs_lock_context *l1, | |||
893 | * nfs_can_coalesce_requests - test two requests for compatibility | 890 | * nfs_can_coalesce_requests - test two requests for compatibility |
894 | * @prev: pointer to nfs_page | 891 | * @prev: pointer to nfs_page |
895 | * @req: pointer to nfs_page | 892 | * @req: pointer to nfs_page |
893 | * @pgio: pointer to nfs_pagio_descriptor | ||
896 | * | 894 | * |
897 | * The nfs_page structures 'prev' and 'req' are compared to ensure that the | 895 | * The nfs_page structures 'prev' and 'req' are compared to ensure that the |
898 | * page data area they describe is contiguous, and that their RPC | 896 | * page data area they describe is contiguous, and that their RPC |
@@ -961,8 +959,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, | |||
961 | } | 959 | } |
962 | if (!nfs_can_coalesce_requests(prev, req, desc)) | 960 | if (!nfs_can_coalesce_requests(prev, req, desc)) |
963 | return 0; | 961 | return 0; |
964 | nfs_list_remove_request(req); | 962 | nfs_list_move_request(req, &mirror->pg_list); |
965 | nfs_list_add_request(req, &mirror->pg_list); | ||
966 | mirror->pg_count += req->wb_bytes; | 963 | mirror->pg_count += req->wb_bytes; |
967 | return 1; | 964 | return 1; |
968 | } | 965 | } |
@@ -988,6 +985,16 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) | |||
988 | } | 985 | } |
989 | } | 986 | } |
990 | 987 | ||
988 | static void | ||
989 | nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc, | ||
990 | struct nfs_page *req) | ||
991 | { | ||
992 | LIST_HEAD(head); | ||
993 | |||
994 | nfs_list_move_request(req, &head); | ||
995 | desc->pg_completion_ops->error_cleanup(&head, desc->pg_error); | ||
996 | } | ||
997 | |||
991 | /** | 998 | /** |
992 | * nfs_pageio_add_request - Attempt to coalesce a request into a page list. | 999 | * nfs_pageio_add_request - Attempt to coalesce a request into a page list. |
993 | * @desc: destination io descriptor | 1000 | * @desc: destination io descriptor |
@@ -1025,10 +1032,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, | |||
1025 | nfs_page_group_unlock(req); | 1032 | nfs_page_group_unlock(req); |
1026 | desc->pg_moreio = 1; | 1033 | desc->pg_moreio = 1; |
1027 | nfs_pageio_doio(desc); | 1034 | nfs_pageio_doio(desc); |
1028 | if (desc->pg_error < 0) | 1035 | if (desc->pg_error < 0 || mirror->pg_recoalesce) |
1029 | return 0; | 1036 | goto out_cleanup_subreq; |
1030 | if (mirror->pg_recoalesce) | ||
1031 | return 0; | ||
1032 | /* retry add_request for this subreq */ | 1037 | /* retry add_request for this subreq */ |
1033 | nfs_page_group_lock(req); | 1038 | nfs_page_group_lock(req); |
1034 | continue; | 1039 | continue; |
@@ -1061,6 +1066,10 @@ err_ptr: | |||
1061 | desc->pg_error = PTR_ERR(subreq); | 1066 | desc->pg_error = PTR_ERR(subreq); |
1062 | nfs_page_group_unlock(req); | 1067 | nfs_page_group_unlock(req); |
1063 | return 0; | 1068 | return 0; |
1069 | out_cleanup_subreq: | ||
1070 | if (req != subreq) | ||
1071 | nfs_pageio_cleanup_request(desc, subreq); | ||
1072 | return 0; | ||
1064 | } | 1073 | } |
1065 | 1074 | ||
1066 | static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) | 1075 | static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) |
@@ -1079,7 +1088,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) | |||
1079 | struct nfs_page *req; | 1088 | struct nfs_page *req; |
1080 | 1089 | ||
1081 | req = list_first_entry(&head, struct nfs_page, wb_list); | 1090 | req = list_first_entry(&head, struct nfs_page, wb_list); |
1082 | nfs_list_remove_request(req); | ||
1083 | if (__nfs_pageio_add_request(desc, req)) | 1091 | if (__nfs_pageio_add_request(desc, req)) |
1084 | continue; | 1092 | continue; |
1085 | if (desc->pg_error < 0) { | 1093 | if (desc->pg_error < 0) { |
@@ -1120,7 +1128,8 @@ static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc) | |||
1120 | 1128 | ||
1121 | for (midx = 0; midx < desc->pg_mirror_count; midx++) { | 1129 | for (midx = 0; midx < desc->pg_mirror_count; midx++) { |
1122 | mirror = &desc->pg_mirrors[midx]; | 1130 | mirror = &desc->pg_mirrors[midx]; |
1123 | desc->pg_completion_ops->error_cleanup(&mirror->pg_list); | 1131 | desc->pg_completion_ops->error_cleanup(&mirror->pg_list, |
1132 | desc->pg_error); | ||
1124 | } | 1133 | } |
1125 | } | 1134 | } |
1126 | 1135 | ||
@@ -1168,11 +1177,14 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, | |||
1168 | if (nfs_pgio_has_mirroring(desc)) | 1177 | if (nfs_pgio_has_mirroring(desc)) |
1169 | desc->pg_mirror_idx = midx; | 1178 | desc->pg_mirror_idx = midx; |
1170 | if (!nfs_pageio_add_request_mirror(desc, dupreq)) | 1179 | if (!nfs_pageio_add_request_mirror(desc, dupreq)) |
1171 | goto out_failed; | 1180 | goto out_cleanup_subreq; |
1172 | } | 1181 | } |
1173 | 1182 | ||
1174 | return 1; | 1183 | return 1; |
1175 | 1184 | ||
1185 | out_cleanup_subreq: | ||
1186 | if (req != dupreq) | ||
1187 | nfs_pageio_cleanup_request(desc, dupreq); | ||
1176 | out_failed: | 1188 | out_failed: |
1177 | nfs_pageio_error_cleanup(desc); | 1189 | nfs_pageio_error_cleanup(desc); |
1178 | return 0; | 1190 | return 0; |
@@ -1194,7 +1206,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, | |||
1194 | desc->pg_mirror_idx = mirror_idx; | 1206 | desc->pg_mirror_idx = mirror_idx; |
1195 | for (;;) { | 1207 | for (;;) { |
1196 | nfs_pageio_doio(desc); | 1208 | nfs_pageio_doio(desc); |
1197 | if (!mirror->pg_recoalesce) | 1209 | if (desc->pg_error < 0 || !mirror->pg_recoalesce) |
1198 | break; | 1210 | break; |
1199 | if (!nfs_do_recoalesce(desc)) | 1211 | if (!nfs_do_recoalesce(desc)) |
1200 | break; | 1212 | break; |
@@ -1222,9 +1234,8 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, | |||
1222 | while (!list_empty(&hdr->pages)) { | 1234 | while (!list_empty(&hdr->pages)) { |
1223 | struct nfs_page *req = nfs_list_entry(hdr->pages.next); | 1235 | struct nfs_page *req = nfs_list_entry(hdr->pages.next); |
1224 | 1236 | ||
1225 | nfs_list_remove_request(req); | ||
1226 | if (!nfs_pageio_add_request(desc, req)) | 1237 | if (!nfs_pageio_add_request(desc, req)) |
1227 | nfs_list_add_request(req, &failed); | 1238 | nfs_list_move_request(req, &failed); |
1228 | } | 1239 | } |
1229 | nfs_pageio_complete(desc); | 1240 | nfs_pageio_complete(desc); |
1230 | if (!list_empty(&failed)) { | 1241 | if (!list_empty(&failed)) { |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 53726da5c010..8247bd1634cb 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -758,22 +758,35 @@ static int | |||
758 | pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp, | 758 | pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp, |
759 | struct nfs_server *server, | 759 | struct nfs_server *server, |
760 | struct list_head *layout_list) | 760 | struct list_head *layout_list) |
761 | __must_hold(&clp->cl_lock) | ||
762 | __must_hold(RCU) | ||
761 | { | 763 | { |
762 | struct pnfs_layout_hdr *lo, *next; | 764 | struct pnfs_layout_hdr *lo, *next; |
763 | struct inode *inode; | 765 | struct inode *inode; |
764 | 766 | ||
765 | list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) { | 767 | list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) { |
766 | if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) | 768 | if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || |
769 | test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) || | ||
770 | !list_empty(&lo->plh_bulk_destroy)) | ||
767 | continue; | 771 | continue; |
772 | /* If the sb is being destroyed, just bail */ | ||
773 | if (!nfs_sb_active(server->super)) | ||
774 | break; | ||
768 | inode = igrab(lo->plh_inode); | 775 | inode = igrab(lo->plh_inode); |
769 | if (inode == NULL) | 776 | if (inode != NULL) { |
770 | continue; | 777 | list_del_init(&lo->plh_layouts); |
771 | list_del_init(&lo->plh_layouts); | 778 | if (pnfs_layout_add_bulk_destroy_list(inode, |
772 | if (pnfs_layout_add_bulk_destroy_list(inode, layout_list)) | 779 | layout_list)) |
773 | continue; | 780 | continue; |
774 | rcu_read_unlock(); | 781 | rcu_read_unlock(); |
775 | spin_unlock(&clp->cl_lock); | 782 | spin_unlock(&clp->cl_lock); |
776 | iput(inode); | 783 | iput(inode); |
784 | } else { | ||
785 | rcu_read_unlock(); | ||
786 | spin_unlock(&clp->cl_lock); | ||
787 | set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags); | ||
788 | } | ||
789 | nfs_sb_deactive(server->super); | ||
777 | spin_lock(&clp->cl_lock); | 790 | spin_lock(&clp->cl_lock); |
778 | rcu_read_lock(); | 791 | rcu_read_lock(); |
779 | return -EAGAIN; | 792 | return -EAGAIN; |
@@ -811,7 +824,7 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list, | |||
811 | /* Free all lsegs that are attached to commit buckets */ | 824 | /* Free all lsegs that are attached to commit buckets */ |
812 | nfs_commit_inode(inode, 0); | 825 | nfs_commit_inode(inode, 0); |
813 | pnfs_put_layout_hdr(lo); | 826 | pnfs_put_layout_hdr(lo); |
814 | iput(inode); | 827 | nfs_iput_and_deactive(inode); |
815 | } | 828 | } |
816 | return ret; | 829 | return ret; |
817 | } | 830 | } |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 5e80a07b7bea..56659ccce1d8 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -104,6 +104,7 @@ enum { | |||
104 | NFS_LAYOUT_RETURN_REQUESTED, /* Return this layout ASAP */ | 104 | NFS_LAYOUT_RETURN_REQUESTED, /* Return this layout ASAP */ |
105 | NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ | 105 | NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ |
106 | NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ | 106 | NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ |
107 | NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */ | ||
107 | }; | 108 | }; |
108 | 109 | ||
109 | enum layoutdriver_policy_flags { | 110 | enum layoutdriver_policy_flags { |
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index f9f19784db82..1d95a60b2586 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -205,7 +205,7 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr, | |||
205 | } | 205 | } |
206 | 206 | ||
207 | static void | 207 | static void |
208 | nfs_async_read_error(struct list_head *head) | 208 | nfs_async_read_error(struct list_head *head, int error) |
209 | { | 209 | { |
210 | struct nfs_page *req; | 210 | struct nfs_page *req; |
211 | 211 | ||
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 0570391eaa16..23790c7b2289 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -1919,7 +1919,7 @@ static int nfs_parse_devname(const char *dev_name, | |||
1919 | /* kill possible hostname list: not supported */ | 1919 | /* kill possible hostname list: not supported */ |
1920 | comma = strchr(dev_name, ','); | 1920 | comma = strchr(dev_name, ','); |
1921 | if (comma != NULL && comma < end) | 1921 | if (comma != NULL && comma < end) |
1922 | *comma = 0; | 1922 | len = comma - dev_name; |
1923 | } | 1923 | } |
1924 | 1924 | ||
1925 | if (len > maxnamlen) | 1925 | if (len > maxnamlen) |
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index 79b97b3c4427..52d533967485 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c | |||
@@ -39,6 +39,7 @@ nfs_free_unlinkdata(struct nfs_unlinkdata *data) | |||
39 | /** | 39 | /** |
40 | * nfs_async_unlink_done - Sillydelete post-processing | 40 | * nfs_async_unlink_done - Sillydelete post-processing |
41 | * @task: rpc_task of the sillydelete | 41 | * @task: rpc_task of the sillydelete |
42 | * @calldata: pointer to nfs_unlinkdata | ||
42 | * | 43 | * |
43 | * Do the directory attribute update. | 44 | * Do the directory attribute update. |
44 | */ | 45 | */ |
@@ -54,7 +55,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) | |||
54 | 55 | ||
55 | /** | 56 | /** |
56 | * nfs_async_unlink_release - Release the sillydelete data. | 57 | * nfs_async_unlink_release - Release the sillydelete data. |
57 | * @task: rpc_task of the sillydelete | 58 | * @calldata: struct nfs_unlinkdata to release |
58 | * | 59 | * |
59 | * We need to call nfs_put_unlinkdata as a 'tk_release' task since the | 60 | * We need to call nfs_put_unlinkdata as a 'tk_release' task since the |
60 | * rpc_task would be freed too. | 61 | * rpc_task would be freed too. |
@@ -159,8 +160,8 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf | |||
159 | 160 | ||
160 | /** | 161 | /** |
161 | * nfs_async_unlink - asynchronous unlinking of a file | 162 | * nfs_async_unlink - asynchronous unlinking of a file |
162 | * @dir: parent directory of dentry | 163 | * @dentry: parent directory of dentry |
163 | * @dentry: dentry to unlink | 164 | * @name: name of dentry to unlink |
164 | */ | 165 | */ |
165 | static int | 166 | static int |
166 | nfs_async_unlink(struct dentry *dentry, const struct qstr *name) | 167 | nfs_async_unlink(struct dentry *dentry, const struct qstr *name) |
@@ -324,6 +325,7 @@ static const struct rpc_call_ops nfs_rename_ops = { | |||
324 | * @new_dir: target directory for the rename | 325 | * @new_dir: target directory for the rename |
325 | * @old_dentry: original dentry to be renamed | 326 | * @old_dentry: original dentry to be renamed |
326 | * @new_dentry: dentry to which the old_dentry should be renamed | 327 | * @new_dentry: dentry to which the old_dentry should be renamed |
328 | * @complete: Function to run on successful completion | ||
327 | * | 329 | * |
328 | * It's expected that valid references to the dentries and inodes are held | 330 | * It's expected that valid references to the dentries and inodes are held |
329 | */ | 331 | */ |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index f12cb31a41e5..f3ebabaa291d 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/iversion.h> | 26 | #include <linux/iversion.h> |
27 | 27 | ||
28 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
29 | #include <linux/sched/mm.h> | ||
29 | 30 | ||
30 | #include "delegation.h" | 31 | #include "delegation.h" |
31 | #include "internal.h" | 32 | #include "internal.h" |
@@ -238,9 +239,9 @@ out: | |||
238 | } | 239 | } |
239 | 240 | ||
240 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ | 241 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ |
241 | static void nfs_set_pageerror(struct page *page) | 242 | static void nfs_set_pageerror(struct address_space *mapping) |
242 | { | 243 | { |
243 | nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); | 244 | nfs_zap_mapping(mapping->host, mapping); |
244 | } | 245 | } |
245 | 246 | ||
246 | /* | 247 | /* |
@@ -712,11 +713,13 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) | |||
712 | { | 713 | { |
713 | struct inode *inode = mapping->host; | 714 | struct inode *inode = mapping->host; |
714 | struct nfs_pageio_descriptor pgio; | 715 | struct nfs_pageio_descriptor pgio; |
715 | struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_NOFS); | 716 | struct nfs_io_completion *ioc; |
717 | unsigned int pflags = memalloc_nofs_save(); | ||
716 | int err; | 718 | int err; |
717 | 719 | ||
718 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); | 720 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); |
719 | 721 | ||
722 | ioc = nfs_io_completion_alloc(GFP_NOFS); | ||
720 | if (ioc) | 723 | if (ioc) |
721 | nfs_io_completion_init(ioc, nfs_io_completion_commit, inode); | 724 | nfs_io_completion_init(ioc, nfs_io_completion_commit, inode); |
722 | 725 | ||
@@ -727,6 +730,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) | |||
727 | nfs_pageio_complete(&pgio); | 730 | nfs_pageio_complete(&pgio); |
728 | nfs_io_completion_put(ioc); | 731 | nfs_io_completion_put(ioc); |
729 | 732 | ||
733 | memalloc_nofs_restore(pflags); | ||
734 | |||
730 | if (err < 0) | 735 | if (err < 0) |
731 | goto out_err; | 736 | goto out_err; |
732 | err = pgio.pg_error; | 737 | err = pgio.pg_error; |
@@ -865,7 +870,6 @@ EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked); | |||
865 | /** | 870 | /** |
866 | * nfs_request_add_commit_list - add request to a commit list | 871 | * nfs_request_add_commit_list - add request to a commit list |
867 | * @req: pointer to a struct nfs_page | 872 | * @req: pointer to a struct nfs_page |
868 | * @dst: commit list head | ||
869 | * @cinfo: holds list lock and accounting info | 873 | * @cinfo: holds list lock and accounting info |
870 | * | 874 | * |
871 | * This sets the PG_CLEAN bit, updates the cinfo count of | 875 | * This sets the PG_CLEAN bit, updates the cinfo count of |
@@ -994,7 +998,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
994 | nfs_list_remove_request(req); | 998 | nfs_list_remove_request(req); |
995 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && | 999 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && |
996 | (hdr->good_bytes < bytes)) { | 1000 | (hdr->good_bytes < bytes)) { |
997 | nfs_set_pageerror(req->wb_page); | 1001 | nfs_set_pageerror(page_file_mapping(req->wb_page)); |
998 | nfs_context_set_write_error(req->wb_context, hdr->error); | 1002 | nfs_context_set_write_error(req->wb_context, hdr->error); |
999 | goto remove_req; | 1003 | goto remove_req; |
1000 | } | 1004 | } |
@@ -1348,7 +1352,8 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
1348 | unsigned int offset, unsigned int count) | 1352 | unsigned int offset, unsigned int count) |
1349 | { | 1353 | { |
1350 | struct nfs_open_context *ctx = nfs_file_open_context(file); | 1354 | struct nfs_open_context *ctx = nfs_file_open_context(file); |
1351 | struct inode *inode = page_file_mapping(page)->host; | 1355 | struct address_space *mapping = page_file_mapping(page); |
1356 | struct inode *inode = mapping->host; | ||
1352 | int status = 0; | 1357 | int status = 0; |
1353 | 1358 | ||
1354 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); | 1359 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); |
@@ -1366,7 +1371,7 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
1366 | 1371 | ||
1367 | status = nfs_writepage_setup(ctx, page, offset, count); | 1372 | status = nfs_writepage_setup(ctx, page, offset, count); |
1368 | if (status < 0) | 1373 | if (status < 0) |
1369 | nfs_set_pageerror(page); | 1374 | nfs_set_pageerror(mapping); |
1370 | else | 1375 | else |
1371 | __set_page_dirty_nobuffers(page); | 1376 | __set_page_dirty_nobuffers(page); |
1372 | out: | 1377 | out: |
@@ -1411,20 +1416,27 @@ static void nfs_redirty_request(struct nfs_page *req) | |||
1411 | nfs_release_request(req); | 1416 | nfs_release_request(req); |
1412 | } | 1417 | } |
1413 | 1418 | ||
1414 | static void nfs_async_write_error(struct list_head *head) | 1419 | static void nfs_async_write_error(struct list_head *head, int error) |
1415 | { | 1420 | { |
1416 | struct nfs_page *req; | 1421 | struct nfs_page *req; |
1417 | 1422 | ||
1418 | while (!list_empty(head)) { | 1423 | while (!list_empty(head)) { |
1419 | req = nfs_list_entry(head->next); | 1424 | req = nfs_list_entry(head->next); |
1420 | nfs_list_remove_request(req); | 1425 | nfs_list_remove_request(req); |
1426 | if (nfs_error_is_fatal(error)) { | ||
1427 | nfs_context_set_write_error(req->wb_context, error); | ||
1428 | if (nfs_error_is_fatal_on_server(error)) { | ||
1429 | nfs_write_error_remove_page(req); | ||
1430 | continue; | ||
1431 | } | ||
1432 | } | ||
1421 | nfs_redirty_request(req); | 1433 | nfs_redirty_request(req); |
1422 | } | 1434 | } |
1423 | } | 1435 | } |
1424 | 1436 | ||
1425 | static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr) | 1437 | static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr) |
1426 | { | 1438 | { |
1427 | nfs_async_write_error(&hdr->pages); | 1439 | nfs_async_write_error(&hdr->pages, 0); |
1428 | filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset, | 1440 | filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset, |
1429 | hdr->args.offset + hdr->args.count - 1); | 1441 | hdr->args.offset + hdr->args.count - 1); |
1430 | } | 1442 | } |
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index b33f9785b756..72a7681f4046 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c | |||
@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net) | |||
1239 | retval = nfsd_idmap_init(net); | 1239 | retval = nfsd_idmap_init(net); |
1240 | if (retval) | 1240 | if (retval) |
1241 | goto out_idmap_error; | 1241 | goto out_idmap_error; |
1242 | nn->nfsd4_lease = 45; /* default lease time */ | 1242 | nn->nfsd4_lease = 90; /* default lease time */ |
1243 | nn->nfsd4_grace = 45; | 1243 | nn->nfsd4_grace = 90; |
1244 | nn->somebody_reclaimed = false; | 1244 | nn->somebody_reclaimed = false; |
1245 | nn->clverifier_counter = prandom_u32(); | 1245 | nn->clverifier_counter = prandom_u32(); |
1246 | nn->clientid_counter = prandom_u32(); | 1246 | nn->clientid_counter = prandom_u32(); |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f0ec9edab2f3..85b0ef890b28 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -423,7 +423,7 @@ struct mem_size_stats { | |||
423 | }; | 423 | }; |
424 | 424 | ||
425 | static void smaps_account(struct mem_size_stats *mss, struct page *page, | 425 | static void smaps_account(struct mem_size_stats *mss, struct page *page, |
426 | bool compound, bool young, bool dirty) | 426 | bool compound, bool young, bool dirty, bool locked) |
427 | { | 427 | { |
428 | int i, nr = compound ? 1 << compound_order(page) : 1; | 428 | int i, nr = compound ? 1 << compound_order(page) : 1; |
429 | unsigned long size = nr * PAGE_SIZE; | 429 | unsigned long size = nr * PAGE_SIZE; |
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, | |||
450 | else | 450 | else |
451 | mss->private_clean += size; | 451 | mss->private_clean += size; |
452 | mss->pss += (u64)size << PSS_SHIFT; | 452 | mss->pss += (u64)size << PSS_SHIFT; |
453 | if (locked) | ||
454 | mss->pss_locked += (u64)size << PSS_SHIFT; | ||
453 | return; | 455 | return; |
454 | } | 456 | } |
455 | 457 | ||
456 | for (i = 0; i < nr; i++, page++) { | 458 | for (i = 0; i < nr; i++, page++) { |
457 | int mapcount = page_mapcount(page); | 459 | int mapcount = page_mapcount(page); |
460 | unsigned long pss = (PAGE_SIZE << PSS_SHIFT); | ||
458 | 461 | ||
459 | if (mapcount >= 2) { | 462 | if (mapcount >= 2) { |
460 | if (dirty || PageDirty(page)) | 463 | if (dirty || PageDirty(page)) |
461 | mss->shared_dirty += PAGE_SIZE; | 464 | mss->shared_dirty += PAGE_SIZE; |
462 | else | 465 | else |
463 | mss->shared_clean += PAGE_SIZE; | 466 | mss->shared_clean += PAGE_SIZE; |
464 | mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; | 467 | mss->pss += pss / mapcount; |
468 | if (locked) | ||
469 | mss->pss_locked += pss / mapcount; | ||
465 | } else { | 470 | } else { |
466 | if (dirty || PageDirty(page)) | 471 | if (dirty || PageDirty(page)) |
467 | mss->private_dirty += PAGE_SIZE; | 472 | mss->private_dirty += PAGE_SIZE; |
468 | else | 473 | else |
469 | mss->private_clean += PAGE_SIZE; | 474 | mss->private_clean += PAGE_SIZE; |
470 | mss->pss += PAGE_SIZE << PSS_SHIFT; | 475 | mss->pss += pss; |
476 | if (locked) | ||
477 | mss->pss_locked += pss; | ||
471 | } | 478 | } |
472 | } | 479 | } |
473 | } | 480 | } |
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, | |||
490 | { | 497 | { |
491 | struct mem_size_stats *mss = walk->private; | 498 | struct mem_size_stats *mss = walk->private; |
492 | struct vm_area_struct *vma = walk->vma; | 499 | struct vm_area_struct *vma = walk->vma; |
500 | bool locked = !!(vma->vm_flags & VM_LOCKED); | ||
493 | struct page *page = NULL; | 501 | struct page *page = NULL; |
494 | 502 | ||
495 | if (pte_present(*pte)) { | 503 | if (pte_present(*pte)) { |
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, | |||
532 | if (!page) | 540 | if (!page) |
533 | return; | 541 | return; |
534 | 542 | ||
535 | smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte)); | 543 | smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked); |
536 | } | 544 | } |
537 | 545 | ||
538 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 546 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | |||
541 | { | 549 | { |
542 | struct mem_size_stats *mss = walk->private; | 550 | struct mem_size_stats *mss = walk->private; |
543 | struct vm_area_struct *vma = walk->vma; | 551 | struct vm_area_struct *vma = walk->vma; |
552 | bool locked = !!(vma->vm_flags & VM_LOCKED); | ||
544 | struct page *page; | 553 | struct page *page; |
545 | 554 | ||
546 | /* FOLL_DUMP will return -EFAULT on huge zero page */ | 555 | /* FOLL_DUMP will return -EFAULT on huge zero page */ |
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | |||
555 | /* pass */; | 564 | /* pass */; |
556 | else | 565 | else |
557 | VM_BUG_ON_PAGE(1, page); | 566 | VM_BUG_ON_PAGE(1, page); |
558 | smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); | 567 | smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); |
559 | } | 568 | } |
560 | #else | 569 | #else |
561 | static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | 570 | static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, |
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma, | |||
737 | } | 746 | } |
738 | } | 747 | } |
739 | #endif | 748 | #endif |
740 | |||
741 | /* mmap_sem is held in m_start */ | 749 | /* mmap_sem is held in m_start */ |
742 | walk_page_vma(vma, &smaps_walk); | 750 | walk_page_vma(vma, &smaps_walk); |
743 | if (vma->vm_flags & VM_LOCKED) | ||
744 | mss->pss_locked += mss->pss; | ||
745 | } | 751 | } |
746 | 752 | ||
747 | #define SEQ_PUT_DEC(str, val) \ | 753 | #define SEQ_PUT_DEC(str, val) \ |
diff --git a/include/uapi/asm-generic/shmparam.h b/include/asm-generic/shmparam.h index 8b78c0ba08b1..8b78c0ba08b1 100644 --- a/include/uapi/asm-generic/shmparam.h +++ b/include/asm-generic/shmparam.h | |||
diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h new file mode 100644 index 000000000000..a726dd3f1dc6 --- /dev/null +++ b/include/keys/request_key_auth-type.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* request_key authorisation token key type | ||
2 | * | ||
3 | * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H | ||
13 | #define _KEYS_REQUEST_KEY_AUTH_TYPE_H | ||
14 | |||
15 | #include <linux/key.h> | ||
16 | |||
17 | /* | ||
18 | * Authorisation record for request_key(). | ||
19 | */ | ||
20 | struct request_key_auth { | ||
21 | struct key *target_key; | ||
22 | struct key *dest_keyring; | ||
23 | const struct cred *cred; | ||
24 | void *callout_info; | ||
25 | size_t callout_len; | ||
26 | pid_t pid; | ||
27 | char op[8]; | ||
28 | } __randomize_layout; | ||
29 | |||
30 | static inline struct request_key_auth *get_request_key_auth(const struct key *key) | ||
31 | { | ||
32 | return key->payload.data[0]; | ||
33 | } | ||
34 | |||
35 | |||
36 | #endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */ | ||
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4f31f96bbfab..c36c86f1ec9a 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -100,7 +100,7 @@ enum vgic_irq_config { | |||
100 | }; | 100 | }; |
101 | 101 | ||
102 | struct vgic_irq { | 102 | struct vgic_irq { |
103 | spinlock_t irq_lock; /* Protects the content of the struct */ | 103 | raw_spinlock_t irq_lock; /* Protects the content of the struct */ |
104 | struct list_head lpi_list; /* Used to link all LPIs together */ | 104 | struct list_head lpi_list; /* Used to link all LPIs together */ |
105 | struct list_head ap_list; | 105 | struct list_head ap_list; |
106 | 106 | ||
@@ -256,7 +256,7 @@ struct vgic_dist { | |||
256 | u64 propbaser; | 256 | u64 propbaser; |
257 | 257 | ||
258 | /* Protects the lpi_list and the count value below. */ | 258 | /* Protects the lpi_list and the count value below. */ |
259 | spinlock_t lpi_list_lock; | 259 | raw_spinlock_t lpi_list_lock; |
260 | struct list_head lpi_list_head; | 260 | struct list_head lpi_list_head; |
261 | int lpi_list_count; | 261 | int lpi_list_count; |
262 | 262 | ||
@@ -307,7 +307,7 @@ struct vgic_cpu { | |||
307 | unsigned int used_lrs; | 307 | unsigned int used_lrs; |
308 | struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; | 308 | struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; |
309 | 309 | ||
310 | spinlock_t ap_list_lock; /* Protects the ap_list */ | 310 | raw_spinlock_t ap_list_lock; /* Protects the ap_list */ |
311 | 311 | ||
312 | /* | 312 | /* |
313 | * List of IRQs that this VCPU should consider because they are either | 313 | * List of IRQs that this VCPU should consider because they are either |
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 19f32b0c29af..6b318efd8a74 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h | |||
@@ -34,6 +34,7 @@ | |||
34 | #ifndef __has_attribute | 34 | #ifndef __has_attribute |
35 | # define __has_attribute(x) __GCC4_has_attribute_##x | 35 | # define __has_attribute(x) __GCC4_has_attribute_##x |
36 | # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) | 36 | # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) |
37 | # define __GCC4_has_attribute___copy__ 0 | ||
37 | # define __GCC4_has_attribute___designated_init__ 0 | 38 | # define __GCC4_has_attribute___designated_init__ 0 |
38 | # define __GCC4_has_attribute___externally_visible__ 1 | 39 | # define __GCC4_has_attribute___externally_visible__ 1 |
39 | # define __GCC4_has_attribute___noclone__ 1 | 40 | # define __GCC4_has_attribute___noclone__ 1 |
@@ -101,6 +102,19 @@ | |||
101 | #define __attribute_const__ __attribute__((__const__)) | 102 | #define __attribute_const__ __attribute__((__const__)) |
102 | 103 | ||
103 | /* | 104 | /* |
105 | * Optional: only supported since gcc >= 9 | ||
106 | * Optional: not supported by clang | ||
107 | * Optional: not supported by icc | ||
108 | * | ||
109 | * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute | ||
110 | */ | ||
111 | #if __has_attribute(__copy__) | ||
112 | # define __copy(symbol) __attribute__((__copy__(symbol))) | ||
113 | #else | ||
114 | # define __copy(symbol) | ||
115 | #endif | ||
116 | |||
117 | /* | ||
104 | * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' | 118 | * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' |
105 | * attribute warnings entirely and for good") for more information. | 119 | * attribute warnings entirely and for good") for more information. |
106 | * | 120 | * |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 45ff763fba76..28604a8d0aa9 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -1198,8 +1198,6 @@ static inline bool efi_enabled(int feature) | |||
1198 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); | 1198 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); |
1199 | 1199 | ||
1200 | extern bool efi_is_table_address(unsigned long phys_addr); | 1200 | extern bool efi_is_table_address(unsigned long phys_addr); |
1201 | |||
1202 | extern int efi_apply_persistent_mem_reservations(void); | ||
1203 | #else | 1201 | #else |
1204 | static inline bool efi_enabled(int feature) | 1202 | static inline bool efi_enabled(int feature) |
1205 | { | 1203 | { |
@@ -1218,11 +1216,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr) | |||
1218 | { | 1216 | { |
1219 | return false; | 1217 | return false; |
1220 | } | 1218 | } |
1221 | |||
1222 | static inline int efi_apply_persistent_mem_reservations(void) | ||
1223 | { | ||
1224 | return 0; | ||
1225 | } | ||
1226 | #endif | 1219 | #endif |
1227 | 1220 | ||
1228 | extern int efi_status_to_err(efi_status_t status); | 1221 | extern int efi_status_to_err(efi_status_t status); |
diff --git a/include/linux/key-type.h b/include/linux/key-type.h index bc9af551fc83..e49d1de0614e 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h | |||
@@ -21,15 +21,6 @@ struct kernel_pkey_query; | |||
21 | struct kernel_pkey_params; | 21 | struct kernel_pkey_params; |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * key under-construction record | ||
25 | * - passed to the request_key actor if supplied | ||
26 | */ | ||
27 | struct key_construction { | ||
28 | struct key *key; /* key being constructed */ | ||
29 | struct key *authkey;/* authorisation for key being constructed */ | ||
30 | }; | ||
31 | |||
32 | /* | ||
33 | * Pre-parsed payload, used by key add, update and instantiate. | 24 | * Pre-parsed payload, used by key add, update and instantiate. |
34 | * | 25 | * |
35 | * This struct will be cleared and data and datalen will be set with the data | 26 | * This struct will be cleared and data and datalen will be set with the data |
@@ -50,8 +41,7 @@ struct key_preparsed_payload { | |||
50 | time64_t expiry; /* Expiry time of key */ | 41 | time64_t expiry; /* Expiry time of key */ |
51 | } __randomize_layout; | 42 | } __randomize_layout; |
52 | 43 | ||
53 | typedef int (*request_key_actor_t)(struct key_construction *key, | 44 | typedef int (*request_key_actor_t)(struct key *auth_key, void *aux); |
54 | const char *op, void *aux); | ||
55 | 45 | ||
56 | /* | 46 | /* |
57 | * Preparsed matching criterion. | 47 | * Preparsed matching criterion. |
@@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key, | |||
181 | const void *data, | 171 | const void *data, |
182 | size_t datalen, | 172 | size_t datalen, |
183 | struct key *keyring, | 173 | struct key *keyring, |
184 | struct key *instkey); | 174 | struct key *authkey); |
185 | extern int key_reject_and_link(struct key *key, | 175 | extern int key_reject_and_link(struct key *key, |
186 | unsigned timeout, | 176 | unsigned timeout, |
187 | unsigned error, | 177 | unsigned error, |
188 | struct key *keyring, | 178 | struct key *keyring, |
189 | struct key *instkey); | 179 | struct key *authkey); |
190 | extern void complete_request_key(struct key_construction *cons, int error); | 180 | extern void complete_request_key(struct key *authkey, int error); |
191 | 181 | ||
192 | static inline int key_negate_and_link(struct key *key, | 182 | static inline int key_negate_and_link(struct key *key, |
193 | unsigned timeout, | 183 | unsigned timeout, |
194 | struct key *keyring, | 184 | struct key *keyring, |
195 | struct key *instkey) | 185 | struct key *authkey) |
196 | { | 186 | { |
197 | return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); | 187 | return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey); |
198 | } | 188 | } |
199 | 189 | ||
200 | extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); | 190 | extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 64c41cf45590..859b55b66db2 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -29,9 +29,6 @@ extern unsigned long max_pfn; | |||
29 | */ | 29 | */ |
30 | extern unsigned long long max_possible_pfn; | 30 | extern unsigned long long max_possible_pfn; |
31 | 31 | ||
32 | #define INIT_MEMBLOCK_REGIONS 128 | ||
33 | #define INIT_PHYSMEM_REGIONS 4 | ||
34 | |||
35 | /** | 32 | /** |
36 | * enum memblock_flags - definition of memory region attributes | 33 | * enum memblock_flags - definition of memory region attributes |
37 | * @MEMBLOCK_NONE: no special request | 34 | * @MEMBLOCK_NONE: no special request |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index de7377815b6b..8ef330027b13 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
@@ -308,6 +308,7 @@ struct mmc_card { | |||
308 | unsigned int nr_parts; | 308 | unsigned int nr_parts; |
309 | 309 | ||
310 | unsigned int bouncesz; /* Bounce buffer size */ | 310 | unsigned int bouncesz; /* Bounce buffer size */ |
311 | struct workqueue_struct *complete_wq; /* Private workqueue */ | ||
311 | }; | 312 | }; |
312 | 313 | ||
313 | static inline bool mmc_large_sector(struct mmc_card *card) | 314 | static inline bool mmc_large_sector(struct mmc_card *card) |
diff --git a/include/linux/module.h b/include/linux/module.h index 8fa38d3e7538..f5bc4c046461 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -129,13 +129,13 @@ extern void cleanup_module(void); | |||
129 | #define module_init(initfn) \ | 129 | #define module_init(initfn) \ |
130 | static inline initcall_t __maybe_unused __inittest(void) \ | 130 | static inline initcall_t __maybe_unused __inittest(void) \ |
131 | { return initfn; } \ | 131 | { return initfn; } \ |
132 | int init_module(void) __attribute__((alias(#initfn))); | 132 | int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); |
133 | 133 | ||
134 | /* This is only required if you want to be unloadable. */ | 134 | /* This is only required if you want to be unloadable. */ |
135 | #define module_exit(exitfn) \ | 135 | #define module_exit(exitfn) \ |
136 | static inline exitcall_t __maybe_unused __exittest(void) \ | 136 | static inline exitcall_t __maybe_unused __exittest(void) \ |
137 | { return exitfn; } \ | 137 | { return exitfn; } \ |
138 | void cleanup_module(void) __attribute__((alias(#exitfn))); | 138 | void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); |
139 | 139 | ||
140 | #endif | 140 | #endif |
141 | 141 | ||
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2b2a6dce1630..4c76fe2c8488 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #define _LINUX_NETDEV_FEATURES_H | 11 | #define _LINUX_NETDEV_FEATURES_H |
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/bitops.h> | ||
15 | #include <asm/byteorder.h> | ||
14 | 16 | ||
15 | typedef u64 netdev_features_t; | 17 | typedef u64 netdev_features_t; |
16 | 18 | ||
@@ -154,8 +156,26 @@ enum { | |||
154 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) | 156 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) |
155 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) | 157 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) |
156 | 158 | ||
157 | #define for_each_netdev_feature(mask_addr, bit) \ | 159 | /* Finds the next feature with the highest number of the range of start till 0. |
158 | for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) | 160 | */ |
161 | static inline int find_next_netdev_feature(u64 feature, unsigned long start) | ||
162 | { | ||
163 | /* like BITMAP_LAST_WORD_MASK() for u64 | ||
164 | * this sets the most significant 64 - start to 0. | ||
165 | */ | ||
166 | feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); | ||
167 | |||
168 | return fls64(feature) - 1; | ||
169 | } | ||
170 | |||
171 | /* This goes for the MSB to the LSB through the set feature bits, | ||
172 | * mask_addr should be a u64 and bit an int | ||
173 | */ | ||
174 | #define for_each_netdev_feature(mask_addr, bit) \ | ||
175 | for ((bit) = find_next_netdev_feature((mask_addr), \ | ||
176 | NETDEV_FEATURE_COUNT); \ | ||
177 | (bit) >= 0; \ | ||
178 | (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) | ||
159 | 179 | ||
160 | /* Features valid for ethtool to change */ | 180 | /* Features valid for ethtool to change */ |
161 | /* = all defined minus driver/device-class-related */ | 181 | /* = all defined minus driver/device-class-related */ |
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index e27572d30d97..ad69430fd0eb 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
@@ -164,6 +164,16 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head) | |||
164 | list_add_tail(&req->wb_list, head); | 164 | list_add_tail(&req->wb_list, head); |
165 | } | 165 | } |
166 | 166 | ||
167 | /** | ||
168 | * nfs_list_move_request - Move a request to a new list | ||
169 | * @req: request | ||
170 | * @head: head of list into which to insert the request. | ||
171 | */ | ||
172 | static inline void | ||
173 | nfs_list_move_request(struct nfs_page *req, struct list_head *head) | ||
174 | { | ||
175 | list_move_tail(&req->wb_list, head); | ||
176 | } | ||
167 | 177 | ||
168 | /** | 178 | /** |
169 | * nfs_list_remove_request - Remove a request from its wb_list | 179 | * nfs_list_remove_request - Remove a request from its wb_list |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 441a93ebcac0..b4bd2bf5f585 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -1549,7 +1549,7 @@ struct nfs_commit_data { | |||
1549 | }; | 1549 | }; |
1550 | 1550 | ||
1551 | struct nfs_pgio_completion_ops { | 1551 | struct nfs_pgio_completion_ops { |
1552 | void (*error_cleanup)(struct list_head *head); | 1552 | void (*error_cleanup)(struct list_head *head, int); |
1553 | void (*init_hdr)(struct nfs_pgio_header *hdr); | 1553 | void (*init_hdr)(struct nfs_pgio_header *hdr); |
1554 | void (*completion)(struct nfs_pgio_header *hdr); | 1554 | void (*completion)(struct nfs_pgio_header *hdr); |
1555 | void (*reschedule_io)(struct nfs_pgio_header *hdr); | 1555 | void (*reschedule_io)(struct nfs_pgio_header *hdr); |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1d5c551a5add..e1a051724f7e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -447,6 +447,11 @@ struct pmu { | |||
447 | * Filter events for PMU-specific reasons. | 447 | * Filter events for PMU-specific reasons. |
448 | */ | 448 | */ |
449 | int (*filter_match) (struct perf_event *event); /* optional */ | 449 | int (*filter_match) (struct perf_event *event); /* optional */ |
450 | |||
451 | /* | ||
452 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. | ||
453 | */ | ||
454 | int (*check_period) (struct perf_event *event, u64 value); /* optional */ | ||
450 | }; | 455 | }; |
451 | 456 | ||
452 | enum perf_addr_filter_action_t { | 457 | enum perf_addr_filter_action_t { |
diff --git a/include/linux/phy.h b/include/linux/phy.h index ef20aeea10cc..127fcc9c3778 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -674,26 +674,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, | |||
674 | size_t phy_speeds(unsigned int *speeds, size_t size, | 674 | size_t phy_speeds(unsigned int *speeds, size_t size, |
675 | unsigned long *mask); | 675 | unsigned long *mask); |
676 | 676 | ||
677 | static inline bool __phy_is_started(struct phy_device *phydev) | ||
678 | { | ||
679 | WARN_ON(!mutex_is_locked(&phydev->lock)); | ||
680 | |||
681 | return phydev->state >= PHY_UP; | ||
682 | } | ||
683 | |||
684 | /** | 677 | /** |
685 | * phy_is_started - Convenience function to check whether PHY is started | 678 | * phy_is_started - Convenience function to check whether PHY is started |
686 | * @phydev: The phy_device struct | 679 | * @phydev: The phy_device struct |
687 | */ | 680 | */ |
688 | static inline bool phy_is_started(struct phy_device *phydev) | 681 | static inline bool phy_is_started(struct phy_device *phydev) |
689 | { | 682 | { |
690 | bool started; | 683 | return phydev->state >= PHY_UP; |
691 | |||
692 | mutex_lock(&phydev->lock); | ||
693 | started = __phy_is_started(phydev); | ||
694 | mutex_unlock(&phydev->lock); | ||
695 | |||
696 | return started; | ||
697 | } | 684 | } |
698 | 685 | ||
699 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); | 686 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 95d25b010a25..bdb9563c64a0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, | |||
2434 | 2434 | ||
2435 | if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) | 2435 | if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) |
2436 | skb_set_transport_header(skb, keys.control.thoff); | 2436 | skb_set_transport_header(skb, keys.control.thoff); |
2437 | else | 2437 | else if (offset_hint >= 0) |
2438 | skb_set_transport_header(skb, offset_hint); | 2438 | skb_set_transport_header(skb, offset_hint); |
2439 | } | 2439 | } |
2440 | 2440 | ||
@@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb) | |||
4212 | return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; | 4212 | return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; |
4213 | } | 4213 | } |
4214 | 4214 | ||
4215 | static inline bool skb_is_gso_tcp(const struct sk_buff *skb) | ||
4216 | { | ||
4217 | return skb_is_gso(skb) && | ||
4218 | skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); | ||
4219 | } | ||
4220 | |||
4215 | static inline void skb_gso_reset(struct sk_buff *skb) | 4221 | static inline void skb_gso_reset(struct sk_buff *skb) |
4216 | { | 4222 | { |
4217 | skb_shinfo(skb)->gso_size = 0; | 4223 | skb_shinfo(skb)->gso_size = 0; |
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index cb462f9ab7dd..71f2394abbf7 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -57,6 +57,15 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, | |||
57 | 57 | ||
58 | if (!skb_partial_csum_set(skb, start, off)) | 58 | if (!skb_partial_csum_set(skb, start, off)) |
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | } else { | ||
61 | /* gso packets without NEEDS_CSUM do not set transport_offset. | ||
62 | * probe and drop if does not match one of the above types. | ||
63 | */ | ||
64 | if (gso_type) { | ||
65 | skb_probe_transport_header(skb, -1); | ||
66 | if (!skb_transport_header_was_set(skb)) | ||
67 | return -EINVAL; | ||
68 | } | ||
60 | } | 69 | } |
61 | 70 | ||
62 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 71 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 00b5e7825508..74ff688568a0 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
@@ -39,6 +39,7 @@ struct inet_peer { | |||
39 | 39 | ||
40 | u32 metrics[RTAX_MAX]; | 40 | u32 metrics[RTAX_MAX]; |
41 | u32 rate_tokens; /* rate limiting for ICMP */ | 41 | u32 rate_tokens; /* rate limiting for ICMP */ |
42 | u32 n_redirects; | ||
42 | unsigned long rate_last; | 43 | unsigned long rate_last; |
43 | /* | 44 | /* |
44 | * Once inet_peer is queued for deletion (refcnt == 0), following field | 45 | * Once inet_peer is queued for deletion (refcnt == 0), following field |
diff --git a/include/net/sock.h b/include/net/sock.h index 2b229f7be8eb..f43f935cb113 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1277,7 +1277,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk) | |||
1277 | percpu_counter_inc(sk->sk_prot->sockets_allocated); | 1277 | percpu_counter_inc(sk->sk_prot->sockets_allocated); |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | static inline int | 1280 | static inline u64 |
1281 | sk_sockets_allocated_read_positive(struct sock *sk) | 1281 | sk_sockets_allocated_read_positive(struct sock *sk) |
1282 | { | 1282 | { |
1283 | return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); | 1283 | return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); |
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 14565d703291..e8baca85bac6 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h | |||
@@ -137,15 +137,21 @@ enum { | |||
137 | INET_DIAG_TCLASS, | 137 | INET_DIAG_TCLASS, |
138 | INET_DIAG_SKMEMINFO, | 138 | INET_DIAG_SKMEMINFO, |
139 | INET_DIAG_SHUTDOWN, | 139 | INET_DIAG_SHUTDOWN, |
140 | INET_DIAG_DCTCPINFO, | 140 | |
141 | INET_DIAG_PROTOCOL, /* response attribute only */ | 141 | /* |
142 | * Next extenstions cannot be requested in struct inet_diag_req_v2: | ||
143 | * its field idiag_ext has only 8 bits. | ||
144 | */ | ||
145 | |||
146 | INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */ | ||
147 | INET_DIAG_PROTOCOL, /* response attribute only */ | ||
142 | INET_DIAG_SKV6ONLY, | 148 | INET_DIAG_SKV6ONLY, |
143 | INET_DIAG_LOCALS, | 149 | INET_DIAG_LOCALS, |
144 | INET_DIAG_PEERS, | 150 | INET_DIAG_PEERS, |
145 | INET_DIAG_PAD, | 151 | INET_DIAG_PAD, |
146 | INET_DIAG_MARK, | 152 | INET_DIAG_MARK, /* only with CAP_NET_ADMIN */ |
147 | INET_DIAG_BBRINFO, | 153 | INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */ |
148 | INET_DIAG_CLASS_ID, | 154 | INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */ |
149 | INET_DIAG_MD5SIG, | 155 | INET_DIAG_MD5SIG, |
150 | __INET_DIAG_MAX, | 156 | __INET_DIAG_MAX, |
151 | }; | 157 | }; |
diff --git a/init/main.c b/init/main.c index e2e80ca3165a..c86a1c8f19f4 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void) | |||
695 | initrd_start = 0; | 695 | initrd_start = 0; |
696 | } | 696 | } |
697 | #endif | 697 | #endif |
698 | page_ext_init(); | ||
699 | kmemleak_init(); | 698 | kmemleak_init(); |
700 | setup_per_cpu_pageset(); | 699 | setup_per_cpu_pageset(); |
701 | numa_policy_init(); | 700 | numa_policy_init(); |
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void) | |||
1131 | sched_init_smp(); | 1130 | sched_init_smp(); |
1132 | 1131 | ||
1133 | page_alloc_init_late(); | 1132 | page_alloc_init_late(); |
1133 | /* Initialize page ext after all struct pages are initialized. */ | ||
1134 | page_ext_init(); | ||
1134 | 1135 | ||
1135 | do_basic_setup(); | 1136 | do_basic_setup(); |
1136 | 1137 | ||
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index d43b14535827..950ab2f28922 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c | |||
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry) | |||
44 | struct stack_map_irq_work *work; | 44 | struct stack_map_irq_work *work; |
45 | 45 | ||
46 | work = container_of(entry, struct stack_map_irq_work, irq_work); | 46 | work = container_of(entry, struct stack_map_irq_work, irq_work); |
47 | up_read(work->sem); | 47 | up_read_non_owner(work->sem); |
48 | work->sem = NULL; | 48 | work->sem = NULL; |
49 | } | 49 | } |
50 | 50 | ||
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, | |||
338 | } else { | 338 | } else { |
339 | work->sem = ¤t->mm->mmap_sem; | 339 | work->sem = ¤t->mm->mmap_sem; |
340 | irq_work_queue(&work->irq_work); | 340 | irq_work_queue(&work->irq_work); |
341 | /* | ||
342 | * The irq_work will release the mmap_sem with | ||
343 | * up_read_non_owner(). The rwsem_release() is called | ||
344 | * here to release the lock from lockdep's perspective. | ||
345 | */ | ||
346 | rwsem_release(¤t->mm->mmap_sem.dep_map, 1, _RET_IP_); | ||
341 | } | 347 | } |
342 | } | 348 | } |
343 | 349 | ||
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 56674a7c3778..8f295b790297 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off, | |||
1617 | return 0; | 1617 | return 0; |
1618 | } | 1618 | } |
1619 | 1619 | ||
1620 | static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, | 1620 | static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, |
1621 | int size, enum bpf_access_type t) | 1621 | u32 regno, int off, int size, |
1622 | enum bpf_access_type t) | ||
1622 | { | 1623 | { |
1623 | struct bpf_reg_state *regs = cur_regs(env); | 1624 | struct bpf_reg_state *regs = cur_regs(env); |
1624 | struct bpf_reg_state *reg = ®s[regno]; | 1625 | struct bpf_reg_state *reg = ®s[regno]; |
1625 | struct bpf_insn_access_aux info; | 1626 | struct bpf_insn_access_aux info = {}; |
1626 | 1627 | ||
1627 | if (reg->smin_value < 0) { | 1628 | if (reg->smin_value < 0) { |
1628 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", | 1629 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", |
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, | |||
1636 | return -EACCES; | 1637 | return -EACCES; |
1637 | } | 1638 | } |
1638 | 1639 | ||
1640 | env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; | ||
1641 | |||
1639 | return 0; | 1642 | return 0; |
1640 | } | 1643 | } |
1641 | 1644 | ||
@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | |||
2032 | verbose(env, "cannot write into socket\n"); | 2035 | verbose(env, "cannot write into socket\n"); |
2033 | return -EACCES; | 2036 | return -EACCES; |
2034 | } | 2037 | } |
2035 | err = check_sock_access(env, regno, off, size, t); | 2038 | err = check_sock_access(env, insn_idx, regno, off, size, t); |
2036 | if (!err && value_regno >= 0) | 2039 | if (!err && value_regno >= 0) |
2037 | mark_reg_unknown(env, regs, value_regno); | 2040 | mark_reg_unknown(env, regs, value_regno); |
2038 | } else { | 2041 | } else { |
diff --git a/kernel/events/core.c b/kernel/events/core.c index e5ede6918050..26d6edab051a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event, | |||
4963 | } | 4963 | } |
4964 | } | 4964 | } |
4965 | 4965 | ||
4966 | static int perf_event_check_period(struct perf_event *event, u64 value) | ||
4967 | { | ||
4968 | return event->pmu->check_period(event, value); | ||
4969 | } | ||
4970 | |||
4966 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | 4971 | static int perf_event_period(struct perf_event *event, u64 __user *arg) |
4967 | { | 4972 | { |
4968 | u64 value; | 4973 | u64 value; |
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) | |||
4979 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) | 4984 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
4980 | return -EINVAL; | 4985 | return -EINVAL; |
4981 | 4986 | ||
4987 | if (perf_event_check_period(event, value)) | ||
4988 | return -EINVAL; | ||
4989 | |||
4982 | event_function_call(event, __perf_event_period, &value); | 4990 | event_function_call(event, __perf_event_period, &value); |
4983 | 4991 | ||
4984 | return 0; | 4992 | return 0; |
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu) | |||
9391 | return 0; | 9399 | return 0; |
9392 | } | 9400 | } |
9393 | 9401 | ||
9402 | static int perf_event_nop_int(struct perf_event *event, u64 value) | ||
9403 | { | ||
9404 | return 0; | ||
9405 | } | ||
9406 | |||
9394 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); | 9407 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); |
9395 | 9408 | ||
9396 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) | 9409 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) |
@@ -9691,6 +9704,9 @@ got_cpu_context: | |||
9691 | pmu->pmu_disable = perf_pmu_nop_void; | 9704 | pmu->pmu_disable = perf_pmu_nop_void; |
9692 | } | 9705 | } |
9693 | 9706 | ||
9707 | if (!pmu->check_period) | ||
9708 | pmu->check_period = perf_event_nop_int; | ||
9709 | |||
9694 | if (!pmu->event_idx) | 9710 | if (!pmu->event_idx) |
9695 | pmu->event_idx = perf_event_idx_default; | 9711 | pmu->event_idx = perf_event_idx_default; |
9696 | 9712 | ||
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 309ef5a64af5..5ab4fe3b1dcc 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) | |||
734 | size = sizeof(struct ring_buffer); | 734 | size = sizeof(struct ring_buffer); |
735 | size += nr_pages * sizeof(void *); | 735 | size += nr_pages * sizeof(void *); |
736 | 736 | ||
737 | if (order_base_2(size) >= MAX_ORDER) | 737 | if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER) |
738 | goto fail; | 738 | goto fail; |
739 | 739 | ||
740 | rb = kzalloc(size, GFP_KERNEL); | 740 | rb = kzalloc(size, GFP_KERNEL); |
diff --git a/kernel/signal.c b/kernel/signal.c index 99fa8ff06fd9..57b7771e20d7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2436,9 +2436,12 @@ relock: | |||
2436 | } | 2436 | } |
2437 | 2437 | ||
2438 | /* Has this task already been marked for death? */ | 2438 | /* Has this task already been marked for death? */ |
2439 | ksig->info.si_signo = signr = SIGKILL; | 2439 | if (signal_group_exit(signal)) { |
2440 | if (signal_group_exit(signal)) | 2440 | ksig->info.si_signo = signr = SIGKILL; |
2441 | sigdelset(¤t->pending.signal, SIGKILL); | ||
2442 | recalc_sigpending(); | ||
2441 | goto fatal; | 2443 | goto fatal; |
2444 | } | ||
2442 | 2445 | ||
2443 | for (;;) { | 2446 | for (;;) { |
2444 | struct k_sigaction *ka; | 2447 | struct k_sigaction *ka; |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c521b7347482..c4238b441624 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file | |||
3384 | const char tgid_space[] = " "; | 3384 | const char tgid_space[] = " "; |
3385 | const char space[] = " "; | 3385 | const char space[] = " "; |
3386 | 3386 | ||
3387 | print_event_info(buf, m); | ||
3388 | |||
3387 | seq_printf(m, "# %s _-----=> irqs-off\n", | 3389 | seq_printf(m, "# %s _-----=> irqs-off\n", |
3388 | tgid ? tgid_space : space); | 3390 | tgid ? tgid_space : space); |
3389 | seq_printf(m, "# %s / _----=> need-resched\n", | 3391 | seq_printf(m, "# %s / _----=> need-resched\n", |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d5fb09ebba8b..9eaf07f99212 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = { | |||
861 | static nokprobe_inline int | 861 | static nokprobe_inline int |
862 | fetch_store_strlen(unsigned long addr) | 862 | fetch_store_strlen(unsigned long addr) |
863 | { | 863 | { |
864 | mm_segment_t old_fs; | ||
865 | int ret, len = 0; | 864 | int ret, len = 0; |
866 | u8 c; | 865 | u8 c; |
867 | 866 | ||
868 | old_fs = get_fs(); | ||
869 | set_fs(KERNEL_DS); | ||
870 | pagefault_disable(); | ||
871 | |||
872 | do { | 867 | do { |
873 | ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); | 868 | ret = probe_mem_read(&c, (u8 *)addr + len, 1); |
874 | len++; | 869 | len++; |
875 | } while (c && ret == 0 && len < MAX_STRING_SIZE); | 870 | } while (c && ret == 0 && len < MAX_STRING_SIZE); |
876 | 871 | ||
877 | pagefault_enable(); | ||
878 | set_fs(old_fs); | ||
879 | |||
880 | return (ret < 0) ? ret : len; | 872 | return (ret < 0) ? ret : len; |
881 | } | 873 | } |
882 | 874 | ||
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index 5c56afc17cf8..4737bb8c07a3 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h | |||
@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, | |||
180 | if (unlikely(arg->dynamic)) | 180 | if (unlikely(arg->dynamic)) |
181 | *dl = make_data_loc(maxlen, dyndata - base); | 181 | *dl = make_data_loc(maxlen, dyndata - base); |
182 | ret = process_fetch_insn(arg->code, regs, dl, base); | 182 | ret = process_fetch_insn(arg->code, regs, dl, base); |
183 | if (unlikely(ret < 0 && arg->dynamic)) | 183 | if (unlikely(ret < 0 && arg->dynamic)) { |
184 | *dl = make_data_loc(0, dyndata - base); | 184 | *dl = make_data_loc(0, dyndata - base); |
185 | else | 185 | } else { |
186 | dyndata += ret; | 186 | dyndata += ret; |
187 | maxlen -= ret; | ||
188 | } | ||
187 | } | 189 | } |
188 | } | 190 | } |
189 | 191 | ||
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index c6659cb37033..59875eb278ea 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
@@ -768,9 +768,11 @@ all_leaves_cluster_together: | |||
768 | new_s0->index_key[i] = | 768 | new_s0->index_key[i] = |
769 | ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); | 769 | ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); |
770 | 770 | ||
771 | blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); | 771 | if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) { |
772 | pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); | 772 | blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); |
773 | new_s0->index_key[keylen - 1] &= ~blank; | 773 | pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); |
774 | new_s0->index_key[keylen - 1] &= ~blank; | ||
775 | } | ||
774 | 776 | ||
775 | /* This now reduces to a node splitting exercise for which we'll need | 777 | /* This now reduces to a node splitting exercise for which we'll need |
776 | * to regenerate the disparity table. | 778 | * to regenerate the disparity table. |
diff --git a/lib/crc32.c b/lib/crc32.c index 45b1d67a1767..4a20455d1f61 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) | |||
206 | EXPORT_SYMBOL(crc32_le); | 206 | EXPORT_SYMBOL(crc32_le); |
207 | EXPORT_SYMBOL(__crc32c_le); | 207 | EXPORT_SYMBOL(__crc32c_le); |
208 | 208 | ||
209 | u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); | 209 | u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); |
210 | u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); | 210 | u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); |
211 | 211 | ||
212 | /* | 212 | /* |
213 | * This multiplies the polynomials x and y modulo the given modulus. | 213 | * This multiplies the polynomials x and y modulo the given modulus. |
@@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
1674 | if (!pmd_present(pmd)) | 1674 | if (!pmd_present(pmd)) |
1675 | return 0; | 1675 | return 0; |
1676 | 1676 | ||
1677 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { | 1677 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || |
1678 | pmd_devmap(pmd))) { | ||
1678 | /* | 1679 | /* |
1679 | * NUMA hinting faults need to be handled in the GUP | 1680 | * NUMA hinting faults need to be handled in the GUP |
1680 | * slowpath for accounting purposes and so that they | 1681 | * slowpath for accounting purposes and so that they |
diff --git a/mm/memblock.c b/mm/memblock.c index 022d4cbb3618..ea31045ba704 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -26,6 +26,13 @@ | |||
26 | 26 | ||
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | 28 | ||
29 | #define INIT_MEMBLOCK_REGIONS 128 | ||
30 | #define INIT_PHYSMEM_REGIONS 4 | ||
31 | |||
32 | #ifndef INIT_MEMBLOCK_RESERVED_REGIONS | ||
33 | # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS | ||
34 | #endif | ||
35 | |||
29 | /** | 36 | /** |
30 | * DOC: memblock overview | 37 | * DOC: memblock overview |
31 | * | 38 | * |
@@ -92,7 +99,7 @@ unsigned long max_pfn; | |||
92 | unsigned long long max_possible_pfn; | 99 | unsigned long long max_possible_pfn; |
93 | 100 | ||
94 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 101 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
95 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 102 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; |
96 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | 103 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
97 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; | 104 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; |
98 | #endif | 105 | #endif |
@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = { | |||
105 | 112 | ||
106 | .reserved.regions = memblock_reserved_init_regions, | 113 | .reserved.regions = memblock_reserved_init_regions, |
107 | .reserved.cnt = 1, /* empty dummy entry */ | 114 | .reserved.cnt = 1, /* empty dummy entry */ |
108 | .reserved.max = INIT_MEMBLOCK_REGIONS, | 115 | .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, |
109 | .reserved.name = "reserved", | 116 | .reserved.name = "reserved", |
110 | 117 | ||
111 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | 118 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 35fdde041f5c..7f79b78bc829 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -4675,11 +4675,11 @@ refill: | |||
4675 | /* Even if we own the page, we do not use atomic_set(). | 4675 | /* Even if we own the page, we do not use atomic_set(). |
4676 | * This would break get_page_unless_zero() users. | 4676 | * This would break get_page_unless_zero() users. |
4677 | */ | 4677 | */ |
4678 | page_ref_add(page, size - 1); | 4678 | page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); |
4679 | 4679 | ||
4680 | /* reset page count bias and offset to start of new frag */ | 4680 | /* reset page count bias and offset to start of new frag */ |
4681 | nc->pfmemalloc = page_is_pfmemalloc(page); | 4681 | nc->pfmemalloc = page_is_pfmemalloc(page); |
4682 | nc->pagecnt_bias = size; | 4682 | nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; |
4683 | nc->offset = size; | 4683 | nc->offset = size; |
4684 | } | 4684 | } |
4685 | 4685 | ||
@@ -4695,10 +4695,10 @@ refill: | |||
4695 | size = nc->size; | 4695 | size = nc->size; |
4696 | #endif | 4696 | #endif |
4697 | /* OK, page count is 0, we can safely set it */ | 4697 | /* OK, page count is 0, we can safely set it */ |
4698 | set_page_count(page, size); | 4698 | set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); |
4699 | 4699 | ||
4700 | /* reset page count bias and offset to start of new frag */ | 4700 | /* reset page count bias and offset to start of new frag */ |
4701 | nc->pagecnt_bias = size; | 4701 | nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; |
4702 | offset = size - fragsz; | 4702 | offset = size - fragsz; |
4703 | } | 4703 | } |
4704 | 4704 | ||
diff --git a/mm/page_ext.c b/mm/page_ext.c index ae44f7adbe07..8c78b8d45117 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c | |||
@@ -398,10 +398,8 @@ void __init page_ext_init(void) | |||
398 | * We know some arch can have a nodes layout such as | 398 | * We know some arch can have a nodes layout such as |
399 | * -------------pfn--------------> | 399 | * -------------pfn--------------> |
400 | * N0 | N1 | N2 | N0 | N1 | N2|.... | 400 | * N0 | N1 | N2 | N0 | N1 | N2|.... |
401 | * | ||
402 | * Take into account DEFERRED_STRUCT_PAGE_INIT. | ||
403 | */ | 401 | */ |
404 | if (early_pfn_to_nid(pfn) != nid) | 402 | if (pfn_to_nid(pfn) != nid) |
405 | continue; | 403 | continue; |
406 | if (init_section_page_ext(pfn, nid)) | 404 | if (init_section_page_ext(pfn, nid)) |
407 | goto oom; | 405 | goto oom; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index a714c4f800e9..e979705bbf32 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, | |||
491 | delta = freeable / 2; | 491 | delta = freeable / 2; |
492 | } | 492 | } |
493 | 493 | ||
494 | /* | ||
495 | * Make sure we apply some minimal pressure on default priority | ||
496 | * even on small cgroups. Stale objects are not only consuming memory | ||
497 | * by themselves, but can also hold a reference to a dying cgroup, | ||
498 | * preventing it from being reclaimed. A dying cgroup with all | ||
499 | * corresponding structures like per-cpu stats and kmem caches | ||
500 | * can be really big, so it may lead to a significant waste of memory. | ||
501 | */ | ||
502 | delta = max_t(unsigned long long, delta, min(freeable, batch_size)); | ||
503 | |||
504 | total_scan += delta; | 494 | total_scan += delta; |
505 | if (total_scan < 0) { | 495 | if (total_scan < 0) { |
506 | pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", | 496 | pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index b85ca809e509..ffc83bebfe40 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -227,6 +227,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, | |||
227 | 227 | ||
228 | switch (ntohs(ethhdr->h_proto)) { | 228 | switch (ntohs(ethhdr->h_proto)) { |
229 | case ETH_P_8021Q: | 229 | case ETH_P_8021Q: |
230 | if (!pskb_may_pull(skb, sizeof(*vhdr))) | ||
231 | goto dropped; | ||
230 | vhdr = vlan_eth_hdr(skb); | 232 | vhdr = vlan_eth_hdr(skb); |
231 | 233 | ||
232 | /* drop batman-in-batman packets to prevent loops */ | 234 | /* drop batman-in-batman packets to prevent loops */ |
diff --git a/net/core/dev.c b/net/core/dev.c index 8e276e0192a1..5d03889502eb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower, | |||
8152 | netdev_features_t feature; | 8152 | netdev_features_t feature; |
8153 | int feature_bit; | 8153 | int feature_bit; |
8154 | 8154 | ||
8155 | for_each_netdev_feature(&upper_disables, feature_bit) { | 8155 | for_each_netdev_feature(upper_disables, feature_bit) { |
8156 | feature = __NETIF_F_BIT(feature_bit); | 8156 | feature = __NETIF_F_BIT(feature_bit); |
8157 | if (!(upper->wanted_features & feature) | 8157 | if (!(upper->wanted_features & feature) |
8158 | && (features & feature)) { | 8158 | && (features & feature)) { |
@@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper, | |||
8172 | netdev_features_t feature; | 8172 | netdev_features_t feature; |
8173 | int feature_bit; | 8173 | int feature_bit; |
8174 | 8174 | ||
8175 | for_each_netdev_feature(&upper_disables, feature_bit) { | 8175 | for_each_netdev_feature(upper_disables, feature_bit) { |
8176 | feature = __NETIF_F_BIT(feature_bit); | 8176 | feature = __NETIF_F_BIT(feature_bit); |
8177 | if (!(features & feature) && (lower->features & feature)) { | 8177 | if (!(features & feature) && (lower->features & feature)) { |
8178 | netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", | 8178 | netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", |
diff --git a/net/core/filter.c b/net/core/filter.c index 7a54dc11ac2d..f7d0004fc160 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) | |||
2789 | u32 off = skb_mac_header_len(skb); | 2789 | u32 off = skb_mac_header_len(skb); |
2790 | int ret; | 2790 | int ret; |
2791 | 2791 | ||
2792 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2792 | if (!skb_is_gso_tcp(skb)) |
2793 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2794 | return -ENOTSUPP; | 2793 | return -ENOTSUPP; |
2795 | 2794 | ||
2796 | ret = skb_cow(skb, len_diff); | 2795 | ret = skb_cow(skb, len_diff); |
@@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) | |||
2831 | u32 off = skb_mac_header_len(skb); | 2830 | u32 off = skb_mac_header_len(skb); |
2832 | int ret; | 2831 | int ret; |
2833 | 2832 | ||
2834 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2833 | if (!skb_is_gso_tcp(skb)) |
2835 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2836 | return -ENOTSUPP; | 2834 | return -ENOTSUPP; |
2837 | 2835 | ||
2838 | ret = skb_unclone(skb, GFP_ATOMIC); | 2836 | ret = skb_unclone(skb, GFP_ATOMIC); |
@@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) | |||
2957 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | 2955 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
2958 | int ret; | 2956 | int ret; |
2959 | 2957 | ||
2960 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2958 | if (!skb_is_gso_tcp(skb)) |
2961 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2962 | return -ENOTSUPP; | 2959 | return -ENOTSUPP; |
2963 | 2960 | ||
2964 | ret = skb_cow(skb, len_diff); | 2961 | ret = skb_cow(skb, len_diff); |
@@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) | |||
2987 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | 2984 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
2988 | int ret; | 2985 | int ret; |
2989 | 2986 | ||
2990 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2987 | if (!skb_is_gso_tcp(skb)) |
2991 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2992 | return -ENOTSUPP; | 2988 | return -ENOTSUPP; |
2993 | 2989 | ||
2994 | ret = skb_unclone(skb, GFP_ATOMIC); | 2990 | ret = skb_unclone(skb, GFP_ATOMIC); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26d848484912..2415d9cb9b89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
356 | */ | 356 | */ |
357 | void *netdev_alloc_frag(unsigned int fragsz) | 357 | void *netdev_alloc_frag(unsigned int fragsz) |
358 | { | 358 | { |
359 | fragsz = SKB_DATA_ALIGN(fragsz); | ||
360 | |||
359 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC); | 361 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC); |
360 | } | 362 | } |
361 | EXPORT_SYMBOL(netdev_alloc_frag); | 363 | EXPORT_SYMBOL(netdev_alloc_frag); |
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
369 | 371 | ||
370 | void *napi_alloc_frag(unsigned int fragsz) | 372 | void *napi_alloc_frag(unsigned int fragsz) |
371 | { | 373 | { |
374 | fragsz = SKB_DATA_ALIGN(fragsz); | ||
375 | |||
372 | return __napi_alloc_frag(fragsz, GFP_ATOMIC); | 376 | return __napi_alloc_frag(fragsz, GFP_ATOMIC); |
373 | } | 377 | } |
374 | EXPORT_SYMBOL(napi_alloc_frag); | 378 | EXPORT_SYMBOL(napi_alloc_frag); |
diff --git a/net/core/sock.c b/net/core/sock.c index 6aa2e7e0b4fb..bc3512f230a3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -2380,7 +2380,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) | |||
2380 | } | 2380 | } |
2381 | 2381 | ||
2382 | if (sk_has_memory_pressure(sk)) { | 2382 | if (sk_has_memory_pressure(sk)) { |
2383 | int alloc; | 2383 | u64 alloc; |
2384 | 2384 | ||
2385 | if (!sk_under_memory_pressure(sk)) | 2385 | if (!sk_under_memory_pressure(sk)) |
2386 | return 1; | 2386 | return 1; |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 1a4e9ff02762..5731670c560b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk, | |||
108 | + nla_total_size(1) /* INET_DIAG_TOS */ | 108 | + nla_total_size(1) /* INET_DIAG_TOS */ |
109 | + nla_total_size(1) /* INET_DIAG_TCLASS */ | 109 | + nla_total_size(1) /* INET_DIAG_TCLASS */ |
110 | + nla_total_size(4) /* INET_DIAG_MARK */ | 110 | + nla_total_size(4) /* INET_DIAG_MARK */ |
111 | + nla_total_size(4) /* INET_DIAG_CLASS_ID */ | ||
111 | + nla_total_size(sizeof(struct inet_diag_meminfo)) | 112 | + nla_total_size(sizeof(struct inet_diag_meminfo)) |
112 | + nla_total_size(sizeof(struct inet_diag_msg)) | 113 | + nla_total_size(sizeof(struct inet_diag_msg)) |
113 | + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) | 114 | + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) |
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
287 | goto errout; | 288 | goto errout; |
288 | } | 289 | } |
289 | 290 | ||
290 | if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) { | 291 | if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || |
292 | ext & (1 << (INET_DIAG_TCLASS - 1))) { | ||
291 | u32 classid = 0; | 293 | u32 classid = 0; |
292 | 294 | ||
293 | #ifdef CONFIG_SOCK_CGROUP_DATA | 295 | #ifdef CONFIG_SOCK_CGROUP_DATA |
294 | classid = sock_cgroup_classid(&sk->sk_cgrp_data); | 296 | classid = sock_cgroup_classid(&sk->sk_cgrp_data); |
295 | #endif | 297 | #endif |
298 | /* Fallback to socket priority if class id isn't set. | ||
299 | * Classful qdiscs use it as direct reference to class. | ||
300 | * For cgroup2 classid is always zero. | ||
301 | */ | ||
302 | if (!classid) | ||
303 | classid = sk->sk_priority; | ||
296 | 304 | ||
297 | if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) | 305 | if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) |
298 | goto errout; | 306 | goto errout; |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index d757b9642d0d..be778599bfed 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, | |||
216 | atomic_set(&p->rid, 0); | 216 | atomic_set(&p->rid, 0); |
217 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; | 217 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; |
218 | p->rate_tokens = 0; | 218 | p->rate_tokens = 0; |
219 | p->n_redirects = 0; | ||
219 | /* 60*HZ is arbitrary, but chosen enough high so that the first | 220 | /* 60*HZ is arbitrary, but chosen enough high so that the first |
220 | * calculation of tokens is at its maximum. | 221 | * calculation of tokens is at its maximum. |
221 | */ | 222 | */ |
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 2687db015b6f..fa2ba7c500e4 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c | |||
@@ -215,6 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, | |||
215 | 215 | ||
216 | /* Change outer to look like the reply to an incoming packet */ | 216 | /* Change outer to look like the reply to an incoming packet */ |
217 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 217 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
218 | target.dst.protonum = IPPROTO_ICMP; | ||
218 | if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) | 219 | if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) |
219 | return 0; | 220 | return 0; |
220 | 221 | ||
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c index a0aa13bcabda..0a8a60c1bf9a 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c | |||
@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset) | |||
105 | int snmp_version(void *context, size_t hdrlen, unsigned char tag, | 105 | int snmp_version(void *context, size_t hdrlen, unsigned char tag, |
106 | const void *data, size_t datalen) | 106 | const void *data, size_t datalen) |
107 | { | 107 | { |
108 | if (datalen != 1) | ||
109 | return -EINVAL; | ||
108 | if (*(unsigned char *)data > 1) | 110 | if (*(unsigned char *)data > 1) |
109 | return -ENOTSUPP; | 111 | return -ENOTSUPP; |
110 | return 1; | 112 | return 1; |
@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag, | |||
114 | const void *data, size_t datalen) | 116 | const void *data, size_t datalen) |
115 | { | 117 | { |
116 | struct snmp_ctx *ctx = (struct snmp_ctx *)context; | 118 | struct snmp_ctx *ctx = (struct snmp_ctx *)context; |
117 | __be32 *pdata = (__be32 *)data; | 119 | __be32 *pdata; |
118 | 120 | ||
121 | if (datalen != 4) | ||
122 | return -EINVAL; | ||
123 | pdata = (__be32 *)data; | ||
119 | if (*pdata == ctx->from) { | 124 | if (*pdata == ctx->from) { |
120 | pr_debug("%s: %pI4 to %pI4\n", __func__, | 125 | pr_debug("%s: %pI4 to %pI4\n", __func__, |
121 | (void *)&ctx->from, (void *)&ctx->to); | 126 | (void *)&ctx->from, (void *)&ctx->to); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ce92f73cf104..5163b64f8fb3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
887 | /* No redirected packets during ip_rt_redirect_silence; | 887 | /* No redirected packets during ip_rt_redirect_silence; |
888 | * reset the algorithm. | 888 | * reset the algorithm. |
889 | */ | 889 | */ |
890 | if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) | 890 | if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) { |
891 | peer->rate_tokens = 0; | 891 | peer->rate_tokens = 0; |
892 | peer->n_redirects = 0; | ||
893 | } | ||
892 | 894 | ||
893 | /* Too many ignored redirects; do not send anything | 895 | /* Too many ignored redirects; do not send anything |
894 | * set dst.rate_last to the last seen redirected packet. | 896 | * set dst.rate_last to the last seen redirected packet. |
895 | */ | 897 | */ |
896 | if (peer->rate_tokens >= ip_rt_redirect_number) { | 898 | if (peer->n_redirects >= ip_rt_redirect_number) { |
897 | peer->rate_last = jiffies; | 899 | peer->rate_last = jiffies; |
898 | goto out_put_peer; | 900 | goto out_put_peer; |
899 | } | 901 | } |
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
910 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); | 912 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); |
911 | peer->rate_last = jiffies; | 913 | peer->rate_last = jiffies; |
912 | ++peer->rate_tokens; | 914 | ++peer->rate_tokens; |
915 | ++peer->n_redirects; | ||
913 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 916 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
914 | if (log_martians && | 917 | if (log_martians && |
915 | peer->rate_tokens == ip_rt_redirect_number) | 918 | peer->rate_tokens == ip_rt_redirect_number) |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2079145a3b7c..cf3c5095c10e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk) | |||
2528 | sk_mem_reclaim(sk); | 2528 | sk_mem_reclaim(sk); |
2529 | tcp_clear_all_retrans_hints(tcp_sk(sk)); | 2529 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
2530 | tcp_sk(sk)->packets_out = 0; | 2530 | tcp_sk(sk)->packets_out = 0; |
2531 | inet_csk(sk)->icsk_backoff = 0; | ||
2531 | } | 2532 | } |
2532 | 2533 | ||
2533 | int tcp_disconnect(struct sock *sk, int flags) | 2534 | int tcp_disconnect(struct sock *sk, int flags) |
@@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2576 | tp->write_seq += tp->max_window + 2; | 2577 | tp->write_seq += tp->max_window + 2; |
2577 | if (tp->write_seq == 0) | 2578 | if (tp->write_seq == 0) |
2578 | tp->write_seq = 1; | 2579 | tp->write_seq = 1; |
2579 | icsk->icsk_backoff = 0; | ||
2580 | tp->snd_cwnd = 2; | 2580 | tp->snd_cwnd = 2; |
2581 | icsk->icsk_probes_out = 0; | 2581 | icsk->icsk_probes_out = 0; |
2582 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | 2582 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index efc6fef692ff..ec3cea9d6828 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
536 | if (sock_owned_by_user(sk)) | 536 | if (sock_owned_by_user(sk)) |
537 | break; | 537 | break; |
538 | 538 | ||
539 | skb = tcp_rtx_queue_head(sk); | ||
540 | if (WARN_ON_ONCE(!skb)) | ||
541 | break; | ||
542 | |||
539 | icsk->icsk_backoff--; | 543 | icsk->icsk_backoff--; |
540 | icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : | 544 | icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : |
541 | TCP_TIMEOUT_INIT; | 545 | TCP_TIMEOUT_INIT; |
542 | icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); | 546 | icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); |
543 | 547 | ||
544 | skb = tcp_rtx_queue_head(sk); | ||
545 | 548 | ||
546 | tcp_mstamp_refresh(tp); | 549 | tcp_mstamp_refresh(tp); |
547 | delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); | 550 | delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 84c358804355..72ffd3d760ff 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) | |||
1165 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | 1165 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
1166 | if (ifa == ifp) | 1166 | if (ifa == ifp) |
1167 | continue; | 1167 | continue; |
1168 | if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr, | 1168 | if (ifa->prefix_len != ifp->prefix_len || |
1169 | !ipv6_prefix_equal(&ifa->addr, &ifp->addr, | ||
1169 | ifp->prefix_len)) | 1170 | ifp->prefix_len)) |
1170 | continue; | 1171 | continue; |
1171 | if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) | 1172 | if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 801a9a0c217e..43890898b0b5 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -1719,6 +1719,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[], | |||
1719 | return 0; | 1719 | return 0; |
1720 | } | 1720 | } |
1721 | 1721 | ||
1722 | static void ip6erspan_set_version(struct nlattr *data[], | ||
1723 | struct __ip6_tnl_parm *parms) | ||
1724 | { | ||
1725 | parms->erspan_ver = 1; | ||
1726 | if (data[IFLA_GRE_ERSPAN_VER]) | ||
1727 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | ||
1728 | |||
1729 | if (parms->erspan_ver == 1) { | ||
1730 | if (data[IFLA_GRE_ERSPAN_INDEX]) | ||
1731 | parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); | ||
1732 | } else if (parms->erspan_ver == 2) { | ||
1733 | if (data[IFLA_GRE_ERSPAN_DIR]) | ||
1734 | parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); | ||
1735 | if (data[IFLA_GRE_ERSPAN_HWID]) | ||
1736 | parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); | ||
1737 | } | ||
1738 | } | ||
1739 | |||
1722 | static void ip6gre_netlink_parms(struct nlattr *data[], | 1740 | static void ip6gre_netlink_parms(struct nlattr *data[], |
1723 | struct __ip6_tnl_parm *parms) | 1741 | struct __ip6_tnl_parm *parms) |
1724 | { | 1742 | { |
@@ -1767,20 +1785,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[], | |||
1767 | 1785 | ||
1768 | if (data[IFLA_GRE_COLLECT_METADATA]) | 1786 | if (data[IFLA_GRE_COLLECT_METADATA]) |
1769 | parms->collect_md = true; | 1787 | parms->collect_md = true; |
1770 | |||
1771 | parms->erspan_ver = 1; | ||
1772 | if (data[IFLA_GRE_ERSPAN_VER]) | ||
1773 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | ||
1774 | |||
1775 | if (parms->erspan_ver == 1) { | ||
1776 | if (data[IFLA_GRE_ERSPAN_INDEX]) | ||
1777 | parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); | ||
1778 | } else if (parms->erspan_ver == 2) { | ||
1779 | if (data[IFLA_GRE_ERSPAN_DIR]) | ||
1780 | parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); | ||
1781 | if (data[IFLA_GRE_ERSPAN_HWID]) | ||
1782 | parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); | ||
1783 | } | ||
1784 | } | 1788 | } |
1785 | 1789 | ||
1786 | static int ip6gre_tap_init(struct net_device *dev) | 1790 | static int ip6gre_tap_init(struct net_device *dev) |
@@ -2203,6 +2207,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, | |||
2203 | int err; | 2207 | int err; |
2204 | 2208 | ||
2205 | ip6gre_netlink_parms(data, &nt->parms); | 2209 | ip6gre_netlink_parms(data, &nt->parms); |
2210 | ip6erspan_set_version(data, &nt->parms); | ||
2206 | ign = net_generic(net, ip6gre_net_id); | 2211 | ign = net_generic(net, ip6gre_net_id); |
2207 | 2212 | ||
2208 | if (nt->parms.collect_md) { | 2213 | if (nt->parms.collect_md) { |
@@ -2248,6 +2253,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[], | |||
2248 | if (IS_ERR(t)) | 2253 | if (IS_ERR(t)) |
2249 | return PTR_ERR(t); | 2254 | return PTR_ERR(t); |
2250 | 2255 | ||
2256 | ip6erspan_set_version(data, &p); | ||
2251 | ip6gre_tunnel_unlink_md(ign, t); | 2257 | ip6gre_tunnel_unlink_md(ign, t); |
2252 | ip6gre_tunnel_unlink(ign, t); | 2258 | ip6gre_tunnel_unlink(ign, t); |
2253 | ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); | 2259 | ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); |
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index 23022447eb49..7a41ee3c11b4 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | |||
@@ -226,6 +226,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, | |||
226 | } | 226 | } |
227 | 227 | ||
228 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 228 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
229 | target.dst.protonum = IPPROTO_ICMPV6; | ||
229 | if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) | 230 | if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) |
230 | return 0; | 231 | return 0; |
231 | 232 | ||
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 8d0ba757a46c..9b2f272ca164 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c | |||
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info) | |||
221 | rcu_read_unlock(); | 221 | rcu_read_unlock(); |
222 | 222 | ||
223 | genlmsg_end(msg, hdr); | 223 | genlmsg_end(msg, hdr); |
224 | genlmsg_reply(msg, info); | 224 | return genlmsg_reply(msg, info); |
225 | |||
226 | return 0; | ||
227 | 225 | ||
228 | nla_put_failure: | 226 | nla_put_failure: |
229 | rcu_read_unlock(); | 227 | rcu_read_unlock(); |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 69e831bc317b..54821fb1a960 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2010, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH |
11 | * Copyright (C) 2018 Intel Corporation | 11 | * Copyright (C) 2018 - 2019 Intel Corporation |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
366 | 366 | ||
367 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); | 367 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); |
368 | 368 | ||
369 | ieee80211_agg_stop_txq(sta, tid); | ||
370 | |||
369 | spin_unlock_bh(&sta->lock); | 371 | spin_unlock_bh(&sta->lock); |
370 | 372 | ||
371 | ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", | 373 | ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 2493c74c2d37..96496b2c1670 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
941 | BSS_CHANGED_P2P_PS | | 941 | BSS_CHANGED_P2P_PS | |
942 | BSS_CHANGED_TXPOWER; | 942 | BSS_CHANGED_TXPOWER; |
943 | int err; | 943 | int err; |
944 | int prev_beacon_int; | ||
944 | 945 | ||
945 | old = sdata_dereference(sdata->u.ap.beacon, sdata); | 946 | old = sdata_dereference(sdata->u.ap.beacon, sdata); |
946 | if (old) | 947 | if (old) |
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
963 | 964 | ||
964 | sdata->needed_rx_chains = sdata->local->rx_chains; | 965 | sdata->needed_rx_chains = sdata->local->rx_chains; |
965 | 966 | ||
967 | prev_beacon_int = sdata->vif.bss_conf.beacon_int; | ||
966 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; | 968 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; |
967 | 969 | ||
968 | if (params->he_cap) | 970 | if (params->he_cap) |
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
974 | if (!err) | 976 | if (!err) |
975 | ieee80211_vif_copy_chanctx_to_vlans(sdata, false); | 977 | ieee80211_vif_copy_chanctx_to_vlans(sdata, false); |
976 | mutex_unlock(&local->mtx); | 978 | mutex_unlock(&local->mtx); |
977 | if (err) | 979 | if (err) { |
980 | sdata->vif.bss_conf.beacon_int = prev_beacon_int; | ||
978 | return err; | 981 | return err; |
982 | } | ||
979 | 983 | ||
980 | /* | 984 | /* |
981 | * Apply control port protocol, this allows us to | 985 | * Apply control port protocol, this allows us to |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index cad6592c52a1..2ec7011a4d07 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags { | |||
70 | * @dst: mesh path destination mac address | 70 | * @dst: mesh path destination mac address |
71 | * @mpp: mesh proxy mac address | 71 | * @mpp: mesh proxy mac address |
72 | * @rhash: rhashtable list pointer | 72 | * @rhash: rhashtable list pointer |
73 | * @walk_list: linked list containing all mesh_path objects. | ||
73 | * @gate_list: list pointer for known gates list | 74 | * @gate_list: list pointer for known gates list |
74 | * @sdata: mesh subif | 75 | * @sdata: mesh subif |
75 | * @next_hop: mesh neighbor to which frames for this destination will be | 76 | * @next_hop: mesh neighbor to which frames for this destination will be |
@@ -105,6 +106,7 @@ struct mesh_path { | |||
105 | u8 dst[ETH_ALEN]; | 106 | u8 dst[ETH_ALEN]; |
106 | u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ | 107 | u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ |
107 | struct rhash_head rhash; | 108 | struct rhash_head rhash; |
109 | struct hlist_node walk_list; | ||
108 | struct hlist_node gate_list; | 110 | struct hlist_node gate_list; |
109 | struct ieee80211_sub_if_data *sdata; | 111 | struct ieee80211_sub_if_data *sdata; |
110 | struct sta_info __rcu *next_hop; | 112 | struct sta_info __rcu *next_hop; |
@@ -133,12 +135,16 @@ struct mesh_path { | |||
133 | * gate's mpath may or may not be resolved and active. | 135 | * gate's mpath may or may not be resolved and active. |
134 | * @gates_lock: protects updates to known_gates | 136 | * @gates_lock: protects updates to known_gates |
135 | * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr | 137 | * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr |
138 | * @walk_head: linked list containging all mesh_path objects | ||
139 | * @walk_lock: lock protecting walk_head | ||
136 | * @entries: number of entries in the table | 140 | * @entries: number of entries in the table |
137 | */ | 141 | */ |
138 | struct mesh_table { | 142 | struct mesh_table { |
139 | struct hlist_head known_gates; | 143 | struct hlist_head known_gates; |
140 | spinlock_t gates_lock; | 144 | spinlock_t gates_lock; |
141 | struct rhashtable rhead; | 145 | struct rhashtable rhead; |
146 | struct hlist_head walk_head; | ||
147 | spinlock_t walk_lock; | ||
142 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ | 148 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ |
143 | }; | 149 | }; |
144 | 150 | ||
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index a5125624a76d..88a6d5e18ccc 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void) | |||
59 | return NULL; | 59 | return NULL; |
60 | 60 | ||
61 | INIT_HLIST_HEAD(&newtbl->known_gates); | 61 | INIT_HLIST_HEAD(&newtbl->known_gates); |
62 | INIT_HLIST_HEAD(&newtbl->walk_head); | ||
62 | atomic_set(&newtbl->entries, 0); | 63 | atomic_set(&newtbl->entries, 0); |
63 | spin_lock_init(&newtbl->gates_lock); | 64 | spin_lock_init(&newtbl->gates_lock); |
65 | spin_lock_init(&newtbl->walk_lock); | ||
64 | 66 | ||
65 | return newtbl; | 67 | return newtbl; |
66 | } | 68 | } |
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) | |||
249 | static struct mesh_path * | 251 | static struct mesh_path * |
250 | __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) | 252 | __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) |
251 | { | 253 | { |
252 | int i = 0, ret; | 254 | int i = 0; |
253 | struct mesh_path *mpath = NULL; | 255 | struct mesh_path *mpath; |
254 | struct rhashtable_iter iter; | ||
255 | |||
256 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
257 | if (ret) | ||
258 | return NULL; | ||
259 | |||
260 | rhashtable_walk_start(&iter); | ||
261 | 256 | ||
262 | while ((mpath = rhashtable_walk_next(&iter))) { | 257 | hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { |
263 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
264 | continue; | ||
265 | if (IS_ERR(mpath)) | ||
266 | break; | ||
267 | if (i++ == idx) | 258 | if (i++ == idx) |
268 | break; | 259 | break; |
269 | } | 260 | } |
270 | rhashtable_walk_stop(&iter); | ||
271 | rhashtable_walk_exit(&iter); | ||
272 | 261 | ||
273 | if (IS_ERR(mpath) || !mpath) | 262 | if (!mpath) |
274 | return NULL; | 263 | return NULL; |
275 | 264 | ||
276 | if (mpath_expired(mpath)) { | 265 | if (mpath_expired(mpath)) { |
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
432 | return ERR_PTR(-ENOMEM); | 421 | return ERR_PTR(-ENOMEM); |
433 | 422 | ||
434 | tbl = sdata->u.mesh.mesh_paths; | 423 | tbl = sdata->u.mesh.mesh_paths; |
424 | spin_lock_bh(&tbl->walk_lock); | ||
435 | do { | 425 | do { |
436 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, | 426 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, |
437 | &new_mpath->rhash, | 427 | &new_mpath->rhash, |
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
441 | mpath = rhashtable_lookup_fast(&tbl->rhead, | 431 | mpath = rhashtable_lookup_fast(&tbl->rhead, |
442 | dst, | 432 | dst, |
443 | mesh_rht_params); | 433 | mesh_rht_params); |
444 | 434 | else if (!ret) | |
435 | hlist_add_head(&new_mpath->walk_list, &tbl->walk_head); | ||
445 | } while (unlikely(ret == -EEXIST && !mpath)); | 436 | } while (unlikely(ret == -EEXIST && !mpath)); |
437 | spin_unlock_bh(&tbl->walk_lock); | ||
446 | 438 | ||
447 | if (ret && ret != -EEXIST) | 439 | if (ret) { |
448 | return ERR_PTR(ret); | ||
449 | |||
450 | /* At this point either new_mpath was added, or we found a | ||
451 | * matching entry already in the table; in the latter case | ||
452 | * free the unnecessary new entry. | ||
453 | */ | ||
454 | if (ret == -EEXIST) { | ||
455 | kfree(new_mpath); | 440 | kfree(new_mpath); |
441 | |||
442 | if (ret != -EEXIST) | ||
443 | return ERR_PTR(ret); | ||
444 | |||
456 | new_mpath = mpath; | 445 | new_mpath = mpath; |
457 | } | 446 | } |
447 | |||
458 | sdata->u.mesh.mesh_paths_generation++; | 448 | sdata->u.mesh.mesh_paths_generation++; |
459 | return new_mpath; | 449 | return new_mpath; |
460 | } | 450 | } |
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
480 | 470 | ||
481 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); | 471 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); |
482 | tbl = sdata->u.mesh.mpp_paths; | 472 | tbl = sdata->u.mesh.mpp_paths; |
473 | |||
474 | spin_lock_bh(&tbl->walk_lock); | ||
483 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, | 475 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, |
484 | &new_mpath->rhash, | 476 | &new_mpath->rhash, |
485 | mesh_rht_params); | 477 | mesh_rht_params); |
478 | if (!ret) | ||
479 | hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head); | ||
480 | spin_unlock_bh(&tbl->walk_lock); | ||
481 | |||
482 | if (ret) | ||
483 | kfree(new_mpath); | ||
486 | 484 | ||
487 | sdata->u.mesh.mpp_paths_generation++; | 485 | sdata->u.mesh.mpp_paths_generation++; |
488 | return ret; | 486 | return ret; |
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta) | |||
503 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; | 501 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; |
504 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 502 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
505 | struct mesh_path *mpath; | 503 | struct mesh_path *mpath; |
506 | struct rhashtable_iter iter; | ||
507 | int ret; | ||
508 | |||
509 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
510 | if (ret) | ||
511 | return; | ||
512 | 504 | ||
513 | rhashtable_walk_start(&iter); | 505 | rcu_read_lock(); |
514 | 506 | hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { | |
515 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
516 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
517 | continue; | ||
518 | if (IS_ERR(mpath)) | ||
519 | break; | ||
520 | if (rcu_access_pointer(mpath->next_hop) == sta && | 507 | if (rcu_access_pointer(mpath->next_hop) == sta && |
521 | mpath->flags & MESH_PATH_ACTIVE && | 508 | mpath->flags & MESH_PATH_ACTIVE && |
522 | !(mpath->flags & MESH_PATH_FIXED)) { | 509 | !(mpath->flags & MESH_PATH_FIXED)) { |
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
530 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); | 517 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); |
531 | } | 518 | } |
532 | } | 519 | } |
533 | rhashtable_walk_stop(&iter); | 520 | rcu_read_unlock(); |
534 | rhashtable_walk_exit(&iter); | ||
535 | } | 521 | } |
536 | 522 | ||
537 | static void mesh_path_free_rcu(struct mesh_table *tbl, | 523 | static void mesh_path_free_rcu(struct mesh_table *tbl, |
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, | |||
551 | 537 | ||
552 | static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) | 538 | static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) |
553 | { | 539 | { |
540 | hlist_del_rcu(&mpath->walk_list); | ||
554 | rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); | 541 | rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); |
555 | mesh_path_free_rcu(tbl, mpath); | 542 | mesh_path_free_rcu(tbl, mpath); |
556 | } | 543 | } |
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
571 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 558 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
572 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; | 559 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; |
573 | struct mesh_path *mpath; | 560 | struct mesh_path *mpath; |
574 | struct rhashtable_iter iter; | 561 | struct hlist_node *n; |
575 | int ret; | ||
576 | |||
577 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
578 | if (ret) | ||
579 | return; | ||
580 | |||
581 | rhashtable_walk_start(&iter); | ||
582 | |||
583 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
584 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
585 | continue; | ||
586 | if (IS_ERR(mpath)) | ||
587 | break; | ||
588 | 562 | ||
563 | spin_lock_bh(&tbl->walk_lock); | ||
564 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { | ||
589 | if (rcu_access_pointer(mpath->next_hop) == sta) | 565 | if (rcu_access_pointer(mpath->next_hop) == sta) |
590 | __mesh_path_del(tbl, mpath); | 566 | __mesh_path_del(tbl, mpath); |
591 | } | 567 | } |
592 | 568 | spin_unlock_bh(&tbl->walk_lock); | |
593 | rhashtable_walk_stop(&iter); | ||
594 | rhashtable_walk_exit(&iter); | ||
595 | } | 569 | } |
596 | 570 | ||
597 | static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | 571 | static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, |
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | |||
599 | { | 573 | { |
600 | struct mesh_table *tbl = sdata->u.mesh.mpp_paths; | 574 | struct mesh_table *tbl = sdata->u.mesh.mpp_paths; |
601 | struct mesh_path *mpath; | 575 | struct mesh_path *mpath; |
602 | struct rhashtable_iter iter; | 576 | struct hlist_node *n; |
603 | int ret; | ||
604 | |||
605 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
606 | if (ret) | ||
607 | return; | ||
608 | |||
609 | rhashtable_walk_start(&iter); | ||
610 | |||
611 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
612 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
613 | continue; | ||
614 | if (IS_ERR(mpath)) | ||
615 | break; | ||
616 | 577 | ||
578 | spin_lock_bh(&tbl->walk_lock); | ||
579 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { | ||
617 | if (ether_addr_equal(mpath->mpp, proxy)) | 580 | if (ether_addr_equal(mpath->mpp, proxy)) |
618 | __mesh_path_del(tbl, mpath); | 581 | __mesh_path_del(tbl, mpath); |
619 | } | 582 | } |
620 | 583 | spin_unlock_bh(&tbl->walk_lock); | |
621 | rhashtable_walk_stop(&iter); | ||
622 | rhashtable_walk_exit(&iter); | ||
623 | } | 584 | } |
624 | 585 | ||
625 | static void table_flush_by_iface(struct mesh_table *tbl) | 586 | static void table_flush_by_iface(struct mesh_table *tbl) |
626 | { | 587 | { |
627 | struct mesh_path *mpath; | 588 | struct mesh_path *mpath; |
628 | struct rhashtable_iter iter; | 589 | struct hlist_node *n; |
629 | int ret; | ||
630 | |||
631 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
632 | if (ret) | ||
633 | return; | ||
634 | |||
635 | rhashtable_walk_start(&iter); | ||
636 | 590 | ||
637 | while ((mpath = rhashtable_walk_next(&iter))) { | 591 | spin_lock_bh(&tbl->walk_lock); |
638 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 592 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { |
639 | continue; | ||
640 | if (IS_ERR(mpath)) | ||
641 | break; | ||
642 | __mesh_path_del(tbl, mpath); | 593 | __mesh_path_del(tbl, mpath); |
643 | } | 594 | } |
644 | 595 | spin_unlock_bh(&tbl->walk_lock); | |
645 | rhashtable_walk_stop(&iter); | ||
646 | rhashtable_walk_exit(&iter); | ||
647 | } | 596 | } |
648 | 597 | ||
649 | /** | 598 | /** |
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl, | |||
675 | { | 624 | { |
676 | struct mesh_path *mpath; | 625 | struct mesh_path *mpath; |
677 | 626 | ||
678 | rcu_read_lock(); | 627 | spin_lock_bh(&tbl->walk_lock); |
679 | mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); | 628 | mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); |
680 | if (!mpath) { | 629 | if (!mpath) { |
681 | rcu_read_unlock(); | 630 | spin_unlock_bh(&tbl->walk_lock); |
682 | return -ENXIO; | 631 | return -ENXIO; |
683 | } | 632 | } |
684 | 633 | ||
685 | __mesh_path_del(tbl, mpath); | 634 | __mesh_path_del(tbl, mpath); |
686 | rcu_read_unlock(); | 635 | spin_unlock_bh(&tbl->walk_lock); |
687 | return 0; | 636 | return 0; |
688 | } | 637 | } |
689 | 638 | ||
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, | |||
854 | struct mesh_table *tbl) | 803 | struct mesh_table *tbl) |
855 | { | 804 | { |
856 | struct mesh_path *mpath; | 805 | struct mesh_path *mpath; |
857 | struct rhashtable_iter iter; | 806 | struct hlist_node *n; |
858 | int ret; | ||
859 | 807 | ||
860 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); | 808 | spin_lock_bh(&tbl->walk_lock); |
861 | if (ret) | 809 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { |
862 | return; | ||
863 | |||
864 | rhashtable_walk_start(&iter); | ||
865 | |||
866 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
867 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
868 | continue; | ||
869 | if (IS_ERR(mpath)) | ||
870 | break; | ||
871 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && | 810 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && |
872 | (!(mpath->flags & MESH_PATH_FIXED)) && | 811 | (!(mpath->flags & MESH_PATH_FIXED)) && |
873 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) | 812 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) |
874 | __mesh_path_del(tbl, mpath); | 813 | __mesh_path_del(tbl, mpath); |
875 | } | 814 | } |
876 | 815 | spin_unlock_bh(&tbl->walk_lock); | |
877 | rhashtable_walk_stop(&iter); | ||
878 | rhashtable_walk_exit(&iter); | ||
879 | } | 816 | } |
880 | 817 | ||
881 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | 818 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d0eb38b890aa..ba950ae974fc 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> | 5 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> |
6 | * Copyright 2013-2014 Intel Mobile Communications GmbH | 6 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
7 | * Copyright (C) 2015-2017 Intel Deutschland GmbH | 7 | * Copyright (C) 2015-2017 Intel Deutschland GmbH |
8 | * Copyright (C) 2018 Intel Corporation | 8 | * Copyright (C) 2018-2019 Intel Corporation |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
@@ -2146,6 +2146,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
2146 | case NL80211_IFTYPE_AP_VLAN: | 2146 | case NL80211_IFTYPE_AP_VLAN: |
2147 | case NL80211_IFTYPE_MONITOR: | 2147 | case NL80211_IFTYPE_MONITOR: |
2148 | break; | 2148 | break; |
2149 | case NL80211_IFTYPE_ADHOC: | ||
2150 | if (sdata->vif.bss_conf.ibss_joined) | ||
2151 | WARN_ON(drv_join_ibss(local, sdata)); | ||
2152 | /* fall through */ | ||
2149 | default: | 2153 | default: |
2150 | ieee80211_reconfig_stations(sdata); | 2154 | ieee80211_reconfig_stations(sdata); |
2151 | /* fall through */ | 2155 | /* fall through */ |
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index cad48d07c818..8401cefd9f65 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig | |||
@@ -29,6 +29,7 @@ config IP_VS_IPV6 | |||
29 | bool "IPv6 support for IPVS" | 29 | bool "IPv6 support for IPVS" |
30 | depends on IPV6 = y || IP_VS = IPV6 | 30 | depends on IPV6 = y || IP_VS = IPV6 |
31 | select IP6_NF_IPTABLES | 31 | select IP6_NF_IPTABLES |
32 | select NF_DEFRAG_IPV6 | ||
32 | ---help--- | 33 | ---help--- |
33 | Add IPv6 support to IPVS. | 34 | Add IPv6 support to IPVS. |
34 | 35 | ||
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index fe9abf3cc10a..235205c93e14 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, | |||
1536 | /* sorry, all this trouble for a no-hit :) */ | 1536 | /* sorry, all this trouble for a no-hit :) */ |
1537 | IP_VS_DBG_PKT(12, af, pp, skb, iph->off, | 1537 | IP_VS_DBG_PKT(12, af, pp, skb, iph->off, |
1538 | "ip_vs_in: packet continues traversal as normal"); | 1538 | "ip_vs_in: packet continues traversal as normal"); |
1539 | if (iph->fragoffs) { | 1539 | |
1540 | /* Fragment that couldn't be mapped to a conn entry | 1540 | /* Fragment couldn't be mapped to a conn entry */ |
1541 | * is missing module nf_defrag_ipv6 | 1541 | if (iph->fragoffs) |
1542 | */ | ||
1543 | IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n"); | ||
1544 | IP_VS_DBG_PKT(7, af, pp, skb, iph->off, | 1542 | IP_VS_DBG_PKT(7, af, pp, skb, iph->off, |
1545 | "unhandled fragment"); | 1543 | "unhandled fragment"); |
1546 | } | 1544 | |
1547 | *verdict = NF_ACCEPT; | 1545 | *verdict = NF_ACCEPT; |
1548 | return 0; | 1546 | return 0; |
1549 | } | 1547 | } |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 7d6318664eb2..ac8d848d7624 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #ifdef CONFIG_IP_VS_IPV6 | 43 | #ifdef CONFIG_IP_VS_IPV6 |
44 | #include <net/ipv6.h> | 44 | #include <net/ipv6.h> |
45 | #include <net/ip6_route.h> | 45 | #include <net/ip6_route.h> |
46 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> | ||
46 | #endif | 47 | #endif |
47 | #include <net/route.h> | 48 | #include <net/route.h> |
48 | #include <net/sock.h> | 49 | #include <net/sock.h> |
@@ -900,11 +901,17 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, | |||
900 | 901 | ||
901 | #ifdef CONFIG_IP_VS_IPV6 | 902 | #ifdef CONFIG_IP_VS_IPV6 |
902 | if (udest->af == AF_INET6) { | 903 | if (udest->af == AF_INET6) { |
904 | int ret; | ||
905 | |||
903 | atype = ipv6_addr_type(&udest->addr.in6); | 906 | atype = ipv6_addr_type(&udest->addr.in6); |
904 | if ((!(atype & IPV6_ADDR_UNICAST) || | 907 | if ((!(atype & IPV6_ADDR_UNICAST) || |
905 | atype & IPV6_ADDR_LINKLOCAL) && | 908 | atype & IPV6_ADDR_LINKLOCAL) && |
906 | !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) | 909 | !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) |
907 | return -EINVAL; | 910 | return -EINVAL; |
911 | |||
912 | ret = nf_defrag_ipv6_enable(svc->ipvs->net); | ||
913 | if (ret) | ||
914 | return ret; | ||
908 | } else | 915 | } else |
909 | #endif | 916 | #endif |
910 | { | 917 | { |
@@ -1228,6 +1235,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, | |||
1228 | ret = -EINVAL; | 1235 | ret = -EINVAL; |
1229 | goto out_err; | 1236 | goto out_err; |
1230 | } | 1237 | } |
1238 | |||
1239 | ret = nf_defrag_ipv6_enable(ipvs->net); | ||
1240 | if (ret) | ||
1241 | goto out_err; | ||
1231 | } | 1242 | } |
1232 | #endif | 1243 | #endif |
1233 | 1244 | ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 5a92f23f179f..4893f248dfdc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -313,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx) | |||
313 | int err; | 313 | int err; |
314 | 314 | ||
315 | list_for_each_entry(rule, &ctx->chain->rules, list) { | 315 | list_for_each_entry(rule, &ctx->chain->rules, list) { |
316 | if (!nft_is_active_next(ctx->net, rule)) | ||
317 | continue; | ||
318 | |||
316 | err = nft_delrule(ctx, rule); | 319 | err = nft_delrule(ctx, rule); |
317 | if (err < 0) | 320 | if (err < 0) |
318 | return err; | 321 | return err; |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index fe64df848365..0a4bad55a8aa 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -315,6 +315,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
315 | { | 315 | { |
316 | struct xt_target *target = expr->ops->data; | 316 | struct xt_target *target = expr->ops->data; |
317 | void *info = nft_expr_priv(expr); | 317 | void *info = nft_expr_priv(expr); |
318 | struct module *me = target->me; | ||
318 | struct xt_tgdtor_param par; | 319 | struct xt_tgdtor_param par; |
319 | 320 | ||
320 | par.net = ctx->net; | 321 | par.net = ctx->net; |
@@ -325,7 +326,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
325 | par.target->destroy(&par); | 326 | par.target->destroy(&par); |
326 | 327 | ||
327 | if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) | 328 | if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) |
328 | module_put(target->me); | 329 | module_put(me); |
329 | } | 330 | } |
330 | 331 | ||
331 | static int nft_extension_dump_info(struct sk_buff *skb, int attr, | 332 | static int nft_extension_dump_info(struct sk_buff *skb, int attr, |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index aecadd471e1d..13e1ac333fa4 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -1899,7 +1899,7 @@ static int __init xt_init(void) | |||
1899 | seqcount_init(&per_cpu(xt_recseq, i)); | 1899 | seqcount_init(&per_cpu(xt_recseq, i)); |
1900 | } | 1900 | } |
1901 | 1901 | ||
1902 | xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); | 1902 | xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); |
1903 | if (!xt) | 1903 | if (!xt) |
1904 | return -ENOMEM; | 1904 | return -ENOMEM; |
1905 | 1905 | ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3b1a78906bc0..1cd1d83a4be0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4292 | rb->frames_per_block = req->tp_block_size / req->tp_frame_size; | 4292 | rb->frames_per_block = req->tp_block_size / req->tp_frame_size; |
4293 | if (unlikely(rb->frames_per_block == 0)) | 4293 | if (unlikely(rb->frames_per_block == 0)) |
4294 | goto out; | 4294 | goto out; |
4295 | if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) | 4295 | if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) |
4296 | goto out; | 4296 | goto out; |
4297 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != | 4297 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != |
4298 | req->tp_frame_nr)) | 4298 | req->tp_frame_nr)) |
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 9ccc93f257db..38bb882bb958 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c | |||
@@ -48,7 +48,7 @@ struct tcindex_data { | |||
48 | u32 hash; /* hash table size; 0 if undefined */ | 48 | u32 hash; /* hash table size; 0 if undefined */ |
49 | u32 alloc_hash; /* allocated size */ | 49 | u32 alloc_hash; /* allocated size */ |
50 | u32 fall_through; /* 0: only classify if explicit match */ | 50 | u32 fall_through; /* 0: only classify if explicit match */ |
51 | struct rcu_head rcu; | 51 | struct rcu_work rwork; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) | 54 | static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) |
@@ -221,17 +221,11 @@ found: | |||
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int tcindex_destroy_element(struct tcf_proto *tp, | 224 | static void tcindex_destroy_work(struct work_struct *work) |
225 | void *arg, struct tcf_walker *walker) | ||
226 | { | ||
227 | bool last; | ||
228 | |||
229 | return tcindex_delete(tp, arg, &last, NULL); | ||
230 | } | ||
231 | |||
232 | static void __tcindex_destroy(struct rcu_head *head) | ||
233 | { | 225 | { |
234 | struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); | 226 | struct tcindex_data *p = container_of(to_rcu_work(work), |
227 | struct tcindex_data, | ||
228 | rwork); | ||
235 | 229 | ||
236 | kfree(p->perfect); | 230 | kfree(p->perfect); |
237 | kfree(p->h); | 231 | kfree(p->h); |
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r) | |||
258 | return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); | 252 | return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
259 | } | 253 | } |
260 | 254 | ||
261 | static void __tcindex_partial_destroy(struct rcu_head *head) | 255 | static void tcindex_partial_destroy_work(struct work_struct *work) |
262 | { | 256 | { |
263 | struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); | 257 | struct tcindex_data *p = container_of(to_rcu_work(work), |
258 | struct tcindex_data, | ||
259 | rwork); | ||
264 | 260 | ||
265 | kfree(p->perfect); | 261 | kfree(p->perfect); |
266 | kfree(p); | 262 | kfree(p); |
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp) | |||
275 | kfree(cp->perfect); | 271 | kfree(cp->perfect); |
276 | } | 272 | } |
277 | 273 | ||
278 | static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) | 274 | static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp) |
279 | { | 275 | { |
280 | int i, err = 0; | 276 | int i, err = 0; |
281 | 277 | ||
@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) | |||
289 | TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); | 285 | TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
290 | if (err < 0) | 286 | if (err < 0) |
291 | goto errout; | 287 | goto errout; |
288 | #ifdef CONFIG_NET_CLS_ACT | ||
289 | cp->perfect[i].exts.net = net; | ||
290 | #endif | ||
292 | } | 291 | } |
293 | 292 | ||
294 | return 0; | 293 | return 0; |
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
305 | struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) | 304 | struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) |
306 | { | 305 | { |
307 | struct tcindex_filter_result new_filter_result, *old_r = r; | 306 | struct tcindex_filter_result new_filter_result, *old_r = r; |
308 | struct tcindex_filter_result cr; | ||
309 | struct tcindex_data *cp = NULL, *oldp; | 307 | struct tcindex_data *cp = NULL, *oldp; |
310 | struct tcindex_filter *f = NULL; /* make gcc behave */ | 308 | struct tcindex_filter *f = NULL; /* make gcc behave */ |
309 | struct tcf_result cr = {}; | ||
311 | int err, balloc = 0; | 310 | int err, balloc = 0; |
312 | struct tcf_exts e; | 311 | struct tcf_exts e; |
313 | 312 | ||
@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
337 | if (p->perfect) { | 336 | if (p->perfect) { |
338 | int i; | 337 | int i; |
339 | 338 | ||
340 | if (tcindex_alloc_perfect_hash(cp) < 0) | 339 | if (tcindex_alloc_perfect_hash(net, cp) < 0) |
341 | goto errout; | 340 | goto errout; |
342 | for (i = 0; i < cp->hash; i++) | 341 | for (i = 0; i < cp->hash; i++) |
343 | cp->perfect[i].res = p->perfect[i].res; | 342 | cp->perfect[i].res = p->perfect[i].res; |
@@ -348,11 +347,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
348 | err = tcindex_filter_result_init(&new_filter_result); | 347 | err = tcindex_filter_result_init(&new_filter_result); |
349 | if (err < 0) | 348 | if (err < 0) |
350 | goto errout1; | 349 | goto errout1; |
351 | err = tcindex_filter_result_init(&cr); | ||
352 | if (err < 0) | ||
353 | goto errout1; | ||
354 | if (old_r) | 350 | if (old_r) |
355 | cr.res = r->res; | 351 | cr = r->res; |
356 | 352 | ||
357 | if (tb[TCA_TCINDEX_HASH]) | 353 | if (tb[TCA_TCINDEX_HASH]) |
358 | cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); | 354 | cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); |
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
406 | err = -ENOMEM; | 402 | err = -ENOMEM; |
407 | if (!cp->perfect && !cp->h) { | 403 | if (!cp->perfect && !cp->h) { |
408 | if (valid_perfect_hash(cp)) { | 404 | if (valid_perfect_hash(cp)) { |
409 | if (tcindex_alloc_perfect_hash(cp) < 0) | 405 | if (tcindex_alloc_perfect_hash(net, cp) < 0) |
410 | goto errout_alloc; | 406 | goto errout_alloc; |
411 | balloc = 1; | 407 | balloc = 1; |
412 | } else { | 408 | } else { |
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
443 | } | 439 | } |
444 | 440 | ||
445 | if (tb[TCA_TCINDEX_CLASSID]) { | 441 | if (tb[TCA_TCINDEX_CLASSID]) { |
446 | cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); | 442 | cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); |
447 | tcf_bind_filter(tp, &cr.res, base); | 443 | tcf_bind_filter(tp, &cr, base); |
448 | } | 444 | } |
449 | 445 | ||
450 | if (old_r && old_r != r) { | 446 | if (old_r && old_r != r) { |
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
456 | } | 452 | } |
457 | 453 | ||
458 | oldp = p; | 454 | oldp = p; |
459 | r->res = cr.res; | 455 | r->res = cr; |
460 | tcf_exts_change(&r->exts, &e); | 456 | tcf_exts_change(&r->exts, &e); |
461 | 457 | ||
462 | rcu_assign_pointer(tp->root, cp); | 458 | rcu_assign_pointer(tp->root, cp); |
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
475 | ; /* nothing */ | 471 | ; /* nothing */ |
476 | 472 | ||
477 | rcu_assign_pointer(*fp, f); | 473 | rcu_assign_pointer(*fp, f); |
474 | } else { | ||
475 | tcf_exts_destroy(&new_filter_result.exts); | ||
478 | } | 476 | } |
479 | 477 | ||
480 | if (oldp) | 478 | if (oldp) |
481 | call_rcu(&oldp->rcu, __tcindex_partial_destroy); | 479 | tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work); |
482 | return 0; | 480 | return 0; |
483 | 481 | ||
484 | errout_alloc: | 482 | errout_alloc: |
@@ -487,7 +485,6 @@ errout_alloc: | |||
487 | else if (balloc == 2) | 485 | else if (balloc == 2) |
488 | kfree(cp->h); | 486 | kfree(cp->h); |
489 | errout1: | 487 | errout1: |
490 | tcf_exts_destroy(&cr.exts); | ||
491 | tcf_exts_destroy(&new_filter_result.exts); | 488 | tcf_exts_destroy(&new_filter_result.exts); |
492 | errout: | 489 | errout: |
493 | kfree(cp); | 490 | kfree(cp); |
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp, | |||
562 | struct netlink_ext_ack *extack) | 559 | struct netlink_ext_ack *extack) |
563 | { | 560 | { |
564 | struct tcindex_data *p = rtnl_dereference(tp->root); | 561 | struct tcindex_data *p = rtnl_dereference(tp->root); |
565 | struct tcf_walker walker; | 562 | int i; |
566 | 563 | ||
567 | pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); | 564 | pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); |
568 | walker.count = 0; | ||
569 | walker.skip = 0; | ||
570 | walker.fn = tcindex_destroy_element; | ||
571 | tcindex_walk(tp, &walker); | ||
572 | 565 | ||
573 | call_rcu(&p->rcu, __tcindex_destroy); | 566 | if (p->perfect) { |
567 | for (i = 0; i < p->hash; i++) { | ||
568 | struct tcindex_filter_result *r = p->perfect + i; | ||
569 | |||
570 | tcf_unbind_filter(tp, &r->res); | ||
571 | if (tcf_exts_get_net(&r->exts)) | ||
572 | tcf_queue_work(&r->rwork, | ||
573 | tcindex_destroy_rexts_work); | ||
574 | else | ||
575 | __tcindex_destroy_rexts(r); | ||
576 | } | ||
577 | } | ||
578 | |||
579 | for (i = 0; p->h && i < p->hash; i++) { | ||
580 | struct tcindex_filter *f, *next; | ||
581 | bool last; | ||
582 | |||
583 | for (f = rtnl_dereference(p->h[i]); f; f = next) { | ||
584 | next = rtnl_dereference(f->next); | ||
585 | tcindex_delete(tp, &f->result, &last, NULL); | ||
586 | } | ||
587 | } | ||
588 | |||
589 | tcf_queue_work(&p->rwork, tcindex_destroy_work); | ||
574 | } | 590 | } |
575 | 591 | ||
576 | 592 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 66ba2ce2320f..968a85fe4d4a 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -500,7 +500,7 @@ static void dev_watchdog_down(struct net_device *dev) | |||
500 | * netif_carrier_on - set carrier | 500 | * netif_carrier_on - set carrier |
501 | * @dev: network device | 501 | * @dev: network device |
502 | * | 502 | * |
503 | * Device has detected that carrier. | 503 | * Device has detected acquisition of carrier. |
504 | */ | 504 | */ |
505 | void netif_carrier_on(struct net_device *dev) | 505 | void netif_carrier_on(struct net_device *dev) |
506 | { | 506 | { |
diff --git a/net/sctp/diag.c b/net/sctp/diag.c index 078f01a8d582..435847d98b51 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c | |||
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc) | |||
256 | + nla_total_size(1) /* INET_DIAG_TOS */ | 256 | + nla_total_size(1) /* INET_DIAG_TOS */ |
257 | + nla_total_size(1) /* INET_DIAG_TCLASS */ | 257 | + nla_total_size(1) /* INET_DIAG_TCLASS */ |
258 | + nla_total_size(4) /* INET_DIAG_MARK */ | 258 | + nla_total_size(4) /* INET_DIAG_MARK */ |
259 | + nla_total_size(4) /* INET_DIAG_CLASS_ID */ | ||
259 | + nla_total_size(addrlen * asoc->peer.transport_count) | 260 | + nla_total_size(addrlen * asoc->peer.transport_count) |
260 | + nla_total_size(addrlen * addrcnt) | 261 | + nla_total_size(addrlen * addrcnt) |
261 | + nla_total_size(sizeof(struct inet_diag_meminfo)) | 262 | + nla_total_size(sizeof(struct inet_diag_meminfo)) |
diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 123e9f2dc226..edfcf16e704c 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c | |||
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb) | |||
36 | { | 36 | { |
37 | skb->ip_summed = CHECKSUM_NONE; | 37 | skb->ip_summed = CHECKSUM_NONE; |
38 | skb->csum_not_inet = 0; | 38 | skb->csum_not_inet = 0; |
39 | gso_reset_checksum(skb, ~0); | ||
39 | return sctp_compute_cksum(skb, skb_transport_offset(skb)); | 40 | return sctp_compute_cksum(skb, skb_transport_offset(skb)); |
40 | } | 41 | } |
41 | 42 | ||
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index f24633114dfd..2936ed17bf9e 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
@@ -144,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, | |||
144 | } | 144 | } |
145 | } | 145 | } |
146 | 146 | ||
147 | for (i = outcnt; i < stream->outcnt; i++) | 147 | for (i = outcnt; i < stream->outcnt; i++) { |
148 | kfree(SCTP_SO(stream, i)->ext); | 148 | kfree(SCTP_SO(stream, i)->ext); |
149 | SCTP_SO(stream, i)->ext = NULL; | ||
150 | } | ||
149 | } | 151 | } |
150 | 152 | ||
151 | static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, | 153 | static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, |
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index a712c9f8699b..fb07ad8d69a6 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c | |||
@@ -101,9 +101,7 @@ int smc_cdc_msg_send(struct smc_connection *conn, | |||
101 | 101 | ||
102 | conn->tx_cdc_seq++; | 102 | conn->tx_cdc_seq++; |
103 | conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; | 103 | conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; |
104 | smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, | 104 | smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed); |
105 | &conn->local_tx_ctrl, conn); | ||
106 | smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn); | ||
107 | rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); | 105 | rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); |
108 | if (!rc) | 106 | if (!rc) |
109 | smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); | 107 | smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); |
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index 271e2524dc8f..f1cdde9d4b89 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h | |||
@@ -211,26 +211,27 @@ static inline int smc_curs_diff_large(unsigned int size, | |||
211 | 211 | ||
212 | static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, | 212 | static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, |
213 | union smc_host_cursor *local, | 213 | union smc_host_cursor *local, |
214 | union smc_host_cursor *save, | ||
214 | struct smc_connection *conn) | 215 | struct smc_connection *conn) |
215 | { | 216 | { |
216 | union smc_host_cursor temp; | 217 | smc_curs_copy(save, local, conn); |
217 | 218 | peer->count = htonl(save->count); | |
218 | smc_curs_copy(&temp, local, conn); | 219 | peer->wrap = htons(save->wrap); |
219 | peer->count = htonl(temp.count); | ||
220 | peer->wrap = htons(temp.wrap); | ||
221 | /* peer->reserved = htons(0); must be ensured by caller */ | 220 | /* peer->reserved = htons(0); must be ensured by caller */ |
222 | } | 221 | } |
223 | 222 | ||
224 | static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, | 223 | static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, |
225 | struct smc_host_cdc_msg *local, | 224 | struct smc_connection *conn, |
226 | struct smc_connection *conn) | 225 | union smc_host_cursor *save) |
227 | { | 226 | { |
227 | struct smc_host_cdc_msg *local = &conn->local_tx_ctrl; | ||
228 | |||
228 | peer->common.type = local->common.type; | 229 | peer->common.type = local->common.type; |
229 | peer->len = local->len; | 230 | peer->len = local->len; |
230 | peer->seqno = htons(local->seqno); | 231 | peer->seqno = htons(local->seqno); |
231 | peer->token = htonl(local->token); | 232 | peer->token = htonl(local->token); |
232 | smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn); | 233 | smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn); |
233 | smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn); | 234 | smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn); |
234 | peer->prod_flags = local->prod_flags; | 235 | peer->prod_flags = local->prod_flags; |
235 | peer->conn_state_flags = local->conn_state_flags; | 236 | peer->conn_state_flags = local->conn_state_flags; |
236 | } | 237 | } |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index fb6656295204..507105127095 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c | |||
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
44 | unsigned char *cksum, unsigned char *buf) | 44 | unsigned char *cksum, unsigned char *buf) |
45 | { | 45 | { |
46 | struct crypto_sync_skcipher *cipher; | 46 | struct crypto_sync_skcipher *cipher; |
47 | unsigned char plain[8]; | 47 | unsigned char *plain; |
48 | s32 code; | 48 | s32 code; |
49 | 49 | ||
50 | dprintk("RPC: %s:\n", __func__); | 50 | dprintk("RPC: %s:\n", __func__); |
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
52 | if (IS_ERR(cipher)) | 52 | if (IS_ERR(cipher)) |
53 | return PTR_ERR(cipher); | 53 | return PTR_ERR(cipher); |
54 | 54 | ||
55 | plain = kmalloc(8, GFP_NOFS); | ||
56 | if (!plain) | ||
57 | return -ENOMEM; | ||
58 | |||
55 | plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); | 59 | plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); |
56 | plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); | 60 | plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); |
57 | plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); | 61 | plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); |
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
67 | 71 | ||
68 | code = krb5_encrypt(cipher, cksum, plain, buf, 8); | 72 | code = krb5_encrypt(cipher, cksum, plain, buf, 8); |
69 | out: | 73 | out: |
74 | kfree(plain); | ||
70 | crypto_free_sync_skcipher(cipher); | 75 | crypto_free_sync_skcipher(cipher); |
71 | return code; | 76 | return code; |
72 | } | 77 | } |
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx, | |||
77 | u32 seqnum, | 82 | u32 seqnum, |
78 | unsigned char *cksum, unsigned char *buf) | 83 | unsigned char *cksum, unsigned char *buf) |
79 | { | 84 | { |
80 | unsigned char plain[8]; | 85 | unsigned char *plain; |
86 | s32 code; | ||
81 | 87 | ||
82 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) | 88 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) |
83 | return krb5_make_rc4_seq_num(kctx, direction, seqnum, | 89 | return krb5_make_rc4_seq_num(kctx, direction, seqnum, |
84 | cksum, buf); | 90 | cksum, buf); |
85 | 91 | ||
92 | plain = kmalloc(8, GFP_NOFS); | ||
93 | if (!plain) | ||
94 | return -ENOMEM; | ||
95 | |||
86 | plain[0] = (unsigned char) (seqnum & 0xff); | 96 | plain[0] = (unsigned char) (seqnum & 0xff); |
87 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); | 97 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); |
88 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); | 98 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); |
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx, | |||
93 | plain[6] = direction; | 103 | plain[6] = direction; |
94 | plain[7] = direction; | 104 | plain[7] = direction; |
95 | 105 | ||
96 | return krb5_encrypt(key, cksum, plain, buf, 8); | 106 | code = krb5_encrypt(key, cksum, plain, buf, 8); |
107 | kfree(plain); | ||
108 | return code; | ||
97 | } | 109 | } |
98 | 110 | ||
99 | static s32 | 111 | static s32 |
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, | |||
101 | unsigned char *buf, int *direction, s32 *seqnum) | 113 | unsigned char *buf, int *direction, s32 *seqnum) |
102 | { | 114 | { |
103 | struct crypto_sync_skcipher *cipher; | 115 | struct crypto_sync_skcipher *cipher; |
104 | unsigned char plain[8]; | 116 | unsigned char *plain; |
105 | s32 code; | 117 | s32 code; |
106 | 118 | ||
107 | dprintk("RPC: %s:\n", __func__); | 119 | dprintk("RPC: %s:\n", __func__); |
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, | |||
113 | if (code) | 125 | if (code) |
114 | goto out; | 126 | goto out; |
115 | 127 | ||
128 | plain = kmalloc(8, GFP_NOFS); | ||
129 | if (!plain) { | ||
130 | code = -ENOMEM; | ||
131 | goto out; | ||
132 | } | ||
133 | |||
116 | code = krb5_decrypt(cipher, cksum, buf, plain, 8); | 134 | code = krb5_decrypt(cipher, cksum, buf, plain, 8); |
117 | if (code) | 135 | if (code) |
118 | goto out; | 136 | goto out_plain; |
119 | 137 | ||
120 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) | 138 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) |
121 | || (plain[4] != plain[7])) { | 139 | || (plain[4] != plain[7])) { |
122 | code = (s32)KG_BAD_SEQ; | 140 | code = (s32)KG_BAD_SEQ; |
123 | goto out; | 141 | goto out_plain; |
124 | } | 142 | } |
125 | 143 | ||
126 | *direction = plain[4]; | 144 | *direction = plain[4]; |
127 | 145 | ||
128 | *seqnum = ((plain[0] << 24) | (plain[1] << 16) | | 146 | *seqnum = ((plain[0] << 24) | (plain[1] << 16) | |
129 | (plain[2] << 8) | (plain[3])); | 147 | (plain[2] << 8) | (plain[3])); |
148 | out_plain: | ||
149 | kfree(plain); | ||
130 | out: | 150 | out: |
131 | crypto_free_sync_skcipher(cipher); | 151 | crypto_free_sync_skcipher(cipher); |
132 | return code; | 152 | return code; |
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx, | |||
139 | int *direction, u32 *seqnum) | 159 | int *direction, u32 *seqnum) |
140 | { | 160 | { |
141 | s32 code; | 161 | s32 code; |
142 | unsigned char plain[8]; | 162 | unsigned char *plain; |
143 | struct crypto_sync_skcipher *key = kctx->seq; | 163 | struct crypto_sync_skcipher *key = kctx->seq; |
144 | 164 | ||
145 | dprintk("RPC: krb5_get_seq_num:\n"); | 165 | dprintk("RPC: krb5_get_seq_num:\n"); |
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx, | |||
147 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) | 167 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) |
148 | return krb5_get_rc4_seq_num(kctx, cksum, buf, | 168 | return krb5_get_rc4_seq_num(kctx, cksum, buf, |
149 | direction, seqnum); | 169 | direction, seqnum); |
170 | plain = kmalloc(8, GFP_NOFS); | ||
171 | if (!plain) | ||
172 | return -ENOMEM; | ||
150 | 173 | ||
151 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) | 174 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) |
152 | return code; | 175 | goto out; |
153 | 176 | ||
154 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || | 177 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || |
155 | (plain[4] != plain[7])) | 178 | (plain[4] != plain[7])) { |
156 | return (s32)KG_BAD_SEQ; | 179 | code = (s32)KG_BAD_SEQ; |
180 | goto out; | ||
181 | } | ||
157 | 182 | ||
158 | *direction = plain[4]; | 183 | *direction = plain[4]; |
159 | 184 | ||
160 | *seqnum = ((plain[0]) | | 185 | *seqnum = ((plain[0]) | |
161 | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); | 186 | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); |
162 | 187 | ||
163 | return 0; | 188 | out: |
189 | kfree(plain); | ||
190 | return code; | ||
164 | } | 191 | } |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index c048eb6deaaf..d4018e5a24c5 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -80,7 +80,7 @@ unx_match(struct auth_cred *acred, struct rpc_cred *cred, int flags) | |||
80 | if (!uid_eq(cred->cr_cred->fsuid, acred->cred->fsuid) || !gid_eq(cred->cr_cred->fsgid, acred->cred->fsgid)) | 80 | if (!uid_eq(cred->cr_cred->fsuid, acred->cred->fsuid) || !gid_eq(cred->cr_cred->fsgid, acred->cred->fsgid)) |
81 | return 0; | 81 | return 0; |
82 | 82 | ||
83 | if (acred->cred && acred->cred->group_info != NULL) | 83 | if (acred->cred->group_info != NULL) |
84 | groups = acred->cred->group_info->ngroups; | 84 | groups = acred->cred->group_info->ngroups; |
85 | if (groups > UNX_NGROUPS) | 85 | if (groups > UNX_NGROUPS) |
86 | groups = UNX_NGROUPS; | 86 | groups = UNX_NGROUPS; |
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index ec451b8114b0..b9313c15ee3a 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
@@ -252,7 +252,6 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid) | |||
252 | req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, | 252 | req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, |
253 | rq_bc_pa_list); | 253 | rq_bc_pa_list); |
254 | req->rq_reply_bytes_recvd = 0; | 254 | req->rq_reply_bytes_recvd = 0; |
255 | req->rq_bytes_sent = 0; | ||
256 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, | 255 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, |
257 | sizeof(req->rq_private_buf)); | 256 | sizeof(req->rq_private_buf)); |
258 | req->rq_xid = xid; | 257 | req->rq_xid = xid; |
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index 45a033329cd4..19bb356230ed 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c | |||
@@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | |||
146 | rcu_read_lock(); | 146 | rcu_read_lock(); |
147 | xprt = rcu_dereference(clnt->cl_xprt); | 147 | xprt = rcu_dereference(clnt->cl_xprt); |
148 | /* no "debugfs" dentry? Don't bother with the symlink. */ | 148 | /* no "debugfs" dentry? Don't bother with the symlink. */ |
149 | if (!xprt->debugfs) { | 149 | if (IS_ERR_OR_NULL(xprt->debugfs)) { |
150 | rcu_read_unlock(); | 150 | rcu_read_unlock(); |
151 | return; | 151 | return; |
152 | } | 152 | } |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index adc3c40cc733..2168d4d9c09f 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/spinlock.h> | 19 | #include <linux/spinlock.h> |
20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
21 | #include <linux/freezer.h> | 21 | #include <linux/freezer.h> |
22 | #include <linux/sched/mm.h> | ||
22 | 23 | ||
23 | #include <linux/sunrpc/clnt.h> | 24 | #include <linux/sunrpc/clnt.h> |
24 | 25 | ||
@@ -902,7 +903,10 @@ void rpc_execute(struct rpc_task *task) | |||
902 | 903 | ||
903 | static void rpc_async_schedule(struct work_struct *work) | 904 | static void rpc_async_schedule(struct work_struct *work) |
904 | { | 905 | { |
906 | unsigned int pflags = memalloc_nofs_save(); | ||
907 | |||
905 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); | 908 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); |
909 | memalloc_nofs_restore(pflags); | ||
906 | } | 910 | } |
907 | 911 | ||
908 | /** | 912 | /** |
@@ -1067,7 +1071,10 @@ static void rpc_free_task(struct rpc_task *task) | |||
1067 | 1071 | ||
1068 | static void rpc_async_release(struct work_struct *work) | 1072 | static void rpc_async_release(struct work_struct *work) |
1069 | { | 1073 | { |
1074 | unsigned int pflags = memalloc_nofs_save(); | ||
1075 | |||
1070 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); | 1076 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); |
1077 | memalloc_nofs_restore(pflags); | ||
1071 | } | 1078 | } |
1072 | 1079 | ||
1073 | static void rpc_release_resources_task(struct rpc_task *task) | 1080 | static void rpc_release_resources_task(struct rpc_task *task) |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index bc7489f1fe55..1cf4e379be7b 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/sunrpc/metrics.h> | 49 | #include <linux/sunrpc/metrics.h> |
50 | #include <linux/sunrpc/bc_xprt.h> | 50 | #include <linux/sunrpc/bc_xprt.h> |
51 | #include <linux/rcupdate.h> | 51 | #include <linux/rcupdate.h> |
52 | #include <linux/sched/mm.h> | ||
52 | 53 | ||
53 | #include <trace/events/sunrpc.h> | 54 | #include <trace/events/sunrpc.h> |
54 | 55 | ||
@@ -643,11 +644,13 @@ static void xprt_autoclose(struct work_struct *work) | |||
643 | { | 644 | { |
644 | struct rpc_xprt *xprt = | 645 | struct rpc_xprt *xprt = |
645 | container_of(work, struct rpc_xprt, task_cleanup); | 646 | container_of(work, struct rpc_xprt, task_cleanup); |
647 | unsigned int pflags = memalloc_nofs_save(); | ||
646 | 648 | ||
647 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | 649 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); |
648 | xprt->ops->close(xprt); | 650 | xprt->ops->close(xprt); |
649 | xprt_release_write(xprt, NULL); | 651 | xprt_release_write(xprt, NULL); |
650 | wake_up_bit(&xprt->state, XPRT_LOCKED); | 652 | wake_up_bit(&xprt->state, XPRT_LOCKED); |
653 | memalloc_nofs_restore(pflags); | ||
651 | } | 654 | } |
652 | 655 | ||
653 | /** | 656 | /** |
@@ -1601,7 +1604,6 @@ xprt_request_init(struct rpc_task *task) | |||
1601 | req->rq_buffer = NULL; | 1604 | req->rq_buffer = NULL; |
1602 | req->rq_xid = xprt_alloc_xid(xprt); | 1605 | req->rq_xid = xprt_alloc_xid(xprt); |
1603 | xprt_init_connect_cookie(req, xprt); | 1606 | xprt_init_connect_cookie(req, xprt); |
1604 | req->rq_bytes_sent = 0; | ||
1605 | req->rq_snd_buf.len = 0; | 1607 | req->rq_snd_buf.len = 0; |
1606 | req->rq_snd_buf.buflen = 0; | 1608 | req->rq_snd_buf.buflen = 0; |
1607 | req->rq_rcv_buf.len = 0; | 1609 | req->rq_rcv_buf.len = 0; |
@@ -1723,6 +1725,7 @@ void xprt_release(struct rpc_task *task) | |||
1723 | xprt->ops->buf_free(task); | 1725 | xprt->ops->buf_free(task); |
1724 | xprt_inject_disconnect(xprt); | 1726 | xprt_inject_disconnect(xprt); |
1725 | xdr_free_bvec(&req->rq_rcv_buf); | 1727 | xdr_free_bvec(&req->rq_rcv_buf); |
1728 | xdr_free_bvec(&req->rq_snd_buf); | ||
1726 | if (req->rq_cred != NULL) | 1729 | if (req->rq_cred != NULL) |
1727 | put_rpccred(req->rq_cred); | 1730 | put_rpccred(req->rq_cred); |
1728 | task->tk_rqstp = NULL; | 1731 | task->tk_rqstp = NULL; |
@@ -1751,7 +1754,6 @@ xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task) | |||
1751 | */ | 1754 | */ |
1752 | xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + | 1755 | xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + |
1753 | xbufp->tail[0].iov_len; | 1756 | xbufp->tail[0].iov_len; |
1754 | req->rq_bytes_sent = 0; | ||
1755 | } | 1757 | } |
1756 | #endif | 1758 | #endif |
1757 | 1759 | ||
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index 98c1e43eb7b1..d79b18c1f4cd 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c | |||
@@ -267,7 +267,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, | |||
267 | 267 | ||
268 | /* Prepare rqst */ | 268 | /* Prepare rqst */ |
269 | rqst->rq_reply_bytes_recvd = 0; | 269 | rqst->rq_reply_bytes_recvd = 0; |
270 | rqst->rq_bytes_sent = 0; | ||
271 | rqst->rq_xid = *p; | 270 | rqst->rq_xid = *p; |
272 | 271 | ||
273 | rqst->rq_private_buf.len = size; | 272 | rqst->rq_private_buf.len = size; |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index e7274dc10120..5d261353bd90 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -737,7 +737,6 @@ xprt_rdma_send_request(struct rpc_rqst *rqst) | |||
737 | goto drop_connection; | 737 | goto drop_connection; |
738 | 738 | ||
739 | rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; | 739 | rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; |
740 | rqst->rq_bytes_sent = 0; | ||
741 | 740 | ||
742 | /* An RPC with no reply will throw off credit accounting, | 741 | /* An RPC with no reply will throw off credit accounting, |
743 | * so drop the connection to reset the credit grant. | 742 | * so drop the connection to reset the credit grant. |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index b4e997d53ec7..89a63391d4d4 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -527,7 +527,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
527 | 527 | ||
528 | sendcq = ib_alloc_cq(ia->ri_device, NULL, | 528 | sendcq = ib_alloc_cq(ia->ri_device, NULL, |
529 | ep->rep_attr.cap.max_send_wr + 1, | 529 | ep->rep_attr.cap.max_send_wr + 1, |
530 | 1, IB_POLL_WORKQUEUE); | 530 | ia->ri_device->num_comp_vectors > 1 ? 1 : 0, |
531 | IB_POLL_WORKQUEUE); | ||
531 | if (IS_ERR(sendcq)) { | 532 | if (IS_ERR(sendcq)) { |
532 | rc = PTR_ERR(sendcq); | 533 | rc = PTR_ERR(sendcq); |
533 | goto out1; | 534 | goto out1; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index ae09d850cd11..53de72d2dded 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/bvec.h> | 50 | #include <linux/bvec.h> |
51 | #include <linux/highmem.h> | 51 | #include <linux/highmem.h> |
52 | #include <linux/uio.h> | 52 | #include <linux/uio.h> |
53 | #include <linux/sched/mm.h> | ||
53 | 54 | ||
54 | #include <trace/events/sunrpc.h> | 55 | #include <trace/events/sunrpc.h> |
55 | 56 | ||
@@ -404,8 +405,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
404 | size_t want, seek_init = seek, offset = 0; | 405 | size_t want, seek_init = seek, offset = 0; |
405 | ssize_t ret; | 406 | ssize_t ret; |
406 | 407 | ||
407 | if (seek < buf->head[0].iov_len) { | 408 | want = min_t(size_t, count, buf->head[0].iov_len); |
408 | want = min_t(size_t, count, buf->head[0].iov_len); | 409 | if (seek < want) { |
409 | ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek); | 410 | ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek); |
410 | if (ret <= 0) | 411 | if (ret <= 0) |
411 | goto sock_err; | 412 | goto sock_err; |
@@ -416,8 +417,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
416 | goto out; | 417 | goto out; |
417 | seek = 0; | 418 | seek = 0; |
418 | } else { | 419 | } else { |
419 | seek -= buf->head[0].iov_len; | 420 | seek -= want; |
420 | offset += buf->head[0].iov_len; | 421 | offset += want; |
421 | } | 422 | } |
422 | 423 | ||
423 | want = xs_alloc_sparse_pages(buf, | 424 | want = xs_alloc_sparse_pages(buf, |
@@ -442,8 +443,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
442 | offset += want; | 443 | offset += want; |
443 | } | 444 | } |
444 | 445 | ||
445 | if (seek < buf->tail[0].iov_len) { | 446 | want = min_t(size_t, count - offset, buf->tail[0].iov_len); |
446 | want = min_t(size_t, count - offset, buf->tail[0].iov_len); | 447 | if (seek < want) { |
447 | ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek); | 448 | ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek); |
448 | if (ret <= 0) | 449 | if (ret <= 0) |
449 | goto sock_err; | 450 | goto sock_err; |
@@ -453,7 +454,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
453 | if (ret != want) | 454 | if (ret != want) |
454 | goto out; | 455 | goto out; |
455 | } else | 456 | } else |
456 | offset += buf->tail[0].iov_len; | 457 | offset = seek_init; |
457 | ret = -EMSGSIZE; | 458 | ret = -EMSGSIZE; |
458 | out: | 459 | out: |
459 | *read = offset - seek_init; | 460 | *read = offset - seek_init; |
@@ -481,6 +482,14 @@ xs_read_stream_request_done(struct sock_xprt *transport) | |||
481 | return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT); | 482 | return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT); |
482 | } | 483 | } |
483 | 484 | ||
485 | static void | ||
486 | xs_read_stream_check_eor(struct sock_xprt *transport, | ||
487 | struct msghdr *msg) | ||
488 | { | ||
489 | if (xs_read_stream_request_done(transport)) | ||
490 | msg->msg_flags |= MSG_EOR; | ||
491 | } | ||
492 | |||
484 | static ssize_t | 493 | static ssize_t |
485 | xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg, | 494 | xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg, |
486 | int flags, struct rpc_rqst *req) | 495 | int flags, struct rpc_rqst *req) |
@@ -492,17 +501,21 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg, | |||
492 | xs_read_header(transport, buf); | 501 | xs_read_header(transport, buf); |
493 | 502 | ||
494 | want = transport->recv.len - transport->recv.offset; | 503 | want = transport->recv.len - transport->recv.offset; |
495 | ret = xs_read_xdr_buf(transport->sock, msg, flags, buf, | 504 | if (want != 0) { |
496 | transport->recv.copied + want, transport->recv.copied, | 505 | ret = xs_read_xdr_buf(transport->sock, msg, flags, buf, |
497 | &read); | 506 | transport->recv.copied + want, |
498 | transport->recv.offset += read; | 507 | transport->recv.copied, |
499 | transport->recv.copied += read; | 508 | &read); |
500 | if (transport->recv.offset == transport->recv.len) { | 509 | transport->recv.offset += read; |
501 | if (xs_read_stream_request_done(transport)) | 510 | transport->recv.copied += read; |
502 | msg->msg_flags |= MSG_EOR; | ||
503 | return read; | ||
504 | } | 511 | } |
505 | 512 | ||
513 | if (transport->recv.offset == transport->recv.len) | ||
514 | xs_read_stream_check_eor(transport, msg); | ||
515 | |||
516 | if (want == 0) | ||
517 | return 0; | ||
518 | |||
506 | switch (ret) { | 519 | switch (ret) { |
507 | default: | 520 | default: |
508 | break; | 521 | break; |
@@ -655,13 +668,34 @@ out_err: | |||
655 | return ret != 0 ? ret : -ESHUTDOWN; | 668 | return ret != 0 ? ret : -ESHUTDOWN; |
656 | } | 669 | } |
657 | 670 | ||
671 | static __poll_t xs_poll_socket(struct sock_xprt *transport) | ||
672 | { | ||
673 | return transport->sock->ops->poll(NULL, transport->sock, NULL); | ||
674 | } | ||
675 | |||
676 | static bool xs_poll_socket_readable(struct sock_xprt *transport) | ||
677 | { | ||
678 | __poll_t events = xs_poll_socket(transport); | ||
679 | |||
680 | return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP); | ||
681 | } | ||
682 | |||
683 | static void xs_poll_check_readable(struct sock_xprt *transport) | ||
684 | { | ||
685 | |||
686 | clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); | ||
687 | if (!xs_poll_socket_readable(transport)) | ||
688 | return; | ||
689 | if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) | ||
690 | queue_work(xprtiod_workqueue, &transport->recv_worker); | ||
691 | } | ||
692 | |||
658 | static void xs_stream_data_receive(struct sock_xprt *transport) | 693 | static void xs_stream_data_receive(struct sock_xprt *transport) |
659 | { | 694 | { |
660 | size_t read = 0; | 695 | size_t read = 0; |
661 | ssize_t ret = 0; | 696 | ssize_t ret = 0; |
662 | 697 | ||
663 | mutex_lock(&transport->recv_mutex); | 698 | mutex_lock(&transport->recv_mutex); |
664 | clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); | ||
665 | if (transport->sock == NULL) | 699 | if (transport->sock == NULL) |
666 | goto out; | 700 | goto out; |
667 | for (;;) { | 701 | for (;;) { |
@@ -671,6 +705,10 @@ static void xs_stream_data_receive(struct sock_xprt *transport) | |||
671 | read += ret; | 705 | read += ret; |
672 | cond_resched(); | 706 | cond_resched(); |
673 | } | 707 | } |
708 | if (ret == -ESHUTDOWN) | ||
709 | kernel_sock_shutdown(transport->sock, SHUT_RDWR); | ||
710 | else | ||
711 | xs_poll_check_readable(transport); | ||
674 | out: | 712 | out: |
675 | mutex_unlock(&transport->recv_mutex); | 713 | mutex_unlock(&transport->recv_mutex); |
676 | trace_xs_stream_read_data(&transport->xprt, ret, read); | 714 | trace_xs_stream_read_data(&transport->xprt, ret, read); |
@@ -680,7 +718,10 @@ static void xs_stream_data_receive_workfn(struct work_struct *work) | |||
680 | { | 718 | { |
681 | struct sock_xprt *transport = | 719 | struct sock_xprt *transport = |
682 | container_of(work, struct sock_xprt, recv_worker); | 720 | container_of(work, struct sock_xprt, recv_worker); |
721 | unsigned int pflags = memalloc_nofs_save(); | ||
722 | |||
683 | xs_stream_data_receive(transport); | 723 | xs_stream_data_receive(transport); |
724 | memalloc_nofs_restore(pflags); | ||
684 | } | 725 | } |
685 | 726 | ||
686 | static void | 727 | static void |
@@ -690,99 +731,65 @@ xs_stream_reset_connect(struct sock_xprt *transport) | |||
690 | transport->recv.len = 0; | 731 | transport->recv.len = 0; |
691 | transport->recv.copied = 0; | 732 | transport->recv.copied = 0; |
692 | transport->xmit.offset = 0; | 733 | transport->xmit.offset = 0; |
734 | } | ||
735 | |||
736 | static void | ||
737 | xs_stream_start_connect(struct sock_xprt *transport) | ||
738 | { | ||
693 | transport->xprt.stat.connect_count++; | 739 | transport->xprt.stat.connect_count++; |
694 | transport->xprt.stat.connect_start = jiffies; | 740 | transport->xprt.stat.connect_start = jiffies; |
695 | } | 741 | } |
696 | 742 | ||
697 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) | 743 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) |
698 | 744 | ||
745 | static int xs_sendmsg(struct socket *sock, struct msghdr *msg, size_t seek) | ||
746 | { | ||
747 | if (seek) | ||
748 | iov_iter_advance(&msg->msg_iter, seek); | ||
749 | return sock_sendmsg(sock, msg); | ||
750 | } | ||
751 | |||
752 | static int xs_send_kvec(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t seek) | ||
753 | { | ||
754 | iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len); | ||
755 | return xs_sendmsg(sock, msg, seek); | ||
756 | } | ||
757 | |||
758 | static int xs_send_pagedata(struct socket *sock, struct msghdr *msg, struct xdr_buf *xdr, size_t base) | ||
759 | { | ||
760 | int err; | ||
761 | |||
762 | err = xdr_alloc_bvec(xdr, GFP_KERNEL); | ||
763 | if (err < 0) | ||
764 | return err; | ||
765 | |||
766 | iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, | ||
767 | xdr_buf_pagecount(xdr), | ||
768 | xdr->page_len + xdr->page_base); | ||
769 | return xs_sendmsg(sock, msg, base + xdr->page_base); | ||
770 | } | ||
771 | |||
772 | #define xs_record_marker_len() sizeof(rpc_fraghdr) | ||
773 | |||
699 | /* Common case: | 774 | /* Common case: |
700 | * - stream transport | 775 | * - stream transport |
701 | * - sending from byte 0 of the message | 776 | * - sending from byte 0 of the message |
702 | * - the message is wholly contained in @xdr's head iovec | 777 | * - the message is wholly contained in @xdr's head iovec |
703 | */ | 778 | */ |
704 | static int xs_send_rm_and_kvec(struct socket *sock, struct xdr_buf *xdr, | 779 | static int xs_send_rm_and_kvec(struct socket *sock, struct msghdr *msg, |
705 | unsigned int remainder) | 780 | rpc_fraghdr marker, struct kvec *vec, size_t base) |
706 | { | 781 | { |
707 | struct msghdr msg = { | ||
708 | .msg_flags = XS_SENDMSG_FLAGS | (remainder ? MSG_MORE : 0) | ||
709 | }; | ||
710 | rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | | ||
711 | (u32)xdr->len); | ||
712 | struct kvec iov[2] = { | 782 | struct kvec iov[2] = { |
713 | { | 783 | [0] = { |
714 | .iov_base = &marker, | 784 | .iov_base = &marker, |
715 | .iov_len = sizeof(marker) | 785 | .iov_len = sizeof(marker) |
716 | }, | 786 | }, |
717 | { | 787 | [1] = *vec, |
718 | .iov_base = xdr->head[0].iov_base, | ||
719 | .iov_len = xdr->head[0].iov_len | ||
720 | }, | ||
721 | }; | ||
722 | int ret; | ||
723 | |||
724 | ret = kernel_sendmsg(sock, &msg, iov, 2, | ||
725 | iov[0].iov_len + iov[1].iov_len); | ||
726 | if (ret < 0) | ||
727 | return ret; | ||
728 | if (ret < iov[0].iov_len) | ||
729 | return -EPIPE; | ||
730 | return ret - iov[0].iov_len; | ||
731 | } | ||
732 | |||
733 | static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more) | ||
734 | { | ||
735 | struct msghdr msg = { | ||
736 | .msg_name = addr, | ||
737 | .msg_namelen = addrlen, | ||
738 | .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0), | ||
739 | }; | ||
740 | struct kvec iov = { | ||
741 | .iov_base = vec->iov_base + base, | ||
742 | .iov_len = vec->iov_len - base, | ||
743 | }; | 788 | }; |
789 | size_t len = iov[0].iov_len + iov[1].iov_len; | ||
744 | 790 | ||
745 | if (iov.iov_len != 0) | 791 | iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len); |
746 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | 792 | return xs_sendmsg(sock, msg, base); |
747 | return kernel_sendmsg(sock, &msg, NULL, 0, 0); | ||
748 | } | ||
749 | |||
750 | static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p) | ||
751 | { | ||
752 | ssize_t (*do_sendpage)(struct socket *sock, struct page *page, | ||
753 | int offset, size_t size, int flags); | ||
754 | struct page **ppage; | ||
755 | unsigned int remainder; | ||
756 | int err; | ||
757 | |||
758 | remainder = xdr->page_len - base; | ||
759 | base += xdr->page_base; | ||
760 | ppage = xdr->pages + (base >> PAGE_SHIFT); | ||
761 | base &= ~PAGE_MASK; | ||
762 | do_sendpage = sock->ops->sendpage; | ||
763 | if (!zerocopy) | ||
764 | do_sendpage = sock_no_sendpage; | ||
765 | for(;;) { | ||
766 | unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder); | ||
767 | int flags = XS_SENDMSG_FLAGS; | ||
768 | |||
769 | remainder -= len; | ||
770 | if (more) | ||
771 | flags |= MSG_MORE; | ||
772 | if (remainder != 0) | ||
773 | flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE; | ||
774 | err = do_sendpage(sock, *ppage, base, len, flags); | ||
775 | if (remainder == 0 || err != len) | ||
776 | break; | ||
777 | *sent_p += err; | ||
778 | ppage++; | ||
779 | base = 0; | ||
780 | } | ||
781 | if (err > 0) { | ||
782 | *sent_p += err; | ||
783 | err = 0; | ||
784 | } | ||
785 | return err; | ||
786 | } | 793 | } |
787 | 794 | ||
788 | /** | 795 | /** |
@@ -792,53 +799,60 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i | |||
792 | * @addrlen: UDP only -- length of destination address | 799 | * @addrlen: UDP only -- length of destination address |
793 | * @xdr: buffer containing this request | 800 | * @xdr: buffer containing this request |
794 | * @base: starting position in the buffer | 801 | * @base: starting position in the buffer |
795 | * @zerocopy: true if it is safe to use sendpage() | 802 | * @rm: stream record marker field |
796 | * @sent_p: return the total number of bytes successfully queued for sending | 803 | * @sent_p: return the total number of bytes successfully queued for sending |
797 | * | 804 | * |
798 | */ | 805 | */ |
799 | static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p) | 806 | static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, rpc_fraghdr rm, int *sent_p) |
800 | { | 807 | { |
801 | unsigned int remainder = xdr->len - base; | 808 | struct msghdr msg = { |
809 | .msg_name = addr, | ||
810 | .msg_namelen = addrlen, | ||
811 | .msg_flags = XS_SENDMSG_FLAGS | MSG_MORE, | ||
812 | }; | ||
813 | unsigned int rmsize = rm ? sizeof(rm) : 0; | ||
814 | unsigned int remainder = rmsize + xdr->len - base; | ||
815 | unsigned int want; | ||
802 | int err = 0; | 816 | int err = 0; |
803 | int sent = 0; | ||
804 | 817 | ||
805 | if (unlikely(!sock)) | 818 | if (unlikely(!sock)) |
806 | return -ENOTSOCK; | 819 | return -ENOTSOCK; |
807 | 820 | ||
808 | if (base != 0) { | 821 | want = xdr->head[0].iov_len + rmsize; |
809 | addr = NULL; | 822 | if (base < want) { |
810 | addrlen = 0; | 823 | unsigned int len = want - base; |
811 | } | ||
812 | |||
813 | if (base < xdr->head[0].iov_len || addr != NULL) { | ||
814 | unsigned int len = xdr->head[0].iov_len - base; | ||
815 | remainder -= len; | 824 | remainder -= len; |
816 | if (!base && !addr) | 825 | if (remainder == 0) |
817 | err = xs_send_rm_and_kvec(sock, xdr, remainder); | 826 | msg.msg_flags &= ~MSG_MORE; |
827 | if (rmsize) | ||
828 | err = xs_send_rm_and_kvec(sock, &msg, rm, | ||
829 | &xdr->head[0], base); | ||
818 | else | 830 | else |
819 | err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], | 831 | err = xs_send_kvec(sock, &msg, &xdr->head[0], base); |
820 | base, remainder != 0); | ||
821 | if (remainder == 0 || err != len) | 832 | if (remainder == 0 || err != len) |
822 | goto out; | 833 | goto out; |
823 | *sent_p += err; | 834 | *sent_p += err; |
824 | base = 0; | 835 | base = 0; |
825 | } else | 836 | } else |
826 | base -= xdr->head[0].iov_len; | 837 | base -= want; |
827 | 838 | ||
828 | if (base < xdr->page_len) { | 839 | if (base < xdr->page_len) { |
829 | unsigned int len = xdr->page_len - base; | 840 | unsigned int len = xdr->page_len - base; |
830 | remainder -= len; | 841 | remainder -= len; |
831 | err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent); | 842 | if (remainder == 0) |
832 | *sent_p += sent; | 843 | msg.msg_flags &= ~MSG_MORE; |
833 | if (remainder == 0 || sent != len) | 844 | err = xs_send_pagedata(sock, &msg, xdr, base); |
845 | if (remainder == 0 || err != len) | ||
834 | goto out; | 846 | goto out; |
847 | *sent_p += err; | ||
835 | base = 0; | 848 | base = 0; |
836 | } else | 849 | } else |
837 | base -= xdr->page_len; | 850 | base -= xdr->page_len; |
838 | 851 | ||
839 | if (base >= xdr->tail[0].iov_len) | 852 | if (base >= xdr->tail[0].iov_len) |
840 | return 0; | 853 | return 0; |
841 | err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0); | 854 | msg.msg_flags &= ~MSG_MORE; |
855 | err = xs_send_kvec(sock, &msg, &xdr->tail[0], base); | ||
842 | out: | 856 | out: |
843 | if (err > 0) { | 857 | if (err > 0) { |
844 | *sent_p += err; | 858 | *sent_p += err; |
@@ -907,6 +921,17 @@ xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req) | |||
907 | return transport->xmit.offset != 0 && req->rq_bytes_sent == 0; | 921 | return transport->xmit.offset != 0 && req->rq_bytes_sent == 0; |
908 | } | 922 | } |
909 | 923 | ||
924 | /* | ||
925 | * Return the stream record marker field for a record of length < 2^31-1 | ||
926 | */ | ||
927 | static rpc_fraghdr | ||
928 | xs_stream_record_marker(struct xdr_buf *xdr) | ||
929 | { | ||
930 | if (!xdr->len) | ||
931 | return 0; | ||
932 | return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len); | ||
933 | } | ||
934 | |||
910 | /** | 935 | /** |
911 | * xs_local_send_request - write an RPC request to an AF_LOCAL socket | 936 | * xs_local_send_request - write an RPC request to an AF_LOCAL socket |
912 | * @req: pointer to RPC request | 937 | * @req: pointer to RPC request |
@@ -939,7 +964,8 @@ static int xs_local_send_request(struct rpc_rqst *req) | |||
939 | req->rq_xtime = ktime_get(); | 964 | req->rq_xtime = ktime_get(); |
940 | status = xs_sendpages(transport->sock, NULL, 0, xdr, | 965 | status = xs_sendpages(transport->sock, NULL, 0, xdr, |
941 | transport->xmit.offset, | 966 | transport->xmit.offset, |
942 | true, &sent); | 967 | xs_stream_record_marker(xdr), |
968 | &sent); | ||
943 | dprintk("RPC: %s(%u) = %d\n", | 969 | dprintk("RPC: %s(%u) = %d\n", |
944 | __func__, xdr->len - transport->xmit.offset, status); | 970 | __func__, xdr->len - transport->xmit.offset, status); |
945 | 971 | ||
@@ -951,7 +977,6 @@ static int xs_local_send_request(struct rpc_rqst *req) | |||
951 | req->rq_bytes_sent = transport->xmit.offset; | 977 | req->rq_bytes_sent = transport->xmit.offset; |
952 | if (likely(req->rq_bytes_sent >= req->rq_slen)) { | 978 | if (likely(req->rq_bytes_sent >= req->rq_slen)) { |
953 | req->rq_xmit_bytes_sent += transport->xmit.offset; | 979 | req->rq_xmit_bytes_sent += transport->xmit.offset; |
954 | req->rq_bytes_sent = 0; | ||
955 | transport->xmit.offset = 0; | 980 | transport->xmit.offset = 0; |
956 | return 0; | 981 | return 0; |
957 | } | 982 | } |
@@ -1007,7 +1032,7 @@ static int xs_udp_send_request(struct rpc_rqst *req) | |||
1007 | 1032 | ||
1008 | req->rq_xtime = ktime_get(); | 1033 | req->rq_xtime = ktime_get(); |
1009 | status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen, | 1034 | status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen, |
1010 | xdr, 0, true, &sent); | 1035 | xdr, 0, 0, &sent); |
1011 | 1036 | ||
1012 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", | 1037 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", |
1013 | xdr->len, status); | 1038 | xdr->len, status); |
@@ -1071,7 +1096,6 @@ static int xs_tcp_send_request(struct rpc_rqst *req) | |||
1071 | struct rpc_xprt *xprt = req->rq_xprt; | 1096 | struct rpc_xprt *xprt = req->rq_xprt; |
1072 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 1097 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1073 | struct xdr_buf *xdr = &req->rq_snd_buf; | 1098 | struct xdr_buf *xdr = &req->rq_snd_buf; |
1074 | bool zerocopy = true; | ||
1075 | bool vm_wait = false; | 1099 | bool vm_wait = false; |
1076 | int status; | 1100 | int status; |
1077 | int sent; | 1101 | int sent; |
@@ -1086,12 +1110,6 @@ static int xs_tcp_send_request(struct rpc_rqst *req) | |||
1086 | xs_pktdump("packet data:", | 1110 | xs_pktdump("packet data:", |
1087 | req->rq_svec->iov_base, | 1111 | req->rq_svec->iov_base, |
1088 | req->rq_svec->iov_len); | 1112 | req->rq_svec->iov_len); |
1089 | /* Don't use zero copy if this is a resend. If the RPC call | ||
1090 | * completes while the socket holds a reference to the pages, | ||
1091 | * then we may end up resending corrupted data. | ||
1092 | */ | ||
1093 | if (req->rq_task->tk_flags & RPC_TASK_SENT) | ||
1094 | zerocopy = false; | ||
1095 | 1113 | ||
1096 | if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) | 1114 | if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) |
1097 | xs_tcp_set_socket_timeouts(xprt, transport->sock); | 1115 | xs_tcp_set_socket_timeouts(xprt, transport->sock); |
@@ -1104,7 +1122,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req) | |||
1104 | sent = 0; | 1122 | sent = 0; |
1105 | status = xs_sendpages(transport->sock, NULL, 0, xdr, | 1123 | status = xs_sendpages(transport->sock, NULL, 0, xdr, |
1106 | transport->xmit.offset, | 1124 | transport->xmit.offset, |
1107 | zerocopy, &sent); | 1125 | xs_stream_record_marker(xdr), |
1126 | &sent); | ||
1108 | 1127 | ||
1109 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", | 1128 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", |
1110 | xdr->len - transport->xmit.offset, status); | 1129 | xdr->len - transport->xmit.offset, status); |
@@ -1115,7 +1134,6 @@ static int xs_tcp_send_request(struct rpc_rqst *req) | |||
1115 | req->rq_bytes_sent = transport->xmit.offset; | 1134 | req->rq_bytes_sent = transport->xmit.offset; |
1116 | if (likely(req->rq_bytes_sent >= req->rq_slen)) { | 1135 | if (likely(req->rq_bytes_sent >= req->rq_slen)) { |
1117 | req->rq_xmit_bytes_sent += transport->xmit.offset; | 1136 | req->rq_xmit_bytes_sent += transport->xmit.offset; |
1118 | req->rq_bytes_sent = 0; | ||
1119 | transport->xmit.offset = 0; | 1137 | transport->xmit.offset = 0; |
1120 | return 0; | 1138 | return 0; |
1121 | } | 1139 | } |
@@ -1255,6 +1273,8 @@ static void xs_reset_transport(struct sock_xprt *transport) | |||
1255 | xprt_clear_connected(xprt); | 1273 | xprt_clear_connected(xprt); |
1256 | write_unlock_bh(&sk->sk_callback_lock); | 1274 | write_unlock_bh(&sk->sk_callback_lock); |
1257 | xs_sock_reset_connection_flags(xprt); | 1275 | xs_sock_reset_connection_flags(xprt); |
1276 | /* Reset stream record info */ | ||
1277 | xs_stream_reset_connect(transport); | ||
1258 | mutex_unlock(&transport->recv_mutex); | 1278 | mutex_unlock(&transport->recv_mutex); |
1259 | 1279 | ||
1260 | trace_rpc_socket_close(xprt, sock); | 1280 | trace_rpc_socket_close(xprt, sock); |
@@ -1382,7 +1402,6 @@ static void xs_udp_data_receive(struct sock_xprt *transport) | |||
1382 | int err; | 1402 | int err; |
1383 | 1403 | ||
1384 | mutex_lock(&transport->recv_mutex); | 1404 | mutex_lock(&transport->recv_mutex); |
1385 | clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); | ||
1386 | sk = transport->inet; | 1405 | sk = transport->inet; |
1387 | if (sk == NULL) | 1406 | if (sk == NULL) |
1388 | goto out; | 1407 | goto out; |
@@ -1394,6 +1413,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport) | |||
1394 | consume_skb(skb); | 1413 | consume_skb(skb); |
1395 | cond_resched(); | 1414 | cond_resched(); |
1396 | } | 1415 | } |
1416 | xs_poll_check_readable(transport); | ||
1397 | out: | 1417 | out: |
1398 | mutex_unlock(&transport->recv_mutex); | 1418 | mutex_unlock(&transport->recv_mutex); |
1399 | } | 1419 | } |
@@ -1402,7 +1422,10 @@ static void xs_udp_data_receive_workfn(struct work_struct *work) | |||
1402 | { | 1422 | { |
1403 | struct sock_xprt *transport = | 1423 | struct sock_xprt *transport = |
1404 | container_of(work, struct sock_xprt, recv_worker); | 1424 | container_of(work, struct sock_xprt, recv_worker); |
1425 | unsigned int pflags = memalloc_nofs_save(); | ||
1426 | |||
1405 | xs_udp_data_receive(transport); | 1427 | xs_udp_data_receive(transport); |
1428 | memalloc_nofs_restore(pflags); | ||
1406 | } | 1429 | } |
1407 | 1430 | ||
1408 | /** | 1431 | /** |
@@ -1893,7 +1916,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, | |||
1893 | sk->sk_write_space = xs_udp_write_space; | 1916 | sk->sk_write_space = xs_udp_write_space; |
1894 | sock_set_flag(sk, SOCK_FASYNC); | 1917 | sock_set_flag(sk, SOCK_FASYNC); |
1895 | sk->sk_error_report = xs_error_report; | 1918 | sk->sk_error_report = xs_error_report; |
1896 | sk->sk_allocation = GFP_NOIO; | ||
1897 | 1919 | ||
1898 | xprt_clear_connected(xprt); | 1920 | xprt_clear_connected(xprt); |
1899 | 1921 | ||
@@ -1904,7 +1926,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, | |||
1904 | write_unlock_bh(&sk->sk_callback_lock); | 1926 | write_unlock_bh(&sk->sk_callback_lock); |
1905 | } | 1927 | } |
1906 | 1928 | ||
1907 | xs_stream_reset_connect(transport); | 1929 | xs_stream_start_connect(transport); |
1908 | 1930 | ||
1909 | return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); | 1931 | return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); |
1910 | } | 1932 | } |
@@ -2081,7 +2103,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2081 | sk->sk_data_ready = xs_data_ready; | 2103 | sk->sk_data_ready = xs_data_ready; |
2082 | sk->sk_write_space = xs_udp_write_space; | 2104 | sk->sk_write_space = xs_udp_write_space; |
2083 | sock_set_flag(sk, SOCK_FASYNC); | 2105 | sock_set_flag(sk, SOCK_FASYNC); |
2084 | sk->sk_allocation = GFP_NOIO; | ||
2085 | 2106 | ||
2086 | xprt_set_connected(xprt); | 2107 | xprt_set_connected(xprt); |
2087 | 2108 | ||
@@ -2244,7 +2265,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2244 | sk->sk_write_space = xs_tcp_write_space; | 2265 | sk->sk_write_space = xs_tcp_write_space; |
2245 | sock_set_flag(sk, SOCK_FASYNC); | 2266 | sock_set_flag(sk, SOCK_FASYNC); |
2246 | sk->sk_error_report = xs_error_report; | 2267 | sk->sk_error_report = xs_error_report; |
2247 | sk->sk_allocation = GFP_NOIO; | ||
2248 | 2268 | ||
2249 | /* socket options */ | 2269 | /* socket options */ |
2250 | sock_reset_flag(sk, SOCK_LINGER); | 2270 | sock_reset_flag(sk, SOCK_LINGER); |
@@ -2264,8 +2284,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2264 | 2284 | ||
2265 | xs_set_memalloc(xprt); | 2285 | xs_set_memalloc(xprt); |
2266 | 2286 | ||
2267 | /* Reset TCP record info */ | 2287 | xs_stream_start_connect(transport); |
2268 | xs_stream_reset_connect(transport); | ||
2269 | 2288 | ||
2270 | /* Tell the socket layer to start connecting... */ | 2289 | /* Tell the socket layer to start connecting... */ |
2271 | set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); | 2290 | set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 2792a3cae682..85ad5c0678d0 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1145,7 +1145,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, | |||
1145 | default: | 1145 | default: |
1146 | pr_warn("Dropping received illegal msg type\n"); | 1146 | pr_warn("Dropping received illegal msg type\n"); |
1147 | kfree_skb(skb); | 1147 | kfree_skb(skb); |
1148 | return false; | 1148 | return true; |
1149 | }; | 1149 | }; |
1150 | } | 1150 | } |
1151 | 1151 | ||
@@ -1425,6 +1425,10 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, | |||
1425 | l->rcv_unacked = 0; | 1425 | l->rcv_unacked = 0; |
1426 | } else { | 1426 | } else { |
1427 | /* RESET_MSG or ACTIVATE_MSG */ | 1427 | /* RESET_MSG or ACTIVATE_MSG */ |
1428 | if (mtyp == ACTIVATE_MSG) { | ||
1429 | msg_set_dest_session_valid(hdr, 1); | ||
1430 | msg_set_dest_session(hdr, l->peer_session); | ||
1431 | } | ||
1428 | msg_set_max_pkt(hdr, l->advertised_mtu); | 1432 | msg_set_max_pkt(hdr, l->advertised_mtu); |
1429 | strcpy(data, l->if_name); | 1433 | strcpy(data, l->if_name); |
1430 | msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); | 1434 | msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); |
@@ -1642,6 +1646,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, | |||
1642 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); | 1646 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); |
1643 | break; | 1647 | break; |
1644 | } | 1648 | } |
1649 | |||
1650 | /* If this endpoint was re-created while peer was ESTABLISHING | ||
1651 | * it doesn't know current session number. Force re-synch. | ||
1652 | */ | ||
1653 | if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) && | ||
1654 | l->session != msg_dest_session(hdr)) { | ||
1655 | if (less(l->session, msg_dest_session(hdr))) | ||
1656 | l->session = msg_dest_session(hdr) + 1; | ||
1657 | break; | ||
1658 | } | ||
1659 | |||
1645 | /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ | 1660 | /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ |
1646 | if (mtyp == RESET_MSG || !link_is_up(l)) | 1661 | if (mtyp == RESET_MSG || !link_is_up(l)) |
1647 | rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); | 1662 | rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); |
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index a0924956bb61..d7e4b8b93f9d 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -360,6 +360,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n) | |||
360 | msg_set_bits(m, 1, 0, 0xffff, n); | 360 | msg_set_bits(m, 1, 0, 0xffff, n); |
361 | } | 361 | } |
362 | 362 | ||
363 | /* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch | ||
364 | * link peer session number | ||
365 | */ | ||
366 | static inline bool msg_dest_session_valid(struct tipc_msg *m) | ||
367 | { | ||
368 | return msg_bits(m, 1, 16, 0x1); | ||
369 | } | ||
370 | |||
371 | static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid) | ||
372 | { | ||
373 | msg_set_bits(m, 1, 16, 0x1, valid); | ||
374 | } | ||
375 | |||
376 | static inline u16 msg_dest_session(struct tipc_msg *m) | ||
377 | { | ||
378 | return msg_bits(m, 1, 0, 0xffff); | ||
379 | } | ||
380 | |||
381 | static inline void msg_set_dest_session(struct tipc_msg *m, u16 n) | ||
382 | { | ||
383 | msg_set_bits(m, 1, 0, 0xffff, n); | ||
384 | } | ||
363 | 385 | ||
364 | /* | 386 | /* |
365 | * Word 2 | 387 | * Word 2 |
diff --git a/net/tipc/node.c b/net/tipc/node.c index db2a6c3e0be9..2dc4919ab23c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -830,15 +830,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) | |||
830 | tipc_node_write_lock(n); | 830 | tipc_node_write_lock(n); |
831 | if (!tipc_link_is_establishing(l)) { | 831 | if (!tipc_link_is_establishing(l)) { |
832 | __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); | 832 | __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); |
833 | if (delete) { | ||
834 | kfree(l); | ||
835 | le->link = NULL; | ||
836 | n->link_cnt--; | ||
837 | } | ||
838 | } else { | 833 | } else { |
839 | /* Defuse pending tipc_node_link_up() */ | 834 | /* Defuse pending tipc_node_link_up() */ |
835 | tipc_link_reset(l); | ||
840 | tipc_link_fsm_evt(l, LINK_RESET_EVT); | 836 | tipc_link_fsm_evt(l, LINK_RESET_EVT); |
841 | } | 837 | } |
838 | if (delete) { | ||
839 | kfree(l); | ||
840 | le->link = NULL; | ||
841 | n->link_cnt--; | ||
842 | } | ||
842 | trace_tipc_node_link_down(n, true, "node link down or deleted!"); | 843 | trace_tipc_node_link_down(n, true, "node link down or deleted!"); |
843 | tipc_node_write_unlock(n); | 844 | tipc_node_write_unlock(n); |
844 | if (delete) | 845 | if (delete) |
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index c361ce782412..c3d5ab01fba7 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c | |||
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work) | |||
1651 | 1651 | ||
1652 | static void vmci_transport_destruct(struct vsock_sock *vsk) | 1652 | static void vmci_transport_destruct(struct vsock_sock *vsk) |
1653 | { | 1653 | { |
1654 | /* transport can be NULL if we hit a failure at init() time */ | ||
1655 | if (!vmci_trans(vsk)) | ||
1656 | return; | ||
1657 | |||
1654 | /* Ensure that the detach callback doesn't use the sk/vsk | 1658 | /* Ensure that the detach callback doesn't use the sk/vsk |
1655 | * we are about to destruct. | 1659 | * we are about to destruct. |
1656 | */ | 1660 | */ |
diff --git a/net/wireless/core.c b/net/wireless/core.c index 623dfe5e211c..b36ad8efb5e5 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -1068,6 +1068,8 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) | |||
1068 | 1068 | ||
1069 | ASSERT_RTNL(); | 1069 | ASSERT_RTNL(); |
1070 | 1070 | ||
1071 | flush_work(&wdev->pmsr_free_wk); | ||
1072 | |||
1071 | nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); | 1073 | nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); |
1072 | 1074 | ||
1073 | list_del_rcu(&wdev->list); | 1075 | list_del_rcu(&wdev->list); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 74150ad95823..d91a408db113 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -250,7 +250,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { | |||
250 | [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = | 250 | [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = |
251 | NLA_POLICY_MAX(NLA_U8, 15), | 251 | NLA_POLICY_MAX(NLA_U8, 15), |
252 | [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = | 252 | [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = |
253 | NLA_POLICY_MAX(NLA_U8, 15), | 253 | NLA_POLICY_MAX(NLA_U8, 31), |
254 | [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, | 254 | [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, |
255 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, | 255 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, |
256 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, | 256 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, |
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index de9286703280..0216ab555249 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c | |||
@@ -256,8 +256,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) | |||
256 | if (err) | 256 | if (err) |
257 | goto out_err; | 257 | goto out_err; |
258 | } else { | 258 | } else { |
259 | memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]), | 259 | memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN); |
260 | ETH_ALEN); | ||
261 | memset(req->mac_addr_mask, 0xff, ETH_ALEN); | 260 | memset(req->mac_addr_mask, 0xff, ETH_ALEN); |
262 | } | 261 | } |
263 | 262 | ||
@@ -272,6 +271,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) | |||
272 | 271 | ||
273 | req->n_peers = count; | 272 | req->n_peers = count; |
274 | req->cookie = cfg80211_assign_cookie(rdev); | 273 | req->cookie = cfg80211_assign_cookie(rdev); |
274 | req->nl_portid = info->snd_portid; | ||
275 | 275 | ||
276 | err = rdev_start_pmsr(rdev, wdev, req); | 276 | err = rdev_start_pmsr(rdev, wdev, req); |
277 | if (err) | 277 | if (err) |
@@ -530,14 +530,14 @@ free: | |||
530 | } | 530 | } |
531 | EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); | 531 | EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); |
532 | 532 | ||
533 | void cfg80211_pmsr_free_wk(struct work_struct *work) | 533 | static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev) |
534 | { | 534 | { |
535 | struct wireless_dev *wdev = container_of(work, struct wireless_dev, | ||
536 | pmsr_free_wk); | ||
537 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); | 535 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); |
538 | struct cfg80211_pmsr_request *req, *tmp; | 536 | struct cfg80211_pmsr_request *req, *tmp; |
539 | LIST_HEAD(free_list); | 537 | LIST_HEAD(free_list); |
540 | 538 | ||
539 | lockdep_assert_held(&wdev->mtx); | ||
540 | |||
541 | spin_lock_bh(&wdev->pmsr_lock); | 541 | spin_lock_bh(&wdev->pmsr_lock); |
542 | list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { | 542 | list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { |
543 | if (req->nl_portid) | 543 | if (req->nl_portid) |
@@ -547,14 +547,22 @@ void cfg80211_pmsr_free_wk(struct work_struct *work) | |||
547 | spin_unlock_bh(&wdev->pmsr_lock); | 547 | spin_unlock_bh(&wdev->pmsr_lock); |
548 | 548 | ||
549 | list_for_each_entry_safe(req, tmp, &free_list, list) { | 549 | list_for_each_entry_safe(req, tmp, &free_list, list) { |
550 | wdev_lock(wdev); | ||
551 | rdev_abort_pmsr(rdev, wdev, req); | 550 | rdev_abort_pmsr(rdev, wdev, req); |
552 | wdev_unlock(wdev); | ||
553 | 551 | ||
554 | kfree(req); | 552 | kfree(req); |
555 | } | 553 | } |
556 | } | 554 | } |
557 | 555 | ||
556 | void cfg80211_pmsr_free_wk(struct work_struct *work) | ||
557 | { | ||
558 | struct wireless_dev *wdev = container_of(work, struct wireless_dev, | ||
559 | pmsr_free_wk); | ||
560 | |||
561 | wdev_lock(wdev); | ||
562 | cfg80211_pmsr_process_abort(wdev); | ||
563 | wdev_unlock(wdev); | ||
564 | } | ||
565 | |||
558 | void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) | 566 | void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) |
559 | { | 567 | { |
560 | struct cfg80211_pmsr_request *req; | 568 | struct cfg80211_pmsr_request *req; |
@@ -568,8 +576,8 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) | |||
568 | spin_unlock_bh(&wdev->pmsr_lock); | 576 | spin_unlock_bh(&wdev->pmsr_lock); |
569 | 577 | ||
570 | if (found) | 578 | if (found) |
571 | schedule_work(&wdev->pmsr_free_wk); | 579 | cfg80211_pmsr_process_abort(wdev); |
572 | flush_work(&wdev->pmsr_free_wk); | 580 | |
573 | WARN_ON(!list_empty(&wdev->pmsr_list)); | 581 | WARN_ON(!list_empty(&wdev->pmsr_list)); |
574 | } | 582 | } |
575 | 583 | ||
diff --git a/net/wireless/util.c b/net/wireless/util.c index cd48cdd582c0..ec30e3732c7b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> | 5 | * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> |
6 | * Copyright 2013-2014 Intel Mobile Communications GmbH | 6 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
7 | * Copyright 2017 Intel Deutschland GmbH | 7 | * Copyright 2017 Intel Deutschland GmbH |
8 | * Copyright (C) 2018 Intel Corporation | 8 | * Copyright (C) 2018-2019 Intel Corporation |
9 | */ | 9 | */ |
10 | #include <linux/export.h> | 10 | #include <linux/export.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mpls.h> | 19 | #include <linux/mpls.h> |
20 | #include <linux/gcd.h> | 20 | #include <linux/gcd.h> |
21 | #include <linux/bitfield.h> | 21 | #include <linux/bitfield.h> |
22 | #include <linux/nospec.h> | ||
22 | #include "core.h" | 23 | #include "core.h" |
23 | #include "rdev-ops.h" | 24 | #include "rdev-ops.h" |
24 | 25 | ||
@@ -715,20 +716,25 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
715 | { | 716 | { |
716 | unsigned int dscp; | 717 | unsigned int dscp; |
717 | unsigned char vlan_priority; | 718 | unsigned char vlan_priority; |
719 | unsigned int ret; | ||
718 | 720 | ||
719 | /* skb->priority values from 256->263 are magic values to | 721 | /* skb->priority values from 256->263 are magic values to |
720 | * directly indicate a specific 802.1d priority. This is used | 722 | * directly indicate a specific 802.1d priority. This is used |
721 | * to allow 802.1d priority to be passed directly in from VLAN | 723 | * to allow 802.1d priority to be passed directly in from VLAN |
722 | * tags, etc. | 724 | * tags, etc. |
723 | */ | 725 | */ |
724 | if (skb->priority >= 256 && skb->priority <= 263) | 726 | if (skb->priority >= 256 && skb->priority <= 263) { |
725 | return skb->priority - 256; | 727 | ret = skb->priority - 256; |
728 | goto out; | ||
729 | } | ||
726 | 730 | ||
727 | if (skb_vlan_tag_present(skb)) { | 731 | if (skb_vlan_tag_present(skb)) { |
728 | vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) | 732 | vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) |
729 | >> VLAN_PRIO_SHIFT; | 733 | >> VLAN_PRIO_SHIFT; |
730 | if (vlan_priority > 0) | 734 | if (vlan_priority > 0) { |
731 | return vlan_priority; | 735 | ret = vlan_priority; |
736 | goto out; | ||
737 | } | ||
732 | } | 738 | } |
733 | 739 | ||
734 | switch (skb->protocol) { | 740 | switch (skb->protocol) { |
@@ -747,8 +753,9 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
747 | if (!mpls) | 753 | if (!mpls) |
748 | return 0; | 754 | return 0; |
749 | 755 | ||
750 | return (ntohl(mpls->entry) & MPLS_LS_TC_MASK) | 756 | ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK) |
751 | >> MPLS_LS_TC_SHIFT; | 757 | >> MPLS_LS_TC_SHIFT; |
758 | goto out; | ||
752 | } | 759 | } |
753 | case htons(ETH_P_80221): | 760 | case htons(ETH_P_80221): |
754 | /* 802.21 is always network control traffic */ | 761 | /* 802.21 is always network control traffic */ |
@@ -761,18 +768,24 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
761 | unsigned int i, tmp_dscp = dscp >> 2; | 768 | unsigned int i, tmp_dscp = dscp >> 2; |
762 | 769 | ||
763 | for (i = 0; i < qos_map->num_des; i++) { | 770 | for (i = 0; i < qos_map->num_des; i++) { |
764 | if (tmp_dscp == qos_map->dscp_exception[i].dscp) | 771 | if (tmp_dscp == qos_map->dscp_exception[i].dscp) { |
765 | return qos_map->dscp_exception[i].up; | 772 | ret = qos_map->dscp_exception[i].up; |
773 | goto out; | ||
774 | } | ||
766 | } | 775 | } |
767 | 776 | ||
768 | for (i = 0; i < 8; i++) { | 777 | for (i = 0; i < 8; i++) { |
769 | if (tmp_dscp >= qos_map->up[i].low && | 778 | if (tmp_dscp >= qos_map->up[i].low && |
770 | tmp_dscp <= qos_map->up[i].high) | 779 | tmp_dscp <= qos_map->up[i].high) { |
771 | return i; | 780 | ret = i; |
781 | goto out; | ||
782 | } | ||
772 | } | 783 | } |
773 | } | 784 | } |
774 | 785 | ||
775 | return dscp >> 5; | 786 | ret = dscp >> 5; |
787 | out: | ||
788 | return array_index_nospec(ret, IEEE80211_NUM_TIDS); | ||
776 | } | 789 | } |
777 | EXPORT_SYMBOL(cfg80211_classify8021d); | 790 | EXPORT_SYMBOL(cfg80211_classify8021d); |
778 | 791 | ||
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5121729b8b63..ec3a828672ef 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb) | |||
352 | unsigned int lci = 1; | 352 | unsigned int lci = 1; |
353 | struct sock *sk; | 353 | struct sock *sk; |
354 | 354 | ||
355 | read_lock_bh(&x25_list_lock); | 355 | while ((sk = x25_find_socket(lci, nb)) != NULL) { |
356 | |||
357 | while ((sk = __x25_find_socket(lci, nb)) != NULL) { | ||
358 | sock_put(sk); | 356 | sock_put(sk); |
359 | if (++lci == 4096) { | 357 | if (++lci == 4096) { |
360 | lci = 0; | 358 | lci = 0; |
361 | break; | 359 | break; |
362 | } | 360 | } |
361 | cond_resched(); | ||
363 | } | 362 | } |
364 | 363 | ||
365 | read_unlock_bh(&x25_list_lock); | ||
366 | return lci; | 364 | return lci; |
367 | } | 365 | } |
368 | 366 | ||
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index d4de871e7d4d..37e1fe180769 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
@@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, | |||
125 | return 0; | 125 | return 0; |
126 | 126 | ||
127 | err_unreg_umem: | 127 | err_unreg_umem: |
128 | xdp_clear_umem_at_qid(dev, queue_id); | ||
129 | if (!force_zc) | 128 | if (!force_zc) |
130 | err = 0; /* fallback to copy mode */ | 129 | err = 0; /* fallback to copy mode */ |
130 | if (err) | ||
131 | xdp_clear_umem_at_qid(dev, queue_id); | ||
131 | out_rtnl_unlock: | 132 | out_rtnl_unlock: |
132 | rtnl_unlock(); | 133 | rtnl_unlock(); |
133 | return err; | 134 | return err; |
@@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem) | |||
259 | if (!umem->pgs) | 260 | if (!umem->pgs) |
260 | return -ENOMEM; | 261 | return -ENOMEM; |
261 | 262 | ||
262 | down_write(¤t->mm->mmap_sem); | 263 | down_read(¤t->mm->mmap_sem); |
263 | npgs = get_user_pages(umem->address, umem->npgs, | 264 | npgs = get_user_pages_longterm(umem->address, umem->npgs, |
264 | gup_flags, &umem->pgs[0], NULL); | 265 | gup_flags, &umem->pgs[0], NULL); |
265 | up_write(¤t->mm->mmap_sem); | 266 | up_read(¤t->mm->mmap_sem); |
266 | 267 | ||
267 | if (npgs != umem->npgs) { | 268 | if (npgs != umem->npgs) { |
268 | if (npgs >= 0) { | 269 | if (npgs >= 0) { |
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index a03268454a27..45f3b528dc09 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c | |||
@@ -669,6 +669,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, | |||
669 | if (!umem) | 669 | if (!umem) |
670 | return -EINVAL; | 670 | return -EINVAL; |
671 | 671 | ||
672 | /* Matches the smp_wmb() in XDP_UMEM_REG */ | ||
673 | smp_rmb(); | ||
672 | if (offset == XDP_UMEM_PGOFF_FILL_RING) | 674 | if (offset == XDP_UMEM_PGOFF_FILL_RING) |
673 | q = READ_ONCE(umem->fq); | 675 | q = READ_ONCE(umem->fq); |
674 | else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) | 676 | else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) |
@@ -678,6 +680,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, | |||
678 | if (!q) | 680 | if (!q) |
679 | return -EINVAL; | 681 | return -EINVAL; |
680 | 682 | ||
683 | /* Matches the smp_wmb() in xsk_init_queue */ | ||
684 | smp_rmb(); | ||
681 | qpg = virt_to_head_page(q->ring); | 685 | qpg = virt_to_head_page(q->ring); |
682 | if (size > (PAGE_SIZE << compound_order(qpg))) | 686 | if (size > (PAGE_SIZE << compound_order(qpg))) |
683 | return -EINVAL; | 687 | return -EINVAL; |
diff --git a/security/keys/internal.h b/security/keys/internal.h index 479909b858c7..8f533c81aa8d 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h | |||
@@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm) | |||
186 | return key_task_permission(key_ref, current_cred(), perm); | 186 | return key_task_permission(key_ref, current_cred(), perm); |
187 | } | 187 | } |
188 | 188 | ||
189 | /* | ||
190 | * Authorisation record for request_key(). | ||
191 | */ | ||
192 | struct request_key_auth { | ||
193 | struct key *target_key; | ||
194 | struct key *dest_keyring; | ||
195 | const struct cred *cred; | ||
196 | void *callout_info; | ||
197 | size_t callout_len; | ||
198 | pid_t pid; | ||
199 | } __randomize_layout; | ||
200 | |||
201 | extern struct key_type key_type_request_key_auth; | 189 | extern struct key_type key_type_request_key_auth; |
202 | extern struct key *request_key_auth_new(struct key *target, | 190 | extern struct key *request_key_auth_new(struct key *target, |
191 | const char *op, | ||
203 | const void *callout_info, | 192 | const void *callout_info, |
204 | size_t callout_len, | 193 | size_t callout_len, |
205 | struct key *dest_keyring); | 194 | struct key *dest_keyring); |
diff --git a/security/keys/key.c b/security/keys/key.c index 44a80d6741a1..696f1c092c50 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, | |||
265 | 265 | ||
266 | spin_lock(&user->lock); | 266 | spin_lock(&user->lock); |
267 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { | 267 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { |
268 | if (user->qnkeys + 1 >= maxkeys || | 268 | if (user->qnkeys + 1 > maxkeys || |
269 | user->qnbytes + quotalen >= maxbytes || | 269 | user->qnbytes + quotalen > maxbytes || |
270 | user->qnbytes + quotalen < user->qnbytes) | 270 | user->qnbytes + quotalen < user->qnbytes) |
271 | goto no_quota; | 271 | goto no_quota; |
272 | } | 272 | } |
@@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, | |||
297 | key->gid = gid; | 297 | key->gid = gid; |
298 | key->perm = perm; | 298 | key->perm = perm; |
299 | key->restrict_link = restrict_link; | 299 | key->restrict_link = restrict_link; |
300 | key->last_used_at = ktime_get_real_seconds(); | ||
300 | 301 | ||
301 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) | 302 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) |
302 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; | 303 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; |
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index e8093d025966..7bbe03593e58 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/security.h> | 25 | #include <linux/security.h> |
26 | #include <linux/uio.h> | 26 | #include <linux/uio.h> |
27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
28 | #include <keys/request_key_auth-type.h> | ||
28 | #include "internal.h" | 29 | #include "internal.h" |
29 | 30 | ||
30 | #define KEY_MAX_DESC_SIZE 4096 | 31 | #define KEY_MAX_DESC_SIZE 4096 |
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 02c77e928f68..0e0b9ccad2f8 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/security.h> | 19 | #include <linux/security.h> |
20 | #include <linux/user_namespace.h> | 20 | #include <linux/user_namespace.h> |
21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <keys/request_key_auth-type.h> | ||
22 | #include "internal.h" | 23 | #include "internal.h" |
23 | 24 | ||
24 | /* Session keyring create vs join semaphore */ | 25 | /* Session keyring create vs join semaphore */ |
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 301f0e300dbd..3f56a312dd35 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -18,31 +18,30 @@ | |||
18 | #include <linux/keyctl.h> | 18 | #include <linux/keyctl.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include "internal.h" | 20 | #include "internal.h" |
21 | #include <keys/request_key_auth-type.h> | ||
21 | 22 | ||
22 | #define key_negative_timeout 60 /* default timeout on a negative key's existence */ | 23 | #define key_negative_timeout 60 /* default timeout on a negative key's existence */ |
23 | 24 | ||
24 | /** | 25 | /** |
25 | * complete_request_key - Complete the construction of a key. | 26 | * complete_request_key - Complete the construction of a key. |
26 | * @cons: The key construction record. | 27 | * @auth_key: The authorisation key. |
27 | * @error: The success or failute of the construction. | 28 | * @error: The success or failute of the construction. |
28 | * | 29 | * |
29 | * Complete the attempt to construct a key. The key will be negated | 30 | * Complete the attempt to construct a key. The key will be negated |
30 | * if an error is indicated. The authorisation key will be revoked | 31 | * if an error is indicated. The authorisation key will be revoked |
31 | * unconditionally. | 32 | * unconditionally. |
32 | */ | 33 | */ |
33 | void complete_request_key(struct key_construction *cons, int error) | 34 | void complete_request_key(struct key *authkey, int error) |
34 | { | 35 | { |
35 | kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); | 36 | struct request_key_auth *rka = get_request_key_auth(authkey); |
37 | struct key *key = rka->target_key; | ||
38 | |||
39 | kenter("%d{%d},%d", authkey->serial, key->serial, error); | ||
36 | 40 | ||
37 | if (error < 0) | 41 | if (error < 0) |
38 | key_negate_and_link(cons->key, key_negative_timeout, NULL, | 42 | key_negate_and_link(key, key_negative_timeout, NULL, authkey); |
39 | cons->authkey); | ||
40 | else | 43 | else |
41 | key_revoke(cons->authkey); | 44 | key_revoke(authkey); |
42 | |||
43 | key_put(cons->key); | ||
44 | key_put(cons->authkey); | ||
45 | kfree(cons); | ||
46 | } | 45 | } |
47 | EXPORT_SYMBOL(complete_request_key); | 46 | EXPORT_SYMBOL(complete_request_key); |
48 | 47 | ||
@@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp, | |||
91 | * Request userspace finish the construction of a key | 90 | * Request userspace finish the construction of a key |
92 | * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" | 91 | * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" |
93 | */ | 92 | */ |
94 | static int call_sbin_request_key(struct key_construction *cons, | 93 | static int call_sbin_request_key(struct key *authkey, void *aux) |
95 | const char *op, | ||
96 | void *aux) | ||
97 | { | 94 | { |
98 | static char const request_key[] = "/sbin/request-key"; | 95 | static char const request_key[] = "/sbin/request-key"; |
96 | struct request_key_auth *rka = get_request_key_auth(authkey); | ||
99 | const struct cred *cred = current_cred(); | 97 | const struct cred *cred = current_cred(); |
100 | key_serial_t prkey, sskey; | 98 | key_serial_t prkey, sskey; |
101 | struct key *key = cons->key, *authkey = cons->authkey, *keyring, | 99 | struct key *key = rka->target_key, *keyring, *session; |
102 | *session; | ||
103 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; | 100 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; |
104 | char key_str[12], keyring_str[3][12]; | 101 | char key_str[12], keyring_str[3][12]; |
105 | char desc[20]; | 102 | char desc[20]; |
106 | int ret, i; | 103 | int ret, i; |
107 | 104 | ||
108 | kenter("{%d},{%d},%s", key->serial, authkey->serial, op); | 105 | kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op); |
109 | 106 | ||
110 | ret = install_user_keyrings(); | 107 | ret = install_user_keyrings(); |
111 | if (ret < 0) | 108 | if (ret < 0) |
@@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons, | |||
163 | /* set up the argument list */ | 160 | /* set up the argument list */ |
164 | i = 0; | 161 | i = 0; |
165 | argv[i++] = (char *)request_key; | 162 | argv[i++] = (char *)request_key; |
166 | argv[i++] = (char *) op; | 163 | argv[i++] = (char *)rka->op; |
167 | argv[i++] = key_str; | 164 | argv[i++] = key_str; |
168 | argv[i++] = uid_str; | 165 | argv[i++] = uid_str; |
169 | argv[i++] = gid_str; | 166 | argv[i++] = gid_str; |
@@ -191,7 +188,7 @@ error_link: | |||
191 | key_put(keyring); | 188 | key_put(keyring); |
192 | 189 | ||
193 | error_alloc: | 190 | error_alloc: |
194 | complete_request_key(cons, ret); | 191 | complete_request_key(authkey, ret); |
195 | kleave(" = %d", ret); | 192 | kleave(" = %d", ret); |
196 | return ret; | 193 | return ret; |
197 | } | 194 | } |
@@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info, | |||
205 | size_t callout_len, void *aux, | 202 | size_t callout_len, void *aux, |
206 | struct key *dest_keyring) | 203 | struct key *dest_keyring) |
207 | { | 204 | { |
208 | struct key_construction *cons; | ||
209 | request_key_actor_t actor; | 205 | request_key_actor_t actor; |
210 | struct key *authkey; | 206 | struct key *authkey; |
211 | int ret; | 207 | int ret; |
212 | 208 | ||
213 | kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); | 209 | kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); |
214 | 210 | ||
215 | cons = kmalloc(sizeof(*cons), GFP_KERNEL); | ||
216 | if (!cons) | ||
217 | return -ENOMEM; | ||
218 | |||
219 | /* allocate an authorisation key */ | 211 | /* allocate an authorisation key */ |
220 | authkey = request_key_auth_new(key, callout_info, callout_len, | 212 | authkey = request_key_auth_new(key, "create", callout_info, callout_len, |
221 | dest_keyring); | 213 | dest_keyring); |
222 | if (IS_ERR(authkey)) { | 214 | if (IS_ERR(authkey)) |
223 | kfree(cons); | 215 | return PTR_ERR(authkey); |
224 | ret = PTR_ERR(authkey); | ||
225 | authkey = NULL; | ||
226 | } else { | ||
227 | cons->authkey = key_get(authkey); | ||
228 | cons->key = key_get(key); | ||
229 | 216 | ||
230 | /* make the call */ | 217 | /* Make the call */ |
231 | actor = call_sbin_request_key; | 218 | actor = call_sbin_request_key; |
232 | if (key->type->request_key) | 219 | if (key->type->request_key) |
233 | actor = key->type->request_key; | 220 | actor = key->type->request_key; |
234 | 221 | ||
235 | ret = actor(cons, "create", aux); | 222 | ret = actor(authkey, aux); |
236 | 223 | ||
237 | /* check that the actor called complete_request_key() prior to | 224 | /* check that the actor called complete_request_key() prior to |
238 | * returning an error */ | 225 | * returning an error */ |
239 | WARN_ON(ret < 0 && | 226 | WARN_ON(ret < 0 && |
240 | !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); | 227 | !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); |
241 | key_put(authkey); | ||
242 | } | ||
243 | 228 | ||
229 | key_put(authkey); | ||
244 | kleave(" = %d", ret); | 230 | kleave(" = %d", ret); |
245 | return ret; | 231 | return ret; |
246 | } | 232 | } |
@@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring) | |||
275 | if (cred->request_key_auth) { | 261 | if (cred->request_key_auth) { |
276 | authkey = cred->request_key_auth; | 262 | authkey = cred->request_key_auth; |
277 | down_read(&authkey->sem); | 263 | down_read(&authkey->sem); |
278 | rka = authkey->payload.data[0]; | 264 | rka = get_request_key_auth(authkey); |
279 | if (!test_bit(KEY_FLAG_REVOKED, | 265 | if (!test_bit(KEY_FLAG_REVOKED, |
280 | &authkey->flags)) | 266 | &authkey->flags)) |
281 | dest_keyring = | 267 | dest_keyring = |
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 87ea2f54dedc..afc304e8b61e 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
19 | #include "internal.h" | 19 | #include "internal.h" |
20 | #include <keys/user-type.h> | 20 | #include <keys/request_key_auth-type.h> |
21 | 21 | ||
22 | static int request_key_auth_preparse(struct key_preparsed_payload *); | 22 | static int request_key_auth_preparse(struct key_preparsed_payload *); |
23 | static void request_key_auth_free_preparse(struct key_preparsed_payload *); | 23 | static void request_key_auth_free_preparse(struct key_preparsed_payload *); |
@@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key, | |||
68 | static void request_key_auth_describe(const struct key *key, | 68 | static void request_key_auth_describe(const struct key *key, |
69 | struct seq_file *m) | 69 | struct seq_file *m) |
70 | { | 70 | { |
71 | struct request_key_auth *rka = key->payload.data[0]; | 71 | struct request_key_auth *rka = get_request_key_auth(key); |
72 | 72 | ||
73 | seq_puts(m, "key:"); | 73 | seq_puts(m, "key:"); |
74 | seq_puts(m, key->description); | 74 | seq_puts(m, key->description); |
@@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key, | |||
83 | static long request_key_auth_read(const struct key *key, | 83 | static long request_key_auth_read(const struct key *key, |
84 | char __user *buffer, size_t buflen) | 84 | char __user *buffer, size_t buflen) |
85 | { | 85 | { |
86 | struct request_key_auth *rka = key->payload.data[0]; | 86 | struct request_key_auth *rka = get_request_key_auth(key); |
87 | size_t datalen; | 87 | size_t datalen; |
88 | long ret; | 88 | long ret; |
89 | 89 | ||
@@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key, | |||
109 | */ | 109 | */ |
110 | static void request_key_auth_revoke(struct key *key) | 110 | static void request_key_auth_revoke(struct key *key) |
111 | { | 111 | { |
112 | struct request_key_auth *rka = key->payload.data[0]; | 112 | struct request_key_auth *rka = get_request_key_auth(key); |
113 | 113 | ||
114 | kenter("{%d}", key->serial); | 114 | kenter("{%d}", key->serial); |
115 | 115 | ||
@@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka) | |||
136 | */ | 136 | */ |
137 | static void request_key_auth_destroy(struct key *key) | 137 | static void request_key_auth_destroy(struct key *key) |
138 | { | 138 | { |
139 | struct request_key_auth *rka = key->payload.data[0]; | 139 | struct request_key_auth *rka = get_request_key_auth(key); |
140 | 140 | ||
141 | kenter("{%d}", key->serial); | 141 | kenter("{%d}", key->serial); |
142 | 142 | ||
@@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key) | |||
147 | * Create an authorisation token for /sbin/request-key or whoever to gain | 147 | * Create an authorisation token for /sbin/request-key or whoever to gain |
148 | * access to the caller's security data. | 148 | * access to the caller's security data. |
149 | */ | 149 | */ |
150 | struct key *request_key_auth_new(struct key *target, const void *callout_info, | 150 | struct key *request_key_auth_new(struct key *target, const char *op, |
151 | size_t callout_len, struct key *dest_keyring) | 151 | const void *callout_info, size_t callout_len, |
152 | struct key *dest_keyring) | ||
152 | { | 153 | { |
153 | struct request_key_auth *rka, *irka; | 154 | struct request_key_auth *rka, *irka; |
154 | const struct cred *cred = current->cred; | 155 | const struct cred *cred = current->cred; |
@@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, | |||
166 | if (!rka->callout_info) | 167 | if (!rka->callout_info) |
167 | goto error_free_rka; | 168 | goto error_free_rka; |
168 | rka->callout_len = callout_len; | 169 | rka->callout_len = callout_len; |
170 | strlcpy(rka->op, op, sizeof(rka->op)); | ||
169 | 171 | ||
170 | /* see if the calling process is already servicing the key request of | 172 | /* see if the calling process is already servicing the key request of |
171 | * another process */ | 173 | * another process */ |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 6c99fa8ac5fa..6c0b30391ba9 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -2112,13 +2112,6 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream, | |||
2112 | return 0; | 2112 | return 0; |
2113 | } | 2113 | } |
2114 | 2114 | ||
2115 | /* allow waiting for a capture stream that hasn't been started */ | ||
2116 | #if IS_ENABLED(CONFIG_SND_PCM_OSS) | ||
2117 | #define wait_capture_start(substream) ((substream)->oss.oss) | ||
2118 | #else | ||
2119 | #define wait_capture_start(substream) false | ||
2120 | #endif | ||
2121 | |||
2122 | /* the common loop for read/write data */ | 2115 | /* the common loop for read/write data */ |
2123 | snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, | 2116 | snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, |
2124 | void *data, bool interleaved, | 2117 | void *data, bool interleaved, |
@@ -2184,16 +2177,11 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, | |||
2184 | snd_pcm_update_hw_ptr(substream); | 2177 | snd_pcm_update_hw_ptr(substream); |
2185 | 2178 | ||
2186 | if (!is_playback && | 2179 | if (!is_playback && |
2187 | runtime->status->state == SNDRV_PCM_STATE_PREPARED) { | 2180 | runtime->status->state == SNDRV_PCM_STATE_PREPARED && |
2188 | if (size >= runtime->start_threshold) { | 2181 | size >= runtime->start_threshold) { |
2189 | err = snd_pcm_start(substream); | 2182 | err = snd_pcm_start(substream); |
2190 | if (err < 0) | 2183 | if (err < 0) |
2191 | goto _end_unlock; | ||
2192 | } else if (!wait_capture_start(substream)) { | ||
2193 | /* nothing to do */ | ||
2194 | err = 0; | ||
2195 | goto _end_unlock; | 2184 | goto _end_unlock; |
2196 | } | ||
2197 | } | 2185 | } |
2198 | 2186 | ||
2199 | avail = snd_pcm_avail(substream); | 2187 | avail = snd_pcm_avail(substream); |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 152f54137082..a4ee7656d9ee 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | |||
924 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), | 924 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), |
925 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), | 925 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), |
926 | SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), | 926 | SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), |
927 | SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK), | ||
927 | SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), | 928 | SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), |
928 | SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), | 929 | SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), |
929 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), | 930 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6df758adff84..1ffa36e987b4 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1855,6 +1855,8 @@ enum { | |||
1855 | ALC887_FIXUP_BASS_CHMAP, | 1855 | ALC887_FIXUP_BASS_CHMAP, |
1856 | ALC1220_FIXUP_GB_DUAL_CODECS, | 1856 | ALC1220_FIXUP_GB_DUAL_CODECS, |
1857 | ALC1220_FIXUP_CLEVO_P950, | 1857 | ALC1220_FIXUP_CLEVO_P950, |
1858 | ALC1220_FIXUP_SYSTEM76_ORYP5, | ||
1859 | ALC1220_FIXUP_SYSTEM76_ORYP5_PINS, | ||
1858 | }; | 1860 | }; |
1859 | 1861 | ||
1860 | static void alc889_fixup_coef(struct hda_codec *codec, | 1862 | static void alc889_fixup_coef(struct hda_codec *codec, |
@@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, | |||
2056 | snd_hda_override_conn_list(codec, 0x1b, 1, conn1); | 2058 | snd_hda_override_conn_list(codec, 0x1b, 1, conn1); |
2057 | } | 2059 | } |
2058 | 2060 | ||
2061 | static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, | ||
2062 | const struct hda_fixup *fix, int action); | ||
2063 | |||
2064 | static void alc1220_fixup_system76_oryp5(struct hda_codec *codec, | ||
2065 | const struct hda_fixup *fix, | ||
2066 | int action) | ||
2067 | { | ||
2068 | alc1220_fixup_clevo_p950(codec, fix, action); | ||
2069 | alc_fixup_headset_mode_no_hp_mic(codec, fix, action); | ||
2070 | } | ||
2071 | |||
2059 | static const struct hda_fixup alc882_fixups[] = { | 2072 | static const struct hda_fixup alc882_fixups[] = { |
2060 | [ALC882_FIXUP_ABIT_AW9D_MAX] = { | 2073 | [ALC882_FIXUP_ABIT_AW9D_MAX] = { |
2061 | .type = HDA_FIXUP_PINS, | 2074 | .type = HDA_FIXUP_PINS, |
@@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = { | |||
2300 | .type = HDA_FIXUP_FUNC, | 2313 | .type = HDA_FIXUP_FUNC, |
2301 | .v.func = alc1220_fixup_clevo_p950, | 2314 | .v.func = alc1220_fixup_clevo_p950, |
2302 | }, | 2315 | }, |
2316 | [ALC1220_FIXUP_SYSTEM76_ORYP5] = { | ||
2317 | .type = HDA_FIXUP_FUNC, | ||
2318 | .v.func = alc1220_fixup_system76_oryp5, | ||
2319 | }, | ||
2320 | [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = { | ||
2321 | .type = HDA_FIXUP_PINS, | ||
2322 | .v.pins = (const struct hda_pintbl[]) { | ||
2323 | { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ | ||
2324 | {} | ||
2325 | }, | ||
2326 | .chained = true, | ||
2327 | .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5, | ||
2328 | }, | ||
2303 | }; | 2329 | }; |
2304 | 2330 | ||
2305 | static const struct snd_pci_quirk alc882_fixup_tbl[] = { | 2331 | static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
@@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
2376 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), | 2402 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), |
2377 | SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), | 2403 | SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), |
2378 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), | 2404 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), |
2405 | SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), | ||
2406 | SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), | ||
2379 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2407 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
2380 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2408 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
2381 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), | 2409 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), |
@@ -5632,6 +5660,7 @@ enum { | |||
5632 | ALC294_FIXUP_ASUS_SPK, | 5660 | ALC294_FIXUP_ASUS_SPK, |
5633 | ALC225_FIXUP_HEADSET_JACK, | 5661 | ALC225_FIXUP_HEADSET_JACK, |
5634 | ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, | 5662 | ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, |
5663 | ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, | ||
5635 | }; | 5664 | }; |
5636 | 5665 | ||
5637 | static const struct hda_fixup alc269_fixups[] = { | 5666 | static const struct hda_fixup alc269_fixups[] = { |
@@ -6587,6 +6616,17 @@ static const struct hda_fixup alc269_fixups[] = { | |||
6587 | .chained = true, | 6616 | .chained = true, |
6588 | .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC | 6617 | .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC |
6589 | }, | 6618 | }, |
6619 | [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = { | ||
6620 | .type = HDA_FIXUP_VERBS, | ||
6621 | .v.verbs = (const struct hda_verb[]) { | ||
6622 | /* Disable PCBEEP-IN passthrough */ | ||
6623 | { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 }, | ||
6624 | { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 }, | ||
6625 | { } | ||
6626 | }, | ||
6627 | .chained = true, | ||
6628 | .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE | ||
6629 | }, | ||
6590 | }; | 6630 | }; |
6591 | 6631 | ||
6592 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { | 6632 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
@@ -7272,7 +7312,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
7272 | {0x12, 0x90a60130}, | 7312 | {0x12, 0x90a60130}, |
7273 | {0x19, 0x03a11020}, | 7313 | {0x19, 0x03a11020}, |
7274 | {0x21, 0x0321101f}), | 7314 | {0x21, 0x0321101f}), |
7275 | SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, | 7315 | SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, |
7276 | {0x12, 0x90a60130}, | 7316 | {0x12, 0x90a60130}, |
7277 | {0x14, 0x90170110}, | 7317 | {0x14, 0x90170110}, |
7278 | {0x19, 0x04a11040}, | 7318 | {0x19, 0x04a11040}, |
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index d00734d31e04..e5b6769b9797 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c | |||
@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev) | |||
795 | if (hcd->spdif) | 795 | if (hcd->spdif) |
796 | hcp->daidrv[i] = hdmi_spdif_dai; | 796 | hcp->daidrv[i] = hdmi_spdif_dai; |
797 | 797 | ||
798 | dev_set_drvdata(dev, hcp); | ||
799 | |||
798 | ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv, | 800 | ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv, |
799 | dai_count); | 801 | dai_count); |
800 | if (ret) { | 802 | if (ret) { |
@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev) | |||
802 | __func__, ret); | 804 | __func__, ret); |
803 | return ret; | 805 | return ret; |
804 | } | 806 | } |
805 | |||
806 | dev_set_drvdata(dev, hcp); | ||
807 | return 0; | 807 | return 0; |
808 | } | 808 | } |
809 | 809 | ||
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 89c43b26c379..a9b91bcfcc09 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c | |||
@@ -1778,7 +1778,9 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = { | |||
1778 | {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc}, | 1778 | {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc}, |
1779 | {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, | 1779 | {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, |
1780 | {"ADC STO1 ASRC", NULL, "AD ASRC"}, | 1780 | {"ADC STO1 ASRC", NULL, "AD ASRC"}, |
1781 | {"ADC STO1 ASRC", NULL, "DA ASRC"}, | ||
1781 | {"ADC STO1 ASRC", NULL, "CLKDET"}, | 1782 | {"ADC STO1 ASRC", NULL, "CLKDET"}, |
1783 | {"DAC STO1 ASRC", NULL, "AD ASRC"}, | ||
1782 | {"DAC STO1 ASRC", NULL, "DA ASRC"}, | 1784 | {"DAC STO1 ASRC", NULL, "DA ASRC"}, |
1783 | {"DAC STO1 ASRC", NULL, "CLKDET"}, | 1785 | {"DAC STO1 ASRC", NULL, "CLKDET"}, |
1784 | 1786 | ||
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 37e001cf9cd1..3fe34417ec89 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c | |||
@@ -462,7 +462,7 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv) | |||
462 | conf_idx = 0; | 462 | conf_idx = 0; |
463 | node = of_get_child_by_name(top, PREFIX "dai-link"); | 463 | node = of_get_child_by_name(top, PREFIX "dai-link"); |
464 | if (!node) { | 464 | if (!node) { |
465 | node = dev->of_node; | 465 | node = of_node_get(top); |
466 | loop = 0; | 466 | loop = 0; |
467 | } | 467 | } |
468 | 468 | ||
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index d6c62aa13041..d4bde4834ce5 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c | |||
@@ -604,6 +604,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, | |||
604 | unsigned int fmt) | 604 | unsigned int fmt) |
605 | { | 605 | { |
606 | struct i2s_dai *i2s = to_info(dai); | 606 | struct i2s_dai *i2s = to_info(dai); |
607 | struct i2s_dai *other = get_other_dai(i2s); | ||
607 | int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave; | 608 | int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave; |
608 | u32 mod, tmp = 0; | 609 | u32 mod, tmp = 0; |
609 | unsigned long flags; | 610 | unsigned long flags; |
@@ -661,7 +662,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, | |||
661 | * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any | 662 | * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any |
662 | * clock configuration assigned in DT is not overwritten. | 663 | * clock configuration assigned in DT is not overwritten. |
663 | */ | 664 | */ |
664 | if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL) | 665 | if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL && |
666 | other->clk_data.clks == NULL) | ||
665 | i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, | 667 | i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, |
666 | 0, SND_SOC_CLOCK_IN); | 668 | 0, SND_SOC_CLOCK_IN); |
667 | break; | 669 | break; |
@@ -699,7 +701,9 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, | |||
699 | struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) | 701 | struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) |
700 | { | 702 | { |
701 | struct i2s_dai *i2s = to_info(dai); | 703 | struct i2s_dai *i2s = to_info(dai); |
704 | struct i2s_dai *other = get_other_dai(i2s); | ||
702 | u32 mod, mask = 0, val = 0; | 705 | u32 mod, mask = 0, val = 0; |
706 | struct clk *rclksrc; | ||
703 | unsigned long flags; | 707 | unsigned long flags; |
704 | 708 | ||
705 | WARN_ON(!pm_runtime_active(dai->dev)); | 709 | WARN_ON(!pm_runtime_active(dai->dev)); |
@@ -782,6 +786,13 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, | |||
782 | 786 | ||
783 | i2s->frmclk = params_rate(params); | 787 | i2s->frmclk = params_rate(params); |
784 | 788 | ||
789 | rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; | ||
790 | if (!rclksrc || IS_ERR(rclksrc)) | ||
791 | rclksrc = other->clk_table[CLK_I2S_RCLK_SRC]; | ||
792 | |||
793 | if (rclksrc && !IS_ERR(rclksrc)) | ||
794 | i2s->rclk_srcrate = clk_get_rate(rclksrc); | ||
795 | |||
785 | return 0; | 796 | return 0; |
786 | } | 797 | } |
787 | 798 | ||
@@ -886,11 +897,6 @@ static int config_setup(struct i2s_dai *i2s) | |||
886 | return 0; | 897 | return 0; |
887 | 898 | ||
888 | if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { | 899 | if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { |
889 | struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; | ||
890 | |||
891 | if (rclksrc && !IS_ERR(rclksrc)) | ||
892 | i2s->rclk_srcrate = clk_get_rate(rclksrc); | ||
893 | |||
894 | psr = i2s->rclk_srcrate / i2s->frmclk / rfs; | 900 | psr = i2s->rclk_srcrate / i2s->frmclk / rfs; |
895 | writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); | 901 | writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); |
896 | dev_dbg(&i2s->pdev->dev, | 902 | dev_dbg(&i2s->pdev->dev, |
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 59e250cc2e9d..e819e965e1db 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c | |||
@@ -1526,14 +1526,14 @@ int rsnd_kctrl_new(struct rsnd_mod *mod, | |||
1526 | int ret; | 1526 | int ret; |
1527 | 1527 | ||
1528 | /* | 1528 | /* |
1529 | * 1) Avoid duplicate register (ex. MIXer case) | 1529 | * 1) Avoid duplicate register for DVC with MIX case |
1530 | * 2) re-register if card was rebinded | 1530 | * 2) Allow duplicate register for MIX |
1531 | * 3) re-register if card was rebinded | ||
1531 | */ | 1532 | */ |
1532 | list_for_each_entry(kctrl, &card->controls, list) { | 1533 | list_for_each_entry(kctrl, &card->controls, list) { |
1533 | struct rsnd_kctrl_cfg *c = kctrl->private_data; | 1534 | struct rsnd_kctrl_cfg *c = kctrl->private_data; |
1534 | 1535 | ||
1535 | if (strcmp(kctrl->id.name, name) == 0 && | 1536 | if (c == cfg) |
1536 | c->mod == mod) | ||
1537 | return 0; | 1537 | return 0; |
1538 | } | 1538 | } |
1539 | 1539 | ||
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 45ef295743ec..f5afab631abb 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c | |||
@@ -286,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, | |||
286 | if (rsnd_ssi_is_multi_slave(mod, io)) | 286 | if (rsnd_ssi_is_multi_slave(mod, io)) |
287 | return 0; | 287 | return 0; |
288 | 288 | ||
289 | if (ssi->usrcnt > 1) { | 289 | if (ssi->usrcnt > 0) { |
290 | if (ssi->rate != rate) { | 290 | if (ssi->rate != rate) { |
291 | dev_err(dev, "SSI parent/child should use same rate\n"); | 291 | dev_err(dev, "SSI parent/child should use same rate\n"); |
292 | return -EINVAL; | 292 | return -EINVAL; |
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c index c5934adcfd01..c74991dd18ab 100644 --- a/sound/soc/sh/rcar/ssiu.c +++ b/sound/soc/sh/rcar/ssiu.c | |||
@@ -79,7 +79,7 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod, | |||
79 | break; | 79 | break; |
80 | case 9: | 80 | case 9: |
81 | for (i = 0; i < 4; i++) | 81 | for (i = 0; i < 4; i++) |
82 | rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << (id * 4)); | 82 | rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << 4); |
83 | break; | 83 | break; |
84 | } | 84 | } |
85 | 85 | ||
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index aae450ba4f08..50617db05c46 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -735,12 +735,17 @@ static struct snd_soc_component *soc_find_component( | |||
735 | const struct device_node *of_node, const char *name) | 735 | const struct device_node *of_node, const char *name) |
736 | { | 736 | { |
737 | struct snd_soc_component *component; | 737 | struct snd_soc_component *component; |
738 | struct device_node *component_of_node; | ||
738 | 739 | ||
739 | lockdep_assert_held(&client_mutex); | 740 | lockdep_assert_held(&client_mutex); |
740 | 741 | ||
741 | for_each_component(component) { | 742 | for_each_component(component) { |
742 | if (of_node) { | 743 | if (of_node) { |
743 | if (component->dev->of_node == of_node) | 744 | component_of_node = component->dev->of_node; |
745 | if (!component_of_node && component->dev->parent) | ||
746 | component_of_node = component->dev->parent->of_node; | ||
747 | |||
748 | if (component_of_node == of_node) | ||
744 | return component; | 749 | return component; |
745 | } else if (name && strcmp(component->name, name) == 0) { | 750 | } else if (name && strcmp(component->name, name) == 0) { |
746 | return component; | 751 | return component; |
@@ -951,7 +956,7 @@ static void soc_remove_dai(struct snd_soc_dai *dai, int order) | |||
951 | { | 956 | { |
952 | int err; | 957 | int err; |
953 | 958 | ||
954 | if (!dai || !dai->probed || | 959 | if (!dai || !dai->probed || !dai->driver || |
955 | dai->driver->remove_order != order) | 960 | dai->driver->remove_order != order) |
956 | return; | 961 | return; |
957 | 962 | ||
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 2c4c13419539..20bad755888b 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -70,12 +70,16 @@ static int dapm_up_seq[] = { | |||
70 | [snd_soc_dapm_clock_supply] = 1, | 70 | [snd_soc_dapm_clock_supply] = 1, |
71 | [snd_soc_dapm_supply] = 2, | 71 | [snd_soc_dapm_supply] = 2, |
72 | [snd_soc_dapm_micbias] = 3, | 72 | [snd_soc_dapm_micbias] = 3, |
73 | [snd_soc_dapm_vmid] = 3, | ||
73 | [snd_soc_dapm_dai_link] = 2, | 74 | [snd_soc_dapm_dai_link] = 2, |
74 | [snd_soc_dapm_dai_in] = 4, | 75 | [snd_soc_dapm_dai_in] = 4, |
75 | [snd_soc_dapm_dai_out] = 4, | 76 | [snd_soc_dapm_dai_out] = 4, |
76 | [snd_soc_dapm_aif_in] = 4, | 77 | [snd_soc_dapm_aif_in] = 4, |
77 | [snd_soc_dapm_aif_out] = 4, | 78 | [snd_soc_dapm_aif_out] = 4, |
78 | [snd_soc_dapm_mic] = 5, | 79 | [snd_soc_dapm_mic] = 5, |
80 | [snd_soc_dapm_siggen] = 5, | ||
81 | [snd_soc_dapm_input] = 5, | ||
82 | [snd_soc_dapm_output] = 5, | ||
79 | [snd_soc_dapm_mux] = 6, | 83 | [snd_soc_dapm_mux] = 6, |
80 | [snd_soc_dapm_demux] = 6, | 84 | [snd_soc_dapm_demux] = 6, |
81 | [snd_soc_dapm_dac] = 7, | 85 | [snd_soc_dapm_dac] = 7, |
@@ -83,11 +87,19 @@ static int dapm_up_seq[] = { | |||
83 | [snd_soc_dapm_mixer] = 8, | 87 | [snd_soc_dapm_mixer] = 8, |
84 | [snd_soc_dapm_mixer_named_ctl] = 8, | 88 | [snd_soc_dapm_mixer_named_ctl] = 8, |
85 | [snd_soc_dapm_pga] = 9, | 89 | [snd_soc_dapm_pga] = 9, |
90 | [snd_soc_dapm_buffer] = 9, | ||
91 | [snd_soc_dapm_scheduler] = 9, | ||
92 | [snd_soc_dapm_effect] = 9, | ||
93 | [snd_soc_dapm_src] = 9, | ||
94 | [snd_soc_dapm_asrc] = 9, | ||
95 | [snd_soc_dapm_encoder] = 9, | ||
96 | [snd_soc_dapm_decoder] = 9, | ||
86 | [snd_soc_dapm_adc] = 10, | 97 | [snd_soc_dapm_adc] = 10, |
87 | [snd_soc_dapm_out_drv] = 11, | 98 | [snd_soc_dapm_out_drv] = 11, |
88 | [snd_soc_dapm_hp] = 11, | 99 | [snd_soc_dapm_hp] = 11, |
89 | [snd_soc_dapm_spk] = 11, | 100 | [snd_soc_dapm_spk] = 11, |
90 | [snd_soc_dapm_line] = 11, | 101 | [snd_soc_dapm_line] = 11, |
102 | [snd_soc_dapm_sink] = 11, | ||
91 | [snd_soc_dapm_kcontrol] = 12, | 103 | [snd_soc_dapm_kcontrol] = 12, |
92 | [snd_soc_dapm_post] = 13, | 104 | [snd_soc_dapm_post] = 13, |
93 | }; | 105 | }; |
@@ -100,13 +112,25 @@ static int dapm_down_seq[] = { | |||
100 | [snd_soc_dapm_spk] = 3, | 112 | [snd_soc_dapm_spk] = 3, |
101 | [snd_soc_dapm_line] = 3, | 113 | [snd_soc_dapm_line] = 3, |
102 | [snd_soc_dapm_out_drv] = 3, | 114 | [snd_soc_dapm_out_drv] = 3, |
115 | [snd_soc_dapm_sink] = 3, | ||
103 | [snd_soc_dapm_pga] = 4, | 116 | [snd_soc_dapm_pga] = 4, |
117 | [snd_soc_dapm_buffer] = 4, | ||
118 | [snd_soc_dapm_scheduler] = 4, | ||
119 | [snd_soc_dapm_effect] = 4, | ||
120 | [snd_soc_dapm_src] = 4, | ||
121 | [snd_soc_dapm_asrc] = 4, | ||
122 | [snd_soc_dapm_encoder] = 4, | ||
123 | [snd_soc_dapm_decoder] = 4, | ||
104 | [snd_soc_dapm_switch] = 5, | 124 | [snd_soc_dapm_switch] = 5, |
105 | [snd_soc_dapm_mixer_named_ctl] = 5, | 125 | [snd_soc_dapm_mixer_named_ctl] = 5, |
106 | [snd_soc_dapm_mixer] = 5, | 126 | [snd_soc_dapm_mixer] = 5, |
107 | [snd_soc_dapm_dac] = 6, | 127 | [snd_soc_dapm_dac] = 6, |
108 | [snd_soc_dapm_mic] = 7, | 128 | [snd_soc_dapm_mic] = 7, |
129 | [snd_soc_dapm_siggen] = 7, | ||
130 | [snd_soc_dapm_input] = 7, | ||
131 | [snd_soc_dapm_output] = 7, | ||
109 | [snd_soc_dapm_micbias] = 8, | 132 | [snd_soc_dapm_micbias] = 8, |
133 | [snd_soc_dapm_vmid] = 8, | ||
110 | [snd_soc_dapm_mux] = 9, | 134 | [snd_soc_dapm_mux] = 9, |
111 | [snd_soc_dapm_demux] = 9, | 135 | [snd_soc_dapm_demux] = 9, |
112 | [snd_soc_dapm_aif_in] = 10, | 136 | [snd_soc_dapm_aif_in] = 10, |
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 045ef136903d..731b963b6995 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c | |||
@@ -502,6 +502,7 @@ static void remove_dai(struct snd_soc_component *comp, | |||
502 | { | 502 | { |
503 | struct snd_soc_dai_driver *dai_drv = | 503 | struct snd_soc_dai_driver *dai_drv = |
504 | container_of(dobj, struct snd_soc_dai_driver, dobj); | 504 | container_of(dobj, struct snd_soc_dai_driver, dobj); |
505 | struct snd_soc_dai *dai; | ||
505 | 506 | ||
506 | if (pass != SOC_TPLG_PASS_PCM_DAI) | 507 | if (pass != SOC_TPLG_PASS_PCM_DAI) |
507 | return; | 508 | return; |
@@ -509,6 +510,10 @@ static void remove_dai(struct snd_soc_component *comp, | |||
509 | if (dobj->ops && dobj->ops->dai_unload) | 510 | if (dobj->ops && dobj->ops->dai_unload) |
510 | dobj->ops->dai_unload(comp, dobj); | 511 | dobj->ops->dai_unload(comp, dobj); |
511 | 512 | ||
513 | list_for_each_entry(dai, &comp->dai_list, list) | ||
514 | if (dai->driver == dai_drv) | ||
515 | dai->driver = NULL; | ||
516 | |||
512 | kfree(dai_drv->name); | 517 | kfree(dai_drv->name); |
513 | list_del(&dobj->list); | 518 | list_del(&dobj->list); |
514 | kfree(dai_drv); | 519 | kfree(dai_drv); |
@@ -2482,6 +2487,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, | |||
2482 | struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) | 2487 | struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) |
2483 | { | 2488 | { |
2484 | struct soc_tplg tplg; | 2489 | struct soc_tplg tplg; |
2490 | int ret; | ||
2485 | 2491 | ||
2486 | /* setup parsing context */ | 2492 | /* setup parsing context */ |
2487 | memset(&tplg, 0, sizeof(tplg)); | 2493 | memset(&tplg, 0, sizeof(tplg)); |
@@ -2495,7 +2501,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, | |||
2495 | tplg.bytes_ext_ops = ops->bytes_ext_ops; | 2501 | tplg.bytes_ext_ops = ops->bytes_ext_ops; |
2496 | tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; | 2502 | tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; |
2497 | 2503 | ||
2498 | return soc_tplg_load(&tplg); | 2504 | ret = soc_tplg_load(&tplg); |
2505 | /* free the created components if fail to load topology */ | ||
2506 | if (ret) | ||
2507 | snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL); | ||
2508 | |||
2509 | return ret; | ||
2499 | } | 2510 | } |
2500 | EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); | 2511 | EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); |
2501 | 2512 | ||
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 382847154227..db114f3977e0 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum, | |||
314 | return 0; | 314 | return 0; |
315 | } | 315 | } |
316 | 316 | ||
317 | /* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk | ||
318 | * applies. Returns 1 if a quirk was found. | ||
319 | */ | ||
317 | static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, | 320 | static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, |
318 | struct usb_device *dev, | 321 | struct usb_device *dev, |
319 | struct usb_interface_descriptor *altsd, | 322 | struct usb_interface_descriptor *altsd, |
@@ -384,7 +387,7 @@ add_sync_ep: | |||
384 | 387 | ||
385 | subs->data_endpoint->sync_master = subs->sync_endpoint; | 388 | subs->data_endpoint->sync_master = subs->sync_endpoint; |
386 | 389 | ||
387 | return 0; | 390 | return 1; |
388 | } | 391 | } |
389 | 392 | ||
390 | static int set_sync_endpoint(struct snd_usb_substream *subs, | 393 | static int set_sync_endpoint(struct snd_usb_substream *subs, |
@@ -423,6 +426,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs, | |||
423 | if (err < 0) | 426 | if (err < 0) |
424 | return err; | 427 | return err; |
425 | 428 | ||
429 | /* endpoint set by quirk */ | ||
430 | if (err > 0) | ||
431 | return 0; | ||
432 | |||
426 | if (altsd->bNumEndpoints < 2) | 433 | if (altsd->bNumEndpoints < 2) |
427 | return 0; | 434 | return 0; |
428 | 435 | ||
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index fd92ce8388fc..57aaeaf8e192 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" | 15 | #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" |
16 | #elif defined(__riscv) | 16 | #elif defined(__riscv) |
17 | #include "../../arch/riscv/include/uapi/asm/bitsperlong.h" | 17 | #include "../../arch/riscv/include/uapi/asm/bitsperlong.h" |
18 | #elif defined(__alpha__) | ||
19 | #include "../../arch/alpha/include/uapi/asm/bitsperlong.h" | ||
18 | #else | 20 | #else |
19 | #include <asm-generic/bitsperlong.h> | 21 | #include <asm-generic/bitsperlong.h> |
20 | #endif | 22 | #endif |
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile index 9050eeea5f5f..1de8bd8ccf5d 100644 --- a/tools/testing/selftests/networking/timestamping/Makefile +++ b/tools/testing/selftests/networking/timestamping/Makefile | |||
@@ -9,6 +9,3 @@ all: $(TEST_PROGS) | |||
9 | top_srcdir = ../../../../.. | 9 | top_srcdir = ../../../../.. |
10 | KSFT_KHDR_INSTALL := 1 | 10 | KSFT_KHDR_INSTALL := 1 |
11 | include ../../lib.mk | 11 | include ../../lib.mk |
12 | |||
13 | clean: | ||
14 | rm -fr $(TEST_GEN_FILES) | ||
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 9e350fd34504..9c486fad3f9f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu) | |||
626 | /* Awaken to handle a signal, request we sleep again later. */ | 626 | /* Awaken to handle a signal, request we sleep again later. */ |
627 | kvm_make_request(KVM_REQ_SLEEP, vcpu); | 627 | kvm_make_request(KVM_REQ_SLEEP, vcpu); |
628 | } | 628 | } |
629 | |||
630 | /* | ||
631 | * Make sure we will observe a potential reset request if we've | ||
632 | * observed a change to the power state. Pairs with the smp_wmb() in | ||
633 | * kvm_psci_vcpu_on(). | ||
634 | */ | ||
635 | smp_rmb(); | ||
629 | } | 636 | } |
630 | 637 | ||
631 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) | 638 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) |
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) | |||
639 | if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) | 646 | if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) |
640 | vcpu_req_sleep(vcpu); | 647 | vcpu_req_sleep(vcpu); |
641 | 648 | ||
649 | if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) | ||
650 | kvm_reset_vcpu(vcpu); | ||
651 | |||
642 | /* | 652 | /* |
643 | * Clear IRQ_PENDING requests that were made to guarantee | 653 | * Clear IRQ_PENDING requests that were made to guarantee |
644 | * that a VCPU sees new virtual interrupts. | 654 | * that a VCPU sees new virtual interrupts. |
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index fbdf3ac2f001..30251e288629 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c | |||
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
1695 | 1695 | ||
1696 | vma_pagesize = vma_kernel_pagesize(vma); | 1696 | vma_pagesize = vma_kernel_pagesize(vma); |
1697 | /* | 1697 | /* |
1698 | * PUD level may not exist for a VM but PMD is guaranteed to | 1698 | * The stage2 has a minimum of 2 level table (For arm64 see |
1699 | * exist. | 1699 | * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can |
1700 | * use PMD_SIZE huge mappings (even when the PMD is folded into PGD). | ||
1701 | * As for PUD huge maps, we must make sure that we have at least | ||
1702 | * 3 levels, i.e, PMD is not folded. | ||
1700 | */ | 1703 | */ |
1701 | if ((vma_pagesize == PMD_SIZE || | 1704 | if ((vma_pagesize == PMD_SIZE || |
1702 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) && | 1705 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) && |
1703 | !force_pte) { | 1706 | !force_pte) { |
1704 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; | 1707 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; |
1705 | } | 1708 | } |
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 9b73d3ad918a..34d08ee63747 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c | |||
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) | |||
104 | 104 | ||
105 | static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | 105 | static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) |
106 | { | 106 | { |
107 | struct vcpu_reset_state *reset_state; | ||
107 | struct kvm *kvm = source_vcpu->kvm; | 108 | struct kvm *kvm = source_vcpu->kvm; |
108 | struct kvm_vcpu *vcpu = NULL; | 109 | struct kvm_vcpu *vcpu = NULL; |
109 | struct swait_queue_head *wq; | ||
110 | unsigned long cpu_id; | 110 | unsigned long cpu_id; |
111 | unsigned long context_id; | ||
112 | phys_addr_t target_pc; | ||
113 | 111 | ||
114 | cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; | 112 | cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; |
115 | if (vcpu_mode_is_32bit(source_vcpu)) | 113 | if (vcpu_mode_is_32bit(source_vcpu)) |
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
130 | return PSCI_RET_INVALID_PARAMS; | 128 | return PSCI_RET_INVALID_PARAMS; |
131 | } | 129 | } |
132 | 130 | ||
133 | target_pc = smccc_get_arg2(source_vcpu); | 131 | reset_state = &vcpu->arch.reset_state; |
134 | context_id = smccc_get_arg3(source_vcpu); | ||
135 | 132 | ||
136 | kvm_reset_vcpu(vcpu); | 133 | reset_state->pc = smccc_get_arg2(source_vcpu); |
137 | |||
138 | /* Gracefully handle Thumb2 entry point */ | ||
139 | if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { | ||
140 | target_pc &= ~((phys_addr_t) 1); | ||
141 | vcpu_set_thumb(vcpu); | ||
142 | } | ||
143 | 134 | ||
144 | /* Propagate caller endianness */ | 135 | /* Propagate caller endianness */ |
145 | if (kvm_vcpu_is_be(source_vcpu)) | 136 | reset_state->be = kvm_vcpu_is_be(source_vcpu); |
146 | kvm_vcpu_set_be(vcpu); | ||
147 | 137 | ||
148 | *vcpu_pc(vcpu) = target_pc; | ||
149 | /* | 138 | /* |
150 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 | 139 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 |
151 | * the general puspose registers are undefined upon CPU_ON. | 140 | * the general puspose registers are undefined upon CPU_ON. |
152 | */ | 141 | */ |
153 | smccc_set_retval(vcpu, context_id, 0, 0, 0); | 142 | reset_state->r0 = smccc_get_arg3(source_vcpu); |
154 | vcpu->arch.power_off = false; | 143 | |
155 | smp_mb(); /* Make sure the above is visible */ | 144 | WRITE_ONCE(reset_state->reset, true); |
145 | kvm_make_request(KVM_REQ_VCPU_RESET, vcpu); | ||
156 | 146 | ||
157 | wq = kvm_arch_vcpu_wq(vcpu); | 147 | /* |
158 | swake_up_one(wq); | 148 | * Make sure the reset request is observed if the change to |
149 | * power_state is observed. | ||
150 | */ | ||
151 | smp_wmb(); | ||
152 | |||
153 | vcpu->arch.power_off = false; | ||
154 | kvm_vcpu_wake_up(vcpu); | ||
159 | 155 | ||
160 | return PSCI_RET_SUCCESS; | 156 | return PSCI_RET_SUCCESS; |
161 | } | 157 | } |
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c index 07aa900bac56..1f62f2b8065d 100644 --- a/virt/kvm/arm/vgic/vgic-debug.c +++ b/virt/kvm/arm/vgic/vgic-debug.c | |||
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v) | |||
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | spin_lock_irqsave(&irq->irq_lock, flags); | 254 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
255 | print_irq_state(s, irq, vcpu); | 255 | print_irq_state(s, irq, vcpu); |
256 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 256 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
257 | 257 | ||
258 | vgic_put_irq(kvm, irq); | 258 | vgic_put_irq(kvm, irq); |
259 | return 0; | 259 | return 0; |
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index c0c0b88af1d5..3bdb31eaed64 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c | |||
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm) | |||
64 | struct vgic_dist *dist = &kvm->arch.vgic; | 64 | struct vgic_dist *dist = &kvm->arch.vgic; |
65 | 65 | ||
66 | INIT_LIST_HEAD(&dist->lpi_list_head); | 66 | INIT_LIST_HEAD(&dist->lpi_list_head); |
67 | spin_lock_init(&dist->lpi_list_lock); | 67 | raw_spin_lock_init(&dist->lpi_list_lock); |
68 | } | 68 | } |
69 | 69 | ||
70 | /* CREATION */ | 70 | /* CREATION */ |
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) | |||
171 | 171 | ||
172 | irq->intid = i + VGIC_NR_PRIVATE_IRQS; | 172 | irq->intid = i + VGIC_NR_PRIVATE_IRQS; |
173 | INIT_LIST_HEAD(&irq->ap_list); | 173 | INIT_LIST_HEAD(&irq->ap_list); |
174 | spin_lock_init(&irq->irq_lock); | 174 | raw_spin_lock_init(&irq->irq_lock); |
175 | irq->vcpu = NULL; | 175 | irq->vcpu = NULL; |
176 | irq->target_vcpu = vcpu0; | 176 | irq->target_vcpu = vcpu0; |
177 | kref_init(&irq->refcount); | 177 | kref_init(&irq->refcount); |
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
206 | vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; | 206 | vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; |
207 | 207 | ||
208 | INIT_LIST_HEAD(&vgic_cpu->ap_list_head); | 208 | INIT_LIST_HEAD(&vgic_cpu->ap_list_head); |
209 | spin_lock_init(&vgic_cpu->ap_list_lock); | 209 | raw_spin_lock_init(&vgic_cpu->ap_list_lock); |
210 | 210 | ||
211 | /* | 211 | /* |
212 | * Enable and configure all SGIs to be edge-triggered and | 212 | * Enable and configure all SGIs to be edge-triggered and |
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
216 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; | 216 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; |
217 | 217 | ||
218 | INIT_LIST_HEAD(&irq->ap_list); | 218 | INIT_LIST_HEAD(&irq->ap_list); |
219 | spin_lock_init(&irq->irq_lock); | 219 | raw_spin_lock_init(&irq->irq_lock); |
220 | irq->intid = i; | 220 | irq->intid = i; |
221 | irq->vcpu = NULL; | 221 | irq->vcpu = NULL; |
222 | irq->target_vcpu = vcpu; | 222 | irq->target_vcpu = vcpu; |
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
231 | irq->config = VGIC_CONFIG_LEVEL; | 231 | irq->config = VGIC_CONFIG_LEVEL; |
232 | } | 232 | } |
233 | 233 | ||
234 | /* | ||
235 | * GICv3 can only be created via the KVM_DEVICE_CREATE API and | ||
236 | * so we always know the emulation type at this point as it's | ||
237 | * either explicitly configured as GICv3, or explicitly | ||
238 | * configured as GICv2, or not configured yet which also | ||
239 | * implies GICv2. | ||
240 | */ | ||
241 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) | 234 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) |
242 | irq->group = 1; | 235 | irq->group = 1; |
243 | else | 236 | else |
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm) | |||
281 | { | 274 | { |
282 | struct vgic_dist *dist = &kvm->arch.vgic; | 275 | struct vgic_dist *dist = &kvm->arch.vgic; |
283 | struct kvm_vcpu *vcpu; | 276 | struct kvm_vcpu *vcpu; |
284 | int ret = 0, i; | 277 | int ret = 0, i, idx; |
285 | 278 | ||
286 | if (vgic_initialized(kvm)) | 279 | if (vgic_initialized(kvm)) |
287 | return 0; | 280 | return 0; |
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm) | |||
298 | if (ret) | 291 | if (ret) |
299 | goto out; | 292 | goto out; |
300 | 293 | ||
294 | /* Initialize groups on CPUs created before the VGIC type was known */ | ||
295 | kvm_for_each_vcpu(idx, vcpu, kvm) { | ||
296 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
297 | |||
298 | for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { | ||
299 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; | ||
300 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) | ||
301 | irq->group = 1; | ||
302 | else | ||
303 | irq->group = 0; | ||
304 | } | ||
305 | } | ||
306 | |||
301 | if (vgic_has_its(kvm)) { | 307 | if (vgic_has_its(kvm)) { |
302 | ret = vgic_v4_init(kvm); | 308 | ret = vgic_v4_init(kvm); |
303 | if (ret) | 309 | if (ret) |
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index eb2a390a6c86..ab3f47745d9c 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c | |||
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
65 | 65 | ||
66 | INIT_LIST_HEAD(&irq->lpi_list); | 66 | INIT_LIST_HEAD(&irq->lpi_list); |
67 | INIT_LIST_HEAD(&irq->ap_list); | 67 | INIT_LIST_HEAD(&irq->ap_list); |
68 | spin_lock_init(&irq->irq_lock); | 68 | raw_spin_lock_init(&irq->irq_lock); |
69 | 69 | ||
70 | irq->config = VGIC_CONFIG_EDGE; | 70 | irq->config = VGIC_CONFIG_EDGE; |
71 | kref_init(&irq->refcount); | 71 | kref_init(&irq->refcount); |
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
73 | irq->target_vcpu = vcpu; | 73 | irq->target_vcpu = vcpu; |
74 | irq->group = 1; | 74 | irq->group = 1; |
75 | 75 | ||
76 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 76 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * There could be a race with another vgic_add_lpi(), so we need to | 79 | * There could be a race with another vgic_add_lpi(), so we need to |
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
101 | dist->lpi_list_count++; | 101 | dist->lpi_list_count++; |
102 | 102 | ||
103 | out_unlock: | 103 | out_unlock: |
104 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 104 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * We "cache" the configuration table entries in our struct vgic_irq's. | 107 | * We "cache" the configuration table entries in our struct vgic_irq's. |
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
287 | if (ret) | 287 | if (ret) |
288 | return ret; | 288 | return ret; |
289 | 289 | ||
290 | spin_lock_irqsave(&irq->irq_lock, flags); | 290 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
291 | 291 | ||
292 | if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { | 292 | if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { |
293 | irq->priority = LPI_PROP_PRIORITY(prop); | 293 | irq->priority = LPI_PROP_PRIORITY(prop); |
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
299 | } | 299 | } |
300 | } | 300 | } |
301 | 301 | ||
302 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 302 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
303 | 303 | ||
304 | if (irq->hw) | 304 | if (irq->hw) |
305 | return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); | 305 | return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); |
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
332 | if (!intids) | 332 | if (!intids) |
333 | return -ENOMEM; | 333 | return -ENOMEM; |
334 | 334 | ||
335 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 335 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
336 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 336 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
337 | if (i == irq_count) | 337 | if (i == irq_count) |
338 | break; | 338 | break; |
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
341 | continue; | 341 | continue; |
342 | intids[i++] = irq->intid; | 342 | intids[i++] = irq->intid; |
343 | } | 343 | } |
344 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 344 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
345 | 345 | ||
346 | *intid_ptr = intids; | 346 | *intid_ptr = intids; |
347 | return i; | 347 | return i; |
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) | |||
352 | int ret = 0; | 352 | int ret = 0; |
353 | unsigned long flags; | 353 | unsigned long flags; |
354 | 354 | ||
355 | spin_lock_irqsave(&irq->irq_lock, flags); | 355 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
356 | irq->target_vcpu = vcpu; | 356 | irq->target_vcpu = vcpu; |
357 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 357 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
358 | 358 | ||
359 | if (irq->hw) { | 359 | if (irq->hw) { |
360 | struct its_vlpi_map map; | 360 | struct its_vlpi_map map; |
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) | |||
455 | } | 455 | } |
456 | 456 | ||
457 | irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); | 457 | irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); |
458 | spin_lock_irqsave(&irq->irq_lock, flags); | 458 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
459 | irq->pending_latch = pendmask & (1U << bit_nr); | 459 | irq->pending_latch = pendmask & (1U << bit_nr); |
460 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 460 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
461 | vgic_put_irq(vcpu->kvm, irq); | 461 | vgic_put_irq(vcpu->kvm, irq); |
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, | |||
612 | return irq_set_irqchip_state(irq->host_irq, | 612 | return irq_set_irqchip_state(irq->host_irq, |
613 | IRQCHIP_STATE_PENDING, true); | 613 | IRQCHIP_STATE_PENDING, true); |
614 | 614 | ||
615 | spin_lock_irqsave(&irq->irq_lock, flags); | 615 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
616 | irq->pending_latch = true; | 616 | irq->pending_latch = true; |
617 | vgic_queue_irq_unlock(kvm, irq, flags); | 617 | vgic_queue_irq_unlock(kvm, irq, flags); |
618 | 618 | ||
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 738b65d2d0e7..b535fffc7400 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c | |||
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, | |||
147 | 147 | ||
148 | irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); | 148 | irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); |
149 | 149 | ||
150 | spin_lock_irqsave(&irq->irq_lock, flags); | 150 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
151 | irq->pending_latch = true; | 151 | irq->pending_latch = true; |
152 | irq->source |= 1U << source_vcpu->vcpu_id; | 152 | irq->source |= 1U << source_vcpu->vcpu_id; |
153 | 153 | ||
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, | |||
191 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); | 191 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); |
192 | int target; | 192 | int target; |
193 | 193 | ||
194 | spin_lock_irqsave(&irq->irq_lock, flags); | 194 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
195 | 195 | ||
196 | irq->targets = (val >> (i * 8)) & cpu_mask; | 196 | irq->targets = (val >> (i * 8)) & cpu_mask; |
197 | target = irq->targets ? __ffs(irq->targets) : 0; | 197 | target = irq->targets ? __ffs(irq->targets) : 0; |
198 | irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); | 198 | irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); |
199 | 199 | ||
200 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 200 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
201 | vgic_put_irq(vcpu->kvm, irq); | 201 | vgic_put_irq(vcpu->kvm, irq); |
202 | } | 202 | } |
203 | } | 203 | } |
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu, | |||
230 | for (i = 0; i < len; i++) { | 230 | for (i = 0; i < len; i++) { |
231 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 231 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
232 | 232 | ||
233 | spin_lock_irqsave(&irq->irq_lock, flags); | 233 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
234 | 234 | ||
235 | irq->source &= ~((val >> (i * 8)) & 0xff); | 235 | irq->source &= ~((val >> (i * 8)) & 0xff); |
236 | if (!irq->source) | 236 | if (!irq->source) |
237 | irq->pending_latch = false; | 237 | irq->pending_latch = false; |
238 | 238 | ||
239 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 239 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
240 | vgic_put_irq(vcpu->kvm, irq); | 240 | vgic_put_irq(vcpu->kvm, irq); |
241 | } | 241 | } |
242 | } | 242 | } |
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, | |||
252 | for (i = 0; i < len; i++) { | 252 | for (i = 0; i < len; i++) { |
253 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 253 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
254 | 254 | ||
255 | spin_lock_irqsave(&irq->irq_lock, flags); | 255 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
256 | 256 | ||
257 | irq->source |= (val >> (i * 8)) & 0xff; | 257 | irq->source |= (val >> (i * 8)) & 0xff; |
258 | 258 | ||
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, | |||
260 | irq->pending_latch = true; | 260 | irq->pending_latch = true; |
261 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 261 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
262 | } else { | 262 | } else { |
263 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 263 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
264 | } | 264 | } |
265 | vgic_put_irq(vcpu->kvm, irq); | 265 | vgic_put_irq(vcpu->kvm, irq); |
266 | } | 266 | } |
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index b3d1f0985117..4a12322bf7df 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c | |||
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, | |||
169 | if (!irq) | 169 | if (!irq) |
170 | return; | 170 | return; |
171 | 171 | ||
172 | spin_lock_irqsave(&irq->irq_lock, flags); | 172 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
173 | 173 | ||
174 | /* We only care about and preserve Aff0, Aff1 and Aff2. */ | 174 | /* We only care about and preserve Aff0, Aff1 and Aff2. */ |
175 | irq->mpidr = val & GENMASK(23, 0); | 175 | irq->mpidr = val & GENMASK(23, 0); |
176 | irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); | 176 | irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); |
177 | 177 | ||
178 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 178 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
179 | vgic_put_irq(vcpu->kvm, irq); | 179 | vgic_put_irq(vcpu->kvm, irq); |
180 | } | 180 | } |
181 | 181 | ||
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, | |||
281 | for (i = 0; i < len * 8; i++) { | 281 | for (i = 0; i < len * 8; i++) { |
282 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 282 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
283 | 283 | ||
284 | spin_lock_irqsave(&irq->irq_lock, flags); | 284 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
285 | if (test_bit(i, &val)) { | 285 | if (test_bit(i, &val)) { |
286 | /* | 286 | /* |
287 | * pending_latch is set irrespective of irq type | 287 | * pending_latch is set irrespective of irq type |
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, | |||
292 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 292 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
293 | } else { | 293 | } else { |
294 | irq->pending_latch = false; | 294 | irq->pending_latch = false; |
295 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 295 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
296 | } | 296 | } |
297 | 297 | ||
298 | vgic_put_irq(vcpu->kvm, irq); | 298 | vgic_put_irq(vcpu->kvm, irq); |
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) | |||
957 | 957 | ||
958 | irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); | 958 | irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); |
959 | 959 | ||
960 | spin_lock_irqsave(&irq->irq_lock, flags); | 960 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
961 | 961 | ||
962 | /* | 962 | /* |
963 | * An access targeting Group0 SGIs can only generate | 963 | * An access targeting Group0 SGIs can only generate |
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) | |||
968 | irq->pending_latch = true; | 968 | irq->pending_latch = true; |
969 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 969 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
970 | } else { | 970 | } else { |
971 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 971 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
972 | } | 972 | } |
973 | 973 | ||
974 | vgic_put_irq(vcpu->kvm, irq); | 974 | vgic_put_irq(vcpu->kvm, irq); |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index ceeda7e04a4d..7de42fba05b5 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr, | |||
77 | for (i = 0; i < len * 8; i++) { | 77 | for (i = 0; i < len * 8; i++) { |
78 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 78 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
79 | 79 | ||
80 | spin_lock_irqsave(&irq->irq_lock, flags); | 80 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
81 | irq->group = !!(val & BIT(i)); | 81 | irq->group = !!(val & BIT(i)); |
82 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 82 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
83 | 83 | ||
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, | |||
120 | for_each_set_bit(i, &val, len * 8) { | 120 | for_each_set_bit(i, &val, len * 8) { |
121 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 121 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
122 | 122 | ||
123 | spin_lock_irqsave(&irq->irq_lock, flags); | 123 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
124 | irq->enabled = true; | 124 | irq->enabled = true; |
125 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 125 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
126 | 126 | ||
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, | |||
139 | for_each_set_bit(i, &val, len * 8) { | 139 | for_each_set_bit(i, &val, len * 8) { |
140 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 140 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
141 | 141 | ||
142 | spin_lock_irqsave(&irq->irq_lock, flags); | 142 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
143 | 143 | ||
144 | irq->enabled = false; | 144 | irq->enabled = false; |
145 | 145 | ||
146 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 146 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
147 | vgic_put_irq(vcpu->kvm, irq); | 147 | vgic_put_irq(vcpu->kvm, irq); |
148 | } | 148 | } |
149 | } | 149 | } |
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, | |||
160 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 160 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
161 | unsigned long flags; | 161 | unsigned long flags; |
162 | 162 | ||
163 | spin_lock_irqsave(&irq->irq_lock, flags); | 163 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
164 | if (irq_is_pending(irq)) | 164 | if (irq_is_pending(irq)) |
165 | value |= (1U << i); | 165 | value |= (1U << i); |
166 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 166 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
167 | 167 | ||
168 | vgic_put_irq(vcpu->kvm, irq); | 168 | vgic_put_irq(vcpu->kvm, irq); |
169 | } | 169 | } |
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, | |||
215 | for_each_set_bit(i, &val, len * 8) { | 215 | for_each_set_bit(i, &val, len * 8) { |
216 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 216 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
217 | 217 | ||
218 | spin_lock_irqsave(&irq->irq_lock, flags); | 218 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
219 | if (irq->hw) | 219 | if (irq->hw) |
220 | vgic_hw_irq_spending(vcpu, irq, is_uaccess); | 220 | vgic_hw_irq_spending(vcpu, irq, is_uaccess); |
221 | else | 221 | else |
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, | |||
262 | for_each_set_bit(i, &val, len * 8) { | 262 | for_each_set_bit(i, &val, len * 8) { |
263 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 263 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
264 | 264 | ||
265 | spin_lock_irqsave(&irq->irq_lock, flags); | 265 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
266 | 266 | ||
267 | if (irq->hw) | 267 | if (irq->hw) |
268 | vgic_hw_irq_cpending(vcpu, irq, is_uaccess); | 268 | vgic_hw_irq_cpending(vcpu, irq, is_uaccess); |
269 | else | 269 | else |
270 | irq->pending_latch = false; | 270 | irq->pending_latch = false; |
271 | 271 | ||
272 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 272 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
273 | vgic_put_irq(vcpu->kvm, irq); | 273 | vgic_put_irq(vcpu->kvm, irq); |
274 | } | 274 | } |
275 | } | 275 | } |
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
311 | unsigned long flags; | 311 | unsigned long flags; |
312 | struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); | 312 | struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); |
313 | 313 | ||
314 | spin_lock_irqsave(&irq->irq_lock, flags); | 314 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
315 | 315 | ||
316 | if (irq->hw) { | 316 | if (irq->hw) { |
317 | vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); | 317 | vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); |
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
342 | if (irq->active) | 342 | if (irq->active) |
343 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 343 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
344 | else | 344 | else |
345 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 345 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
346 | } | 346 | } |
347 | 347 | ||
348 | /* | 348 | /* |
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, | |||
485 | for (i = 0; i < len; i++) { | 485 | for (i = 0; i < len; i++) { |
486 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 486 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
487 | 487 | ||
488 | spin_lock_irqsave(&irq->irq_lock, flags); | 488 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
489 | /* Narrow the priority range to what we actually support */ | 489 | /* Narrow the priority range to what we actually support */ |
490 | irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); | 490 | irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); |
491 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 491 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
492 | 492 | ||
493 | vgic_put_irq(vcpu->kvm, irq); | 493 | vgic_put_irq(vcpu->kvm, irq); |
494 | } | 494 | } |
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, | |||
534 | continue; | 534 | continue; |
535 | 535 | ||
536 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 536 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
537 | spin_lock_irqsave(&irq->irq_lock, flags); | 537 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
538 | 538 | ||
539 | if (test_bit(i * 2 + 1, &val)) | 539 | if (test_bit(i * 2 + 1, &val)) |
540 | irq->config = VGIC_CONFIG_EDGE; | 540 | irq->config = VGIC_CONFIG_EDGE; |
541 | else | 541 | else |
542 | irq->config = VGIC_CONFIG_LEVEL; | 542 | irq->config = VGIC_CONFIG_LEVEL; |
543 | 543 | ||
544 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 544 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
545 | vgic_put_irq(vcpu->kvm, irq); | 545 | vgic_put_irq(vcpu->kvm, irq); |
546 | } | 546 | } |
547 | } | 547 | } |
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, | |||
590 | * restore irq config before line level. | 590 | * restore irq config before line level. |
591 | */ | 591 | */ |
592 | new_level = !!(val & (1U << i)); | 592 | new_level = !!(val & (1U << i)); |
593 | spin_lock_irqsave(&irq->irq_lock, flags); | 593 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
594 | irq->line_level = new_level; | 594 | irq->line_level = new_level; |
595 | if (new_level) | 595 | if (new_level) |
596 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 596 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
597 | else | 597 | else |
598 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 598 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
599 | 599 | ||
600 | vgic_put_irq(vcpu->kvm, irq); | 600 | vgic_put_irq(vcpu->kvm, irq); |
601 | } | 601 | } |
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 69b892abd7dc..d91a8938aa7c 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
84 | 84 | ||
85 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); | 85 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); |
86 | 86 | ||
87 | spin_lock(&irq->irq_lock); | 87 | raw_spin_lock(&irq->irq_lock); |
88 | 88 | ||
89 | /* Always preserve the active bit */ | 89 | /* Always preserve the active bit */ |
90 | irq->active = !!(val & GICH_LR_ACTIVE_BIT); | 90 | irq->active = !!(val & GICH_LR_ACTIVE_BIT); |
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
127 | vgic_irq_set_phys_active(irq, false); | 127 | vgic_irq_set_phys_active(irq, false); |
128 | } | 128 | } |
129 | 129 | ||
130 | spin_unlock(&irq->irq_lock); | 130 | raw_spin_unlock(&irq->irq_lock); |
131 | vgic_put_irq(vcpu->kvm, irq); | 131 | vgic_put_irq(vcpu->kvm, irq); |
132 | } | 132 | } |
133 | 133 | ||
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 9c0dd234ebe8..4ee0aeb9a905 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
76 | if (!irq) /* An LPI could have been unmapped. */ | 76 | if (!irq) /* An LPI could have been unmapped. */ |
77 | continue; | 77 | continue; |
78 | 78 | ||
79 | spin_lock(&irq->irq_lock); | 79 | raw_spin_lock(&irq->irq_lock); |
80 | 80 | ||
81 | /* Always preserve the active bit */ | 81 | /* Always preserve the active bit */ |
82 | irq->active = !!(val & ICH_LR_ACTIVE_BIT); | 82 | irq->active = !!(val & ICH_LR_ACTIVE_BIT); |
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
119 | vgic_irq_set_phys_active(irq, false); | 119 | vgic_irq_set_phys_active(irq, false); |
120 | } | 120 | } |
121 | 121 | ||
122 | spin_unlock(&irq->irq_lock); | 122 | raw_spin_unlock(&irq->irq_lock); |
123 | vgic_put_irq(vcpu->kvm, irq); | 123 | vgic_put_irq(vcpu->kvm, irq); |
124 | } | 124 | } |
125 | 125 | ||
@@ -347,9 +347,9 @@ retry: | |||
347 | 347 | ||
348 | status = val & (1 << bit_nr); | 348 | status = val & (1 << bit_nr); |
349 | 349 | ||
350 | spin_lock_irqsave(&irq->irq_lock, flags); | 350 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
351 | if (irq->target_vcpu != vcpu) { | 351 | if (irq->target_vcpu != vcpu) { |
352 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 352 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
353 | goto retry; | 353 | goto retry; |
354 | } | 354 | } |
355 | irq->pending_latch = status; | 355 | irq->pending_latch = status; |
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 870b1185173b..abd9c7352677 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { | |||
54 | * When taking more than one ap_list_lock at the same time, always take the | 54 | * When taking more than one ap_list_lock at the same time, always take the |
55 | * lowest numbered VCPU's ap_list_lock first, so: | 55 | * lowest numbered VCPU's ap_list_lock first, so: |
56 | * vcpuX->vcpu_id < vcpuY->vcpu_id: | 56 | * vcpuX->vcpu_id < vcpuY->vcpu_id: |
57 | * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); | 57 | * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); |
58 | * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); | 58 | * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); |
59 | * | 59 | * |
60 | * Since the VGIC must support injecting virtual interrupts from ISRs, we have | 60 | * Since the VGIC must support injecting virtual interrupts from ISRs, we have |
61 | * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer | 61 | * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer |
62 | * spinlocks for any lock that may be taken while injecting an interrupt. | 62 | * spinlocks for any lock that may be taken while injecting an interrupt. |
63 | */ | 63 | */ |
64 | 64 | ||
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
72 | struct vgic_irq *irq = NULL; | 72 | struct vgic_irq *irq = NULL; |
73 | unsigned long flags; | 73 | unsigned long flags; |
74 | 74 | ||
75 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 75 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
76 | 76 | ||
77 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 77 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
78 | if (irq->intid != intid) | 78 | if (irq->intid != intid) |
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
88 | irq = NULL; | 88 | irq = NULL; |
89 | 89 | ||
90 | out_unlock: | 90 | out_unlock: |
91 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 91 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
92 | 92 | ||
93 | return irq; | 93 | return irq; |
94 | } | 94 | } |
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) | |||
138 | if (irq->intid < VGIC_MIN_LPI) | 138 | if (irq->intid < VGIC_MIN_LPI) |
139 | return; | 139 | return; |
140 | 140 | ||
141 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 141 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
142 | if (!kref_put(&irq->refcount, vgic_irq_release)) { | 142 | if (!kref_put(&irq->refcount, vgic_irq_release)) { |
143 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 143 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
144 | return; | 144 | return; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | list_del(&irq->lpi_list); | 147 | list_del(&irq->lpi_list); |
148 | dist->lpi_list_count--; | 148 | dist->lpi_list_count--; |
149 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 149 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
150 | 150 | ||
151 | kfree(irq); | 151 | kfree(irq); |
152 | } | 152 | } |
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
244 | bool penda, pendb; | 244 | bool penda, pendb; |
245 | int ret; | 245 | int ret; |
246 | 246 | ||
247 | spin_lock(&irqa->irq_lock); | 247 | raw_spin_lock(&irqa->irq_lock); |
248 | spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); | 248 | raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); |
249 | 249 | ||
250 | if (irqa->active || irqb->active) { | 250 | if (irqa->active || irqb->active) { |
251 | ret = (int)irqb->active - (int)irqa->active; | 251 | ret = (int)irqb->active - (int)irqa->active; |
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
263 | /* Both pending and enabled, sort by priority */ | 263 | /* Both pending and enabled, sort by priority */ |
264 | ret = irqa->priority - irqb->priority; | 264 | ret = irqa->priority - irqb->priority; |
265 | out: | 265 | out: |
266 | spin_unlock(&irqb->irq_lock); | 266 | raw_spin_unlock(&irqb->irq_lock); |
267 | spin_unlock(&irqa->irq_lock); | 267 | raw_spin_unlock(&irqa->irq_lock); |
268 | return ret; | 268 | return ret; |
269 | } | 269 | } |
270 | 270 | ||
@@ -325,7 +325,7 @@ retry: | |||
325 | * not need to be inserted into an ap_list and there is also | 325 | * not need to be inserted into an ap_list and there is also |
326 | * no more work for us to do. | 326 | * no more work for us to do. |
327 | */ | 327 | */ |
328 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 328 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
329 | 329 | ||
330 | /* | 330 | /* |
331 | * We have to kick the VCPU here, because we could be | 331 | * We have to kick the VCPU here, because we could be |
@@ -347,12 +347,12 @@ retry: | |||
347 | * We must unlock the irq lock to take the ap_list_lock where | 347 | * We must unlock the irq lock to take the ap_list_lock where |
348 | * we are going to insert this new pending interrupt. | 348 | * we are going to insert this new pending interrupt. |
349 | */ | 349 | */ |
350 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 350 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
351 | 351 | ||
352 | /* someone can do stuff here, which we re-check below */ | 352 | /* someone can do stuff here, which we re-check below */ |
353 | 353 | ||
354 | spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 354 | raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
355 | spin_lock(&irq->irq_lock); | 355 | raw_spin_lock(&irq->irq_lock); |
356 | 356 | ||
357 | /* | 357 | /* |
358 | * Did something change behind our backs? | 358 | * Did something change behind our backs? |
@@ -367,10 +367,11 @@ retry: | |||
367 | */ | 367 | */ |
368 | 368 | ||
369 | if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { | 369 | if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { |
370 | spin_unlock(&irq->irq_lock); | 370 | raw_spin_unlock(&irq->irq_lock); |
371 | spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 371 | raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, |
372 | flags); | ||
372 | 373 | ||
373 | spin_lock_irqsave(&irq->irq_lock, flags); | 374 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
374 | goto retry; | 375 | goto retry; |
375 | } | 376 | } |
376 | 377 | ||
@@ -382,8 +383,8 @@ retry: | |||
382 | list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); | 383 | list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); |
383 | irq->vcpu = vcpu; | 384 | irq->vcpu = vcpu; |
384 | 385 | ||
385 | spin_unlock(&irq->irq_lock); | 386 | raw_spin_unlock(&irq->irq_lock); |
386 | spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 387 | raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
387 | 388 | ||
388 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); | 389 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); |
389 | kvm_vcpu_kick(vcpu); | 390 | kvm_vcpu_kick(vcpu); |
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, | |||
430 | if (!irq) | 431 | if (!irq) |
431 | return -EINVAL; | 432 | return -EINVAL; |
432 | 433 | ||
433 | spin_lock_irqsave(&irq->irq_lock, flags); | 434 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
434 | 435 | ||
435 | if (!vgic_validate_injection(irq, level, owner)) { | 436 | if (!vgic_validate_injection(irq, level, owner)) { |
436 | /* Nothing to see here, move along... */ | 437 | /* Nothing to see here, move along... */ |
437 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 438 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
438 | vgic_put_irq(kvm, irq); | 439 | vgic_put_irq(kvm, irq); |
439 | return 0; | 440 | return 0; |
440 | } | 441 | } |
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, | |||
494 | 495 | ||
495 | BUG_ON(!irq); | 496 | BUG_ON(!irq); |
496 | 497 | ||
497 | spin_lock_irqsave(&irq->irq_lock, flags); | 498 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
498 | ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); | 499 | ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); |
499 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 500 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
500 | vgic_put_irq(vcpu->kvm, irq); | 501 | vgic_put_irq(vcpu->kvm, irq); |
501 | 502 | ||
502 | return ret; | 503 | return ret; |
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid) | |||
519 | if (!irq->hw) | 520 | if (!irq->hw) |
520 | goto out; | 521 | goto out; |
521 | 522 | ||
522 | spin_lock_irqsave(&irq->irq_lock, flags); | 523 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
523 | irq->active = false; | 524 | irq->active = false; |
524 | irq->pending_latch = false; | 525 | irq->pending_latch = false; |
525 | irq->line_level = false; | 526 | irq->line_level = false; |
526 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 527 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
527 | out: | 528 | out: |
528 | vgic_put_irq(vcpu->kvm, irq); | 529 | vgic_put_irq(vcpu->kvm, irq); |
529 | } | 530 | } |
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) | |||
539 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); | 540 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); |
540 | BUG_ON(!irq); | 541 | BUG_ON(!irq); |
541 | 542 | ||
542 | spin_lock_irqsave(&irq->irq_lock, flags); | 543 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
543 | kvm_vgic_unmap_irq(irq); | 544 | kvm_vgic_unmap_irq(irq); |
544 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 545 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
545 | vgic_put_irq(vcpu->kvm, irq); | 546 | vgic_put_irq(vcpu->kvm, irq); |
546 | 547 | ||
547 | return 0; | 548 | return 0; |
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner) | |||
571 | return -EINVAL; | 572 | return -EINVAL; |
572 | 573 | ||
573 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); | 574 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); |
574 | spin_lock_irqsave(&irq->irq_lock, flags); | 575 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
575 | if (irq->owner && irq->owner != owner) | 576 | if (irq->owner && irq->owner != owner) |
576 | ret = -EEXIST; | 577 | ret = -EEXIST; |
577 | else | 578 | else |
578 | irq->owner = owner; | 579 | irq->owner = owner; |
579 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 580 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
580 | 581 | ||
581 | return ret; | 582 | return ret; |
582 | } | 583 | } |
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) | |||
597 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); | 598 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); |
598 | 599 | ||
599 | retry: | 600 | retry: |
600 | spin_lock(&vgic_cpu->ap_list_lock); | 601 | raw_spin_lock(&vgic_cpu->ap_list_lock); |
601 | 602 | ||
602 | list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { | 603 | list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { |
603 | struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; | 604 | struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; |
604 | bool target_vcpu_needs_kick = false; | 605 | bool target_vcpu_needs_kick = false; |
605 | 606 | ||
606 | spin_lock(&irq->irq_lock); | 607 | raw_spin_lock(&irq->irq_lock); |
607 | 608 | ||
608 | BUG_ON(vcpu != irq->vcpu); | 609 | BUG_ON(vcpu != irq->vcpu); |
609 | 610 | ||
@@ -616,7 +617,7 @@ retry: | |||
616 | */ | 617 | */ |
617 | list_del(&irq->ap_list); | 618 | list_del(&irq->ap_list); |
618 | irq->vcpu = NULL; | 619 | irq->vcpu = NULL; |
619 | spin_unlock(&irq->irq_lock); | 620 | raw_spin_unlock(&irq->irq_lock); |
620 | 621 | ||
621 | /* | 622 | /* |
622 | * This vgic_put_irq call matches the | 623 | * This vgic_put_irq call matches the |
@@ -631,14 +632,14 @@ retry: | |||
631 | 632 | ||
632 | if (target_vcpu == vcpu) { | 633 | if (target_vcpu == vcpu) { |
633 | /* We're on the right CPU */ | 634 | /* We're on the right CPU */ |
634 | spin_unlock(&irq->irq_lock); | 635 | raw_spin_unlock(&irq->irq_lock); |
635 | continue; | 636 | continue; |
636 | } | 637 | } |
637 | 638 | ||
638 | /* This interrupt looks like it has to be migrated. */ | 639 | /* This interrupt looks like it has to be migrated. */ |
639 | 640 | ||
640 | spin_unlock(&irq->irq_lock); | 641 | raw_spin_unlock(&irq->irq_lock); |
641 | spin_unlock(&vgic_cpu->ap_list_lock); | 642 | raw_spin_unlock(&vgic_cpu->ap_list_lock); |
642 | 643 | ||
643 | /* | 644 | /* |
644 | * Ensure locking order by always locking the smallest | 645 | * Ensure locking order by always locking the smallest |
@@ -652,10 +653,10 @@ retry: | |||
652 | vcpuB = vcpu; | 653 | vcpuB = vcpu; |
653 | } | 654 | } |
654 | 655 | ||
655 | spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); | 656 | raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); |
656 | spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, | 657 | raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, |
657 | SINGLE_DEPTH_NESTING); | 658 | SINGLE_DEPTH_NESTING); |
658 | spin_lock(&irq->irq_lock); | 659 | raw_spin_lock(&irq->irq_lock); |
659 | 660 | ||
660 | /* | 661 | /* |
661 | * If the affinity has been preserved, move the | 662 | * If the affinity has been preserved, move the |
@@ -675,9 +676,9 @@ retry: | |||
675 | target_vcpu_needs_kick = true; | 676 | target_vcpu_needs_kick = true; |
676 | } | 677 | } |
677 | 678 | ||
678 | spin_unlock(&irq->irq_lock); | 679 | raw_spin_unlock(&irq->irq_lock); |
679 | spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); | 680 | raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); |
680 | spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); | 681 | raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); |
681 | 682 | ||
682 | if (target_vcpu_needs_kick) { | 683 | if (target_vcpu_needs_kick) { |
683 | kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); | 684 | kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); |
@@ -687,7 +688,7 @@ retry: | |||
687 | goto retry; | 688 | goto retry; |
688 | } | 689 | } |
689 | 690 | ||
690 | spin_unlock(&vgic_cpu->ap_list_lock); | 691 | raw_spin_unlock(&vgic_cpu->ap_list_lock); |
691 | } | 692 | } |
692 | 693 | ||
693 | static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) | 694 | static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) |
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu, | |||
741 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 742 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
742 | int w; | 743 | int w; |
743 | 744 | ||
744 | spin_lock(&irq->irq_lock); | 745 | raw_spin_lock(&irq->irq_lock); |
745 | /* GICv2 SGIs can count for more than one... */ | 746 | /* GICv2 SGIs can count for more than one... */ |
746 | w = vgic_irq_get_lr_count(irq); | 747 | w = vgic_irq_get_lr_count(irq); |
747 | spin_unlock(&irq->irq_lock); | 748 | raw_spin_unlock(&irq->irq_lock); |
748 | 749 | ||
749 | count += w; | 750 | count += w; |
750 | *multi_sgi |= (w > 1); | 751 | *multi_sgi |= (w > 1); |
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
770 | count = 0; | 771 | count = 0; |
771 | 772 | ||
772 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 773 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
773 | spin_lock(&irq->irq_lock); | 774 | raw_spin_lock(&irq->irq_lock); |
774 | 775 | ||
775 | /* | 776 | /* |
776 | * If we have multi-SGIs in the pipeline, we need to | 777 | * If we have multi-SGIs in the pipeline, we need to |
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
780 | * the AP list has been sorted already. | 781 | * the AP list has been sorted already. |
781 | */ | 782 | */ |
782 | if (multi_sgi && irq->priority > prio) { | 783 | if (multi_sgi && irq->priority > prio) { |
783 | spin_unlock(&irq->irq_lock); | 784 | raw_spin_unlock(&irq->irq_lock); |
784 | break; | 785 | break; |
785 | } | 786 | } |
786 | 787 | ||
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
791 | prio = irq->priority; | 792 | prio = irq->priority; |
792 | } | 793 | } |
793 | 794 | ||
794 | spin_unlock(&irq->irq_lock); | 795 | raw_spin_unlock(&irq->irq_lock); |
795 | 796 | ||
796 | if (count == kvm_vgic_global_state.nr_lr) { | 797 | if (count == kvm_vgic_global_state.nr_lr) { |
797 | if (!list_is_last(&irq->ap_list, | 798 | if (!list_is_last(&irq->ap_list, |
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
872 | 873 | ||
873 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); | 874 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); |
874 | 875 | ||
875 | spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); | 876 | raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); |
876 | vgic_flush_lr_state(vcpu); | 877 | vgic_flush_lr_state(vcpu); |
877 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | 878 | raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); |
878 | 879 | ||
879 | if (can_access_vgic_from_kernel()) | 880 | if (can_access_vgic_from_kernel()) |
880 | vgic_restore_state(vcpu); | 881 | vgic_restore_state(vcpu); |
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
918 | 919 | ||
919 | vgic_get_vmcr(vcpu, &vmcr); | 920 | vgic_get_vmcr(vcpu, &vmcr); |
920 | 921 | ||
921 | spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); | 922 | raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); |
922 | 923 | ||
923 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 924 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
924 | spin_lock(&irq->irq_lock); | 925 | raw_spin_lock(&irq->irq_lock); |
925 | pending = irq_is_pending(irq) && irq->enabled && | 926 | pending = irq_is_pending(irq) && irq->enabled && |
926 | !irq->active && | 927 | !irq->active && |
927 | irq->priority < vmcr.pmr; | 928 | irq->priority < vmcr.pmr; |
928 | spin_unlock(&irq->irq_lock); | 929 | raw_spin_unlock(&irq->irq_lock); |
929 | 930 | ||
930 | if (pending) | 931 | if (pending) |
931 | break; | 932 | break; |
932 | } | 933 | } |
933 | 934 | ||
934 | spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); | 935 | raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); |
935 | 936 | ||
936 | return pending; | 937 | return pending; |
937 | } | 938 | } |
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid) | |||
963 | return false; | 964 | return false; |
964 | 965 | ||
965 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); | 966 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); |
966 | spin_lock_irqsave(&irq->irq_lock, flags); | 967 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
967 | map_is_active = irq->hw && irq->active; | 968 | map_is_active = irq->hw && irq->active; |
968 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 969 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
969 | vgic_put_irq(vcpu->kvm, irq); | 970 | vgic_put_irq(vcpu->kvm, irq); |
970 | 971 | ||
971 | return map_is_active; | 972 | return map_is_active; |
972 | } | 973 | } |
973 | |||