author     Dave Airlie <airlied@redhat.com>    2017-06-26 17:24:49 -0400
committer  Dave Airlie <airlied@redhat.com>    2017-06-26 18:28:30 -0400
commit     6d61e70ccc21606ffb8a0a03bd3aba24f659502b (patch)
tree       69f5bfb29d085cc42839445d34170bd3ee4f7408
parent     338ffbf7cb5eee0ed4600650d03cd2d7cd1cac9d (diff)
parent     c0bc126f97fb929b3ae02c1c62322645d70eb408 (diff)
Backmerge tag 'v4.12-rc7' into drm-next
Linux 4.12-rc7
Needed at least rc6 for drm-misc-next-fixes, may as well go to rc7
461 files changed, 4152 insertions, 2758 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 0f5c3b4347c6..7737ab5d04b2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -3811,6 +3811,13 @@ | |||
3811 | expediting. Set to zero to disable automatic | 3811 | expediting. Set to zero to disable automatic |
3812 | expediting. | 3812 | expediting. |
3813 | 3813 | ||
3814 | stack_guard_gap= [MM] | ||
3815 | override the default stack gap protection. The value | ||
3816 | is in page units and it defines how many pages prior | ||
3817 | to (for stacks growing down) resp. after (for stacks | ||
3818 | growing up) the main stack are reserved for no other | ||
3819 | mapping. Default value is 256 pages. | ||
3820 | |||
3814 | stacktrace [FTRACE] | 3821 | stacktrace [FTRACE] |
3815 | Enabled the stack tracer on boot up. | 3822 | Enabled the stack tracer on boot up. |
3816 | 3823 | ||
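The stack_guard_gap value documented above is given in pages on the kernel command line. As an illustration (not part of this patch), assuming 4 KiB pages, a boot entry that doubles the default 256-page (1 MiB) gap would pass:

    linux /boot/vmlinuz-4.12.0 root=/dev/sda1 ro stack_guard_gap=512

stack_guard_gap=1 falls back to the single guard page used before this change; the kernel image path and root device above are placeholders.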
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt index e9c5a1d9834a..f465647a4dd2 100644 --- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt | |||
@@ -22,7 +22,8 @@ Required properties : | |||
22 | - #clock-cells : must contain 1 | 22 | - #clock-cells : must contain 1 |
23 | - #reset-cells : must contain 1 | 23 | - #reset-cells : must contain 1 |
24 | 24 | ||
25 | For the PRCM CCUs on H3/A64, one more clock is needed: | 25 | For the PRCM CCUs on H3/A64, two more clocks are needed: |
26 | - "pll-periph": the SoC's peripheral PLL from the main CCU | ||
26 | - "iosc": the SoC's internal frequency oscillator | 27 | - "iosc": the SoC's internal frequency oscillator |
27 | 28 | ||
28 | Example for generic CCU: | 29 | Example for generic CCU: |
@@ -39,8 +40,8 @@ Example for PRCM CCU: | |||
39 | r_ccu: clock@01f01400 { | 40 | r_ccu: clock@01f01400 { |
40 | compatible = "allwinner,sun50i-a64-r-ccu"; | 41 | compatible = "allwinner,sun50i-a64-r-ccu"; |
41 | reg = <0x01f01400 0x100>; | 42 | reg = <0x01f01400 0x100>; |
42 | clocks = <&osc24M>, <&osc32k>, <&iosc>; | 43 | clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>; |
43 | clock-names = "hosc", "losc", "iosc"; | 44 | clock-names = "hosc", "losc", "iosc", "pll-periph"; |
44 | #clock-cells = <1>; | 45 | #clock-cells = <1>; |
45 | #reset-cells = <1>; | 46 | #reset-cells = <1>; |
46 | }; | 47 | }; |
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt index 42c3bb2d53e8..01e331a5f3e7 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt +++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt | |||
@@ -41,9 +41,9 @@ Required properties: | |||
41 | Optional properties: | 41 | Optional properties: |
42 | 42 | ||
43 | In order to use the GPIO lines in PWM mode, some additional optional | 43 | In order to use the GPIO lines in PWM mode, some additional optional |
44 | properties are required. Only Armada 370 and XP support these properties. | 44 | properties are required. |
45 | 45 | ||
46 | - compatible: Must contain "marvell,armada-370-xp-gpio" | 46 | - compatible: Must contain "marvell,armada-370-gpio" |
47 | 47 | ||
48 | - reg: an additional register set is needed, for the GPIO Blink | 48 | - reg: an additional register set is needed, for the GPIO Blink |
49 | Counter on/off registers. | 49 | Counter on/off registers. |
@@ -71,7 +71,7 @@ Example: | |||
71 | }; | 71 | }; |
72 | 72 | ||
73 | gpio1: gpio@18140 { | 73 | gpio1: gpio@18140 { |
74 | compatible = "marvell,armada-370-xp-gpio"; | 74 | compatible = "marvell,armada-370-gpio"; |
75 | reg = <0x18140 0x40>, <0x181c8 0x08>; | 75 | reg = <0x18140 0x40>, <0x181c8 0x08>; |
76 | reg-names = "gpio", "pwm"; | 76 | reg-names = "gpio", "pwm"; |
77 | ngpios = <17>; | 77 | ngpios = <17>; |
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt index bbd083f5600a..1db6e0057a63 100644 --- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt +++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt | |||
@@ -31,7 +31,7 @@ Example: | |||
31 | compatible = "st,stm32-timers"; | 31 | compatible = "st,stm32-timers"; |
32 | reg = <0x40010000 0x400>; | 32 | reg = <0x40010000 0x400>; |
33 | clocks = <&rcc 0 160>; | 33 | clocks = <&rcc 0 160>; |
34 | clock-names = "clk_int"; | 34 | clock-names = "int"; |
35 | 35 | ||
36 | pwm { | 36 | pwm { |
37 | compatible = "st,stm32-pwm"; | 37 | compatible = "st,stm32-pwm"; |
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt index d6c6e41648d4..8ec2ca21adeb 100644 --- a/Documentation/devicetree/bindings/net/dsa/b53.txt +++ b/Documentation/devicetree/bindings/net/dsa/b53.txt | |||
@@ -34,7 +34,7 @@ Required properties: | |||
34 | "brcm,bcm6328-switch" | 34 | "brcm,bcm6328-switch" |
35 | "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch" | 35 | "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch" |
36 | 36 | ||
37 | See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional | 37 | See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional |
38 | required and optional properties. | 38 | required and optional properties. |
39 | 39 | ||
40 | Examples: | 40 | Examples: |
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt index 16c3a9501f5d..acfafc8e143c 100644 --- a/Documentation/devicetree/bindings/net/smsc911x.txt +++ b/Documentation/devicetree/bindings/net/smsc911x.txt | |||
@@ -27,6 +27,7 @@ Optional properties: | |||
27 | of the device. On many systems this is wired high so the device goes | 27 | of the device. On many systems this is wired high so the device goes |
28 | out of reset at power-on, but if it is under program control, this | 28 | out of reset at power-on, but if it is under program control, this |
29 | optional GPIO can wake up in response to it. | 29 | optional GPIO can wake up in response to it. |
30 | - vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies | ||
30 | 31 | ||
31 | Examples: | 32 | Examples: |
32 | 33 | ||
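As an illustration of the two new supply properties (not taken from the binding's own example), a node for this controller might reference board regulators like so; the unit address and regulator phandles are placeholders:

    ethernet@f4000000 {
            compatible = "smsc,lan9115";
            reg = <0xf4000000 0x2000000>;
            vdd33a-supply = <&reg_3v3>;     /* 3.3V analog supply */
            vddvario-supply = <&reg_1v8>;   /* IO logic supply */
    };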
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index 59f4db2a0c85..f55639d71d35 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt | |||
@@ -122,7 +122,7 @@ associated flow of the packet. The hash is either provided by hardware | |||
122 | or will be computed in the stack. Capable hardware can pass the hash in | 122 | or will be computed in the stack. Capable hardware can pass the hash in |
123 | the receive descriptor for the packet; this would usually be the same | 123 | the receive descriptor for the packet; this would usually be the same |
124 | hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in | 124 | hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in |
125 | skb->rx_hash and can be used elsewhere in the stack as a hash of the | 125 | skb->hash and can be used elsewhere in the stack as a hash of the |
126 | packet’s flow. | 126 | packet’s flow. |
127 | 127 | ||
128 | Each receive hardware queue has an associated list of CPUs to which | 128 | Each receive hardware queue has an associated list of CPUs to which |
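A minimal C sketch (not from this patch) of how a consumer uses the renamed field: skb_get_hash() returns skb->hash, computing a software flow hash first if the NIC did not supply one in the receive descriptor. The helper name flow_to_queue() is illustrative:

    #include <linux/skbuff.h>

    /* Spread flows across nr_queues using the flow hash described above. */
    static u32 flow_to_queue(struct sk_buff *skb, u32 nr_queues)
    {
            return skb_get_hash(skb) % nr_queues;
    }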
diff --git a/Makefile b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 12 | 2 | PATCHLEVEL = 12 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc7 |
5 | NAME = Fearless Coyote | 5 | NAME = Fearless Coyote |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -1437,7 +1437,7 @@ help: | |||
1437 | @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' | 1437 | @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' |
1438 | @echo ' make V=2 [targets] 2 => give reason for rebuild of target' | 1438 | @echo ' make V=2 [targets] 2 => give reason for rebuild of target' |
1439 | @echo ' make O=dir [targets] Locate all output files in "dir", including .config' | 1439 | @echo ' make O=dir [targets] Locate all output files in "dir", including .config' |
1440 | @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' | 1440 | @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)' |
1441 | @echo ' make C=2 [targets] Force check of all c source with $$CHECK' | 1441 | @echo ' make C=2 [targets] Force check of all c source with $$CHECK' |
1442 | @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' | 1442 | @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' |
1443 | @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where' | 1443 | @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where' |
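For example (illustrative build targets, not from this patch), the two modes differ only in which objects get checked:

    make C=1 drivers/gpu/drm/              # run sparse on sources that are re-compiled
    make C=2 CHECK=sparse drivers/gpu/drm/ # force sparse over every source that is built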
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 3e25e8d6486b..2e13683dfb24 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c | |||
@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
65 | 65 | ||
66 | vma = find_vma(mm, addr); | 66 | vma = find_vma(mm, addr); |
67 | if (TASK_SIZE - len >= addr && | 67 | if (TASK_SIZE - len >= addr && |
68 | (!vma || addr + len <= vma->vm_start)) | 68 | (!vma || addr + len <= vm_start_gap(vma))) |
69 | return addr; | 69 | return addr; |
70 | } | 70 | } |
71 | 71 | ||
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts index c5d2589c55fc..fc864a855991 100644 --- a/arch/arm/boot/dts/am335x-sl50.dts +++ b/arch/arm/boot/dts/am335x-sl50.dts | |||
@@ -220,7 +220,7 @@ | |||
220 | 220 | ||
221 | mmc1_pins: pinmux_mmc1_pins { | 221 | mmc1_pins: pinmux_mmc1_pins { |
222 | pinctrl-single,pins = < | 222 | pinctrl-single,pins = < |
223 | AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */ | 223 | AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */ |
224 | >; | 224 | >; |
225 | }; | 225 | }; |
226 | 226 | ||
@@ -280,10 +280,6 @@ | |||
280 | AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */ | 280 | AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */ |
281 | AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */ | 281 | AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */ |
282 | AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */ | 282 | AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */ |
283 | /* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */ | ||
284 | AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */ | ||
285 | AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */ | ||
286 | AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */ | ||
287 | /* PDI Bus - Battery system */ | 283 | /* PDI Bus - Battery system */ |
288 | AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */ | 284 | AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */ |
289 | AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */ | 285 | AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */ |
@@ -384,7 +380,7 @@ | |||
384 | pinctrl-names = "default"; | 380 | pinctrl-names = "default"; |
385 | pinctrl-0 = <&mmc1_pins>; | 381 | pinctrl-0 = <&mmc1_pins>; |
386 | bus-width = <4>; | 382 | bus-width = <4>; |
387 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; | 383 | cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>; |
388 | vmmc-supply = <&vmmcsd_fixed>; | 384 | vmmc-supply = <&vmmcsd_fixed>; |
389 | }; | 385 | }; |
390 | 386 | ||
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi index 1aeeacb3a884..d4f600dbb7eb 100644 --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi | |||
@@ -558,10 +558,11 @@ | |||
558 | }; | 558 | }; |
559 | 559 | ||
560 | r_ccu: clock@1f01400 { | 560 | r_ccu: clock@1f01400 { |
561 | compatible = "allwinner,sun50i-a64-r-ccu"; | 561 | compatible = "allwinner,sun8i-h3-r-ccu"; |
562 | reg = <0x01f01400 0x100>; | 562 | reg = <0x01f01400 0x100>; |
563 | clocks = <&osc24M>, <&osc32k>, <&iosc>; | 563 | clocks = <&osc24M>, <&osc32k>, <&iosc>, |
564 | clock-names = "hosc", "losc", "iosc"; | 564 | <&ccu 9>; |
565 | clock-names = "hosc", "losc", "iosc", "pll-periph"; | ||
565 | #clock-cells = <1>; | 566 | #clock-cells = <1>; |
566 | #reset-cells = <1>; | 567 | #reset-cells = <1>; |
567 | }; | 568 | }; |
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 2239fde10b80..f0701d8d24df 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c | |||
@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
90 | 90 | ||
91 | vma = find_vma(mm, addr); | 91 | vma = find_vma(mm, addr); |
92 | if (TASK_SIZE - len >= addr && | 92 | if (TASK_SIZE - len >= addr && |
93 | (!vma || addr + len <= vma->vm_start)) | 93 | (!vma || addr + len <= vm_start_gap(vma))) |
94 | return addr; | 94 | return addr; |
95 | } | 95 | } |
96 | 96 | ||
@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
141 | addr = PAGE_ALIGN(addr); | 141 | addr = PAGE_ALIGN(addr); |
142 | vma = find_vma(mm, addr); | 142 | vma = find_vma(mm, addr); |
143 | if (TASK_SIZE - len >= addr && | 143 | if (TASK_SIZE - len >= addr && |
144 | (!vma || addr + len <= vma->vm_start)) | 144 | (!vma || addr + len <= vm_start_gap(vma))) |
145 | return addr; | 145 | return addr; |
146 | } | 146 | } |
147 | 147 | ||
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index c7f669f5884f..166c9ef884dc 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | |||
@@ -406,8 +406,9 @@ | |||
406 | r_ccu: clock@1f01400 { | 406 | r_ccu: clock@1f01400 { |
407 | compatible = "allwinner,sun50i-a64-r-ccu"; | 407 | compatible = "allwinner,sun50i-a64-r-ccu"; |
408 | reg = <0x01f01400 0x100>; | 408 | reg = <0x01f01400 0x100>; |
409 | clocks = <&osc24M>, <&osc32k>, <&iosc>; | 409 | clocks = <&osc24M>, <&osc32k>, <&iosc>, |
410 | clock-names = "hosc", "losc", "iosc"; | 410 | <&ccu 11>; |
411 | clock-names = "hosc", "losc", "iosc", "pll-periph"; | ||
411 | #clock-cells = <1>; | 412 | #clock-cells = <1>; |
412 | #reset-cells = <1>; | 413 | #reset-cells = <1>; |
413 | }; | 414 | }; |
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi index 4d314a253fd9..732e2e06f503 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi | |||
@@ -40,7 +40,7 @@ | |||
40 | * OTHER DEALINGS IN THE SOFTWARE. | 40 | * OTHER DEALINGS IN THE SOFTWARE. |
41 | */ | 41 | */ |
42 | 42 | ||
43 | #include "sunxi-h3-h5.dtsi" | 43 | #include <arm/sunxi-h3-h5.dtsi> |
44 | 44 | ||
45 | / { | 45 | / { |
46 | cpus { | 46 | cpus { |
diff --git a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi b/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi deleted file mode 120000 index 036f01dc2b9b..000000000000 --- a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | ../../../../arm/boot/dts/sunxi-h3-h5.dtsi \ No newline at end of file | ||
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 41b6e31f8f55..d0cb007fa482 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c | |||
@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk) | |||
221 | /* tkr_mono.cycle_last == tkr_raw.cycle_last */ | 221 | /* tkr_mono.cycle_last == tkr_raw.cycle_last */ |
222 | vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; | 222 | vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; |
223 | vdso_data->raw_time_sec = tk->raw_time.tv_sec; | 223 | vdso_data->raw_time_sec = tk->raw_time.tv_sec; |
224 | vdso_data->raw_time_nsec = tk->raw_time.tv_nsec; | 224 | vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec << |
225 | tk->tkr_raw.shift) + | ||
226 | tk->tkr_raw.xtime_nsec; | ||
225 | vdso_data->xtime_clock_sec = tk->xtime_sec; | 227 | vdso_data->xtime_clock_sec = tk->xtime_sec; |
226 | vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; | 228 | vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; |
227 | /* tkr_raw.xtime_nsec == 0 */ | ||
228 | vdso_data->cs_mono_mult = tk->tkr_mono.mult; | 229 | vdso_data->cs_mono_mult = tk->tkr_mono.mult; |
229 | vdso_data->cs_raw_mult = tk->tkr_raw.mult; | 230 | vdso_data->cs_raw_mult = tk->tkr_raw.mult; |
230 | /* tkr_mono.shift == tkr_raw.shift */ | 231 | /* tkr_mono.shift == tkr_raw.shift */ |
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index e00b4671bd7c..76320e920965 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S | |||
@@ -256,7 +256,6 @@ monotonic_raw: | |||
256 | seqcnt_check fail=monotonic_raw | 256 | seqcnt_check fail=monotonic_raw |
257 | 257 | ||
258 | /* All computations are done with left-shifted nsecs. */ | 258 | /* All computations are done with left-shifted nsecs. */ |
259 | lsl x14, x14, x12 | ||
260 | get_nsec_per_sec res=x9 | 259 | get_nsec_per_sec res=x9 |
261 | lsl x9, x9, x12 | 260 | lsl x9, x9, x12 |
262 | 261 | ||
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 71f930501ade..c870d6f01ac2 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c | |||
@@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly; | |||
36 | #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) | 36 | #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) |
37 | #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) | 37 | #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) |
38 | #define TCALL_CNT (MAX_BPF_JIT_REG + 2) | 38 | #define TCALL_CNT (MAX_BPF_JIT_REG + 2) |
39 | #define TMP_REG_3 (MAX_BPF_JIT_REG + 3) | ||
39 | 40 | ||
40 | /* Map BPF registers to A64 registers */ | 41 | /* Map BPF registers to A64 registers */ |
41 | static const int bpf2a64[] = { | 42 | static const int bpf2a64[] = { |
@@ -57,6 +58,7 @@ static const int bpf2a64[] = { | |||
57 | /* temporary registers for internal BPF JIT */ | 58 | /* temporary registers for internal BPF JIT */ |
58 | [TMP_REG_1] = A64_R(10), | 59 | [TMP_REG_1] = A64_R(10), |
59 | [TMP_REG_2] = A64_R(11), | 60 | [TMP_REG_2] = A64_R(11), |
61 | [TMP_REG_3] = A64_R(12), | ||
60 | /* tail_call_cnt */ | 62 | /* tail_call_cnt */ |
61 | [TCALL_CNT] = A64_R(26), | 63 | [TCALL_CNT] = A64_R(26), |
62 | /* temporary register for blinding constants */ | 64 | /* temporary register for blinding constants */ |
@@ -319,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) | |||
319 | const u8 src = bpf2a64[insn->src_reg]; | 321 | const u8 src = bpf2a64[insn->src_reg]; |
320 | const u8 tmp = bpf2a64[TMP_REG_1]; | 322 | const u8 tmp = bpf2a64[TMP_REG_1]; |
321 | const u8 tmp2 = bpf2a64[TMP_REG_2]; | 323 | const u8 tmp2 = bpf2a64[TMP_REG_2]; |
324 | const u8 tmp3 = bpf2a64[TMP_REG_3]; | ||
322 | const s16 off = insn->off; | 325 | const s16 off = insn->off; |
323 | const s32 imm = insn->imm; | 326 | const s32 imm = insn->imm; |
324 | const int i = insn - ctx->prog->insnsi; | 327 | const int i = insn - ctx->prog->insnsi; |
@@ -689,10 +692,10 @@ emit_cond_jmp: | |||
689 | emit(A64_PRFM(tmp, PST, L1, STRM), ctx); | 692 | emit(A64_PRFM(tmp, PST, L1, STRM), ctx); |
690 | emit(A64_LDXR(isdw, tmp2, tmp), ctx); | 693 | emit(A64_LDXR(isdw, tmp2, tmp), ctx); |
691 | emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); | 694 | emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); |
692 | emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx); | 695 | emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx); |
693 | jmp_offset = -3; | 696 | jmp_offset = -3; |
694 | check_imm19(jmp_offset); | 697 | check_imm19(jmp_offset); |
695 | emit(A64_CBNZ(0, tmp2, jmp_offset), ctx); | 698 | emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); |
696 | break; | 699 | break; |
697 | 700 | ||
698 | /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ | 701 | /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ |
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index da82c25301e7..46aa289c5102 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c | |||
@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |||
75 | addr = PAGE_ALIGN(addr); | 75 | addr = PAGE_ALIGN(addr); |
76 | vma = find_vma(current->mm, addr); | 76 | vma = find_vma(current->mm, addr); |
77 | if (TASK_SIZE - len >= addr && | 77 | if (TASK_SIZE - len >= addr && |
78 | (!vma || addr + len <= vma->vm_start)) | 78 | (!vma || addr + len <= vm_start_gap(vma))) |
79 | goto success; | 79 | goto success; |
80 | } | 80 | } |
81 | 81 | ||
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 2728a9a9c7c5..145b5ce8eb7e 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile | |||
@@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@ | |||
128 | -DADDR_BITS=$(ADDR_BITS) \ | 128 | -DADDR_BITS=$(ADDR_BITS) \ |
129 | -DADDR_CELLS=$(itb_addr_cells) | 129 | -DADDR_CELLS=$(itb_addr_cells) |
130 | 130 | ||
131 | $(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE | 131 | $(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE |
132 | $(call if_changed_dep,cpp_its_S,none,vmlinux.bin) | 132 | $(call if_changed_dep,cpp_its_S,none,vmlinux.bin) |
133 | 133 | ||
134 | $(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE | 134 | $(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE |
135 | $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz) | 135 | $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz) |
136 | 136 | ||
137 | $(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE | 137 | $(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE |
138 | $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2) | 138 | $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2) |
139 | 139 | ||
140 | $(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE | 140 | $(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE |
141 | $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma) | 141 | $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma) |
142 | 142 | ||
143 | $(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE | 143 | $(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE |
144 | $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo) | 144 | $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo) |
145 | 145 | ||
146 | quiet_cmd_itb-image = ITB $@ | 146 | quiet_cmd_itb-image = ITB $@ |
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h index d34536e7653f..279b6d14ffeb 100644 --- a/arch/mips/include/asm/highmem.h +++ b/arch/mips/include/asm/highmem.h | |||
@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table; | |||
35 | * easily, subsequent pte tables have to be allocated in one physical | 35 | * easily, subsequent pte tables have to be allocated in one physical |
36 | * chunk of RAM. | 36 | * chunk of RAM. |
37 | */ | 37 | */ |
38 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
39 | #define LAST_PKMAP 512 | ||
40 | #else | ||
38 | #define LAST_PKMAP 1024 | 41 | #define LAST_PKMAP 1024 |
42 | #endif | ||
43 | |||
39 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) | 44 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) |
40 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) | 45 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) |
41 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | 46 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) |
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h index 291846d9ba83..ad1a99948f27 100644 --- a/arch/mips/include/asm/kprobes.h +++ b/arch/mips/include/asm/kprobes.h | |||
@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t; | |||
43 | 43 | ||
44 | #define flush_insn_slot(p) \ | 44 | #define flush_insn_slot(p) \ |
45 | do { \ | 45 | do { \ |
46 | flush_icache_range((unsigned long)p->addr, \ | 46 | if (p->addr) \ |
47 | flush_icache_range((unsigned long)p->addr, \ | ||
47 | (unsigned long)p->addr + \ | 48 | (unsigned long)p->addr + \ |
48 | (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ | 49 | (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ |
49 | } while (0) | 50 | } while (0) |
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 6f94bed571c4..74afe8c76bdd 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h | |||
@@ -19,6 +19,10 @@ | |||
19 | #define __ARCH_USE_5LEVEL_HACK | 19 | #define __ARCH_USE_5LEVEL_HACK |
20 | #include <asm-generic/pgtable-nopmd.h> | 20 | #include <asm-generic/pgtable-nopmd.h> |
21 | 21 | ||
22 | #ifdef CONFIG_HIGHMEM | ||
23 | #include <asm/highmem.h> | ||
24 | #endif | ||
25 | |||
22 | extern int temp_tlb_entry; | 26 | extern int temp_tlb_entry; |
23 | 27 | ||
24 | /* | 28 | /* |
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
62 | 66 | ||
63 | #define VMALLOC_START MAP_BASE | 67 | #define VMALLOC_START MAP_BASE |
64 | 68 | ||
65 | #define PKMAP_BASE (0xfe000000UL) | 69 | #define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1)) |
70 | #define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP) | ||
66 | 71 | ||
67 | #ifdef CONFIG_HIGHMEM | 72 | #ifdef CONFIG_HIGHMEM |
68 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) | 73 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) |
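Worked through as a sketch with 4 KiB pages (PAGE_SHIFT = 12) and the new 64-bit-phys value LAST_PKMAP = 512, the macros above evaluate to:

    LAST_PKMAP << PAGE_SHIFT = 512 * 4096 = 0x200000             (2 MiB window)
    PKMAP_END  = FIXADDR_START & ~0x1fffff                       (round down to 2 MiB)
    PKMAP_BASE = PKMAP_END - 512 * 4096 = PKMAP_END - 0x200000

so the pkmap window becomes the naturally aligned 2 MiB block ending at FIXADDR_START rounded down to a 2 MiB boundary, rather than the old fixed 0xfe000000 base.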
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index b11facd11c9d..f702a459a830 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c | |||
@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
804 | break; | 804 | break; |
805 | } | 805 | } |
806 | /* Compact branch: BNEZC || JIALC */ | 806 | /* Compact branch: BNEZC || JIALC */ |
807 | if (insn.i_format.rs) | 807 | if (!insn.i_format.rs) { |
808 | /* JIALC: set $31/ra */ | ||
808 | regs->regs[31] = epc + 4; | 809 | regs->regs[31] = epc + 4; |
810 | } | ||
809 | regs->cp0_epc += 8; | 811 | regs->cp0_epc += 8; |
810 | break; | 812 | break; |
811 | #endif | 813 | #endif |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 30a3b75e88eb..9d9b8fbae202 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command) | |||
38 | 38 | ||
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | /* | ||
42 | * Check if the address is in kernel space | ||
43 | * | ||
44 | * Clone core_kernel_text() from kernel/extable.c, but doesn't call | ||
45 | * init_kernel_text() for Ftrace doesn't trace functions in init sections. | ||
46 | */ | ||
47 | static inline int in_kernel_space(unsigned long ip) | ||
48 | { | ||
49 | if (ip >= (unsigned long)_stext && | ||
50 | ip <= (unsigned long)_etext) | ||
51 | return 1; | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | #ifdef CONFIG_DYNAMIC_FTRACE | 41 | #ifdef CONFIG_DYNAMIC_FTRACE |
56 | 42 | ||
57 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ | 43 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ |
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod, | |||
198 | * If ip is in kernel space, no long call, otherwise, long call is | 184 | * If ip is in kernel space, no long call, otherwise, long call is |
199 | * needed. | 185 | * needed. |
200 | */ | 186 | */ |
201 | new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; | 187 | new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F; |
202 | #ifdef CONFIG_64BIT | 188 | #ifdef CONFIG_64BIT |
203 | return ftrace_modify_code(ip, new); | 189 | return ftrace_modify_code(ip, new); |
204 | #else | 190 | #else |
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
218 | unsigned int new; | 204 | unsigned int new; |
219 | unsigned long ip = rec->ip; | 205 | unsigned long ip = rec->ip; |
220 | 206 | ||
221 | new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; | 207 | new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; |
222 | 208 | ||
223 | #ifdef CONFIG_64BIT | 209 | #ifdef CONFIG_64BIT |
224 | return ftrace_modify_code(ip, new); | 210 | return ftrace_modify_code(ip, new); |
225 | #else | 211 | #else |
226 | return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ? | 212 | return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ? |
227 | INSN_NOP : insn_la_mcount[1]); | 213 | INSN_NOP : insn_la_mcount[1]); |
228 | #endif | 214 | #endif |
229 | } | 215 | } |
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long | |||
289 | * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for | 275 | * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for |
290 | * kernel, move after the instruction "move ra, at"(offset is 16) | 276 | * kernel, move after the instruction "move ra, at"(offset is 16) |
291 | */ | 277 | */ |
292 | ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); | 278 | ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24); |
293 | 279 | ||
294 | /* | 280 | /* |
295 | * search the text until finding the non-store instruction or "s{d,w} | 281 | * search the text until finding the non-store instruction or "s{d,w} |
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, | |||
394 | * entries configured through the tracing/set_graph_function interface. | 380 | * entries configured through the tracing/set_graph_function interface. |
395 | */ | 381 | */ |
396 | 382 | ||
397 | insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; | 383 | insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; |
398 | trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); | 384 | trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); |
399 | 385 | ||
400 | /* Only trace if the calling function expects to */ | 386 | /* Only trace if the calling function expects to */ |
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 313a88b2973f..f3e301f95aef 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1597 | break; | 1597 | break; |
1598 | case CPU_P5600: | 1598 | case CPU_P5600: |
1599 | case CPU_P6600: | 1599 | case CPU_P6600: |
1600 | case CPU_I6400: | ||
1601 | /* 8-bit event numbers */ | 1600 | /* 8-bit event numbers */ |
1602 | raw_id = config & 0x1ff; | 1601 | raw_id = config & 0x1ff; |
1603 | base_id = raw_id & 0xff; | 1602 | base_id = raw_id & 0xff; |
@@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | |||
1610 | raw_event.range = P; | 1609 | raw_event.range = P; |
1611 | #endif | 1610 | #endif |
1612 | break; | 1611 | break; |
1612 | case CPU_I6400: | ||
1613 | /* 8-bit event numbers */ | ||
1614 | base_id = config & 0xff; | ||
1615 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1616 | break; | ||
1613 | case CPU_1004K: | 1617 | case CPU_1004K: |
1614 | if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) | 1618 | if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) |
1615 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | 1619 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; |
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 7c6336dd2638..7cd92166a0b9 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c | |||
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi) | |||
166 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, | 166 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, |
167 | bool user, bool kernel) | 167 | bool user, bool kernel) |
168 | { | 168 | { |
169 | int idx_user, idx_kernel; | 169 | /* |
170 | * Initialize idx_user and idx_kernel to workaround bogus | ||
171 | * maybe-initialized warning when using GCC 6. | ||
172 | */ | ||
173 | int idx_user = 0, idx_kernel = 0; | ||
170 | unsigned long flags, old_entryhi; | 174 | unsigned long flags, old_entryhi; |
171 | 175 | ||
172 | local_irq_save(flags); | 176 | local_irq_save(flags); |
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 64dd8bdd92c3..28adeabe851f 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c | |||
@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, | |||
93 | 93 | ||
94 | vma = find_vma(mm, addr); | 94 | vma = find_vma(mm, addr); |
95 | if (TASK_SIZE - len >= addr && | 95 | if (TASK_SIZE - len >= addr && |
96 | (!vma || addr + len <= vma->vm_start)) | 96 | (!vma || addr + len <= vm_start_gap(vma))) |
97 | return addr; | 97 | return addr; |
98 | } | 98 | } |
99 | 99 | ||
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index adc6911ba748..b19a3c506b1e 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c | |||
@@ -51,15 +51,15 @@ void __init pagetable_init(void) | |||
51 | /* | 51 | /* |
52 | * Fixed mappings: | 52 | * Fixed mappings: |
53 | */ | 53 | */ |
54 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | 54 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); |
55 | fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); | 55 | fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base); |
56 | 56 | ||
57 | #ifdef CONFIG_HIGHMEM | 57 | #ifdef CONFIG_HIGHMEM |
58 | /* | 58 | /* |
59 | * Permanent kmaps: | 59 | * Permanent kmaps: |
60 | */ | 60 | */ |
61 | vaddr = PKMAP_BASE; | 61 | vaddr = PKMAP_BASE; |
62 | fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); | 62 | fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); |
63 | 63 | ||
64 | pgd = swapper_pg_dir + __pgd_offset(vaddr); | 64 | pgd = swapper_pg_dir + __pgd_offset(vaddr); |
65 | pud = pud_offset(pgd, vaddr); | 65 | pud = pud_offset(pgd, vaddr); |
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index e5288638a1d9..378a754ca186 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c | |||
@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
90 | unsigned long len, unsigned long pgoff, unsigned long flags) | 90 | unsigned long len, unsigned long pgoff, unsigned long flags) |
91 | { | 91 | { |
92 | struct mm_struct *mm = current->mm; | 92 | struct mm_struct *mm = current->mm; |
93 | struct vm_area_struct *vma; | 93 | struct vm_area_struct *vma, *prev; |
94 | unsigned long task_size = TASK_SIZE; | 94 | unsigned long task_size = TASK_SIZE; |
95 | int do_color_align, last_mmap; | 95 | int do_color_align, last_mmap; |
96 | struct vm_unmapped_area_info info; | 96 | struct vm_unmapped_area_info info; |
@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
117 | else | 117 | else |
118 | addr = PAGE_ALIGN(addr); | 118 | addr = PAGE_ALIGN(addr); |
119 | 119 | ||
120 | vma = find_vma(mm, addr); | 120 | vma = find_vma_prev(mm, addr, &prev); |
121 | if (task_size - len >= addr && | 121 | if (task_size - len >= addr && |
122 | (!vma || addr + len <= vma->vm_start)) | 122 | (!vma || addr + len <= vm_start_gap(vma)) && |
123 | (!prev || addr >= vm_end_gap(prev))) | ||
123 | goto found_addr; | 124 | goto found_addr; |
124 | } | 125 | } |
125 | 126 | ||
@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
143 | const unsigned long len, const unsigned long pgoff, | 144 | const unsigned long len, const unsigned long pgoff, |
144 | const unsigned long flags) | 145 | const unsigned long flags) |
145 | { | 146 | { |
146 | struct vm_area_struct *vma; | 147 | struct vm_area_struct *vma, *prev; |
147 | struct mm_struct *mm = current->mm; | 148 | struct mm_struct *mm = current->mm; |
148 | unsigned long addr = addr0; | 149 | unsigned long addr = addr0; |
149 | int do_color_align, last_mmap; | 150 | int do_color_align, last_mmap; |
@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
177 | addr = COLOR_ALIGN(addr, last_mmap, pgoff); | 178 | addr = COLOR_ALIGN(addr, last_mmap, pgoff); |
178 | else | 179 | else |
179 | addr = PAGE_ALIGN(addr); | 180 | addr = PAGE_ALIGN(addr); |
180 | vma = find_vma(mm, addr); | 181 | |
182 | vma = find_vma_prev(mm, addr, &prev); | ||
181 | if (TASK_SIZE - len >= addr && | 183 | if (TASK_SIZE - len >= addr && |
182 | (!vma || addr + len <= vma->vm_start)) | 184 | (!vma || addr + len <= vm_start_gap(vma)) && |
185 | (!prev || addr >= vm_end_gap(prev))) | ||
183 | goto found_addr; | 186 | goto found_addr; |
184 | } | 187 | } |
185 | 188 | ||
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index f2c562a0a427..0151af6c2a50 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h | |||
@@ -104,7 +104,7 @@ | |||
104 | "1: "PPC_TLNEI" %4,0\n" \ | 104 | "1: "PPC_TLNEI" %4,0\n" \ |
105 | _EMIT_BUG_ENTRY \ | 105 | _EMIT_BUG_ENTRY \ |
106 | : : "i" (__FILE__), "i" (__LINE__), \ | 106 | : : "i" (__FILE__), "i" (__LINE__), \ |
107 | "i" (BUGFLAG_TAINT(TAINT_WARN)), \ | 107 | "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\ |
108 | "i" (sizeof(struct bug_entry)), \ | 108 | "i" (sizeof(struct bug_entry)), \ |
109 | "r" (__ret_warn_on)); \ | 109 | "r" (__ret_warn_on)); \ |
110 | } \ | 110 | } \ |
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index a83821f33ea3..8814a7249ceb 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h | |||
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self, | |||
103 | extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); | 103 | extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); |
104 | extern int kprobe_handler(struct pt_regs *regs); | 104 | extern int kprobe_handler(struct pt_regs *regs); |
105 | extern int kprobe_post_handler(struct pt_regs *regs); | 105 | extern int kprobe_post_handler(struct pt_regs *regs); |
106 | extern int is_current_kprobe_addr(unsigned long addr); | ||
106 | #ifdef CONFIG_KPROBES_ON_FTRACE | 107 | #ifdef CONFIG_KPROBES_ON_FTRACE |
107 | extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, | 108 | extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, |
108 | struct kprobe_ctlblk *kcb); | 109 | struct kprobe_ctlblk *kcb); |
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index c8a822acf962..c23ff4389ca2 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h | |||
@@ -94,11 +94,13 @@ struct xive_q { | |||
94 | * store at 0 and some ESBs support doing a trigger via a | 94 | * store at 0 and some ESBs support doing a trigger via a |
95 | * separate trigger page. | 95 | * separate trigger page. |
96 | */ | 96 | */ |
97 | #define XIVE_ESB_GET 0x800 | 97 | #define XIVE_ESB_STORE_EOI 0x400 /* Store */ |
98 | #define XIVE_ESB_SET_PQ_00 0xc00 | 98 | #define XIVE_ESB_LOAD_EOI 0x000 /* Load */ |
99 | #define XIVE_ESB_SET_PQ_01 0xd00 | 99 | #define XIVE_ESB_GET 0x800 /* Load */ |
100 | #define XIVE_ESB_SET_PQ_10 0xe00 | 100 | #define XIVE_ESB_SET_PQ_00 0xc00 /* Load */ |
101 | #define XIVE_ESB_SET_PQ_11 0xf00 | 101 | #define XIVE_ESB_SET_PQ_01 0xd00 /* Load */ |
102 | #define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ | ||
103 | #define XIVE_ESB_SET_PQ_11 0xf00 /* Load */ | ||
102 | 104 | ||
103 | #define XIVE_ESB_VAL_P 0x2 | 105 | #define XIVE_ESB_VAL_P 0x2 |
104 | #define XIVE_ESB_VAL_Q 0x1 | 106 | #define XIVE_ESB_VAL_Q 0x1 |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index ae418b85c17c..b886795060fd 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION() | |||
1411 | .balign IFETCH_ALIGN_BYTES | 1411 | .balign IFETCH_ALIGN_BYTES |
1412 | do_hash_page: | 1412 | do_hash_page: |
1413 | #ifdef CONFIG_PPC_STD_MMU_64 | 1413 | #ifdef CONFIG_PPC_STD_MMU_64 |
1414 | andis. r0,r4,0xa410 /* weird error? */ | 1414 | andis. r0,r4,0xa450 /* weird error? */ |
1415 | bne- handle_page_fault /* if not, try to insert a HPTE */ | 1415 | bne- handle_page_fault /* if not, try to insert a HPTE */ |
1416 | andis. r0,r4,DSISR_DABRMATCH@h | ||
1417 | bne- handle_dabr_fault | ||
1418 | CURRENT_THREAD_INFO(r11, r1) | 1416 | CURRENT_THREAD_INFO(r11, r1) |
1419 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ | 1417 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ |
1420 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ | 1418 | andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ |
@@ -1438,11 +1436,16 @@ do_hash_page: | |||
1438 | 1436 | ||
1439 | /* Error */ | 1437 | /* Error */ |
1440 | blt- 13f | 1438 | blt- 13f |
1439 | |||
1440 | /* Reload DSISR into r4 for the DABR check below */ | ||
1441 | ld r4,_DSISR(r1) | ||
1441 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 1442 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
1442 | 1443 | ||
1443 | /* Here we have a page fault that hash_page can't handle. */ | 1444 | /* Here we have a page fault that hash_page can't handle. */ |
1444 | handle_page_fault: | 1445 | handle_page_fault: |
1445 | 11: ld r4,_DAR(r1) | 1446 | 11: andis. r0,r4,DSISR_DABRMATCH@h |
1447 | bne- handle_dabr_fault | ||
1448 | ld r4,_DAR(r1) | ||
1446 | ld r5,_DSISR(r1) | 1449 | ld r5,_DSISR(r1) |
1447 | addi r3,r1,STACK_FRAME_OVERHEAD | 1450 | addi r3,r1,STACK_FRAME_OVERHEAD |
1448 | bl do_page_fault | 1451 | bl do_page_fault |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index fc4343514bed..01addfb0ed0a 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c | |||
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | |||
43 | 43 | ||
44 | struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; | 44 | struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; |
45 | 45 | ||
46 | int is_current_kprobe_addr(unsigned long addr) | ||
47 | { | ||
48 | struct kprobe *p = kprobe_running(); | ||
49 | return (p && (unsigned long)p->addr == addr) ? 1 : 0; | ||
50 | } | ||
51 | |||
46 | bool arch_within_kprobe_blacklist(unsigned long addr) | 52 | bool arch_within_kprobe_blacklist(unsigned long addr) |
47 | { | 53 | { |
48 | return (addr >= (unsigned long)__kprobes_text_start && | 54 | return (addr >= (unsigned long)__kprobes_text_start && |
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
617 | regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); | 623 | regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); |
618 | #endif | 624 | #endif |
619 | 625 | ||
626 | /* | ||
627 | * jprobes use jprobe_return() which skips the normal return | ||
628 | * path of the function, and this messes up the accounting of the | ||
629 | * function graph tracer. | ||
630 | * | ||
631 | * Pause function graph tracing while performing the jprobe function. | ||
632 | */ | ||
633 | pause_graph_tracing(); | ||
634 | |||
620 | return 1; | 635 | return 1; |
621 | } | 636 | } |
622 | NOKPROBE_SYMBOL(setjmp_pre_handler); | 637 | NOKPROBE_SYMBOL(setjmp_pre_handler); |
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
642 | * saved regs... | 657 | * saved regs... |
643 | */ | 658 | */ |
644 | memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); | 659 | memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); |
660 | /* It's OK to start function graph tracing again */ | ||
661 | unpause_graph_tracing(); | ||
645 | preempt_enable_no_resched(); | 662 | preempt_enable_no_resched(); |
646 | return 1; | 663 | return 1; |
647 | } | 664 | } |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index a8c1f99e9607..4640f6d64f8b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -616,6 +616,24 @@ void __init exc_lvl_early_init(void) | |||
616 | #endif | 616 | #endif |
617 | 617 | ||
618 | /* | 618 | /* |
619 | * Emergency stacks are used for a range of things, from asynchronous | ||
620 | * NMIs (system reset, machine check) to synchronous, process context. | ||
621 | * We set preempt_count to zero, even though that isn't necessarily correct. To | ||
622 | * get the right value we'd need to copy it from the previous thread_info, but | ||
623 | * doing that might fault causing more problems. | ||
624 | * TODO: what to do with accounting? | ||
625 | */ | ||
626 | static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu) | ||
627 | { | ||
628 | ti->task = NULL; | ||
629 | ti->cpu = cpu; | ||
630 | ti->preempt_count = 0; | ||
631 | ti->local_flags = 0; | ||
632 | ti->flags = 0; | ||
633 | klp_init_thread_info(ti); | ||
634 | } | ||
635 | |||
636 | /* | ||
619 | * Stack space used when we detect a bad kernel stack pointer, and | 637 | * Stack space used when we detect a bad kernel stack pointer, and |
620 | * early in SMP boots before relocation is enabled. Exclusive emergency | 638 | * early in SMP boots before relocation is enabled. Exclusive emergency |
621 | * stack for machine checks. | 639 | * stack for machine checks. |
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void) | |||
633 | * Since we use these as temporary stacks during secondary CPU | 651 | * Since we use these as temporary stacks during secondary CPU |
634 | * bringup, we need to get at them in real mode. This means they | 652 | * bringup, we need to get at them in real mode. This means they |
635 | * must also be within the RMO region. | 653 | * must also be within the RMO region. |
654 | * | ||
655 | * The IRQ stacks allocated elsewhere in this file are zeroed and | ||
656 | * initialized in kernel/irq.c. These are initialized here in order | ||
657 | * to have emergency stacks available as early as possible. | ||
636 | */ | 658 | */ |
637 | limit = min(safe_stack_limit(), ppc64_rma_size); | 659 | limit = min(safe_stack_limit(), ppc64_rma_size); |
638 | 660 | ||
639 | for_each_possible_cpu(i) { | 661 | for_each_possible_cpu(i) { |
640 | struct thread_info *ti; | 662 | struct thread_info *ti; |
641 | ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); | 663 | ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); |
642 | klp_init_thread_info(ti); | 664 | memset(ti, 0, THREAD_SIZE); |
665 | emerg_stack_init_thread_info(ti, i); | ||
643 | paca[i].emergency_sp = (void *)ti + THREAD_SIZE; | 666 | paca[i].emergency_sp = (void *)ti + THREAD_SIZE; |
644 | 667 | ||
645 | #ifdef CONFIG_PPC_BOOK3S_64 | 668 | #ifdef CONFIG_PPC_BOOK3S_64 |
646 | /* emergency stack for NMI exception handling. */ | 669 | /* emergency stack for NMI exception handling. */ |
647 | ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); | 670 | ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); |
648 | klp_init_thread_info(ti); | 671 | memset(ti, 0, THREAD_SIZE); |
672 | emerg_stack_init_thread_info(ti, i); | ||
649 | paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE; | 673 | paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE; |
650 | 674 | ||
651 | /* emergency stack for machine check exception handling. */ | 675 | /* emergency stack for machine check exception handling. */ |
652 | ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); | 676 | ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); |
653 | klp_init_thread_info(ti); | 677 | memset(ti, 0, THREAD_SIZE); |
678 | emerg_stack_init_thread_info(ti, i); | ||
654 | paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE; | 679 | paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE; |
655 | #endif | 680 | #endif |
656 | } | 681 | } |
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index 7c933a99f5d5..c98e90b4ea7b 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S | |||
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller) | |||
45 | stdu r1,-SWITCH_FRAME_SIZE(r1) | 45 | stdu r1,-SWITCH_FRAME_SIZE(r1) |
46 | 46 | ||
47 | /* Save all gprs to pt_regs */ | 47 | /* Save all gprs to pt_regs */ |
48 | SAVE_8GPRS(0,r1) | 48 | SAVE_GPR(0, r1) |
49 | SAVE_8GPRS(8,r1) | 49 | SAVE_10GPRS(2, r1) |
50 | SAVE_8GPRS(16,r1) | 50 | SAVE_10GPRS(12, r1) |
51 | SAVE_8GPRS(24,r1) | 51 | SAVE_10GPRS(22, r1) |
52 | |||
53 | /* Save previous stack pointer (r1) */ | ||
54 | addi r8, r1, SWITCH_FRAME_SIZE | ||
55 | std r8, GPR1(r1) | ||
52 | 56 | ||
53 | /* Load special regs for save below */ | 57 | /* Load special regs for save below */ |
54 | mfmsr r8 | 58 | mfmsr r8 |
@@ -95,18 +99,44 @@ ftrace_call: | |||
95 | bl ftrace_stub | 99 | bl ftrace_stub |
96 | nop | 100 | nop |
97 | 101 | ||
98 | /* Load ctr with the possibly modified NIP */ | 102 | /* Load the possibly modified NIP */ |
99 | ld r3, _NIP(r1) | 103 | ld r15, _NIP(r1) |
100 | mtctr r3 | 104 | |
101 | #ifdef CONFIG_LIVEPATCH | 105 | #ifdef CONFIG_LIVEPATCH |
102 | cmpd r14,r3 /* has NIP been altered? */ | 106 | cmpd r14, r15 /* has NIP been altered? */ |
107 | #endif | ||
108 | |||
109 | #if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE) | ||
110 | /* NIP has not been altered, skip over further checks */ | ||
111 | beq 1f | ||
112 | |||
113 | /* Check if there is an active kprobe on us */ | ||
114 | subi r3, r14, 4 | ||
115 | bl is_current_kprobe_addr | ||
116 | nop | ||
117 | |||
118 | /* | ||
119 | * If r3 == 1, then this is a kprobe/jprobe. | ||
120 | * else, this is livepatched function. | ||
121 | * | ||
122 | * The conditional branch for livepatch_handler below will use the | ||
123 | * result of this comparison. For kprobe/jprobe, we just need to branch to | ||
124 | * the new NIP, not call livepatch_handler. The branch below is bne, so we | ||
125 | * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want | ||
126 | * CR0[EQ] = (r3 == 1). | ||
127 | */ | ||
128 | cmpdi r3, 1 | ||
129 | 1: | ||
103 | #endif | 130 | #endif |
104 | 131 | ||
132 | /* Load CTR with the possibly modified NIP */ | ||
133 | mtctr r15 | ||
134 | |||
105 | /* Restore gprs */ | 135 | /* Restore gprs */ |
106 | REST_8GPRS(0,r1) | 136 | REST_GPR(0,r1) |
107 | REST_8GPRS(8,r1) | 137 | REST_10GPRS(2,r1) |
108 | REST_8GPRS(16,r1) | 138 | REST_10GPRS(12,r1) |
109 | REST_8GPRS(24,r1) | 139 | REST_10GPRS(22,r1) |
110 | 140 | ||
111 | /* Restore possibly modified LR */ | 141 | /* Restore possibly modified LR */ |
112 | ld r0, _LINK(r1) | 142 | ld r0, _LINK(r1) |
@@ -119,7 +149,10 @@ ftrace_call: | |||
119 | addi r1, r1, SWITCH_FRAME_SIZE | 149 | addi r1, r1, SWITCH_FRAME_SIZE |
120 | 150 | ||
121 | #ifdef CONFIG_LIVEPATCH | 151 | #ifdef CONFIG_LIVEPATCH |
122 | /* Based on the cmpd above, if the NIP was altered handle livepatch */ | 152 | /* |
153 | * Based on the cmpd or cmpdi above, if the NIP was altered and we're | ||
154 | * not on a kprobe/jprobe, then handle livepatch. | ||
155 | */ | ||
123 | bne- livepatch_handler | 156 | bne- livepatch_handler |
124 | #endif | 157 | #endif |
125 | 158 | ||
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 42b7a4fd57d9..8d1a365b8edc 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
1486 | r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); | 1486 | r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); |
1487 | break; | 1487 | break; |
1488 | case KVM_REG_PPC_TB_OFFSET: | 1488 | case KVM_REG_PPC_TB_OFFSET: |
1489 | /* | ||
1490 | * POWER9 DD1 has an erratum where writing TBU40 causes | ||
1491 | * the timebase to lose ticks. So we don't let the | ||
1492 | * timebase offset be changed on P9 DD1. (It is | ||
1493 | * initialized to zero.) | ||
1494 | */ | ||
1495 | if (cpu_has_feature(CPU_FTR_POWER9_DD1)) | ||
1496 | break; | ||
1489 | /* round up to multiple of 2^24 */ | 1497 | /* round up to multiple of 2^24 */ |
1490 | vcpu->arch.vcore->tb_offset = | 1498 | vcpu->arch.vcore->tb_offset = |
1491 | ALIGN(set_reg_val(id, *val), 1UL << 24); | 1499 | ALIGN(set_reg_val(id, *val), 1UL << 24); |
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
2907 | { | 2915 | { |
2908 | int r; | 2916 | int r; |
2909 | int srcu_idx; | 2917 | int srcu_idx; |
2918 | unsigned long ebb_regs[3] = {}; /* shut up GCC */ | ||
2919 | unsigned long user_tar = 0; | ||
2920 | unsigned int user_vrsave; | ||
2910 | 2921 | ||
2911 | if (!vcpu->arch.sane) { | 2922 | if (!vcpu->arch.sane) { |
2912 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 2923 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
2913 | return -EINVAL; | 2924 | return -EINVAL; |
2914 | } | 2925 | } |
2915 | 2926 | ||
2927 | /* | ||
2928 | * Don't allow entry with a suspended transaction, because | ||
2929 | * the guest entry/exit code will lose it. | ||
2930 | * If the guest has TM enabled, save away their TM-related SPRs | ||
2931 | * (they will get restored by the TM unavailable interrupt). | ||
2932 | */ | ||
2933 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
2934 | if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && | ||
2935 | (current->thread.regs->msr & MSR_TM)) { | ||
2936 | if (MSR_TM_ACTIVE(current->thread.regs->msr)) { | ||
2937 | run->exit_reason = KVM_EXIT_FAIL_ENTRY; | ||
2938 | run->fail_entry.hardware_entry_failure_reason = 0; | ||
2939 | return -EINVAL; | ||
2940 | } | ||
2941 | current->thread.tm_tfhar = mfspr(SPRN_TFHAR); | ||
2942 | current->thread.tm_tfiar = mfspr(SPRN_TFIAR); | ||
2943 | current->thread.tm_texasr = mfspr(SPRN_TEXASR); | ||
2944 | current->thread.regs->msr &= ~MSR_TM; | ||
2945 | } | ||
2946 | #endif | ||
2947 | |||
2916 | kvmppc_core_prepare_to_enter(vcpu); | 2948 | kvmppc_core_prepare_to_enter(vcpu); |
2917 | 2949 | ||
2918 | /* No need to go into the guest when all we'll do is come back out */ | 2950 | /* No need to go into the guest when all we'll do is come back out */ |
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
2934 | 2966 | ||
2935 | flush_all_to_thread(current); | 2967 | flush_all_to_thread(current); |
2936 | 2968 | ||
2969 | /* Save userspace EBB and other register values */ | ||
2970 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) { | ||
2971 | ebb_regs[0] = mfspr(SPRN_EBBHR); | ||
2972 | ebb_regs[1] = mfspr(SPRN_EBBRR); | ||
2973 | ebb_regs[2] = mfspr(SPRN_BESCR); | ||
2974 | user_tar = mfspr(SPRN_TAR); | ||
2975 | } | ||
2976 | user_vrsave = mfspr(SPRN_VRSAVE); | ||
2977 | |||
2937 | vcpu->arch.wqp = &vcpu->arch.vcore->wq; | 2978 | vcpu->arch.wqp = &vcpu->arch.vcore->wq; |
2938 | vcpu->arch.pgdir = current->mm->pgd; | 2979 | vcpu->arch.pgdir = current->mm->pgd; |
2939 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | 2980 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
2960 | } | 3001 | } |
2961 | } while (is_kvmppc_resume_guest(r)); | 3002 | } while (is_kvmppc_resume_guest(r)); |
2962 | 3003 | ||
3004 | /* Restore userspace EBB and other register values */ | ||
3005 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) { | ||
3006 | mtspr(SPRN_EBBHR, ebb_regs[0]); | ||
3007 | mtspr(SPRN_EBBRR, ebb_regs[1]); | ||
3008 | mtspr(SPRN_BESCR, ebb_regs[2]); | ||
3009 | mtspr(SPRN_TAR, user_tar); | ||
3010 | mtspr(SPRN_FSCR, current->thread.fscr); | ||
3011 | } | ||
3012 | mtspr(SPRN_VRSAVE, user_vrsave); | ||
3013 | |||
2963 | out: | 3014 | out: |
2964 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; | 3015 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
2965 | atomic_dec(&vcpu->kvm->arch.vcpus_running); | 3016 | atomic_dec(&vcpu->kvm->arch.vcpus_running); |
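The book3s_hv.c hunks above follow one bracketing idea: refuse to enter the guest while a transaction is active or suspended (the MSR_TM_ACTIVE case fails entry with KVM_EXIT_FAIL_ENTRY), otherwise stash the TM SPRs and clear MSR_TM, then save the userspace-visible EBB, TAR and VRSAVE state before the run loop and restore it after the guest exits. A minimal C sketch of that save/restore discipline follows; the struct and helper names are illustrative only, while mfspr()/mtspr() and the SPRN_* constants are the real <asm/reg.h> symbols used above.

	/* Illustrative sketch (not a kernel API): capture the userspace SPR
	 * state that a POWER8+ guest run would otherwise clobber, and
	 * restore it once the vcpu returns to the host. */
	struct user_spr_state {
		unsigned long ebbhr, ebbrr, bescr, tar;
		unsigned int vrsave;
	};

	static void save_user_sprs(struct user_spr_state *s)
	{
		s->ebbhr  = mfspr(SPRN_EBBHR);
		s->ebbrr  = mfspr(SPRN_EBBRR);
		s->bescr  = mfspr(SPRN_BESCR);
		s->tar    = mfspr(SPRN_TAR);
		s->vrsave = mfspr(SPRN_VRSAVE);
	}

	static void restore_user_sprs(const struct user_spr_state *s)
	{
		mtspr(SPRN_EBBHR, s->ebbhr);
		mtspr(SPRN_EBBRR, s->ebbrr);
		mtspr(SPRN_BESCR, s->bescr);
		mtspr(SPRN_TAR, s->tar);
		mtspr(SPRN_VRSAVE, s->vrsave);
	}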
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 0fdc4a28970b..404deb512844 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S | |||
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
121 | * Put whatever is in the decrementer into the | 121 | * Put whatever is in the decrementer into the |
122 | * hypervisor decrementer. | 122 | * hypervisor decrementer. |
123 | */ | 123 | */ |
124 | BEGIN_FTR_SECTION | ||
125 | ld r5, HSTATE_KVM_VCORE(r13) | ||
126 | ld r6, VCORE_KVM(r5) | ||
127 | ld r9, KVM_HOST_LPCR(r6) | ||
128 | andis. r9, r9, LPCR_LD@h | ||
129 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | ||
124 | mfspr r8,SPRN_DEC | 130 | mfspr r8,SPRN_DEC |
125 | mftb r7 | 131 | mftb r7 |
126 | mtspr SPRN_HDEC,r8 | 132 | BEGIN_FTR_SECTION |
133 | /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */ | ||
134 | bne 32f | ||
135 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | ||
127 | extsw r8,r8 | 136 | extsw r8,r8 |
137 | 32: mtspr SPRN_HDEC,r8 | ||
128 | add r8,r8,r7 | 138 | add r8,r8,r7 |
129 | std r8,HSTATE_DECEXP(r13) | 139 | std r8,HSTATE_DECEXP(r13) |
130 | 140 | ||
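The interrupts.S change above handles the POWER9 large decrementer: when LPCR[LD] is set on an ISA v3.00 CPU, the value read from SPRN_HDEC is already full width and must not be sign-extended from 32 bits before it is added to the timebase. A hedged C restatement of that decision (the helper and its flag arguments are illustrative, not kernel code):

	/* Illustrative only: mirrors the conditional extsw in the assembly.
	 * With ARCH_300 and the large decrementer enabled the raw HDEC value
	 * is used as-is; otherwise it is sign-extended from 32 bits. */
	static inline long long hdec_to_signed(unsigned long long hdec,
					       int arch_300, int lpcr_ld)
	{
		if (arch_300 && lpcr_ld)
			return (long long)hdec;
		return (long long)(int)hdec;
	}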
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bdb3f76ceb6b..4888dd494604 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -32,12 +32,29 @@ | |||
32 | #include <asm/opal.h> | 32 | #include <asm/opal.h> |
33 | #include <asm/xive-regs.h> | 33 | #include <asm/xive-regs.h> |
34 | 34 | ||
35 | /* Sign-extend HDEC if not on POWER9 */ | ||
36 | #define EXTEND_HDEC(reg) \ | ||
37 | BEGIN_FTR_SECTION; \ | ||
38 | extsw reg, reg; \ | ||
39 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | ||
40 | |||
35 | #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) | 41 | #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) |
36 | 42 | ||
37 | /* Values in HSTATE_NAPPING(r13) */ | 43 | /* Values in HSTATE_NAPPING(r13) */ |
38 | #define NAPPING_CEDE 1 | 44 | #define NAPPING_CEDE 1 |
39 | #define NAPPING_NOVCPU 2 | 45 | #define NAPPING_NOVCPU 2 |
40 | 46 | ||
47 | /* Stack frame offsets for kvmppc_hv_entry */ | ||
48 | #define SFS 144 | ||
49 | #define STACK_SLOT_TRAP (SFS-4) | ||
50 | #define STACK_SLOT_TID (SFS-16) | ||
51 | #define STACK_SLOT_PSSCR (SFS-24) | ||
52 | #define STACK_SLOT_PID (SFS-32) | ||
53 | #define STACK_SLOT_IAMR (SFS-40) | ||
54 | #define STACK_SLOT_CIABR (SFS-48) | ||
55 | #define STACK_SLOT_DAWR (SFS-56) | ||
56 | #define STACK_SLOT_DAWRX (SFS-64) | ||
57 | |||
41 | /* | 58 | /* |
42 | * Call kvmppc_hv_entry in real mode. | 59 | * Call kvmppc_hv_entry in real mode. |
43 | * Must be called with interrupts hard-disabled. | 60 | * Must be called with interrupts hard-disabled. |
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
214 | kvmppc_primary_no_guest: | 231 | kvmppc_primary_no_guest: |
215 | /* We handle this much like a ceded vcpu */ | 232 | /* We handle this much like a ceded vcpu */ |
216 | /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ | 233 | /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ |
234 | /* HDEC may be larger than DEC for arch >= v3.00, but since the */ | ||
235 | /* HDEC value came from DEC in the first place, it will fit */ | ||
217 | mfspr r3, SPRN_HDEC | 236 | mfspr r3, SPRN_HDEC |
218 | mtspr SPRN_DEC, r3 | 237 | mtspr SPRN_DEC, r3 |
219 | /* | 238 | /* |
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup: | |||
295 | 314 | ||
296 | /* See if our timeslice has expired (HDEC is negative) */ | 315 | /* See if our timeslice has expired (HDEC is negative) */ |
297 | mfspr r0, SPRN_HDEC | 316 | mfspr r0, SPRN_HDEC |
317 | EXTEND_HDEC(r0) | ||
298 | li r12, BOOK3S_INTERRUPT_HV_DECREMENTER | 318 | li r12, BOOK3S_INTERRUPT_HV_DECREMENTER |
299 | cmpwi r0, 0 | 319 | cmpdi r0, 0 |
300 | blt kvm_novcpu_exit | 320 | blt kvm_novcpu_exit |
301 | 321 | ||
302 | /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ | 322 | /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ |
@@ -319,10 +339,10 @@ kvm_novcpu_exit: | |||
319 | bl kvmhv_accumulate_time | 339 | bl kvmhv_accumulate_time |
320 | #endif | 340 | #endif |
321 | 13: mr r3, r12 | 341 | 13: mr r3, r12 |
322 | stw r12, 112-4(r1) | 342 | stw r12, STACK_SLOT_TRAP(r1) |
323 | bl kvmhv_commence_exit | 343 | bl kvmhv_commence_exit |
324 | nop | 344 | nop |
325 | lwz r12, 112-4(r1) | 345 | lwz r12, STACK_SLOT_TRAP(r1) |
326 | b kvmhv_switch_to_host | 346 | b kvmhv_switch_to_host |
327 | 347 | ||
328 | /* | 348 | /* |
@@ -390,8 +410,8 @@ kvm_secondary_got_guest: | |||
390 | lbz r4, HSTATE_PTID(r13) | 410 | lbz r4, HSTATE_PTID(r13) |
391 | cmpwi r4, 0 | 411 | cmpwi r4, 0 |
392 | bne 63f | 412 | bne 63f |
393 | lis r6, 0x7fff | 413 | LOAD_REG_ADDR(r6, decrementer_max) |
394 | ori r6, r6, 0xffff | 414 | ld r6, 0(r6) |
395 | mtspr SPRN_HDEC, r6 | 415 | mtspr SPRN_HDEC, r6 |
396 | /* and set per-LPAR registers, if doing dynamic micro-threading */ | 416 | /* and set per-LPAR registers, if doing dynamic micro-threading */ |
397 | ld r6, HSTATE_SPLIT_MODE(r13) | 417 | ld r6, HSTATE_SPLIT_MODE(r13) |
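The hunk above stops parking secondary threads with a hard-coded HDEC of 0x7fffffff and instead loads decrementer_max, so the longest representable timeout is used whether or not the large decrementer is in effect. The exact computation of decrementer_max belongs to the timebase setup code; the sketch below only illustrates, as an assumption, why the constant depends on the decrementer width.

	/* Illustrative: for an n-bit decrementer the largest value that does
	 * not immediately appear negative is 2^(n-1) - 1, e.g. 0x7fffffff
	 * for the classic 32-bit case. */
	static unsigned long long max_decrementer_value(unsigned int bits)
	{
		return (1ULL << (bits - 1)) - 1;
	}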
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
545 | * * | 565 | * * |
546 | *****************************************************************************/ | 566 | *****************************************************************************/ |
547 | 567 | ||
548 | /* Stack frame offsets */ | ||
549 | #define STACK_SLOT_TID (112-16) | ||
550 | #define STACK_SLOT_PSSCR (112-24) | ||
551 | #define STACK_SLOT_PID (112-32) | ||
552 | |||
553 | .global kvmppc_hv_entry | 568 | .global kvmppc_hv_entry |
554 | kvmppc_hv_entry: | 569 | kvmppc_hv_entry: |
555 | 570 | ||
@@ -565,7 +580,7 @@ kvmppc_hv_entry: | |||
565 | */ | 580 | */ |
566 | mflr r0 | 581 | mflr r0 |
567 | std r0, PPC_LR_STKOFF(r1) | 582 | std r0, PPC_LR_STKOFF(r1) |
568 | stdu r1, -112(r1) | 583 | stdu r1, -SFS(r1) |
569 | 584 | ||
570 | /* Save R1 in the PACA */ | 585 | /* Save R1 in the PACA */ |
571 | std r1, HSTATE_HOST_R1(r13) | 586 | std r1, HSTATE_HOST_R1(r13) |
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION | |||
749 | mfspr r5, SPRN_TIDR | 764 | mfspr r5, SPRN_TIDR |
750 | mfspr r6, SPRN_PSSCR | 765 | mfspr r6, SPRN_PSSCR |
751 | mfspr r7, SPRN_PID | 766 | mfspr r7, SPRN_PID |
767 | mfspr r8, SPRN_IAMR | ||
752 | std r5, STACK_SLOT_TID(r1) | 768 | std r5, STACK_SLOT_TID(r1) |
753 | std r6, STACK_SLOT_PSSCR(r1) | 769 | std r6, STACK_SLOT_PSSCR(r1) |
754 | std r7, STACK_SLOT_PID(r1) | 770 | std r7, STACK_SLOT_PID(r1) |
771 | std r8, STACK_SLOT_IAMR(r1) | ||
755 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | 772 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
773 | BEGIN_FTR_SECTION | ||
774 | mfspr r5, SPRN_CIABR | ||
775 | mfspr r6, SPRN_DAWR | ||
776 | mfspr r7, SPRN_DAWRX | ||
777 | std r5, STACK_SLOT_CIABR(r1) | ||
778 | std r6, STACK_SLOT_DAWR(r1) | ||
779 | std r7, STACK_SLOT_DAWRX(r1) | ||
780 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | ||
756 | 781 | ||
757 | BEGIN_FTR_SECTION | 782 | BEGIN_FTR_SECTION |
758 | /* Set partition DABR */ | 783 | /* Set partition DABR */ |
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) | |||
968 | 993 | ||
969 | /* Check if HDEC expires soon */ | 994 | /* Check if HDEC expires soon */ |
970 | mfspr r3, SPRN_HDEC | 995 | mfspr r3, SPRN_HDEC |
971 | cmpwi r3, 512 /* 1 microsecond */ | 996 | EXTEND_HDEC(r3) |
997 | cmpdi r3, 512 /* 1 microsecond */ | ||
972 | blt hdec_soon | 998 | blt hdec_soon |
973 | 999 | ||
974 | #ifdef CONFIG_KVM_XICS | 1000 | #ifdef CONFIG_KVM_XICS |
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) | |||
1505 | * set by the guest could disrupt the host. | 1531 | * set by the guest could disrupt the host. |
1506 | */ | 1532 | */ |
1507 | li r0, 0 | 1533 | li r0, 0 |
1508 | mtspr SPRN_IAMR, r0 | 1534 | mtspr SPRN_PSPB, r0 |
1509 | mtspr SPRN_CIABR, r0 | ||
1510 | mtspr SPRN_DAWRX, r0 | ||
1511 | mtspr SPRN_WORT, r0 | 1535 | mtspr SPRN_WORT, r0 |
1512 | BEGIN_FTR_SECTION | 1536 | BEGIN_FTR_SECTION |
1537 | mtspr SPRN_IAMR, r0 | ||
1513 | mtspr SPRN_TCSCR, r0 | 1538 | mtspr SPRN_TCSCR, r0 |
1514 | /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ | 1539 | /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ |
1515 | li r0, 1 | 1540 | li r0, 1 |
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | |||
1525 | std r6,VCPU_UAMOR(r9) | 1550 | std r6,VCPU_UAMOR(r9) |
1526 | li r6,0 | 1551 | li r6,0 |
1527 | mtspr SPRN_AMR,r6 | 1552 | mtspr SPRN_AMR,r6 |
1553 | mtspr SPRN_UAMOR, r6 | ||
1528 | 1554 | ||
1529 | /* Switch DSCR back to host value */ | 1555 | /* Switch DSCR back to host value */ |
1530 | mfspr r8, SPRN_DSCR | 1556 | mfspr r8, SPRN_DSCR |
@@ -1670,12 +1696,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
1670 | 1696 | ||
1671 | /* Restore host values of some registers */ | 1697 | /* Restore host values of some registers */ |
1672 | BEGIN_FTR_SECTION | 1698 | BEGIN_FTR_SECTION |
1699 | ld r5, STACK_SLOT_CIABR(r1) | ||
1700 | ld r6, STACK_SLOT_DAWR(r1) | ||
1701 | ld r7, STACK_SLOT_DAWRX(r1) | ||
1702 | mtspr SPRN_CIABR, r5 | ||
1703 | mtspr SPRN_DAWR, r6 | ||
1704 | mtspr SPRN_DAWRX, r7 | ||
1705 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | ||
1706 | BEGIN_FTR_SECTION | ||
1673 | ld r5, STACK_SLOT_TID(r1) | 1707 | ld r5, STACK_SLOT_TID(r1) |
1674 | ld r6, STACK_SLOT_PSSCR(r1) | 1708 | ld r6, STACK_SLOT_PSSCR(r1) |
1675 | ld r7, STACK_SLOT_PID(r1) | 1709 | ld r7, STACK_SLOT_PID(r1) |
1710 | ld r8, STACK_SLOT_IAMR(r1) | ||
1676 | mtspr SPRN_TIDR, r5 | 1711 | mtspr SPRN_TIDR, r5 |
1677 | mtspr SPRN_PSSCR, r6 | 1712 | mtspr SPRN_PSSCR, r6 |
1678 | mtspr SPRN_PID, r7 | 1713 | mtspr SPRN_PID, r7 |
1714 | mtspr SPRN_IAMR, r8 | ||
1679 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | 1715 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
1680 | BEGIN_FTR_SECTION | 1716 | BEGIN_FTR_SECTION |
1681 | PPC_INVALIDATE_ERAT | 1717 | PPC_INVALIDATE_ERAT |
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) | |||
1819 | li r0, KVM_GUEST_MODE_NONE | 1855 | li r0, KVM_GUEST_MODE_NONE |
1820 | stb r0, HSTATE_IN_GUEST(r13) | 1856 | stb r0, HSTATE_IN_GUEST(r13) |
1821 | 1857 | ||
1822 | ld r0, 112+PPC_LR_STKOFF(r1) | 1858 | ld r0, SFS+PPC_LR_STKOFF(r1) |
1823 | addi r1, r1, 112 | 1859 | addi r1, r1, SFS |
1824 | mtlr r0 | 1860 | mtlr r0 |
1825 | blr | 1861 | blr |
1826 | 1862 | ||
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) | |||
2366 | mfspr r3, SPRN_DEC | 2402 | mfspr r3, SPRN_DEC |
2367 | mfspr r4, SPRN_HDEC | 2403 | mfspr r4, SPRN_HDEC |
2368 | mftb r5 | 2404 | mftb r5 |
2369 | cmpw r3, r4 | 2405 | extsw r3, r3 |
2406 | EXTEND_HDEC(r4) | ||
2407 | cmpd r3, r4 | ||
2370 | ble 67f | 2408 | ble 67f |
2371 | mtspr SPRN_DEC, r4 | 2409 | mtspr SPRN_DEC, r4 |
2372 | 67: | 2410 | 67: |
2373 | /* save expiry time of guest decrementer */ | 2411 | /* save expiry time of guest decrementer */ |
2374 | extsw r3, r3 | ||
2375 | add r3, r3, r5 | 2412 | add r3, r3, r5 |
2376 | ld r4, HSTATE_KVM_VCPU(r13) | 2413 | ld r4, HSTATE_KVM_VCPU(r13) |
2377 | ld r5, HSTATE_KVM_VCORE(r13) | 2414 | ld r5, HSTATE_KVM_VCORE(r13) |
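Taken together, the rmhandlers.S hunks grow the kvmppc_hv_entry stack frame from 112 bytes to SFS (144) and give each spill slot a name, so the host's CIABR, DAWR, DAWRX and IAMR can be saved on the way into the guest and restored on the way out instead of being cleared. A C analogue of that save/restore bracketing follows; the struct and helpers are illustrative (the assembly uses the STACK_SLOT_* frame offsets instead), while the SPRN_* names are the real ones used above.

	/* Illustrative sketch: spill the host debug/context registers before
	 * loading guest state, reload them after the guest exits. */
	struct host_saved_regs {
		unsigned long ciabr, dawr, dawrx, iamr;
	};

	static void save_host_regs(struct host_saved_regs *h)
	{
		h->ciabr = mfspr(SPRN_CIABR);
		h->dawr  = mfspr(SPRN_DAWR);
		h->dawrx = mfspr(SPRN_DAWRX);
		h->iamr  = mfspr(SPRN_IAMR);
	}

	static void restore_host_regs(const struct host_saved_regs *h)
	{
		mtspr(SPRN_CIABR, h->ciabr);
		mtspr(SPRN_DAWR,  h->dawr);
		mtspr(SPRN_DAWRX, h->dawrx);
		mtspr(SPRN_IAMR,  h->iamr);
	}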
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 023a31133c37..4636ca6e7d38 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c | |||
@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd) | |||
69 | { | 69 | { |
70 | /* If the XIVE supports the new "store EOI facility, use it */ | 70 | /* If the XIVE supports the new "store EOI facility, use it */ |
71 | if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) | 71 | if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) |
72 | __x_writeq(0, __x_eoi_page(xd)); | 72 | __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); |
73 | else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { | 73 | else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { |
74 | opal_int_eoi(hw_irq); | 74 | opal_int_eoi(hw_irq); |
75 | } else { | 75 | } else { |
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd) | |||
89 | * properly. | 89 | * properly. |
90 | */ | 90 | */ |
91 | if (xd->flags & XIVE_IRQ_FLAG_LSI) | 91 | if (xd->flags & XIVE_IRQ_FLAG_LSI) |
92 | __x_readq(__x_eoi_page(xd)); | 92 | __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); |
93 | else { | 93 | else { |
94 | eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00); | 94 | eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00); |
95 | 95 | ||
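This file and arch/powerpc/sysdev/xive/common.c further down fix the same bug: a "store EOI" must be a write to the XIVE_ESB_STORE_EOI offset of the interrupt's ESB EOI page, and the LSI EOI a load from XIVE_ESB_LOAD_EOI, rather than accesses at offset 0. A minimal sketch of the corrected accesses, written against the eoi_mmio pointer used in common.c (the wrapper function itself is illustrative):

	/* Illustrative: store-EOI writes and LSI load-EOI reads target the
	 * documented offsets within the ESB management page. */
	static void xive_eoi_sketch(struct xive_irq_data *xd)
	{
		if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
			out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
		else if (xd->flags & XIVE_IRQ_FLAG_LSI)
			(void)in_be64(xd->eoi_mmio + XIVE_ESB_LOAD_EOI);
	}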
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 6575b9aabef4..a12e86395025 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c | |||
@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
68 | addr = ALIGN(addr, huge_page_size(h)); | 68 | addr = ALIGN(addr, huge_page_size(h)); |
69 | vma = find_vma(mm, addr); | 69 | vma = find_vma(mm, addr); |
70 | if (mm->task_size - len >= addr && | 70 | if (mm->task_size - len >= addr && |
71 | (!vma || addr + len <= vma->vm_start)) | 71 | (!vma || addr + len <= vm_start_gap(vma))) |
72 | return addr; | 72 | return addr; |
73 | } | 73 | } |
74 | /* | 74 | /* |
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 9dbd2a733d6b..0ee6be4f1ba4 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c | |||
@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
112 | addr = PAGE_ALIGN(addr); | 112 | addr = PAGE_ALIGN(addr); |
113 | vma = find_vma(mm, addr); | 113 | vma = find_vma(mm, addr); |
114 | if (mm->task_size - len >= addr && addr >= mmap_min_addr && | 114 | if (mm->task_size - len >= addr && addr >= mmap_min_addr && |
115 | (!vma || addr + len <= vma->vm_start)) | 115 | (!vma || addr + len <= vm_start_gap(vma))) |
116 | return addr; | 116 | return addr; |
117 | } | 117 | } |
118 | 118 | ||
@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, | |||
157 | addr = PAGE_ALIGN(addr); | 157 | addr = PAGE_ALIGN(addr); |
158 | vma = find_vma(mm, addr); | 158 | vma = find_vma(mm, addr); |
159 | if (mm->task_size - len >= addr && addr >= mmap_min_addr && | 159 | if (mm->task_size - len >= addr && addr >= mmap_min_addr && |
160 | (!vma || addr + len <= vma->vm_start)) | 160 | (!vma || addr + len <= vm_start_gap(vma))) |
161 | return addr; | 161 | return addr; |
162 | } | 162 | } |
163 | 163 | ||
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 966b9fccfa66..45f6740dd407 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c | |||
@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, | |||
99 | if ((mm->task_size - len) < addr) | 99 | if ((mm->task_size - len) < addr) |
100 | return 0; | 100 | return 0; |
101 | vma = find_vma(mm, addr); | 101 | vma = find_vma(mm, addr); |
102 | return (!vma || (addr + len) <= vma->vm_start); | 102 | return (!vma || (addr + len) <= vm_start_gap(vma)); |
103 | } | 103 | } |
104 | 104 | ||
105 | static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) | 105 | static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) |
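The three powerpc mm hunks above (hugetlbpage-radix.c, mmap.c, slice.c) make the same substitution: a hinted address is checked against vm_start_gap(vma) instead of vma->vm_start, so the guard gap reserved below a downward-growing stack counts as occupied. The test reduces to the sketch below (kernel context assumed; vm_start_gap() is the helper the hunks call):

	/* Sketch of the hint validation used in the get_unmapped_area paths:
	 * accept the hint only if the range ends before the next VMA's
	 * start, including that VMA's stack guard gap. */
	static bool hint_fits(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long addr, unsigned long len)
	{
		return mm->task_size - len >= addr &&
		       (!vma || addr + len <= vm_start_gap(vma));
	}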
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index cbd82fde5770..09ceea6175ba 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c | |||
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user, | |||
101 | struct pt_regs *regs_user_copy) | 101 | struct pt_regs *regs_user_copy) |
102 | { | 102 | { |
103 | regs_user->regs = task_pt_regs(current); | 103 | regs_user->regs = task_pt_regs(current); |
104 | regs_user->abi = perf_reg_abi(current); | 104 | regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) : |
105 | PERF_SAMPLE_REGS_ABI_NONE; | ||
105 | } | 106 | } |
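The perf_regs.c fix ties the reported sample ABI to the presence of a user register frame: when task_pt_regs() yields NULL, the ABI is reported as PERF_SAMPLE_REGS_ABI_NONE instead of being derived from the task. In effect the assignment becomes:

	/* Sketch: only report a register ABI when a pt_regs frame exists. */
	regs_user->regs = task_pt_regs(current);
	regs_user->abi  = regs_user->regs ? perf_reg_abi(current)
					  : PERF_SAMPLE_REGS_ABI_NONE;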
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 78fa9395b8c5..b5d960d6db3d 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index) | |||
75 | if (WARN_ON(!gpdev)) | 75 | if (WARN_ON(!gpdev)) |
76 | return NULL; | 76 | return NULL; |
77 | 77 | ||
78 | if (WARN_ON(!gpdev->dev.of_node)) | 78 | /* Not all PCI devices have device-tree nodes */ |
79 | if (!gpdev->dev.of_node) | ||
79 | return NULL; | 80 | return NULL; |
80 | 81 | ||
81 | /* Get associated PCI device */ | 82 |
@@ -448,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch, | |||
448 | return mmio_atsd_reg; | 449 | return mmio_atsd_reg; |
449 | } | 450 | } |
450 | 451 | ||
451 | static int mmio_invalidate_pid(struct npu *npu, unsigned long pid) | 452 | static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush) |
452 | { | 453 | { |
453 | unsigned long launch; | 454 | unsigned long launch; |
454 | 455 | ||
@@ -464,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid) | |||
464 | /* PID */ | 465 | /* PID */ |
465 | launch |= pid << PPC_BITLSHIFT(38); | 466 | launch |= pid << PPC_BITLSHIFT(38); |
466 | 467 | ||
468 | /* No flush */ | ||
469 | launch |= !flush << PPC_BITLSHIFT(39); | ||
470 | |||
467 | /* Invalidating the entire process doesn't use a va */ | 471 | /* Invalidating the entire process doesn't use a va */ |
468 | return mmio_launch_invalidate(npu, launch, 0); | 472 | return mmio_launch_invalidate(npu, launch, 0); |
469 | } | 473 | } |
470 | 474 | ||
471 | static int mmio_invalidate_va(struct npu *npu, unsigned long va, | 475 | static int mmio_invalidate_va(struct npu *npu, unsigned long va, |
472 | unsigned long pid) | 476 | unsigned long pid, bool flush) |
473 | { | 477 | { |
474 | unsigned long launch; | 478 | unsigned long launch; |
475 | 479 | ||
@@ -485,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va, | |||
485 | /* PID */ | 489 | /* PID */ |
486 | launch |= pid << PPC_BITLSHIFT(38); | 490 | launch |= pid << PPC_BITLSHIFT(38); |
487 | 491 | ||
492 | /* No flush */ | ||
493 | launch |= !flush << PPC_BITLSHIFT(39); | ||
494 | |||
488 | return mmio_launch_invalidate(npu, launch, va); | 495 | return mmio_launch_invalidate(npu, launch, va); |
489 | } | 496 | } |
490 | 497 | ||
491 | #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) | 498 | #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) |
492 | 499 | ||
500 | struct mmio_atsd_reg { | ||
501 | struct npu *npu; | ||
502 | int reg; | ||
503 | }; | ||
504 | |||
505 | static void mmio_invalidate_wait( | ||
506 | struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush) | ||
507 | { | ||
508 | struct npu *npu; | ||
509 | int i, reg; | ||
510 | |||
511 | /* Wait for all invalidations to complete */ | ||
512 | for (i = 0; i <= max_npu2_index; i++) { | ||
513 | if (mmio_atsd_reg[i].reg < 0) | ||
514 | continue; | ||
515 | |||
516 | /* Wait for completion */ | ||
517 | npu = mmio_atsd_reg[i].npu; | ||
518 | reg = mmio_atsd_reg[i].reg; | ||
519 | while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) | ||
520 | cpu_relax(); | ||
521 | |||
522 | put_mmio_atsd_reg(npu, reg); | ||
523 | |||
524 | /* | ||
525 | * The GPU requires two flush ATSDs to ensure all entries have | ||
526 | * been flushed. We use PID 0 as it will never be used for a | ||
527 | * process on the GPU. | ||
528 | */ | ||
529 | if (flush) | ||
530 | mmio_invalidate_pid(npu, 0, true); | ||
531 | } | ||
532 | } | ||
533 | |||
493 | /* | 534 | /* |
494 | * Invalidate either a single address or an entire PID depending on | 535 | * Invalidate either a single address or an entire PID depending on |
495 | * the value of va. | 536 | * the value of va. |
496 | */ | 537 | */ |
497 | static void mmio_invalidate(struct npu_context *npu_context, int va, | 538 | static void mmio_invalidate(struct npu_context *npu_context, int va, |
498 | unsigned long address) | 539 | unsigned long address, bool flush) |
499 | { | 540 | { |
500 | int i, j, reg; | 541 | int i, j; |
501 | struct npu *npu; | 542 | struct npu *npu; |
502 | struct pnv_phb *nphb; | 543 | struct pnv_phb *nphb; |
503 | struct pci_dev *npdev; | 544 | struct pci_dev *npdev; |
504 | struct { | 545 | struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; |
505 | struct npu *npu; | ||
506 | int reg; | ||
507 | } mmio_atsd_reg[NV_MAX_NPUS]; | ||
508 | unsigned long pid = npu_context->mm->context.id; | 546 | unsigned long pid = npu_context->mm->context.id; |
509 | 547 | ||
510 | /* | 548 | /* |
@@ -524,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, | |||
524 | 562 | ||
525 | if (va) | 563 | if (va) |
526 | mmio_atsd_reg[i].reg = | 564 | mmio_atsd_reg[i].reg = |
527 | mmio_invalidate_va(npu, address, pid); | 565 | mmio_invalidate_va(npu, address, pid, |
566 | flush); | ||
528 | else | 567 | else |
529 | mmio_atsd_reg[i].reg = | 568 | mmio_atsd_reg[i].reg = |
530 | mmio_invalidate_pid(npu, pid); | 569 | mmio_invalidate_pid(npu, pid, flush); |
531 | 570 | ||
532 | /* | 571 | /* |
533 | * The NPU hardware forwards the shootdown to all GPUs | 572 | * The NPU hardware forwards the shootdown to all GPUs |
@@ -543,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, | |||
543 | */ | 582 | */ |
544 | flush_tlb_mm(npu_context->mm); | 583 | flush_tlb_mm(npu_context->mm); |
545 | 584 | ||
546 | /* Wait for all invalidations to complete */ | 585 | mmio_invalidate_wait(mmio_atsd_reg, flush); |
547 | for (i = 0; i <= max_npu2_index; i++) { | 586 | if (flush) |
548 | if (mmio_atsd_reg[i].reg < 0) | 587 | /* Wait for the flush to complete */ |
549 | continue; | 588 | mmio_invalidate_wait(mmio_atsd_reg, false); |
550 | |||
551 | /* Wait for completion */ | ||
552 | npu = mmio_atsd_reg[i].npu; | ||
553 | reg = mmio_atsd_reg[i].reg; | ||
554 | while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) | ||
555 | cpu_relax(); | ||
556 | put_mmio_atsd_reg(npu, reg); | ||
557 | } | ||
558 | } | 589 | } |
559 | 590 | ||
560 | static void pnv_npu2_mn_release(struct mmu_notifier *mn, | 591 | static void pnv_npu2_mn_release(struct mmu_notifier *mn, |
@@ -570,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn, | |||
570 | * There should be no more translation requests for this PID, but we | 601 | * There should be no more translation requests for this PID, but we |
571 | * need to ensure any entries for it are removed from the TLB. | 602 | * need to ensure any entries for it are removed from the TLB. |
572 | */ | 603 | */ |
573 | mmio_invalidate(npu_context, 0, 0); | 604 | mmio_invalidate(npu_context, 0, 0, true); |
574 | } | 605 | } |
575 | 606 | ||
576 | static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, | 607 | static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, |
@@ -580,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, | |||
580 | { | 611 | { |
581 | struct npu_context *npu_context = mn_to_npu_context(mn); | 612 | struct npu_context *npu_context = mn_to_npu_context(mn); |
582 | 613 | ||
583 | mmio_invalidate(npu_context, 1, address); | 614 | mmio_invalidate(npu_context, 1, address, true); |
584 | } | 615 | } |
585 | 616 | ||
586 | static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, | 617 | static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, |
@@ -589,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, | |||
589 | { | 620 | { |
590 | struct npu_context *npu_context = mn_to_npu_context(mn); | 621 | struct npu_context *npu_context = mn_to_npu_context(mn); |
591 | 622 | ||
592 | mmio_invalidate(npu_context, 1, address); | 623 | mmio_invalidate(npu_context, 1, address, true); |
593 | } | 624 | } |
594 | 625 | ||
595 | static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, | 626 | static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, |
@@ -599,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, | |||
599 | struct npu_context *npu_context = mn_to_npu_context(mn); | 630 | struct npu_context *npu_context = mn_to_npu_context(mn); |
600 | unsigned long address; | 631 | unsigned long address; |
601 | 632 | ||
602 | for (address = start; address <= end; address += PAGE_SIZE) | 633 | for (address = start; address < end; address += PAGE_SIZE) |
603 | mmio_invalidate(npu_context, 1, address); | 634 | mmio_invalidate(npu_context, 1, address, false); |
635 | |||
636 | /* Do the flush only on the final address == end */ | ||
637 | mmio_invalidate(npu_context, 1, address, true); | ||
604 | } | 638 | } |
605 | 639 | ||
606 | static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { | 640 | static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { |
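The invalidate_range callback above previously walked one page past the end of the range (address <= end) and requested a full flush for every page. It now iterates address < end with the flush deferred to one final invalidate, so the GPU TLB shootdown is flushed exactly once per range. The resulting loop shape, using the names from the code above:

	/* Sketch: per-page invalidates without flush, then a single flushing
	 * invalidate to complete the shootdown. */
	for (address = start; address < end; address += PAGE_SIZE)
		mmio_invalidate(npu_context, 1, address, false);
	mmio_invalidate(npu_context, 1, address, true);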
@@ -650,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, | |||
650 | /* No nvlink associated with this GPU device */ | 684 | /* No nvlink associated with this GPU device */ |
651 | return ERR_PTR(-ENODEV); | 685 | return ERR_PTR(-ENODEV); |
652 | 686 | ||
653 | if (!mm) { | 687 | if (!mm || mm->context.id == 0) { |
654 | /* kernel thread contexts are not supported */ | 688 | /* |
689 | * Kernel thread contexts are not supported and context id 0 is | ||
690 | * reserved on the GPU. | ||
691 | */ | ||
655 | return ERR_PTR(-EINVAL); | 692 | return ERR_PTR(-EINVAL); |
656 | } | 693 | } |
657 | 694 | ||
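pnv_npu2_init_context() now rejects not only kernel threads (no mm) but also an mm whose context id is 0, since id 0 is reserved on the GPU side; both cases hand ERR_PTR(-EINVAL) back to the caller. The guard is simply:

	/* Sketch: refuse kernel threads and the reserved context id 0. */
	if (!mm || mm->context.id == 0)
		return ERR_PTR(-EINVAL);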
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 913825086b8d..8f5e3035483b 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c | |||
@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) | |||
297 | { | 297 | { |
298 | /* If the XIVE supports the new "store EOI facility, use it */ | 298 | /* If the XIVE supports the new "store EOI facility, use it */ |
299 | if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) | 299 | if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) |
300 | out_be64(xd->eoi_mmio, 0); | 300 | out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0); |
301 | else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { | 301 | else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { |
302 | /* | 302 | /* |
303 | * The FW told us to call it. This happens for some | 303 | * The FW told us to call it. This happens for some |
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index a5039fa89314..282072206df7 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig | |||
@@ -30,6 +30,7 @@ CONFIG_USER_NS=y | |||
30 | CONFIG_SCHED_AUTOGROUP=y | 30 | CONFIG_SCHED_AUTOGROUP=y |
31 | CONFIG_BLK_DEV_INITRD=y | 31 | CONFIG_BLK_DEV_INITRD=y |
32 | CONFIG_EXPERT=y | 32 | CONFIG_EXPERT=y |
33 | # CONFIG_SYSFS_SYSCALL is not set | ||
33 | CONFIG_BPF_SYSCALL=y | 34 | CONFIG_BPF_SYSCALL=y |
34 | CONFIG_USERFAULTFD=y | 35 | CONFIG_USERFAULTFD=y |
35 | # CONFIG_COMPAT_BRK is not set | 36 | # CONFIG_COMPAT_BRK is not set |
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y | |||
44 | CONFIG_MODULE_FORCE_UNLOAD=y | 45 | CONFIG_MODULE_FORCE_UNLOAD=y |
45 | CONFIG_MODVERSIONS=y | 46 | CONFIG_MODVERSIONS=y |
46 | CONFIG_MODULE_SRCVERSION_ALL=y | 47 | CONFIG_MODULE_SRCVERSION_ALL=y |
48 | CONFIG_BLK_DEV_INTEGRITY=y | ||
47 | CONFIG_BLK_DEV_THROTTLING=y | 49 | CONFIG_BLK_DEV_THROTTLING=y |
50 | CONFIG_BLK_WBT=y | ||
51 | CONFIG_BLK_WBT_SQ=y | ||
48 | CONFIG_PARTITION_ADVANCED=y | 52 | CONFIG_PARTITION_ADVANCED=y |
49 | CONFIG_IBM_PARTITION=y | 53 | CONFIG_IBM_PARTITION=y |
50 | CONFIG_BSD_DISKLABEL=y | 54 | CONFIG_BSD_DISKLABEL=y |
@@ -90,6 +94,8 @@ CONFIG_UNIX=y | |||
90 | CONFIG_UNIX_DIAG=m | 94 | CONFIG_UNIX_DIAG=m |
91 | CONFIG_XFRM_USER=m | 95 | CONFIG_XFRM_USER=m |
92 | CONFIG_NET_KEY=m | 96 | CONFIG_NET_KEY=m |
97 | CONFIG_SMC=m | ||
98 | CONFIG_SMC_DIAG=m | ||
93 | CONFIG_INET=y | 99 | CONFIG_INET=y |
94 | CONFIG_IP_MULTICAST=y | 100 | CONFIG_IP_MULTICAST=y |
95 | CONFIG_IP_ADVANCED_ROUTER=y | 101 | CONFIG_IP_ADVANCED_ROUTER=y |
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m | |||
359 | CONFIG_NET_ACT_SKBEDIT=m | 365 | CONFIG_NET_ACT_SKBEDIT=m |
360 | CONFIG_NET_ACT_CSUM=m | 366 | CONFIG_NET_ACT_CSUM=m |
361 | CONFIG_DNS_RESOLVER=y | 367 | CONFIG_DNS_RESOLVER=y |
368 | CONFIG_NETLINK_DIAG=m | ||
362 | CONFIG_CGROUP_NET_PRIO=y | 369 | CONFIG_CGROUP_NET_PRIO=y |
363 | CONFIG_BPF_JIT=y | 370 | CONFIG_BPF_JIT=y |
364 | CONFIG_NET_PKTGEN=m | 371 | CONFIG_NET_PKTGEN=m |
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y | |||
367 | CONFIG_DMA_CMA=y | 374 | CONFIG_DMA_CMA=y |
368 | CONFIG_CMA_SIZE_MBYTES=0 | 375 | CONFIG_CMA_SIZE_MBYTES=0 |
369 | CONFIG_CONNECTOR=y | 376 | CONFIG_CONNECTOR=y |
377 | CONFIG_ZRAM=m | ||
370 | CONFIG_BLK_DEV_LOOP=m | 378 | CONFIG_BLK_DEV_LOOP=m |
371 | CONFIG_BLK_DEV_CRYPTOLOOP=m | 379 | CONFIG_BLK_DEV_CRYPTOLOOP=m |
380 | CONFIG_BLK_DEV_DRBD=m | ||
372 | CONFIG_BLK_DEV_NBD=m | 381 | CONFIG_BLK_DEV_NBD=m |
373 | CONFIG_BLK_DEV_OSD=m | 382 | CONFIG_BLK_DEV_OSD=m |
374 | CONFIG_BLK_DEV_RAM=y | 383 | CONFIG_BLK_DEV_RAM=y |
375 | CONFIG_BLK_DEV_RAM_SIZE=32768 | 384 | CONFIG_BLK_DEV_RAM_SIZE=32768 |
376 | CONFIG_CDROM_PKTCDVD=m | 385 | CONFIG_BLK_DEV_RAM_DAX=y |
377 | CONFIG_ATA_OVER_ETH=m | ||
378 | CONFIG_VIRTIO_BLK=y | 386 | CONFIG_VIRTIO_BLK=y |
387 | CONFIG_BLK_DEV_RBD=m | ||
379 | CONFIG_ENCLOSURE_SERVICES=m | 388 | CONFIG_ENCLOSURE_SERVICES=m |
389 | CONFIG_GENWQE=m | ||
380 | CONFIG_RAID_ATTRS=m | 390 | CONFIG_RAID_ATTRS=m |
381 | CONFIG_SCSI=y | 391 | CONFIG_SCSI=y |
382 | CONFIG_BLK_DEV_SD=y | 392 | CONFIG_BLK_DEV_SD=y |
@@ -442,6 +452,8 @@ CONFIG_NLMON=m | |||
442 | # CONFIG_NET_VENDOR_INTEL is not set | 452 | # CONFIG_NET_VENDOR_INTEL is not set |
443 | # CONFIG_NET_VENDOR_MARVELL is not set | 453 | # CONFIG_NET_VENDOR_MARVELL is not set |
444 | CONFIG_MLX4_EN=m | 454 | CONFIG_MLX4_EN=m |
455 | CONFIG_MLX5_CORE=m | ||
456 | CONFIG_MLX5_CORE_EN=y | ||
445 | # CONFIG_NET_VENDOR_NATSEMI is not set | 457 | # CONFIG_NET_VENDOR_NATSEMI is not set |
446 | CONFIG_PPP=m | 458 | CONFIG_PPP=m |
447 | CONFIG_PPP_BSDCOMP=m | 459 | CONFIG_PPP_BSDCOMP=m |
@@ -452,7 +464,6 @@ CONFIG_PPTP=m | |||
452 | CONFIG_PPPOL2TP=m | 464 | CONFIG_PPPOL2TP=m |
453 | CONFIG_PPP_ASYNC=m | 465 | CONFIG_PPP_ASYNC=m |
454 | CONFIG_PPP_SYNC_TTY=m | 466 | CONFIG_PPP_SYNC_TTY=m |
455 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | ||
456 | # CONFIG_INPUT_KEYBOARD is not set | 467 | # CONFIG_INPUT_KEYBOARD is not set |
457 | # CONFIG_INPUT_MOUSE is not set | 468 | # CONFIG_INPUT_MOUSE is not set |
458 | # CONFIG_SERIO is not set | 469 | # CONFIG_SERIO is not set |
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m | |||
471 | CONFIG_INFINIBAND=m | 482 | CONFIG_INFINIBAND=m |
472 | CONFIG_INFINIBAND_USER_ACCESS=m | 483 | CONFIG_INFINIBAND_USER_ACCESS=m |
473 | CONFIG_MLX4_INFINIBAND=m | 484 | CONFIG_MLX4_INFINIBAND=m |
485 | CONFIG_MLX5_INFINIBAND=m | ||
474 | CONFIG_VIRTIO_BALLOON=m | 486 | CONFIG_VIRTIO_BALLOON=m |
475 | CONFIG_EXT4_FS=y | 487 | CONFIG_EXT4_FS=y |
476 | CONFIG_EXT4_FS_POSIX_ACL=y | 488 | CONFIG_EXT4_FS_POSIX_ACL=y |
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y | |||
487 | CONFIG_XFS_RT=y | 499 | CONFIG_XFS_RT=y |
488 | CONFIG_XFS_DEBUG=y | 500 | CONFIG_XFS_DEBUG=y |
489 | CONFIG_GFS2_FS=m | 501 | CONFIG_GFS2_FS=m |
502 | CONFIG_GFS2_FS_LOCKING_DLM=y | ||
490 | CONFIG_OCFS2_FS=m | 503 | CONFIG_OCFS2_FS=m |
491 | CONFIG_BTRFS_FS=y | 504 | CONFIG_BTRFS_FS=y |
492 | CONFIG_BTRFS_FS_POSIX_ACL=y | 505 | CONFIG_BTRFS_FS_POSIX_ACL=y |
506 | CONFIG_BTRFS_DEBUG=y | ||
493 | CONFIG_NILFS2_FS=m | 507 | CONFIG_NILFS2_FS=m |
508 | CONFIG_FS_DAX=y | ||
509 | CONFIG_EXPORTFS_BLOCK_OPS=y | ||
494 | CONFIG_FANOTIFY=y | 510 | CONFIG_FANOTIFY=y |
511 | CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y | ||
495 | CONFIG_QUOTA_NETLINK_INTERFACE=y | 512 | CONFIG_QUOTA_NETLINK_INTERFACE=y |
513 | CONFIG_QUOTA_DEBUG=y | ||
496 | CONFIG_QFMT_V1=m | 514 | CONFIG_QFMT_V1=m |
497 | CONFIG_QFMT_V2=m | 515 | CONFIG_QFMT_V2=m |
498 | CONFIG_AUTOFS4_FS=m | 516 | CONFIG_AUTOFS4_FS=m |
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y | |||
558 | CONFIG_DEBUG_SECTION_MISMATCH=y | 576 | CONFIG_DEBUG_SECTION_MISMATCH=y |
559 | CONFIG_MAGIC_SYSRQ=y | 577 | CONFIG_MAGIC_SYSRQ=y |
560 | CONFIG_DEBUG_PAGEALLOC=y | 578 | CONFIG_DEBUG_PAGEALLOC=y |
579 | CONFIG_DEBUG_RODATA_TEST=y | ||
561 | CONFIG_DEBUG_OBJECTS=y | 580 | CONFIG_DEBUG_OBJECTS=y |
562 | CONFIG_DEBUG_OBJECTS_SELFTEST=y | 581 | CONFIG_DEBUG_OBJECTS_SELFTEST=y |
563 | CONFIG_DEBUG_OBJECTS_FREE=y | 582 | CONFIG_DEBUG_OBJECTS_FREE=y |
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y | |||
580 | CONFIG_WQ_WATCHDOG=y | 599 | CONFIG_WQ_WATCHDOG=y |
581 | CONFIG_PANIC_ON_OOPS=y | 600 | CONFIG_PANIC_ON_OOPS=y |
582 | CONFIG_DEBUG_TIMEKEEPING=y | 601 | CONFIG_DEBUG_TIMEKEEPING=y |
583 | CONFIG_TIMER_STATS=y | ||
584 | CONFIG_DEBUG_RT_MUTEXES=y | 602 | CONFIG_DEBUG_RT_MUTEXES=y |
585 | CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y | 603 | CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y |
586 | CONFIG_PROVE_LOCKING=y | 604 | CONFIG_PROVE_LOCKING=y |
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m | |||
595 | CONFIG_RCU_CPU_STALL_TIMEOUT=300 | 613 | CONFIG_RCU_CPU_STALL_TIMEOUT=300 |
596 | CONFIG_NOTIFIER_ERROR_INJECTION=m | 614 | CONFIG_NOTIFIER_ERROR_INJECTION=m |
597 | CONFIG_PM_NOTIFIER_ERROR_INJECT=m | 615 | CONFIG_PM_NOTIFIER_ERROR_INJECT=m |
616 | CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m | ||
598 | CONFIG_FAULT_INJECTION=y | 617 | CONFIG_FAULT_INJECTION=y |
599 | CONFIG_FAILSLAB=y | 618 | CONFIG_FAILSLAB=y |
600 | CONFIG_FAIL_PAGE_ALLOC=y | 619 | CONFIG_FAIL_PAGE_ALLOC=y |
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y | |||
616 | CONFIG_TRACE_ENUM_MAP_FILE=y | 635 | CONFIG_TRACE_ENUM_MAP_FILE=y |
617 | CONFIG_LKDTM=m | 636 | CONFIG_LKDTM=m |
618 | CONFIG_TEST_LIST_SORT=y | 637 | CONFIG_TEST_LIST_SORT=y |
638 | CONFIG_TEST_SORT=y | ||
619 | CONFIG_KPROBES_SANITY_TEST=y | 639 | CONFIG_KPROBES_SANITY_TEST=y |
620 | CONFIG_RBTREE_TEST=y | 640 | CONFIG_RBTREE_TEST=y |
621 | CONFIG_INTERVAL_TREE_TEST=m | 641 | CONFIG_INTERVAL_TREE_TEST=m |
622 | CONFIG_PERCPU_TEST=m | 642 | CONFIG_PERCPU_TEST=m |
623 | CONFIG_ATOMIC64_SELFTEST=y | 643 | CONFIG_ATOMIC64_SELFTEST=y |
624 | CONFIG_TEST_STRING_HELPERS=y | ||
625 | CONFIG_TEST_KSTRTOX=y | ||
626 | CONFIG_DMA_API_DEBUG=y | 644 | CONFIG_DMA_API_DEBUG=y |
627 | CONFIG_TEST_BPF=m | 645 | CONFIG_TEST_BPF=m |
628 | CONFIG_BUG_ON_DATA_CORRUPTION=y | 646 | CONFIG_BUG_ON_DATA_CORRUPTION=y |
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y | |||
630 | CONFIG_ENCRYPTED_KEYS=m | 648 | CONFIG_ENCRYPTED_KEYS=m |
631 | CONFIG_SECURITY=y | 649 | CONFIG_SECURITY=y |
632 | CONFIG_SECURITY_NETWORK=y | 650 | CONFIG_SECURITY_NETWORK=y |
651 | CONFIG_HARDENED_USERCOPY=y | ||
633 | CONFIG_SECURITY_SELINUX=y | 652 | CONFIG_SECURITY_SELINUX=y |
634 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | 653 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y |
635 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 | 654 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 |
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m | |||
640 | CONFIG_CRYPTO_DH=m | 659 | CONFIG_CRYPTO_DH=m |
641 | CONFIG_CRYPTO_ECDH=m | 660 | CONFIG_CRYPTO_ECDH=m |
642 | CONFIG_CRYPTO_USER=m | 661 | CONFIG_CRYPTO_USER=m |
662 | CONFIG_CRYPTO_PCRYPT=m | ||
643 | CONFIG_CRYPTO_CRYPTD=m | 663 | CONFIG_CRYPTO_CRYPTD=m |
664 | CONFIG_CRYPTO_MCRYPTD=m | ||
644 | CONFIG_CRYPTO_TEST=m | 665 | CONFIG_CRYPTO_TEST=m |
645 | CONFIG_CRYPTO_CCM=m | 666 | CONFIG_CRYPTO_CCM=m |
646 | CONFIG_CRYPTO_GCM=m | 667 | CONFIG_CRYPTO_GCM=m |
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
648 | CONFIG_CRYPTO_LRW=m | 669 | CONFIG_CRYPTO_LRW=m |
649 | CONFIG_CRYPTO_PCBC=m | 670 | CONFIG_CRYPTO_PCBC=m |
650 | CONFIG_CRYPTO_KEYWRAP=m | 671 | CONFIG_CRYPTO_KEYWRAP=m |
672 | CONFIG_CRYPTO_CMAC=m | ||
651 | CONFIG_CRYPTO_XCBC=m | 673 | CONFIG_CRYPTO_XCBC=m |
652 | CONFIG_CRYPTO_VMAC=m | 674 | CONFIG_CRYPTO_VMAC=m |
653 | CONFIG_CRYPTO_CRC32=m | 675 | CONFIG_CRYPTO_CRC32=m |
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m | |||
657 | CONFIG_CRYPTO_RMD256=m | 679 | CONFIG_CRYPTO_RMD256=m |
658 | CONFIG_CRYPTO_RMD320=m | 680 | CONFIG_CRYPTO_RMD320=m |
659 | CONFIG_CRYPTO_SHA512=m | 681 | CONFIG_CRYPTO_SHA512=m |
682 | CONFIG_CRYPTO_SHA3=m | ||
660 | CONFIG_CRYPTO_TGR192=m | 683 | CONFIG_CRYPTO_TGR192=m |
661 | CONFIG_CRYPTO_WP512=m | 684 | CONFIG_CRYPTO_WP512=m |
685 | CONFIG_CRYPTO_AES_TI=m | ||
662 | CONFIG_CRYPTO_ANUBIS=m | 686 | CONFIG_CRYPTO_ANUBIS=m |
663 | CONFIG_CRYPTO_BLOWFISH=m | 687 | CONFIG_CRYPTO_BLOWFISH=m |
664 | CONFIG_CRYPTO_CAMELLIA=m | 688 | CONFIG_CRYPTO_CAMELLIA=m |
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m | |||
674 | CONFIG_CRYPTO_842=m | 698 | CONFIG_CRYPTO_842=m |
675 | CONFIG_CRYPTO_LZ4=m | 699 | CONFIG_CRYPTO_LZ4=m |
676 | CONFIG_CRYPTO_LZ4HC=m | 700 | CONFIG_CRYPTO_LZ4HC=m |
701 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
677 | CONFIG_CRYPTO_USER_API_HASH=m | 702 | CONFIG_CRYPTO_USER_API_HASH=m |
678 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 703 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
679 | CONFIG_CRYPTO_USER_API_RNG=m | 704 | CONFIG_CRYPTO_USER_API_RNG=m |
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m | |||
685 | CONFIG_CRYPTO_SHA512_S390=m | 710 | CONFIG_CRYPTO_SHA512_S390=m |
686 | CONFIG_CRYPTO_DES_S390=m | 711 | CONFIG_CRYPTO_DES_S390=m |
687 | CONFIG_CRYPTO_AES_S390=m | 712 | CONFIG_CRYPTO_AES_S390=m |
713 | CONFIG_CRYPTO_PAES_S390=m | ||
688 | CONFIG_CRYPTO_GHASH_S390=m | 714 | CONFIG_CRYPTO_GHASH_S390=m |
689 | CONFIG_CRYPTO_CRC32_S390=y | 715 | CONFIG_CRYPTO_CRC32_S390=y |
690 | CONFIG_ASYMMETRIC_KEY_TYPE=y | 716 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | |||
692 | CONFIG_X509_CERTIFICATE_PARSER=m | 718 | CONFIG_X509_CERTIFICATE_PARSER=m |
693 | CONFIG_CRC7=m | 719 | CONFIG_CRC7=m |
694 | CONFIG_CRC8=m | 720 | CONFIG_CRC8=m |
721 | CONFIG_RANDOM32_SELFTEST=y | ||
695 | CONFIG_CORDIC=m | 722 | CONFIG_CORDIC=m |
696 | CONFIG_CMM=m | 723 | CONFIG_CMM=m |
697 | CONFIG_APPLDATA_BASE=y | 724 | CONFIG_APPLDATA_BASE=y |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 83970b5afb2b..3c6b78189fbc 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig | |||
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y | |||
31 | CONFIG_SCHED_AUTOGROUP=y | 31 | CONFIG_SCHED_AUTOGROUP=y |
32 | CONFIG_BLK_DEV_INITRD=y | 32 | CONFIG_BLK_DEV_INITRD=y |
33 | CONFIG_EXPERT=y | 33 | CONFIG_EXPERT=y |
34 | # CONFIG_SYSFS_SYSCALL is not set | ||
34 | CONFIG_BPF_SYSCALL=y | 35 | CONFIG_BPF_SYSCALL=y |
35 | CONFIG_USERFAULTFD=y | 36 | CONFIG_USERFAULTFD=y |
36 | # CONFIG_COMPAT_BRK is not set | 37 | # CONFIG_COMPAT_BRK is not set |
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y | |||
46 | CONFIG_MODULE_FORCE_UNLOAD=y | 47 | CONFIG_MODULE_FORCE_UNLOAD=y |
47 | CONFIG_MODVERSIONS=y | 48 | CONFIG_MODVERSIONS=y |
48 | CONFIG_MODULE_SRCVERSION_ALL=y | 49 | CONFIG_MODULE_SRCVERSION_ALL=y |
50 | CONFIG_BLK_DEV_INTEGRITY=y | ||
49 | CONFIG_BLK_DEV_THROTTLING=y | 51 | CONFIG_BLK_DEV_THROTTLING=y |
52 | CONFIG_BLK_WBT=y | ||
53 | CONFIG_BLK_WBT_SQ=y | ||
50 | CONFIG_PARTITION_ADVANCED=y | 54 | CONFIG_PARTITION_ADVANCED=y |
51 | CONFIG_IBM_PARTITION=y | 55 | CONFIG_IBM_PARTITION=y |
52 | CONFIG_BSD_DISKLABEL=y | 56 | CONFIG_BSD_DISKLABEL=y |
@@ -88,6 +92,8 @@ CONFIG_UNIX=y | |||
88 | CONFIG_UNIX_DIAG=m | 92 | CONFIG_UNIX_DIAG=m |
89 | CONFIG_XFRM_USER=m | 93 | CONFIG_XFRM_USER=m |
90 | CONFIG_NET_KEY=m | 94 | CONFIG_NET_KEY=m |
95 | CONFIG_SMC=m | ||
96 | CONFIG_SMC_DIAG=m | ||
91 | CONFIG_INET=y | 97 | CONFIG_INET=y |
92 | CONFIG_IP_MULTICAST=y | 98 | CONFIG_IP_MULTICAST=y |
93 | CONFIG_IP_ADVANCED_ROUTER=y | 99 | CONFIG_IP_ADVANCED_ROUTER=y |
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m | |||
356 | CONFIG_NET_ACT_SKBEDIT=m | 362 | CONFIG_NET_ACT_SKBEDIT=m |
357 | CONFIG_NET_ACT_CSUM=m | 363 | CONFIG_NET_ACT_CSUM=m |
358 | CONFIG_DNS_RESOLVER=y | 364 | CONFIG_DNS_RESOLVER=y |
365 | CONFIG_NETLINK_DIAG=m | ||
359 | CONFIG_CGROUP_NET_PRIO=y | 366 | CONFIG_CGROUP_NET_PRIO=y |
360 | CONFIG_BPF_JIT=y | 367 | CONFIG_BPF_JIT=y |
361 | CONFIG_NET_PKTGEN=m | 368 | CONFIG_NET_PKTGEN=m |
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y | |||
364 | CONFIG_DMA_CMA=y | 371 | CONFIG_DMA_CMA=y |
365 | CONFIG_CMA_SIZE_MBYTES=0 | 372 | CONFIG_CMA_SIZE_MBYTES=0 |
366 | CONFIG_CONNECTOR=y | 373 | CONFIG_CONNECTOR=y |
374 | CONFIG_ZRAM=m | ||
367 | CONFIG_BLK_DEV_LOOP=m | 375 | CONFIG_BLK_DEV_LOOP=m |
368 | CONFIG_BLK_DEV_CRYPTOLOOP=m | 376 | CONFIG_BLK_DEV_CRYPTOLOOP=m |
377 | CONFIG_BLK_DEV_DRBD=m | ||
369 | CONFIG_BLK_DEV_NBD=m | 378 | CONFIG_BLK_DEV_NBD=m |
370 | CONFIG_BLK_DEV_OSD=m | 379 | CONFIG_BLK_DEV_OSD=m |
371 | CONFIG_BLK_DEV_RAM=y | 380 | CONFIG_BLK_DEV_RAM=y |
372 | CONFIG_BLK_DEV_RAM_SIZE=32768 | 381 | CONFIG_BLK_DEV_RAM_SIZE=32768 |
373 | CONFIG_CDROM_PKTCDVD=m | 382 | CONFIG_BLK_DEV_RAM_DAX=y |
374 | CONFIG_ATA_OVER_ETH=m | ||
375 | CONFIG_VIRTIO_BLK=y | 383 | CONFIG_VIRTIO_BLK=y |
376 | CONFIG_ENCLOSURE_SERVICES=m | 384 | CONFIG_ENCLOSURE_SERVICES=m |
385 | CONFIG_GENWQE=m | ||
377 | CONFIG_RAID_ATTRS=m | 386 | CONFIG_RAID_ATTRS=m |
378 | CONFIG_SCSI=y | 387 | CONFIG_SCSI=y |
379 | CONFIG_BLK_DEV_SD=y | 388 | CONFIG_BLK_DEV_SD=y |
@@ -439,6 +448,8 @@ CONFIG_NLMON=m | |||
439 | # CONFIG_NET_VENDOR_INTEL is not set | 448 | # CONFIG_NET_VENDOR_INTEL is not set |
440 | # CONFIG_NET_VENDOR_MARVELL is not set | 449 | # CONFIG_NET_VENDOR_MARVELL is not set |
441 | CONFIG_MLX4_EN=m | 450 | CONFIG_MLX4_EN=m |
451 | CONFIG_MLX5_CORE=m | ||
452 | CONFIG_MLX5_CORE_EN=y | ||
442 | # CONFIG_NET_VENDOR_NATSEMI is not set | 453 | # CONFIG_NET_VENDOR_NATSEMI is not set |
443 | CONFIG_PPP=m | 454 | CONFIG_PPP=m |
444 | CONFIG_PPP_BSDCOMP=m | 455 | CONFIG_PPP_BSDCOMP=m |
@@ -449,7 +460,6 @@ CONFIG_PPTP=m | |||
449 | CONFIG_PPPOL2TP=m | 460 | CONFIG_PPPOL2TP=m |
450 | CONFIG_PPP_ASYNC=m | 461 | CONFIG_PPP_ASYNC=m |
451 | CONFIG_PPP_SYNC_TTY=m | 462 | CONFIG_PPP_SYNC_TTY=m |
452 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | ||
453 | # CONFIG_INPUT_KEYBOARD is not set | 463 | # CONFIG_INPUT_KEYBOARD is not set |
454 | # CONFIG_INPUT_MOUSE is not set | 464 | # CONFIG_INPUT_MOUSE is not set |
455 | # CONFIG_SERIO is not set | 465 | # CONFIG_SERIO is not set |
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m | |||
468 | CONFIG_INFINIBAND=m | 478 | CONFIG_INFINIBAND=m |
469 | CONFIG_INFINIBAND_USER_ACCESS=m | 479 | CONFIG_INFINIBAND_USER_ACCESS=m |
470 | CONFIG_MLX4_INFINIBAND=m | 480 | CONFIG_MLX4_INFINIBAND=m |
481 | CONFIG_MLX5_INFINIBAND=m | ||
471 | CONFIG_VIRTIO_BALLOON=m | 482 | CONFIG_VIRTIO_BALLOON=m |
472 | CONFIG_EXT4_FS=y | 483 | CONFIG_EXT4_FS=y |
473 | CONFIG_EXT4_FS_POSIX_ACL=y | 484 | CONFIG_EXT4_FS_POSIX_ACL=y |
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y | |||
483 | CONFIG_XFS_POSIX_ACL=y | 494 | CONFIG_XFS_POSIX_ACL=y |
484 | CONFIG_XFS_RT=y | 495 | CONFIG_XFS_RT=y |
485 | CONFIG_GFS2_FS=m | 496 | CONFIG_GFS2_FS=m |
497 | CONFIG_GFS2_FS_LOCKING_DLM=y | ||
486 | CONFIG_OCFS2_FS=m | 498 | CONFIG_OCFS2_FS=m |
487 | CONFIG_BTRFS_FS=y | 499 | CONFIG_BTRFS_FS=y |
488 | CONFIG_BTRFS_FS_POSIX_ACL=y | 500 | CONFIG_BTRFS_FS_POSIX_ACL=y |
489 | CONFIG_NILFS2_FS=m | 501 | CONFIG_NILFS2_FS=m |
502 | CONFIG_FS_DAX=y | ||
503 | CONFIG_EXPORTFS_BLOCK_OPS=y | ||
490 | CONFIG_FANOTIFY=y | 504 | CONFIG_FANOTIFY=y |
505 | CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y | ||
491 | CONFIG_QUOTA_NETLINK_INTERFACE=y | 506 | CONFIG_QUOTA_NETLINK_INTERFACE=y |
492 | CONFIG_QFMT_V1=m | 507 | CONFIG_QFMT_V1=m |
493 | CONFIG_QFMT_V2=m | 508 | CONFIG_QFMT_V2=m |
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y | |||
553 | CONFIG_MAGIC_SYSRQ=y | 568 | CONFIG_MAGIC_SYSRQ=y |
554 | CONFIG_DEBUG_MEMORY_INIT=y | 569 | CONFIG_DEBUG_MEMORY_INIT=y |
555 | CONFIG_PANIC_ON_OOPS=y | 570 | CONFIG_PANIC_ON_OOPS=y |
556 | CONFIG_TIMER_STATS=y | ||
557 | CONFIG_RCU_TORTURE_TEST=m | 571 | CONFIG_RCU_TORTURE_TEST=m |
558 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | 572 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 |
559 | CONFIG_LATENCYTOP=y | 573 | CONFIG_LATENCYTOP=y |
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y | |||
576 | CONFIG_ENCRYPTED_KEYS=m | 590 | CONFIG_ENCRYPTED_KEYS=m |
577 | CONFIG_SECURITY=y | 591 | CONFIG_SECURITY=y |
578 | CONFIG_SECURITY_NETWORK=y | 592 | CONFIG_SECURITY_NETWORK=y |
593 | CONFIG_HARDENED_USERCOPY=y | ||
579 | CONFIG_SECURITY_SELINUX=y | 594 | CONFIG_SECURITY_SELINUX=y |
580 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | 595 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y |
581 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 | 596 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 |
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
599 | CONFIG_CRYPTO_LRW=m | 614 | CONFIG_CRYPTO_LRW=m |
600 | CONFIG_CRYPTO_PCBC=m | 615 | CONFIG_CRYPTO_PCBC=m |
601 | CONFIG_CRYPTO_KEYWRAP=m | 616 | CONFIG_CRYPTO_KEYWRAP=m |
617 | CONFIG_CRYPTO_CMAC=m | ||
602 | CONFIG_CRYPTO_XCBC=m | 618 | CONFIG_CRYPTO_XCBC=m |
603 | CONFIG_CRYPTO_VMAC=m | 619 | CONFIG_CRYPTO_VMAC=m |
604 | CONFIG_CRYPTO_CRC32=m | 620 | CONFIG_CRYPTO_CRC32=m |
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m | |||
611 | CONFIG_CRYPTO_SHA3=m | 627 | CONFIG_CRYPTO_SHA3=m |
612 | CONFIG_CRYPTO_TGR192=m | 628 | CONFIG_CRYPTO_TGR192=m |
613 | CONFIG_CRYPTO_WP512=m | 629 | CONFIG_CRYPTO_WP512=m |
630 | CONFIG_CRYPTO_AES_TI=m | ||
614 | CONFIG_CRYPTO_ANUBIS=m | 631 | CONFIG_CRYPTO_ANUBIS=m |
615 | CONFIG_CRYPTO_BLOWFISH=m | 632 | CONFIG_CRYPTO_BLOWFISH=m |
616 | CONFIG_CRYPTO_CAMELLIA=m | 633 | CONFIG_CRYPTO_CAMELLIA=m |
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m | |||
626 | CONFIG_CRYPTO_842=m | 643 | CONFIG_CRYPTO_842=m |
627 | CONFIG_CRYPTO_LZ4=m | 644 | CONFIG_CRYPTO_LZ4=m |
628 | CONFIG_CRYPTO_LZ4HC=m | 645 | CONFIG_CRYPTO_LZ4HC=m |
646 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
629 | CONFIG_CRYPTO_USER_API_HASH=m | 647 | CONFIG_CRYPTO_USER_API_HASH=m |
630 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 648 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
631 | CONFIG_CRYPTO_USER_API_RNG=m | 649 | CONFIG_CRYPTO_USER_API_RNG=m |
632 | CONFIG_CRYPTO_USER_API_AEAD=m | 650 | CONFIG_CRYPTO_USER_API_AEAD=m |
633 | CONFIG_ZCRYPT=m | 651 | CONFIG_ZCRYPT=m |
652 | CONFIG_PKEY=m | ||
634 | CONFIG_CRYPTO_SHA1_S390=m | 653 | CONFIG_CRYPTO_SHA1_S390=m |
635 | CONFIG_CRYPTO_SHA256_S390=m | 654 | CONFIG_CRYPTO_SHA256_S390=m |
636 | CONFIG_CRYPTO_SHA512_S390=m | 655 | CONFIG_CRYPTO_SHA512_S390=m |
637 | CONFIG_CRYPTO_DES_S390=m | 656 | CONFIG_CRYPTO_DES_S390=m |
638 | CONFIG_CRYPTO_AES_S390=m | 657 | CONFIG_CRYPTO_AES_S390=m |
658 | CONFIG_CRYPTO_PAES_S390=m | ||
639 | CONFIG_CRYPTO_GHASH_S390=m | 659 | CONFIG_CRYPTO_GHASH_S390=m |
640 | CONFIG_CRYPTO_CRC32_S390=y | 660 | CONFIG_CRYPTO_CRC32_S390=y |
641 | CONFIG_CRC7=m | 661 | CONFIG_CRC7=m |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index fbc6542aaf59..653d72bcc007 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y | |||
31 | CONFIG_SCHED_AUTOGROUP=y | 31 | CONFIG_SCHED_AUTOGROUP=y |
32 | CONFIG_BLK_DEV_INITRD=y | 32 | CONFIG_BLK_DEV_INITRD=y |
33 | CONFIG_EXPERT=y | 33 | CONFIG_EXPERT=y |
34 | # CONFIG_SYSFS_SYSCALL is not set | ||
34 | CONFIG_BPF_SYSCALL=y | 35 | CONFIG_BPF_SYSCALL=y |
35 | CONFIG_USERFAULTFD=y | 36 | CONFIG_USERFAULTFD=y |
36 | # CONFIG_COMPAT_BRK is not set | 37 | # CONFIG_COMPAT_BRK is not set |
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y | |||
44 | CONFIG_MODULE_FORCE_UNLOAD=y | 45 | CONFIG_MODULE_FORCE_UNLOAD=y |
45 | CONFIG_MODVERSIONS=y | 46 | CONFIG_MODVERSIONS=y |
46 | CONFIG_MODULE_SRCVERSION_ALL=y | 47 | CONFIG_MODULE_SRCVERSION_ALL=y |
48 | CONFIG_BLK_DEV_INTEGRITY=y | ||
47 | CONFIG_BLK_DEV_THROTTLING=y | 49 | CONFIG_BLK_DEV_THROTTLING=y |
50 | CONFIG_BLK_WBT=y | ||
51 | CONFIG_BLK_WBT_SQ=y | ||
48 | CONFIG_PARTITION_ADVANCED=y | 52 | CONFIG_PARTITION_ADVANCED=y |
49 | CONFIG_IBM_PARTITION=y | 53 | CONFIG_IBM_PARTITION=y |
50 | CONFIG_BSD_DISKLABEL=y | 54 | CONFIG_BSD_DISKLABEL=y |
@@ -86,6 +90,8 @@ CONFIG_UNIX=y | |||
86 | CONFIG_UNIX_DIAG=m | 90 | CONFIG_UNIX_DIAG=m |
87 | CONFIG_XFRM_USER=m | 91 | CONFIG_XFRM_USER=m |
88 | CONFIG_NET_KEY=m | 92 | CONFIG_NET_KEY=m |
93 | CONFIG_SMC=m | ||
94 | CONFIG_SMC_DIAG=m | ||
89 | CONFIG_INET=y | 95 | CONFIG_INET=y |
90 | CONFIG_IP_MULTICAST=y | 96 | CONFIG_IP_MULTICAST=y |
91 | CONFIG_IP_ADVANCED_ROUTER=y | 97 | CONFIG_IP_ADVANCED_ROUTER=y |
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m | |||
354 | CONFIG_NET_ACT_SKBEDIT=m | 360 | CONFIG_NET_ACT_SKBEDIT=m |
355 | CONFIG_NET_ACT_CSUM=m | 361 | CONFIG_NET_ACT_CSUM=m |
356 | CONFIG_DNS_RESOLVER=y | 362 | CONFIG_DNS_RESOLVER=y |
363 | CONFIG_NETLINK_DIAG=m | ||
357 | CONFIG_CGROUP_NET_PRIO=y | 364 | CONFIG_CGROUP_NET_PRIO=y |
358 | CONFIG_BPF_JIT=y | 365 | CONFIG_BPF_JIT=y |
359 | CONFIG_NET_PKTGEN=m | 366 | CONFIG_NET_PKTGEN=m |
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y | |||
362 | CONFIG_DMA_CMA=y | 369 | CONFIG_DMA_CMA=y |
363 | CONFIG_CMA_SIZE_MBYTES=0 | 370 | CONFIG_CMA_SIZE_MBYTES=0 |
364 | CONFIG_CONNECTOR=y | 371 | CONFIG_CONNECTOR=y |
372 | CONFIG_ZRAM=m | ||
365 | CONFIG_BLK_DEV_LOOP=m | 373 | CONFIG_BLK_DEV_LOOP=m |
366 | CONFIG_BLK_DEV_CRYPTOLOOP=m | 374 | CONFIG_BLK_DEV_CRYPTOLOOP=m |
375 | CONFIG_BLK_DEV_DRBD=m | ||
367 | CONFIG_BLK_DEV_NBD=m | 376 | CONFIG_BLK_DEV_NBD=m |
368 | CONFIG_BLK_DEV_OSD=m | 377 | CONFIG_BLK_DEV_OSD=m |
369 | CONFIG_BLK_DEV_RAM=y | 378 | CONFIG_BLK_DEV_RAM=y |
370 | CONFIG_BLK_DEV_RAM_SIZE=32768 | 379 | CONFIG_BLK_DEV_RAM_SIZE=32768 |
371 | CONFIG_CDROM_PKTCDVD=m | 380 | CONFIG_BLK_DEV_RAM_DAX=y |
372 | CONFIG_ATA_OVER_ETH=m | ||
373 | CONFIG_VIRTIO_BLK=y | 381 | CONFIG_VIRTIO_BLK=y |
374 | CONFIG_ENCLOSURE_SERVICES=m | 382 | CONFIG_ENCLOSURE_SERVICES=m |
383 | CONFIG_GENWQE=m | ||
375 | CONFIG_RAID_ATTRS=m | 384 | CONFIG_RAID_ATTRS=m |
376 | CONFIG_SCSI=y | 385 | CONFIG_SCSI=y |
377 | CONFIG_BLK_DEV_SD=y | 386 | CONFIG_BLK_DEV_SD=y |
@@ -437,6 +446,8 @@ CONFIG_NLMON=m | |||
437 | # CONFIG_NET_VENDOR_INTEL is not set | 446 | # CONFIG_NET_VENDOR_INTEL is not set |
438 | # CONFIG_NET_VENDOR_MARVELL is not set | 447 | # CONFIG_NET_VENDOR_MARVELL is not set |
439 | CONFIG_MLX4_EN=m | 448 | CONFIG_MLX4_EN=m |
449 | CONFIG_MLX5_CORE=m | ||
450 | CONFIG_MLX5_CORE_EN=y | ||
440 | # CONFIG_NET_VENDOR_NATSEMI is not set | 451 | # CONFIG_NET_VENDOR_NATSEMI is not set |
441 | CONFIG_PPP=m | 452 | CONFIG_PPP=m |
442 | CONFIG_PPP_BSDCOMP=m | 453 | CONFIG_PPP_BSDCOMP=m |
@@ -447,7 +458,6 @@ CONFIG_PPTP=m | |||
447 | CONFIG_PPPOL2TP=m | 458 | CONFIG_PPPOL2TP=m |
448 | CONFIG_PPP_ASYNC=m | 459 | CONFIG_PPP_ASYNC=m |
449 | CONFIG_PPP_SYNC_TTY=m | 460 | CONFIG_PPP_SYNC_TTY=m |
450 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | ||
451 | # CONFIG_INPUT_KEYBOARD is not set | 461 | # CONFIG_INPUT_KEYBOARD is not set |
452 | # CONFIG_INPUT_MOUSE is not set | 462 | # CONFIG_INPUT_MOUSE is not set |
453 | # CONFIG_SERIO is not set | 463 | # CONFIG_SERIO is not set |
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m | |||
466 | CONFIG_INFINIBAND=m | 476 | CONFIG_INFINIBAND=m |
467 | CONFIG_INFINIBAND_USER_ACCESS=m | 477 | CONFIG_INFINIBAND_USER_ACCESS=m |
468 | CONFIG_MLX4_INFINIBAND=m | 478 | CONFIG_MLX4_INFINIBAND=m |
479 | CONFIG_MLX5_INFINIBAND=m | ||
469 | CONFIG_VIRTIO_BALLOON=m | 480 | CONFIG_VIRTIO_BALLOON=m |
470 | CONFIG_EXT4_FS=y | 481 | CONFIG_EXT4_FS=y |
471 | CONFIG_EXT4_FS_POSIX_ACL=y | 482 | CONFIG_EXT4_FS_POSIX_ACL=y |
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y | |||
481 | CONFIG_XFS_POSIX_ACL=y | 492 | CONFIG_XFS_POSIX_ACL=y |
482 | CONFIG_XFS_RT=y | 493 | CONFIG_XFS_RT=y |
483 | CONFIG_GFS2_FS=m | 494 | CONFIG_GFS2_FS=m |
495 | CONFIG_GFS2_FS_LOCKING_DLM=y | ||
484 | CONFIG_OCFS2_FS=m | 496 | CONFIG_OCFS2_FS=m |
485 | CONFIG_BTRFS_FS=y | 497 | CONFIG_BTRFS_FS=y |
486 | CONFIG_BTRFS_FS_POSIX_ACL=y | 498 | CONFIG_BTRFS_FS_POSIX_ACL=y |
487 | CONFIG_NILFS2_FS=m | 499 | CONFIG_NILFS2_FS=m |
500 | CONFIG_FS_DAX=y | ||
501 | CONFIG_EXPORTFS_BLOCK_OPS=y | ||
488 | CONFIG_FANOTIFY=y | 502 | CONFIG_FANOTIFY=y |
503 | CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y | ||
489 | CONFIG_QUOTA_NETLINK_INTERFACE=y | 504 | CONFIG_QUOTA_NETLINK_INTERFACE=y |
490 | CONFIG_QFMT_V1=m | 505 | CONFIG_QFMT_V1=m |
491 | CONFIG_QFMT_V2=m | 506 | CONFIG_QFMT_V2=m |
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y | |||
551 | CONFIG_MAGIC_SYSRQ=y | 566 | CONFIG_MAGIC_SYSRQ=y |
552 | CONFIG_DEBUG_MEMORY_INIT=y | 567 | CONFIG_DEBUG_MEMORY_INIT=y |
553 | CONFIG_PANIC_ON_OOPS=y | 568 | CONFIG_PANIC_ON_OOPS=y |
554 | CONFIG_TIMER_STATS=y | ||
555 | CONFIG_RCU_TORTURE_TEST=m | 569 | CONFIG_RCU_TORTURE_TEST=m |
556 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | 570 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 |
557 | CONFIG_LATENCYTOP=y | 571 | CONFIG_LATENCYTOP=y |
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y | |||
574 | CONFIG_ENCRYPTED_KEYS=m | 588 | CONFIG_ENCRYPTED_KEYS=m |
575 | CONFIG_SECURITY=y | 589 | CONFIG_SECURITY=y |
576 | CONFIG_SECURITY_NETWORK=y | 590 | CONFIG_SECURITY_NETWORK=y |
591 | CONFIG_HARDENED_USERCOPY=y | ||
577 | CONFIG_SECURITY_SELINUX=y | 592 | CONFIG_SECURITY_SELINUX=y |
578 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | 593 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y |
579 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 | 594 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 |
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
597 | CONFIG_CRYPTO_LRW=m | 612 | CONFIG_CRYPTO_LRW=m |
598 | CONFIG_CRYPTO_PCBC=m | 613 | CONFIG_CRYPTO_PCBC=m |
599 | CONFIG_CRYPTO_KEYWRAP=m | 614 | CONFIG_CRYPTO_KEYWRAP=m |
615 | CONFIG_CRYPTO_CMAC=m | ||
600 | CONFIG_CRYPTO_XCBC=m | 616 | CONFIG_CRYPTO_XCBC=m |
601 | CONFIG_CRYPTO_VMAC=m | 617 | CONFIG_CRYPTO_VMAC=m |
602 | CONFIG_CRYPTO_CRC32=m | 618 | CONFIG_CRYPTO_CRC32=m |
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m | |||
609 | CONFIG_CRYPTO_SHA3=m | 625 | CONFIG_CRYPTO_SHA3=m |
610 | CONFIG_CRYPTO_TGR192=m | 626 | CONFIG_CRYPTO_TGR192=m |
611 | CONFIG_CRYPTO_WP512=m | 627 | CONFIG_CRYPTO_WP512=m |
628 | CONFIG_CRYPTO_AES_TI=m | ||
612 | CONFIG_CRYPTO_ANUBIS=m | 629 | CONFIG_CRYPTO_ANUBIS=m |
613 | CONFIG_CRYPTO_BLOWFISH=m | 630 | CONFIG_CRYPTO_BLOWFISH=m |
614 | CONFIG_CRYPTO_CAMELLIA=m | 631 | CONFIG_CRYPTO_CAMELLIA=m |
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m | |||
624 | CONFIG_CRYPTO_842=m | 641 | CONFIG_CRYPTO_842=m |
625 | CONFIG_CRYPTO_LZ4=m | 642 | CONFIG_CRYPTO_LZ4=m |
626 | CONFIG_CRYPTO_LZ4HC=m | 643 | CONFIG_CRYPTO_LZ4HC=m |
644 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
627 | CONFIG_CRYPTO_USER_API_HASH=m | 645 | CONFIG_CRYPTO_USER_API_HASH=m |
628 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 646 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
629 | CONFIG_CRYPTO_USER_API_RNG=m | 647 | CONFIG_CRYPTO_USER_API_RNG=m |
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m | |||
635 | CONFIG_CRYPTO_SHA512_S390=m | 653 | CONFIG_CRYPTO_SHA512_S390=m |
636 | CONFIG_CRYPTO_DES_S390=m | 654 | CONFIG_CRYPTO_DES_S390=m |
637 | CONFIG_CRYPTO_AES_S390=m | 655 | CONFIG_CRYPTO_AES_S390=m |
656 | CONFIG_CRYPTO_PAES_S390=m | ||
638 | CONFIG_CRYPTO_GHASH_S390=m | 657 | CONFIG_CRYPTO_GHASH_S390=m |
639 | CONFIG_CRYPTO_CRC32_S390=y | 658 | CONFIG_CRYPTO_CRC32_S390=y |
640 | CONFIG_CRC7=m | 659 | CONFIG_CRC7=m |
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index e23d97c13735..afa46a7406ea 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig | |||
@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y | |||
12 | CONFIG_NR_CPUS=2 | 12 | CONFIG_NR_CPUS=2 |
13 | # CONFIG_HOTPLUG_CPU is not set | 13 | # CONFIG_HOTPLUG_CPU is not set |
14 | CONFIG_HZ_100=y | 14 | CONFIG_HZ_100=y |
15 | # CONFIG_ARCH_RANDOM is not set | ||
15 | # CONFIG_COMPACTION is not set | 16 | # CONFIG_COMPACTION is not set |
16 | # CONFIG_MIGRATION is not set | 17 | # CONFIG_MIGRATION is not set |
18 | # CONFIG_BOUNCE is not set | ||
17 | # CONFIG_CHECK_STACK is not set | 19 | # CONFIG_CHECK_STACK is not set |
18 | # CONFIG_CHSC_SCH is not set | 20 | # CONFIG_CHSC_SCH is not set |
19 | # CONFIG_SCM_BUS is not set | 21 | # CONFIG_SCM_BUS is not set |
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y | |||
36 | CONFIG_SCSI_LOGGING=y | 38 | CONFIG_SCSI_LOGGING=y |
37 | CONFIG_SCSI_FC_ATTRS=y | 39 | CONFIG_SCSI_FC_ATTRS=y |
38 | CONFIG_ZFCP=y | 40 | CONFIG_ZFCP=y |
39 | # CONFIG_INPUT_MOUSEDEV_PSAUX is not set | ||
40 | # CONFIG_INPUT_KEYBOARD is not set | 41 | # CONFIG_INPUT_KEYBOARD is not set |
41 | # CONFIG_INPUT_MOUSE is not set | 42 | # CONFIG_INPUT_MOUSE is not set |
42 | # CONFIG_SERIO is not set | 43 | # CONFIG_SERIO is not set |
43 | # CONFIG_HVC_IUCV is not set | 44 | # CONFIG_HVC_IUCV is not set |
45 | # CONFIG_HW_RANDOM_S390 is not set | ||
44 | CONFIG_RAW_DRIVER=y | 46 | CONFIG_RAW_DRIVER=y |
45 | # CONFIG_SCLP_ASYNC is not set | 47 | # CONFIG_SCLP_ASYNC is not set |
46 | # CONFIG_HMC_DRV is not set | 48 | # CONFIG_HMC_DRV is not set |
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y | |||
54 | # CONFIG_INOTIFY_USER is not set | 56 | # CONFIG_INOTIFY_USER is not set |
55 | CONFIG_CONFIGFS_FS=y | 57 | CONFIG_CONFIGFS_FS=y |
56 | # CONFIG_MISC_FILESYSTEMS is not set | 58 | # CONFIG_MISC_FILESYSTEMS is not set |
59 | # CONFIG_NETWORK_FILESYSTEMS is not set | ||
57 | CONFIG_PRINTK_TIME=y | 60 | CONFIG_PRINTK_TIME=y |
58 | CONFIG_DEBUG_INFO=y | 61 | CONFIG_DEBUG_INFO=y |
59 | CONFIG_DEBUG_FS=y | ||
60 | CONFIG_DEBUG_KERNEL=y | 62 | CONFIG_DEBUG_KERNEL=y |
61 | CONFIG_PANIC_ON_OOPS=y | 63 | CONFIG_PANIC_ON_OOPS=y |
62 | # CONFIG_SCHED_DEBUG is not set | 64 | # CONFIG_SCHED_DEBUG is not set |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 97189dbaf34b..20244a38c886 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y | |||
28 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
29 | CONFIG_BLK_DEV_INITRD=y | 29 | CONFIG_BLK_DEV_INITRD=y |
30 | CONFIG_EXPERT=y | 30 | CONFIG_EXPERT=y |
31 | # CONFIG_SYSFS_SYSCALL is not set | ||
31 | CONFIG_BPF_SYSCALL=y | 32 | CONFIG_BPF_SYSCALL=y |
32 | CONFIG_USERFAULTFD=y | 33 | CONFIG_USERFAULTFD=y |
33 | # CONFIG_COMPAT_BRK is not set | 34 | # CONFIG_COMPAT_BRK is not set |
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y | |||
108 | CONFIG_SCSI_VIRTIO=y | 109 | CONFIG_SCSI_VIRTIO=y |
109 | CONFIG_MD=y | 110 | CONFIG_MD=y |
110 | CONFIG_MD_LINEAR=m | 111 | CONFIG_MD_LINEAR=m |
111 | CONFIG_MD_RAID0=m | ||
112 | CONFIG_MD_MULTIPATH=m | 112 | CONFIG_MD_MULTIPATH=m |
113 | CONFIG_BLK_DEV_DM=y | 113 | CONFIG_BLK_DEV_DM=y |
114 | CONFIG_DM_CRYPT=m | 114 | CONFIG_DM_CRYPT=m |
@@ -131,6 +131,7 @@ CONFIG_TUN=m | |||
131 | CONFIG_VIRTIO_NET=y | 131 | CONFIG_VIRTIO_NET=y |
132 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 132 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
133 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 133 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
134 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
134 | # CONFIG_INPUT is not set | 135 | # CONFIG_INPUT is not set |
135 | # CONFIG_SERIO is not set | 136 | # CONFIG_SERIO is not set |
136 | CONFIG_DEVKMEM=y | 137 | CONFIG_DEVKMEM=y |
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y | |||
162 | CONFIG_DEBUG_PAGEALLOC=y | 163 | CONFIG_DEBUG_PAGEALLOC=y |
163 | CONFIG_DETECT_HUNG_TASK=y | 164 | CONFIG_DETECT_HUNG_TASK=y |
164 | CONFIG_PANIC_ON_OOPS=y | 165 | CONFIG_PANIC_ON_OOPS=y |
165 | CONFIG_TIMER_STATS=y | ||
166 | CONFIG_DEBUG_RT_MUTEXES=y | 166 | CONFIG_DEBUG_RT_MUTEXES=y |
167 | CONFIG_PROVE_LOCKING=y | 167 | CONFIG_PROVE_LOCKING=y |
168 | CONFIG_LOCK_STAT=y | 168 | CONFIG_LOCK_STAT=y |
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y | |||
172 | CONFIG_DEBUG_SG=y | 172 | CONFIG_DEBUG_SG=y |
173 | CONFIG_DEBUG_NOTIFIERS=y | 173 | CONFIG_DEBUG_NOTIFIERS=y |
174 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | 174 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 |
175 | CONFIG_RCU_TRACE=y | ||
176 | CONFIG_LATENCYTOP=y | 175 | CONFIG_LATENCYTOP=y |
177 | CONFIG_SCHED_TRACER=y | 176 | CONFIG_SCHED_TRACER=y |
178 | CONFIG_FTRACE_SYSCALLS=y | 177 | CONFIG_FTRACE_SYSCALLS=y |
179 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y | 178 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y |
180 | CONFIG_STACK_TRACER=y | 179 | CONFIG_STACK_TRACER=y |
181 | CONFIG_BLK_DEV_IO_TRACE=y | 180 | CONFIG_BLK_DEV_IO_TRACE=y |
182 | CONFIG_UPROBE_EVENTS=y | ||
183 | CONFIG_FUNCTION_PROFILER=y | 181 | CONFIG_FUNCTION_PROFILER=y |
184 | CONFIG_TRACE_ENUM_MAP_FILE=y | 182 | CONFIG_TRACE_ENUM_MAP_FILE=y |
185 | CONFIG_KPROBES_SANITY_TEST=y | 183 | CONFIG_KPROBES_SANITY_TEST=y |
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m | |||
190 | CONFIG_CRYPTO_GCM=m | 188 | CONFIG_CRYPTO_GCM=m |
191 | CONFIG_CRYPTO_CBC=y | 189 | CONFIG_CRYPTO_CBC=y |
192 | CONFIG_CRYPTO_CTS=m | 190 | CONFIG_CRYPTO_CTS=m |
193 | CONFIG_CRYPTO_ECB=m | ||
194 | CONFIG_CRYPTO_LRW=m | 191 | CONFIG_CRYPTO_LRW=m |
195 | CONFIG_CRYPTO_PCBC=m | 192 | CONFIG_CRYPTO_PCBC=m |
196 | CONFIG_CRYPTO_XTS=m | 193 | CONFIG_CRYPTO_XTS=m |
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
230 | CONFIG_CRYPTO_USER_API_RNG=m | 227 | CONFIG_CRYPTO_USER_API_RNG=m |
231 | CONFIG_ZCRYPT=m | 228 | CONFIG_ZCRYPT=m |
232 | CONFIG_PKEY=m | 229 | CONFIG_PKEY=m |
230 | CONFIG_CRYPTO_PAES_S390=m | ||
233 | CONFIG_CRYPTO_SHA1_S390=m | 231 | CONFIG_CRYPTO_SHA1_S390=m |
234 | CONFIG_CRYPTO_SHA256_S390=m | 232 | CONFIG_CRYPTO_SHA256_S390=m |
235 | CONFIG_CRYPTO_SHA512_S390=m | 233 | CONFIG_CRYPTO_SHA512_S390=m |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index e408d9cc5b96..6315037335ba 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -231,12 +231,17 @@ ENTRY(sie64a) | |||
231 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 231 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
232 | .Lsie_done: | 232 | .Lsie_done: |
233 | # some program checks are suppressing. C code (e.g. do_protection_exception) | 233 | # some program checks are suppressing. C code (e.g. do_protection_exception) |
234 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other | 234 | # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There |
235 | # instructions between sie64a and .Lsie_done should not cause program | 235 | # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. |
236 | # interrupts. So lets use a nop (47 00 00 00) as a landing pad. | 236 | # Other instructions between sie64a and .Lsie_done should not cause program |
237 | # interrupts. So lets use 3 nops as a landing pad for all possible rewinds. | ||
237 | # See also .Lcleanup_sie | 238 | # See also .Lcleanup_sie |
238 | .Lrewind_pad: | 239 | .Lrewind_pad6: |
239 | nop 0 | 240 | nopr 7 |
241 | .Lrewind_pad4: | ||
242 | nopr 7 | ||
243 | .Lrewind_pad2: | ||
244 | nopr 7 | ||
240 | .globl sie_exit | 245 | .globl sie_exit |
241 | sie_exit: | 246 | sie_exit: |
242 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | 247 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area |
@@ -249,7 +254,9 @@ sie_exit: | |||
249 | stg %r14,__SF_EMPTY+16(%r15) # set exit reason code | 254 | stg %r14,__SF_EMPTY+16(%r15) # set exit reason code |
250 | j sie_exit | 255 | j sie_exit |
251 | 256 | ||
252 | EX_TABLE(.Lrewind_pad,.Lsie_fault) | 257 | EX_TABLE(.Lrewind_pad6,.Lsie_fault) |
258 | EX_TABLE(.Lrewind_pad4,.Lsie_fault) | ||
259 | EX_TABLE(.Lrewind_pad2,.Lsie_fault) | ||
253 | EX_TABLE(sie_exit,.Lsie_fault) | 260 | EX_TABLE(sie_exit,.Lsie_fault) |
254 | EXPORT_SYMBOL(sie64a) | 261 | EXPORT_SYMBOL(sie64a) |
255 | EXPORT_SYMBOL(sie_exit) | 262 | EXPORT_SYMBOL(sie_exit) |
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 9da243d94cc3..3b297fa3aa67 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c | |||
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, | |||
977 | ptr = asce.origin * 4096; | 977 | ptr = asce.origin * 4096; |
978 | if (asce.r) { | 978 | if (asce.r) { |
979 | *fake = 1; | 979 | *fake = 1; |
980 | ptr = 0; | ||
980 | asce.dt = ASCE_TYPE_REGION1; | 981 | asce.dt = ASCE_TYPE_REGION1; |
981 | } | 982 | } |
982 | switch (asce.dt) { | 983 | switch (asce.dt) { |
983 | case ASCE_TYPE_REGION1: | 984 | case ASCE_TYPE_REGION1: |
984 | if (vaddr.rfx01 > asce.tl && !asce.r) | 985 | if (vaddr.rfx01 > asce.tl && !*fake) |
985 | return PGM_REGION_FIRST_TRANS; | 986 | return PGM_REGION_FIRST_TRANS; |
986 | break; | 987 | break; |
987 | case ASCE_TYPE_REGION2: | 988 | case ASCE_TYPE_REGION2: |
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, | |||
1009 | union region1_table_entry rfte; | 1010 | union region1_table_entry rfte; |
1010 | 1011 | ||
1011 | if (*fake) { | 1012 | if (*fake) { |
1012 | /* offset in 16EB guest memory block */ | 1013 | ptr += (unsigned long) vaddr.rfx << 53; |
1013 | ptr = ptr + ((unsigned long) vaddr.rsx << 53UL); | ||
1014 | rfte.val = ptr; | 1014 | rfte.val = ptr; |
1015 | goto shadow_r2t; | 1015 | goto shadow_r2t; |
1016 | } | 1016 | } |
@@ -1036,8 +1036,7 @@ shadow_r2t: | |||
1036 | union region2_table_entry rste; | 1036 | union region2_table_entry rste; |
1037 | 1037 | ||
1038 | if (*fake) { | 1038 | if (*fake) { |
1039 | /* offset in 8PB guest memory block */ | 1039 | ptr += (unsigned long) vaddr.rsx << 42; |
1040 | ptr = ptr + ((unsigned long) vaddr.rtx << 42UL); | ||
1041 | rste.val = ptr; | 1040 | rste.val = ptr; |
1042 | goto shadow_r3t; | 1041 | goto shadow_r3t; |
1043 | } | 1042 | } |
@@ -1064,8 +1063,7 @@ shadow_r3t: | |||
1064 | union region3_table_entry rtte; | 1063 | union region3_table_entry rtte; |
1065 | 1064 | ||
1066 | if (*fake) { | 1065 | if (*fake) { |
1067 | /* offset in 4TB guest memory block */ | 1066 | ptr += (unsigned long) vaddr.rtx << 31; |
1068 | ptr = ptr + ((unsigned long) vaddr.sx << 31UL); | ||
1069 | rtte.val = ptr; | 1067 | rtte.val = ptr; |
1070 | goto shadow_sgt; | 1068 | goto shadow_sgt; |
1071 | } | 1069 | } |
@@ -1101,8 +1099,7 @@ shadow_sgt: | |||
1101 | union segment_table_entry ste; | 1099 | union segment_table_entry ste; |
1102 | 1100 | ||
1103 | if (*fake) { | 1101 | if (*fake) { |
1104 | /* offset in 2G guest memory block */ | 1102 | ptr += (unsigned long) vaddr.sx << 20; |
1105 | ptr = ptr + ((unsigned long) vaddr.sx << 20UL); | ||
1106 | ste.val = ptr; | 1103 | ste.val = ptr; |
1107 | goto shadow_pgt; | 1104 | goto shadow_pgt; |
1108 | } | 1105 | } |
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index b017daed6887..b854b1da281a 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
@@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
101 | addr = PAGE_ALIGN(addr); | 101 | addr = PAGE_ALIGN(addr); |
102 | vma = find_vma(mm, addr); | 102 | vma = find_vma(mm, addr); |
103 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && | 103 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && |
104 | (!vma || addr + len <= vma->vm_start)) | 104 | (!vma || addr + len <= vm_start_gap(vma))) |
105 | goto check_asce_limit; | 105 | goto check_asce_limit; |
106 | } | 106 | } |
107 | 107 | ||
@@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
151 | addr = PAGE_ALIGN(addr); | 151 | addr = PAGE_ALIGN(addr); |
152 | vma = find_vma(mm, addr); | 152 | vma = find_vma(mm, addr); |
153 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && | 153 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && |
154 | (!vma || addr + len <= vma->vm_start)) | 154 | (!vma || addr + len <= vm_start_gap(vma))) |
155 | goto check_asce_limit; | 155 | goto check_asce_limit; |
156 | } | 156 | } |
157 | 157 | ||
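
The vma->vm_start to vm_start_gap(vma) conversions in this hunk and in the sh, sparc, tile, x86 and xtensa hunks below all make the unmapped-area checks respect the stack guard gap: a hinted address is only accepted if it also clears the gap reserved next to a stack vma. The helper itself is not part of these hunks; a rough sketch of its growing-down half, reconstructed from the generic mm code and not guaranteed to match the upstream definition exactly, is:

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;	/* gap in bytes, default 256 pages */
		if (vm_start > vma->vm_start)	/* guard against underflow */
			vm_start = 0;
	}
	return vm_start;
}

With this, "addr + len <= vm_start_gap(vma)" rejects hints that would land inside the reserved gap below a growing-down stack, while non-stack vmas behave as before.
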
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 08e7af0be4a7..6a1a1297baae 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c | |||
@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
64 | 64 | ||
65 | vma = find_vma(mm, addr); | 65 | vma = find_vma(mm, addr); |
66 | if (TASK_SIZE - len >= addr && | 66 | if (TASK_SIZE - len >= addr && |
67 | (!vma || addr + len <= vma->vm_start)) | 67 | (!vma || addr + len <= vm_start_gap(vma))) |
68 | return addr; | 68 | return addr; |
69 | } | 69 | } |
70 | 70 | ||
@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
114 | 114 | ||
115 | vma = find_vma(mm, addr); | 115 | vma = find_vma(mm, addr); |
116 | if (TASK_SIZE - len >= addr && | 116 | if (TASK_SIZE - len >= addr && |
117 | (!vma || addr + len <= vma->vm_start)) | 117 | (!vma || addr + len <= vm_start_gap(vma))) |
118 | return addr; | 118 | return addr; |
119 | } | 119 | } |
120 | 120 | ||
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index ef4520efc813..043544d0cda3 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c | |||
@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |||
120 | 120 | ||
121 | vma = find_vma(mm, addr); | 121 | vma = find_vma(mm, addr); |
122 | if (task_size - len >= addr && | 122 | if (task_size - len >= addr && |
123 | (!vma || addr + len <= vma->vm_start)) | 123 | (!vma || addr + len <= vm_start_gap(vma))) |
124 | return addr; | 124 | return addr; |
125 | } | 125 | } |
126 | 126 | ||
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
183 | 183 | ||
184 | vma = find_vma(mm, addr); | 184 | vma = find_vma(mm, addr); |
185 | if (task_size - len >= addr && | 185 | if (task_size - len >= addr && |
186 | (!vma || addr + len <= vma->vm_start)) | 186 | (!vma || addr + len <= vm_start_gap(vma))) |
187 | return addr; | 187 | return addr; |
188 | } | 188 | } |
189 | 189 | ||
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 7c29d38e6b99..88855e383b34 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c | |||
@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
120 | addr = ALIGN(addr, huge_page_size(h)); | 120 | addr = ALIGN(addr, huge_page_size(h)); |
121 | vma = find_vma(mm, addr); | 121 | vma = find_vma(mm, addr); |
122 | if (task_size - len >= addr && | 122 | if (task_size - len >= addr && |
123 | (!vma || addr + len <= vma->vm_start)) | 123 | (!vma || addr + len <= vm_start_gap(vma))) |
124 | return addr; | 124 | return addr; |
125 | } | 125 | } |
126 | if (mm->get_unmapped_area == arch_get_unmapped_area) | 126 | if (mm->get_unmapped_area == arch_get_unmapped_area) |
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index cb10153b5c9f..03e5cc4e76e4 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c | |||
@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
233 | addr = ALIGN(addr, huge_page_size(h)); | 233 | addr = ALIGN(addr, huge_page_size(h)); |
234 | vma = find_vma(mm, addr); | 234 | vma = find_vma(mm, addr); |
235 | if (TASK_SIZE - len >= addr && | 235 | if (TASK_SIZE - len >= addr && |
236 | (!vma || addr + len <= vma->vm_start)) | 236 | (!vma || addr + len <= vm_start_gap(vma))) |
237 | return addr; | 237 | return addr; |
238 | } | 238 | } |
239 | if (current->mm->get_unmapped_area == arch_get_unmapped_area) | 239 | if (current->mm->get_unmapped_area == arch_get_unmapped_area) |
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index a6d91d4e37a1..110ce8238466 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids | |||
431 | [ C(DTLB) ] = { | 431 | [ C(DTLB) ] = { |
432 | [ C(OP_READ) ] = { | 432 | [ C(OP_READ) ] = { |
433 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ | 433 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ |
434 | [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ | 434 | [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ |
435 | }, | 435 | }, |
436 | [ C(OP_WRITE) ] = { | 436 | [ C(OP_WRITE) ] = { |
437 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ | 437 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ |
438 | [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ | 438 | [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ |
439 | }, | 439 | }, |
440 | [ C(OP_PREFETCH) ] = { | 440 | [ C(OP_PREFETCH) ] = { |
441 | [ C(RESULT_ACCESS) ] = 0x0, | 441 | [ C(RESULT_ACCESS) ] = 0x0, |
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h index b8ad261d11dc..c66d19e3c23e 100644 --- a/arch/x86/include/asm/extable.h +++ b/arch/x86/include/asm/extable.h | |||
@@ -29,6 +29,7 @@ struct pt_regs; | |||
29 | } while (0) | 29 | } while (0) |
30 | 30 | ||
31 | extern int fixup_exception(struct pt_regs *regs, int trapnr); | 31 | extern int fixup_exception(struct pt_regs *regs, int trapnr); |
32 | extern int fixup_bug(struct pt_regs *regs, int trapnr); | ||
32 | extern bool ex_has_fault_handler(unsigned long ip); | 33 | extern bool ex_has_fault_handler(unsigned long ip); |
33 | extern void early_fixup_exception(struct pt_regs *regs, int trapnr); | 34 | extern void early_fixup_exception(struct pt_regs *regs, int trapnr); |
34 | 35 | ||
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 055962615779..722d0e568863 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt { | |||
296 | 296 | ||
297 | bool perm_ok; /* do not check permissions if true */ | 297 | bool perm_ok; /* do not check permissions if true */ |
298 | bool ud; /* inject an #UD if host doesn't support insn */ | 298 | bool ud; /* inject an #UD if host doesn't support insn */ |
299 | bool tf; /* TF value before instruction (after for syscall/sysret) */ | ||
299 | 300 | ||
300 | bool have_exception; | 301 | bool have_exception; |
301 | struct x86_exception exception; | 302 | struct x86_exception exception; |
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index fba100713924..d5acc27ed1cc 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
@@ -2,8 +2,7 @@ | |||
2 | #define _ASM_X86_MSHYPER_H | 2 | #define _ASM_X86_MSHYPER_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/interrupt.h> | 5 | #include <linux/atomic.h> |
6 | #include <linux/clocksource.h> | ||
7 | #include <asm/hyperv.h> | 6 | #include <asm/hyperv.h> |
8 | 7 | ||
9 | /* | 8 | /* |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 207b8f2582c7..213ddf3e937d 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
144 | addr = PAGE_ALIGN(addr); | 144 | addr = PAGE_ALIGN(addr); |
145 | vma = find_vma(mm, addr); | 145 | vma = find_vma(mm, addr); |
146 | if (end - len >= addr && | 146 | if (end - len >= addr && |
147 | (!vma || addr + len <= vma->vm_start)) | 147 | (!vma || addr + len <= vm_start_gap(vma))) |
148 | return addr; | 148 | return addr; |
149 | } | 149 | } |
150 | 150 | ||
@@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
187 | addr = PAGE_ALIGN(addr); | 187 | addr = PAGE_ALIGN(addr); |
188 | vma = find_vma(mm, addr); | 188 | vma = find_vma(mm, addr); |
189 | if (TASK_SIZE - len >= addr && | 189 | if (TASK_SIZE - len >= addr && |
190 | (!vma || addr + len <= vma->vm_start)) | 190 | (!vma || addr + len <= vm_start_gap(vma))) |
191 | return addr; | 191 | return addr; |
192 | } | 192 | } |
193 | 193 | ||
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 3995d3a777d4..bf54309b85da 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr) | |||
182 | return ud == INSN_UD0 || ud == INSN_UD2; | 182 | return ud == INSN_UD0 || ud == INSN_UD2; |
183 | } | 183 | } |
184 | 184 | ||
185 | static int fixup_bug(struct pt_regs *regs, int trapnr) | 185 | int fixup_bug(struct pt_regs *regs, int trapnr) |
186 | { | 186 | { |
187 | if (trapnr != X86_TRAP_UD) | 187 | if (trapnr != X86_TRAP_UD) |
188 | return 0; | 188 | return 0; |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 0816ab2e8adc..80890dee66ce 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) | |||
2742 | ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); | 2742 | ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); |
2743 | } | 2743 | } |
2744 | 2744 | ||
2745 | ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; | ||
2745 | return X86EMUL_CONTINUE; | 2746 | return X86EMUL_CONTINUE; |
2746 | } | 2747 | } |
2747 | 2748 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 87d3cb901935..0e846f0cb83b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) | |||
5313 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | 5313 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); |
5314 | 5314 | ||
5315 | ctxt->eflags = kvm_get_rflags(vcpu); | 5315 | ctxt->eflags = kvm_get_rflags(vcpu); |
5316 | ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; | ||
5317 | |||
5316 | ctxt->eip = kvm_rip_read(vcpu); | 5318 | ctxt->eip = kvm_rip_read(vcpu); |
5317 | ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : | 5319 | ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : |
5318 | (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : | 5320 | (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : |
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, | |||
5528 | return dr6; | 5530 | return dr6; |
5529 | } | 5531 | } |
5530 | 5532 | ||
5531 | static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) | 5533 | static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r) |
5532 | { | 5534 | { |
5533 | struct kvm_run *kvm_run = vcpu->run; | 5535 | struct kvm_run *kvm_run = vcpu->run; |
5534 | 5536 | ||
5535 | /* | 5537 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { |
5536 | * rflags is the old, "raw" value of the flags. The new value has | 5538 | kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; |
5537 | * not been saved yet. | 5539 | kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; |
5538 | * | 5540 | kvm_run->debug.arch.exception = DB_VECTOR; |
5539 | * This is correct even for TF set by the guest, because "the | 5541 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
5540 | * processor will not generate this exception after the instruction | 5542 | *r = EMULATE_USER_EXIT; |
5541 | * that sets the TF flag". | 5543 | } else { |
5542 | */ | 5544 | /* |
5543 | if (unlikely(rflags & X86_EFLAGS_TF)) { | 5545 | * "Certain debug exceptions may clear bit 0-3. The |
5544 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { | 5546 | * remaining contents of the DR6 register are never |
5545 | kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | | 5547 | * cleared by the processor". |
5546 | DR6_RTM; | 5548 | */ |
5547 | kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; | 5549 | vcpu->arch.dr6 &= ~15; |
5548 | kvm_run->debug.arch.exception = DB_VECTOR; | 5550 | vcpu->arch.dr6 |= DR6_BS | DR6_RTM; |
5549 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 5551 | kvm_queue_exception(vcpu, DB_VECTOR); |
5550 | *r = EMULATE_USER_EXIT; | ||
5551 | } else { | ||
5552 | /* | ||
5553 | * "Certain debug exceptions may clear bit 0-3. The | ||
5554 | * remaining contents of the DR6 register are never | ||
5555 | * cleared by the processor". | ||
5556 | */ | ||
5557 | vcpu->arch.dr6 &= ~15; | ||
5558 | vcpu->arch.dr6 |= DR6_BS | DR6_RTM; | ||
5559 | kvm_queue_exception(vcpu, DB_VECTOR); | ||
5560 | } | ||
5561 | } | 5552 | } |
5562 | } | 5553 | } |
5563 | 5554 | ||
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
5567 | int r = EMULATE_DONE; | 5558 | int r = EMULATE_DONE; |
5568 | 5559 | ||
5569 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 5560 | kvm_x86_ops->skip_emulated_instruction(vcpu); |
5570 | kvm_vcpu_check_singlestep(vcpu, rflags, &r); | 5561 | |
5562 | /* | ||
5563 | * rflags is the old, "raw" value of the flags. The new value has | ||
5564 | * not been saved yet. | ||
5565 | * | ||
5566 | * This is correct even for TF set by the guest, because "the | ||
5567 | * processor will not generate this exception after the instruction | ||
5568 | * that sets the TF flag". | ||
5569 | */ | ||
5570 | if (unlikely(rflags & X86_EFLAGS_TF)) | ||
5571 | kvm_vcpu_do_singlestep(vcpu, &r); | ||
5571 | return r == EMULATE_DONE; | 5572 | return r == EMULATE_DONE; |
5572 | } | 5573 | } |
5573 | EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); | 5574 | EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); |
@@ -5726,8 +5727,9 @@ restart: | |||
5726 | toggle_interruptibility(vcpu, ctxt->interruptibility); | 5727 | toggle_interruptibility(vcpu, ctxt->interruptibility); |
5727 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; | 5728 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; |
5728 | kvm_rip_write(vcpu, ctxt->eip); | 5729 | kvm_rip_write(vcpu, ctxt->eip); |
5729 | if (r == EMULATE_DONE) | 5730 | if (r == EMULATE_DONE && |
5730 | kvm_vcpu_check_singlestep(vcpu, rflags, &r); | 5731 | (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) |
5732 | kvm_vcpu_do_singlestep(vcpu, &r); | ||
5731 | if (!ctxt->have_exception || | 5733 | if (!ctxt->have_exception || |
5732 | exception_type(ctxt->exception.vector) == EXCPT_TRAP) | 5734 | exception_type(ctxt->exception.vector) == EXCPT_TRAP) |
5733 | __kvm_set_rflags(vcpu, ctxt->eflags); | 5735 | __kvm_set_rflags(vcpu, ctxt->eflags); |
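
Taken together, the emulate.c and x86.c hunks base the single-step decision on a trap-flag value sampled before emulation: ctxt->tf is latched when the emulation context is initialised (and refreshed by em_syscall, since SYSCALL rewrites RFLAGS), and a #DB is only synthesised afterwards if that latched TF was set or userspace requested single-stepping. A condensed sketch of the resulting flow, where emulate_one_insn() is a placeholder rather than a real kernel function:

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;	/* sample TF before emulating */

	r = emulate_one_insn(ctxt);			/* may rewrite eflags (popf, sysret, ...) */

	if (r == EMULATE_DONE &&
	    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
		kvm_vcpu_do_singlestep(vcpu, &r);	/* queue #DB or exit to userspace */
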
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 35ea061010a1..0ea8afcb929c 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c | |||
@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) | |||
162 | if (fixup_exception(regs, trapnr)) | 162 | if (fixup_exception(regs, trapnr)) |
163 | return; | 163 | return; |
164 | 164 | ||
165 | if (fixup_bug(regs, trapnr)) | ||
166 | return; | ||
167 | |||
165 | fail: | 168 | fail: |
166 | early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n", | 169 | early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n", |
167 | (unsigned)trapnr, (unsigned long)regs->cs, regs->ip, | 170 | (unsigned)trapnr, (unsigned long)regs->cs, regs->ip, |
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 302f43fd9c28..adad702b39cd 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c | |||
@@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
148 | addr = ALIGN(addr, huge_page_size(h)); | 148 | addr = ALIGN(addr, huge_page_size(h)); |
149 | vma = find_vma(mm, addr); | 149 | vma = find_vma(mm, addr); |
150 | if (TASK_SIZE - len >= addr && | 150 | if (TASK_SIZE - len >= addr && |
151 | (!vma || addr + len <= vma->vm_start)) | 151 | (!vma || addr + len <= vm_start_gap(vma))) |
152 | return addr; | 152 | return addr; |
153 | } | 153 | } |
154 | if (mm->get_unmapped_area == arch_get_unmapped_area) | 154 | if (mm->get_unmapped_area == arch_get_unmapped_area) |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index cbc87ea98751..9b3f9fa5b283 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -161,16 +161,16 @@ static int page_size_mask; | |||
161 | 161 | ||
162 | static void __init probe_page_size_mask(void) | 162 | static void __init probe_page_size_mask(void) |
163 | { | 163 | { |
164 | #if !defined(CONFIG_KMEMCHECK) | ||
165 | /* | 164 | /* |
166 | * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will | 165 | * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will |
167 | * use small pages. | 166 | * use small pages. |
168 | * This will simplify cpa(), which otherwise needs to support splitting | 167 | * This will simplify cpa(), which otherwise needs to support splitting |
169 | * large pages into small in interrupt context, etc. | 168 | * large pages into small in interrupt context, etc. |
170 | */ | 169 | */ |
171 | if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) | 170 | if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK)) |
172 | page_size_mask |= 1 << PG_LEVEL_2M; | 171 | page_size_mask |= 1 << PG_LEVEL_2M; |
173 | #endif | 172 | else |
173 | direct_gbpages = 0; | ||
174 | 174 | ||
175 | /* Enable PSE if available */ | 175 | /* Enable PSE if available */ |
176 | if (boot_cpu_has(X86_FEATURE_PSE)) | 176 | if (boot_cpu_has(X86_FEATURE_PSE)) |
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h index f71f88ea7646..19707db966f1 100644 --- a/arch/xtensa/include/asm/irq.h +++ b/arch/xtensa/include/asm/irq.h | |||
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { } | |||
29 | # define PLATFORM_NR_IRQS 0 | 29 | # define PLATFORM_NR_IRQS 0 |
30 | #endif | 30 | #endif |
31 | #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS | 31 | #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS |
32 | #define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) | 32 | #define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1) |
33 | #define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1) | ||
33 | 34 | ||
34 | #if VARIANT_NR_IRQS == 0 | 35 | #if VARIANT_NR_IRQS == 0 |
35 | static inline void variant_init_irq(void) { } | 36 | static inline void variant_init_irq(void) { } |
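
The new XTENSA_PIC_LINUX_IRQ() macro, together with the extra slot added to NR_IRQS, encodes a one-slot offset between hardware interrupt numbers and the Linux IRQ numbers the platform code must hand out, apparently so that Linux IRQ 0 stays free as the "no interrupt" value. The xtfpga hunks further down use it when filling in platform resources; in isolation the pattern is roughly:

	/* hardware IRQ n is exposed to Linux as IRQ n + 1 */
	#define XTENSA_PIC_LINUX_IRQ(hwirq)	((hwirq) + 1)

	res[2].start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ);	/* was: OETH_IRQ */
	res[2].end   = XTENSA_PIC_LINUX_IRQ(OETH_IRQ);
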
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index a265edd6ac37..99341028cc77 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c | |||
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) | |||
34 | { | 34 | { |
35 | int irq = irq_find_mapping(NULL, hwirq); | 35 | int irq = irq_find_mapping(NULL, hwirq); |
36 | 36 | ||
37 | if (hwirq >= NR_IRQS) { | ||
38 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | ||
39 | __func__, hwirq); | ||
40 | } | ||
41 | |||
42 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 37 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
43 | /* Debugging check for stack overflow: is there less than 1KB free? */ | 38 | /* Debugging check for stack overflow: is there less than 1KB free? */ |
44 | { | 39 | { |
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 394ef08300b6..33bfa5270d95 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c | |||
@@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot) | |||
593 | (ccount_freq/10000) % 100, | 593 | (ccount_freq/10000) % 100, |
594 | loops_per_jiffy/(500000/HZ), | 594 | loops_per_jiffy/(500000/HZ), |
595 | (loops_per_jiffy/(5000/HZ)) % 100); | 595 | (loops_per_jiffy/(5000/HZ)) % 100); |
596 | 596 | seq_puts(f, "flags\t\t: " | |
597 | seq_printf(f,"flags\t\t: " | ||
598 | #if XCHAL_HAVE_NMI | 597 | #if XCHAL_HAVE_NMI |
599 | "nmi " | 598 | "nmi " |
600 | #endif | 599 | #endif |
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 06937928cb72..74afbf02d07e 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c | |||
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
88 | /* At this point: (!vmm || addr < vmm->vm_end). */ | 88 | /* At this point: (!vmm || addr < vmm->vm_end). */ |
89 | if (TASK_SIZE - len < addr) | 89 | if (TASK_SIZE - len < addr) |
90 | return -ENOMEM; | 90 | return -ENOMEM; |
91 | if (!vmm || addr + len <= vmm->vm_start) | 91 | if (!vmm || addr + len <= vm_start_gap(vmm)) |
92 | return addr; | 92 | return addr; |
93 | addr = vmm->vm_end; | 93 | addr = vmm->vm_end; |
94 | if (flags & MAP_SHARED) | 94 | if (flags & MAP_SHARED) |
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 30d9fc21e076..162c77e53ca8 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S | |||
@@ -118,7 +118,7 @@ SECTIONS | |||
118 | SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) | 118 | SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) |
119 | SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) | 119 | SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) |
120 | SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) | 120 | SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) |
121 | SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48) | 121 | SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20) |
122 | SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) | 122 | SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) |
123 | #endif | 123 | #endif |
124 | 124 | ||
@@ -306,13 +306,13 @@ SECTIONS | |||
306 | .UserExceptionVector.literal) | 306 | .UserExceptionVector.literal) |
307 | SECTION_VECTOR (_DoubleExceptionVector_literal, | 307 | SECTION_VECTOR (_DoubleExceptionVector_literal, |
308 | .DoubleExceptionVector.literal, | 308 | .DoubleExceptionVector.literal, |
309 | DOUBLEEXC_VECTOR_VADDR - 48, | 309 | DOUBLEEXC_VECTOR_VADDR - 20, |
310 | SIZEOF(.UserExceptionVector.text), | 310 | SIZEOF(.UserExceptionVector.text), |
311 | .UserExceptionVector.text) | 311 | .UserExceptionVector.text) |
312 | SECTION_VECTOR (_DoubleExceptionVector_text, | 312 | SECTION_VECTOR (_DoubleExceptionVector_text, |
313 | .DoubleExceptionVector.text, | 313 | .DoubleExceptionVector.text, |
314 | DOUBLEEXC_VECTOR_VADDR, | 314 | DOUBLEEXC_VECTOR_VADDR, |
315 | 48, | 315 | 20, |
316 | .DoubleExceptionVector.literal) | 316 | .DoubleExceptionVector.literal) |
317 | 317 | ||
318 | . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; | 318 | . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; |
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index 02e94bb3ad3e..c45b90bb9339 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c | |||
@@ -317,8 +317,7 @@ static int __init simdisk_init(void) | |||
317 | if (simdisk_count > MAX_SIMDISK_COUNT) | 317 | if (simdisk_count > MAX_SIMDISK_COUNT) |
318 | simdisk_count = MAX_SIMDISK_COUNT; | 318 | simdisk_count = MAX_SIMDISK_COUNT; |
319 | 319 | ||
320 | sddev = kmalloc(simdisk_count * sizeof(struct simdisk), | 320 | sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL); |
321 | GFP_KERNEL); | ||
322 | if (sddev == NULL) | 321 | if (sddev == NULL) |
323 | goto out_unregister; | 322 | goto out_unregister; |
324 | 323 | ||
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h index dbeea2b440a1..1fda7e20dfcb 100644 --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h | |||
@@ -24,16 +24,18 @@ | |||
24 | 24 | ||
25 | /* Interrupt configuration. */ | 25 | /* Interrupt configuration. */ |
26 | 26 | ||
27 | #define PLATFORM_NR_IRQS 10 | 27 | #define PLATFORM_NR_IRQS 0 |
28 | 28 | ||
29 | /* Default assignment of LX60 devices to external interrupts. */ | 29 | /* Default assignment of LX60 devices to external interrupts. */ |
30 | 30 | ||
31 | #ifdef CONFIG_XTENSA_MX | 31 | #ifdef CONFIG_XTENSA_MX |
32 | #define DUART16552_INTNUM XCHAL_EXTINT3_NUM | 32 | #define DUART16552_INTNUM XCHAL_EXTINT3_NUM |
33 | #define OETH_IRQ XCHAL_EXTINT4_NUM | 33 | #define OETH_IRQ XCHAL_EXTINT4_NUM |
34 | #define C67X00_IRQ XCHAL_EXTINT8_NUM | ||
34 | #else | 35 | #else |
35 | #define DUART16552_INTNUM XCHAL_EXTINT0_NUM | 36 | #define DUART16552_INTNUM XCHAL_EXTINT0_NUM |
36 | #define OETH_IRQ XCHAL_EXTINT1_NUM | 37 | #define OETH_IRQ XCHAL_EXTINT1_NUM |
38 | #define C67X00_IRQ XCHAL_EXTINT5_NUM | ||
37 | #endif | 39 | #endif |
38 | 40 | ||
39 | /* | 41 | /* |
@@ -63,5 +65,5 @@ | |||
63 | 65 | ||
64 | #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) | 66 | #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) |
65 | #define C67X00_SIZE 0x10 | 67 | #define C67X00_SIZE 0x10 |
66 | #define C67X00_IRQ 5 | 68 | |
67 | #endif /* __XTENSA_XTAVNET_HARDWARE_H */ | 69 | #endif /* __XTENSA_XTAVNET_HARDWARE_H */ |
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 779be723eb2b..42285f35d313 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c | |||
@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = { | |||
175 | .flags = IORESOURCE_MEM, | 175 | .flags = IORESOURCE_MEM, |
176 | }, | 176 | }, |
177 | [2] = { /* IRQ number */ | 177 | [2] = { /* IRQ number */ |
178 | .start = OETH_IRQ, | 178 | .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), |
179 | .end = OETH_IRQ, | 179 | .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), |
180 | .flags = IORESOURCE_IRQ, | 180 | .flags = IORESOURCE_IRQ, |
181 | }, | 181 | }, |
182 | }; | 182 | }; |
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = { | |||
213 | .flags = IORESOURCE_MEM, | 213 | .flags = IORESOURCE_MEM, |
214 | }, | 214 | }, |
215 | [1] = { /* IRQ number */ | 215 | [1] = { /* IRQ number */ |
216 | .start = C67X00_IRQ, | 216 | .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), |
217 | .end = C67X00_IRQ, | 217 | .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), |
218 | .flags = IORESOURCE_IRQ, | 218 | .flags = IORESOURCE_IRQ, |
219 | }, | 219 | }, |
220 | }; | 220 | }; |
@@ -247,7 +247,7 @@ static struct resource serial_resource = { | |||
247 | static struct plat_serial8250_port serial_platform_data[] = { | 247 | static struct plat_serial8250_port serial_platform_data[] = { |
248 | [0] = { | 248 | [0] = { |
249 | .mapbase = DUART16552_PADDR, | 249 | .mapbase = DUART16552_PADDR, |
250 | .irq = DUART16552_INTNUM, | 250 | .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM), |
251 | .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | | 251 | .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | |
252 | UPF_IOREMAP, | 252 | UPF_IOREMAP, |
253 | .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32, | 253 | .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32, |
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 1f5b692526ae..0ded5e846335 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c | |||
@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q, | |||
68 | __blk_mq_sched_assign_ioc(q, rq, bio, ioc); | 68 | __blk_mq_sched_assign_ioc(q, rq, bio, ioc); |
69 | } | 69 | } |
70 | 70 | ||
71 | /* | ||
72 | * Mark a hardware queue as needing a restart. For shared queues, maintain | ||
73 | * a count of how many hardware queues are marked for restart. | ||
74 | */ | ||
75 | static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) | ||
76 | { | ||
77 | if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) | ||
78 | return; | ||
79 | |||
80 | if (hctx->flags & BLK_MQ_F_TAG_SHARED) { | ||
81 | struct request_queue *q = hctx->queue; | ||
82 | |||
83 | if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) | ||
84 | atomic_inc(&q->shared_hctx_restart); | ||
85 | } else | ||
86 | set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); | ||
87 | } | ||
88 | |||
89 | static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) | ||
90 | { | ||
91 | if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) | ||
92 | return false; | ||
93 | |||
94 | if (hctx->flags & BLK_MQ_F_TAG_SHARED) { | ||
95 | struct request_queue *q = hctx->queue; | ||
96 | |||
97 | if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) | ||
98 | atomic_dec(&q->shared_hctx_restart); | ||
99 | } else | ||
100 | clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); | ||
101 | |||
102 | if (blk_mq_hctx_has_pending(hctx)) { | ||
103 | blk_mq_run_hw_queue(hctx, true); | ||
104 | return true; | ||
105 | } | ||
106 | |||
107 | return false; | ||
108 | } | ||
109 | |||
71 | struct request *blk_mq_sched_get_request(struct request_queue *q, | 110 | struct request *blk_mq_sched_get_request(struct request_queue *q, |
72 | struct bio *bio, | 111 | struct bio *bio, |
73 | unsigned int op, | 112 | unsigned int op, |
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, | |||
266 | return true; | 305 | return true; |
267 | } | 306 | } |
268 | 307 | ||
269 | static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) | ||
270 | { | ||
271 | if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { | ||
272 | clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); | ||
273 | if (blk_mq_hctx_has_pending(hctx)) { | ||
274 | blk_mq_run_hw_queue(hctx, true); | ||
275 | return true; | ||
276 | } | ||
277 | } | ||
278 | return false; | ||
279 | } | ||
280 | |||
281 | /** | 308 | /** |
282 | * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list | 309 | * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list |
283 | * @pos: loop cursor. | 310 | * @pos: loop cursor. |
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx) | |||
309 | unsigned int i, j; | 336 | unsigned int i, j; |
310 | 337 | ||
311 | if (set->flags & BLK_MQ_F_TAG_SHARED) { | 338 | if (set->flags & BLK_MQ_F_TAG_SHARED) { |
339 | /* | ||
340 | * If this is 0, then we know that no hardware queues | ||
341 | * have RESTART marked. We're done. | ||
342 | */ | ||
343 | if (!atomic_read(&queue->shared_hctx_restart)) | ||
344 | return; | ||
345 | |||
312 | rcu_read_lock(); | 346 | rcu_read_lock(); |
313 | list_for_each_entry_rcu_rr(q, queue, &set->tag_list, | 347 | list_for_each_entry_rcu_rr(q, queue, &set->tag_list, |
314 | tag_set_list) { | 348 | tag_set_list) { |
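
The restart helpers are rebuilt here around a per-queue counter: marking a shared-tag hctx for restart bumps q->shared_hctx_restart, clearing the mark decrements it, and blk_mq_sched_restart() can then skip the whole round-robin walk over the tag set when the counter reads zero. A minimal sketch of the invariant, simplified from the hunks above:

	/* marking (shared-tag case): count only the 0 -> 1 transition */
	if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		atomic_inc(&hctx->queue->shared_hctx_restart);

	/* restart path: nothing is marked, so skip the tag-set walk entirely */
	if (!atomic_read(&queue->shared_hctx_restart))
		return;
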
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index edafb5383b7b..5007edece51a 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h | |||
@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) | |||
115 | return false; | 115 | return false; |
116 | } | 116 | } |
117 | 117 | ||
118 | /* | ||
119 | * Mark a hardware queue as needing a restart. | ||
120 | */ | ||
121 | static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) | ||
122 | { | ||
123 | if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) | ||
124 | set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); | ||
125 | } | ||
126 | |||
127 | static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) | 118 | static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) |
128 | { | 119 | { |
129 | return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); | 120 | return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index bb66c96850b1..958cedaff8b8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q, | |||
2103 | } | 2103 | } |
2104 | } | 2104 | } |
2105 | 2105 | ||
2106 | /* | ||
2107 | * Caller needs to ensure that we're either frozen/quiesced, or that | ||
2108 | * the queue isn't live yet. | ||
2109 | */ | ||
2106 | static void queue_set_hctx_shared(struct request_queue *q, bool shared) | 2110 | static void queue_set_hctx_shared(struct request_queue *q, bool shared) |
2107 | { | 2111 | { |
2108 | struct blk_mq_hw_ctx *hctx; | 2112 | struct blk_mq_hw_ctx *hctx; |
2109 | int i; | 2113 | int i; |
2110 | 2114 | ||
2111 | queue_for_each_hw_ctx(q, hctx, i) { | 2115 | queue_for_each_hw_ctx(q, hctx, i) { |
2112 | if (shared) | 2116 | if (shared) { |
2117 | if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) | ||
2118 | atomic_inc(&q->shared_hctx_restart); | ||
2113 | hctx->flags |= BLK_MQ_F_TAG_SHARED; | 2119 | hctx->flags |= BLK_MQ_F_TAG_SHARED; |
2114 | else | 2120 | } else { |
2121 | if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) | ||
2122 | atomic_dec(&q->shared_hctx_restart); | ||
2115 | hctx->flags &= ~BLK_MQ_F_TAG_SHARED; | 2123 | hctx->flags &= ~BLK_MQ_F_TAG_SHARED; |
2124 | } | ||
2116 | } | 2125 | } |
2117 | } | 2126 | } |
2118 | 2127 | ||
2119 | static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared) | 2128 | static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, |
2129 | bool shared) | ||
2120 | { | 2130 | { |
2121 | struct request_queue *q; | 2131 | struct request_queue *q; |
2122 | 2132 | ||
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 283da7fbe034..27aceab1cc31 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) | |||
777 | } | 777 | } |
778 | 778 | ||
779 | /** | 779 | /** |
780 | * blk_release_queue: - release a &struct request_queue when it is no longer needed | 780 | * __blk_release_queue - release a request queue when it is no longer needed |
781 | * @kobj: the kobj belonging to the request queue to be released | 781 | * @work: pointer to the release_work member of the request queue to be released |
782 | * | 782 | * |
783 | * Description: | 783 | * Description: |
784 | * blk_release_queue is the pair to blk_init_queue() or | 784 | * blk_release_queue is the counterpart of blk_init_queue(). It should be |
785 | * blk_queue_make_request(). It should be called when a request queue is | 785 | * called when a request queue is being released; typically when a block |
786 | * being released; typically when a block device is being de-registered. | 786 | * device is being de-registered. Its primary task it to free the queue |
787 | * Currently, its primary task it to free all the &struct request | 787 | * itself. |
788 | * structures that were allocated to the queue and the queue itself. | ||
789 | * | 788 | * |
790 | * Note: | 789 | * Notes: |
791 | * The low level driver must have finished any outstanding requests first | 790 | * The low level driver must have finished any outstanding requests first |
792 | * via blk_cleanup_queue(). | 791 | * via blk_cleanup_queue(). |
793 | **/ | 792 | * |
794 | static void blk_release_queue(struct kobject *kobj) | 793 | * Although blk_release_queue() may be called with preemption disabled, |
794 | * __blk_release_queue() may sleep. | ||
795 | */ | ||
796 | static void __blk_release_queue(struct work_struct *work) | ||
795 | { | 797 | { |
796 | struct request_queue *q = | 798 | struct request_queue *q = container_of(work, typeof(*q), release_work); |
797 | container_of(kobj, struct request_queue, kobj); | ||
798 | 799 | ||
799 | if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) | 800 | if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) |
800 | blk_stat_remove_callback(q, q->poll_cb); | 801 | blk_stat_remove_callback(q, q->poll_cb); |
@@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj) | |||
834 | call_rcu(&q->rcu_head, blk_free_queue_rcu); | 835 | call_rcu(&q->rcu_head, blk_free_queue_rcu); |
835 | } | 836 | } |
836 | 837 | ||
838 | static void blk_release_queue(struct kobject *kobj) | ||
839 | { | ||
840 | struct request_queue *q = | ||
841 | container_of(kobj, struct request_queue, kobj); | ||
842 | |||
843 | INIT_WORK(&q->release_work, __blk_release_queue); | ||
844 | schedule_work(&q->release_work); | ||
845 | } | ||
846 | |||
837 | static const struct sysfs_ops queue_sysfs_ops = { | 847 | static const struct sysfs_ops queue_sysfs_ops = { |
838 | .show = queue_attr_show, | 848 | .show = queue_attr_show, |
839 | .store = queue_attr_store, | 849 | .store = queue_attr_store, |
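
blk_release_queue() is split so that the kobject release callback, which may run with preemption disabled, only queues a work item, while the cleanup that may sleep moves to __blk_release_queue(). The same pattern in isolation looks as below; my_obj, obj_release_work and obj_kobj_release are made-up names for illustration only:

static void obj_release_work(struct work_struct *work)
{
	struct my_obj *o = container_of(work, struct my_obj, release_work);

	/* safe to sleep here: flush callbacks, free sub-objects, ... */
	kfree(o);
}

static void obj_kobj_release(struct kobject *kobj)
{
	struct my_obj *o = container_of(kobj, struct my_obj, kobj);

	/* may be called with preemption disabled: defer the real work */
	INIT_WORK(&o->release_work, obj_release_work);
	schedule_work(&o->release_work);
}
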
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 7abe66505739..0d2e98920069 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -416,9 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc, | |||
416 | } | 416 | } |
417 | } | 417 | } |
418 | 418 | ||
419 | table_desc->validation_count++; | 419 | if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) { |
420 | if (table_desc->validation_count == 0) { | 420 | table_desc->validation_count++; |
421 | table_desc->validation_count--; | 421 | |
422 | /* | ||
423 | * Detect validation_count overflows to ensure that the warning | ||
424 | * message will only be printed once. | ||
425 | */ | ||
426 | if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) { | ||
427 | ACPI_WARNING((AE_INFO, | ||
428 | "Table %p, Validation count overflows\n", | ||
429 | table_desc)); | ||
430 | } | ||
422 | } | 431 | } |
423 | 432 | ||
424 | *out_table = table_desc->pointer; | 433 | *out_table = table_desc->pointer; |
@@ -445,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc) | |||
445 | 454 | ||
446 | ACPI_FUNCTION_TRACE(acpi_tb_put_table); | 455 | ACPI_FUNCTION_TRACE(acpi_tb_put_table); |
447 | 456 | ||
448 | if (table_desc->validation_count == 0) { | 457 | if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) { |
449 | ACPI_WARNING((AE_INFO, | 458 | table_desc->validation_count--; |
450 | "Table %p, Validation count is zero before decrement\n", | 459 | |
451 | table_desc)); | 460 | /* |
452 | return_VOID; | 461 | * Detect validation_count underflows to ensure that the warning |
462 | * message will only be printed once. | ||
463 | */ | ||
464 | if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) { | ||
465 | ACPI_WARNING((AE_INFO, | ||
466 | "Table %p, Validation count underflows\n", | ||
467 | table_desc)); | ||
468 | return_VOID; | ||
469 | } | ||
453 | } | 470 | } |
454 | table_desc->validation_count--; | ||
455 | 471 | ||
456 | if (table_desc->validation_count == 0) { | 472 | if (table_desc->validation_count == 0) { |
457 | 473 | ||
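
Both the get and the put path now treat ACPI_MAX_TABLE_VALIDATIONS as a saturation point: once validation_count reaches the limit it is never incremented or decremented again, so a table whose count has overflowed is effectively pinned instead of being unmapped while references may still exist. Reduced to pseudo-form:

	/* saturating counter: get/put become no-ops once the cap is reached */
	if (validation_count < ACPI_MAX_TABLE_VALIDATIONS)
		validation_count++;	/* acpi_tb_get_table() */

	if (validation_count < ACPI_MAX_TABLE_VALIDATIONS)
		validation_count--;	/* acpi_tb_put_table() */
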
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index e0587c85bafd..ff096d9755b9 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c | |||
@@ -474,15 +474,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, | |||
474 | return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); | 474 | return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); |
475 | } | 475 | } |
476 | 476 | ||
477 | /* | ||
478 | * The end_tag opcode must be followed by a zero byte. | ||
479 | * Although this byte is technically defined to be a checksum, | ||
480 | * in practice, all ASL compilers set this byte to zero. | ||
481 | */ | ||
482 | if (*(aml + 1) != 0) { | ||
483 | return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); | ||
484 | } | ||
485 | |||
486 | /* Return the pointer to the end_tag if requested */ | 477 | /* Return the pointer to the end_tag if requested */ |
487 | 478 | ||
488 | if (!user_function) { | 479 | if (!user_function) { |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 3a10d7573477..d53162997f32 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev) | |||
1428 | adev->flags.coherent_dma = cca; | 1428 | adev->flags.coherent_dma = cca; |
1429 | } | 1429 | } |
1430 | 1430 | ||
1431 | static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) | ||
1432 | { | ||
1433 | bool *is_spi_i2c_slave_p = data; | ||
1434 | |||
1435 | if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) | ||
1436 | return 1; | ||
1437 | |||
1438 | /* | ||
1439 | * devices that are connected to UART still need to be enumerated to | ||
1440 | * platform bus | ||
1441 | */ | ||
1442 | if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) | ||
1443 | *is_spi_i2c_slave_p = true; | ||
1444 | |||
1445 | /* no need to do more checking */ | ||
1446 | return -1; | ||
1447 | } | ||
1448 | |||
1449 | static bool acpi_is_spi_i2c_slave(struct acpi_device *device) | ||
1450 | { | ||
1451 | struct list_head resource_list; | ||
1452 | bool is_spi_i2c_slave = false; | ||
1453 | |||
1454 | INIT_LIST_HEAD(&resource_list); | ||
1455 | acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, | ||
1456 | &is_spi_i2c_slave); | ||
1457 | acpi_dev_free_resource_list(&resource_list); | ||
1458 | |||
1459 | return is_spi_i2c_slave; | ||
1460 | } | ||
1461 | |||
1431 | void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, | 1462 | void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, |
1432 | int type, unsigned long long sta) | 1463 | int type, unsigned long long sta) |
1433 | { | 1464 | { |
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, | |||
1443 | acpi_bus_get_flags(device); | 1474 | acpi_bus_get_flags(device); |
1444 | device->flags.match_driver = false; | 1475 | device->flags.match_driver = false; |
1445 | device->flags.initialized = true; | 1476 | device->flags.initialized = true; |
1477 | device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device); | ||
1446 | acpi_device_clear_enumerated(device); | 1478 | acpi_device_clear_enumerated(device); |
1447 | device_initialize(&device->dev); | 1479 | device_initialize(&device->dev); |
1448 | dev_set_uevent_suppress(&device->dev, true); | 1480 | dev_set_uevent_suppress(&device->dev, true); |
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used, | |||
1727 | return AE_OK; | 1759 | return AE_OK; |
1728 | } | 1760 | } |
1729 | 1761 | ||
1730 | static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) | ||
1731 | { | ||
1732 | bool *is_spi_i2c_slave_p = data; | ||
1733 | |||
1734 | if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) | ||
1735 | return 1; | ||
1736 | |||
1737 | /* | ||
1738 | * devices that are connected to UART still need to be enumerated to | ||
1739 | * platform bus | ||
1740 | */ | ||
1741 | if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) | ||
1742 | *is_spi_i2c_slave_p = true; | ||
1743 | |||
1744 | /* no need to do more checking */ | ||
1745 | return -1; | ||
1746 | } | ||
1747 | |||
1748 | static void acpi_default_enumeration(struct acpi_device *device) | 1762 | static void acpi_default_enumeration(struct acpi_device *device) |
1749 | { | 1763 | { |
1750 | struct list_head resource_list; | ||
1751 | bool is_spi_i2c_slave = false; | ||
1752 | |||
1753 | /* | 1764 | /* |
1754 | * Do not enumerate SPI/I2C slaves as they will be enumerated by their | 1765 | * Do not enumerate SPI/I2C slaves as they will be enumerated by their |
1755 | * respective parents. | 1766 | * respective parents. |
1756 | */ | 1767 | */ |
1757 | INIT_LIST_HEAD(&resource_list); | 1768 | if (!device->flags.spi_i2c_slave) { |
1758 | acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, | ||
1759 | &is_spi_i2c_slave); | ||
1760 | acpi_dev_free_resource_list(&resource_list); | ||
1761 | if (!is_spi_i2c_slave) { | ||
1762 | acpi_create_platform_device(device, NULL); | 1769 | acpi_create_platform_device(device, NULL); |
1763 | acpi_device_set_enumerated(device); | 1770 | acpi_device_set_enumerated(device); |
1764 | } else { | 1771 | } else { |
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device) | |||
1854 | return; | 1861 | return; |
1855 | 1862 | ||
1856 | device->flags.match_driver = true; | 1863 | device->flags.match_driver = true; |
1857 | if (ret > 0) { | 1864 | if (ret > 0 && !device->flags.spi_i2c_slave) { |
1858 | acpi_device_set_enumerated(device); | 1865 | acpi_device_set_enumerated(device); |
1859 | goto ok; | 1866 | goto ok; |
1860 | } | 1867 | } |
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device) | |||
1863 | if (ret < 0) | 1870 | if (ret < 0) |
1864 | return; | 1871 | return; |
1865 | 1872 | ||
1866 | if (device->pnp.type.platform_id) | 1873 | if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave) |
1867 | acpi_default_enumeration(device); | ||
1868 | else | ||
1869 | acpi_device_set_enumerated(device); | 1874 | acpi_device_set_enumerated(device); |
1875 | else | ||
1876 | acpi_default_enumeration(device); | ||
1870 | 1877 | ||
1871 | ok: | 1878 | ok: |
1872 | list_for_each_entry(child, &device->children, node) | 1879 | list_for_each_entry(child, &device->children, node) |
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 726c32e35db9..0e824091a12f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg) | |||
609 | unsigned long timeout; | 609 | unsigned long timeout; |
610 | int ret; | 610 | int ret; |
611 | 611 | ||
612 | xen_blkif_get(blkif); | ||
613 | |||
614 | set_freezable(); | 612 | set_freezable(); |
615 | while (!kthread_should_stop()) { | 613 | while (!kthread_should_stop()) { |
616 | if (try_to_freeze()) | 614 | if (try_to_freeze()) |
@@ -665,7 +663,6 @@ purge_gnt_list: | |||
665 | print_stats(ring); | 663 | print_stats(ring); |
666 | 664 | ||
667 | ring->xenblkd = NULL; | 665 | ring->xenblkd = NULL; |
668 | xen_blkif_put(blkif); | ||
669 | 666 | ||
670 | return 0; | 667 | return 0; |
671 | } | 668 | } |
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1436 | static void make_response(struct xen_blkif_ring *ring, u64 id, | 1433 | static void make_response(struct xen_blkif_ring *ring, u64 id, |
1437 | unsigned short op, int st) | 1434 | unsigned short op, int st) |
1438 | { | 1435 | { |
1439 | struct blkif_response resp; | 1436 | struct blkif_response *resp; |
1440 | unsigned long flags; | 1437 | unsigned long flags; |
1441 | union blkif_back_rings *blk_rings; | 1438 | union blkif_back_rings *blk_rings; |
1442 | int notify; | 1439 | int notify; |
1443 | 1440 | ||
1444 | resp.id = id; | ||
1445 | resp.operation = op; | ||
1446 | resp.status = st; | ||
1447 | |||
1448 | spin_lock_irqsave(&ring->blk_ring_lock, flags); | 1441 | spin_lock_irqsave(&ring->blk_ring_lock, flags); |
1449 | blk_rings = &ring->blk_rings; | 1442 | blk_rings = &ring->blk_rings; |
1450 | /* Place on the response ring for the relevant domain. */ | 1443 | /* Place on the response ring for the relevant domain. */ |
1451 | switch (ring->blkif->blk_protocol) { | 1444 | switch (ring->blkif->blk_protocol) { |
1452 | case BLKIF_PROTOCOL_NATIVE: | 1445 | case BLKIF_PROTOCOL_NATIVE: |
1453 | memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), | 1446 | resp = RING_GET_RESPONSE(&blk_rings->native, |
1454 | &resp, sizeof(resp)); | 1447 | blk_rings->native.rsp_prod_pvt); |
1455 | break; | 1448 | break; |
1456 | case BLKIF_PROTOCOL_X86_32: | 1449 | case BLKIF_PROTOCOL_X86_32: |
1457 | memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), | 1450 | resp = RING_GET_RESPONSE(&blk_rings->x86_32, |
1458 | &resp, sizeof(resp)); | 1451 | blk_rings->x86_32.rsp_prod_pvt); |
1459 | break; | 1452 | break; |
1460 | case BLKIF_PROTOCOL_X86_64: | 1453 | case BLKIF_PROTOCOL_X86_64: |
1461 | memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), | 1454 | resp = RING_GET_RESPONSE(&blk_rings->x86_64, |
1462 | &resp, sizeof(resp)); | 1455 | blk_rings->x86_64.rsp_prod_pvt); |
1463 | break; | 1456 | break; |
1464 | default: | 1457 | default: |
1465 | BUG(); | 1458 | BUG(); |
1466 | } | 1459 | } |
1460 | |||
1461 | resp->id = id; | ||
1462 | resp->operation = op; | ||
1463 | resp->status = st; | ||
1464 | |||
1467 | blk_rings->common.rsp_prod_pvt++; | 1465 | blk_rings->common.rsp_prod_pvt++; |
1468 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); | 1466 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); |
1469 | spin_unlock_irqrestore(&ring->blk_ring_lock, flags); | 1467 | spin_unlock_irqrestore(&ring->blk_ring_lock, flags); |
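make_response() above no longer builds a blkif_response on the stack and memcpy()s it into the ring; it takes a pointer to the ring slot for the active protocol and fills the fields in place, which becomes possible once every protocol shares one response layout (see the common.h hunk below). A small standalone sketch of the same producer-ring pattern, with made-up types:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 8

    struct resp {
        uint64_t id;
        uint8_t  operation;
        int16_t  status;
    };

    struct ring {
        struct resp slot[RING_SIZE];
        unsigned int rsp_prod;
    };

    /* Old style: build locally, then copy into the ring. */
    static void respond_copy(struct ring *r, uint64_t id, uint8_t op, int16_t st)
    {
        struct resp tmp = { .id = id, .operation = op, .status = st };

        memcpy(&r->slot[r->rsp_prod % RING_SIZE], &tmp, sizeof(tmp));
        r->rsp_prod++;
    }

    /* New style: take a pointer to the slot and fill it in place. */
    static void respond_in_place(struct ring *r, uint64_t id, uint8_t op, int16_t st)
    {
        struct resp *resp = &r->slot[r->rsp_prod % RING_SIZE];

        resp->id = id;
        resp->operation = op;
        resp->status = st;
        r->rsp_prod++;
    }

    int main(void)
    {
        struct ring r = { 0 };

        respond_copy(&r, 1, 0, 0);
        respond_in_place(&r, 2, 0, 0);
        printf("produced %u responses\n", r.rsp_prod);
        return 0;
    }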
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index dea61f6ab8cb..ecb35fe8ca8d 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues; | |||
75 | struct blkif_common_request { | 75 | struct blkif_common_request { |
76 | char dummy; | 76 | char dummy; |
77 | }; | 77 | }; |
78 | struct blkif_common_response { | 78 | |
79 | char dummy; | 79 | /* i386 protocol version */ |
80 | }; | ||
81 | 80 | ||
82 | struct blkif_x86_32_request_rw { | 81 | struct blkif_x86_32_request_rw { |
83 | uint8_t nr_segments; /* number of segments */ | 82 | uint8_t nr_segments; /* number of segments */ |
@@ -129,14 +128,6 @@ struct blkif_x86_32_request { | |||
129 | } u; | 128 | } u; |
130 | } __attribute__((__packed__)); | 129 | } __attribute__((__packed__)); |
131 | 130 | ||
132 | /* i386 protocol version */ | ||
133 | #pragma pack(push, 4) | ||
134 | struct blkif_x86_32_response { | ||
135 | uint64_t id; /* copied from request */ | ||
136 | uint8_t operation; /* copied from request */ | ||
137 | int16_t status; /* BLKIF_RSP_??? */ | ||
138 | }; | ||
139 | #pragma pack(pop) | ||
140 | /* x86_64 protocol version */ | 131 | /* x86_64 protocol version */ |
141 | 132 | ||
142 | struct blkif_x86_64_request_rw { | 133 | struct blkif_x86_64_request_rw { |
@@ -193,18 +184,12 @@ struct blkif_x86_64_request { | |||
193 | } u; | 184 | } u; |
194 | } __attribute__((__packed__)); | 185 | } __attribute__((__packed__)); |
195 | 186 | ||
196 | struct blkif_x86_64_response { | ||
197 | uint64_t __attribute__((__aligned__(8))) id; | ||
198 | uint8_t operation; /* copied from request */ | ||
199 | int16_t status; /* BLKIF_RSP_??? */ | ||
200 | }; | ||
201 | |||
202 | DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, | 187 | DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, |
203 | struct blkif_common_response); | 188 | struct blkif_response); |
204 | DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, | 189 | DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, |
205 | struct blkif_x86_32_response); | 190 | struct blkif_response __packed); |
206 | DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, | 191 | DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, |
207 | struct blkif_x86_64_response); | 192 | struct blkif_response); |
208 | 193 | ||
209 | union blkif_back_rings { | 194 | union blkif_back_rings { |
210 | struct blkif_back_ring native; | 195 | struct blkif_back_ring native; |
@@ -281,6 +266,7 @@ struct xen_blkif_ring { | |||
281 | 266 | ||
282 | wait_queue_head_t wq; | 267 | wait_queue_head_t wq; |
283 | atomic_t inflight; | 268 | atomic_t inflight; |
269 | bool active; | ||
284 | /* One thread per blkif ring. */ | 270 | /* One thread per blkif ring. */ |
285 | struct task_struct *xenblkd; | 271 | struct task_struct *xenblkd; |
286 | unsigned int waiting_reqs; | 272 | unsigned int waiting_reqs; |
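The header above collapses the per-protocol response structs into the single struct blkif_response, keeping a packing annotation for the 32-bit guest layout. The sketch below only illustrates how packing directives change a structure's size and field offsets; the numbers it prints are whatever the host ABI gives, not the Xen wire format:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct resp_natural {
        uint64_t id;
        uint8_t  operation;
        int16_t  status;
    };

    struct resp_packed {
        uint64_t id;
        uint8_t  operation;
        int16_t  status;
    } __attribute__((__packed__));

    #pragma pack(push, 4)
    struct resp_pack4 {
        uint64_t id;
        uint8_t  operation;
        int16_t  status;
    };
    #pragma pack(pop)

    int main(void)
    {
        printf("natural: size %zu, status at %zu\n",
               sizeof(struct resp_natural), offsetof(struct resp_natural, status));
        printf("packed:  size %zu, status at %zu\n",
               sizeof(struct resp_packed), offsetof(struct resp_packed, status));
        printf("pack(4): size %zu, status at %zu\n",
               sizeof(struct resp_pack4), offsetof(struct resp_pack4, status));
        return 0;
    }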
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 1f3dfaa54d87..792da683e70d 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) | |||
159 | init_waitqueue_head(&ring->shutdown_wq); | 159 | init_waitqueue_head(&ring->shutdown_wq); |
160 | ring->blkif = blkif; | 160 | ring->blkif = blkif; |
161 | ring->st_print = jiffies; | 161 | ring->st_print = jiffies; |
162 | xen_blkif_get(blkif); | 162 | ring->active = true; |
163 | } | 163 | } |
164 | 164 | ||
165 | return 0; | 165 | return 0; |
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) | |||
249 | struct xen_blkif_ring *ring = &blkif->rings[r]; | 249 | struct xen_blkif_ring *ring = &blkif->rings[r]; |
250 | unsigned int i = 0; | 250 | unsigned int i = 0; |
251 | 251 | ||
252 | if (!ring->active) | ||
253 | continue; | ||
254 | |||
252 | if (ring->xenblkd) { | 255 | if (ring->xenblkd) { |
253 | kthread_stop(ring->xenblkd); | 256 | kthread_stop(ring->xenblkd); |
254 | wake_up(&ring->shutdown_wq); | 257 | wake_up(&ring->shutdown_wq); |
255 | ring->xenblkd = NULL; | ||
256 | } | 258 | } |
257 | 259 | ||
258 | /* The above kthread_stop() guarantees that at this point we | 260 | /* The above kthread_stop() guarantees that at this point we |
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) | |||
296 | BUG_ON(ring->free_pages_num != 0); | 298 | BUG_ON(ring->free_pages_num != 0); |
297 | BUG_ON(ring->persistent_gnt_c != 0); | 299 | BUG_ON(ring->persistent_gnt_c != 0); |
298 | WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); | 300 | WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); |
299 | xen_blkif_put(blkif); | 301 | ring->active = false; |
300 | } | 302 | } |
301 | blkif->nr_ring_pages = 0; | 303 | blkif->nr_ring_pages = 0; |
302 | /* | 304 | /* |
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) | |||
312 | 314 | ||
313 | static void xen_blkif_free(struct xen_blkif *blkif) | 315 | static void xen_blkif_free(struct xen_blkif *blkif) |
314 | { | 316 | { |
315 | 317 | WARN_ON(xen_blkif_disconnect(blkif)); | |
316 | xen_blkif_disconnect(blkif); | ||
317 | xen_vbd_free(&blkif->vbd); | 318 | xen_vbd_free(&blkif->vbd); |
319 | kfree(blkif->be->mode); | ||
320 | kfree(blkif->be); | ||
318 | 321 | ||
319 | /* Make sure everything is drained before shutting down */ | 322 | /* Make sure everything is drained before shutting down */ |
320 | kmem_cache_free(xen_blkif_cachep, blkif); | 323 | kmem_cache_free(xen_blkif_cachep, blkif); |
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev) | |||
511 | xen_blkif_put(be->blkif); | 514 | xen_blkif_put(be->blkif); |
512 | } | 515 | } |
513 | 516 | ||
514 | kfree(be->mode); | ||
515 | kfree(be); | ||
516 | return 0; | 517 | return 0; |
517 | } | 518 | } |
518 | 519 | ||
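The xenbus hunks swap the per-ring reference get/put for a ring->active flag, so xen_blkif_disconnect() can run (and be re-run from xen_blkif_free()) without unbalancing a refcount. A toy sketch of teardown made idempotent by such a flag; names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring {
        bool active;
        int  resources;   /* stand-in for grants, pages, the kthread, ... */
    };

    static void ring_init(struct ring *r)
    {
        r->active = true;
        r->resources = 1;
    }

    /* Safe to call more than once: inactive rings are skipped. */
    static void ring_disconnect(struct ring *r)
    {
        if (!r->active)
            return;

        r->resources = 0;
        r->active = false;
        printf("ring torn down\n");
    }

    int main(void)
    {
        struct ring r;

        ring_init(&r);
        ring_disconnect(&r);
        ring_disconnect(&r);   /* second call is a no-op, not a double teardown */
        return 0;
    }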
diff --git a/drivers/char/random.c b/drivers/char/random.c index e870f329db88..01a260f67437 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len) | |||
803 | p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; | 803 | p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; |
804 | cp++; crng_init_cnt++; len--; | 804 | cp++; crng_init_cnt++; len--; |
805 | } | 805 | } |
806 | spin_unlock_irqrestore(&primary_crng.lock, flags); | ||
806 | if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { | 807 | if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { |
807 | invalidate_batched_entropy(); | 808 | invalidate_batched_entropy(); |
808 | crng_init = 1; | 809 | crng_init = 1; |
809 | wake_up_interruptible(&crng_init_wait); | 810 | wake_up_interruptible(&crng_init_wait); |
810 | pr_notice("random: fast init done\n"); | 811 | pr_notice("random: fast init done\n"); |
811 | } | 812 | } |
812 | spin_unlock_irqrestore(&primary_crng.lock, flags); | ||
813 | return 1; | 813 | return 1; |
814 | } | 814 | } |
815 | 815 | ||
@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
841 | } | 841 | } |
842 | memzero_explicit(&buf, sizeof(buf)); | 842 | memzero_explicit(&buf, sizeof(buf)); |
843 | crng->init_time = jiffies; | 843 | crng->init_time = jiffies; |
844 | spin_unlock_irqrestore(&primary_crng.lock, flags); | ||
844 | if (crng == &primary_crng && crng_init < 2) { | 845 | if (crng == &primary_crng && crng_init < 2) { |
845 | invalidate_batched_entropy(); | 846 | invalidate_batched_entropy(); |
846 | crng_init = 2; | 847 | crng_init = 2; |
@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
848 | wake_up_interruptible(&crng_init_wait); | 849 | wake_up_interruptible(&crng_init_wait); |
849 | pr_notice("random: crng init done\n"); | 850 | pr_notice("random: crng init done\n"); |
850 | } | 851 | } |
851 | spin_unlock_irqrestore(&primary_crng.lock, flags); | ||
852 | } | 852 | } |
853 | 853 | ||
854 | static inline void crng_wait_ready(void) | 854 | static inline void crng_wait_ready(void) |
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); | |||
2041 | u64 get_random_u64(void) | 2041 | u64 get_random_u64(void) |
2042 | { | 2042 | { |
2043 | u64 ret; | 2043 | u64 ret; |
2044 | bool use_lock = crng_init < 2; | 2044 | bool use_lock = READ_ONCE(crng_init) < 2; |
2045 | unsigned long flags; | 2045 | unsigned long flags = 0; |
2046 | struct batched_entropy *batch; | 2046 | struct batched_entropy *batch; |
2047 | 2047 | ||
2048 | #if BITS_PER_LONG == 64 | 2048 | #if BITS_PER_LONG == 64 |
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); | |||
2073 | u32 get_random_u32(void) | 2073 | u32 get_random_u32(void) |
2074 | { | 2074 | { |
2075 | u32 ret; | 2075 | u32 ret; |
2076 | bool use_lock = crng_init < 2; | 2076 | bool use_lock = READ_ONCE(crng_init) < 2; |
2077 | unsigned long flags; | 2077 | unsigned long flags = 0; |
2078 | struct batched_entropy *batch; | 2078 | struct batched_entropy *batch; |
2079 | 2079 | ||
2080 | if (arch_get_random_int(&ret)) | 2080 | if (arch_get_random_int(&ret)) |
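Two distinct fixes sit in the random.c hunks: crng_init is now updated before the primary_crng lock is dropped, and the batched-entropy helpers read crng_init once via READ_ONCE() while pre-initialising flags, since the lock (and therefore flags) is only touched when use_lock is true. The sketch below shows why sampling the shared value once matters: the lock and unlock decisions must agree even if the value changes in between. READ_ONCE() is approximated for userspace; it is not the kernel macro:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace approximation of the kernel's READ_ONCE(). */
    #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static int crng_init;                 /* shared, may change concurrently */
    static pthread_mutex_t batch_lock = PTHREAD_MUTEX_INITIALIZER;

    static unsigned int get_batched_value(void)
    {
        /* Sample once: the lock decision and the later unlock must agree
         * even if crng_init changes between two separate reads. */
        bool use_lock = READ_ONCE(crng_init) < 2;
        unsigned int ret;

        if (use_lock)
            pthread_mutex_lock(&batch_lock);

        ret = 42;                         /* stand-in for refilling the batch */

        if (use_lock)
            pthread_mutex_unlock(&batch_lock);

        return ret;
    }

    int main(void)
    {
        printf("%u\n", get_batched_value());
        return 0;
    }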
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index 19480bcc7046..2f29ee1a4d00 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig | |||
@@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B | |||
14 | config COMMON_CLK_GXBB | 14 | config COMMON_CLK_GXBB |
15 | bool | 15 | bool |
16 | depends on COMMON_CLK_AMLOGIC | 16 | depends on COMMON_CLK_AMLOGIC |
17 | select RESET_CONTROLLER | ||
17 | help | 18 | help |
18 | Support for the clock controller on AmLogic S905 devices, aka gxbb. | 19 | Support for the clock controller on AmLogic S905 devices, aka gxbb. |
19 | Say Y if you want peripherals and CPU frequency scaling to work. | 20 | Say Y if you want peripherals and CPU frequency scaling to work. |
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index b0d551a8efe4..eb89c7801f00 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig | |||
@@ -156,6 +156,7 @@ config SUN8I_R_CCU | |||
156 | bool "Support for Allwinner SoCs' PRCM CCUs" | 156 | bool "Support for Allwinner SoCs' PRCM CCUs" |
157 | select SUNXI_CCU_DIV | 157 | select SUNXI_CCU_DIV |
158 | select SUNXI_CCU_GATE | 158 | select SUNXI_CCU_GATE |
159 | select SUNXI_CCU_MP | ||
159 | default MACH_SUN8I || (ARCH_SUNXI && ARM64) | 160 | default MACH_SUN8I || (ARCH_SUNXI && ARM64) |
160 | 161 | ||
161 | endif | 162 | endif |
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h index 9b3cd24b78d2..061b6fbb4f95 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h | |||
@@ -31,7 +31,9 @@ | |||
31 | #define CLK_PLL_VIDEO0_2X 8 | 31 | #define CLK_PLL_VIDEO0_2X 8 |
32 | #define CLK_PLL_VE 9 | 32 | #define CLK_PLL_VE 9 |
33 | #define CLK_PLL_DDR0 10 | 33 | #define CLK_PLL_DDR0 10 |
34 | #define CLK_PLL_PERIPH0 11 | 34 | |
35 | /* PLL_PERIPH0 exported for PRCM */ | ||
36 | |||
35 | #define CLK_PLL_PERIPH0_2X 12 | 37 | #define CLK_PLL_PERIPH0_2X 12 |
36 | #define CLK_PLL_PERIPH1 13 | 38 | #define CLK_PLL_PERIPH1 13 |
37 | #define CLK_PLL_PERIPH1_2X 14 | 39 | #define CLK_PLL_PERIPH1_2X 14 |
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 5c476f966a72..5372bf8be5e6 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c | |||
@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb", | |||
243 | static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", | 243 | static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", |
244 | 0x060, BIT(6), 0); | 244 | 0x060, BIT(6), 0); |
245 | static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", | 245 | static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", |
246 | 0x060, BIT(6), 0); | 246 | 0x060, BIT(7), 0); |
247 | static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", | 247 | static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", |
248 | 0x060, BIT(8), 0); | 248 | 0x060, BIT(8), 0); |
249 | static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", | 249 | static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", |
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 89e68d29bf45..df97e25aec76 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c | |||
@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents, | |||
556 | 0x12c, 0, 4, 24, 3, BIT(31), | 556 | 0x12c, 0, 4, 24, 3, BIT(31), |
557 | CLK_SET_RATE_PARENT); | 557 | CLK_SET_RATE_PARENT); |
558 | static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, | 558 | static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, |
559 | 0x12c, 0, 4, 24, 3, BIT(31), | 559 | 0x130, 0, 4, 24, 3, BIT(31), |
560 | CLK_SET_RATE_PARENT); | 560 | CLK_SET_RATE_PARENT); |
561 | 561 | ||
562 | static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1", | 562 | static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1", |
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h index 85973d1e8165..1b4baea37d81 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h | |||
@@ -29,7 +29,9 @@ | |||
29 | #define CLK_PLL_VIDEO 6 | 29 | #define CLK_PLL_VIDEO 6 |
30 | #define CLK_PLL_VE 7 | 30 | #define CLK_PLL_VE 7 |
31 | #define CLK_PLL_DDR 8 | 31 | #define CLK_PLL_DDR 8 |
32 | #define CLK_PLL_PERIPH0 9 | 32 | |
33 | /* PLL_PERIPH0 exported for PRCM */ | ||
34 | |||
33 | #define CLK_PLL_PERIPH0_2X 10 | 35 | #define CLK_PLL_PERIPH0_2X 10 |
34 | #define CLK_PLL_GPU 11 | 36 | #define CLK_PLL_GPU 11 |
35 | #define CLK_PLL_PERIPH1 12 | 37 | #define CLK_PLL_PERIPH1 12 |
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index e58706b40ae9..6297add857b5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | |||
@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { | |||
537 | [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, | 537 | [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, |
538 | [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, | 538 | [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, |
539 | [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, | 539 | [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, |
540 | [RST_BUS_OTG] = { 0x2c0, BIT(23) }, | 540 | [RST_BUS_OTG] = { 0x2c0, BIT(24) }, |
541 | [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, | 541 | [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, |
542 | [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, | 542 | [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, |
543 | 543 | ||
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 4bed671e490e..8b5c30062d99 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
@@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) | |||
1209 | return 0; | 1209 | return 0; |
1210 | } | 1210 | } |
1211 | 1211 | ||
1212 | rate = readl_relaxed(frame + CNTFRQ); | 1212 | rate = readl_relaxed(base + CNTFRQ); |
1213 | 1213 | ||
1214 | iounmap(frame); | 1214 | iounmap(base); |
1215 | 1215 | ||
1216 | return rate; | 1216 | return rate; |
1217 | } | 1217 | } |
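The arch timer fix reads CNTFRQ from (and iounmaps) the ioremapped base instead of frame, the descriptor struct. As a reminder of why the wrong pointer compiles silently: adding a register offset to a struct pointer scales by the struct size, so the access lands far from the intended register. Illustrative types only:

    #include <stdint.h>
    #include <stdio.h>

    struct frame_desc {          /* stand-in for arch_timer_mem_frame */
        uint64_t cntbase;
        int      irq;
    };

    #define CNTFRQ 0x10          /* register offset, in bytes, from the MMIO base */

    int main(void)
    {
        /* readl_relaxed(base + CNTFRQ) advances a byte-typed mapping by 0x10
         * bytes; readl_relaxed(frame + CNTFRQ) would advance a struct pointer
         * by 0x10 * sizeof(*frame) bytes into unrelated memory. */
        printf("intended byte offset:        %d\n", CNTFRQ);
        printf("offset via a struct pointer: %zu\n",
               CNTFRQ * sizeof(struct frame_desc));
        return 0;
    }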
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 44e5e951583b..8e64b8460f11 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/clockchips.h> | 20 | #include <linux/clockchips.h> |
21 | #include <linux/clocksource.h> | ||
21 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
22 | #include <linux/of_irq.h> | 23 | #include <linux/of_irq.h> |
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 2e9c830ae1cd..c4656c4d44a6 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
14 | #include <linux/clockchips.h> | 14 | #include <linux/clockchips.h> |
15 | #include <linux/clocksource.h> | ||
15 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
16 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
17 | #include <linux/irq.h> | 18 | #include <linux/irq.h> |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 992f7c20760f..88220ff3e1c2 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -185,8 +185,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set, | |||
185 | int ret; | 185 | int ret; |
186 | ret = sscanf(buf, "%u", &input); | 186 | ret = sscanf(buf, "%u", &input); |
187 | 187 | ||
188 | /* cannot be lower than 11 otherwise freq will not fall */ | 188 | /* cannot be lower than 1 otherwise freq will not fall */ |
189 | if (ret != 1 || input < 11 || input > 100 || | 189 | if (ret != 1 || input < 1 || input > 100 || |
190 | input >= dbs_data->up_threshold) | 190 | input >= dbs_data->up_threshold) |
191 | return -EINVAL; | 191 | return -EINVAL; |
192 | 192 | ||
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index ffca4fc0061d..ae8eb0359889 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c | |||
@@ -180,8 +180,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, | |||
180 | if (!state_node) | 180 | if (!state_node) |
181 | break; | 181 | break; |
182 | 182 | ||
183 | if (!of_device_is_available(state_node)) | 183 | if (!of_device_is_available(state_node)) { |
184 | of_node_put(state_node); | ||
184 | continue; | 185 | continue; |
186 | } | ||
185 | 187 | ||
186 | if (!idle_state_valid(state_node, i, cpumask)) { | 188 | if (!idle_state_valid(state_node, i, cpumask)) { |
187 | pr_warn("%s idle state not valid, bailing out\n", | 189 | pr_warn("%s idle state not valid, bailing out\n", |
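The cpuidle fix above drops the of_node reference on the early continue path as well, keeping get/put balanced on every way out of the loop. A toy refcount sketch of the same rule (not the of_* API):

    #include <stdio.h>

    struct node {
        int refcount;
        int available;
    };

    static struct node *node_get(struct node *n) { n->refcount++; return n; }
    static void node_put(struct node *n)         { n->refcount--; }

    int main(void)
    {
        struct node nodes[3] = { {0, 1}, {0, 0}, {0, 1} };   /* middle one unavailable */

        for (int i = 0; i < 3; i++) {
            struct node *n = node_get(&nodes[i]);

            if (!n->available) {
                node_put(n);      /* the fix: drop the reference before continue */
                continue;
            }

            /* ... use the node ... */
            node_put(n);
        }

        for (int i = 0; i < 3; i++)
            printf("node %d refcount %d\n", i, nodes[i].refcount);   /* all 0 */
        return 0;
    }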
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index 5c3e7b11e8a6..f6e7956fc91a 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c | |||
@@ -267,7 +267,11 @@ static int exynos_nocp_probe(struct platform_device *pdev) | |||
267 | } | 267 | } |
268 | platform_set_drvdata(pdev, nocp); | 268 | platform_set_drvdata(pdev, nocp); |
269 | 269 | ||
270 | clk_prepare_enable(nocp->clk); | 270 | ret = clk_prepare_enable(nocp->clk); |
271 | if (ret) { | ||
272 | dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); | ||
273 | return ret; | ||
274 | } | ||
271 | 275 | ||
272 | pr_info("exynos-nocp: new NoC Probe device registered: %s\n", | 276 | pr_info("exynos-nocp: new NoC Probe device registered: %s\n", |
273 | dev_name(dev)); | 277 | dev_name(dev)); |
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 9b7350935b73..d96e3dc71cf8 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c | |||
@@ -44,7 +44,7 @@ struct exynos_ppmu { | |||
44 | { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \ | 44 | { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \ |
45 | { "ppmu-event3-"#name, PPMU_PMNCNT3 } | 45 | { "ppmu-event3-"#name, PPMU_PMNCNT3 } |
46 | 46 | ||
47 | struct __exynos_ppmu_events { | 47 | static struct __exynos_ppmu_events { |
48 | char *name; | 48 | char *name; |
49 | int id; | 49 | int id; |
50 | } ppmu_events[] = { | 50 | } ppmu_events[] = { |
@@ -648,7 +648,11 @@ static int exynos_ppmu_probe(struct platform_device *pdev) | |||
648 | dev_name(&pdev->dev), desc[i].name); | 648 | dev_name(&pdev->dev), desc[i].name); |
649 | } | 649 | } |
650 | 650 | ||
651 | clk_prepare_enable(info->ppmu.clk); | 651 | ret = clk_prepare_enable(info->ppmu.clk); |
652 | if (ret) { | ||
653 | dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); | ||
654 | return ret; | ||
655 | } | ||
652 | 656 | ||
653 | return 0; | 657 | return 0; |
654 | } | 658 | } |
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index dc269cb288c2..951b6c79f166 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c | |||
@@ -47,7 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME); | |||
47 | DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); | 47 | DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); |
48 | DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); | 48 | DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); |
49 | DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); | 49 | DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); |
50 | DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY); | 50 | DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY); |
51 | DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); | 51 | DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); |
52 | DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); | 52 | DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); |
53 | DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); | 53 | DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); |
@@ -192,7 +192,7 @@ static void __init dmi_id_init_attr_table(void) | |||
192 | ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); | 192 | ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); |
193 | ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); | 193 | ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); |
194 | ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); | 194 | ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); |
195 | ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); | 195 | ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); |
196 | ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); | 196 | ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); |
197 | ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); | 197 | ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); |
198 | ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); | 198 | ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 93f7acdaac7a..783041964439 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, | |||
144 | 144 | ||
145 | buf = dmi_early_remap(dmi_base, orig_dmi_len); | 145 | buf = dmi_early_remap(dmi_base, orig_dmi_len); |
146 | if (buf == NULL) | 146 | if (buf == NULL) |
147 | return -1; | 147 | return -ENOMEM; |
148 | 148 | ||
149 | dmi_decode_table(buf, decode, NULL); | 149 | dmi_decode_table(buf, decode, NULL); |
150 | 150 | ||
@@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, | |||
178 | const char *d = (const char *) dm; | 178 | const char *d = (const char *) dm; |
179 | const char *p; | 179 | const char *p; |
180 | 180 | ||
181 | if (dmi_ident[slot]) | 181 | if (dmi_ident[slot] || dm->length <= string) |
182 | return; | 182 | return; |
183 | 183 | ||
184 | p = dmi_string(dm, d[string]); | 184 | p = dmi_string(dm, d[string]); |
@@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, | |||
191 | static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, | 191 | static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, |
192 | int index) | 192 | int index) |
193 | { | 193 | { |
194 | const u8 *d = (u8 *) dm + index; | 194 | const u8 *d; |
195 | char *s; | 195 | char *s; |
196 | int is_ff = 1, is_00 = 1, i; | 196 | int is_ff = 1, is_00 = 1, i; |
197 | 197 | ||
198 | if (dmi_ident[slot]) | 198 | if (dmi_ident[slot] || dm->length <= index + 16) |
199 | return; | 199 | return; |
200 | 200 | ||
201 | d = (u8 *) dm + index; | ||
201 | for (i = 0; i < 16 && (is_ff || is_00); i++) { | 202 | for (i = 0; i < 16 && (is_ff || is_00); i++) { |
202 | if (d[i] != 0x00) | 203 | if (d[i] != 0x00) |
203 | is_00 = 0; | 204 | is_00 = 0; |
@@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, | |||
228 | static void __init dmi_save_type(const struct dmi_header *dm, int slot, | 229 | static void __init dmi_save_type(const struct dmi_header *dm, int slot, |
229 | int index) | 230 | int index) |
230 | { | 231 | { |
231 | const u8 *d = (u8 *) dm + index; | 232 | const u8 *d; |
232 | char *s; | 233 | char *s; |
233 | 234 | ||
234 | if (dmi_ident[slot]) | 235 | if (dmi_ident[slot] || dm->length <= index) |
235 | return; | 236 | return; |
236 | 237 | ||
237 | s = dmi_alloc(4); | 238 | s = dmi_alloc(4); |
238 | if (!s) | 239 | if (!s) |
239 | return; | 240 | return; |
240 | 241 | ||
242 | d = (u8 *) dm + index; | ||
241 | sprintf(s, "%u", *d & 0x7F); | 243 | sprintf(s, "%u", *d & 0x7F); |
242 | dmi_ident[slot] = s; | 244 | dmi_ident[slot] = s; |
243 | } | 245 | } |
@@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm) | |||
278 | 280 | ||
279 | static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) | 281 | static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) |
280 | { | 282 | { |
281 | int i, count = *(u8 *)(dm + 1); | 283 | int i, count; |
282 | struct dmi_device *dev; | 284 | struct dmi_device *dev; |
283 | 285 | ||
286 | if (dm->length < 0x05) | ||
287 | return; | ||
288 | |||
289 | count = *(u8 *)(dm + 1); | ||
284 | for (i = 1; i <= count; i++) { | 290 | for (i = 1; i <= count; i++) { |
285 | const char *devname = dmi_string(dm, i); | 291 | const char *devname = dmi_string(dm, i); |
286 | 292 | ||
@@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm) | |||
353 | const char *name; | 359 | const char *name; |
354 | const u8 *d = (u8 *)dm; | 360 | const u8 *d = (u8 *)dm; |
355 | 361 | ||
362 | if (dm->length < 0x0B) | ||
363 | return; | ||
364 | |||
356 | /* Skip disabled device */ | 365 | /* Skip disabled device */ |
357 | if ((d[0x5] & 0x80) == 0) | 366 | if ((d[0x5] & 0x80) == 0) |
358 | return; | 367 | return; |
@@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) | |||
387 | const char *d = (const char *)dm; | 396 | const char *d = (const char *)dm; |
388 | static int nr; | 397 | static int nr; |
389 | 398 | ||
390 | if (dm->type != DMI_ENTRY_MEM_DEVICE) | 399 | if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12) |
391 | return; | 400 | return; |
392 | if (nr >= dmi_memdev_nr) { | 401 | if (nr >= dmi_memdev_nr) { |
393 | pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); | 402 | pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); |
@@ -650,6 +659,21 @@ void __init dmi_scan_machine(void) | |||
650 | goto error; | 659 | goto error; |
651 | 660 | ||
652 | /* | 661 | /* |
662 | * Same logic as above, look for a 64-bit entry point | ||
663 | * first, and if not found, fall back to 32-bit entry point. | ||
664 | */ | ||
665 | memcpy_fromio(buf, p, 16); | ||
666 | for (q = p + 16; q < p + 0x10000; q += 16) { | ||
667 | memcpy_fromio(buf + 16, q, 16); | ||
668 | if (!dmi_smbios3_present(buf)) { | ||
669 | dmi_available = 1; | ||
670 | dmi_early_unmap(p, 0x10000); | ||
671 | goto out; | ||
672 | } | ||
673 | memcpy(buf, buf + 16, 16); | ||
674 | } | ||
675 | |||
676 | /* | ||
653 | * Iterate over all possible DMI header addresses q. | 677 | * Iterate over all possible DMI header addresses q. |
654 | * Maintain the 32 bytes around q in buf. On the | 678 | * Maintain the 32 bytes around q in buf. On the |
655 | * first iteration, substitute zero for the | 679 | * first iteration, substitute zero for the |
@@ -659,7 +683,7 @@ void __init dmi_scan_machine(void) | |||
659 | memset(buf, 0, 16); | 683 | memset(buf, 0, 16); |
660 | for (q = p; q < p + 0x10000; q += 16) { | 684 | for (q = p; q < p + 0x10000; q += 16) { |
661 | memcpy_fromio(buf + 16, q, 16); | 685 | memcpy_fromio(buf + 16, q, 16); |
662 | if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { | 686 | if (!dmi_present(buf)) { |
663 | dmi_available = 1; | 687 | dmi_available = 1; |
664 | dmi_early_unmap(p, 0x10000); | 688 | dmi_early_unmap(p, 0x10000); |
665 | goto out; | 689 | goto out; |
@@ -993,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date); | |||
993 | * @decode: Callback function | 1017 | * @decode: Callback function |
994 | * @private_data: Private data to be passed to the callback function | 1018 | * @private_data: Private data to be passed to the callback function |
995 | * | 1019 | * |
996 | * Returns -1 when the DMI table can't be reached, 0 on success. | 1020 | * Returns 0 on success, -ENXIO if DMI is not selected or not present, |
1021 | * or a different negative error code if DMI walking fails. | ||
997 | */ | 1022 | */ |
998 | int dmi_walk(void (*decode)(const struct dmi_header *, void *), | 1023 | int dmi_walk(void (*decode)(const struct dmi_header *, void *), |
999 | void *private_data) | 1024 | void *private_data) |
@@ -1001,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), | |||
1001 | u8 *buf; | 1026 | u8 *buf; |
1002 | 1027 | ||
1003 | if (!dmi_available) | 1028 | if (!dmi_available) |
1004 | return -1; | 1029 | return -ENXIO; |
1005 | 1030 | ||
1006 | buf = dmi_remap(dmi_base, dmi_len); | 1031 | buf = dmi_remap(dmi_base, dmi_len); |
1007 | if (buf == NULL) | 1032 | if (buf == NULL) |
1008 | return -1; | 1033 | return -ENOMEM; |
1009 | 1034 | ||
1010 | dmi_decode_table(buf, decode, private_data); | 1035 | dmi_decode_table(buf, decode, private_data); |
1011 | 1036 | ||
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 5104b6398139..c83ea68be792 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c | |||
@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev, | |||
721 | u32 set; | 721 | u32 set; |
722 | 722 | ||
723 | if (!of_device_is_compatible(mvchip->chip.of_node, | 723 | if (!of_device_is_compatible(mvchip->chip.of_node, |
724 | "marvell,armada-370-xp-gpio")) | 724 | "marvell,armada-370-gpio")) |
725 | return 0; | 725 | return 0; |
726 | 726 | ||
727 | if (IS_ERR(mvchip->clk)) | 727 | if (IS_ERR(mvchip->clk)) |
@@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = { | |||
852 | .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, | 852 | .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, |
853 | }, | 853 | }, |
854 | { | 854 | { |
855 | .compatible = "marvell,armada-370-xp-gpio", | 855 | .compatible = "marvell,armada-370-gpio", |
856 | .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, | 856 | .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, |
857 | }, | 857 | }, |
858 | { | 858 | { |
@@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
1128 | mvchip); | 1128 | mvchip); |
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | /* Armada 370/XP has simple PWM support for GPIO lines */ | 1131 | /* Some MVEBU SoCs have simple PWM support for GPIO lines */ |
1132 | if (IS_ENABLED(CONFIG_PWM)) | 1132 | if (IS_ENABLED(CONFIG_PWM)) |
1133 | return mvebu_pwm_probe(pdev, mvchip, id); | 1133 | return mvebu_pwm_probe(pdev, mvchip, id); |
1134 | 1134 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1cf78f4dd339..1e8e1123ddf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | |||
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) | |||
693 | DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", | 693 | DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", |
694 | adev->clock.default_dispclk / 100); | 694 | adev->clock.default_dispclk / 100); |
695 | adev->clock.default_dispclk = 60000; | 695 | adev->clock.default_dispclk = 60000; |
696 | } else if (adev->clock.default_dispclk <= 60000) { | ||
697 | DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", | ||
698 | adev->clock.default_dispclk / 100); | ||
699 | adev->clock.default_dispclk = 62500; | ||
696 | } | 700 | } |
697 | adev->clock.dp_extclk = | 701 | adev->clock.dp_extclk = |
698 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); | 702 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4c7c2628ace4..3e5d550c5bd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -475,6 +475,7 @@ static const struct pci_device_id pciidlist[] = { | |||
475 | {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 475 | {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
476 | {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 476 | {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
477 | {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 477 | {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
478 | {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | ||
478 | {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 479 | {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
479 | /* Vega 10 */ | 480 | /* Vega 10 */ |
480 | {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, | 481 | {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, |
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 8c9bc75a9c2d..8a0818b23ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | |||
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) | |||
165 | struct drm_device *dev = crtc->dev; | 165 | struct drm_device *dev = crtc->dev; |
166 | struct amdgpu_device *adev = dev->dev_private; | 166 | struct amdgpu_device *adev = dev->dev_private; |
167 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); | 167 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); |
168 | ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; | 168 | ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; |
169 | 169 | ||
170 | memset(&args, 0, sizeof(args)); | 170 | memset(&args, 0, sizeof(args)); |
171 | 171 | ||
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) | |||
178 | void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) | 178 | void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) |
179 | { | 179 | { |
180 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); | 180 | int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); |
181 | ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; | 181 | ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; |
182 | 182 | ||
183 | memset(&args, 0, sizeof(args)); | 183 | memset(&args, 0, sizeof(args)); |
184 | 184 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 3c62c45f43a1..9f78c03a2e31 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | |||
1207 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; | 1207 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1208 | 1208 | ||
1209 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1209 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1210 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 1210 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
1211 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 1211 | (u32)mode->clock); |
1212 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
1213 | (u32)mode->clock); | ||
1214 | line_time = min(line_time, (u32)65535); | ||
1212 | 1215 | ||
1213 | /* watermark for high clocks */ | 1216 | /* watermark for high clocks */ |
1214 | if (adev->pm.dpm_enabled) { | 1217 | if (adev->pm.dpm_enabled) { |
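This watermark hunk, repeated below for the other display generations, widens the crtc_hdisplay/crtc_htotal * 1000000 products to 64 bits via div_u64() because the product can exceed UINT32_MAX for large modes, wrapping before the division wherever the arithmetic is done in 32 bits. A userspace check of the two computations, with plausible (not vendor-verified) mode numbers:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t crtc_htotal = 4400;     /* plausible total line width for a 4K mode */
        uint32_t clock_khz   = 533250;   /* mode clock in kHz */

        /* 32-bit product: 4400 * 1000000 = 4,400,000,000 wraps past UINT32_MAX. */
        uint32_t wrapped = (uint32_t)(1000000u * crtc_htotal) / clock_khz;

        /* Widened product, what div_u64() gives the kernel on every arch. */
        uint32_t line_time = (uint32_t)(((uint64_t)crtc_htotal * 1000000) / clock_khz);

        if (line_time > 65535)
            line_time = 65535;

        printf("32-bit product: line_time = %" PRIu32 "\n", wrapped);     /* far too small */
        printf("64-bit product: line_time = %" PRIu32 "\n", line_time);   /* ~8251 */
        return 0;
    }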
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index c8ed0facddcd..4bcf01dc567a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | |||
1176 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; | 1176 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1177 | 1177 | ||
1178 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1178 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1179 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 1179 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
1180 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 1180 | (u32)mode->clock); |
1181 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
1182 | (u32)mode->clock); | ||
1183 | line_time = min(line_time, (u32)65535); | ||
1181 | 1184 | ||
1182 | /* watermark for high clocks */ | 1185 | /* watermark for high clocks */ |
1183 | if (adev->pm.dpm_enabled) { | 1186 | if (adev->pm.dpm_enabled) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 786b5d02f44e..fd134a4629d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | |||
@@ -991,8 +991,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, | |||
991 | fixed20_12 a, b, c; | 991 | fixed20_12 a, b, c; |
992 | 992 | ||
993 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 993 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
994 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 994 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
995 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 995 | (u32)mode->clock); |
996 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
997 | (u32)mode->clock); | ||
998 | line_time = min(line_time, (u32)65535); | ||
996 | priority_a_cnt = 0; | 999 | priority_a_cnt = 0; |
997 | priority_b_cnt = 0; | 1000 | priority_b_cnt = 0; |
998 | 1001 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3e90c19b9c7f..a9e869554627 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, | |||
1091 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; | 1091 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1092 | 1092 | ||
1093 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1093 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1094 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 1094 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
1095 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 1095 | (u32)mode->clock); |
1096 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
1097 | (u32)mode->clock); | ||
1098 | line_time = min(line_time, (u32)65535); | ||
1096 | 1099 | ||
1097 | /* watermark for high clocks */ | 1100 | /* watermark for high clocks */ |
1098 | if (adev->pm.dpm_enabled) { | 1101 | if (adev->pm.dpm_enabled) { |
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 40d2827a6d19..53e78d092d18 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config DRM_DW_HDMI | 1 | config DRM_DW_HDMI |
2 | tristate | 2 | tristate |
3 | select DRM_KMS_HELPER | 3 | select DRM_KMS_HELPER |
4 | select REGMAP_MMIO | ||
4 | 5 | ||
5 | config DRM_DW_HDMI_AHB_AUDIO | 6 | config DRM_DW_HDMI_AHB_AUDIO |
6 | tristate "Synopsys Designware AHB Audio interface" | 7 | tristate "Synopsys Designware AHB Audio interface" |
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 5cd61aff7857..8072e6e4c62c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c | |||
@@ -1293,21 +1293,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
1293 | if (!connector) | 1293 | if (!connector) |
1294 | return -ENOENT; | 1294 | return -ENOENT; |
1295 | 1295 | ||
1296 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | ||
1297 | encoder = drm_connector_get_encoder(connector); | ||
1298 | if (encoder) | ||
1299 | out_resp->encoder_id = encoder->base.id; | ||
1300 | else | ||
1301 | out_resp->encoder_id = 0; | ||
1302 | |||
1303 | ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, | ||
1304 | (uint32_t __user *)(unsigned long)(out_resp->props_ptr), | ||
1305 | (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), | ||
1306 | &out_resp->count_props); | ||
1307 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | ||
1308 | if (ret) | ||
1309 | goto out_unref; | ||
1310 | |||
1311 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) | 1296 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) |
1312 | if (connector->encoder_ids[i] != 0) | 1297 | if (connector->encoder_ids[i] != 0) |
1313 | encoders_count++; | 1298 | encoders_count++; |
@@ -1320,7 +1305,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
1320 | if (put_user(connector->encoder_ids[i], | 1305 | if (put_user(connector->encoder_ids[i], |
1321 | encoder_ptr + copied)) { | 1306 | encoder_ptr + copied)) { |
1322 | ret = -EFAULT; | 1307 | ret = -EFAULT; |
1323 | goto out_unref; | 1308 | goto out; |
1324 | } | 1309 | } |
1325 | copied++; | 1310 | copied++; |
1326 | } | 1311 | } |
@@ -1364,15 +1349,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
1364 | if (copy_to_user(mode_ptr + copied, | 1349 | if (copy_to_user(mode_ptr + copied, |
1365 | &u_mode, sizeof(u_mode))) { | 1350 | &u_mode, sizeof(u_mode))) { |
1366 | ret = -EFAULT; | 1351 | ret = -EFAULT; |
1352 | mutex_unlock(&dev->mode_config.mutex); | ||
1353 | |||
1367 | goto out; | 1354 | goto out; |
1368 | } | 1355 | } |
1369 | copied++; | 1356 | copied++; |
1370 | } | 1357 | } |
1371 | } | 1358 | } |
1372 | out_resp->count_modes = mode_count; | 1359 | out_resp->count_modes = mode_count; |
1373 | out: | ||
1374 | mutex_unlock(&dev->mode_config.mutex); | 1360 | mutex_unlock(&dev->mode_config.mutex); |
1375 | out_unref: | 1361 | |
1362 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | ||
1363 | encoder = drm_connector_get_encoder(connector); | ||
1364 | if (encoder) | ||
1365 | out_resp->encoder_id = encoder->base.id; | ||
1366 | else | ||
1367 | out_resp->encoder_id = 0; | ||
1368 | |||
1369 | /* Only grab properties after probing, to make sure EDID and other | ||
1370 | * properties reflect the latest status. */ | ||
1371 | ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, | ||
1372 | (uint32_t __user *)(unsigned long)(out_resp->props_ptr), | ||
1373 | (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), | ||
1374 | &out_resp->count_props); | ||
1375 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | ||
1376 | |||
1377 | out: | ||
1376 | drm_connector_put(connector); | 1378 | drm_connector_put(connector); |
1377 | 1379 | ||
1378 | return ret; | 1380 | return ret; |
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index adb411a078e8..f4b53588e071 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
@@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
1173 | 1173 | ||
1174 | 1174 | ||
1175 | if (IS_G200_SE(mdev)) { | 1175 | if (IS_G200_SE(mdev)) { |
1176 | if (mdev->unique_rev_id >= 0x02) { | 1176 | if (mdev->unique_rev_id >= 0x04) { |
1177 | WREG8(MGAREG_CRTCEXT_INDEX, 0x06); | ||
1178 | WREG8(MGAREG_CRTCEXT_DATA, 0); | ||
1179 | } else if (mdev->unique_rev_id >= 0x02) { | ||
1177 | u8 hi_pri_lvl; | 1180 | u8 hi_pri_lvl; |
1178 | u32 bpp; | 1181 | u32 bpp; |
1179 | u32 mb; | 1182 | u32 mb; |
@@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector, | |||
1639 | if (mga_vga_calculate_mode_bandwidth(mode, bpp) | 1642 | if (mga_vga_calculate_mode_bandwidth(mode, bpp) |
1640 | > (30100 * 1024)) | 1643 | > (30100 * 1024)) |
1641 | return MODE_BANDWIDTH; | 1644 | return MODE_BANDWIDTH; |
1645 | } else { | ||
1646 | if (mga_vga_calculate_mode_bandwidth(mode, bpp) | ||
1647 | > (55000 * 1024)) | ||
1648 | return MODE_BANDWIDTH; | ||
1642 | } | 1649 | } |
1643 | } else if (mdev->type == G200_WB) { | 1650 | } else if (mdev->type == G200_WB) { |
1644 | if (mode->hdisplay > 1280) | 1651 | if (mode->hdisplay > 1280) |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 4074805034da..3cb6c55b268d 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -9268,8 +9268,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev, | |||
9268 | u32 tmp, wm_mask; | 9268 | u32 tmp, wm_mask; |
9269 | 9269 | ||
9270 | if (radeon_crtc->base.enabled && num_heads && mode) { | 9270 | if (radeon_crtc->base.enabled && num_heads && mode) { |
9271 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 9271 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
9272 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 9272 | (u32)mode->clock); |
9273 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
9274 | (u32)mode->clock); | ||
9275 | line_time = min(line_time, (u32)65535); | ||
9273 | 9276 | ||
9274 | /* watermark for high clocks */ | 9277 | /* watermark for high clocks */ |
9275 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && | 9278 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 44527e679d31..24fe66c89dfb 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2163,8 +2163,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, | |||
2163 | fixed20_12 a, b, c; | 2163 | fixed20_12 a, b, c; |
2164 | 2164 | ||
2165 | if (radeon_crtc->base.enabled && num_heads && mode) { | 2165 | if (radeon_crtc->base.enabled && num_heads && mode) { |
2166 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 2166 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
2167 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 2167 | (u32)mode->clock); |
2168 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
2169 | (u32)mode->clock); | ||
2170 | line_time = min(line_time, (u32)65535); | ||
2168 | priority_a_cnt = 0; | 2171 | priority_a_cnt = 0; |
2169 | priority_b_cnt = 0; | 2172 | priority_b_cnt = 0; |
2170 | dram_channels = evergreen_get_number_of_dram_channels(rdev); | 2173 | dram_channels = evergreen_get_number_of_dram_channels(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 432480ff9d22..3178ba0c537c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev) | |||
3393 | rdev->pdev->subsystem_vendor == 0x103c && | 3393 | rdev->pdev->subsystem_vendor == 0x103c && |
3394 | rdev->pdev->subsystem_device == 0x280a) | 3394 | rdev->pdev->subsystem_device == 0x280a) |
3395 | return; | 3395 | return; |
3396 | /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume | ||
3397 | * - it hangs on resume inside the dynclk 1 table. | ||
3398 | */ | ||
3399 | if (rdev->family == CHIP_RS400 && | ||
3400 | rdev->pdev->subsystem_vendor == 0x1179 && | ||
3401 | rdev->pdev->subsystem_device == 0xff31) | ||
3402 | return; | ||
3396 | 3403 | ||
3397 | /* DYN CLK 1 */ | 3404 | /* DYN CLK 1 */ |
3398 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); | 3405 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6ecf42783d4b..0a6444d72000 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { | |||
136 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 | 136 | * https://bugzilla.kernel.org/show_bug.cgi?id=51381 |
137 | */ | 137 | */ |
138 | { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, | 138 | { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, |
139 | /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU | ||
140 | * https://bugs.freedesktop.org/show_bug.cgi?id=101491 | ||
141 | */ | ||
142 | { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, | ||
139 | /* macbook pro 8.2 */ | 143 | /* macbook pro 8.2 */ |
140 | { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, | 144 | { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, |
141 | { 0, 0, 0, 0, 0 }, | 145 | { 0, 0, 0, 0, 0 }, |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 7431eb4a11b7..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
621 | } | 621 | } |
622 | 622 | ||
623 | /* TODO: is this still necessary on NI+ ? */ | 623 | /* TODO: is this still necessary on NI+ ? */ |
624 | if ((cmd == 0 || cmd == 1 || cmd == 0x3) && | 624 | if ((cmd == 0 || cmd == 0x3) && |
625 | (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { | 625 | (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { |
626 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", | 626 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", |
627 | start, end); | 627 | start, end); |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index c88a80e1e3ad..1907c950d76f 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -2308,8 +2308,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev, | |||
2308 | fixed20_12 a, b, c; | 2308 | fixed20_12 a, b, c; |
2309 | 2309 | ||
2310 | if (radeon_crtc->base.enabled && num_heads && mode) { | 2310 | if (radeon_crtc->base.enabled && num_heads && mode) { |
2311 | active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; | 2311 | active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, |
2312 | line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); | 2312 | (u32)mode->clock); |
2313 | line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, | ||
2314 | (u32)mode->clock); | ||
2315 | line_time = min(line_time, (u32)65535); | ||
2313 | priority_a_cnt = 0; | 2316 | priority_a_cnt = 0; |
2314 | priority_b_cnt = 0; | 2317 | priority_b_cnt = 0; |
2315 | 2318 | ||
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index ac15cc65af36..518f4b69ea53 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c | |||
@@ -562,18 +562,6 @@ fail: | |||
562 | 562 | ||
563 | 563 | ||
564 | #ifdef CONFIG_DRM_TEGRA_STAGING | 564 | #ifdef CONFIG_DRM_TEGRA_STAGING |
565 | static struct tegra_drm_context * | ||
566 | tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id) | ||
567 | { | ||
568 | struct tegra_drm_context *context; | ||
569 | |||
570 | mutex_lock(&file->lock); | ||
571 | context = idr_find(&file->contexts, id); | ||
572 | mutex_unlock(&file->lock); | ||
573 | |||
574 | return context; | ||
575 | } | ||
576 | |||
577 | static int tegra_gem_create(struct drm_device *drm, void *data, | 565 | static int tegra_gem_create(struct drm_device *drm, void *data, |
578 | struct drm_file *file) | 566 | struct drm_file *file) |
579 | { | 567 | { |
@@ -662,7 +650,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv, | |||
662 | if (err < 0) | 650 | if (err < 0) |
663 | return err; | 651 | return err; |
664 | 652 | ||
665 | err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); | 653 | err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL); |
666 | if (err < 0) { | 654 | if (err < 0) { |
667 | client->ops->close_channel(context); | 655 | client->ops->close_channel(context); |
668 | return err; | 656 | return err; |
@@ -717,7 +705,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data, | |||
717 | 705 | ||
718 | mutex_lock(&fpriv->lock); | 706 | mutex_lock(&fpriv->lock); |
719 | 707 | ||
720 | context = tegra_drm_file_get_context(fpriv, args->context); | 708 | context = idr_find(&fpriv->contexts, args->context); |
721 | if (!context) { | 709 | if (!context) { |
722 | err = -EINVAL; | 710 | err = -EINVAL; |
723 | goto unlock; | 711 | goto unlock; |
@@ -742,7 +730,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data, | |||
742 | 730 | ||
743 | mutex_lock(&fpriv->lock); | 731 | mutex_lock(&fpriv->lock); |
744 | 732 | ||
745 | context = tegra_drm_file_get_context(fpriv, args->context); | 733 | context = idr_find(&fpriv->contexts, args->context); |
746 | if (!context) { | 734 | if (!context) { |
747 | err = -ENODEV; | 735 | err = -ENODEV; |
748 | goto unlock; | 736 | goto unlock; |
@@ -771,7 +759,7 @@ static int tegra_submit(struct drm_device *drm, void *data, | |||
771 | 759 | ||
772 | mutex_lock(&fpriv->lock); | 760 | mutex_lock(&fpriv->lock); |
773 | 761 | ||
774 | context = tegra_drm_file_get_context(fpriv, args->context); | 762 | context = idr_find(&fpriv->contexts, args->context); |
775 | if (!context) { | 763 | if (!context) { |
776 | err = -ENODEV; | 764 | err = -ENODEV; |
777 | goto unlock; | 765 | goto unlock; |
@@ -796,7 +784,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data, | |||
796 | 784 | ||
797 | mutex_lock(&fpriv->lock); | 785 | mutex_lock(&fpriv->lock); |
798 | 786 | ||
799 | context = tegra_drm_file_get_context(fpriv, args->context); | 787 | context = idr_find(&fpriv->contexts, args->context); |
800 | if (!context) { | 788 | if (!context) { |
801 | err = -ENODEV; | 789 | err = -ENODEV; |
802 | goto unlock; | 790 | goto unlock; |
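The tegra change drops the small locked lookup helper because every caller already holds fpriv->lock around the idr, and it starts idr_alloc() at 1 so a context id of 0 can never be handed out. The resulting pattern, in sketch form (field names as in the hunks above; the allocation also runs under the same lock in the driver):

    /* allocate an id >= 1; an 'end' of 0 means no upper bound */
    err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
    if (err < 0)
            return err;             /* -ENOSPC, -ENOMEM, ... */
    /* on success, err is the newly allocated id */

    /* lookups happen with fpriv->lock already taken by the ioctl */
    mutex_lock(&fpriv->lock);
    context = idr_find(&fpriv->contexts, args->context);
    if (!context)
            err = -ENODEV;
    mutex_unlock(&fpriv->lock);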
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 5c1c711a21af..2c58a390123a 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c | |||
@@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev) | |||
172 | 172 | ||
173 | host->rst = devm_reset_control_get(&pdev->dev, "host1x"); | 173 | host->rst = devm_reset_control_get(&pdev->dev, "host1x"); |
174 | if (IS_ERR(host->rst)) { | 174 | if (IS_ERR(host->rst)) { |
175 | err = PTR_ERR(host->clk); | 175 | err = PTR_ERR(host->rst); |
176 | dev_err(&pdev->dev, "failed to get reset: %d\n", err); | 176 | dev_err(&pdev->dev, "failed to get reset: %d\n", err); |
177 | return err; | 177 | return err; |
178 | } | 178 | } |
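The host1x change is a copy-and-paste bug fix: the error code was taken from host->clk after testing host->rst, so a failing reset lookup could return a stale or zero error. The canonical pattern, as a generic sketch:

    rst = devm_reset_control_get(&pdev->dev, "host1x");
    if (IS_ERR(rst)) {
            /* PTR_ERR() must decode the same pointer IS_ERR() just tested */
            err = PTR_ERR(rst);
            dev_err(&pdev->dev, "failed to get reset: %d\n", err);
            return err;
    }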
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 04cee65531d7..6e040692f1d8 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -826,11 +826,35 @@ static int hid_scan_report(struct hid_device *hid) | |||
826 | * hid-rmi should take care of them, | 826 | * hid-rmi should take care of them, |
827 | * not hid-generic | 827 | * not hid-generic |
828 | */ | 828 | */ |
829 | if (IS_ENABLED(CONFIG_HID_RMI)) | 829 | hid->group = HID_GROUP_RMI; |
830 | hid->group = HID_GROUP_RMI; | ||
831 | break; | 830 | break; |
832 | } | 831 | } |
833 | 832 | ||
833 | /* fall back to generic driver in case specific driver doesn't exist */ | ||
834 | switch (hid->group) { | ||
835 | case HID_GROUP_MULTITOUCH_WIN_8: | ||
836 | /* fall-through */ | ||
837 | case HID_GROUP_MULTITOUCH: | ||
838 | if (!IS_ENABLED(CONFIG_HID_MULTITOUCH)) | ||
839 | hid->group = HID_GROUP_GENERIC; | ||
840 | break; | ||
841 | case HID_GROUP_SENSOR_HUB: | ||
842 | if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB)) | ||
843 | hid->group = HID_GROUP_GENERIC; | ||
844 | break; | ||
845 | case HID_GROUP_RMI: | ||
846 | if (!IS_ENABLED(CONFIG_HID_RMI)) | ||
847 | hid->group = HID_GROUP_GENERIC; | ||
848 | break; | ||
849 | case HID_GROUP_WACOM: | ||
850 | if (!IS_ENABLED(CONFIG_HID_WACOM)) | ||
851 | hid->group = HID_GROUP_GENERIC; | ||
852 | break; | ||
853 | case HID_GROUP_LOGITECH_DJ_DEVICE: | ||
854 | if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ)) | ||
855 | hid->group = HID_GROUP_GENERIC; | ||
856 | break; | ||
857 | } | ||
834 | vfree(parser); | 858 | vfree(parser); |
835 | return 0; | 859 | return 0; |
836 | } | 860 | } |
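The switch added to hid_scan_report() reroutes a device to HID_GROUP_GENERIC whenever the specialised driver for its group is not configured, so hid-generic can still bind instead of leaving the device driverless. IS_ENABLED() is true for both =y and =m and is a compile-time constant, so the dead branches fold away. A minimal illustration of the idiom (symbol names as in the hunk):

    #include <linux/kconfig.h>

    /* IS_ENABLED(CONFIG_HID_RMI) expands to 1 when CONFIG_HID_RMI is y or m,
     * and to 0 otherwise, so the compiler removes the untaken branch. */
    if (!IS_ENABLED(CONFIG_HID_RMI))
            hid->group = HID_GROUP_GENERIC;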
@@ -1763,15 +1787,23 @@ EXPORT_SYMBOL_GPL(hid_disconnect); | |||
1763 | * used as a driver. See hid_scan_report(). | 1787 | * used as a driver. See hid_scan_report(). |
1764 | */ | 1788 | */ |
1765 | static const struct hid_device_id hid_have_special_driver[] = { | 1789 | static const struct hid_device_id hid_have_special_driver[] = { |
1790 | #if IS_ENABLED(CONFIG_HID_A4TECH) | ||
1766 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, | 1791 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, |
1767 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, | 1792 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, |
1768 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, | 1793 | { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, |
1794 | #endif | ||
1795 | #if IS_ENABLED(CONFIG_HID_ACCUTOUCH) | ||
1796 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, | ||
1797 | #endif | ||
1798 | #if IS_ENABLED(CONFIG_HID_ACRUX) | ||
1769 | { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, | 1799 | { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, |
1770 | { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, | 1800 | { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, |
1801 | #endif | ||
1802 | #if IS_ENABLED(CONFIG_HID_ALPS) | ||
1771 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, | 1803 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, |
1804 | #endif | ||
1805 | #if IS_ENABLED(CONFIG_HID_APPLE) | ||
1772 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, | 1806 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, |
1773 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, | ||
1774 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, | ||
1775 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, | 1807 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, |
1776 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, | 1808 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, |
1777 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, | 1809 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, |
@@ -1792,11 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1792 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, | 1824 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, |
1793 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, | 1825 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, |
1794 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, | 1826 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, |
1795 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, | ||
1796 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, | ||
1797 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, | ||
1798 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, | ||
1799 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, | ||
1800 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, | 1827 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, |
1801 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, | 1828 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, |
1802 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) }, | 1829 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) }, |
@@ -1851,62 +1878,100 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1851 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, | 1878 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, |
1852 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, | 1879 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, |
1853 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, | 1880 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, |
1881 | #endif | ||
1882 | #if IS_ENABLED(CONFIG_HID_APPLEIR) | ||
1883 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, | ||
1884 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, | ||
1885 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, | ||
1886 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, | ||
1887 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, | ||
1888 | #endif | ||
1889 | #if IS_ENABLED(CONFIG_HID_ASUS) | ||
1854 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) }, | 1890 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) }, |
1855 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, | 1891 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, |
1856 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, | 1892 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, |
1857 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, | 1893 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, |
1858 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, | 1894 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, |
1895 | #endif | ||
1896 | #if IS_ENABLED(CONFIG_HID_AUREAL) | ||
1859 | { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, | 1897 | { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, |
1898 | #endif | ||
1899 | #if IS_ENABLED(CONFIG_HID_BELKIN) | ||
1860 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, | 1900 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, |
1901 | { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, | ||
1902 | #endif | ||
1903 | #if IS_ENABLED(CONFIG_HID_BETOP_FF) | ||
1861 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, | 1904 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, |
1862 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) }, | 1905 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) }, |
1863 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) }, | 1906 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) }, |
1864 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) }, | 1907 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) }, |
1865 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, | 1908 | #endif |
1866 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, | 1909 | #if IS_ENABLED(CONFIG_HID_CHERRY) |
1867 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, | 1910 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, |
1868 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, | 1911 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, |
1912 | #endif | ||
1913 | #if IS_ENABLED(CONFIG_HID_CHICONY) | ||
1869 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, | 1914 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, |
1870 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, | ||
1871 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, | 1915 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, |
1872 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, | 1916 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, |
1873 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, | 1917 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, |
1918 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, | ||
1919 | #endif | ||
1920 | #if IS_ENABLED(CONFIG_HID_CMEDIA) | ||
1921 | { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) }, | ||
1922 | #endif | ||
1923 | #if IS_ENABLED(CONFIG_HID_CORSAIR) | ||
1874 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, | 1924 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, |
1875 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, | 1925 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, |
1876 | { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, | 1926 | #endif |
1927 | #if IS_ENABLED(CONFIG_HID_CP2112) | ||
1877 | { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, | 1928 | { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, |
1929 | #endif | ||
1930 | #if IS_ENABLED(CONFIG_HID_CYPRESS) | ||
1878 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, | 1931 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, |
1879 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, | 1932 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, |
1880 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, | 1933 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, |
1881 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, | 1934 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, |
1882 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, | 1935 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, |
1883 | { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, | 1936 | #endif |
1937 | #if IS_ENABLED(CONFIG_HID_DRAGONRISE) | ||
1884 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, | 1938 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, |
1885 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, | 1939 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, |
1886 | #if IS_ENABLED(CONFIG_HID_MAYFLASH) | ||
1887 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, | ||
1888 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, | ||
1889 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, | ||
1890 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, | ||
1891 | #endif | 1940 | #endif |
1892 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, | 1941 | #if IS_ENABLED(CONFIG_HID_ELECOM) |
1893 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, | ||
1894 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 1942 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
1895 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 1943 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
1896 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 1944 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
1945 | #endif | ||
1946 | #if IS_ENABLED(CONFIG_HID_ELO) | ||
1897 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, | 1947 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, |
1898 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, | 1948 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, |
1899 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, | 1949 | #endif |
1950 | #if IS_ENABLED(CONFIG_HID_EMS_FF) | ||
1900 | { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, | 1951 | { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, |
1952 | #endif | ||
1953 | #if IS_ENABLED(CONFIG_HID_EZKEY) | ||
1901 | { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, | 1954 | { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, |
1902 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, | 1955 | #endif |
1903 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, | 1956 | #if IS_ENABLED(CONFIG_HID_GEMBIRD) |
1904 | { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) }, | 1957 | { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) }, |
1905 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, | 1958 | #endif |
1959 | #if IS_ENABLED(CONFIG_HID_GFRM) | ||
1960 | { HID_BLUETOOTH_DEVICE(0x58, 0x2000) }, | ||
1961 | { HID_BLUETOOTH_DEVICE(0x471, 0x2210) }, | ||
1962 | #endif | ||
1963 | #if IS_ENABLED(CONFIG_HID_GREENASIA) | ||
1906 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, | 1964 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, |
1965 | #endif | ||
1966 | #if IS_ENABLED(CONFIG_HID_GT683R) | ||
1967 | { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, | ||
1968 | #endif | ||
1969 | #if IS_ENABLED(CONFIG_HID_GYRATION) | ||
1907 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, | 1970 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, |
1908 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, | 1971 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, |
1909 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, | 1972 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, |
1973 | #endif | ||
1974 | #if IS_ENABLED(CONFIG_HID_HOLTEK) | ||
1910 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, | 1975 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, |
1911 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, | 1976 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, |
1912 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, | 1977 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, |
@@ -1915,12 +1980,17 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1915 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, | 1980 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, |
1916 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, | 1981 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, |
1917 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, | 1982 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, |
1918 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, | 1983 | #endif |
1919 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, | 1984 | #if IS_ENABLED(CONFIG_HID_ICADE) |
1920 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, | ||
1921 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, | 1985 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, |
1986 | #endif | ||
1987 | #if IS_ENABLED(CONFIG_HID_KENSINGTON) | ||
1922 | { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, | 1988 | { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, |
1989 | #endif | ||
1990 | #if IS_ENABLED(CONFIG_HID_KEYTOUCH) | ||
1923 | { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, | 1991 | { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, |
1992 | #endif | ||
1993 | #if IS_ENABLED(CONFIG_HID_KYE) | ||
1924 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, | 1994 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, |
1925 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, | 1995 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, |
1926 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, | 1996 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, |
@@ -1930,21 +2000,29 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1930 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) }, | 2000 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) }, |
1931 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, | 2001 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, |
1932 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, | 2002 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, |
1933 | { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, | 2003 | #endif |
2004 | #if IS_ENABLED(CONFIG_HID_LCPOWER) | ||
1934 | { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, | 2005 | { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, |
2006 | #endif | ||
2007 | #if IS_ENABLED(CONFIG_HID_LED) | ||
2008 | { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, | ||
2009 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, | ||
2010 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, | ||
2011 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, | ||
2012 | { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, | ||
2013 | { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, | ||
2014 | #endif | ||
1935 | #if IS_ENABLED(CONFIG_HID_LENOVO) | 2015 | #if IS_ENABLED(CONFIG_HID_LENOVO) |
1936 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, | 2016 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, |
1937 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, | 2017 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, |
1938 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, | 2018 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, |
1939 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, | 2019 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, |
1940 | #endif | 2020 | #endif |
1941 | { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, | 2021 | #if IS_ENABLED(CONFIG_HID_LOGITECH) |
1942 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, | 2022 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, |
1943 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, | 2023 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, |
1944 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, | 2024 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, |
1945 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, | 2025 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, |
1946 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, | ||
1947 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, | ||
1948 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, | 2026 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, |
1949 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, | 2027 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, |
1950 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, | 2028 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, |
@@ -1957,7 +2035,6 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1957 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, | 2035 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, |
1958 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, | 2036 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, |
1959 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) }, | 2037 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) }, |
1960 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) }, | ||
1961 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, | 2038 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, |
1962 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, | 2039 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, |
1963 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, | 2040 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, |
@@ -1969,17 +2046,30 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1969 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, | 2046 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, |
1970 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, | 2047 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, |
1971 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, | 2048 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, |
1972 | #if IS_ENABLED(CONFIG_HID_LOGITECH_DJ) | ||
1973 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, | ||
1974 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) }, | ||
1975 | #endif | ||
1976 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, | 2049 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, |
1977 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, | 2050 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, |
1978 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, | 2051 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, |
1979 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, | 2052 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, |
1980 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, | 2053 | #endif |
1981 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, | 2054 | #if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP) |
1982 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, | 2055 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, |
2056 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) }, | ||
2057 | #endif | ||
2058 | #if IS_ENABLED(CONFIG_HID_LOGITECH_DJ) | ||
2059 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, | ||
2060 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) }, | ||
2061 | #endif | ||
2062 | #if IS_ENABLED(CONFIG_HID_MAGICMOUSE) | ||
2063 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, | ||
2064 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, | ||
2065 | #endif | ||
2066 | #if IS_ENABLED(CONFIG_HID_MAYFLASH) | ||
2067 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, | ||
2068 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, | ||
2069 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, | ||
2070 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, | ||
2071 | #endif | ||
2072 | #if IS_ENABLED(CONFIG_HID_MICROSOFT) | ||
1983 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, | 2073 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, |
1984 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, | 2074 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, |
1985 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, | 2075 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, |
@@ -1995,9 +2085,22 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1995 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, | 2085 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, |
1996 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, | 2086 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, |
1997 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, | 2087 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, |
2088 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, | ||
2089 | #endif | ||
2090 | #if IS_ENABLED(CONFIG_HID_MONTEREY) | ||
1998 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, | 2091 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, |
1999 | { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, | 2092 | #endif |
2093 | #if IS_ENABLED(CONFIG_HID_MULTITOUCH) | ||
2094 | { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, | ||
2095 | #endif | ||
2096 | #if IS_ENABLED(CONFIG_HID_WIIMOTE) | ||
2097 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, | ||
2098 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, | ||
2099 | #endif | ||
2100 | #if IS_ENABLED(CONFIG_HID_NTI) | ||
2000 | { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, | 2101 | { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, |
2102 | #endif | ||
2103 | #if IS_ENABLED(CONFIG_HID_NTRIG) | ||
2001 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, | 2104 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, |
2002 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, | 2105 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, |
2003 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, | 2106 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, |
@@ -2017,13 +2120,41 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
2017 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, | 2120 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, |
2018 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, | 2121 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, |
2019 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, | 2122 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, |
2123 | #endif | ||
2124 | #if IS_ENABLED(CONFIG_HID_ORTEK) | ||
2020 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, | 2125 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, |
2021 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, | 2126 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, |
2127 | { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, | ||
2128 | #endif | ||
2129 | #if IS_ENABLED(CONFIG_HID_PANTHERLORD) | ||
2130 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, | ||
2131 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, | ||
2132 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, | ||
2133 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, | ||
2134 | #endif | ||
2135 | #if IS_ENABLED(CONFIG_HID_PENMOUNT) | ||
2022 | { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, | 2136 | { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, |
2137 | #endif | ||
2138 | #if IS_ENABLED(CONFIG_HID_PETALYNX) | ||
2023 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, | 2139 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, |
2140 | #endif | ||
2141 | #if IS_ENABLED(CONFIG_HID_PICOLCD) | ||
2142 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, | ||
2143 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, | ||
2144 | #endif | ||
2145 | #if IS_ENABLED(CONFIG_HID_PLANTRONICS) | ||
2024 | { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, | 2146 | { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, |
2147 | #endif | ||
2148 | #if IS_ENABLED(CONFIG_HID_PRIMAX) | ||
2025 | { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, | 2149 | { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, |
2026 | { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, | 2150 | #endif |
2151 | #if IS_ENABLED(CONFIG_HID_PRODIKEYS) | ||
2152 | { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, | ||
2153 | #endif | ||
2154 | #if IS_ENABLED(CONFIG_HID_RMI) | ||
2155 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) }, | ||
2156 | { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) }, | ||
2157 | #endif | ||
2027 | #if IS_ENABLED(CONFIG_HID_ROCCAT) | 2158 | #if IS_ENABLED(CONFIG_HID_ROCCAT) |
2028 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, | 2159 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, |
2029 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, | 2160 | { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, |
@@ -2051,9 +2182,21 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
2051 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, | 2182 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, |
2052 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, | 2183 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, |
2053 | #endif | 2184 | #endif |
2185 | #if IS_ENABLED(CONFIG_HID_SAMSUNG) | ||
2054 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, | 2186 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, |
2055 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, | 2187 | { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, |
2056 | { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, | 2188 | #endif |
2189 | #if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS) | ||
2190 | { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, | ||
2191 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, | ||
2192 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, | ||
2193 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, | ||
2194 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, | ||
2195 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, | ||
2196 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, | ||
2197 | #endif | ||
2198 | #if IS_ENABLED(CONFIG_HID_SONY) | ||
2199 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, | ||
2057 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, | 2200 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, |
2058 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, | 2201 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, |
2059 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, | 2202 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, |
@@ -2072,9 +2215,17 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
2072 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, | 2215 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, |
2073 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, | 2216 | { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, |
2074 | { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, | 2217 | { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, |
2218 | #endif | ||
2219 | #if IS_ENABLED(CONFIG_HID_SPEEDLINK) | ||
2220 | { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, | ||
2221 | #endif | ||
2222 | #if IS_ENABLED(CONFIG_HID_STEELSERIES) | ||
2075 | { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, | 2223 | { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, |
2224 | #endif | ||
2225 | #if IS_ENABLED(CONFIG_HID_SUNPLUS) | ||
2076 | { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, | 2226 | { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, |
2077 | { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, | 2227 | #endif |
2228 | #if IS_ENABLED(CONFIG_HID_THRUSTMASTER) | ||
2078 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, | 2229 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, |
2079 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, | 2230 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, |
2080 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, | 2231 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, |
@@ -2083,12 +2234,25 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
2083 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, | 2234 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, |
2084 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, | 2235 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, |
2085 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, | 2236 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, |
2237 | #endif | ||
2238 | #if IS_ENABLED(CONFIG_HID_TIVO) | ||
2086 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, | 2239 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, |
2087 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, | 2240 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, |
2088 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, | 2241 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, |
2242 | #endif | ||
2243 | #if IS_ENABLED(CONFIG_HID_TOPSEED) | ||
2244 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, | ||
2245 | { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, | ||
2246 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, | ||
2089 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, | 2247 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, |
2090 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, | 2248 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, |
2249 | #endif | ||
2250 | #if IS_ENABLED(CONFIG_HID_TWINHAN) | ||
2091 | { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, | 2251 | { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, |
2252 | #endif | ||
2253 | #if IS_ENABLED(CONFIG_HID_UCLOGIC) | ||
2254 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, | ||
2255 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) }, | ||
2092 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, | 2256 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, |
2093 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, | 2257 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, |
2094 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, | 2258 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, |
@@ -2096,20 +2260,17 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
2096 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, | 2260 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, |
2097 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, | 2261 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, |
2098 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, | 2262 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, |
2099 | { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, | ||
2100 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, | 2263 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, |
2101 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, | 2264 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, |
2102 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, | 2265 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, |
2103 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, | 2266 | { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, |
2104 | { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, | ||
2105 | { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, | 2267 | { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, |
2106 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, | 2268 | { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, |
2107 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, | 2269 | #endif |
2108 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, | 2270 | #if IS_ENABLED(CONFIG_HID_UDRAW_PS3) |
2109 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, | 2271 | { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, |
2110 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, | 2272 | #endif |
2111 | { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, | 2273 | #if IS_ENABLED(CONFIG_HID_WALTOP) |
2112 | { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, | ||
2113 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, | 2274 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, |
2114 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, | 2275 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, |
2115 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, | 2276 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, |
@@ -2117,19 +2278,18 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
2117 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, | 2278 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, |
2118 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, | 2279 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, |
2119 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, | 2280 | { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, |
2120 | { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, | 2281 | #endif |
2282 | #if IS_ENABLED(CONFIG_HID_XINMO) | ||
2121 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, | 2283 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, |
2122 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, | 2284 | { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, |
2285 | #endif | ||
2286 | #if IS_ENABLED(CONFIG_HID_ZEROPLUS) | ||
2123 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, | 2287 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, |
2124 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, | 2288 | { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, |
2289 | #endif | ||
2290 | #if IS_ENABLED(CONFIG_HID_ZYDACRON) | ||
2125 | { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, | 2291 | { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, |
2126 | 2292 | #endif | |
2127 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, | ||
2128 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, | ||
2129 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, | ||
2130 | { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) }, | ||
2131 | { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) }, | ||
2132 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) }, | ||
2133 | { } | 2293 | { } |
2134 | }; | 2294 | }; |
2135 | 2295 | ||
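The rest of the hid-core.c diff re-sorts hid_have_special_driver by owning driver and wraps each run of entries in #if IS_ENABLED(CONFIG_HID_xxx), so a device is only withheld from hid-generic when its special driver can actually be built. Shape of one guarded block, using entries visible in the hunks above:

    static const struct hid_device_id hid_have_special_driver[] = {
    #if IS_ENABLED(CONFIG_HID_EZKEY)
            /* claim this device only when hid-ezkey is configured */
            { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
    #endif
            { }     /* terminator */
    };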
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 8ca1e8ce0af2..4f9a3938189a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -319,6 +319,9 @@ | |||
319 | #define USB_VENDOR_ID_DELCOM 0x0fc5 | 319 | #define USB_VENDOR_ID_DELCOM 0x0fc5 |
320 | #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 | 320 | #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 |
321 | 321 | ||
322 | #define USB_VENDOR_ID_DELL 0x413c | ||
323 | #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a | ||
324 | |||
322 | #define USB_VENDOR_ID_DELORME 0x1163 | 325 | #define USB_VENDOR_ID_DELORME 0x1163 |
323 | #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 | 326 | #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 |
324 | #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 | 327 | #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 |
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 1d6c997b3001..20b40ad26325 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev, | |||
349 | 349 | ||
350 | if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { | 350 | if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { |
351 | magicmouse_emit_buttons(msc, clicks & 3); | 351 | magicmouse_emit_buttons(msc, clicks & 3); |
352 | input_mt_report_pointer_emulation(input, true); | ||
353 | input_report_rel(input, REL_X, x); | 352 | input_report_rel(input, REL_X, x); |
354 | input_report_rel(input, REL_Y, y); | 353 | input_report_rel(input, REL_Y, y); |
355 | } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ | 354 | } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ |
@@ -389,16 +388,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd | |||
389 | __clear_bit(BTN_RIGHT, input->keybit); | 388 | __clear_bit(BTN_RIGHT, input->keybit); |
390 | __clear_bit(BTN_MIDDLE, input->keybit); | 389 | __clear_bit(BTN_MIDDLE, input->keybit); |
391 | __set_bit(BTN_MOUSE, input->keybit); | 390 | __set_bit(BTN_MOUSE, input->keybit); |
391 | __set_bit(BTN_TOOL_FINGER, input->keybit); | ||
392 | __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); | ||
393 | __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); | ||
394 | __set_bit(BTN_TOOL_QUADTAP, input->keybit); | ||
395 | __set_bit(BTN_TOOL_QUINTTAP, input->keybit); | ||
396 | __set_bit(BTN_TOUCH, input->keybit); | ||
397 | __set_bit(INPUT_PROP_POINTER, input->propbit); | ||
392 | __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); | 398 | __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); |
393 | } | 399 | } |
394 | 400 | ||
395 | __set_bit(BTN_TOOL_FINGER, input->keybit); | ||
396 | __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); | ||
397 | __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); | ||
398 | __set_bit(BTN_TOOL_QUADTAP, input->keybit); | ||
399 | __set_bit(BTN_TOOL_QUINTTAP, input->keybit); | ||
400 | __set_bit(BTN_TOUCH, input->keybit); | ||
401 | __set_bit(INPUT_PROP_POINTER, input->propbit); | ||
402 | 401 | ||
403 | __set_bit(EV_ABS, input->evbit); | 402 | __set_bit(EV_ABS, input->evbit); |
404 | 403 | ||
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 6316498b7812..a88e7c7bea0a 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -85,6 +85,7 @@ static const struct hid_blacklist { | |||
85 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, | 85 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, |
86 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, | 86 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, |
87 | { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, | 87 | { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, |
88 | { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, | ||
88 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, | 89 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
89 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, | 90 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, |
90 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, | 91 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, |
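The Dell vendor and product IDs added to hid-ids.h above are consumed here: HID_QUIRK_ALWAYS_POLL keeps the device's interrupt endpoint polled even when nothing has it open, the usual workaround for mice that otherwise stop reporting. A sketch of the entry in the table's vendor/product/quirk-mask layout (field names recalled, not shown in the hunk):

    static const struct hid_blacklist {
            __u16 idVendor;
            __u16 idProduct;
            __u32 quirks;
    } hid_blacklist[] = {
            { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE,
              HID_QUIRK_ALWAYS_POLL },
            /* ... */
    };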
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 26b05106f0d3..93d28c0ec8bf 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c | |||
@@ -1066,7 +1066,7 @@ static void ssip_pn_setup(struct net_device *dev) | |||
1066 | dev->addr_len = 1; | 1066 | dev->addr_len = 1; |
1067 | dev->tx_queue_len = SSIP_TXQUEUE_LEN; | 1067 | dev->tx_queue_len = SSIP_TXQUEUE_LEN; |
1068 | 1068 | ||
1069 | dev->destructor = free_netdev; | 1069 | dev->needs_free_netdev = true; |
1070 | dev->header_ops = &phonet_header_ops; | 1070 | dev->header_ops = &phonet_header_ops; |
1071 | } | 1071 | } |
1072 | 1072 | ||
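The ssi_protocol hunk is part of the tree-wide 4.12 conversion away from dev->destructor: a driver that only needs free_netdev() now sets needs_free_netdev and lets the core free the device after unregistration. Sketch of a setup callback under the new scheme (my_netdev_setup is a made-up name):

    static void my_netdev_setup(struct net_device *dev)
    {
            dev->mtu = 1500;
            /* the core calls free_netdev() once the device is unregistered */
            dev->needs_free_netdev = true;
    }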
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 95ed17183e73..54a47b40546f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, | |||
734 | * the first read operation, otherwise the first read cost | 734 | * the first read operation, otherwise the first read cost |
735 | * one extra clock cycle. | 735 | * one extra clock cycle. |
736 | */ | 736 | */ |
737 | temp = readb(i2c_imx->base + IMX_I2C_I2CR); | 737 | temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); |
738 | temp |= I2CR_MTX; | 738 | temp |= I2CR_MTX; |
739 | writeb(temp, i2c_imx->base + IMX_I2C_I2CR); | 739 | imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); |
740 | } | 740 | } |
741 | msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); | 741 | msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); |
742 | 742 | ||
@@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo | |||
857 | * the first read operation, otherwise the first read cost | 857 | * the first read operation, otherwise the first read cost |
858 | * one extra clock cycle. | 858 | * one extra clock cycle. |
859 | */ | 859 | */ |
860 | temp = readb(i2c_imx->base + IMX_I2C_I2CR); | 860 | temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); |
861 | temp |= I2CR_MTX; | 861 | temp |= I2CR_MTX; |
862 | writeb(temp, i2c_imx->base + IMX_I2C_I2CR); | 862 | imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); |
863 | } | 863 | } |
864 | } else if (i == (msgs->len - 2)) { | 864 | } else if (i == (msgs->len - 2)) { |
865 | dev_dbg(&i2c_imx->adapter.dev, | 865 | dev_dbg(&i2c_imx->adapter.dev, |
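Both i2c-imx hunks replace raw readb()/writeb() on the register base with the driver's imx_i2c_read_reg()/imx_i2c_write_reg() helpers, which apply the per-SoC register stride; poking the base directly only happens to work on parts whose registers are byte-spaced. The helpers look roughly like this (a sketch of the driver's convention, not copied from it):

    static inline void imx_i2c_write_reg(unsigned int val,
                                         struct imx_i2c_struct *i2c_imx,
                                         unsigned int reg)
    {
            writeb(val, i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
    }

    static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
                                                 unsigned int reg)
    {
            return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
    }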
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index f573448d2132..e98e44e584a4 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
@@ -584,7 +584,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, | |||
584 | 584 | ||
585 | /* unmap the data buffer */ | 585 | /* unmap the data buffer */ |
586 | if (dma_size != 0) | 586 | if (dma_size != 0) |
587 | dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); | 587 | dma_unmap_single(dev, dma_addr, dma_size, dma_direction); |
588 | 588 | ||
589 | if (unlikely(!time_left)) { | 589 | if (unlikely(!time_left)) { |
590 | dev_err(dev, "completion wait timed out\n"); | 590 | dev_err(dev, "completion wait timed out\n"); |
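The i2c-ismt fix makes dma_unmap_single() use the same struct device the buffer was mapped with; mapping against one device but unmapping against the adapter's logical device is a DMA-API violation (and is what CONFIG_DMA_API_DEBUG flags). The rule, as a generic sketch:

    dma_addr = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, dma_addr))
            return -ENOMEM;

    /* ... run the transfer ... */

    /* unmap with the identical device, handle, size and direction */
    dma_unmap_single(dev, dma_addr, size, DMA_FROM_DEVICE);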
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 214bf2835d1f..8be3e6cb8fe6 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
@@ -319,7 +319,7 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv) | |||
319 | rcar_i2c_write(priv, ICFBSCR, TCYC06); | 319 | rcar_i2c_write(priv, ICFBSCR, TCYC06); |
320 | 320 | ||
321 | dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), | 321 | dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), |
322 | priv->msg->len, priv->dma_direction); | 322 | sg_dma_len(&priv->sg), priv->dma_direction); |
323 | 323 | ||
324 | priv->dma_direction = DMA_NONE; | 324 | priv->dma_direction = DMA_NONE; |
325 | } | 325 | } |
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index dd4190b50df6..6066bbfc42fe 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c | |||
@@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev) | |||
468 | static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) | 468 | static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) |
469 | { | 469 | { |
470 | struct meson_sar_adc_priv *priv = iio_priv(indio_dev); | 470 | struct meson_sar_adc_priv *priv = iio_priv(indio_dev); |
471 | int count; | 471 | unsigned int count, tmp; |
472 | 472 | ||
473 | for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { | 473 | for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { |
474 | if (!meson_sar_adc_get_fifo_count(indio_dev)) | 474 | if (!meson_sar_adc_get_fifo_count(indio_dev)) |
475 | break; | 475 | break; |
476 | 476 | ||
477 | regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); | 477 | regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp); |
478 | } | 478 | } |
479 | } | 479 | } |
480 | 480 | ||
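The meson_saradc fix gives regmap_read() a real destination instead of 0: the third argument is an unsigned int * that the register value is always stored through, so passing a null pointer means a NULL write on every FIFO drain. Minimal correct usage:

    unsigned int val;
    int ret;

    ret = regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &val);
    if (ret)
            return ret;
    /* val holds the FIFO word, which this loop simply discards */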
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c index b0c7d8ee5cb8..6888167ca1e6 100644 --- a/drivers/iio/adc/mxs-lradc-adc.c +++ b/drivers/iio/adc/mxs-lradc-adc.c | |||
@@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev) | |||
718 | adc->dev = dev; | 718 | adc->dev = dev; |
719 | 719 | ||
720 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 720 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
721 | if (!iores) | ||
722 | return -EINVAL; | ||
723 | |||
721 | adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); | 724 | adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); |
722 | if (IS_ERR(adc->base)) | 725 | if (!adc->base) |
723 | return PTR_ERR(adc->base); | 726 | return -ENOMEM; |
724 | 727 | ||
725 | init_completion(&adc->completion); | 728 | init_completion(&adc->completion); |
726 | spin_lock_init(&adc->lock); | 729 | spin_lock_init(&adc->lock); |
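The mxs-lradc fix corrects two things: platform_get_resource() can return NULL, and devm_ioremap() reports failure with NULL rather than an ERR_PTR. A hedged sketch of the more compact idiom, devm_ioremap_resource(), which folds both checks together and does return an ERR_PTR on failure:

	/* devm_ioremap_resource() validates the resource itself and returns
	 * an ERR_PTR, so IS_ERR()/PTR_ERR() is correct here - unlike plain
	 * devm_ioremap(), which returns NULL on failure. */
	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adc->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(adc->base))
		return PTR_ERR(adc->base);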
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index dd99d273bae9..ff03324dee13 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/poll.h> | 15 | #include <linux/poll.h> |
16 | #include <linux/iio/buffer.h> | 16 | #include <linux/iio/buffer.h> |
17 | #include <linux/iio/buffer_impl.h> | ||
17 | #include <linux/iio/buffer-dma.h> | 18 | #include <linux/iio/buffer-dma.h> |
18 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
19 | #include <linux/sizes.h> | 20 | #include <linux/sizes.h> |
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index 9fabed47053d..2b5a320f42c5 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/iio/iio.h> | 15 | #include <linux/iio/iio.h> |
16 | #include <linux/iio/buffer.h> | 16 | #include <linux/iio/buffer.h> |
17 | #include <linux/iio/buffer_impl.h> | ||
17 | #include <linux/iio/buffer-dma.h> | 18 | #include <linux/iio/buffer-dma.h> |
18 | #include <linux/iio/buffer-dmaengine.h> | 19 | #include <linux/iio/buffer-dmaengine.h> |
19 | 20 | ||
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 96dabbd2f004..88a7c5d4e4d2 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | |||
@@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785}; | |||
41 | static const struct inv_mpu6050_reg_map reg_set_6500 = { | 41 | static const struct inv_mpu6050_reg_map reg_set_6500 = { |
42 | .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, | 42 | .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, |
43 | .lpf = INV_MPU6050_REG_CONFIG, | 43 | .lpf = INV_MPU6050_REG_CONFIG, |
44 | .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2, | ||
44 | .user_ctrl = INV_MPU6050_REG_USER_CTRL, | 45 | .user_ctrl = INV_MPU6050_REG_USER_CTRL, |
45 | .fifo_en = INV_MPU6050_REG_FIFO_EN, | 46 | .fifo_en = INV_MPU6050_REG_FIFO_EN, |
46 | .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, | 47 | .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, |
@@ -211,6 +212,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on) | |||
211 | EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); | 212 | EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); |
212 | 213 | ||
213 | /** | 214 | /** |
215 | * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent | ||
216 | * | ||
217 | * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope | ||
218 | * MPU6500 and above have a dedicated register for accelerometer | ||
219 | */ | ||
220 | static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st, | ||
221 | enum inv_mpu6050_filter_e val) | ||
222 | { | ||
223 | int result; | ||
224 | |||
225 | result = regmap_write(st->map, st->reg->lpf, val); | ||
226 | if (result) | ||
227 | return result; | ||
228 | |||
229 | switch (st->chip_type) { | ||
230 | case INV_MPU6050: | ||
231 | case INV_MPU6000: | ||
232 | case INV_MPU9150: | ||
233 | /* old chips, nothing to do */ | ||
234 | result = 0; | ||
235 | break; | ||
236 | default: | ||
237 | /* set accel lpf */ | ||
238 | result = regmap_write(st->map, st->reg->accel_lpf, val); | ||
239 | break; | ||
240 | } | ||
241 | |||
242 | return result; | ||
243 | } | ||
244 | |||
245 | /** | ||
214 | * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. | 246 | * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. |
215 | * | 247 | * |
216 | * Initial configuration: | 248 | * Initial configuration: |
@@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev) | |||
233 | if (result) | 265 | if (result) |
234 | return result; | 266 | return result; |
235 | 267 | ||
236 | d = INV_MPU6050_FILTER_20HZ; | 268 | result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ); |
237 | result = regmap_write(st->map, st->reg->lpf, d); | ||
238 | if (result) | 269 | if (result) |
239 | return result; | 270 | return result; |
240 | 271 | ||
@@ -537,6 +568,8 @@ error_write_raw: | |||
537 | * would be aliasing. This function basically searches for the | 568 | * would be aliasing. This function basically searches for the |
538 | * correct low pass parameters based on the fifo rate, e.g., | 569 | * correct low pass parameters based on the fifo rate, e.g., |
539 | * sampling frequency. | 570 | * sampling frequency. |
571 | * | ||
572 | * lpf is set automatically when setting the sampling rate to avoid aliasing. | ||
540 | */ | 573 | */ |
541 | static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) | 574 | static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) |
542 | { | 575 | { |
@@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) | |||
552 | while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) | 585 | while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) |
553 | i++; | 586 | i++; |
554 | data = d[i]; | 587 | data = d[i]; |
555 | result = regmap_write(st->map, st->reg->lpf, data); | 588 | result = inv_mpu6050_set_lpf_regs(st, data); |
556 | if (result) | 589 | if (result) |
557 | return result; | 590 | return result; |
558 | st->chip_config.lpf = data; | 591 | st->chip_config.lpf = data; |
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index ef13de7a2c20..953a0c09d568 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | |||
@@ -28,6 +28,7 @@ | |||
28 | * struct inv_mpu6050_reg_map - Notable registers. | 28 | * struct inv_mpu6050_reg_map - Notable registers. |
29 | * @sample_rate_div: Divider applied to gyro output rate. | 29 | * @sample_rate_div: Divider applied to gyro output rate. |
30 | * @lpf: Configures internal low pass filter. | 30 | * @lpf: Configures internal low pass filter. |
31 | * @accel_lpf: Configures accelerometer low pass filter. | ||
31 | * @user_ctrl: Enables/resets the FIFO. | 32 | * @user_ctrl: Enables/resets the FIFO. |
32 | * @fifo_en: Determines which data will appear in FIFO. | 33 | * @fifo_en: Determines which data will appear in FIFO. |
33 | * @gyro_config: gyro config register. | 34 | * @gyro_config: gyro config register. |
@@ -47,6 +48,7 @@ | |||
47 | struct inv_mpu6050_reg_map { | 48 | struct inv_mpu6050_reg_map { |
48 | u8 sample_rate_div; | 49 | u8 sample_rate_div; |
49 | u8 lpf; | 50 | u8 lpf; |
51 | u8 accel_lpf; | ||
50 | u8 user_ctrl; | 52 | u8 user_ctrl; |
51 | u8 fifo_en; | 53 | u8 fifo_en; |
52 | u8 gyro_config; | 54 | u8 gyro_config; |
@@ -188,6 +190,7 @@ struct inv_mpu6050_state { | |||
188 | #define INV_MPU6050_FIFO_THRESHOLD 500 | 190 | #define INV_MPU6050_FIFO_THRESHOLD 500 |
189 | 191 | ||
190 | /* mpu6500 registers */ | 192 | /* mpu6500 registers */ |
193 | #define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D | ||
191 | #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 | 194 | #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 |
192 | 195 | ||
193 | /* delay time in milliseconds */ | 196 | /* delay time in milliseconds */ |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 02971e239a18..ece6926fa2e6 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, | |||
449 | return ret; | 449 | return ret; |
450 | 450 | ||
451 | rt = (struct rt6_info *)dst; | 451 | rt = (struct rt6_info *)dst; |
452 | if (ipv6_addr_any(&fl6.saddr)) { | 452 | if (ipv6_addr_any(&src_in->sin6_addr)) { |
453 | ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev, | ||
454 | &fl6.daddr, 0, &fl6.saddr); | ||
455 | if (ret) | ||
456 | goto put; | ||
457 | |||
458 | src_in->sin6_family = AF_INET6; | 453 | src_in->sin6_family = AF_INET6; |
459 | src_in->sin6_addr = fl6.saddr; | 454 | src_in->sin6_addr = fl6.saddr; |
460 | } | 455 | } |
@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, | |||
471 | 466 | ||
472 | *pdst = dst; | 467 | *pdst = dst; |
473 | return 0; | 468 | return 0; |
474 | put: | ||
475 | dst_release(dst); | ||
476 | return ret; | ||
477 | } | 469 | } |
478 | #else | 470 | #else |
479 | static int addr6_resolve(struct sockaddr_in6 *src_in, | 471 | static int addr6_resolve(struct sockaddr_in6 *src_in, |
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ebf7be8d4139..08772836fded 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h | |||
@@ -56,6 +56,10 @@ | |||
56 | #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) | 56 | #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) |
57 | #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) | 57 | #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) |
58 | 58 | ||
59 | #define BNXT_RE_UD_QP_HW_STALL 0x400000 | ||
60 | |||
61 | #define BNXT_RE_RQ_WQE_THRESHOLD 32 | ||
62 | |||
59 | struct bnxt_re_work { | 63 | struct bnxt_re_work { |
60 | struct work_struct work; | 64 | struct work_struct work; |
61 | unsigned long event; | 65 | unsigned long event; |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 7ba9e699d7ab..c7bd68311d0c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
@@ -61,6 +61,48 @@ | |||
61 | #include "ib_verbs.h" | 61 | #include "ib_verbs.h" |
62 | #include <rdma/bnxt_re-abi.h> | 62 | #include <rdma/bnxt_re-abi.h> |
63 | 63 | ||
64 | static int __from_ib_access_flags(int iflags) | ||
65 | { | ||
66 | int qflags = 0; | ||
67 | |||
68 | if (iflags & IB_ACCESS_LOCAL_WRITE) | ||
69 | qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; | ||
70 | if (iflags & IB_ACCESS_REMOTE_READ) | ||
71 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; | ||
72 | if (iflags & IB_ACCESS_REMOTE_WRITE) | ||
73 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; | ||
74 | if (iflags & IB_ACCESS_REMOTE_ATOMIC) | ||
75 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; | ||
76 | if (iflags & IB_ACCESS_MW_BIND) | ||
77 | qflags |= BNXT_QPLIB_ACCESS_MW_BIND; | ||
78 | if (iflags & IB_ZERO_BASED) | ||
79 | qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; | ||
80 | if (iflags & IB_ACCESS_ON_DEMAND) | ||
81 | qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; | ||
82 | return qflags; | ||
83 | }; | ||
84 | |||
85 | static enum ib_access_flags __to_ib_access_flags(int qflags) | ||
86 | { | ||
87 | enum ib_access_flags iflags = 0; | ||
88 | |||
89 | if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) | ||
90 | iflags |= IB_ACCESS_LOCAL_WRITE; | ||
91 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) | ||
92 | iflags |= IB_ACCESS_REMOTE_WRITE; | ||
93 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) | ||
94 | iflags |= IB_ACCESS_REMOTE_READ; | ||
95 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) | ||
96 | iflags |= IB_ACCESS_REMOTE_ATOMIC; | ||
97 | if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) | ||
98 | iflags |= IB_ACCESS_MW_BIND; | ||
99 | if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) | ||
100 | iflags |= IB_ZERO_BASED; | ||
101 | if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) | ||
102 | iflags |= IB_ACCESS_ON_DEMAND; | ||
103 | return iflags; | ||
104 | }; | ||
105 | |||
64 | static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, | 106 | static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, |
65 | struct bnxt_qplib_sge *sg_list, int num) | 107 | struct bnxt_qplib_sge *sg_list, int num) |
66 | { | 108 | { |
@@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, | |||
149 | ib_attr->max_total_mcast_qp_attach = 0; | 191 | ib_attr->max_total_mcast_qp_attach = 0; |
150 | ib_attr->max_ah = dev_attr->max_ah; | 192 | ib_attr->max_ah = dev_attr->max_ah; |
151 | 193 | ||
152 | ib_attr->max_fmr = dev_attr->max_fmr; | 194 | ib_attr->max_fmr = 0; |
153 | ib_attr->max_map_per_fmr = 1; /* ? */ | 195 | ib_attr->max_map_per_fmr = 0; |
154 | 196 | ||
155 | ib_attr->max_srq = dev_attr->max_srq; | 197 | ib_attr->max_srq = dev_attr->max_srq; |
156 | ib_attr->max_srq_wr = dev_attr->max_srq_wqes; | 198 | ib_attr->max_srq_wr = dev_attr->max_srq_wqes; |
@@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, | |||
410 | return IB_LINK_LAYER_ETHERNET; | 452 | return IB_LINK_LAYER_ETHERNET; |
411 | } | 453 | } |
412 | 454 | ||
455 | #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) | ||
456 | |||
457 | static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) | ||
458 | { | ||
459 | struct bnxt_re_fence_data *fence = &pd->fence; | ||
460 | struct ib_mr *ib_mr = &fence->mr->ib_mr; | ||
461 | struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; | ||
462 | |||
463 | memset(wqe, 0, sizeof(*wqe)); | ||
464 | wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; | ||
465 | wqe->wr_id = BNXT_QPLIB_FENCE_WRID; | ||
466 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; | ||
467 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | ||
468 | wqe->bind.zero_based = false; | ||
469 | wqe->bind.parent_l_key = ib_mr->lkey; | ||
470 | wqe->bind.va = (u64)(unsigned long)fence->va; | ||
471 | wqe->bind.length = fence->size; | ||
472 | wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); | ||
473 | wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; | ||
474 | |||
475 | /* Save the initial rkey in fence structure for now; | ||
476 | * wqe->bind.r_key will be set at (re)bind time. | ||
477 | */ | ||
478 | fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); | ||
479 | } | ||
480 | |||
481 | static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) | ||
482 | { | ||
483 | struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, | ||
484 | qplib_qp); | ||
485 | struct ib_pd *ib_pd = qp->ib_qp.pd; | ||
486 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); | ||
487 | struct bnxt_re_fence_data *fence = &pd->fence; | ||
488 | struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; | ||
489 | struct bnxt_qplib_swqe wqe; | ||
490 | int rc; | ||
491 | |||
492 | memcpy(&wqe, fence_wqe, sizeof(wqe)); | ||
493 | wqe.bind.r_key = fence->bind_rkey; | ||
494 | fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); | ||
495 | |||
496 | dev_dbg(rdev_to_dev(qp->rdev), | ||
497 | "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", | ||
498 | wqe.bind.r_key, qp->qplib_qp.id, pd); | ||
499 | rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); | ||
500 | if (rc) { | ||
501 | dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); | ||
502 | return rc; | ||
503 | } | ||
504 | bnxt_qplib_post_send_db(&qp->qplib_qp); | ||
505 | |||
506 | return rc; | ||
507 | } | ||
508 | |||
509 | static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) | ||
510 | { | ||
511 | struct bnxt_re_fence_data *fence = &pd->fence; | ||
512 | struct bnxt_re_dev *rdev = pd->rdev; | ||
513 | struct device *dev = &rdev->en_dev->pdev->dev; | ||
514 | struct bnxt_re_mr *mr = fence->mr; | ||
515 | |||
516 | if (fence->mw) { | ||
517 | bnxt_re_dealloc_mw(fence->mw); | ||
518 | fence->mw = NULL; | ||
519 | } | ||
520 | if (mr) { | ||
521 | if (mr->ib_mr.rkey) | ||
522 | bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, | ||
523 | true); | ||
524 | if (mr->ib_mr.lkey) | ||
525 | bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); | ||
526 | kfree(mr); | ||
527 | fence->mr = NULL; | ||
528 | } | ||
529 | if (fence->dma_addr) { | ||
530 | dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, | ||
531 | DMA_BIDIRECTIONAL); | ||
532 | fence->dma_addr = 0; | ||
533 | } | ||
534 | } | ||
535 | |||
536 | static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) | ||
537 | { | ||
538 | int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; | ||
539 | struct bnxt_re_fence_data *fence = &pd->fence; | ||
540 | struct bnxt_re_dev *rdev = pd->rdev; | ||
541 | struct device *dev = &rdev->en_dev->pdev->dev; | ||
542 | struct bnxt_re_mr *mr = NULL; | ||
543 | dma_addr_t dma_addr = 0; | ||
544 | struct ib_mw *mw; | ||
545 | u64 pbl_tbl; | ||
546 | int rc; | ||
547 | |||
548 | dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, | ||
549 | DMA_BIDIRECTIONAL); | ||
550 | rc = dma_mapping_error(dev, dma_addr); | ||
551 | if (rc) { | ||
552 | dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); | ||
553 | rc = -EIO; | ||
554 | fence->dma_addr = 0; | ||
555 | goto fail; | ||
556 | } | ||
557 | fence->dma_addr = dma_addr; | ||
558 | |||
559 | /* Allocate a MR */ | ||
560 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); | ||
561 | if (!mr) { | ||
562 | rc = -ENOMEM; | ||
563 | goto fail; | ||
564 | } | ||
565 | fence->mr = mr; | ||
566 | mr->rdev = rdev; | ||
567 | mr->qplib_mr.pd = &pd->qplib_pd; | ||
568 | mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; | ||
569 | mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); | ||
570 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); | ||
571 | if (rc) { | ||
572 | dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); | ||
573 | goto fail; | ||
574 | } | ||
575 | |||
576 | /* Register MR */ | ||
577 | mr->ib_mr.lkey = mr->qplib_mr.lkey; | ||
578 | mr->qplib_mr.va = (u64)(unsigned long)fence->va; | ||
579 | mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; | ||
580 | pbl_tbl = dma_addr; | ||
581 | rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, | ||
582 | BNXT_RE_FENCE_PBL_SIZE, false); | ||
583 | if (rc) { | ||
584 | dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); | ||
585 | goto fail; | ||
586 | } | ||
587 | mr->ib_mr.rkey = mr->qplib_mr.rkey; | ||
588 | |||
589 | /* Create a fence MW only for kernel consumers */ | ||
590 | mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); | ||
591 | if (!mw) { | ||
592 | dev_err(rdev_to_dev(rdev), | ||
593 | "Failed to create fence-MW for PD: %p\n", pd); | ||
594 | rc = -EINVAL; | ||
595 | goto fail; | ||
596 | } | ||
597 | fence->mw = mw; | ||
598 | |||
599 | bnxt_re_create_fence_wqe(pd); | ||
600 | return 0; | ||
601 | |||
602 | fail: | ||
603 | bnxt_re_destroy_fence_mr(pd); | ||
604 | return rc; | ||
605 | } | ||
606 | |||
413 | /* Protection Domains */ | 607 | /* Protection Domains */ |
414 | int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) | 608 | int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) |
415 | { | 609 | { |
@@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) | |||
417 | struct bnxt_re_dev *rdev = pd->rdev; | 611 | struct bnxt_re_dev *rdev = pd->rdev; |
418 | int rc; | 612 | int rc; |
419 | 613 | ||
614 | bnxt_re_destroy_fence_mr(pd); | ||
420 | if (ib_pd->uobject && pd->dpi.dbr) { | 615 | if (ib_pd->uobject && pd->dpi.dbr) { |
421 | struct ib_ucontext *ib_uctx = ib_pd->uobject->context; | 616 | struct ib_ucontext *ib_uctx = ib_pd->uobject->context; |
422 | struct bnxt_re_ucontext *ucntx; | 617 | struct bnxt_re_ucontext *ucntx; |
@@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, | |||
498 | } | 693 | } |
499 | } | 694 | } |
500 | 695 | ||
696 | if (!udata) | ||
697 | if (bnxt_re_create_fence_mr(pd)) | ||
698 | dev_warn(rdev_to_dev(rdev), | ||
699 | "Failed to create Fence-MR\n"); | ||
501 | return &pd->ib_pd; | 700 | return &pd->ib_pd; |
502 | dbfail: | 701 | dbfail: |
503 | (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, | 702 | (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, |
@@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp | |||
849 | /* Shadow QP SQ depth should be same as QP1 RQ depth */ | 1048 | /* Shadow QP SQ depth should be same as QP1 RQ depth */ |
850 | qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; | 1049 | qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; |
851 | qp->qplib_qp.sq.max_sge = 2; | 1050 | qp->qplib_qp.sq.max_sge = 2; |
1051 | /* Q full delta can be 1 since it is internal QP */ | ||
1052 | qp->qplib_qp.sq.q_full_delta = 1; | ||
852 | 1053 | ||
853 | qp->qplib_qp.scq = qp1_qp->scq; | 1054 | qp->qplib_qp.scq = qp1_qp->scq; |
854 | qp->qplib_qp.rcq = qp1_qp->rcq; | 1055 | qp->qplib_qp.rcq = qp1_qp->rcq; |
855 | 1056 | ||
856 | qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; | 1057 | qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; |
857 | qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; | 1058 | qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; |
1059 | /* Q full delta can be 1 since it is internal QP */ | ||
1060 | qp->qplib_qp.rq.q_full_delta = 1; | ||
858 | 1061 | ||
859 | qp->qplib_qp.mtu = qp1_qp->mtu; | 1062 | qp->qplib_qp.mtu = qp1_qp->mtu; |
860 | 1063 | ||
@@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
917 | qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == | 1120 | qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == |
918 | IB_SIGNAL_ALL_WR) ? true : false); | 1121 | IB_SIGNAL_ALL_WR) ? true : false); |
919 | 1122 | ||
920 | entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); | ||
921 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, | ||
922 | dev_attr->max_qp_wqes + 1); | ||
923 | |||
924 | qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; | 1123 | qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; |
925 | if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) | 1124 | if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) |
926 | qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; | 1125 | qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; |
@@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
959 | qp->qplib_qp.rq.max_wqe = min_t(u32, entries, | 1158 | qp->qplib_qp.rq.max_wqe = min_t(u32, entries, |
960 | dev_attr->max_qp_wqes + 1); | 1159 | dev_attr->max_qp_wqes + 1); |
961 | 1160 | ||
1161 | qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - | ||
1162 | qp_init_attr->cap.max_recv_wr; | ||
1163 | |||
962 | qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; | 1164 | qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; |
963 | if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) | 1165 | if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) |
964 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; | 1166 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
@@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
967 | qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); | 1169 | qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); |
968 | 1170 | ||
969 | if (qp_init_attr->qp_type == IB_QPT_GSI) { | 1171 | if (qp_init_attr->qp_type == IB_QPT_GSI) { |
1172 | /* Allocate 1 more than what's provided */ | ||
1173 | entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); | ||
1174 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, | ||
1175 | dev_attr->max_qp_wqes + 1); | ||
1176 | qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - | ||
1177 | qp_init_attr->cap.max_send_wr; | ||
970 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; | 1178 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
971 | if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) | 1179 | if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) |
972 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; | 1180 | qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
@@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1006 | } | 1214 | } |
1007 | 1215 | ||
1008 | } else { | 1216 | } else { |
1217 | /* Allocate 128 + 1 more than what's provided */ | ||
1218 | entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + | ||
1219 | BNXT_QPLIB_RESERVED_QP_WRS + 1); | ||
1220 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, | ||
1221 | dev_attr->max_qp_wqes + | ||
1222 | BNXT_QPLIB_RESERVED_QP_WRS + 1); | ||
1223 | qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; | ||
1224 | |||
1225 | /* | ||
1226 | * Reserve one slot for the phantom WQE. The application can | ||
1227 | * post one extra entry in this case, but allowing this avoids an | ||
1228 | * unexpected queue-full condition. | ||
1229 | */ | ||
1230 | |||
1231 | qp->qplib_qp.sq.q_full_delta -= 1; | ||
1232 | |||
1009 | qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; | 1233 | qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; |
1010 | qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; | 1234 | qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; |
1011 | if (udata) { | 1235 | if (udata) { |
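As a worked example of the sizing above (assuming BNXT_QPLIB_RESERVED_QP_WRS is 128, per the "128 + 1" comment): for cap.max_send_wr = 100, entries = roundup_pow_of_two(100 + 128 + 1) = 256, sq.max_wqe = min(256, max_qp_wqes + 129), and q_full_delta starts at 129; dropping one slot for the phantom WQE leaves 128 entries of headroom between what the application may post and what the hardware ring can hold.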
@@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1025 | 1249 | ||
1026 | qp->ib_qp.qp_num = qp->qplib_qp.id; | 1250 | qp->ib_qp.qp_num = qp->qplib_qp.id; |
1027 | spin_lock_init(&qp->sq_lock); | 1251 | spin_lock_init(&qp->sq_lock); |
1252 | spin_lock_init(&qp->rq_lock); | ||
1028 | 1253 | ||
1029 | if (udata) { | 1254 | if (udata) { |
1030 | struct bnxt_re_qp_resp resp; | 1255 | struct bnxt_re_qp_resp resp; |
@@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu) | |||
1129 | } | 1354 | } |
1130 | } | 1355 | } |
1131 | 1356 | ||
1132 | static int __from_ib_access_flags(int iflags) | ||
1133 | { | ||
1134 | int qflags = 0; | ||
1135 | |||
1136 | if (iflags & IB_ACCESS_LOCAL_WRITE) | ||
1137 | qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; | ||
1138 | if (iflags & IB_ACCESS_REMOTE_READ) | ||
1139 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; | ||
1140 | if (iflags & IB_ACCESS_REMOTE_WRITE) | ||
1141 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; | ||
1142 | if (iflags & IB_ACCESS_REMOTE_ATOMIC) | ||
1143 | qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; | ||
1144 | if (iflags & IB_ACCESS_MW_BIND) | ||
1145 | qflags |= BNXT_QPLIB_ACCESS_MW_BIND; | ||
1146 | if (iflags & IB_ZERO_BASED) | ||
1147 | qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; | ||
1148 | if (iflags & IB_ACCESS_ON_DEMAND) | ||
1149 | qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; | ||
1150 | return qflags; | ||
1151 | }; | ||
1152 | |||
1153 | static enum ib_access_flags __to_ib_access_flags(int qflags) | ||
1154 | { | ||
1155 | enum ib_access_flags iflags = 0; | ||
1156 | |||
1157 | if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) | ||
1158 | iflags |= IB_ACCESS_LOCAL_WRITE; | ||
1159 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) | ||
1160 | iflags |= IB_ACCESS_REMOTE_WRITE; | ||
1161 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) | ||
1162 | iflags |= IB_ACCESS_REMOTE_READ; | ||
1163 | if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) | ||
1164 | iflags |= IB_ACCESS_REMOTE_ATOMIC; | ||
1165 | if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) | ||
1166 | iflags |= IB_ACCESS_MW_BIND; | ||
1167 | if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) | ||
1168 | iflags |= IB_ZERO_BASED; | ||
1169 | if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) | ||
1170 | iflags |= IB_ACCESS_ON_DEMAND; | ||
1171 | return iflags; | ||
1172 | }; | ||
1173 | |||
1174 | static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, | 1357 | static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, |
1175 | struct bnxt_re_qp *qp1_qp, | 1358 | struct bnxt_re_qp *qp1_qp, |
1176 | int qp_attr_mask) | 1359 | int qp_attr_mask) |
@@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1378 | entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); | 1561 | entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); |
1379 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, | 1562 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, |
1380 | dev_attr->max_qp_wqes + 1); | 1563 | dev_attr->max_qp_wqes + 1); |
1564 | qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - | ||
1565 | qp_attr->cap.max_send_wr; | ||
1566 | /* | ||
1567 | * Reserve one slot for the phantom WQE. Some applications can | ||
1568 | * post one extra entry in this case; allowing this avoids an | ||
1569 | * unexpected queue-full condition. | ||
1570 | */ | ||
1571 | qp->qplib_qp.sq.q_full_delta -= 1; | ||
1381 | qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; | 1572 | qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; |
1382 | if (qp->qplib_qp.rq.max_wqe) { | 1573 | if (qp->qplib_qp.rq.max_wqe) { |
1383 | entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); | 1574 | entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); |
1384 | qp->qplib_qp.rq.max_wqe = | 1575 | qp->qplib_qp.rq.max_wqe = |
1385 | min_t(u32, entries, dev_attr->max_qp_wqes + 1); | 1576 | min_t(u32, entries, dev_attr->max_qp_wqes + 1); |
1577 | qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - | ||
1578 | qp_attr->cap.max_recv_wr; | ||
1386 | qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; | 1579 | qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; |
1387 | } else { | 1580 | } else { |
1388 | /* SRQ was used prior, just ignore the RQ caps */ | 1581 | /* SRQ was used prior, just ignore the RQ caps */ |
@@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, | |||
1883 | return payload_sz; | 2076 | return payload_sz; |
1884 | } | 2077 | } |
1885 | 2078 | ||
2079 | static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) | ||
2080 | { | ||
2081 | if ((qp->ib_qp.qp_type == IB_QPT_UD || | ||
2082 | qp->ib_qp.qp_type == IB_QPT_GSI || | ||
2083 | qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && | ||
2084 | qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { | ||
2085 | int qp_attr_mask; | ||
2086 | struct ib_qp_attr qp_attr; | ||
2087 | |||
2088 | qp_attr_mask = IB_QP_STATE; | ||
2089 | qp_attr.qp_state = IB_QPS_RTS; | ||
2090 | bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); | ||
2091 | qp->qplib_qp.wqe_cnt = 0; | ||
2092 | } | ||
2093 | } | ||
2094 | |||
1886 | static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, | 2095 | static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, |
1887 | struct bnxt_re_qp *qp, | 2096 | struct bnxt_re_qp *qp, |
1888 | struct ib_send_wr *wr) | 2097 | struct ib_send_wr *wr) |
@@ -1928,6 +2137,7 @@ bad: | |||
1928 | wr = wr->next; | 2137 | wr = wr->next; |
1929 | } | 2138 | } |
1930 | bnxt_qplib_post_send_db(&qp->qplib_qp); | 2139 | bnxt_qplib_post_send_db(&qp->qplib_qp); |
2140 | bnxt_ud_qp_hw_stall_workaround(qp); | ||
1931 | spin_unlock_irqrestore(&qp->sq_lock, flags); | 2141 | spin_unlock_irqrestore(&qp->sq_lock, flags); |
1932 | return rc; | 2142 | return rc; |
1933 | } | 2143 | } |
@@ -2024,6 +2234,7 @@ bad: | |||
2024 | wr = wr->next; | 2234 | wr = wr->next; |
2025 | } | 2235 | } |
2026 | bnxt_qplib_post_send_db(&qp->qplib_qp); | 2236 | bnxt_qplib_post_send_db(&qp->qplib_qp); |
2237 | bnxt_ud_qp_hw_stall_workaround(qp); | ||
2027 | spin_unlock_irqrestore(&qp->sq_lock, flags); | 2238 | spin_unlock_irqrestore(&qp->sq_lock, flags); |
2028 | 2239 | ||
2029 | return rc; | 2240 | return rc; |
@@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, | |||
2071 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); | 2282 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
2072 | struct bnxt_qplib_swqe wqe; | 2283 | struct bnxt_qplib_swqe wqe; |
2073 | int rc = 0, payload_sz = 0; | 2284 | int rc = 0, payload_sz = 0; |
2285 | unsigned long flags; | ||
2286 | u32 count = 0; | ||
2074 | 2287 | ||
2288 | spin_lock_irqsave(&qp->rq_lock, flags); | ||
2075 | while (wr) { | 2289 | while (wr) { |
2076 | /* House keeping */ | 2290 | /* House keeping */ |
2077 | memset(&wqe, 0, sizeof(wqe)); | 2291 | memset(&wqe, 0, sizeof(wqe)); |
@@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, | |||
2100 | *bad_wr = wr; | 2314 | *bad_wr = wr; |
2101 | break; | 2315 | break; |
2102 | } | 2316 | } |
2317 | |||
2318 | /* Ring the DB once the number of RQEs posted reaches the threshold */ | ||
2319 | if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { | ||
2320 | bnxt_qplib_post_recv_db(&qp->qplib_qp); | ||
2321 | count = 0; | ||
2322 | } | ||
2323 | |||
2103 | wr = wr->next; | 2324 | wr = wr->next; |
2104 | } | 2325 | } |
2105 | bnxt_qplib_post_recv_db(&qp->qplib_qp); | 2326 | |
2327 | if (count) | ||
2328 | bnxt_qplib_post_recv_db(&qp->qplib_qp); | ||
2329 | |||
2330 | spin_unlock_irqrestore(&qp->rq_lock, flags); | ||
2331 | |||
2106 | return rc; | 2332 | return rc; |
2107 | } | 2333 | } |
2108 | 2334 | ||
@@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc, | |||
2643 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; | 2869 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; |
2644 | } | 2870 | } |
2645 | 2871 | ||
2872 | static int send_phantom_wqe(struct bnxt_re_qp *qp) | ||
2873 | { | ||
2874 | struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; | ||
2875 | unsigned long flags; | ||
2876 | int rc = 0; | ||
2877 | |||
2878 | spin_lock_irqsave(&qp->sq_lock, flags); | ||
2879 | |||
2880 | rc = bnxt_re_bind_fence_mw(lib_qp); | ||
2881 | if (!rc) { | ||
2882 | lib_qp->sq.phantom_wqe_cnt++; | ||
2883 | dev_dbg(&lib_qp->sq.hwq.pdev->dev, | ||
2884 | "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", | ||
2885 | lib_qp->id, lib_qp->sq.hwq.prod, | ||
2886 | HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), | ||
2887 | lib_qp->sq.phantom_wqe_cnt); | ||
2888 | } | ||
2889 | |||
2890 | spin_unlock_irqrestore(&qp->sq_lock, flags); | ||
2891 | return rc; | ||
2892 | } | ||
2893 | |||
2646 | int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) | 2894 | int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) |
2647 | { | 2895 | { |
2648 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); | 2896 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
2649 | struct bnxt_re_qp *qp; | 2897 | struct bnxt_re_qp *qp; |
2650 | struct bnxt_qplib_cqe *cqe; | 2898 | struct bnxt_qplib_cqe *cqe; |
2651 | int i, ncqe, budget; | 2899 | int i, ncqe, budget; |
2900 | struct bnxt_qplib_q *sq; | ||
2901 | struct bnxt_qplib_qp *lib_qp; | ||
2652 | u32 tbl_idx; | 2902 | u32 tbl_idx; |
2653 | struct bnxt_re_sqp_entries *sqp_entry = NULL; | 2903 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
2654 | unsigned long flags; | 2904 | unsigned long flags; |
@@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) | |||
2661 | } | 2911 | } |
2662 | cqe = &cq->cql[0]; | 2912 | cqe = &cq->cql[0]; |
2663 | while (budget) { | 2913 | while (budget) { |
2664 | ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); | 2914 | lib_qp = NULL; |
2915 | ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); | ||
2916 | if (lib_qp) { | ||
2917 | sq = &lib_qp->sq; | ||
2918 | if (sq->send_phantom) { | ||
2919 | qp = container_of(lib_qp, | ||
2920 | struct bnxt_re_qp, qplib_qp); | ||
2921 | if (send_phantom_wqe(qp) == -ENOMEM) | ||
2922 | dev_err(rdev_to_dev(cq->rdev), | ||
2923 | "Phantom failed! Scheduled to send again\n"); | ||
2924 | else | ||
2925 | sq->send_phantom = false; | ||
2926 | } | ||
2927 | } | ||
2928 | |||
2665 | if (!ncqe) | 2929 | if (!ncqe) |
2666 | break; | 2930 | break; |
2667 | 2931 | ||
@@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) | |||
2822 | struct bnxt_re_dev *rdev = mr->rdev; | 3086 | struct bnxt_re_dev *rdev = mr->rdev; |
2823 | int rc; | 3087 | int rc; |
2824 | 3088 | ||
3089 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); | ||
3090 | if (rc) { | ||
3091 | dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); | ||
3092 | return rc; | ||
3093 | } | ||
3094 | |||
2825 | if (mr->npages && mr->pages) { | 3095 | if (mr->npages && mr->pages) { |
2826 | rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, | 3096 | rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, |
2827 | &mr->qplib_frpl); | 3097 | &mr->qplib_frpl); |
@@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) | |||
2829 | mr->npages = 0; | 3099 | mr->npages = 0; |
2830 | mr->pages = NULL; | 3100 | mr->pages = NULL; |
2831 | } | 3101 | } |
2832 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); | ||
2833 | |||
2834 | if (!IS_ERR_OR_NULL(mr->ib_umem)) | 3102 | if (!IS_ERR_OR_NULL(mr->ib_umem)) |
2835 | ib_umem_release(mr->ib_umem); | 3103 | ib_umem_release(mr->ib_umem); |
2836 | 3104 | ||
@@ -2914,97 +3182,52 @@ fail: | |||
2914 | return ERR_PTR(rc); | 3182 | return ERR_PTR(rc); |
2915 | } | 3183 | } |
2916 | 3184 | ||
2917 | /* Fast Memory Regions */ | 3185 | struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, |
2918 | struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, | 3186 | struct ib_udata *udata) |
2919 | struct ib_fmr_attr *fmr_attr) | ||
2920 | { | 3187 | { |
2921 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); | 3188 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
2922 | struct bnxt_re_dev *rdev = pd->rdev; | 3189 | struct bnxt_re_dev *rdev = pd->rdev; |
2923 | struct bnxt_re_fmr *fmr; | 3190 | struct bnxt_re_mw *mw; |
2924 | int rc; | 3191 | int rc; |
2925 | 3192 | ||
2926 | if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || | 3193 | mw = kzalloc(sizeof(*mw), GFP_KERNEL); |
2927 | fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { | 3194 | if (!mw) |
2928 | dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit"); | ||
2929 | return ERR_PTR(-ENOMEM); | 3195 | return ERR_PTR(-ENOMEM); |
2930 | } | 3196 | mw->rdev = rdev; |
2931 | fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); | 3197 | mw->qplib_mw.pd = &pd->qplib_pd; |
2932 | if (!fmr) | ||
2933 | return ERR_PTR(-ENOMEM); | ||
2934 | |||
2935 | fmr->rdev = rdev; | ||
2936 | fmr->qplib_fmr.pd = &pd->qplib_pd; | ||
2937 | fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; | ||
2938 | 3198 | ||
2939 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); | 3199 | mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? |
2940 | if (rc) | 3200 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : |
3201 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); | ||
3202 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); | ||
3203 | if (rc) { | ||
3204 | dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); | ||
2941 | goto fail; | 3205 | goto fail; |
3206 | } | ||
3207 | mw->ib_mw.rkey = mw->qplib_mw.rkey; | ||
2942 | 3208 | ||
2943 | fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); | 3209 | atomic_inc(&rdev->mw_count); |
2944 | fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; | 3210 | return &mw->ib_mw; |
2945 | fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; | ||
2946 | 3211 | ||
2947 | atomic_inc(&rdev->mr_count); | ||
2948 | return &fmr->ib_fmr; | ||
2949 | fail: | 3212 | fail: |
2950 | kfree(fmr); | 3213 | kfree(mw); |
2951 | return ERR_PTR(rc); | 3214 | return ERR_PTR(rc); |
2952 | } | 3215 | } |
2953 | 3216 | ||
2954 | int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, | 3217 | int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) |
2955 | u64 iova) | ||
2956 | { | 3218 | { |
2957 | struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, | 3219 | struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); |
2958 | ib_fmr); | 3220 | struct bnxt_re_dev *rdev = mw->rdev; |
2959 | struct bnxt_re_dev *rdev = fmr->rdev; | ||
2960 | int rc; | 3221 | int rc; |
2961 | 3222 | ||
2962 | fmr->qplib_fmr.va = iova; | 3223 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); |
2963 | fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; | 3224 | if (rc) { |
2964 | 3225 | dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); | |
2965 | rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, | 3226 | return rc; |
2966 | list_len, true); | ||
2967 | if (rc) | ||
2968 | dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!", | ||
2969 | fmr->ib_fmr.lkey); | ||
2970 | return rc; | ||
2971 | } | ||
2972 | |||
2973 | int bnxt_re_unmap_fmr(struct list_head *fmr_list) | ||
2974 | { | ||
2975 | struct bnxt_re_dev *rdev; | ||
2976 | struct bnxt_re_fmr *fmr; | ||
2977 | struct ib_fmr *ib_fmr; | ||
2978 | int rc = 0; | ||
2979 | |||
2980 | /* Validate each FMRs inside the fmr_list */ | ||
2981 | list_for_each_entry(ib_fmr, fmr_list, list) { | ||
2982 | fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr); | ||
2983 | rdev = fmr->rdev; | ||
2984 | |||
2985 | if (rdev) { | ||
2986 | rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, | ||
2987 | &fmr->qplib_fmr, true); | ||
2988 | if (rc) | ||
2989 | break; | ||
2990 | } | ||
2991 | } | 3227 | } |
2992 | return rc; | ||
2993 | } | ||
2994 | |||
2995 | int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) | ||
2996 | { | ||
2997 | struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, | ||
2998 | ib_fmr); | ||
2999 | struct bnxt_re_dev *rdev = fmr->rdev; | ||
3000 | int rc; | ||
3001 | 3228 | ||
3002 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); | 3229 | kfree(mw); |
3003 | if (rc) | 3230 | atomic_dec(&rdev->mw_count); |
3004 | dev_err(rdev_to_dev(rdev), "Failed to free FMR"); | ||
3005 | |||
3006 | kfree(fmr); | ||
3007 | atomic_dec(&rdev->mr_count); | ||
3008 | return rc; | 3231 | return rc; |
3009 | } | 3232 | } |
3010 | 3233 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 5c3d71765454..6c160f6a5398 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h | |||
@@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx { | |||
44 | u32 refcnt; | 44 | u32 refcnt; |
45 | }; | 45 | }; |
46 | 46 | ||
47 | #define BNXT_RE_FENCE_BYTES 64 | ||
48 | struct bnxt_re_fence_data { | ||
49 | u32 size; | ||
50 | u8 va[BNXT_RE_FENCE_BYTES]; | ||
51 | dma_addr_t dma_addr; | ||
52 | struct bnxt_re_mr *mr; | ||
53 | struct ib_mw *mw; | ||
54 | struct bnxt_qplib_swqe bind_wqe; | ||
55 | u32 bind_rkey; | ||
56 | }; | ||
57 | |||
47 | struct bnxt_re_pd { | 58 | struct bnxt_re_pd { |
48 | struct bnxt_re_dev *rdev; | 59 | struct bnxt_re_dev *rdev; |
49 | struct ib_pd ib_pd; | 60 | struct ib_pd ib_pd; |
50 | struct bnxt_qplib_pd qplib_pd; | 61 | struct bnxt_qplib_pd qplib_pd; |
51 | struct bnxt_qplib_dpi dpi; | 62 | struct bnxt_qplib_dpi dpi; |
63 | struct bnxt_re_fence_data fence; | ||
52 | }; | 64 | }; |
53 | 65 | ||
54 | struct bnxt_re_ah { | 66 | struct bnxt_re_ah { |
@@ -62,6 +74,7 @@ struct bnxt_re_qp { | |||
62 | struct bnxt_re_dev *rdev; | 74 | struct bnxt_re_dev *rdev; |
63 | struct ib_qp ib_qp; | 75 | struct ib_qp ib_qp; |
64 | spinlock_t sq_lock; /* protect sq */ | 76 | spinlock_t sq_lock; /* protect sq */ |
77 | spinlock_t rq_lock; /* protect rq */ | ||
65 | struct bnxt_qplib_qp qplib_qp; | 78 | struct bnxt_qplib_qp qplib_qp; |
66 | struct ib_umem *sumem; | 79 | struct ib_umem *sumem; |
67 | struct ib_umem *rumem; | 80 | struct ib_umem *rumem; |
@@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, | |||
181 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, | 194 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, |
182 | u32 max_num_sg); | 195 | u32 max_num_sg); |
183 | int bnxt_re_dereg_mr(struct ib_mr *mr); | 196 | int bnxt_re_dereg_mr(struct ib_mr *mr); |
184 | struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | 197 | struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, |
185 | struct ib_fmr_attr *fmr_attr); | 198 | struct ib_udata *udata); |
186 | int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, | 199 | int bnxt_re_dealloc_mw(struct ib_mw *mw); |
187 | u64 iova); | ||
188 | int bnxt_re_unmap_fmr(struct list_head *fmr_list); | ||
189 | int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); | ||
190 | struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | 200 | struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
191 | u64 virt_addr, int mr_access_flags, | 201 | u64 virt_addr, int mr_access_flags, |
192 | struct ib_udata *udata); | 202 | struct ib_udata *udata); |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 5d355401179b..1fce5e73216b 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
@@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) | |||
507 | ibdev->dereg_mr = bnxt_re_dereg_mr; | 507 | ibdev->dereg_mr = bnxt_re_dereg_mr; |
508 | ibdev->alloc_mr = bnxt_re_alloc_mr; | 508 | ibdev->alloc_mr = bnxt_re_alloc_mr; |
509 | ibdev->map_mr_sg = bnxt_re_map_mr_sg; | 509 | ibdev->map_mr_sg = bnxt_re_map_mr_sg; |
510 | ibdev->alloc_fmr = bnxt_re_alloc_fmr; | ||
511 | ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; | ||
512 | ibdev->unmap_fmr = bnxt_re_unmap_fmr; | ||
513 | ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; | ||
514 | 510 | ||
515 | ibdev->reg_user_mr = bnxt_re_reg_user_mr; | 511 | ibdev->reg_user_mr = bnxt_re_reg_user_mr; |
516 | ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; | 512 | ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 43d08b5e9085..f05500bcdcf1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c | |||
@@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
284 | { | 284 | { |
285 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 285 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
286 | struct cmdq_create_qp1 req; | 286 | struct cmdq_create_qp1 req; |
287 | struct creq_create_qp1_resp *resp; | 287 | struct creq_create_qp1_resp resp; |
288 | struct bnxt_qplib_pbl *pbl; | 288 | struct bnxt_qplib_pbl *pbl; |
289 | struct bnxt_qplib_q *sq = &qp->sq; | 289 | struct bnxt_qplib_q *sq = &qp->sq; |
290 | struct bnxt_qplib_q *rq = &qp->rq; | 290 | struct bnxt_qplib_q *rq = &qp->rq; |
@@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
394 | 394 | ||
395 | req.pd_id = cpu_to_le32(qp->pd->id); | 395 | req.pd_id = cpu_to_le32(qp->pd->id); |
396 | 396 | ||
397 | resp = (struct creq_create_qp1_resp *) | 397 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
398 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 398 | (void *)&resp, NULL, 0); |
399 | NULL, 0); | 399 | if (rc) |
400 | if (!resp) { | ||
401 | dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed"); | ||
402 | rc = -EINVAL; | ||
403 | goto fail; | ||
404 | } | ||
405 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
406 | /* Cmd timed out */ | ||
407 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out"); | ||
408 | rc = -ETIMEDOUT; | ||
409 | goto fail; | ||
410 | } | ||
411 | if (resp->status || | ||
412 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
413 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed "); | ||
414 | dev_err(&rcfw->pdev->dev, | ||
415 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
416 | resp->status, le16_to_cpu(req.cookie), | ||
417 | le16_to_cpu(resp->cookie)); | ||
418 | rc = -EINVAL; | ||
419 | goto fail; | 400 | goto fail; |
420 | } | 401 | |
421 | qp->id = le32_to_cpu(resp->xid); | 402 | qp->id = le32_to_cpu(resp.xid); |
422 | qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; | 403 | qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; |
423 | sq->flush_in_progress = false; | 404 | sq->flush_in_progress = false; |
424 | rq->flush_in_progress = false; | 405 | rq->flush_in_progress = false; |
@@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
442 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 423 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
443 | struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; | 424 | struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; |
444 | struct cmdq_create_qp req; | 425 | struct cmdq_create_qp req; |
445 | struct creq_create_qp_resp *resp; | 426 | struct creq_create_qp_resp resp; |
446 | struct bnxt_qplib_pbl *pbl; | 427 | struct bnxt_qplib_pbl *pbl; |
447 | struct sq_psn_search **psn_search_ptr; | 428 | struct sq_psn_search **psn_search_ptr; |
448 | unsigned long int psn_search, poff = 0; | 429 | unsigned long int psn_search, poff = 0; |
@@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
627 | } | 608 | } |
628 | req.pd_id = cpu_to_le32(qp->pd->id); | 609 | req.pd_id = cpu_to_le32(qp->pd->id); |
629 | 610 | ||
630 | resp = (struct creq_create_qp_resp *) | 611 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
631 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 612 | (void *)&resp, NULL, 0); |
632 | NULL, 0); | 613 | if (rc) |
633 | if (!resp) { | ||
634 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed"); | ||
635 | rc = -EINVAL; | ||
636 | goto fail; | ||
637 | } | ||
638 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
639 | /* Cmd timed out */ | ||
640 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out"); | ||
641 | rc = -ETIMEDOUT; | ||
642 | goto fail; | ||
643 | } | ||
644 | if (resp->status || | ||
645 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
646 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed "); | ||
647 | dev_err(&rcfw->pdev->dev, | ||
648 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
649 | resp->status, le16_to_cpu(req.cookie), | ||
650 | le16_to_cpu(resp->cookie)); | ||
651 | rc = -EINVAL; | ||
652 | goto fail; | 614 | goto fail; |
653 | } | 615 | |
654 | qp->id = le32_to_cpu(resp->xid); | 616 | qp->id = le32_to_cpu(resp.xid); |
655 | qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; | 617 | qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; |
656 | sq->flush_in_progress = false; | 618 | sq->flush_in_progress = false; |
657 | rq->flush_in_progress = false; | 619 | rq->flush_in_progress = false; |
@@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
769 | { | 731 | { |
770 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 732 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
771 | struct cmdq_modify_qp req; | 733 | struct cmdq_modify_qp req; |
772 | struct creq_modify_qp_resp *resp; | 734 | struct creq_modify_qp_resp resp; |
773 | u16 cmd_flags = 0, pkey; | 735 | u16 cmd_flags = 0, pkey; |
774 | u32 temp32[4]; | 736 | u32 temp32[4]; |
775 | u32 bmask; | 737 | u32 bmask; |
738 | int rc; | ||
776 | 739 | ||
777 | RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); | 740 | RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); |
778 | 741 | ||
@@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
862 | 825 | ||
863 | req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); | 826 | req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); |
864 | 827 | ||
865 | resp = (struct creq_modify_qp_resp *) | 828 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
866 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 829 | (void *)&resp, NULL, 0); |
867 | NULL, 0); | 830 | if (rc) |
868 | if (!resp) { | 831 | return rc; |
869 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed"); | ||
870 | return -EINVAL; | ||
871 | } | ||
872 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
873 | /* Cmd timed out */ | ||
874 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out"); | ||
875 | return -ETIMEDOUT; | ||
876 | } | ||
877 | if (resp->status || | ||
878 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
879 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed "); | ||
880 | dev_err(&rcfw->pdev->dev, | ||
881 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
882 | resp->status, le16_to_cpu(req.cookie), | ||
883 | le16_to_cpu(resp->cookie)); | ||
884 | return -EINVAL; | ||
885 | } | ||
886 | qp->cur_qp_state = qp->state; | 832 | qp->cur_qp_state = qp->state; |
887 | return 0; | 833 | return 0; |
888 | } | 834 | } |
@@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
891 | { | 837 | { |
892 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 838 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
893 | struct cmdq_query_qp req; | 839 | struct cmdq_query_qp req; |
894 | struct creq_query_qp_resp *resp; | 840 | struct creq_query_qp_resp resp; |
841 | struct bnxt_qplib_rcfw_sbuf *sbuf; | ||
895 | struct creq_query_qp_resp_sb *sb; | 842 | struct creq_query_qp_resp_sb *sb; |
896 | u16 cmd_flags = 0; | 843 | u16 cmd_flags = 0; |
897 | u32 temp32[4]; | 844 | u32 temp32[4]; |
898 | int i; | 845 | int i, rc = 0; |
899 | 846 | ||
900 | RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); | 847 | RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); |
901 | 848 | ||
849 | sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); | ||
850 | if (!sbuf) | ||
851 | return -ENOMEM; | ||
852 | sb = sbuf->sb; | ||
853 | |||
902 | req.qp_cid = cpu_to_le32(qp->id); | 854 | req.qp_cid = cpu_to_le32(qp->id); |
903 | req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; | 855 | req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; |
904 | resp = (struct creq_query_qp_resp *) | 856 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
905 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 857 | (void *)sbuf, 0); |
906 | (void **)&sb, 0); | 858 | if (rc) |
907 | if (!resp) { | 859 | goto bail; |
908 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed"); | ||
909 | return -EINVAL; | ||
910 | } | ||
911 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
912 | /* Cmd timed out */ | ||
913 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out"); | ||
914 | return -ETIMEDOUT; | ||
915 | } | ||
916 | if (resp->status || | ||
917 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
918 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed "); | ||
919 | dev_err(&rcfw->pdev->dev, | ||
920 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
921 | resp->status, le16_to_cpu(req.cookie), | ||
922 | le16_to_cpu(resp->cookie)); | ||
923 | return -EINVAL; | ||
924 | } | ||
925 | /* Extract the context from the side buffer */ | 860 | /* Extract the context from the side buffer */ |
926 | qp->state = sb->en_sqd_async_notify_state & | 861 | qp->state = sb->en_sqd_async_notify_state & |
927 | CREQ_QUERY_QP_RESP_SB_STATE_MASK; | 862 | CREQ_QUERY_QP_RESP_SB_STATE_MASK; |
@@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) | |||
976 | qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); | 911 | qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); |
977 | memcpy(qp->smac, sb->src_mac, 6); | 912 | memcpy(qp->smac, sb->src_mac, 6); |
978 | qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); | 913 | qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); |
979 | return 0; | 914 | bail: |
915 | bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); | ||
916 | return rc; | ||
980 | } | 917 | } |
981 | 918 | ||
982 | static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) | 919 | static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) |
@@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, | |||
1021 | { | 958 | { |
1022 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 959 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
1023 | struct cmdq_destroy_qp req; | 960 | struct cmdq_destroy_qp req; |
1024 | struct creq_destroy_qp_resp *resp; | 961 | struct creq_destroy_qp_resp resp; |
1025 | unsigned long flags; | 962 | unsigned long flags; |
1026 | u16 cmd_flags = 0; | 963 | u16 cmd_flags = 0; |
964 | int rc; | ||
1027 | 965 | ||
1028 | RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); | 966 | RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); |
1029 | 967 | ||
1030 | req.qp_cid = cpu_to_le32(qp->id); | 968 | req.qp_cid = cpu_to_le32(qp->id); |
1031 | resp = (struct creq_destroy_qp_resp *) | 969 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
1032 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 970 | (void *)&resp, NULL, 0); |
1033 | NULL, 0); | 971 | if (rc) |
1034 | if (!resp) { | 972 | return rc; |
1035 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed"); | ||
1036 | return -EINVAL; | ||
1037 | } | ||
1038 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
1039 | /* Cmd timed out */ | ||
1040 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out"); | ||
1041 | return -ETIMEDOUT; | ||
1042 | } | ||
1043 | if (resp->status || | ||
1044 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
1045 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed "); | ||
1046 | dev_err(&rcfw->pdev->dev, | ||
1047 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
1048 | resp->status, le16_to_cpu(req.cookie), | ||
1049 | le16_to_cpu(resp->cookie)); | ||
1050 | return -EINVAL; | ||
1051 | } | ||
1052 | 973 | ||
1053 | /* Must walk the associated CQs to nullify the QP ptr */ | 974 | /* Must walk the associated CQs to nullify the QP ptr */ |
1054 | spin_lock_irqsave(&qp->scq->hwq.lock, flags); | 975 | spin_lock_irqsave(&qp->scq->hwq.lock, flags); |
@@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, | |||
1162 | rc = -EINVAL; | 1083 | rc = -EINVAL; |
1163 | goto done; | 1084 | goto done; |
1164 | } | 1085 | } |
1165 | if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == | 1086 | |
1166 | HWQ_CMP(sq->hwq.cons, &sq->hwq)) { | 1087 | if (bnxt_qplib_queue_full(sq)) { |
1088 | dev_err(&sq->hwq.pdev->dev, | ||
1089 | "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x", | ||
1090 | sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements, | ||
1091 | sq->q_full_delta); | ||
1167 | rc = -ENOMEM; | 1092 | rc = -ENOMEM; |
1168 | goto done; | 1093 | goto done; |
1169 | } | 1094 | } |
@@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, | |||
1373 | } | 1298 | } |
1374 | 1299 | ||
1375 | sq->hwq.prod++; | 1300 | sq->hwq.prod++; |
1301 | |||
1302 | qp->wqe_cnt++; | ||
1303 | |||
1376 | done: | 1304 | done: |
1377 | return rc; | 1305 | return rc; |
1378 | } | 1306 | } |
@@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, | |||
1411 | rc = -EINVAL; | 1339 | rc = -EINVAL; |
1412 | goto done; | 1340 | goto done; |
1413 | } | 1341 | } |
1414 | if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == | 1342 | if (bnxt_qplib_queue_full(rq)) { |
1415 | HWQ_CMP(rq->hwq.cons, &rq->hwq)) { | ||
1416 | dev_err(&rq->hwq.pdev->dev, | 1343 | dev_err(&rq->hwq.pdev->dev, |
1417 | "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); | 1344 | "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); |
1418 | rc = -EINVAL; | 1345 | rc = -EINVAL; |
@@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) | |||
1483 | { | 1410 | { |
1484 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 1411 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
1485 | struct cmdq_create_cq req; | 1412 | struct cmdq_create_cq req; |
1486 | struct creq_create_cq_resp *resp; | 1413 | struct creq_create_cq_resp resp; |
1487 | struct bnxt_qplib_pbl *pbl; | 1414 | struct bnxt_qplib_pbl *pbl; |
1488 | u16 cmd_flags = 0; | 1415 | u16 cmd_flags = 0; |
1489 | int rc; | 1416 | int rc; |
@@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) | |||
1525 | (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << | 1452 | (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << |
1526 | CMDQ_CREATE_CQ_CNQ_ID_SFT); | 1453 | CMDQ_CREATE_CQ_CNQ_ID_SFT); |
1527 | 1454 | ||
1528 | resp = (struct creq_create_cq_resp *) | 1455 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
1529 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 1456 | (void *)&resp, NULL, 0); |
1530 | NULL, 0); | 1457 | if (rc) |
1531 | if (!resp) { | ||
1532 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed"); | ||
1533 | return -EINVAL; | ||
1534 | } | ||
1535 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
1536 | /* Cmd timed out */ | ||
1537 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out"); | ||
1538 | rc = -ETIMEDOUT; | ||
1539 | goto fail; | ||
1540 | } | ||
1541 | if (resp->status || | ||
1542 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
1543 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed "); | ||
1544 | dev_err(&rcfw->pdev->dev, | ||
1545 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
1546 | resp->status, le16_to_cpu(req.cookie), | ||
1547 | le16_to_cpu(resp->cookie)); | ||
1548 | rc = -EINVAL; | ||
1549 | goto fail; | 1458 | goto fail; |
1550 | } | 1459 | |
1551 | cq->id = le32_to_cpu(resp->xid); | 1460 | cq->id = le32_to_cpu(resp.xid); |
1552 | cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; | 1461 | cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; |
1553 | cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; | 1462 | cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; |
1554 | init_waitqueue_head(&cq->waitq); | 1463 | init_waitqueue_head(&cq->waitq); |
@@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) | |||
1566 | { | 1475 | { |
1567 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 1476 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
1568 | struct cmdq_destroy_cq req; | 1477 | struct cmdq_destroy_cq req; |
1569 | struct creq_destroy_cq_resp *resp; | 1478 | struct creq_destroy_cq_resp resp; |
1570 | u16 cmd_flags = 0; | 1479 | u16 cmd_flags = 0; |
1480 | int rc; | ||
1571 | 1481 | ||
1572 | RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); | 1482 | RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); |
1573 | 1483 | ||
1574 | req.cq_cid = cpu_to_le32(cq->id); | 1484 | req.cq_cid = cpu_to_le32(cq->id); |
1575 | resp = (struct creq_destroy_cq_resp *) | 1485 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
1576 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 1486 | (void *)&resp, NULL, 0); |
1577 | NULL, 0); | 1487 | if (rc) |
1578 | if (!resp) { | 1488 | return rc; |
1579 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed"); | ||
1580 | return -EINVAL; | ||
1581 | } | ||
1582 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
1583 | /* Cmd timed out */ | ||
1584 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out"); | ||
1585 | return -ETIMEDOUT; | ||
1586 | } | ||
1587 | if (resp->status || | ||
1588 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
1589 | dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed "); | ||
1590 | dev_err(&rcfw->pdev->dev, | ||
1591 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
1592 | resp->status, le16_to_cpu(req.cookie), | ||
1593 | le16_to_cpu(resp->cookie)); | ||
1594 | return -EINVAL; | ||
1595 | } | ||
1596 | bnxt_qplib_free_hwq(res->pdev, &cq->hwq); | 1489 | bnxt_qplib_free_hwq(res->pdev, &cq->hwq); |
1597 | return 0; | 1490 | return 0; |
1598 | } | 1491 | } |
@@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, | |||
1664 | return rc; | 1557 | return rc; |
1665 | } | 1558 | } |
1666 | 1559 | ||
1560 | /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) | ||
1561 | * CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1 | ||
1562 | */ | ||
1563 | static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, | ||
1564 | u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons) | ||
1565 | { | ||
1566 | struct bnxt_qplib_q *sq = &qp->sq; | ||
1567 | struct bnxt_qplib_swq *swq; | ||
1568 | u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; | ||
1569 | struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr; | ||
1570 | struct cq_req *peek_req_hwcqe; | ||
1571 | struct bnxt_qplib_qp *peek_qp; | ||
1572 | struct bnxt_qplib_q *peek_sq; | ||
1573 | int i, rc = 0; | ||
1574 | |||
1575 | /* Normal mode */ | ||
1576 | /* Check for the psn_search marking before completing */ | ||
1577 | swq = &sq->swq[sw_sq_cons]; | ||
1578 | if (swq->psn_search && | ||
1579 | le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { | ||
1580 | /* Unmark */ | ||
1581 | swq->psn_search->flags_next_psn = cpu_to_le32 | ||
1582 | (le32_to_cpu(swq->psn_search->flags_next_psn) | ||
1583 | & ~0x80000000); | ||
1584 | dev_dbg(&cq->hwq.pdev->dev, | ||
1585 | "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", | ||
1586 | cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); | ||
1587 | sq->condition = true; | ||
1588 | sq->send_phantom = true; | ||
1589 | |||
1590 | /* TODO: Only ARM if the previous SQE is ARMALL */ | ||
1591 | bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL); | ||
1592 | |||
1593 | rc = -EAGAIN; | ||
1594 | goto out; | ||
1595 | } | ||
1596 | if (sq->condition) { | ||
1597 | /* Peek at the completions */ | ||
1598 | peek_raw_cq_cons = cq->hwq.cons; | ||
1599 | peek_sw_cq_cons = cq_cons; | ||
1600 | i = cq->hwq.max_elements; | ||
1601 | while (i--) { | ||
1602 | peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); | ||
1603 | peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; | ||
1604 | peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)] | ||
1605 | [CQE_IDX(peek_sw_cq_cons)]; | ||
1606 | /* If the next hwcqe is VALID */ | ||
1607 | if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, | ||
1608 | cq->hwq.max_elements)) { | ||
1609 | /* If the next hwcqe is a REQ */ | ||
1610 | if ((peek_hwcqe->cqe_type_toggle & | ||
1611 | CQ_BASE_CQE_TYPE_MASK) == | ||
1612 | CQ_BASE_CQE_TYPE_REQ) { | ||
1613 | peek_req_hwcqe = (struct cq_req *) | ||
1614 | peek_hwcqe; | ||
1615 | peek_qp = (struct bnxt_qplib_qp *) | ||
1616 | ((unsigned long) | ||
1617 | le64_to_cpu | ||
1618 | (peek_req_hwcqe->qp_handle)); | ||
1619 | peek_sq = &peek_qp->sq; | ||
1620 | peek_sq_cons_idx = HWQ_CMP(le16_to_cpu( | ||
1621 | peek_req_hwcqe->sq_cons_idx) - 1 | ||
1622 | , &sq->hwq); | ||
1623 | /* If the hwcqe's sq's wr_id matches */ | ||
1624 | if (peek_sq == sq && | ||
1625 | sq->swq[peek_sq_cons_idx].wr_id == | ||
1626 | BNXT_QPLIB_FENCE_WRID) { | ||
1627 | /* | ||
1628 | * Unbreak only if the phantom | ||
1629 | * comes back | ||
1630 | */ | ||
1631 | dev_dbg(&cq->hwq.pdev->dev, | ||
1632 | "FP:Got Phantom CQE"); | ||
1633 | sq->condition = false; | ||
1634 | sq->single = true; | ||
1635 | rc = 0; | ||
1636 | goto out; | ||
1637 | } | ||
1638 | } | ||
1639 | /* Valid but not the phantom, so keep looping */ | ||
1640 | } else { | ||
1641 | /* Not valid yet, just exit and wait */ | ||
1642 | rc = -EINVAL; | ||
1643 | goto out; | ||
1644 | } | ||
1645 | peek_sw_cq_cons++; | ||
1646 | peek_raw_cq_cons++; | ||
1647 | } | ||
1648 | dev_err(&cq->hwq.pdev->dev, | ||
1649 | "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", | ||
1650 | cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); | ||
1651 | rc = -EINVAL; | ||
1652 | } | ||
1653 | out: | ||
1654 | return rc; | ||
1655 | } | ||
1656 | |||
1667 | static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, | 1657 | static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, |
1668 | struct cq_req *hwcqe, | 1658 | struct cq_req *hwcqe, |
1669 | struct bnxt_qplib_cqe **pcqe, int *budget) | 1659 | struct bnxt_qplib_cqe **pcqe, int *budget, |
1660 | u32 cq_cons, struct bnxt_qplib_qp **lib_qp) | ||
1670 | { | 1661 | { |
1671 | struct bnxt_qplib_qp *qp; | 1662 | struct bnxt_qplib_qp *qp; |
1672 | struct bnxt_qplib_q *sq; | 1663 | struct bnxt_qplib_q *sq; |
1673 | struct bnxt_qplib_cqe *cqe; | 1664 | struct bnxt_qplib_cqe *cqe; |
1674 | u32 sw_cons, cqe_cons; | 1665 | u32 sw_sq_cons, cqe_sq_cons; |
1666 | struct bnxt_qplib_swq *swq; | ||
1675 | int rc = 0; | 1667 | int rc = 0; |
1676 | 1668 | ||
1677 | qp = (struct bnxt_qplib_qp *)((unsigned long) | 1669 | qp = (struct bnxt_qplib_qp *)((unsigned long) |
@@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, | |||
1683 | } | 1675 | } |
1684 | sq = &qp->sq; | 1676 | sq = &qp->sq; |
1685 | 1677 | ||
1686 | cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); | 1678 | cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); |
1687 | if (cqe_cons > sq->hwq.max_elements) { | 1679 | if (cqe_sq_cons > sq->hwq.max_elements) { |
1688 | dev_err(&cq->hwq.pdev->dev, | 1680 | dev_err(&cq->hwq.pdev->dev, |
1689 | "QPLIB: FP: CQ Process req reported "); | 1681 | "QPLIB: FP: CQ Process req reported "); |
1690 | dev_err(&cq->hwq.pdev->dev, | 1682 | dev_err(&cq->hwq.pdev->dev, |
1691 | "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", | 1683 | "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", |
1692 | cqe_cons, sq->hwq.max_elements); | 1684 | cqe_sq_cons, sq->hwq.max_elements); |
1693 | return -EINVAL; | 1685 | return -EINVAL; |
1694 | } | 1686 | } |
1695 | /* If we were in the middle of flushing the SQ, continue */ | 1687 | /* If we were in the middle of flushing the SQ, continue */ |
@@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, | |||
1698 | 1690 | ||
1699 | /* Need to walk the sq's swq to fabricate CQEs for all previously | 1691 | /* Need to walk the sq's swq to fabricate CQEs for all previously |
1700 | * signaled SWQEs due to CQE aggregation from the current sq cons | 1692 | * signaled SWQEs due to CQE aggregation from the current sq cons |
1701 | * to the cqe_cons | 1693 | * to the cqe_sq_cons |
1702 | */ | 1694 | */ |
1703 | cqe = *pcqe; | 1695 | cqe = *pcqe; |
1704 | while (*budget) { | 1696 | while (*budget) { |
1705 | sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); | 1697 | sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); |
1706 | if (sw_cons == cqe_cons) | 1698 | if (sw_sq_cons == cqe_sq_cons) |
1699 | /* Done */ | ||
1707 | break; | 1700 | break; |
1701 | |||
1702 | swq = &sq->swq[sw_sq_cons]; | ||
1708 | memset(cqe, 0, sizeof(*cqe)); | 1703 | memset(cqe, 0, sizeof(*cqe)); |
1709 | cqe->opcode = CQ_BASE_CQE_TYPE_REQ; | 1704 | cqe->opcode = CQ_BASE_CQE_TYPE_REQ; |
1710 | cqe->qp_handle = (u64)(unsigned long)qp; | 1705 | cqe->qp_handle = (u64)(unsigned long)qp; |
1711 | cqe->src_qp = qp->id; | 1706 | cqe->src_qp = qp->id; |
1712 | cqe->wr_id = sq->swq[sw_cons].wr_id; | 1707 | cqe->wr_id = swq->wr_id; |
1713 | cqe->type = sq->swq[sw_cons].type; | 1708 | if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) |
1709 | goto skip; | ||
1710 | cqe->type = swq->type; | ||
1714 | 1711 | ||
1715 | /* For the last CQE, check for status. For errors, regardless | 1712 | /* For the last CQE, check for status. For errors, regardless |
1716 | * of the request being signaled or not, it must complete with | 1713 | * of the request being signaled or not, it must complete with |
1717 | * the hwcqe error status | 1714 | * the hwcqe error status |
1718 | */ | 1715 | */ |
1719 | if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && | 1716 | if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons && |
1720 | hwcqe->status != CQ_REQ_STATUS_OK) { | 1717 | hwcqe->status != CQ_REQ_STATUS_OK) { |
1721 | cqe->status = hwcqe->status; | 1718 | cqe->status = hwcqe->status; |
1722 | dev_err(&cq->hwq.pdev->dev, | 1719 | dev_err(&cq->hwq.pdev->dev, |
1723 | "QPLIB: FP: CQ Processed Req "); | 1720 | "QPLIB: FP: CQ Processed Req "); |
1724 | dev_err(&cq->hwq.pdev->dev, | 1721 | dev_err(&cq->hwq.pdev->dev, |
1725 | "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", | 1722 | "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", |
1726 | sw_cons, cqe->wr_id, cqe->status); | 1723 | sw_sq_cons, cqe->wr_id, cqe->status); |
1727 | cqe++; | 1724 | cqe++; |
1728 | (*budget)--; | 1725 | (*budget)--; |
1729 | sq->flush_in_progress = true; | 1726 | sq->flush_in_progress = true; |
1730 | /* Must block new posting of SQ and RQ */ | 1727 | /* Must block new posting of SQ and RQ */ |
1731 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 1728 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
1729 | sq->condition = false; | ||
1730 | sq->single = false; | ||
1732 | } else { | 1731 | } else { |
1733 | if (sq->swq[sw_cons].flags & | 1732 | if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { |
1734 | SQ_SEND_FLAGS_SIGNAL_COMP) { | 1733 | /* Before we complete, do WA 9060 */ |
1734 | if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, | ||
1735 | cqe_sq_cons)) { | ||
1736 | *lib_qp = qp; | ||
1737 | goto out; | ||
1738 | } | ||
1735 | cqe->status = CQ_REQ_STATUS_OK; | 1739 | cqe->status = CQ_REQ_STATUS_OK; |
1736 | cqe++; | 1740 | cqe++; |
1737 | (*budget)--; | 1741 | (*budget)--; |
1738 | } | 1742 | } |
1739 | } | 1743 | } |
1744 | skip: | ||
1740 | sq->hwq.cons++; | 1745 | sq->hwq.cons++; |
1746 | if (sq->single) | ||
1747 | break; | ||
1741 | } | 1748 | } |
1749 | out: | ||
1742 | *pcqe = cqe; | 1750 | *pcqe = cqe; |
1743 | if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { | 1751 | if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) { |
1744 | /* Out of budget */ | 1752 | /* Out of budget */ |
1745 | rc = -EAGAIN; | 1753 | rc = -EAGAIN; |
1746 | goto done; | 1754 | goto done; |
1747 | } | 1755 | } |
1756 | /* | ||
1757 | * Back to normal completion mode only after it has completed all of | ||
1758 | * the WC for this CQE | ||
1759 | */ | ||
1760 | sq->single = false; | ||
1748 | if (!sq->flush_in_progress) | 1761 | if (!sq->flush_in_progress) |
1749 | goto done; | 1762 | goto done; |
1750 | flush: | 1763 | flush: |
@@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, | |||
2074 | } | 2087 | } |
2075 | 2088 | ||
2076 | int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | 2089 | int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, |
2077 | int num_cqes) | 2090 | int num_cqes, struct bnxt_qplib_qp **lib_qp) |
2078 | { | 2091 | { |
2079 | struct cq_base *hw_cqe, **hw_cqe_ptr; | 2092 | struct cq_base *hw_cqe, **hw_cqe_ptr; |
2080 | unsigned long flags; | 2093 | unsigned long flags; |
@@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | |||
2099 | case CQ_BASE_CQE_TYPE_REQ: | 2112 | case CQ_BASE_CQE_TYPE_REQ: |
2100 | rc = bnxt_qplib_cq_process_req(cq, | 2113 | rc = bnxt_qplib_cq_process_req(cq, |
2101 | (struct cq_req *)hw_cqe, | 2114 | (struct cq_req *)hw_cqe, |
2102 | &cqe, &budget); | 2115 | &cqe, &budget, |
2116 | sw_cons, lib_qp); | ||
2103 | break; | 2117 | break; |
2104 | case CQ_BASE_CQE_TYPE_RES_RC: | 2118 | case CQ_BASE_CQE_TYPE_RES_RC: |
2105 | rc = bnxt_qplib_cq_process_res_rc(cq, | 2119 | rc = bnxt_qplib_cq_process_res_rc(cq, |
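Editor's note: bnxt_qplib_poll_cq() now carries a struct bnxt_qplib_qp ** out-parameter so the REQ-completion path can hand back the QP that do_wa9060() parked. A minimal caller-side sketch of the new contract follows; only the changed signature comes from this patch, and the send_phantom_wqe() helper name is a hypothetical stand-in for whatever the caller does next.

/* Sketch: polling with the new lib_qp out-parameter (assumptions noted above). */
static int example_poll(struct bnxt_qplib_cq *cq,
			struct bnxt_qplib_cqe *cqes, int budget)
{
	struct bnxt_qplib_qp *lib_qp = NULL;
	int ncqe;

	ncqe = bnxt_qplib_poll_cq(cq, cqes, budget, &lib_qp);
	if (lib_qp) {
		/* A QP hit the WA-9060 condition; the caller is expected to
		 * push a phantom WQE so the missing completion shows up
		 * (helper name below is hypothetical).
		 */
		send_phantom_wqe(lib_qp);
	}
	return ncqe;
}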
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index f0150f8da1e3..36b7b7db0e3f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h | |||
@@ -88,6 +88,7 @@ struct bnxt_qplib_swq { | |||
88 | 88 | ||
89 | struct bnxt_qplib_swqe { | 89 | struct bnxt_qplib_swqe { |
90 | /* General */ | 90 | /* General */ |
91 | #define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ | ||
91 | u64 wr_id; | 92 | u64 wr_id; |
92 | u8 reqs_type; | 93 | u8 reqs_type; |
93 | u8 type; | 94 | u8 type; |
@@ -216,9 +217,16 @@ struct bnxt_qplib_q { | |||
216 | struct scatterlist *sglist; | 217 | struct scatterlist *sglist; |
217 | u32 nmap; | 218 | u32 nmap; |
218 | u32 max_wqe; | 219 | u32 max_wqe; |
220 | u16 q_full_delta; | ||
219 | u16 max_sge; | 221 | u16 max_sge; |
220 | u32 psn; | 222 | u32 psn; |
221 | bool flush_in_progress; | 223 | bool flush_in_progress; |
224 | bool condition; | ||
225 | bool single; | ||
226 | bool send_phantom; | ||
227 | u32 phantom_wqe_cnt; | ||
228 | u32 phantom_cqe_cnt; | ||
229 | u32 next_cq_cons; | ||
222 | }; | 230 | }; |
223 | 231 | ||
224 | struct bnxt_qplib_qp { | 232 | struct bnxt_qplib_qp { |
@@ -242,6 +250,7 @@ struct bnxt_qplib_qp { | |||
242 | u8 timeout; | 250 | u8 timeout; |
243 | u8 retry_cnt; | 251 | u8 retry_cnt; |
244 | u8 rnr_retry; | 252 | u8 rnr_retry; |
253 | u64 wqe_cnt; | ||
245 | u32 min_rnr_timer; | 254 | u32 min_rnr_timer; |
246 | u32 max_rd_atomic; | 255 | u32 max_rd_atomic; |
247 | u32 max_dest_rd_atomic; | 256 | u32 max_dest_rd_atomic; |
@@ -301,6 +310,13 @@ struct bnxt_qplib_qp { | |||
301 | (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ | 310 | (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ |
302 | !((raw_cons) & (cp_bit))) | 311 | !((raw_cons) & (cp_bit))) |
303 | 312 | ||
313 | static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q) | ||
314 | { | ||
315 | return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta), | ||
316 | &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons, | ||
317 | &qplib_q->hwq); | ||
318 | } | ||
319 | |||
304 | struct bnxt_qplib_cqe { | 320 | struct bnxt_qplib_cqe { |
305 | u8 status; | 321 | u8 status; |
306 | u8 type; | 322 | u8 type; |
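Editor's note: bnxt_qplib_queue_full() replaces the old open-coded "prod + 1 == cons" test; the queue now reports full q_full_delta slots early, keeping the WQEs reserved for the hardware (the 128 reserved WRs noted in the qplib_sp.c hunk later in this patch) out of reach of posters. A standalone sketch of the same power-of-two ring arithmetic, with illustrative sizes only:

#include <stdio.h>

/* Mirror of HWQ_CMP()/bnxt_qplib_queue_full() on a plain ring. */
#define RING_CMP(idx, max)	((idx) & ((max) - 1))

static int ring_full(unsigned int prod, unsigned int cons,
		     unsigned int max_elements, unsigned int q_full_delta)
{
	/* Full when prod, advanced by the reserved delta, wraps onto cons. */
	return RING_CMP(prod + q_full_delta, max_elements) ==
	       RING_CMP(cons, max_elements);
}

int main(void)
{
	unsigned int max = 8, delta = 2;

	/* cons = 0: with 6 entries posted the ring already reports full,
	 * leaving 'delta' slots spare for the hardware.
	 */
	printf("prod=5 full=%d\n", ring_full(5, 0, max, delta)); /* prints 0 */
	printf("prod=6 full=%d\n", ring_full(6, 0, max, delta)); /* prints 1 */
	return 0;
}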
@@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, | |||
432 | int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); | 448 | int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); |
433 | int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); | 449 | int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); |
434 | int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | 450 | int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, |
435 | int num); | 451 | int num, struct bnxt_qplib_qp **qp); |
436 | void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); | 452 | void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); |
437 | void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); | 453 | void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); |
438 | int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); | 454 | int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 23fb7260662b..16e42754dbec 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | |||
@@ -39,72 +39,55 @@ | |||
39 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
40 | #include <linux/pci.h> | 40 | #include <linux/pci.h> |
41 | #include <linux/prefetch.h> | 41 | #include <linux/prefetch.h> |
42 | #include <linux/delay.h> | ||
43 | |||
42 | #include "roce_hsi.h" | 44 | #include "roce_hsi.h" |
43 | #include "qplib_res.h" | 45 | #include "qplib_res.h" |
44 | #include "qplib_rcfw.h" | 46 | #include "qplib_rcfw.h" |
45 | static void bnxt_qplib_service_creq(unsigned long data); | 47 | static void bnxt_qplib_service_creq(unsigned long data); |
46 | 48 | ||
47 | /* Hardware communication channel */ | 49 | /* Hardware communication channel */ |
48 | int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) | 50 | static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) |
49 | { | 51 | { |
50 | u16 cbit; | 52 | u16 cbit; |
51 | int rc; | 53 | int rc; |
52 | 54 | ||
53 | cookie &= RCFW_MAX_COOKIE_VALUE; | ||
54 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; | 55 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; |
55 | if (!test_bit(cbit, rcfw->cmdq_bitmap)) | ||
56 | dev_warn(&rcfw->pdev->dev, | ||
57 | "QPLIB: CMD bit %d for cookie 0x%x is not set?", | ||
58 | cbit, cookie); | ||
59 | |||
60 | rc = wait_event_timeout(rcfw->waitq, | 56 | rc = wait_event_timeout(rcfw->waitq, |
61 | !test_bit(cbit, rcfw->cmdq_bitmap), | 57 | !test_bit(cbit, rcfw->cmdq_bitmap), |
62 | msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); | 58 | msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); |
63 | if (!rc) { | 59 | return rc ? 0 : -ETIMEDOUT; |
64 | dev_warn(&rcfw->pdev->dev, | ||
65 | "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n", | ||
66 | RCFW_CMD_WAIT_TIME_MS, cookie); | ||
67 | } | ||
68 | |||
69 | return rc; | ||
70 | }; | 60 | }; |
71 | 61 | ||
72 | int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) | 62 | static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) |
73 | { | 63 | { |
74 | u32 count = -1; | 64 | u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; |
75 | u16 cbit; | 65 | u16 cbit; |
76 | 66 | ||
77 | cookie &= RCFW_MAX_COOKIE_VALUE; | ||
78 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; | 67 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; |
79 | if (!test_bit(cbit, rcfw->cmdq_bitmap)) | 68 | if (!test_bit(cbit, rcfw->cmdq_bitmap)) |
80 | goto done; | 69 | goto done; |
81 | do { | 70 | do { |
71 | mdelay(1); /* 1 msec */ | ||
82 | bnxt_qplib_service_creq((unsigned long)rcfw); | 72 | bnxt_qplib_service_creq((unsigned long)rcfw); |
83 | } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); | 73 | } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); |
84 | done: | 74 | done: |
85 | return count; | 75 | return count ? 0 : -ETIMEDOUT; |
86 | }; | 76 | }; |
87 | 77 | ||
88 | void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | 78 | static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, |
89 | struct cmdq_base *req, void **crsbe, | 79 | struct creq_base *resp, void *sb, u8 is_block) |
90 | u8 is_block) | ||
91 | { | 80 | { |
92 | struct bnxt_qplib_crsq *crsq = &rcfw->crsq; | ||
93 | struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; | 81 | struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; |
94 | struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; | 82 | struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; |
95 | struct bnxt_qplib_hwq *crsb = &rcfw->crsb; | 83 | struct bnxt_qplib_crsq *crsqe; |
96 | struct bnxt_qplib_crsqe *crsqe = NULL; | ||
97 | struct bnxt_qplib_crsbe **crsb_ptr; | ||
98 | u32 sw_prod, cmdq_prod; | 84 | u32 sw_prod, cmdq_prod; |
99 | u8 retry_cnt = 0xFF; | ||
100 | dma_addr_t dma_addr; | ||
101 | unsigned long flags; | 85 | unsigned long flags; |
102 | u32 size, opcode; | 86 | u32 size, opcode; |
103 | u16 cookie, cbit; | 87 | u16 cookie, cbit; |
104 | int pg, idx; | 88 | int pg, idx; |
105 | u8 *preq; | 89 | u8 *preq; |
106 | 90 | ||
107 | retry: | ||
108 | opcode = req->opcode; | 91 | opcode = req->opcode; |
109 | if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && | 92 | if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && |
110 | (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && | 93 | (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && |
@@ -112,63 +95,50 @@ retry: | |||
112 | dev_err(&rcfw->pdev->dev, | 95 | dev_err(&rcfw->pdev->dev, |
113 | "QPLIB: RCFW not initialized, reject opcode 0x%x", | 96 | "QPLIB: RCFW not initialized, reject opcode 0x%x", |
114 | opcode); | 97 | opcode); |
115 | return NULL; | 98 | return -EINVAL; |
116 | } | 99 | } |
117 | 100 | ||
118 | if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && | 101 | if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && |
119 | opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { | 102 | opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { |
120 | dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); | 103 | dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); |
121 | return NULL; | 104 | return -EINVAL; |
122 | } | 105 | } |
123 | 106 | ||
124 | /* Cmdq are in 16-byte units, each request can consume 1 or more | 107 | /* Cmdq are in 16-byte units, each request can consume 1 or more |
125 | * cmdqe | 108 | * cmdqe |
126 | */ | 109 | */ |
127 | spin_lock_irqsave(&cmdq->lock, flags); | 110 | spin_lock_irqsave(&cmdq->lock, flags); |
128 | if (req->cmd_size > cmdq->max_elements - | 111 | if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { |
129 | ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) & | ||
130 | (cmdq->max_elements - 1))) { | ||
131 | dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); | 112 | dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); |
132 | spin_unlock_irqrestore(&cmdq->lock, flags); | 113 | spin_unlock_irqrestore(&cmdq->lock, flags); |
133 | 114 | return -EAGAIN; | |
134 | if (!retry_cnt--) | ||
135 | return NULL; | ||
136 | goto retry; | ||
137 | } | 115 | } |
138 | 116 | ||
139 | retry_cnt = 0xFF; | ||
140 | 117 | ||
141 | cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; | 118 | cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; |
142 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; | 119 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; |
143 | if (is_block) | 120 | if (is_block) |
144 | cookie |= RCFW_CMD_IS_BLOCKING; | 121 | cookie |= RCFW_CMD_IS_BLOCKING; |
122 | |||
123 | set_bit(cbit, rcfw->cmdq_bitmap); | ||
145 | req->cookie = cpu_to_le16(cookie); | 124 | req->cookie = cpu_to_le16(cookie); |
146 | if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { | 125 | crsqe = &rcfw->crsqe_tbl[cbit]; |
147 | dev_err(&rcfw->pdev->dev, | 126 | if (crsqe->resp) { |
148 | "QPLIB: RCFW MAX outstanding cmd reached!"); | ||
149 | atomic_dec(&rcfw->seq_num); | ||
150 | spin_unlock_irqrestore(&cmdq->lock, flags); | 127 | spin_unlock_irqrestore(&cmdq->lock, flags); |
151 | 128 | return -EBUSY; | |
152 | if (!retry_cnt--) | ||
153 | return NULL; | ||
154 | goto retry; | ||
155 | } | 129 | } |
156 | /* Reserve a resp buffer slot if requested */ | 130 | memset(resp, 0, sizeof(*resp)); |
157 | if (req->resp_size && crsbe) { | 131 | crsqe->resp = (struct creq_qp_event *)resp; |
158 | spin_lock(&crsb->lock); | 132 | crsqe->resp->cookie = req->cookie; |
159 | sw_prod = HWQ_CMP(crsb->prod, crsb); | 133 | crsqe->req_size = req->cmd_size; |
160 | crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; | 134 | if (req->resp_size && sb) { |
161 | *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] | 135 | struct bnxt_qplib_rcfw_sbuf *sbuf = sb; |
162 | [get_crsb_idx(sw_prod)]; | 136 | |
163 | bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr); | 137 | req->resp_addr = cpu_to_le64(sbuf->dma_addr); |
164 | req->resp_addr = cpu_to_le64(dma_addr); | 138 | req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / |
165 | crsb->prod++; | 139 | BNXT_QPLIB_CMDQE_UNITS; |
166 | spin_unlock(&crsb->lock); | ||
167 | |||
168 | req->resp_size = (sizeof(struct bnxt_qplib_crsbe) + | ||
169 | BNXT_QPLIB_CMDQE_UNITS - 1) / | ||
170 | BNXT_QPLIB_CMDQE_UNITS; | ||
171 | } | 140 | } |
141 | |||
172 | cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; | 142 | cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; |
173 | preq = (u8 *)req; | 143 | preq = (u8 *)req; |
174 | size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; | 144 | size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; |
@@ -190,23 +160,24 @@ retry: | |||
190 | preq += min_t(u32, size, sizeof(*cmdqe)); | 160 | preq += min_t(u32, size, sizeof(*cmdqe)); |
191 | size -= min_t(u32, size, sizeof(*cmdqe)); | 161 | size -= min_t(u32, size, sizeof(*cmdqe)); |
192 | cmdq->prod++; | 162 | cmdq->prod++; |
163 | rcfw->seq_num++; | ||
193 | } while (size > 0); | 164 | } while (size > 0); |
194 | 165 | ||
166 | rcfw->seq_num++; | ||
167 | |||
195 | cmdq_prod = cmdq->prod; | 168 | cmdq_prod = cmdq->prod; |
196 | if (rcfw->flags & FIRMWARE_FIRST_FLAG) { | 169 | if (rcfw->flags & FIRMWARE_FIRST_FLAG) { |
197 | /* The very first doorbell write is required to set this flag | 170 | /* The very first doorbell write |
198 | * which prompts the FW to reset its internal pointers | 171 | * is required to set this flag |
172 | * which prompts the FW to reset | ||
173 | * its internal pointers | ||
199 | */ | 174 | */ |
200 | cmdq_prod |= FIRMWARE_FIRST_FLAG; | 175 | cmdq_prod |= FIRMWARE_FIRST_FLAG; |
201 | rcfw->flags &= ~FIRMWARE_FIRST_FLAG; | 176 | rcfw->flags &= ~FIRMWARE_FIRST_FLAG; |
202 | } | 177 | } |
203 | sw_prod = HWQ_CMP(crsq->prod, crsq); | ||
204 | crsqe = &crsq->crsq[sw_prod]; | ||
205 | memset(crsqe, 0, sizeof(*crsqe)); | ||
206 | crsq->prod++; | ||
207 | crsqe->req_size = req->cmd_size; | ||
208 | 178 | ||
209 | /* ring CMDQ DB */ | 179 | /* ring CMDQ DB */ |
180 | wmb(); | ||
210 | writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + | 181 | writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + |
211 | rcfw->cmdq_bar_reg_prod_off); | 182 | rcfw->cmdq_bar_reg_prod_off); |
212 | writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + | 183 | writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + |
@@ -214,9 +185,56 @@ retry: | |||
214 | done: | 185 | done: |
215 | spin_unlock_irqrestore(&cmdq->lock, flags); | 186 | spin_unlock_irqrestore(&cmdq->lock, flags); |
216 | /* Return the CREQ response pointer */ | 187 | /* Return the CREQ response pointer */ |
217 | return crsqe ? &crsqe->qp_event : NULL; | 188 | return 0; |
218 | } | 189 | } |
219 | 190 | ||
191 | int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | ||
192 | struct cmdq_base *req, | ||
193 | struct creq_base *resp, | ||
194 | void *sb, u8 is_block) | ||
195 | { | ||
196 | struct creq_qp_event *evnt = (struct creq_qp_event *)resp; | ||
197 | u16 cookie; | ||
198 | u8 opcode, retry_cnt = 0xFF; | ||
199 | int rc = 0; | ||
200 | |||
201 | do { | ||
202 | opcode = req->opcode; | ||
203 | rc = __send_message(rcfw, req, resp, sb, is_block); | ||
204 | cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; | ||
205 | if (!rc) | ||
206 | break; | ||
207 | |||
208 | if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { | ||
209 | /* send failed */ | ||
210 | dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed", | ||
211 | cookie, opcode); | ||
212 | return rc; | ||
213 | } | ||
214 | is_block ? mdelay(1) : usleep_range(500, 1000); | ||
215 | |||
216 | } while (retry_cnt--); | ||
217 | |||
218 | if (is_block) | ||
219 | rc = __block_for_resp(rcfw, cookie); | ||
220 | else | ||
221 | rc = __wait_for_resp(rcfw, cookie); | ||
222 | if (rc) { | ||
223 | /* timed out */ | ||
224 | dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timed out (%d) msec", | ||
225 | cookie, opcode, RCFW_CMD_WAIT_TIME_MS); | ||
226 | return rc; | ||
227 | } | ||
228 | |||
229 | if (evnt->status) { | ||
230 | /* failed with status */ | ||
231 | dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x", | ||
232 | cookie, opcode, evnt->status); | ||
233 | rc = -EFAULT; | ||
234 | } | ||
235 | |||
236 | return rc; | ||
237 | } | ||
220 | /* Completions */ | 238 | /* Completions */ |
221 | static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, | 239 | static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, |
222 | struct creq_func_event *func_event) | 240 | struct creq_func_event *func_event) |
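Editor's note: with the new synchronous bnxt_qplib_rcfw_send_message(), every verb in this patch follows the same shape: an on-stack response struct, one call, one rc check, instead of the old pointer-returning send plus wait plus cookie/status comparison. A condensed sketch of the caller-side pattern, mirroring the DESTROY_CQ/DESTROY_QP hunks above (the surrounding function is illustrative):

/* Sketch of the converted caller pattern; not a new function in the patch. */
static int example_cmd(struct bnxt_qplib_rcfw *rcfw, u32 cq_id)
{
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;	/* on-stack, filled by rcfw */
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
	req.cq_cid = cpu_to_le32(cq_id);

	/* Send, timeout, and status handling all live inside send_message now. */
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	return rc;
}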
@@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, | |||
260 | static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, | 278 | static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, |
261 | struct creq_qp_event *qp_event) | 279 | struct creq_qp_event *qp_event) |
262 | { | 280 | { |
263 | struct bnxt_qplib_crsq *crsq = &rcfw->crsq; | ||
264 | struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; | 281 | struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; |
265 | struct bnxt_qplib_crsqe *crsqe; | 282 | struct bnxt_qplib_crsq *crsqe; |
266 | u16 cbit, cookie, blocked = 0; | ||
267 | unsigned long flags; | 283 | unsigned long flags; |
268 | u32 sw_cons; | 284 | u16 cbit, blocked = 0; |
285 | u16 cookie; | ||
286 | __le16 mcookie; | ||
269 | 287 | ||
270 | switch (qp_event->event) { | 288 | switch (qp_event->event) { |
271 | case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: | 289 | case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: |
@@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, | |||
275 | default: | 293 | default: |
276 | /* Command Response */ | 294 | /* Command Response */ |
277 | spin_lock_irqsave(&cmdq->lock, flags); | 295 | spin_lock_irqsave(&cmdq->lock, flags); |
278 | sw_cons = HWQ_CMP(crsq->cons, crsq); | 296 | cookie = le16_to_cpu(qp_event->cookie); |
279 | crsqe = &crsq->crsq[sw_cons]; | 297 | mcookie = qp_event->cookie; |
280 | crsq->cons++; | ||
281 | memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); | ||
282 | |||
283 | cookie = le16_to_cpu(crsqe->qp_event.cookie); | ||
284 | blocked = cookie & RCFW_CMD_IS_BLOCKING; | 298 | blocked = cookie & RCFW_CMD_IS_BLOCKING; |
285 | cookie &= RCFW_MAX_COOKIE_VALUE; | 299 | cookie &= RCFW_MAX_COOKIE_VALUE; |
286 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; | 300 | cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; |
301 | crsqe = &rcfw->crsqe_tbl[cbit]; | ||
302 | if (crsqe->resp && | ||
303 | crsqe->resp->cookie == mcookie) { | ||
304 | memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); | ||
305 | crsqe->resp = NULL; | ||
306 | } else { | ||
307 | dev_err(&rcfw->pdev->dev, | ||
308 | "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", | ||
309 | crsqe->resp ? "mismatch" : "collision", | ||
310 | crsqe->resp ? crsqe->resp->cookie : 0, mcookie); | ||
311 | } | ||
287 | if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) | 312 | if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) |
288 | dev_warn(&rcfw->pdev->dev, | 313 | dev_warn(&rcfw->pdev->dev, |
289 | "QPLIB: CMD bit %d was not requested", cbit); | 314 | "QPLIB: CMD bit %d was not requested", cbit); |
290 | |||
291 | cmdq->cons += crsqe->req_size; | 315 | cmdq->cons += crsqe->req_size; |
292 | spin_unlock_irqrestore(&cmdq->lock, flags); | 316 | crsqe->req_size = 0; |
317 | |||
293 | if (!blocked) | 318 | if (!blocked) |
294 | wake_up(&rcfw->waitq); | 319 | wake_up(&rcfw->waitq); |
295 | break; | 320 | spin_unlock_irqrestore(&cmdq->lock, flags); |
296 | } | 321 | } |
297 | return 0; | 322 | return 0; |
298 | } | 323 | } |
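Editor's note: command completions are now matched by cookie rather than by a separate CRSQ consumer index. The low 15 bits of the cookie select a slot in crsqe_tbl, and the event is copied into the waiter's response only when the stashed cookie matches. A tiny standalone sketch of that mapping; the table depth of 256 is an assumed stand-in for RCFW_MAX_OUTSTANDING_CMD:

#include <stdio.h>

#define MAX_COOKIE_VALUE	0x7FFF	/* RCFW_MAX_COOKIE_VALUE */
#define CMD_IS_BLOCKING		0x8000	/* RCFW_CMD_IS_BLOCKING */
#define MAX_OUTSTANDING_CMD	256	/* assumed table depth */

int main(void)
{
	unsigned int seq_num = 0x12345;		/* producer-side counter */
	unsigned int cookie, cbit, blocked;

	/* Producer: derive the wire cookie and the tracking slot. */
	cookie = seq_num & MAX_COOKIE_VALUE;
	cbit = cookie % MAX_OUTSTANDING_CMD;

	/* Consumer: the CREQ event echoes the cookie; strip the blocking
	 * flag and recover the same slot to find the waiter's buffer.
	 */
	blocked = cookie & CMD_IS_BLOCKING;
	cookie &= MAX_COOKIE_VALUE;
	printf("slot=%u blocked=%u\n", cookie % MAX_OUTSTANDING_CMD, !!blocked);
	return cbit == cookie % MAX_OUTSTANDING_CMD ? 0 : 1;
}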
@@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data) | |||
305 | struct creq_base *creqe, **creq_ptr; | 330 | struct creq_base *creqe, **creq_ptr; |
306 | u32 sw_cons, raw_cons; | 331 | u32 sw_cons, raw_cons; |
307 | unsigned long flags; | 332 | unsigned long flags; |
308 | u32 type; | 333 | u32 type, budget = CREQ_ENTRY_POLL_BUDGET; |
309 | 334 | ||
310 | /* Service the CREQ until empty */ | 335 | /* Service the CREQ until budget is over */ |
311 | spin_lock_irqsave(&creq->lock, flags); | 336 | spin_lock_irqsave(&creq->lock, flags); |
312 | raw_cons = creq->cons; | 337 | raw_cons = creq->cons; |
313 | while (1) { | 338 | while (budget > 0) { |
314 | sw_cons = HWQ_CMP(raw_cons, creq); | 339 | sw_cons = HWQ_CMP(raw_cons, creq); |
315 | creq_ptr = (struct creq_base **)creq->pbl_ptr; | 340 | creq_ptr = (struct creq_base **)creq->pbl_ptr; |
316 | creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; | 341 | creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; |
@@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data) | |||
320 | type = creqe->type & CREQ_BASE_TYPE_MASK; | 345 | type = creqe->type & CREQ_BASE_TYPE_MASK; |
321 | switch (type) { | 346 | switch (type) { |
322 | case CREQ_BASE_TYPE_QP_EVENT: | 347 | case CREQ_BASE_TYPE_QP_EVENT: |
323 | if (!bnxt_qplib_process_qp_event | 348 | bnxt_qplib_process_qp_event |
324 | (rcfw, (struct creq_qp_event *)creqe)) | 349 | (rcfw, (struct creq_qp_event *)creqe); |
325 | rcfw->creq_qp_event_processed++; | 350 | rcfw->creq_qp_event_processed++; |
326 | else { | ||
327 | dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with"); | ||
328 | dev_warn(&rcfw->pdev->dev, | ||
329 | "QPLIB: type = 0x%x not handled", | ||
330 | type); | ||
331 | } | ||
332 | break; | 351 | break; |
333 | case CREQ_BASE_TYPE_FUNC_EVENT: | 352 | case CREQ_BASE_TYPE_FUNC_EVENT: |
334 | if (!bnxt_qplib_process_func_event | 353 | if (!bnxt_qplib_process_func_event |
@@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data) | |||
346 | break; | 365 | break; |
347 | } | 366 | } |
348 | raw_cons++; | 367 | raw_cons++; |
368 | budget--; | ||
349 | } | 369 | } |
370 | |||
350 | if (creq->cons != raw_cons) { | 371 | if (creq->cons != raw_cons) { |
351 | creq->cons = raw_cons; | 372 | creq->cons = raw_cons; |
352 | CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, | 373 | CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, |
@@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) | |||
375 | /* RCFW */ | 396 | /* RCFW */ |
376 | int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) | 397 | int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) |
377 | { | 398 | { |
378 | struct creq_deinitialize_fw_resp *resp; | ||
379 | struct cmdq_deinitialize_fw req; | 399 | struct cmdq_deinitialize_fw req; |
400 | struct creq_deinitialize_fw_resp resp; | ||
380 | u16 cmd_flags = 0; | 401 | u16 cmd_flags = 0; |
402 | int rc; | ||
381 | 403 | ||
382 | RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); | 404 | RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); |
383 | resp = (struct creq_deinitialize_fw_resp *) | 405 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
384 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 406 | NULL, 0); |
385 | NULL, 0); | 407 | if (rc) |
386 | if (!resp) | 408 | return rc; |
387 | return -EINVAL; | ||
388 | |||
389 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) | ||
390 | return -ETIMEDOUT; | ||
391 | |||
392 | if (resp->status || | ||
393 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) | ||
394 | return -EFAULT; | ||
395 | 409 | ||
396 | clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); | 410 | clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); |
397 | return 0; | 411 | return 0; |
@@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl) | |||
417 | int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, | 431 | int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, |
418 | struct bnxt_qplib_ctx *ctx, int is_virtfn) | 432 | struct bnxt_qplib_ctx *ctx, int is_virtfn) |
419 | { | 433 | { |
420 | struct creq_initialize_fw_resp *resp; | ||
421 | struct cmdq_initialize_fw req; | 434 | struct cmdq_initialize_fw req; |
435 | struct creq_initialize_fw_resp resp; | ||
422 | u16 cmd_flags = 0, level; | 436 | u16 cmd_flags = 0, level; |
437 | int rc; | ||
423 | 438 | ||
424 | RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); | 439 | RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); |
425 | 440 | ||
@@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, | |||
482 | 497 | ||
483 | skip_ctx_setup: | 498 | skip_ctx_setup: |
484 | req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); | 499 | req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); |
485 | resp = (struct creq_initialize_fw_resp *) | 500 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
486 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 501 | NULL, 0); |
487 | NULL, 0); | 502 | if (rc) |
488 | if (!resp) { | 503 | return rc; |
489 | dev_err(&rcfw->pdev->dev, | ||
490 | "QPLIB: RCFW: INITIALIZE_FW send failed"); | ||
491 | return -EINVAL; | ||
492 | } | ||
493 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
494 | /* Cmd timed out */ | ||
495 | dev_err(&rcfw->pdev->dev, | ||
496 | "QPLIB: RCFW: INITIALIZE_FW timed out"); | ||
497 | return -ETIMEDOUT; | ||
498 | } | ||
499 | if (resp->status || | ||
500 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
501 | dev_err(&rcfw->pdev->dev, | ||
502 | "QPLIB: RCFW: INITIALIZE_FW failed"); | ||
503 | return -EINVAL; | ||
504 | } | ||
505 | set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); | 504 | set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); |
506 | return 0; | 505 | return 0; |
507 | } | 506 | } |
508 | 507 | ||
509 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) | 508 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) |
510 | { | 509 | { |
511 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); | 510 | kfree(rcfw->crsqe_tbl); |
512 | kfree(rcfw->crsq.crsq); | ||
513 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); | 511 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); |
514 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); | 512 | bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); |
515 | |||
516 | rcfw->pdev = NULL; | 513 | rcfw->pdev = NULL; |
517 | } | 514 | } |
518 | 515 | ||
@@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, | |||
539 | goto fail; | 536 | goto fail; |
540 | } | 537 | } |
541 | 538 | ||
542 | rcfw->crsq.max_elements = rcfw->cmdq.max_elements; | 539 | rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, |
543 | rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, | 540 | sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); |
544 | sizeof(*rcfw->crsq.crsq), GFP_KERNEL); | 541 | if (!rcfw->crsqe_tbl) |
545 | if (!rcfw->crsq.crsq) | ||
546 | goto fail; | 542 | goto fail; |
547 | 543 | ||
548 | rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT; | ||
549 | if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0, | ||
550 | &rcfw->crsb.max_elements, | ||
551 | BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE, | ||
552 | HWQ_TYPE_CTX)) { | ||
553 | dev_err(&rcfw->pdev->dev, | ||
554 | "QPLIB: HW channel CRSB allocation failed"); | ||
555 | goto fail; | ||
556 | } | ||
557 | return 0; | 544 | return 0; |
558 | 545 | ||
559 | fail: | 546 | fail: |
@@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
606 | int rc; | 593 | int rc; |
607 | 594 | ||
608 | /* General */ | 595 | /* General */ |
609 | atomic_set(&rcfw->seq_num, 0); | 596 | rcfw->seq_num = 0; |
610 | rcfw->flags = FIRMWARE_FIRST_FLAG; | 597 | rcfw->flags = FIRMWARE_FIRST_FLAG; |
611 | bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * | 598 | bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * |
612 | sizeof(unsigned long)); | 599 | sizeof(unsigned long)); |
@@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
636 | 623 | ||
637 | rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; | 624 | rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; |
638 | 625 | ||
639 | /* CRSQ */ | ||
640 | rcfw->crsq.prod = 0; | ||
641 | rcfw->crsq.cons = 0; | ||
642 | |||
643 | /* CREQ */ | 626 | /* CREQ */ |
644 | rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; | 627 | rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; |
645 | res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); | 628 | res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); |
@@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
692 | __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); | 675 | __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); |
693 | return 0; | 676 | return 0; |
694 | } | 677 | } |
678 | |||
679 | struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( | ||
680 | struct bnxt_qplib_rcfw *rcfw, | ||
681 | u32 size) | ||
682 | { | ||
683 | struct bnxt_qplib_rcfw_sbuf *sbuf; | ||
684 | |||
685 | sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); | ||
686 | if (!sbuf) | ||
687 | return NULL; | ||
688 | |||
689 | sbuf->size = size; | ||
690 | sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, | ||
691 | &sbuf->dma_addr, GFP_ATOMIC); | ||
692 | if (!sbuf->sb) | ||
693 | goto bail; | ||
694 | |||
695 | return sbuf; | ||
696 | bail: | ||
697 | kfree(sbuf); | ||
698 | return NULL; | ||
699 | } | ||
700 | |||
701 | void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, | ||
702 | struct bnxt_qplib_rcfw_sbuf *sbuf) | ||
703 | { | ||
704 | if (sbuf->sb) | ||
705 | dma_free_coherent(&rcfw->pdev->dev, sbuf->size, | ||
706 | sbuf->sb, sbuf->dma_addr); | ||
707 | kfree(sbuf); | ||
708 | } | ||
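Editor's note: commands that return data through a side buffer now use a dedicated allocate/send/free cycle instead of borrowing a slot from the old CRSB ring. A condensed sketch of the call sequence, following the QUERY_FUNC/QUERY_QP conversions elsewhere in this patch (the wrapper function itself is illustrative):

/* Sketch of the side-buffer flow introduced above; mirrors
 * bnxt_qplib_get_dev_attr()/bnxt_qplib_query_qp().
 */
static int example_query(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_query_func req;
	struct creq_query_func_resp resp;
	struct creq_query_func_resp_sb *sb;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;

	sb = sbuf->sb;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	/* ... on success, parse the needed fields out of *sb here ... */
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}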
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index d3567d75bf58..09ce121770cd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | |||
@@ -73,6 +73,7 @@ | |||
73 | #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT | 73 | #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT |
74 | #define RCFW_MAX_COOKIE_VALUE 0x7FFF | 74 | #define RCFW_MAX_COOKIE_VALUE 0x7FFF |
75 | #define RCFW_CMD_IS_BLOCKING 0x8000 | 75 | #define RCFW_CMD_IS_BLOCKING 0x8000 |
76 | #define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 | ||
76 | 77 | ||
77 | /* Cmdq contains a fixed number of 16-byte slots */ | 78 | /* Cmdq contains a fixed number of 16-byte slots */ |
78 | struct bnxt_qplib_cmdqe { | 79 | struct bnxt_qplib_cmdqe { |
@@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe { | |||
94 | u8 data[1024]; | 95 | u8 data[1024]; |
95 | }; | 96 | }; |
96 | 97 | ||
97 | /* CRSQ SB */ | ||
98 | #define BNXT_QPLIB_CRSBE_MAX_CNT 4 | ||
99 | #define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe) | ||
100 | #define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS) | ||
101 | |||
102 | #define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1) | ||
103 | #define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1) | ||
104 | |||
105 | static inline u32 get_crsb_pg(u32 val) | ||
106 | { | ||
107 | return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG; | ||
108 | } | ||
109 | |||
110 | static inline u32 get_crsb_idx(u32 val) | ||
111 | { | ||
112 | return val & MAX_CRSB_IDX_PER_PG; | ||
113 | } | ||
114 | |||
115 | static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr, | ||
116 | u32 prod, dma_addr_t *dma_addr) | ||
117 | { | ||
118 | *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG]; | ||
119 | *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) * | ||
120 | BNXT_QPLIB_CRSBE_UNITS; | ||
121 | } | ||
122 | |||
123 | /* CREQ */ | 98 | /* CREQ */ |
124 | /* Allocate 1 per QP for async error notification for now */ | 99 | /* Allocate 1 per QP for async error notification for now */ |
125 | #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) | 100 | #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) |
@@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val) | |||
158 | #define CREQ_DB(db, raw_cons, cp_bit) \ | 133 | #define CREQ_DB(db, raw_cons, cp_bit) \ |
159 | writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) | 134 | writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) |
160 | 135 | ||
136 | #define CREQ_ENTRY_POLL_BUDGET 0x100 | ||
137 | |||
161 | /* HWQ */ | 138 | /* HWQ */ |
162 | struct bnxt_qplib_crsqe { | 139 | |
163 | struct creq_qp_event qp_event; | 140 | struct bnxt_qplib_crsq { |
141 | struct creq_qp_event *resp; | ||
164 | u32 req_size; | 142 | u32 req_size; |
165 | }; | 143 | }; |
166 | 144 | ||
167 | struct bnxt_qplib_crsq { | 145 | struct bnxt_qplib_rcfw_sbuf { |
168 | struct bnxt_qplib_crsqe *crsq; | 146 | void *sb; |
169 | u32 prod; | 147 | dma_addr_t dma_addr; |
170 | u32 cons; | 148 | u32 size; |
171 | u32 max_elements; | ||
172 | }; | 149 | }; |
173 | 150 | ||
174 | /* RCFW Communication Channels */ | 151 | /* RCFW Communication Channels */ |
@@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw { | |||
185 | wait_queue_head_t waitq; | 162 | wait_queue_head_t waitq; |
186 | int (*aeq_handler)(struct bnxt_qplib_rcfw *, | 163 | int (*aeq_handler)(struct bnxt_qplib_rcfw *, |
187 | struct creq_func_event *); | 164 | struct creq_func_event *); |
188 | atomic_t seq_num; | 165 | u32 seq_num; |
189 | 166 | ||
190 | /* Bar region info */ | 167 | /* Bar region info */ |
191 | void __iomem *cmdq_bar_reg_iomem; | 168 | void __iomem *cmdq_bar_reg_iomem; |
@@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw { | |||
203 | 180 | ||
204 | /* Actual Cmd and Resp Queues */ | 181 | /* Actual Cmd and Resp Queues */ |
205 | struct bnxt_qplib_hwq cmdq; | 182 | struct bnxt_qplib_hwq cmdq; |
206 | struct bnxt_qplib_crsq crsq; | 183 | struct bnxt_qplib_crsq *crsqe_tbl; |
207 | struct bnxt_qplib_hwq crsb; | ||
208 | }; | 184 | }; |
209 | 185 | ||
210 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); | 186 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); |
@@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
219 | (struct bnxt_qplib_rcfw *, | 195 | (struct bnxt_qplib_rcfw *, |
220 | struct creq_func_event *)); | 196 | struct creq_func_event *)); |
221 | 197 | ||
222 | int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); | 198 | struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( |
223 | int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); | 199 | struct bnxt_qplib_rcfw *rcfw, |
224 | void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | 200 | u32 size); |
225 | struct cmdq_base *req, void **crsbe, | 201 | void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, |
226 | u8 is_block); | 202 | struct bnxt_qplib_rcfw_sbuf *sbuf); |
203 | int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | ||
204 | struct cmdq_base *req, struct creq_base *resp, | ||
205 | void *sbuf, u8 is_block); | ||
227 | 206 | ||
228 | int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); | 207 | int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); |
229 | int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, | 208 | int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 6277d802ca4b..2e4855509719 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h | |||
@@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; | |||
48 | 48 | ||
49 | #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) | 49 | #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) |
50 | 50 | ||
51 | #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ | ||
52 | ((HWQ_CMP(hwq->prod, hwq)\ | ||
53 | - HWQ_CMP(hwq->cons, hwq))\ | ||
54 | & (hwq->max_elements - 1))) | ||
51 | enum bnxt_qplib_hwq_type { | 55 | enum bnxt_qplib_hwq_type { |
52 | HWQ_TYPE_CTX, | 56 | HWQ_TYPE_CTX, |
53 | HWQ_TYPE_QUEUE, | 57 | HWQ_TYPE_QUEUE, |
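Editor's note: HWQ_FREE_SLOTS() gives the number of unused entries in a power-of-two ring; the rewritten __send_message() compares it against req->cmd_size before copying a command in and returns -EAGAIN when the CMDQ is full. A standalone worked example of the same arithmetic, with illustrative sizes:

#include <stdio.h>

/* Same arithmetic as HWQ_CMP()/HWQ_FREE_SLOTS(), on plain integers. */
static unsigned int free_slots(unsigned int prod, unsigned int cons,
			       unsigned int max_elements)
{
	unsigned int used = (prod - cons) & (max_elements - 1);

	return max_elements - used;
}

int main(void)
{
	/* 16-slot ring with 5 entries outstanding: 11 slots remain, so a
	 * command needing cmd_size >= 11 units would be rejected.
	 */
	printf("free = %u\n", free_slots(21, 16, 16));
	return 0;
}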
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 7b31eccedf11..fde18cf0e406 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c | |||
@@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
55 | struct bnxt_qplib_dev_attr *attr) | 55 | struct bnxt_qplib_dev_attr *attr) |
56 | { | 56 | { |
57 | struct cmdq_query_func req; | 57 | struct cmdq_query_func req; |
58 | struct creq_query_func_resp *resp; | 58 | struct creq_query_func_resp resp; |
59 | struct bnxt_qplib_rcfw_sbuf *sbuf; | ||
59 | struct creq_query_func_resp_sb *sb; | 60 | struct creq_query_func_resp_sb *sb; |
60 | u16 cmd_flags = 0; | 61 | u16 cmd_flags = 0; |
61 | u32 temp; | 62 | u32 temp; |
62 | u8 *tqm_alloc; | 63 | u8 *tqm_alloc; |
63 | int i; | 64 | int i, rc = 0; |
64 | 65 | ||
65 | RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); | 66 | RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); |
66 | 67 | ||
67 | req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; | 68 | sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); |
68 | resp = (struct creq_query_func_resp *) | 69 | if (!sbuf) { |
69 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb, | ||
70 | 0); | ||
71 | if (!resp) { | ||
72 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed"); | ||
73 | return -EINVAL; | ||
74 | } | ||
75 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
76 | /* Cmd timed out */ | ||
77 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out"); | ||
78 | return -ETIMEDOUT; | ||
79 | } | ||
80 | if (resp->status || | ||
81 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
82 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed "); | ||
83 | dev_err(&rcfw->pdev->dev, | 70 | dev_err(&rcfw->pdev->dev, |
84 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | 71 | "QPLIB: SP: QUERY_FUNC alloc side buffer failed"); |
85 | resp->status, le16_to_cpu(req.cookie), | 72 | return -ENOMEM; |
86 | le16_to_cpu(resp->cookie)); | ||
87 | return -EINVAL; | ||
88 | } | 73 | } |
74 | |||
75 | sb = sbuf->sb; | ||
76 | req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; | ||
77 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, | ||
78 | (void *)sbuf, 0); | ||
79 | if (rc) | ||
80 | goto bail; | ||
81 | |||
89 | /* Extract the context from the side buffer */ | 82 | /* Extract the context from the side buffer */ |
90 | attr->max_qp = le32_to_cpu(sb->max_qp); | 83 | attr->max_qp = le32_to_cpu(sb->max_qp); |
91 | attr->max_qp_rd_atom = | 84 | attr->max_qp_rd_atom = |
@@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
95 | sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? | 88 | sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? |
96 | BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; | 89 | BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; |
97 | attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); | 90 | attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); |
91 | /* | ||
92 | * 128 WQEs need to be reserved for the HW (8916). Prevent | ||
93 | * reporting the max number | ||
94 | */ | ||
95 | attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; | ||
98 | attr->max_qp_sges = sb->max_sge; | 96 | attr->max_qp_sges = sb->max_sge; |
99 | attr->max_cq = le32_to_cpu(sb->max_cq); | 97 | attr->max_cq = le32_to_cpu(sb->max_cq); |
100 | attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); | 98 | attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); |
@@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
130 | attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); | 128 | attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); |
131 | attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); | 129 | attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); |
132 | } | 130 | } |
133 | return 0; | 131 | |
132 | bail: | ||
133 | bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); | ||
134 | return rc; | ||
134 | } | 135 | } |
135 | 136 | ||
136 | /* SGID */ | 137 | /* SGID */ |
@@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
178 | /* Remove GID from the SGID table */ | 179 | /* Remove GID from the SGID table */ |
179 | if (update) { | 180 | if (update) { |
180 | struct cmdq_delete_gid req; | 181 | struct cmdq_delete_gid req; |
181 | struct creq_delete_gid_resp *resp; | 182 | struct creq_delete_gid_resp resp; |
182 | u16 cmd_flags = 0; | 183 | u16 cmd_flags = 0; |
184 | int rc; | ||
183 | 185 | ||
184 | RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); | 186 | RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); |
185 | if (sgid_tbl->hw_id[index] == 0xFFFF) { | 187 | if (sgid_tbl->hw_id[index] == 0xFFFF) { |
@@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
188 | return -EINVAL; | 190 | return -EINVAL; |
189 | } | 191 | } |
190 | req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); | 192 | req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); |
191 | resp = (struct creq_delete_gid_resp *) | 193 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
192 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, | 194 | (void *)&resp, NULL, 0); |
193 | 0); | 195 | if (rc) |
194 | if (!resp) { | 196 | return rc; |
195 | dev_err(&res->pdev->dev, | ||
196 | "QPLIB: SP: DELETE_GID send failed"); | ||
197 | return -EINVAL; | ||
198 | } | ||
199 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, | ||
200 | le16_to_cpu(req.cookie))) { | ||
201 | /* Cmd timed out */ | ||
202 | dev_err(&res->pdev->dev, | ||
203 | "QPLIB: SP: DELETE_GID timed out"); | ||
204 | return -ETIMEDOUT; | ||
205 | } | ||
206 | if (resp->status || | ||
207 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
208 | dev_err(&res->pdev->dev, | ||
209 | "QPLIB: SP: DELETE_GID failed "); | ||
210 | dev_err(&res->pdev->dev, | ||
211 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
212 | resp->status, le16_to_cpu(req.cookie), | ||
213 | le16_to_cpu(resp->cookie)); | ||
214 | return -EINVAL; | ||
215 | } | ||
216 | } | 197 | } |
217 | memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, | 198 | memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, |
218 | sizeof(bnxt_qplib_gid_zero)); | 199 | sizeof(bnxt_qplib_gid_zero)); |
@@ -234,7 +215,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
234 | struct bnxt_qplib_res, | 215 | struct bnxt_qplib_res, |
235 | sgid_tbl); | 216 | sgid_tbl); |
236 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 217 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
237 | int i, free_idx, rc = 0; | 218 | int i, free_idx; |
238 | 219 | ||
239 | if (!sgid_tbl) { | 220 | if (!sgid_tbl) { |
240 | dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); | 221 | dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); |
@@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
266 | } | 247 | } |
267 | if (update) { | 248 | if (update) { |
268 | struct cmdq_add_gid req; | 249 | struct cmdq_add_gid req; |
269 | struct creq_add_gid_resp *resp; | 250 | struct creq_add_gid_resp resp; |
270 | u16 cmd_flags = 0; | 251 | u16 cmd_flags = 0; |
271 | u32 temp32[4]; | 252 | u32 temp32[4]; |
272 | u16 temp16[3]; | 253 | u16 temp16[3]; |
254 | int rc; | ||
273 | 255 | ||
274 | RCFW_CMD_PREP(req, ADD_GID, cmd_flags); | 256 | RCFW_CMD_PREP(req, ADD_GID, cmd_flags); |
275 | 257 | ||
@@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
290 | req.src_mac[1] = cpu_to_be16(temp16[1]); | 272 | req.src_mac[1] = cpu_to_be16(temp16[1]); |
291 | req.src_mac[2] = cpu_to_be16(temp16[2]); | 273 | req.src_mac[2] = cpu_to_be16(temp16[2]); |
292 | 274 | ||
293 | resp = (struct creq_add_gid_resp *) | 275 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
294 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 276 | (void *)&resp, NULL, 0); |
295 | NULL, 0); | 277 | if (rc) |
296 | if (!resp) { | 278 | return rc; |
297 | dev_err(&res->pdev->dev, | 279 | sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid); |
298 | "QPLIB: SP: ADD_GID send failed"); | ||
299 | return -EINVAL; | ||
300 | } | ||
301 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, | ||
302 | le16_to_cpu(req.cookie))) { | ||
303 | /* Cmd timed out */ | ||
304 | dev_err(&res->pdev->dev, | ||
305 | "QPIB: SP: ADD_GID timed out"); | ||
306 | return -ETIMEDOUT; | ||
307 | } | ||
308 | if (resp->status || | ||
309 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
310 | dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed "); | ||
311 | dev_err(&res->pdev->dev, | ||
312 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
313 | resp->status, le16_to_cpu(req.cookie), | ||
314 | le16_to_cpu(resp->cookie)); | ||
315 | return -EINVAL; | ||
316 | } | ||
317 | sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid); | ||
318 | } | 280 | } |
319 | /* Add GID to the sgid_tbl */ | 281 | /* Add GID to the sgid_tbl */ |
320 | memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); | 282 | memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); |
@@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
325 | 287 | ||
326 | *index = free_idx; | 288 | *index = free_idx; |
327 | /* unlock */ | 289 | /* unlock */ |
328 | return rc; | 290 | return 0; |
329 | } | 291 | } |
330 | 292 | ||
331 | /* pkeys */ | 293 | /* pkeys */ |
@@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) | |||
422 | { | 384 | { |
423 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 385 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
424 | struct cmdq_create_ah req; | 386 | struct cmdq_create_ah req; |
425 | struct creq_create_ah_resp *resp; | 387 | struct creq_create_ah_resp resp; |
426 | u16 cmd_flags = 0; | 388 | u16 cmd_flags = 0; |
427 | u32 temp32[4]; | 389 | u32 temp32[4]; |
428 | u16 temp16[3]; | 390 | u16 temp16[3]; |
391 | int rc; | ||
429 | 392 | ||
430 | RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); | 393 | RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); |
431 | 394 | ||
@@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) | |||
450 | req.dest_mac[1] = cpu_to_le16(temp16[1]); | 413 | req.dest_mac[1] = cpu_to_le16(temp16[1]); |
451 | req.dest_mac[2] = cpu_to_le16(temp16[2]); | 414 | req.dest_mac[2] = cpu_to_le16(temp16[2]); |
452 | 415 | ||
453 | resp = (struct creq_create_ah_resp *) | 416 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
454 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 417 | NULL, 1); |
455 | NULL, 1); | 418 | if (rc) |
456 | if (!resp) { | 419 | return rc; |
457 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); | 420 | |
458 | return -EINVAL; | 421 | ah->id = le32_to_cpu(resp.xid); |
459 | } | ||
460 | if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
461 | /* Cmd timed out */ | ||
462 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out"); | ||
463 | return -ETIMEDOUT; | ||
464 | } | ||
465 | if (resp->status || | ||
466 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
467 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed "); | ||
468 | dev_err(&rcfw->pdev->dev, | ||
469 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
470 | resp->status, le16_to_cpu(req.cookie), | ||
471 | le16_to_cpu(resp->cookie)); | ||
472 | return -EINVAL; | ||
473 | } | ||
474 | ah->id = le32_to_cpu(resp->xid); | ||
475 | return 0; | 422 | return 0; |
476 | } | 423 | } |
477 | 424 | ||
@@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) | |||
479 | { | 426 | { |
480 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 427 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
481 | struct cmdq_destroy_ah req; | 428 | struct cmdq_destroy_ah req; |
482 | struct creq_destroy_ah_resp *resp; | 429 | struct creq_destroy_ah_resp resp; |
483 | u16 cmd_flags = 0; | 430 | u16 cmd_flags = 0; |
431 | int rc; | ||
484 | 432 | ||
485 | /* Clean up the AH table in the device */ | 433 | /* Clean up the AH table in the device */ |
486 | RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); | 434 | RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); |
487 | 435 | ||
488 | req.ah_cid = cpu_to_le32(ah->id); | 436 | req.ah_cid = cpu_to_le32(ah->id); |
489 | 437 | ||
490 | resp = (struct creq_destroy_ah_resp *) | 438 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
491 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 439 | NULL, 1); |
492 | NULL, 1); | 440 | if (rc) |
493 | if (!resp) { | 441 | return rc; |
494 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed"); | ||
495 | return -EINVAL; | ||
496 | } | ||
497 | if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
498 | /* Cmd timed out */ | ||
499 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out"); | ||
500 | return -ETIMEDOUT; | ||
501 | } | ||
502 | if (resp->status || | ||
503 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
504 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed "); | ||
505 | dev_err(&rcfw->pdev->dev, | ||
506 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
507 | resp->status, le16_to_cpu(req.cookie), | ||
508 | le16_to_cpu(resp->cookie)); | ||
509 | return -EINVAL; | ||
510 | } | ||
511 | return 0; | 442 | return 0; |
512 | } | 443 | } |
513 | 444 | ||
@@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) | |||
516 | { | 447 | { |
517 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 448 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
518 | struct cmdq_deallocate_key req; | 449 | struct cmdq_deallocate_key req; |
519 | struct creq_deallocate_key_resp *resp; | 450 | struct creq_deallocate_key_resp resp; |
520 | u16 cmd_flags = 0; | 451 | u16 cmd_flags = 0; |
452 | int rc; | ||
521 | 453 | ||
522 | if (mrw->lkey == 0xFFFFFFFF) { | 454 | if (mrw->lkey == 0xFFFFFFFF) { |
523 | dev_info(&res->pdev->dev, | 455 | dev_info(&res->pdev->dev, |
@@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) | |||
536 | else | 468 | else |
537 | req.key = cpu_to_le32(mrw->lkey); | 469 | req.key = cpu_to_le32(mrw->lkey); |
538 | 470 | ||
539 | resp = (struct creq_deallocate_key_resp *) | 471 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, |
540 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 472 | NULL, 0); |
541 | NULL, 0); | 473 | if (rc) |
542 | if (!resp) { | 474 | return rc; |
543 | dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); | 475 | |
544 | return -EINVAL; | ||
545 | } | ||
546 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
547 | /* Cmd timed out */ | ||
548 | dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out"); | ||
549 | return -ETIMEDOUT; | ||
550 | } | ||
551 | if (resp->status || | ||
552 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
553 | dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed "); | ||
554 | dev_err(&res->pdev->dev, | ||
555 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
556 | resp->status, le16_to_cpu(req.cookie), | ||
557 | le16_to_cpu(resp->cookie)); | ||
558 | return -EINVAL; | ||
559 | } | ||
560 | /* Free the qplib's MRW memory */ | 476 | /* Free the qplib's MRW memory */ |
561 | if (mrw->hwq.max_elements) | 477 | if (mrw->hwq.max_elements) |
562 | bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); | 478 | bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); |
@@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) | |||
568 | { | 484 | { |
569 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 485 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
570 | struct cmdq_allocate_mrw req; | 486 | struct cmdq_allocate_mrw req; |
571 | struct creq_allocate_mrw_resp *resp; | 487 | struct creq_allocate_mrw_resp resp; |
572 | u16 cmd_flags = 0; | 488 | u16 cmd_flags = 0; |
573 | unsigned long tmp; | 489 | unsigned long tmp; |
490 | int rc; | ||
574 | 491 | ||
575 | RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); | 492 | RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); |
576 | 493 | ||
@@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) | |||
584 | tmp = (unsigned long)mrw; | 501 | tmp = (unsigned long)mrw; |
585 | req.mrw_handle = cpu_to_le64(tmp); | 502 | req.mrw_handle = cpu_to_le64(tmp); |
586 | 503 | ||
587 | resp = (struct creq_allocate_mrw_resp *) | 504 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
588 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 505 | (void *)&resp, NULL, 0); |
589 | NULL, 0); | 506 | if (rc) |
590 | if (!resp) { | 507 | return rc; |
591 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); | 508 | |
592 | return -EINVAL; | ||
593 | } | ||
594 | if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { | ||
595 | /* Cmd timed out */ | ||
596 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out"); | ||
597 | return -ETIMEDOUT; | ||
598 | } | ||
599 | if (resp->status || | ||
600 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
601 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed "); | ||
602 | dev_err(&rcfw->pdev->dev, | ||
603 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
604 | resp->status, le16_to_cpu(req.cookie), | ||
605 | le16_to_cpu(resp->cookie)); | ||
606 | return -EINVAL; | ||
607 | } | ||
608 | if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || | 509 | if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || |
609 | (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || | 510 | (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || |
610 | (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) | 511 | (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) |
611 | mrw->rkey = le32_to_cpu(resp->xid); | 512 | mrw->rkey = le32_to_cpu(resp.xid); |
612 | else | 513 | else |
613 | mrw->lkey = le32_to_cpu(resp->xid); | 514 | mrw->lkey = le32_to_cpu(resp.xid); |
614 | return 0; | 515 | return 0; |
615 | } | 516 | } |
616 | 517 | ||
@@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, | |||
619 | { | 520 | { |
620 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 521 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
621 | struct cmdq_deregister_mr req; | 522 | struct cmdq_deregister_mr req; |
622 | struct creq_deregister_mr_resp *resp; | 523 | struct creq_deregister_mr_resp resp; |
623 | u16 cmd_flags = 0; | 524 | u16 cmd_flags = 0; |
624 | int rc; | 525 | int rc; |
625 | 526 | ||
626 | RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); | 527 | RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); |
627 | 528 | ||
628 | req.lkey = cpu_to_le32(mrw->lkey); | 529 | req.lkey = cpu_to_le32(mrw->lkey); |
629 | resp = (struct creq_deregister_mr_resp *) | 530 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
630 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 531 | (void *)&resp, NULL, block); |
631 | NULL, block); | 532 | if (rc) |
632 | if (!resp) { | 533 | return rc; |
633 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed"); | ||
634 | return -EINVAL; | ||
635 | } | ||
636 | if (block) | ||
637 | rc = bnxt_qplib_rcfw_block_for_resp(rcfw, | ||
638 | le16_to_cpu(req.cookie)); | ||
639 | else | ||
640 | rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, | ||
641 | le16_to_cpu(req.cookie)); | ||
642 | if (!rc) { | ||
643 | /* Cmd timed out */ | ||
644 | dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out"); | ||
645 | return -ETIMEDOUT; | ||
646 | } | ||
647 | if (resp->status || | ||
648 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
649 | dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed "); | ||
650 | dev_err(&rcfw->pdev->dev, | ||
651 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
652 | resp->status, le16_to_cpu(req.cookie), | ||
653 | le16_to_cpu(resp->cookie)); | ||
654 | return -EINVAL; | ||
655 | } | ||
656 | 534 | ||
657 | /* Free the qplib's MR memory */ | 535 | /* Free the qplib's MR memory */ |
658 | if (mrw->hwq.max_elements) { | 536 | if (mrw->hwq.max_elements) { |
@@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, | |||
669 | { | 547 | { |
670 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 548 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
671 | struct cmdq_register_mr req; | 549 | struct cmdq_register_mr req; |
672 | struct creq_register_mr_resp *resp; | 550 | struct creq_register_mr_resp resp; |
673 | u16 cmd_flags = 0, level; | 551 | u16 cmd_flags = 0, level; |
674 | int pg_ptrs, pages, i, rc; | 552 | int pg_ptrs, pages, i, rc; |
675 | dma_addr_t **pbl_ptr; | 553 | dma_addr_t **pbl_ptr; |
@@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, | |||
730 | req.key = cpu_to_le32(mr->lkey); | 608 | req.key = cpu_to_le32(mr->lkey); |
731 | req.mr_size = cpu_to_le64(mr->total_size); | 609 | req.mr_size = cpu_to_le64(mr->total_size); |
732 | 610 | ||
733 | resp = (struct creq_register_mr_resp *) | 611 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
734 | bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, | 612 | (void *)&resp, NULL, block); |
735 | NULL, block); | 613 | if (rc) |
736 | if (!resp) { | ||
737 | dev_err(&res->pdev->dev, "SP: REG_MR send failed"); | ||
738 | rc = -EINVAL; | ||
739 | goto fail; | ||
740 | } | ||
741 | if (block) | ||
742 | rc = bnxt_qplib_rcfw_block_for_resp(rcfw, | ||
743 | le16_to_cpu(req.cookie)); | ||
744 | else | ||
745 | rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, | ||
746 | le16_to_cpu(req.cookie)); | ||
747 | if (!rc) { | ||
748 | /* Cmd timed out */ | ||
749 | dev_err(&res->pdev->dev, "SP: REG_MR timed out"); | ||
750 | rc = -ETIMEDOUT; | ||
751 | goto fail; | ||
752 | } | ||
753 | if (resp->status || | ||
754 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
755 | dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed "); | ||
756 | dev_err(&res->pdev->dev, | ||
757 | "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x", | ||
758 | resp->status, le16_to_cpu(req.cookie), | ||
759 | le16_to_cpu(resp->cookie)); | ||
760 | rc = -EINVAL; | ||
761 | goto fail; | 614 | goto fail; |
762 | } | 615 | |
763 | return 0; | 616 | return 0; |
764 | 617 | ||
765 | fail: | 618 | fail: |
@@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) | |||
804 | { | 657 | { |
805 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 658 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
806 | struct cmdq_map_tc_to_cos req; | 659 | struct cmdq_map_tc_to_cos req; |
807 | struct creq_map_tc_to_cos_resp *resp; | 660 | struct creq_map_tc_to_cos_resp resp; |
808 | u16 cmd_flags = 0; | 661 | u16 cmd_flags = 0; |
809 | int tleft; | 662 | int rc = 0; |
810 | 663 | ||
811 | RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); | 664 | RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); |
812 | req.cos0 = cpu_to_le16(cids[0]); | 665 | req.cos0 = cpu_to_le16(cids[0]); |
813 | req.cos1 = cpu_to_le16(cids[1]); | 666 | req.cos1 = cpu_to_le16(cids[1]); |
814 | 667 | ||
815 | resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); | 668 | rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, |
816 | if (!resp) { | 669 | (void *)&resp, NULL, 0); |
817 | dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed"); | ||
818 | return -EINVAL; | ||
819 | } | ||
820 | |||
821 | tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie)); | ||
822 | if (!tleft) { | ||
823 | dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out"); | ||
824 | return -ETIMEDOUT; | ||
825 | } | ||
826 | |||
827 | if (resp->status || | ||
828 | le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { | ||
829 | dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed "); | ||
830 | dev_err(&res->pdev->dev, | ||
831 | "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", | ||
832 | resp->status, le16_to_cpu(req.cookie), | ||
833 | le16_to_cpu(resp->cookie)); | ||
834 | return -EINVAL; | ||
835 | } | ||
836 | |||
837 | return 0; | 670 | return 0; |
838 | } | 671 | } |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 1442a617e968..a543f959098b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h | |||
@@ -40,6 +40,8 @@ | |||
40 | #ifndef __BNXT_QPLIB_SP_H__ | 40 | #ifndef __BNXT_QPLIB_SP_H__ |
41 | #define __BNXT_QPLIB_SP_H__ | 41 | #define __BNXT_QPLIB_SP_H__ |
42 | 42 | ||
43 | #define BNXT_QPLIB_RESERVED_QP_WRS 128 | ||
44 | |||
43 | struct bnxt_qplib_dev_attr { | 45 | struct bnxt_qplib_dev_attr { |
44 | char fw_ver[32]; | 46 | char fw_ver[32]; |
45 | u16 max_sgid; | 47 | u16 max_sgid; |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index f96a96dbcf1f..ae0b79aeea2e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, | |||
767 | kfree(entry); | 767 | kfree(entry); |
768 | } | 768 | } |
769 | 769 | ||
770 | list_for_each_safe(pos, nxt, &uctx->qpids) { | 770 | list_for_each_safe(pos, nxt, &uctx->cqids) { |
771 | entry = list_entry(pos, struct c4iw_qid_list, entry); | 771 | entry = list_entry(pos, struct c4iw_qid_list, entry); |
772 | list_del_init(&entry->entry); | 772 | list_del_init(&entry->entry); |
773 | kfree(entry); | 773 | kfree(entry); |
@@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
880 | rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); | 880 | rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); |
881 | if (!rdev->free_workq) { | 881 | if (!rdev->free_workq) { |
882 | err = -ENOMEM; | 882 | err = -ENOMEM; |
883 | goto err_free_status_page; | 883 | goto err_free_status_page_and_wr_log; |
884 | } | 884 | } |
885 | 885 | ||
886 | rdev->status_page->db_off = 0; | 886 | rdev->status_page->db_off = 0; |
887 | 887 | ||
888 | return 0; | 888 | return 0; |
889 | err_free_status_page: | 889 | err_free_status_page_and_wr_log: |
890 | if (c4iw_wr_log && rdev->wr_log) | ||
891 | kfree(rdev->wr_log); | ||
890 | free_page((unsigned long)rdev->status_page); | 892 | free_page((unsigned long)rdev->status_page); |
891 | destroy_ocqp_pool: | 893 | destroy_ocqp_pool: |
892 | c4iw_ocqp_pool_destroy(rdev); | 894 | c4iw_ocqp_pool_destroy(rdev); |
@@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) | |||
903 | { | 905 | { |
904 | destroy_workqueue(rdev->free_workq); | 906 | destroy_workqueue(rdev->free_workq); |
905 | kfree(rdev->wr_log); | 907 | kfree(rdev->wr_log); |
908 | c4iw_release_dev_ucontext(rdev, &rdev->uctx); | ||
906 | free_page((unsigned long)rdev->status_page); | 909 | free_page((unsigned long)rdev->status_page); |
907 | c4iw_pblpool_destroy(rdev); | 910 | c4iw_pblpool_destroy(rdev); |
908 | c4iw_rqtpool_destroy(rdev); | 911 | c4iw_rqtpool_destroy(rdev); |
912 | c4iw_ocqp_pool_destroy(rdev); | ||
909 | c4iw_destroy_resource(&rdev->resource); | 913 | c4iw_destroy_resource(&rdev->resource); |
910 | } | 914 | } |
911 | 915 | ||
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 0c79983c8b1a..9ecc089d4529 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -3692,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
3692 | dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; | 3692 | dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; |
3693 | dev->ib_dev.get_port_immutable = mlx5_port_immutable; | 3693 | dev->ib_dev.get_port_immutable = mlx5_port_immutable; |
3694 | dev->ib_dev.get_dev_fw_str = get_dev_fw_str; | 3694 | dev->ib_dev.get_dev_fw_str = get_dev_fw_str; |
3695 | dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; | 3695 | if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { |
3696 | dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; | 3696 | dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; |
3697 | dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; | ||
3698 | } | ||
3697 | if (mlx5_core_is_pf(mdev)) { | 3699 | if (mlx5_core_is_pf(mdev)) { |
3698 | dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; | 3700 | dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; |
3699 | dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; | 3701 | dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; |
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index aa08c76a4245..d961f79b317c 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h | |||
@@ -58,7 +58,10 @@ | |||
58 | #define QEDR_MSG_QP " QP" | 58 | #define QEDR_MSG_QP " QP" |
59 | #define QEDR_MSG_GSI " GSI" | 59 | #define QEDR_MSG_GSI " GSI" |
60 | 60 | ||
61 | #define QEDR_CQ_MAGIC_NUMBER (0x11223344) | 61 | #define QEDR_CQ_MAGIC_NUMBER (0x11223344) |
62 | |||
63 | #define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE) | ||
64 | #define FW_PAGE_SHIFT (12) | ||
62 | 65 | ||
63 | struct qedr_dev; | 66 | struct qedr_dev; |
64 | 67 | ||
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 17685cfea6a2..d6723c365c7f 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev, | |||
653 | 653 | ||
654 | static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, | 654 | static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, |
655 | struct qedr_pbl *pbl, | 655 | struct qedr_pbl *pbl, |
656 | struct qedr_pbl_info *pbl_info) | 656 | struct qedr_pbl_info *pbl_info, u32 pg_shift) |
657 | { | 657 | { |
658 | int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; | 658 | int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; |
659 | u32 fw_pg_cnt, fw_pg_per_umem_pg; | ||
659 | struct qedr_pbl *pbl_tbl; | 660 | struct qedr_pbl *pbl_tbl; |
660 | struct scatterlist *sg; | 661 | struct scatterlist *sg; |
661 | struct regpair *pbe; | 662 | struct regpair *pbe; |
663 | u64 pg_addr; | ||
662 | int entry; | 664 | int entry; |
663 | u32 addr; | ||
664 | 665 | ||
665 | if (!pbl_info->num_pbes) | 666 | if (!pbl_info->num_pbes) |
666 | return; | 667 | return; |
@@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, | |||
683 | 684 | ||
684 | shift = umem->page_shift; | 685 | shift = umem->page_shift; |
685 | 686 | ||
687 | fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift); | ||
688 | |||
686 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { | 689 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
687 | pages = sg_dma_len(sg) >> shift; | 690 | pages = sg_dma_len(sg) >> shift; |
691 | pg_addr = sg_dma_address(sg); | ||
688 | for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { | 692 | for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { |
689 | /* store the page address in pbe */ | 693 | for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) { |
690 | pbe->lo = cpu_to_le32(sg_dma_address(sg) + | 694 | pbe->lo = cpu_to_le32(pg_addr); |
691 | (pg_cnt << shift)); | 695 | pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); |
692 | addr = upper_32_bits(sg_dma_address(sg) + | 696 | |
693 | (pg_cnt << shift)); | 697 | pg_addr += BIT(pg_shift); |
694 | pbe->hi = cpu_to_le32(addr); | 698 | pbe_cnt++; |
695 | pbe_cnt++; | 699 | total_num_pbes++; |
696 | total_num_pbes++; | 700 | pbe++; |
697 | pbe++; | 701 | |
698 | 702 | if (total_num_pbes == pbl_info->num_pbes) | |
699 | if (total_num_pbes == pbl_info->num_pbes) | 703 | return; |
700 | return; | 704 | |
701 | 705 | /* If the given pbl is full storing the pbes, | |
702 | /* If the given pbl is full storing the pbes, | 706 | * move to next pbl. |
703 | * move to next pbl. | 707 | */ |
704 | */ | 708 | if (pbe_cnt == |
705 | if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { | 709 | (pbl_info->pbl_size / sizeof(u64))) { |
706 | pbl_tbl++; | 710 | pbl_tbl++; |
707 | pbe = (struct regpair *)pbl_tbl->va; | 711 | pbe = (struct regpair *)pbl_tbl->va; |
708 | pbe_cnt = 0; | 712 | pbe_cnt = 0; |
713 | } | ||
714 | |||
715 | fw_pg_cnt++; | ||
709 | } | 716 | } |
710 | } | 717 | } |
711 | } | 718 | } |
@@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, | |||
754 | u64 buf_addr, size_t buf_len, | 761 | u64 buf_addr, size_t buf_len, |
755 | int access, int dmasync) | 762 | int access, int dmasync) |
756 | { | 763 | { |
757 | int page_cnt; | 764 | u32 fw_pages; |
758 | int rc; | 765 | int rc; |
759 | 766 | ||
760 | q->buf_addr = buf_addr; | 767 | q->buf_addr = buf_addr; |
@@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, | |||
766 | return PTR_ERR(q->umem); | 773 | return PTR_ERR(q->umem); |
767 | } | 774 | } |
768 | 775 | ||
769 | page_cnt = ib_umem_page_count(q->umem); | 776 | fw_pages = ib_umem_page_count(q->umem) << |
770 | rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); | 777 | (q->umem->page_shift - FW_PAGE_SHIFT); |
778 | |||
779 | rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); | ||
771 | if (rc) | 780 | if (rc) |
772 | goto err0; | 781 | goto err0; |
773 | 782 | ||
@@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, | |||
777 | goto err0; | 786 | goto err0; |
778 | } | 787 | } |
779 | 788 | ||
780 | qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); | 789 | qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, |
790 | FW_PAGE_SHIFT); | ||
781 | 791 | ||
782 | return 0; | 792 | return 0; |
783 | 793 | ||
@@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, | |||
2226 | goto err1; | 2236 | goto err1; |
2227 | 2237 | ||
2228 | qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, | 2238 | qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, |
2229 | &mr->info.pbl_info); | 2239 | &mr->info.pbl_info, mr->umem->page_shift); |
2230 | 2240 | ||
2231 | rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); | 2241 | rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); |
2232 | if (rc) { | 2242 | if (rc) { |
@@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp, | |||
3209 | case IB_WC_REG_MR: | 3219 | case IB_WC_REG_MR: |
3210 | qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; | 3220 | qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; |
3211 | break; | 3221 | break; |
3222 | case IB_WC_RDMA_READ: | ||
3223 | case IB_WC_SEND: | ||
3224 | wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; | ||
3225 | break; | ||
3212 | default: | 3226 | default: |
3213 | break; | 3227 | break; |
3214 | } | 3228 | } |
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index ecdba2fce083..1ac5b8551a4d 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h | |||
@@ -68,6 +68,7 @@ | |||
68 | static inline u32 rxe_crc32(struct rxe_dev *rxe, | 68 | static inline u32 rxe_crc32(struct rxe_dev *rxe, |
69 | u32 crc, void *next, size_t len) | 69 | u32 crc, void *next, size_t len) |
70 | { | 70 | { |
71 | u32 retval; | ||
71 | int err; | 72 | int err; |
72 | 73 | ||
73 | SHASH_DESC_ON_STACK(shash, rxe->tfm); | 74 | SHASH_DESC_ON_STACK(shash, rxe->tfm); |
@@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe, | |||
81 | return crc32_le(crc, next, len); | 82 | return crc32_le(crc, next, len); |
82 | } | 83 | } |
83 | 84 | ||
84 | return *(u32 *)shash_desc_ctx(shash); | 85 | retval = *(u32 *)shash_desc_ctx(shash); |
86 | barrier_data(shash_desc_ctx(shash)); | ||
87 | return retval; | ||
85 | } | 88 | } |
86 | 89 | ||
87 | int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); | 90 | int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); |
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 83d709e74dfb..073e66783f1d 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c | |||
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, | |||
740 | 740 | ||
741 | sge = ibwr->sg_list; | 741 | sge = ibwr->sg_list; |
742 | for (i = 0; i < num_sge; i++, sge++) { | 742 | for (i = 0; i < num_sge; i++, sge++) { |
743 | if (qp->is_user && copy_from_user(p, (__user void *) | 743 | memcpy(p, (void *)(uintptr_t)sge->addr, |
744 | (uintptr_t)sge->addr, sge->length)) | 744 | sge->length); |
745 | return -EFAULT; | ||
746 | |||
747 | else if (!qp->is_user) | ||
748 | memcpy(p, (void *)(uintptr_t)sge->addr, | ||
749 | sge->length); | ||
750 | 745 | ||
751 | p += sge->length; | 746 | p += sge->length; |
752 | } | 747 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0060b2f9f659..efe7402f4885 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -863,7 +863,6 @@ dev_stop: | |||
863 | set_bit(IPOIB_STOP_REAPER, &priv->flags); | 863 | set_bit(IPOIB_STOP_REAPER, &priv->flags); |
864 | cancel_delayed_work(&priv->ah_reap_task); | 864 | cancel_delayed_work(&priv->ah_reap_task); |
865 | set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); | 865 | set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); |
866 | napi_enable(&priv->napi); | ||
867 | ipoib_ib_dev_stop(dev); | 866 | ipoib_ib_dev_stop(dev); |
868 | return -1; | 867 | return -1; |
869 | } | 868 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index a115c0b7a310..1015a63de6ae 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -1596,6 +1596,8 @@ static void ipoib_dev_uninit_default(struct net_device *dev) | |||
1596 | 1596 | ||
1597 | ipoib_transport_dev_cleanup(dev); | 1597 | ipoib_transport_dev_cleanup(dev); |
1598 | 1598 | ||
1599 | netif_napi_del(&priv->napi); | ||
1600 | |||
1599 | ipoib_cm_dev_cleanup(dev); | 1601 | ipoib_cm_dev_cleanup(dev); |
1600 | 1602 | ||
1601 | kfree(priv->rx_ring); | 1603 | kfree(priv->rx_ring); |
@@ -1649,6 +1651,7 @@ out_rx_ring_cleanup: | |||
1649 | kfree(priv->rx_ring); | 1651 | kfree(priv->rx_ring); |
1650 | 1652 | ||
1651 | out: | 1653 | out: |
1654 | netif_napi_del(&priv->napi); | ||
1652 | return -ENOMEM; | 1655 | return -ENOMEM; |
1653 | } | 1656 | } |
1654 | 1657 | ||
@@ -2237,6 +2240,7 @@ event_failed: | |||
2237 | 2240 | ||
2238 | device_init_failed: | 2241 | device_init_failed: |
2239 | free_netdev(priv->dev); | 2242 | free_netdev(priv->dev); |
2243 | kfree(priv); | ||
2240 | 2244 | ||
2241 | alloc_mem_failed: | 2245 | alloc_mem_failed: |
2242 | return ERR_PTR(result); | 2246 | return ERR_PTR(result); |
@@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device) | |||
2277 | 2281 | ||
2278 | static void ipoib_remove_one(struct ib_device *device, void *client_data) | 2282 | static void ipoib_remove_one(struct ib_device *device, void *client_data) |
2279 | { | 2283 | { |
2280 | struct ipoib_dev_priv *priv, *tmp; | 2284 | struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; |
2281 | struct list_head *dev_list = client_data; | 2285 | struct list_head *dev_list = client_data; |
2282 | 2286 | ||
2283 | if (!dev_list) | 2287 | if (!dev_list) |
@@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) | |||
2300 | flush_workqueue(priv->wq); | 2304 | flush_workqueue(priv->wq); |
2301 | 2305 | ||
2302 | unregister_netdev(priv->dev); | 2306 | unregister_netdev(priv->dev); |
2303 | free_netdev(priv->dev); | 2307 | if (device->free_rdma_netdev) |
2308 | device->free_rdma_netdev(priv->dev); | ||
2309 | else | ||
2310 | free_netdev(priv->dev); | ||
2311 | |||
2312 | list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) | ||
2313 | kfree(cpriv); | ||
2314 | |||
2304 | kfree(priv); | 2315 | kfree(priv); |
2305 | } | 2316 | } |
2306 | 2317 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 36dc4fcaa3cd..081b33deff1b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
133 | snprintf(intf_name, sizeof intf_name, "%s.%04x", | 133 | snprintf(intf_name, sizeof intf_name, "%s.%04x", |
134 | ppriv->dev->name, pkey); | 134 | ppriv->dev->name, pkey); |
135 | 135 | ||
136 | if (!rtnl_trylock()) | ||
137 | return restart_syscall(); | ||
138 | |||
136 | priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); | 139 | priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); |
137 | if (!priv) | 140 | if (!priv) |
138 | return -ENOMEM; | 141 | return -ENOMEM; |
139 | 142 | ||
140 | if (!rtnl_trylock()) | ||
141 | return restart_syscall(); | ||
142 | |||
143 | down_write(&ppriv->vlan_rwsem); | 143 | down_write(&ppriv->vlan_rwsem); |
144 | 144 | ||
145 | /* | 145 | /* |
@@ -167,8 +167,10 @@ out: | |||
167 | 167 | ||
168 | rtnl_unlock(); | 168 | rtnl_unlock(); |
169 | 169 | ||
170 | if (result) | 170 | if (result) { |
171 | free_netdev(priv->dev); | 171 | free_netdev(priv->dev); |
172 | kfree(priv); | ||
173 | } | ||
172 | 174 | ||
173 | return result; | 175 | return result; |
174 | } | 176 | } |
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
209 | 211 | ||
210 | if (dev) { | 212 | if (dev) { |
211 | free_netdev(dev); | 213 | free_netdev(dev); |
214 | kfree(priv); | ||
212 | return 0; | 215 | return 0; |
213 | } | 216 | } |
214 | 217 | ||
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index e37d37273182..f600f3a7a3c6 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c | |||
@@ -248,7 +248,8 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev) | |||
248 | 248 | ||
249 | if (!btns_desc) { | 249 | if (!btns_desc) { |
250 | dev_err(dev, "ACPI Button Descriptors not found\n"); | 250 | dev_err(dev, "ACPI Button Descriptors not found\n"); |
251 | return ERR_PTR(-ENODEV); | 251 | button_info = ERR_PTR(-ENODEV); |
252 | goto out; | ||
252 | } | 253 | } |
253 | 254 | ||
254 | /* The first package describes the collection */ | 255 | /* The first package describes the collection */ |
@@ -264,24 +265,31 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev) | |||
264 | } | 265 | } |
265 | if (collection_uid == -1) { | 266 | if (collection_uid == -1) { |
266 | dev_err(dev, "Invalid Button Collection Descriptor\n"); | 267 | dev_err(dev, "Invalid Button Collection Descriptor\n"); |
267 | return ERR_PTR(-ENODEV); | 268 | button_info = ERR_PTR(-ENODEV); |
269 | goto out; | ||
268 | } | 270 | } |
269 | 271 | ||
270 | /* There are package.count - 1 buttons + 1 terminating empty entry */ | 272 | /* There are package.count - 1 buttons + 1 terminating empty entry */ |
271 | button_info = devm_kcalloc(dev, btns_desc->package.count, | 273 | button_info = devm_kcalloc(dev, btns_desc->package.count, |
272 | sizeof(*button_info), GFP_KERNEL); | 274 | sizeof(*button_info), GFP_KERNEL); |
273 | if (!button_info) | 275 | if (!button_info) { |
274 | return ERR_PTR(-ENOMEM); | 276 | button_info = ERR_PTR(-ENOMEM); |
277 | goto out; | ||
278 | } | ||
275 | 279 | ||
276 | /* Parse the button descriptors */ | 280 | /* Parse the button descriptors */ |
277 | for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) { | 281 | for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) { |
278 | if (soc_button_parse_btn_desc(dev, | 282 | if (soc_button_parse_btn_desc(dev, |
279 | &btns_desc->package.elements[i], | 283 | &btns_desc->package.elements[i], |
280 | collection_uid, | 284 | collection_uid, |
281 | &button_info[btn])) | 285 | &button_info[btn])) { |
282 | return ERR_PTR(-ENODEV); | 286 | button_info = ERR_PTR(-ENODEV); |
287 | goto out; | ||
288 | } | ||
283 | } | 289 | } |
284 | 290 | ||
291 | out: | ||
292 | kfree(buf.pointer); | ||
285 | return button_info; | 293 | return button_info; |
286 | } | 294 | } |
287 | 295 | ||
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index dea63e2db3e6..f5206e2c767e 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c | |||
@@ -31,9 +31,6 @@ | |||
31 | #define F54_GET_REPORT 1 | 31 | #define F54_GET_REPORT 1 |
32 | #define F54_FORCE_CAL 2 | 32 | #define F54_FORCE_CAL 2 |
33 | 33 | ||
34 | /* Fixed sizes of reports */ | ||
35 | #define F54_QUERY_LEN 27 | ||
36 | |||
37 | /* F54 capabilities */ | 34 | /* F54 capabilities */ |
38 | #define F54_CAP_BASELINE (1 << 2) | 35 | #define F54_CAP_BASELINE (1 << 2) |
39 | #define F54_CAP_IMAGE8 (1 << 3) | 36 | #define F54_CAP_IMAGE8 (1 << 3) |
@@ -95,7 +92,6 @@ struct rmi_f54_reports { | |||
95 | struct f54_data { | 92 | struct f54_data { |
96 | struct rmi_function *fn; | 93 | struct rmi_function *fn; |
97 | 94 | ||
98 | u8 qry[F54_QUERY_LEN]; | ||
99 | u8 num_rx_electrodes; | 95 | u8 num_rx_electrodes; |
100 | u8 num_tx_electrodes; | 96 | u8 num_tx_electrodes; |
101 | u8 capabilities; | 97 | u8 capabilities; |
@@ -632,22 +628,23 @@ static int rmi_f54_detect(struct rmi_function *fn) | |||
632 | { | 628 | { |
633 | int error; | 629 | int error; |
634 | struct f54_data *f54; | 630 | struct f54_data *f54; |
631 | u8 buf[6]; | ||
635 | 632 | ||
636 | f54 = dev_get_drvdata(&fn->dev); | 633 | f54 = dev_get_drvdata(&fn->dev); |
637 | 634 | ||
638 | error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, | 635 | error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, |
639 | &f54->qry, sizeof(f54->qry)); | 636 | buf, sizeof(buf)); |
640 | if (error) { | 637 | if (error) { |
641 | dev_err(&fn->dev, "%s: Failed to query F54 properties\n", | 638 | dev_err(&fn->dev, "%s: Failed to query F54 properties\n", |
642 | __func__); | 639 | __func__); |
643 | return error; | 640 | return error; |
644 | } | 641 | } |
645 | 642 | ||
646 | f54->num_rx_electrodes = f54->qry[0]; | 643 | f54->num_rx_electrodes = buf[0]; |
647 | f54->num_tx_electrodes = f54->qry[1]; | 644 | f54->num_tx_electrodes = buf[1]; |
648 | f54->capabilities = f54->qry[2]; | 645 | f54->capabilities = buf[2]; |
649 | f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8); | 646 | f54->clock_rate = buf[3] | (buf[4] << 8); |
650 | f54->family = f54->qry[5]; | 647 | f54->family = buf[5]; |
651 | 648 | ||
652 | rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n", | 649 | rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n", |
653 | f54->num_rx_electrodes); | 650 | f54->num_rx_electrodes); |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 09720d950686..f932a83b4990 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -723,6 +723,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { | |||
723 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), | 723 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), |
724 | }, | 724 | }, |
725 | }, | 725 | }, |
726 | { | ||
727 | /* Fujitsu UH554 laptop */ | ||
728 | .matches = { | ||
729 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
730 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), | ||
731 | }, | ||
732 | }, | ||
726 | { } | 733 | { } |
727 | }; | 734 | }; |
728 | 735 | ||
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index eb7fbe159963..929f8558bf1c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe) | |||
140 | } | 140 | } |
141 | 141 | ||
142 | #ifdef CONFIG_CLKSRC_MIPS_GIC | 142 | #ifdef CONFIG_CLKSRC_MIPS_GIC |
143 | u64 gic_read_count(void) | 143 | u64 notrace gic_read_count(void) |
144 | { | 144 | { |
145 | unsigned int hi, hi2, lo; | 145 | unsigned int hi, hi2, lo; |
146 | 146 | ||
@@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void) | |||
167 | return bits; | 167 | return bits; |
168 | } | 168 | } |
169 | 169 | ||
170 | void gic_write_compare(u64 cnt) | 170 | void notrace gic_write_compare(u64 cnt) |
171 | { | 171 | { |
172 | if (mips_cm_is64) { | 172 | if (mips_cm_is64) { |
173 | gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); | 173 | gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); |
@@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt) | |||
179 | } | 179 | } |
180 | } | 180 | } |
181 | 181 | ||
182 | void gic_write_cpu_compare(u64 cnt, int cpu) | 182 | void notrace gic_write_cpu_compare(u64 cnt, int cpu) |
183 | { | 183 | { |
184 | unsigned long flags; | 184 | unsigned long flags; |
185 | 185 | ||
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index bb3ac5fe5846..72a391e01011 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c | |||
@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = { | |||
142 | int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) | 142 | int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) |
143 | { | 143 | { |
144 | struct irq_domain *root_domain = | 144 | struct irq_domain *root_domain = |
145 | irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, | 145 | irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, |
146 | &xtensa_mx_irq_domain_ops, | 146 | &xtensa_mx_irq_domain_ops, |
147 | &xtensa_mx_irq_chip); | 147 | &xtensa_mx_irq_chip); |
148 | irq_set_default_host(root_domain); | 148 | irq_set_default_host(root_domain); |
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index 472ae1770964..f728755fa292 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c | |||
@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = { | |||
89 | int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) | 89 | int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) |
90 | { | 90 | { |
91 | struct irq_domain *root_domain = | 91 | struct irq_domain *root_domain = |
92 | irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, | 92 | irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, |
93 | &xtensa_irq_domain_ops, &xtensa_irq_chip); | 93 | &xtensa_irq_domain_ops, &xtensa_irq_chip); |
94 | irq_set_default_host(root_domain); | 94 | irq_set_default_host(root_domain); |
95 | return 0; | 95 | return 0; |
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c index 1548259297c1..2cfd9389ee96 100644 --- a/drivers/leds/leds-bcm6328.c +++ b/drivers/leds/leds-bcm6328.c | |||
@@ -242,7 +242,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg, | |||
242 | 242 | ||
243 | spin_lock_irqsave(lock, flags); | 243 | spin_lock_irqsave(lock, flags); |
244 | val = bcm6328_led_read(addr); | 244 | val = bcm6328_led_read(addr); |
245 | val |= (BIT(reg) << (((sel % 4) * 4) + 16)); | 245 | val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16)); |
246 | bcm6328_led_write(addr, val); | 246 | bcm6328_led_write(addr, val); |
247 | spin_unlock_irqrestore(lock, flags); | 247 | spin_unlock_irqrestore(lock, flags); |
248 | } | 248 | } |
@@ -269,7 +269,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg, | |||
269 | 269 | ||
270 | spin_lock_irqsave(lock, flags); | 270 | spin_lock_irqsave(lock, flags); |
271 | val = bcm6328_led_read(addr); | 271 | val = bcm6328_led_read(addr); |
272 | val |= (BIT(reg) << ((sel % 4) * 4)); | 272 | val |= (BIT(reg % 4) << ((sel % 4) * 4)); |
273 | bcm6328_led_write(addr, val); | 273 | bcm6328_led_write(addr, val); |
274 | spin_unlock_irqrestore(lock, flags); | 274 | spin_unlock_irqrestore(lock, flags); |
275 | } | 275 | } |
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index afa3b4099214..e95ea65380c8 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/sched/loadavg.h> | 20 | #include <linux/sched/loadavg.h> |
21 | #include <linux/leds.h> | 21 | #include <linux/leds.h> |
22 | #include <linux/reboot.h> | 22 | #include <linux/reboot.h> |
23 | #include <linux/suspend.h> | ||
24 | #include "../leds.h" | 23 | #include "../leds.h" |
25 | 24 | ||
26 | static int panic_heartbeats; | 25 | static int panic_heartbeats; |
@@ -163,30 +162,6 @@ static struct led_trigger heartbeat_led_trigger = { | |||
163 | .deactivate = heartbeat_trig_deactivate, | 162 | .deactivate = heartbeat_trig_deactivate, |
164 | }; | 163 | }; |
165 | 164 | ||
166 | static int heartbeat_pm_notifier(struct notifier_block *nb, | ||
167 | unsigned long pm_event, void *unused) | ||
168 | { | ||
169 | int rc; | ||
170 | |||
171 | switch (pm_event) { | ||
172 | case PM_SUSPEND_PREPARE: | ||
173 | case PM_HIBERNATION_PREPARE: | ||
174 | case PM_RESTORE_PREPARE: | ||
175 | led_trigger_unregister(&heartbeat_led_trigger); | ||
176 | break; | ||
177 | case PM_POST_SUSPEND: | ||
178 | case PM_POST_HIBERNATION: | ||
179 | case PM_POST_RESTORE: | ||
180 | rc = led_trigger_register(&heartbeat_led_trigger); | ||
181 | if (rc) | ||
182 | pr_err("could not re-register heartbeat trigger\n"); | ||
183 | break; | ||
184 | default: | ||
185 | break; | ||
186 | } | ||
187 | return NOTIFY_DONE; | ||
188 | } | ||
189 | |||
190 | static int heartbeat_reboot_notifier(struct notifier_block *nb, | 165 | static int heartbeat_reboot_notifier(struct notifier_block *nb, |
191 | unsigned long code, void *unused) | 166 | unsigned long code, void *unused) |
192 | { | 167 | { |
@@ -201,10 +176,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb, | |||
201 | return NOTIFY_DONE; | 176 | return NOTIFY_DONE; |
202 | } | 177 | } |
203 | 178 | ||
204 | static struct notifier_block heartbeat_pm_nb = { | ||
205 | .notifier_call = heartbeat_pm_notifier, | ||
206 | }; | ||
207 | |||
208 | static struct notifier_block heartbeat_reboot_nb = { | 179 | static struct notifier_block heartbeat_reboot_nb = { |
209 | .notifier_call = heartbeat_reboot_notifier, | 180 | .notifier_call = heartbeat_reboot_notifier, |
210 | }; | 181 | }; |
@@ -221,14 +192,12 @@ static int __init heartbeat_trig_init(void) | |||
221 | atomic_notifier_chain_register(&panic_notifier_list, | 192 | atomic_notifier_chain_register(&panic_notifier_list, |
222 | &heartbeat_panic_nb); | 193 | &heartbeat_panic_nb); |
223 | register_reboot_notifier(&heartbeat_reboot_nb); | 194 | register_reboot_notifier(&heartbeat_reboot_nb); |
224 | register_pm_notifier(&heartbeat_pm_nb); | ||
225 | } | 195 | } |
226 | return rc; | 196 | return rc; |
227 | } | 197 | } |
228 | 198 | ||
229 | static void __exit heartbeat_trig_exit(void) | 199 | static void __exit heartbeat_trig_exit(void) |
230 | { | 200 | { |
231 | unregister_pm_notifier(&heartbeat_pm_nb); | ||
232 | unregister_reboot_notifier(&heartbeat_reboot_nb); | 201 | unregister_reboot_notifier(&heartbeat_reboot_nb); |
233 | atomic_notifier_chain_unregister(&panic_notifier_list, | 202 | atomic_notifier_chain_unregister(&panic_notifier_list, |
234 | &heartbeat_panic_nb); | 203 | &heartbeat_panic_nb); |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 7910bfe50da4..93b181088168 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
@@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic) | |||
1105 | static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) | 1105 | static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) |
1106 | { | 1106 | { |
1107 | struct bio *bio; | 1107 | struct bio *bio; |
1108 | spin_lock_irq(&ic->endio_wait.lock); | 1108 | unsigned long flags; |
1109 | |||
1110 | spin_lock_irqsave(&ic->endio_wait.lock, flags); | ||
1109 | bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); | 1111 | bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); |
1110 | bio_list_add(&ic->flush_bio_list, bio); | 1112 | bio_list_add(&ic->flush_bio_list, bio); |
1111 | spin_unlock_irq(&ic->endio_wait.lock); | 1113 | spin_unlock_irqrestore(&ic->endio_wait.lock, flags); |
1114 | |||
1112 | queue_work(ic->commit_wq, &ic->commit_work); | 1115 | queue_work(ic->commit_wq, &ic->commit_work); |
1113 | } | 1116 | } |
1114 | 1117 | ||
@@ -3040,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
3040 | ti->error = "The device is too small"; | 3043 | ti->error = "The device is too small"; |
3041 | goto bad; | 3044 | goto bad; |
3042 | } | 3045 | } |
3046 | if (ti->len > ic->provided_data_sectors) { | ||
3047 | r = -EINVAL; | ||
3048 | ti->error = "Not enough provided sectors for requested mapping size"; | ||
3049 | goto bad; | ||
3050 | } | ||
3043 | 3051 | ||
3044 | if (!buffer_sectors) | 3052 | if (!buffer_sectors) |
3045 | buffer_sectors = 1; | 3053 | buffer_sectors = 1; |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3702e502466d..8d5ca30f6551 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region, | |||
317 | else if (op == REQ_OP_WRITE_SAME) | 317 | else if (op == REQ_OP_WRITE_SAME) |
318 | special_cmd_max_sectors = q->limits.max_write_same_sectors; | 318 | special_cmd_max_sectors = q->limits.max_write_same_sectors; |
319 | if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || | 319 | if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || |
320 | op == REQ_OP_WRITE_SAME) && | 320 | op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) { |
321 | special_cmd_max_sectors == 0) { | 321 | atomic_inc(&io->count); |
322 | dec_count(io, region, -EOPNOTSUPP); | 322 | dec_count(io, region, -EOPNOTSUPP); |
323 | return; | 323 | return; |
324 | } | 324 | } |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index e61c45047c25..4da8858856fb 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list) | |||
145 | 145 | ||
146 | struct dm_raid1_bio_record { | 146 | struct dm_raid1_bio_record { |
147 | struct mirror *m; | 147 | struct mirror *m; |
148 | /* if details->bi_bdev == NULL, details were not saved */ | ||
148 | struct dm_bio_details details; | 149 | struct dm_bio_details details; |
149 | region_t write_region; | 150 | region_t write_region; |
150 | }; | 151 | }; |
@@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) | |||
1198 | struct dm_raid1_bio_record *bio_record = | 1199 | struct dm_raid1_bio_record *bio_record = |
1199 | dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); | 1200 | dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); |
1200 | 1201 | ||
1202 | bio_record->details.bi_bdev = NULL; | ||
1203 | |||
1201 | if (rw == WRITE) { | 1204 | if (rw == WRITE) { |
1202 | /* Save region for mirror_end_io() handler */ | 1205 | /* Save region for mirror_end_io() handler */ |
1203 | bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); | 1206 | bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); |
@@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
1256 | } | 1259 | } |
1257 | 1260 | ||
1258 | if (error == -EOPNOTSUPP) | 1261 | if (error == -EOPNOTSUPP) |
1259 | return error; | 1262 | goto out; |
1260 | 1263 | ||
1261 | if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) | 1264 | if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) |
1262 | return error; | 1265 | goto out; |
1263 | 1266 | ||
1264 | if (unlikely(error)) { | 1267 | if (unlikely(error)) { |
1268 | if (!bio_record->details.bi_bdev) { | ||
1269 | /* | ||
1270 | * There wasn't enough memory to record necessary | ||
1271 | * information for a retry or there was no other | ||
1272 | * mirror in-sync. | ||
1273 | */ | ||
1274 | DMERR_LIMIT("Mirror read failed."); | ||
1275 | return -EIO; | ||
1276 | } | ||
1277 | |||
1265 | m = bio_record->m; | 1278 | m = bio_record->m; |
1266 | 1279 | ||
1267 | DMERR("Mirror read failed from %s. Trying alternative device.", | 1280 | DMERR("Mirror read failed from %s. Trying alternative device.", |
@@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
1277 | bd = &bio_record->details; | 1290 | bd = &bio_record->details; |
1278 | 1291 | ||
1279 | dm_bio_restore(bd, bio); | 1292 | dm_bio_restore(bd, bio); |
1293 | bio_record->details.bi_bdev = NULL; | ||
1280 | bio->bi_error = 0; | 1294 | bio->bi_error = 0; |
1281 | 1295 | ||
1282 | queue_bio(ms, bio, rw); | 1296 | queue_bio(ms, bio, rw); |
@@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
1285 | DMERR("All replicated volumes dead, failing I/O"); | 1299 | DMERR("All replicated volumes dead, failing I/O"); |
1286 | } | 1300 | } |
1287 | 1301 | ||
1302 | out: | ||
1303 | bio_record->details.bi_bdev = NULL; | ||
1304 | |||
1288 | return error; | 1305 | return error; |
1289 | } | 1306 | } |
1290 | 1307 | ||
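dm-raid1 now uses details.bi_bdev as a sentinel: mirror_map() clears it up front, the end_io path only restores and retries a failed read when details were actually saved, and the field is cleared again once consumed. A minimal sketch of the sentinel-field pattern; the record struct and helpers below are illustrative, not the dm interfaces:

	struct bio_record {
		struct block_device *saved_bdev;	/* NULL => nothing was saved */
		/* ... saved iterator, sector, ... */
	};

	static void map_one(struct bio_record *rec)
	{
		rec->saved_bdev = NULL;			/* assume nothing saved */
		if (retry_possible())			/* illustrative */
			save_details(rec);		/* sets saved_bdev != NULL */
	}

	static int end_one(struct bio_record *rec, int error)
	{
		if (!error || !rec->saved_bdev)
			return error;			/* nothing to restore */

		restore_details(rec);			/* illustrative */
		rec->saved_bdev = NULL;			/* consume the record exactly once */
		return requeue_for_retry();		/* illustrative */
	}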
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig index 4e25a950ae6f..43428cec3a01 100644 --- a/drivers/media/cec/Kconfig +++ b/drivers/media/cec/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config MEDIA_CEC_RC | 1 | config MEDIA_CEC_RC |
2 | bool "HDMI CEC RC integration" | 2 | bool "HDMI CEC RC integration" |
3 | depends on CEC_CORE && RC_CORE | 3 | depends on CEC_CORE && RC_CORE |
4 | depends on CEC_CORE=m || RC_CORE=y | ||
4 | ---help--- | 5 | ---help--- |
5 | Pass on CEC remote control messages to the RC framework. | 6 | Pass on CEC remote control messages to the RC framework. |
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index 0860fb458757..999926f731c8 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c | |||
@@ -271,16 +271,10 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh, | |||
271 | bool block, struct cec_msg __user *parg) | 271 | bool block, struct cec_msg __user *parg) |
272 | { | 272 | { |
273 | struct cec_msg msg = {}; | 273 | struct cec_msg msg = {}; |
274 | long err = 0; | 274 | long err; |
275 | 275 | ||
276 | if (copy_from_user(&msg, parg, sizeof(msg))) | 276 | if (copy_from_user(&msg, parg, sizeof(msg))) |
277 | return -EFAULT; | 277 | return -EFAULT; |
278 | mutex_lock(&adap->lock); | ||
279 | if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR) | ||
280 | err = -ENONET; | ||
281 | mutex_unlock(&adap->lock); | ||
282 | if (err) | ||
283 | return err; | ||
284 | 278 | ||
285 | err = cec_receive_msg(fh, &msg, block); | 279 | err = cec_receive_msg(fh, &msg, block); |
286 | if (err) | 280 | if (err) |
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index acef4eca269f..3251cba89e8f 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c | |||
@@ -223,7 +223,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val) | |||
223 | static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, | 223 | static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, |
224 | u8 mask, u8 val) | 224 | u8 mask, u8 val) |
225 | { | 225 | { |
226 | i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); | 226 | i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1); |
227 | } | 227 | } |
228 | 228 | ||
229 | static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) | 229 | static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) |
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c index e12ec50bf0bf..90a5f8fd5eea 100644 --- a/drivers/media/rc/sir_ir.c +++ b/drivers/media/rc/sir_ir.c | |||
@@ -183,9 +183,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id) | |||
183 | static unsigned long delt; | 183 | static unsigned long delt; |
184 | unsigned long deltintr; | 184 | unsigned long deltintr; |
185 | unsigned long flags; | 185 | unsigned long flags; |
186 | int counter = 0; | ||
186 | int iir, lsr; | 187 | int iir, lsr; |
187 | 188 | ||
188 | while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { | 189 | while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { |
190 | if (++counter > 256) { | ||
191 | dev_err(&sir_ir_dev->dev, "Trapped in interrupt"); | ||
192 | break; | ||
193 | } | ||
194 | |||
189 | switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */ | 195 | switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */ |
190 | case UART_IIR_MSI: | 196 | case UART_IIR_MSI: |
191 | (void)inb(io + UART_MSR); | 197 | (void)inb(io + UART_MSR); |
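The sir_ir change caps how long the interrupt handler keeps draining UART_IIR: if the identification register never clears, the handler logs once and bails instead of spinning forever in interrupt context. A minimal sketch of a bounded service loop; the 256 limit mirrors the patch, the status/device helpers are illustrative:

	#define MAX_IRQ_ITERATIONS	256

	static irqreturn_t bounded_irq(int irq, void *dev_id)
	{
		int iterations = 0;

		while (device_has_pending_work(dev_id)) {	/* illustrative status poll */
			if (++iterations > MAX_IRQ_ITERATIONS) {
				dev_err(to_dev(dev_id), "stuck interrupt, giving up\n");
				break;
			}
			service_one_event(dev_id);		/* illustrative */
		}

		return IRQ_HANDLED;
	}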
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c index 71bd68548c9c..4126552c9055 100644 --- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c +++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c | |||
@@ -336,6 +336,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv) | |||
336 | serio_set_drvdata(serio, rain); | 336 | serio_set_drvdata(serio, rain); |
337 | INIT_WORK(&rain->work, rain_irq_work_handler); | 337 | INIT_WORK(&rain->work, rain_irq_work_handler); |
338 | mutex_init(&rain->write_lock); | 338 | mutex_init(&rain->write_lock); |
339 | spin_lock_init(&rain->buf_lock); | ||
339 | 340 | ||
340 | err = serio_open(serio, drv); | 341 | err = serio_open(serio, drv); |
341 | if (err) | 342 | if (err) |
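The rainshadow-cec fix is the usual ordering rule: every lock a callback might take must be initialized before the callback can fire, here buf_lock before serio_open() enables the receive path that schedules work. A minimal sketch of that ordering, with illustrative names:

	struct rain_like {
		struct work_struct work;
		struct mutex write_lock;
		spinlock_t buf_lock;
	};

	static int connect_like(struct rain_like *r)
	{
		INIT_WORK(&r->work, work_handler);	/* illustrative handler */
		mutex_init(&r->write_lock);
		spin_lock_init(&r->buf_lock);		/* before anything can schedule the work */

		return start_receiving(r);		/* illustrative; callbacks may fire from here on */
	}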
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 94afbbf92807..c0175ea7e7ad 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
@@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs); | |||
868 | 868 | ||
869 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) | 869 | void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) |
870 | { | 870 | { |
871 | if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) | 871 | if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) |
872 | return NULL; | 872 | return NULL; |
873 | 873 | ||
874 | return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); | 874 | return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); |
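The videobuf2 fix is a classic off-by-one: with num_planes planes the valid indices are 0 .. num_planes - 1, so the guard has to reject plane_no == num_planes as well. A tiny generic sketch of the corrected bounds check:

	/* valid indices are 0 .. nr_items - 1 */
	static void *item_addr(void **items, unsigned int nr_items, unsigned int idx)
	{
		if (idx >= nr_items || !items[idx])
			return NULL;

		return items[idx];
	}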
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 75488e65cd96..8d46e3ad9529 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c | |||
@@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona, | |||
245 | int ret; | 245 | int ret; |
246 | 246 | ||
247 | ret = regmap_read_poll_timeout(arizona->regmap, | 247 | ret = regmap_read_poll_timeout(arizona->regmap, |
248 | ARIZONA_INTERRUPT_RAW_STATUS_5, val, | 248 | reg, val, ((val & mask) == target), |
249 | ((val & mask) == target), | ||
250 | ARIZONA_REG_POLL_DELAY_US, | 249 | ARIZONA_REG_POLL_DELAY_US, |
251 | timeout_ms * 1000); | 250 | timeout_ms * 1000); |
252 | if (ret) | 251 | if (ret) |
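The arizona fix makes arizona_poll_reg() poll the register it was asked about instead of a hard-coded one. For reference, a hedged sketch of regmap_read_poll_timeout() usage, assuming a struct regmap *map and struct device *dev in scope; STATUS_REG and READY_MASK are illustrative, while the macro itself is the real regmap helper (poll interval and timeout in microseconds, returns 0 or a negative error such as -ETIMEDOUT):

	unsigned int val;
	int ret;

	ret = regmap_read_poll_timeout(map, STATUS_REG, val,
				       (val & READY_MASK) == READY_MASK,
				       1000,		/* sleep between reads, us */
				       10 * 1000);	/* total timeout, us */
	if (ret)
		dev_err(dev, "register never became ready: %d\n", ret);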
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 1842ed341af1..de962c2d5e00 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
@@ -210,6 +210,15 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, | |||
210 | int i; | 210 | int i; |
211 | bool use_desc_chain_mode = true; | 211 | bool use_desc_chain_mode = true; |
212 | 212 | ||
213 | /* | ||
214 | * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been | ||
215 | * reported. For some strange reason this occurs in descriptor | ||
216 | * chain mode only. So let's fall back to bounce buffer mode | ||
217 | * for command SD_IO_RW_EXTENDED. | ||
218 | */ | ||
219 | if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) | ||
220 | return; | ||
221 | |||
213 | for_each_sg(data->sg, sg, data->sg_len, i) | 222 | for_each_sg(data->sg, sg, data->sg_len, i) |
214 | /* check for 8 byte alignment */ | 223 | /* check for 8 byte alignment */ |
215 | if (sg->offset & 7) { | 224 | if (sg->offset & 7) { |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index b44a6aeb346d..e5386ab706ec 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -90,10 +90,13 @@ enum ad_link_speed_type { | |||
90 | AD_LINK_SPEED_100MBPS, | 90 | AD_LINK_SPEED_100MBPS, |
91 | AD_LINK_SPEED_1000MBPS, | 91 | AD_LINK_SPEED_1000MBPS, |
92 | AD_LINK_SPEED_2500MBPS, | 92 | AD_LINK_SPEED_2500MBPS, |
93 | AD_LINK_SPEED_5000MBPS, | ||
93 | AD_LINK_SPEED_10000MBPS, | 94 | AD_LINK_SPEED_10000MBPS, |
95 | AD_LINK_SPEED_14000MBPS, | ||
94 | AD_LINK_SPEED_20000MBPS, | 96 | AD_LINK_SPEED_20000MBPS, |
95 | AD_LINK_SPEED_25000MBPS, | 97 | AD_LINK_SPEED_25000MBPS, |
96 | AD_LINK_SPEED_40000MBPS, | 98 | AD_LINK_SPEED_40000MBPS, |
99 | AD_LINK_SPEED_50000MBPS, | ||
97 | AD_LINK_SPEED_56000MBPS, | 100 | AD_LINK_SPEED_56000MBPS, |
98 | AD_LINK_SPEED_100000MBPS, | 101 | AD_LINK_SPEED_100000MBPS, |
99 | }; | 102 | }; |
@@ -259,10 +262,13 @@ static inline int __check_agg_selection_timer(struct port *port) | |||
259 | * %AD_LINK_SPEED_100MBPS, | 262 | * %AD_LINK_SPEED_100MBPS, |
260 | * %AD_LINK_SPEED_1000MBPS, | 263 | * %AD_LINK_SPEED_1000MBPS, |
261 | * %AD_LINK_SPEED_2500MBPS, | 264 | * %AD_LINK_SPEED_2500MBPS, |
265 | * %AD_LINK_SPEED_5000MBPS, | ||
262 | * %AD_LINK_SPEED_10000MBPS | 266 | * %AD_LINK_SPEED_10000MBPS |
267 | * %AD_LINK_SPEED_14000MBPS, | ||
263 | * %AD_LINK_SPEED_20000MBPS | 268 | * %AD_LINK_SPEED_20000MBPS |
264 | * %AD_LINK_SPEED_25000MBPS | 269 | * %AD_LINK_SPEED_25000MBPS |
265 | * %AD_LINK_SPEED_40000MBPS | 270 | * %AD_LINK_SPEED_40000MBPS |
271 | * %AD_LINK_SPEED_50000MBPS | ||
266 | * %AD_LINK_SPEED_56000MBPS | 272 | * %AD_LINK_SPEED_56000MBPS |
267 | * %AD_LINK_SPEED_100000MBPS | 273 | * %AD_LINK_SPEED_100000MBPS |
268 | */ | 274 | */ |
@@ -296,10 +302,18 @@ static u16 __get_link_speed(struct port *port) | |||
296 | speed = AD_LINK_SPEED_2500MBPS; | 302 | speed = AD_LINK_SPEED_2500MBPS; |
297 | break; | 303 | break; |
298 | 304 | ||
305 | case SPEED_5000: | ||
306 | speed = AD_LINK_SPEED_5000MBPS; | ||
307 | break; | ||
308 | |||
299 | case SPEED_10000: | 309 | case SPEED_10000: |
300 | speed = AD_LINK_SPEED_10000MBPS; | 310 | speed = AD_LINK_SPEED_10000MBPS; |
301 | break; | 311 | break; |
302 | 312 | ||
313 | case SPEED_14000: | ||
314 | speed = AD_LINK_SPEED_14000MBPS; | ||
315 | break; | ||
316 | |||
303 | case SPEED_20000: | 317 | case SPEED_20000: |
304 | speed = AD_LINK_SPEED_20000MBPS; | 318 | speed = AD_LINK_SPEED_20000MBPS; |
305 | break; | 319 | break; |
@@ -312,6 +326,10 @@ static u16 __get_link_speed(struct port *port) | |||
312 | speed = AD_LINK_SPEED_40000MBPS; | 326 | speed = AD_LINK_SPEED_40000MBPS; |
313 | break; | 327 | break; |
314 | 328 | ||
329 | case SPEED_50000: | ||
330 | speed = AD_LINK_SPEED_50000MBPS; | ||
331 | break; | ||
332 | |||
315 | case SPEED_56000: | 333 | case SPEED_56000: |
316 | speed = AD_LINK_SPEED_56000MBPS; | 334 | speed = AD_LINK_SPEED_56000MBPS; |
317 | break; | 335 | break; |
@@ -707,9 +725,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) | |||
707 | case AD_LINK_SPEED_2500MBPS: | 725 | case AD_LINK_SPEED_2500MBPS: |
708 | bandwidth = nports * 2500; | 726 | bandwidth = nports * 2500; |
709 | break; | 727 | break; |
728 | case AD_LINK_SPEED_5000MBPS: | ||
729 | bandwidth = nports * 5000; | ||
730 | break; | ||
710 | case AD_LINK_SPEED_10000MBPS: | 731 | case AD_LINK_SPEED_10000MBPS: |
711 | bandwidth = nports * 10000; | 732 | bandwidth = nports * 10000; |
712 | break; | 733 | break; |
734 | case AD_LINK_SPEED_14000MBPS: | ||
735 | bandwidth = nports * 14000; | ||
736 | break; | ||
713 | case AD_LINK_SPEED_20000MBPS: | 737 | case AD_LINK_SPEED_20000MBPS: |
714 | bandwidth = nports * 20000; | 738 | bandwidth = nports * 20000; |
715 | break; | 739 | break; |
@@ -719,6 +743,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) | |||
719 | case AD_LINK_SPEED_40000MBPS: | 743 | case AD_LINK_SPEED_40000MBPS: |
720 | bandwidth = nports * 40000; | 744 | bandwidth = nports * 40000; |
721 | break; | 745 | break; |
746 | case AD_LINK_SPEED_50000MBPS: | ||
747 | bandwidth = nports * 50000; | ||
748 | break; | ||
722 | case AD_LINK_SPEED_56000MBPS: | 749 | case AD_LINK_SPEED_56000MBPS: |
723 | bandwidth = nports * 56000; | 750 | bandwidth = nports * 56000; |
724 | break; | 751 | break; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2359478b977f..8ab6bdbe1682 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -4192,7 +4192,6 @@ static void bond_destructor(struct net_device *bond_dev) | |||
4192 | struct bonding *bond = netdev_priv(bond_dev); | 4192 | struct bonding *bond = netdev_priv(bond_dev); |
4193 | if (bond->wq) | 4193 | if (bond->wq) |
4194 | destroy_workqueue(bond->wq); | 4194 | destroy_workqueue(bond->wq); |
4195 | free_netdev(bond_dev); | ||
4196 | } | 4195 | } |
4197 | 4196 | ||
4198 | void bond_setup(struct net_device *bond_dev) | 4197 | void bond_setup(struct net_device *bond_dev) |
@@ -4212,7 +4211,8 @@ void bond_setup(struct net_device *bond_dev) | |||
4212 | bond_dev->netdev_ops = &bond_netdev_ops; | 4211 | bond_dev->netdev_ops = &bond_netdev_ops; |
4213 | bond_dev->ethtool_ops = &bond_ethtool_ops; | 4212 | bond_dev->ethtool_ops = &bond_ethtool_ops; |
4214 | 4213 | ||
4215 | bond_dev->destructor = bond_destructor; | 4214 | bond_dev->needs_free_netdev = true; |
4215 | bond_dev->priv_destructor = bond_destructor; | ||
4216 | 4216 | ||
4217 | SET_NETDEV_DEVTYPE(bond_dev, &bond_type); | 4217 | SET_NETDEV_DEVTYPE(bond_dev, &bond_type); |
4218 | 4218 | ||
@@ -4736,7 +4736,7 @@ int bond_create(struct net *net, const char *name) | |||
4736 | 4736 | ||
4737 | rtnl_unlock(); | 4737 | rtnl_unlock(); |
4738 | if (res < 0) | 4738 | if (res < 0) |
4739 | bond_destructor(bond_dev); | 4739 | free_netdev(bond_dev); |
4740 | return res; | 4740 | return res; |
4741 | } | 4741 | } |
4742 | 4742 | ||
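The bonding change, like the caif, CAN, dummy and cxgb4 hunks further down, follows the 4.12 conversion away from dev->destructor: drivers set needs_free_netdev so the core calls free_netdev() for them, and an optional priv_destructor releases only driver-private state. Error paths that run before registration succeeded call free_netdev() directly. A minimal sketch with a fictional foo driver (all names illustrative):

	static void foo_priv_destructor(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		kfree(priv->extra);	/* private state only; never free_netdev() here */
	}

	static void foo_setup(struct net_device *dev)
	{
		dev->netdev_ops = &foo_netdev_ops;
		dev->needs_free_netdev = true;		/* core frees the netdev itself */
		dev->priv_destructor = foo_priv_destructor;
	}

	static int foo_create(struct net_device *dev)
	{
		int err = register_netdev(dev);

		if (err)
			free_netdev(dev);	/* never registered, so free it by hand */
		return err;
	}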
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index ddabce759456..71a7c3b44fdd 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c | |||
@@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev) | |||
1121 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 1121 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
1122 | dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; | 1122 | dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; |
1123 | dev->priv_flags |= IFF_NO_QUEUE; | 1123 | dev->priv_flags |= IFF_NO_QUEUE; |
1124 | dev->destructor = free_netdev; | 1124 | dev->needs_free_netdev = true; |
1125 | dev->netdev_ops = &cfhsi_netdevops; | 1125 | dev->netdev_ops = &cfhsi_netdevops; |
1126 | for (i = 0; i < CFHSI_PRIO_LAST; ++i) | 1126 | for (i = 0; i < CFHSI_PRIO_LAST; ++i) |
1127 | skb_queue_head_init(&cfhsi->qhead[i]); | 1127 | skb_queue_head_init(&cfhsi->qhead[i]); |
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index c2dea4916e5d..76e1d3545105 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c | |||
@@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev) | |||
428 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 428 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
429 | dev->mtu = CAIF_MAX_MTU; | 429 | dev->mtu = CAIF_MAX_MTU; |
430 | dev->priv_flags |= IFF_NO_QUEUE; | 430 | dev->priv_flags |= IFF_NO_QUEUE; |
431 | dev->destructor = free_netdev; | 431 | dev->needs_free_netdev = true; |
432 | skb_queue_head_init(&serdev->head); | 432 | skb_queue_head_init(&serdev->head); |
433 | serdev->common.link_select = CAIF_LINK_LOW_LATENCY; | 433 | serdev->common.link_select = CAIF_LINK_LOW_LATENCY; |
434 | serdev->common.use_frag = true; | 434 | serdev->common.use_frag = true; |
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 3a529fbe539f..fc21afe852b9 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c | |||
@@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev) | |||
712 | dev->flags = IFF_NOARP | IFF_POINTOPOINT; | 712 | dev->flags = IFF_NOARP | IFF_POINTOPOINT; |
713 | dev->priv_flags |= IFF_NO_QUEUE; | 713 | dev->priv_flags |= IFF_NO_QUEUE; |
714 | dev->mtu = SPI_MAX_PAYLOAD_SIZE; | 714 | dev->mtu = SPI_MAX_PAYLOAD_SIZE; |
715 | dev->destructor = free_netdev; | 715 | dev->needs_free_netdev = true; |
716 | skb_queue_head_init(&cfspi->qhead); | 716 | skb_queue_head_init(&cfspi->qhead); |
717 | skb_queue_head_init(&cfspi->chead); | 717 | skb_queue_head_init(&cfspi->chead); |
718 | cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; | 718 | cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; |
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 6122768c8644..1794ea0420b7 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c | |||
@@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev) | |||
617 | netdev->tx_queue_len = 100; | 617 | netdev->tx_queue_len = 100; |
618 | netdev->flags = IFF_POINTOPOINT | IFF_NOARP; | 618 | netdev->flags = IFF_POINTOPOINT | IFF_NOARP; |
619 | netdev->mtu = CFV_DEF_MTU_SIZE; | 619 | netdev->mtu = CFV_DEF_MTU_SIZE; |
620 | netdev->destructor = free_netdev; | 620 | netdev->needs_free_netdev = true; |
621 | } | 621 | } |
622 | 622 | ||
623 | /* Create debugfs counters for the device */ | 623 | /* Create debugfs counters for the device */ |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 611d16a7061d..ae4ed03dc642 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
@@ -391,6 +391,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, | |||
391 | can_update_state_error_stats(dev, new_state); | 391 | can_update_state_error_stats(dev, new_state); |
392 | priv->state = new_state; | 392 | priv->state = new_state; |
393 | 393 | ||
394 | if (!cf) | ||
395 | return; | ||
396 | |||
394 | if (unlikely(new_state == CAN_STATE_BUS_OFF)) { | 397 | if (unlikely(new_state == CAN_STATE_BUS_OFF)) { |
395 | cf->can_id |= CAN_ERR_BUSOFF; | 398 | cf->can_id |= CAN_ERR_BUSOFF; |
396 | return; | 399 | return; |
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 0d57be5ea97b..85268be0c913 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c | |||
@@ -489,7 +489,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, | |||
489 | struct pucan_rx_msg *msg_list, int msg_count) | 489 | struct pucan_rx_msg *msg_list, int msg_count) |
490 | { | 490 | { |
491 | void *msg_ptr = msg_list; | 491 | void *msg_ptr = msg_list; |
492 | int i, msg_size; | 492 | int i, msg_size = 0; |
493 | 493 | ||
494 | for (i = 0; i < msg_count; i++) { | 494 | for (i = 0; i < msg_count; i++) { |
495 | msg_size = peak_canfd_handle_msg(priv, msg_ptr); | 495 | msg_size = peak_canfd_handle_msg(priv, msg_ptr); |
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index eb7173713bbc..6a6e896e52fa 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c | |||
@@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev) | |||
417 | static void slc_free_netdev(struct net_device *dev) | 417 | static void slc_free_netdev(struct net_device *dev) |
418 | { | 418 | { |
419 | int i = dev->base_addr; | 419 | int i = dev->base_addr; |
420 | free_netdev(dev); | 420 | |
421 | slcan_devs[i] = NULL; | 421 | slcan_devs[i] = NULL; |
422 | } | 422 | } |
423 | 423 | ||
@@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = { | |||
436 | static void slc_setup(struct net_device *dev) | 436 | static void slc_setup(struct net_device *dev) |
437 | { | 437 | { |
438 | dev->netdev_ops = &slc_netdev_ops; | 438 | dev->netdev_ops = &slc_netdev_ops; |
439 | dev->destructor = slc_free_netdev; | 439 | dev->needs_free_netdev = true; |
440 | dev->priv_destructor = slc_free_netdev; | ||
440 | 441 | ||
441 | dev->hard_header_len = 0; | 442 | dev->hard_header_len = 0; |
442 | dev->addr_len = 0; | 443 | dev->addr_len = 0; |
@@ -761,8 +762,6 @@ static void __exit slcan_exit(void) | |||
761 | if (sl->tty) { | 762 | if (sl->tty) { |
762 | printk(KERN_ERR "%s: tty discipline still running\n", | 763 | printk(KERN_ERR "%s: tty discipline still running\n", |
763 | dev->name); | 764 | dev->name); |
764 | /* Intentionally leak the control block. */ | ||
765 | dev->destructor = NULL; | ||
766 | } | 765 | } |
767 | 766 | ||
768 | unregister_netdev(dev); | 767 | unregister_netdev(dev); |
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index eecee7f8dfb7..afcc1312dbaf 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c | |||
@@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev) | |||
265 | sizeof(*dm), | 265 | sizeof(*dm), |
266 | 1000); | 266 | 1000); |
267 | 267 | ||
268 | kfree(dm); | ||
269 | |||
268 | return rc; | 270 | return rc; |
269 | } | 271 | } |
270 | 272 | ||
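gs_cmd_reset() was leaking the kmalloc'ed device-mode buffer on every call; usb_control_msg() copies out of the buffer, so it can be freed as soon as the call returns, on success and on error alike. A minimal allocate / submit / free sketch; udev, REQ_MODE, MODE_RESET and struct mode_msg are illustrative, the USB helpers are the real API:

	struct mode_msg *dm;
	int rc;

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;

	dm->mode = MODE_RESET;			/* illustrative payload */

	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			     REQ_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			     0, 0, dm, sizeof(*dm), 1000);

	kfree(dm);				/* free on both paths */
	return rc < 0 ? rc : 0;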
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 57913dbbae0a..1ca76e03e965 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |||
@@ -908,8 +908,6 @@ static int peak_usb_probe(struct usb_interface *intf, | |||
908 | const struct peak_usb_adapter *peak_usb_adapter = NULL; | 908 | const struct peak_usb_adapter *peak_usb_adapter = NULL; |
909 | int i, err = -ENOMEM; | 909 | int i, err = -ENOMEM; |
910 | 910 | ||
911 | usb_dev = interface_to_usbdev(intf); | ||
912 | |||
913 | /* get corresponding PCAN-USB adapter */ | 911 | /* get corresponding PCAN-USB adapter */ |
914 | for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) | 912 | for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) |
915 | if (peak_usb_adapters_list[i]->device_id == usb_id_product) { | 913 | if (peak_usb_adapters_list[i]->device_id == usb_id_product) { |
@@ -920,7 +918,7 @@ static int peak_usb_probe(struct usb_interface *intf, | |||
920 | if (!peak_usb_adapter) { | 918 | if (!peak_usb_adapter) { |
921 | /* should never come except device_id bad usage in this file */ | 919 | /* should never come except device_id bad usage in this file */ |
922 | pr_err("%s: didn't find device id. 0x%x in devices list\n", | 920 | pr_err("%s: didn't find device id. 0x%x in devices list\n", |
923 | PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct); | 921 | PCAN_USB_DRIVER_NAME, usb_id_product); |
924 | return -ENODEV; | 922 | return -ENODEV; |
925 | } | 923 | } |
926 | 924 | ||
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index facca33d53e9..a8cb33264ff1 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c | |||
@@ -152,7 +152,7 @@ static const struct net_device_ops vcan_netdev_ops = { | |||
152 | static void vcan_setup(struct net_device *dev) | 152 | static void vcan_setup(struct net_device *dev) |
153 | { | 153 | { |
154 | dev->type = ARPHRD_CAN; | 154 | dev->type = ARPHRD_CAN; |
155 | dev->mtu = CAN_MTU; | 155 | dev->mtu = CANFD_MTU; |
156 | dev->hard_header_len = 0; | 156 | dev->hard_header_len = 0; |
157 | dev->addr_len = 0; | 157 | dev->addr_len = 0; |
158 | dev->tx_queue_len = 0; | 158 | dev->tx_queue_len = 0; |
@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev) | |||
163 | dev->flags |= IFF_ECHO; | 163 | dev->flags |= IFF_ECHO; |
164 | 164 | ||
165 | dev->netdev_ops = &vcan_netdev_ops; | 165 | dev->netdev_ops = &vcan_netdev_ops; |
166 | dev->destructor = free_netdev; | 166 | dev->needs_free_netdev = true; |
167 | } | 167 | } |
168 | 168 | ||
169 | static struct rtnl_link_ops vcan_link_ops __read_mostly = { | 169 | static struct rtnl_link_ops vcan_link_ops __read_mostly = { |
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 7fbb24795681..cfe889e8f172 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c | |||
@@ -150,13 +150,13 @@ static const struct net_device_ops vxcan_netdev_ops = { | |||
150 | static void vxcan_setup(struct net_device *dev) | 150 | static void vxcan_setup(struct net_device *dev) |
151 | { | 151 | { |
152 | dev->type = ARPHRD_CAN; | 152 | dev->type = ARPHRD_CAN; |
153 | dev->mtu = CAN_MTU; | 153 | dev->mtu = CANFD_MTU; |
154 | dev->hard_header_len = 0; | 154 | dev->hard_header_len = 0; |
155 | dev->addr_len = 0; | 155 | dev->addr_len = 0; |
156 | dev->tx_queue_len = 0; | 156 | dev->tx_queue_len = 0; |
157 | dev->flags = (IFF_NOARP|IFF_ECHO); | 157 | dev->flags = (IFF_NOARP|IFF_ECHO); |
158 | dev->netdev_ops = &vxcan_netdev_ops; | 158 | dev->netdev_ops = &vxcan_netdev_ops; |
159 | dev->destructor = free_netdev; | 159 | dev->needs_free_netdev = true; |
160 | } | 160 | } |
161 | 161 | ||
162 | /* forward declaration for rtnl_create_link() */ | 162 | /* forward declaration for rtnl_create_link() */ |
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 149244aac20a..9905b52fe293 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c | |||
@@ -328,7 +328,6 @@ static void dummy_free_netdev(struct net_device *dev) | |||
328 | struct dummy_priv *priv = netdev_priv(dev); | 328 | struct dummy_priv *priv = netdev_priv(dev); |
329 | 329 | ||
330 | kfree(priv->vfinfo); | 330 | kfree(priv->vfinfo); |
331 | free_netdev(dev); | ||
332 | } | 331 | } |
333 | 332 | ||
334 | static void dummy_setup(struct net_device *dev) | 333 | static void dummy_setup(struct net_device *dev) |
@@ -338,7 +337,8 @@ static void dummy_setup(struct net_device *dev) | |||
338 | /* Initialize the device structure. */ | 337 | /* Initialize the device structure. */ |
339 | dev->netdev_ops = &dummy_netdev_ops; | 338 | dev->netdev_ops = &dummy_netdev_ops; |
340 | dev->ethtool_ops = &dummy_ethtool_ops; | 339 | dev->ethtool_ops = &dummy_ethtool_ops; |
341 | dev->destructor = dummy_free_netdev; | 340 | dev->needs_free_netdev = true; |
341 | dev->priv_destructor = dummy_free_netdev; | ||
342 | 342 | ||
343 | /* Fill in device structure with ethernet-generic values. */ | 343 | /* Fill in device structure with ethernet-generic values. */ |
344 | dev->flags |= IFF_NOARP; | 344 | dev->flags |= IFF_NOARP; |
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 08d11cede9c9..f5b237e0bd60 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c | |||
@@ -61,6 +61,8 @@ | |||
61 | 61 | ||
62 | #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF | 62 | #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF |
63 | 63 | ||
64 | #define ENA_REGS_ADMIN_INTR_MASK 1 | ||
65 | |||
64 | /*****************************************************************************/ | 66 | /*****************************************************************************/ |
65 | /*****************************************************************************/ | 67 | /*****************************************************************************/ |
66 | /*****************************************************************************/ | 68 | /*****************************************************************************/ |
@@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu | |||
232 | tail_masked = admin_queue->sq.tail & queue_size_mask; | 234 | tail_masked = admin_queue->sq.tail & queue_size_mask; |
233 | 235 | ||
234 | /* In case of queue FULL */ | 236 | /* In case of queue FULL */ |
235 | cnt = admin_queue->sq.tail - admin_queue->sq.head; | 237 | cnt = atomic_read(&admin_queue->outstanding_cmds); |
236 | if (cnt >= admin_queue->q_depth) { | 238 | if (cnt >= admin_queue->q_depth) { |
237 | pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", | 239 | pr_debug("admin queue is full.\n"); |
238 | admin_queue->sq.tail, admin_queue->sq.head, | ||
239 | admin_queue->q_depth); | ||
240 | admin_queue->stats.out_of_space++; | 240 | admin_queue->stats.out_of_space++; |
241 | return ERR_PTR(-ENOSPC); | 241 | return ERR_PTR(-ENOSPC); |
242 | } | 242 | } |
@@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status) | |||
508 | static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, | 508 | static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, |
509 | struct ena_com_admin_queue *admin_queue) | 509 | struct ena_com_admin_queue *admin_queue) |
510 | { | 510 | { |
511 | unsigned long flags; | 511 | unsigned long flags, timeout; |
512 | u32 start_time; | ||
513 | int ret; | 512 | int ret; |
514 | 513 | ||
515 | start_time = ((u32)jiffies_to_usecs(jiffies)); | 514 | timeout = jiffies + ADMIN_CMD_TIMEOUT_US; |
515 | |||
516 | while (1) { | ||
517 | spin_lock_irqsave(&admin_queue->q_lock, flags); | ||
518 | ena_com_handle_admin_completion(admin_queue); | ||
519 | spin_unlock_irqrestore(&admin_queue->q_lock, flags); | ||
520 | |||
521 | if (comp_ctx->status != ENA_CMD_SUBMITTED) | ||
522 | break; | ||
516 | 523 | ||
517 | while (comp_ctx->status == ENA_CMD_SUBMITTED) { | 524 | if (time_is_before_jiffies(timeout)) { |
518 | if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > | ||
519 | ADMIN_CMD_TIMEOUT_US) { | ||
520 | pr_err("Wait for completion (polling) timeout\n"); | 525 | pr_err("Wait for completion (polling) timeout\n"); |
521 | /* ENA didn't have any completion */ | 526 | /* ENA didn't have any completion */ |
522 | spin_lock_irqsave(&admin_queue->q_lock, flags); | 527 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
@@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c | |||
528 | goto err; | 533 | goto err; |
529 | } | 534 | } |
530 | 535 | ||
531 | spin_lock_irqsave(&admin_queue->q_lock, flags); | ||
532 | ena_com_handle_admin_completion(admin_queue); | ||
533 | spin_unlock_irqrestore(&admin_queue->q_lock, flags); | ||
534 | |||
535 | msleep(100); | 536 | msleep(100); |
536 | } | 537 | } |
537 | 538 | ||
@@ -1455,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev) | |||
1455 | 1456 | ||
1456 | void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) | 1457 | void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) |
1457 | { | 1458 | { |
1459 | u32 mask_value = 0; | ||
1460 | |||
1461 | if (polling) | ||
1462 | mask_value = ENA_REGS_ADMIN_INTR_MASK; | ||
1463 | |||
1464 | writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); | ||
1458 | ena_dev->admin_queue.polling = polling; | 1465 | ena_dev->admin_queue.polling = polling; |
1459 | } | 1466 | } |
1460 | 1467 | ||
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 67b2338f8fb3..3ee55e2fd694 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c | |||
@@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = { | |||
80 | ENA_STAT_TX_ENTRY(tx_poll), | 80 | ENA_STAT_TX_ENTRY(tx_poll), |
81 | ENA_STAT_TX_ENTRY(doorbells), | 81 | ENA_STAT_TX_ENTRY(doorbells), |
82 | ENA_STAT_TX_ENTRY(prepare_ctx_err), | 82 | ENA_STAT_TX_ENTRY(prepare_ctx_err), |
83 | ENA_STAT_TX_ENTRY(missing_tx_comp), | ||
84 | ENA_STAT_TX_ENTRY(bad_req_id), | 83 | ENA_STAT_TX_ENTRY(bad_req_id), |
85 | }; | 84 | }; |
86 | 85 | ||
@@ -94,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { | |||
94 | ENA_STAT_RX_ENTRY(dma_mapping_err), | 93 | ENA_STAT_RX_ENTRY(dma_mapping_err), |
95 | ENA_STAT_RX_ENTRY(bad_desc_num), | 94 | ENA_STAT_RX_ENTRY(bad_desc_num), |
96 | ENA_STAT_RX_ENTRY(rx_copybreak_pkt), | 95 | ENA_STAT_RX_ENTRY(rx_copybreak_pkt), |
96 | ENA_STAT_RX_ENTRY(empty_rx_ring), | ||
97 | }; | 97 | }; |
98 | 98 | ||
99 | static const struct ena_stats ena_stats_ena_com_strings[] = { | 99 | static const struct ena_stats ena_stats_ena_com_strings[] = { |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 7c1214d78855..4f16ed38bcf3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
@@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter) | |||
190 | rxr->sgl_size = adapter->max_rx_sgl_size; | 190 | rxr->sgl_size = adapter->max_rx_sgl_size; |
191 | rxr->smoothed_interval = | 191 | rxr->smoothed_interval = |
192 | ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); | 192 | ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); |
193 | rxr->empty_rx_queue = 0; | ||
193 | } | 194 | } |
194 | } | 195 | } |
195 | 196 | ||
@@ -1078,6 +1079,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring, | |||
1078 | rx_ring->per_napi_bytes = 0; | 1079 | rx_ring->per_napi_bytes = 0; |
1079 | } | 1080 | } |
1080 | 1081 | ||
1082 | static inline void ena_unmask_interrupt(struct ena_ring *tx_ring, | ||
1083 | struct ena_ring *rx_ring) | ||
1084 | { | ||
1085 | struct ena_eth_io_intr_reg intr_reg; | ||
1086 | |||
1087 | /* Update intr register: rx intr delay, | ||
1088 | * tx intr delay and interrupt unmask | ||
1089 | */ | ||
1090 | ena_com_update_intr_reg(&intr_reg, | ||
1091 | rx_ring->smoothed_interval, | ||
1092 | tx_ring->smoothed_interval, | ||
1093 | true); | ||
1094 | |||
1095 | /* It is a shared MSI-X. | ||
1096 | * Tx and Rx CQ have pointer to it. | ||
1097 | * So we use one of them to reach the intr reg | ||
1098 | */ | ||
1099 | ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); | ||
1100 | } | ||
1101 | |||
1081 | static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, | 1102 | static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, |
1082 | struct ena_ring *rx_ring) | 1103 | struct ena_ring *rx_ring) |
1083 | { | 1104 | { |
@@ -1108,7 +1129,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget) | |||
1108 | { | 1129 | { |
1109 | struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); | 1130 | struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); |
1110 | struct ena_ring *tx_ring, *rx_ring; | 1131 | struct ena_ring *tx_ring, *rx_ring; |
1111 | struct ena_eth_io_intr_reg intr_reg; | ||
1112 | 1132 | ||
1113 | u32 tx_work_done; | 1133 | u32 tx_work_done; |
1114 | u32 rx_work_done; | 1134 | u32 rx_work_done; |
@@ -1149,22 +1169,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget) | |||
1149 | if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) | 1169 | if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) |
1150 | ena_adjust_intr_moderation(rx_ring, tx_ring); | 1170 | ena_adjust_intr_moderation(rx_ring, tx_ring); |
1151 | 1171 | ||
1152 | /* Update intr register: rx intr delay, | 1172 | ena_unmask_interrupt(tx_ring, rx_ring); |
1153 | * tx intr delay and interrupt unmask | ||
1154 | */ | ||
1155 | ena_com_update_intr_reg(&intr_reg, | ||
1156 | rx_ring->smoothed_interval, | ||
1157 | tx_ring->smoothed_interval, | ||
1158 | true); | ||
1159 | |||
1160 | /* It is a shared MSI-X. | ||
1161 | * Tx and Rx CQ have pointer to it. | ||
1162 | * So we use one of them to reach the intr reg | ||
1163 | */ | ||
1164 | ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); | ||
1165 | } | 1173 | } |
1166 | 1174 | ||
1167 | |||
1168 | ena_update_ring_numa_node(tx_ring, rx_ring); | 1175 | ena_update_ring_numa_node(tx_ring, rx_ring); |
1169 | 1176 | ||
1170 | ret = rx_work_done; | 1177 | ret = rx_work_done; |
@@ -1485,6 +1492,11 @@ static int ena_up_complete(struct ena_adapter *adapter) | |||
1485 | 1492 | ||
1486 | ena_napi_enable_all(adapter); | 1493 | ena_napi_enable_all(adapter); |
1487 | 1494 | ||
1495 | /* Enable completion queues interrupt */ | ||
1496 | for (i = 0; i < adapter->num_queues; i++) | ||
1497 | ena_unmask_interrupt(&adapter->tx_ring[i], | ||
1498 | &adapter->rx_ring[i]); | ||
1499 | |||
1488 | /* schedule napi in case we had pending packets | 1500 | /* schedule napi in case we had pending packets |
1489 | * from the last time we disable napi | 1501 | * from the last time we disable napi |
1490 | */ | 1502 | */ |
@@ -1532,6 +1544,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) | |||
1532 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n", | 1544 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n", |
1533 | qid, rc); | 1545 | qid, rc); |
1534 | ena_com_destroy_io_queue(ena_dev, ena_qid); | 1546 | ena_com_destroy_io_queue(ena_dev, ena_qid); |
1547 | return rc; | ||
1535 | } | 1548 | } |
1536 | 1549 | ||
1537 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); | 1550 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); |
@@ -1596,6 +1609,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) | |||
1596 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n", | 1609 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n", |
1597 | qid, rc); | 1610 | qid, rc); |
1598 | ena_com_destroy_io_queue(ena_dev, ena_qid); | 1611 | ena_com_destroy_io_queue(ena_dev, ena_qid); |
1612 | return rc; | ||
1599 | } | 1613 | } |
1600 | 1614 | ||
1601 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); | 1615 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); |
@@ -1981,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1981 | 1995 | ||
1982 | tx_info->tx_descs = nb_hw_desc; | 1996 | tx_info->tx_descs = nb_hw_desc; |
1983 | tx_info->last_jiffies = jiffies; | 1997 | tx_info->last_jiffies = jiffies; |
1998 | tx_info->print_once = 0; | ||
1984 | 1999 | ||
1985 | tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, | 2000 | tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, |
1986 | tx_ring->ring_size); | 2001 | tx_ring->ring_size); |
@@ -2550,13 +2565,44 @@ err: | |||
2550 | "Reset attempt failed. Can not reset the device\n"); | 2565 | "Reset attempt failed. Can not reset the device\n"); |
2551 | } | 2566 | } |
2552 | 2567 | ||
2553 | static void check_for_missing_tx_completions(struct ena_adapter *adapter) | 2568 | static int check_missing_comp_in_queue(struct ena_adapter *adapter, |
2569 | struct ena_ring *tx_ring) | ||
2554 | { | 2570 | { |
2555 | struct ena_tx_buffer *tx_buf; | 2571 | struct ena_tx_buffer *tx_buf; |
2556 | unsigned long last_jiffies; | 2572 | unsigned long last_jiffies; |
2573 | u32 missed_tx = 0; | ||
2574 | int i; | ||
2575 | |||
2576 | for (i = 0; i < tx_ring->ring_size; i++) { | ||
2577 | tx_buf = &tx_ring->tx_buffer_info[i]; | ||
2578 | last_jiffies = tx_buf->last_jiffies; | ||
2579 | if (unlikely(last_jiffies && | ||
2580 | time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { | ||
2581 | if (!tx_buf->print_once) | ||
2582 | netif_notice(adapter, tx_err, adapter->netdev, | ||
2583 | "Found a Tx that wasn't completed on time, qid %d, index %d.\n", | ||
2584 | tx_ring->qid, i); | ||
2585 | |||
2586 | tx_buf->print_once = 1; | ||
2587 | missed_tx++; | ||
2588 | |||
2589 | if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { | ||
2590 | netif_err(adapter, tx_err, adapter->netdev, | ||
2591 | "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", | ||
2592 | missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); | ||
2593 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | ||
2594 | return -EIO; | ||
2595 | } | ||
2596 | } | ||
2597 | } | ||
2598 | |||
2599 | return 0; | ||
2600 | } | ||
2601 | |||
2602 | static void check_for_missing_tx_completions(struct ena_adapter *adapter) | ||
2603 | { | ||
2557 | struct ena_ring *tx_ring; | 2604 | struct ena_ring *tx_ring; |
2558 | int i, j, budget; | 2605 | int i, budget, rc; |
2559 | u32 missed_tx; | ||
2560 | 2606 | ||
2561 | /* Make sure the driver doesn't turn the device in other process */ | 2607 | /* Make sure the driver doesn't turn the device in other process */ |
2562 | smp_rmb(); | 2608 | smp_rmb(); |
@@ -2572,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) | |||
2572 | for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { | 2618 | for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { |
2573 | tx_ring = &adapter->tx_ring[i]; | 2619 | tx_ring = &adapter->tx_ring[i]; |
2574 | 2620 | ||
2575 | for (j = 0; j < tx_ring->ring_size; j++) { | 2621 | rc = check_missing_comp_in_queue(adapter, tx_ring); |
2576 | tx_buf = &tx_ring->tx_buffer_info[j]; | 2622 | if (unlikely(rc)) |
2577 | last_jiffies = tx_buf->last_jiffies; | 2623 | return; |
2578 | if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { | ||
2579 | netif_notice(adapter, tx_err, adapter->netdev, | ||
2580 | "Found a Tx that wasn't completed on time, qid %d, index %d.\n", | ||
2581 | tx_ring->qid, j); | ||
2582 | |||
2583 | u64_stats_update_begin(&tx_ring->syncp); | ||
2584 | missed_tx = tx_ring->tx_stats.missing_tx_comp++; | ||
2585 | u64_stats_update_end(&tx_ring->syncp); | ||
2586 | |||
2587 | /* Clear last jiffies so the lost buffer won't | ||
2588 | * be counted twice. | ||
2589 | */ | ||
2590 | tx_buf->last_jiffies = 0; | ||
2591 | |||
2592 | if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { | ||
2593 | netif_err(adapter, tx_err, adapter->netdev, | ||
2594 | "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n", | ||
2595 | missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); | ||
2596 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | ||
2597 | } | ||
2598 | } | ||
2599 | } | ||
2600 | 2624 | ||
2601 | budget--; | 2625 | budget--; |
2602 | if (!budget) | 2626 | if (!budget) |
@@ -2606,6 +2630,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) | |||
2606 | adapter->last_monitored_tx_qid = i % adapter->num_queues; | 2630 | adapter->last_monitored_tx_qid = i % adapter->num_queues; |
2607 | } | 2631 | } |
2608 | 2632 | ||
2633 | /* trigger napi schedule after 2 consecutive detections */ | ||
2634 | #define EMPTY_RX_REFILL 2 | ||
2635 | /* For the rare case where the device runs out of Rx descriptors and the | ||
2636 | * napi handler failed to refill new Rx descriptors (due to a lack of memory | ||
2637 | * for example). | ||
2638 | * This case will lead to a deadlock: | ||
2639 | * The device won't send interrupts since all the new Rx packets will be dropped | ||
2640 | * The napi handler won't allocate new Rx descriptors so the device will be | ||
2641 | * able to send new packets. | ||
2642 | * | ||
2643 | * This scenario can happen when the kernel's vm.min_free_kbytes is too small. | ||
2644 | * It is recommended to have at least 512MB, with a minimum of 128MB for | ||
2645 | * constrained environment). | ||
2646 | * | ||
2647 | * When such a situation is detected - Reschedule napi | ||
2648 | */ | ||
2649 | static void check_for_empty_rx_ring(struct ena_adapter *adapter) | ||
2650 | { | ||
2651 | struct ena_ring *rx_ring; | ||
2652 | int i, refill_required; | ||
2653 | |||
2654 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | ||
2655 | return; | ||
2656 | |||
2657 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) | ||
2658 | return; | ||
2659 | |||
2660 | for (i = 0; i < adapter->num_queues; i++) { | ||
2661 | rx_ring = &adapter->rx_ring[i]; | ||
2662 | |||
2663 | refill_required = | ||
2664 | ena_com_sq_empty_space(rx_ring->ena_com_io_sq); | ||
2665 | if (unlikely(refill_required == (rx_ring->ring_size - 1))) { | ||
2666 | rx_ring->empty_rx_queue++; | ||
2667 | |||
2668 | if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { | ||
2669 | u64_stats_update_begin(&rx_ring->syncp); | ||
2670 | rx_ring->rx_stats.empty_rx_ring++; | ||
2671 | u64_stats_update_end(&rx_ring->syncp); | ||
2672 | |||
2673 | netif_err(adapter, drv, adapter->netdev, | ||
2674 | "trigger refill for ring %d\n", i); | ||
2675 | |||
2676 | napi_schedule(rx_ring->napi); | ||
2677 | rx_ring->empty_rx_queue = 0; | ||
2678 | } | ||
2679 | } else { | ||
2680 | rx_ring->empty_rx_queue = 0; | ||
2681 | } | ||
2682 | } | ||
2683 | } | ||
2684 | |||
2609 | /* Check for keep alive expiration */ | 2685 | /* Check for keep alive expiration */ |
2610 | static void check_for_missing_keep_alive(struct ena_adapter *adapter) | 2686 | static void check_for_missing_keep_alive(struct ena_adapter *adapter) |
2611 | { | 2687 | { |
@@ -2660,6 +2736,8 @@ static void ena_timer_service(unsigned long data) | |||
2660 | 2736 | ||
2661 | check_for_missing_tx_completions(adapter); | 2737 | check_for_missing_tx_completions(adapter); |
2662 | 2738 | ||
2739 | check_for_empty_rx_ring(adapter); | ||
2740 | |||
2663 | if (debug_area) | 2741 | if (debug_area) |
2664 | ena_dump_stats_to_buf(adapter, debug_area); | 2742 | ena_dump_stats_to_buf(adapter, debug_area); |
2665 | 2743 | ||
@@ -2840,6 +2918,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) | |||
2840 | { | 2918 | { |
2841 | int release_bars; | 2919 | int release_bars; |
2842 | 2920 | ||
2921 | if (ena_dev->mem_bar) | ||
2922 | devm_iounmap(&pdev->dev, ena_dev->mem_bar); | ||
2923 | |||
2924 | devm_iounmap(&pdev->dev, ena_dev->reg_bar); | ||
2925 | |||
2843 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; | 2926 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
2844 | pci_release_selected_regions(pdev, release_bars); | 2927 | pci_release_selected_regions(pdev, release_bars); |
2845 | } | 2928 | } |
@@ -2927,8 +3010,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2927 | goto err_free_ena_dev; | 3010 | goto err_free_ena_dev; |
2928 | } | 3011 | } |
2929 | 3012 | ||
2930 | ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), | 3013 | ena_dev->reg_bar = devm_ioremap(&pdev->dev, |
2931 | pci_resource_len(pdev, ENA_REG_BAR)); | 3014 | pci_resource_start(pdev, ENA_REG_BAR), |
3015 | pci_resource_len(pdev, ENA_REG_BAR)); | ||
2932 | if (!ena_dev->reg_bar) { | 3016 | if (!ena_dev->reg_bar) { |
2933 | dev_err(&pdev->dev, "failed to remap regs bar\n"); | 3017 | dev_err(&pdev->dev, "failed to remap regs bar\n"); |
2934 | rc = -EFAULT; | 3018 | rc = -EFAULT; |
@@ -2948,8 +3032,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2948 | ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); | 3032 | ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); |
2949 | 3033 | ||
2950 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | 3034 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
2951 | ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), | 3035 | ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, |
2952 | pci_resource_len(pdev, ENA_MEM_BAR)); | 3036 | pci_resource_start(pdev, ENA_MEM_BAR), |
3037 | pci_resource_len(pdev, ENA_MEM_BAR)); | ||
2953 | if (!ena_dev->mem_bar) { | 3038 | if (!ena_dev->mem_bar) { |
2954 | rc = -EFAULT; | 3039 | rc = -EFAULT; |
2955 | goto err_device_destroy; | 3040 | goto err_device_destroy; |
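The ena probe path switches to devm_ioremap()/devm_ioremap_wc(), tying the BAR mappings to the device's lifetime; the release helper still unmaps explicitly only because it also drops the PCI regions before teardown. A minimal probe-time sketch, assuming a struct pci_dev *pdev and BAR 0:

	static int map_regs(struct pci_dev *pdev, void __iomem **regs)
	{
		*regs = devm_ioremap(&pdev->dev,
				     pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
		if (!*regs)
			return -EFAULT;

		/* released automatically on driver unbind; devm_iounmap() is only
		 * needed when the mapping must go away earlier, e.g. before
		 * pci_release_selected_regions().
		 */
		return 0;
	}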
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 0e22bce6239d..a4d3d5e21068 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h | |||
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | #define DRV_MODULE_VER_MAJOR 1 | 46 | #define DRV_MODULE_VER_MAJOR 1 |
47 | #define DRV_MODULE_VER_MINOR 1 | 47 | #define DRV_MODULE_VER_MINOR 1 |
48 | #define DRV_MODULE_VER_SUBMINOR 2 | 48 | #define DRV_MODULE_VER_SUBMINOR 7 |
49 | 49 | ||
50 | #define DRV_MODULE_NAME "ena" | 50 | #define DRV_MODULE_NAME "ena" |
51 | #ifndef DRV_MODULE_VERSION | 51 | #ifndef DRV_MODULE_VERSION |
@@ -146,7 +146,18 @@ struct ena_tx_buffer { | |||
146 | u32 tx_descs; | 146 | u32 tx_descs; |
147 | /* num of buffers used by this skb */ | 147 | /* num of buffers used by this skb */ |
148 | u32 num_of_bufs; | 148 | u32 num_of_bufs; |
149 | /* Save the last jiffies to detect missing tx packets */ | 149 | |
150 | /* Used for detect missing tx packets to limit the number of prints */ | ||
151 | u32 print_once; | ||
152 | /* Save the last jiffies to detect missing tx packets | ||
153 | * | ||
154 | * sets to non zero value on ena_start_xmit and set to zero on | ||
155 | * napi and timer_Service_routine. | ||
156 | * | ||
157 | * while this value is not protected by lock, | ||
158 | * a given packet is not expected to be handled by ena_start_xmit | ||
159 | * and by napi/timer_service at the same time. | ||
160 | */ | ||
150 | unsigned long last_jiffies; | 161 | unsigned long last_jiffies; |
151 | struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; | 162 | struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; |
152 | } ____cacheline_aligned; | 163 | } ____cacheline_aligned; |
@@ -170,7 +181,6 @@ struct ena_stats_tx { | |||
170 | u64 napi_comp; | 181 | u64 napi_comp; |
171 | u64 tx_poll; | 182 | u64 tx_poll; |
172 | u64 doorbells; | 183 | u64 doorbells; |
173 | u64 missing_tx_comp; | ||
174 | u64 bad_req_id; | 184 | u64 bad_req_id; |
175 | }; | 185 | }; |
176 | 186 | ||
@@ -184,6 +194,7 @@ struct ena_stats_rx { | |||
184 | u64 dma_mapping_err; | 194 | u64 dma_mapping_err; |
185 | u64 bad_desc_num; | 195 | u64 bad_desc_num; |
186 | u64 rx_copybreak_pkt; | 196 | u64 rx_copybreak_pkt; |
197 | u64 empty_rx_ring; | ||
187 | }; | 198 | }; |
188 | 199 | ||
189 | struct ena_ring { | 200 | struct ena_ring { |
@@ -231,6 +242,7 @@ struct ena_ring { | |||
231 | struct ena_stats_tx tx_stats; | 242 | struct ena_stats_tx tx_stats; |
232 | struct ena_stats_rx rx_stats; | 243 | struct ena_stats_rx rx_stats; |
233 | }; | 244 | }; |
245 | int empty_rx_queue; | ||
234 | } ____cacheline_aligned; | 246 | } ____cacheline_aligned; |
235 | 247 | ||
236 | struct ena_stats_dev { | 248 | struct ena_stats_dev { |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index b8e3d88f0879..a66aee51ab5b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | |||
@@ -193,9 +193,6 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, | |||
193 | struct aq_hw_caps_s *aq_hw_caps, | 193 | struct aq_hw_caps_s *aq_hw_caps, |
194 | u32 *regs_buff); | 194 | u32 *regs_buff); |
195 | 195 | ||
196 | int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, | ||
197 | struct ethtool_cmd *cmd); | ||
198 | |||
199 | int hw_atl_utils_hw_set_power(struct aq_hw_s *self, | 196 | int hw_atl_utils_hw_set_power(struct aq_hw_s *self, |
200 | unsigned int power_state); | 197 | unsigned int power_state); |
201 | 198 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 5f49334dcad5..f619c4cac51f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -3883,15 +3883,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3883 | /* when transmitting in a vf, start bd must hold the ethertype | 3883 | /* when transmitting in a vf, start bd must hold the ethertype |
3884 | * for fw to enforce it | 3884 | * for fw to enforce it |
3885 | */ | 3885 | */ |
3886 | u16 vlan_tci = 0; | ||
3886 | #ifndef BNX2X_STOP_ON_ERROR | 3887 | #ifndef BNX2X_STOP_ON_ERROR |
3887 | if (IS_VF(bp)) | 3888 | if (IS_VF(bp)) { |
3888 | #endif | 3889 | #endif |
3889 | tx_start_bd->vlan_or_ethertype = | 3890 | /* Still need to consider inband vlan for enforced */ |
3890 | cpu_to_le16(ntohs(eth->h_proto)); | 3891 | if (__vlan_get_tag(skb, &vlan_tci)) { |
3892 | tx_start_bd->vlan_or_ethertype = | ||
3893 | cpu_to_le16(ntohs(eth->h_proto)); | ||
3894 | } else { | ||
3895 | tx_start_bd->bd_flags.as_bitfield |= | ||
3896 | (X_ETH_INBAND_VLAN << | ||
3897 | ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); | ||
3898 | tx_start_bd->vlan_or_ethertype = | ||
3899 | cpu_to_le16(vlan_tci); | ||
3900 | } | ||
3891 | #ifndef BNX2X_STOP_ON_ERROR | 3901 | #ifndef BNX2X_STOP_ON_ERROR |
3892 | else | 3902 | } else { |
3893 | /* used by FW for packet accounting */ | 3903 | /* used by FW for packet accounting */ |
3894 | tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); | 3904 | tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); |
3905 | } | ||
3895 | #endif | 3906 | #endif |
3896 | } | 3907 | } |
3897 | 3908 | ||
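The bnx2x VF transmit path now honours an in-band VLAN header: __vlan_get_tag() returns 0 and fills in the TCI when the frame carries an 802.1Q tag in its payload, and a non-zero value otherwise. A small sketch of that helper's contract (the helper and eth_hdr() are the real API from linux/if_vlan.h and linux/etherdevice.h; the two handlers are illustrative):

	u16 vlan_tci = 0;

	if (__vlan_get_tag(skb, &vlan_tci) == 0) {
		/* frame has an 802.1Q header; vlan_tci holds PCP/DEI/VID */
		use_inband_vlan(vlan_tci);			/* illustrative */
	} else {
		/* untagged frame; fall back to the ethertype */
		use_ethertype(ntohs(eth_hdr(skb)->h_proto));	/* illustrative */
	}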
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index bdfd53b46bc5..9ca994d0bab6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -901,6 +901,8 @@ static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
901 | /* release VF resources */ | 901 | /* release VF resources */ |
902 | bnx2x_vf_free_resc(bp, vf); | 902 | bnx2x_vf_free_resc(bp, vf); |
903 | 903 | ||
904 | vf->malicious = false; | ||
905 | |||
904 | /* re-open the mailbox */ | 906 | /* re-open the mailbox */ |
905 | bnx2x_vf_enable_mbx(bp, vf->abs_vfid); | 907 | bnx2x_vf_enable_mbx(bp, vf->abs_vfid); |
906 | return; | 908 | return; |
@@ -1822,9 +1824,11 @@ get_vf: | |||
1822 | vf->abs_vfid, qidx); | 1824 | vf->abs_vfid, qidx); |
1823 | bnx2x_vf_handle_rss_update_eqe(bp, vf); | 1825 | bnx2x_vf_handle_rss_update_eqe(bp, vf); |
1824 | case EVENT_RING_OPCODE_VF_FLR: | 1826 | case EVENT_RING_OPCODE_VF_FLR: |
1825 | case EVENT_RING_OPCODE_MALICIOUS_VF: | ||
1826 | /* Do nothing for now */ | 1827 | /* Do nothing for now */ |
1827 | return 0; | 1828 | return 0; |
1829 | case EVENT_RING_OPCODE_MALICIOUS_VF: | ||
1830 | vf->malicious = true; | ||
1831 | return 0; | ||
1828 | } | 1832 | } |
1829 | 1833 | ||
1830 | return 0; | 1834 | return 0; |
@@ -1905,6 +1909,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) | |||
1905 | continue; | 1909 | continue; |
1906 | } | 1910 | } |
1907 | 1911 | ||
1912 | if (vf->malicious) { | ||
1913 | DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), | ||
1914 | "vf %d malicious so no stats for it\n", | ||
1915 | vf->abs_vfid); | ||
1916 | continue; | ||
1917 | } | ||
1918 | |||
1908 | DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), | 1919 | DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), |
1909 | "add addresses for vf %d\n", vf->abs_vfid); | 1920 | "add addresses for vf %d\n", vf->abs_vfid); |
1910 | for_each_vfq(vf, j) { | 1921 | for_each_vfq(vf, j) { |
@@ -3042,7 +3053,7 @@ void bnx2x_vf_pci_dealloc(struct bnx2x *bp) | |||
3042 | { | 3053 | { |
3043 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3054 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, |
3044 | sizeof(struct bnx2x_vf_mbx_msg)); | 3055 | sizeof(struct bnx2x_vf_mbx_msg)); |
3045 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, | 3056 | BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, |
3046 | sizeof(union pf_vf_bulletin)); | 3057 | sizeof(union pf_vf_bulletin)); |
3047 | } | 3058 | } |
3048 | 3059 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 888d0b6632e8..53466f6cebab 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | |||
@@ -141,6 +141,7 @@ struct bnx2x_virtf { | |||
141 | #define VF_RESET 3 /* VF FLR'd, pending cleanup */ | 141 | #define VF_RESET 3 /* VF FLR'd, pending cleanup */ |
142 | 142 | ||
143 | bool flr_clnup_stage; /* true during flr cleanup */ | 143 | bool flr_clnup_stage; /* true during flr cleanup */ |
144 | bool malicious; /* true if FW indicated so, until FLR */ | ||
144 | 145 | ||
145 | /* dma */ | 146 | /* dma */ |
146 | dma_addr_t fw_stat_map; | 147 | dma_addr_t fw_stat_map; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 77ed2f628f9c..53309f659951 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap) | |||
2171 | { | 2171 | { |
2172 | int err; | 2172 | int err; |
2173 | 2173 | ||
2174 | mutex_lock(&uld_mutex); | ||
2174 | err = setup_sge_queues(adap); | 2175 | err = setup_sge_queues(adap); |
2175 | if (err) | 2176 | if (err) |
2176 | goto out; | 2177 | goto rel_lock; |
2177 | err = setup_rss(adap); | 2178 | err = setup_rss(adap); |
2178 | if (err) | 2179 | if (err) |
2179 | goto freeq; | 2180 | goto freeq; |
@@ -2197,7 +2198,6 @@ static int cxgb_up(struct adapter *adap) | |||
2197 | goto irq_err; | 2198 | goto irq_err; |
2198 | } | 2199 | } |
2199 | 2200 | ||
2200 | mutex_lock(&uld_mutex); | ||
2201 | enable_rx(adap); | 2201 | enable_rx(adap); |
2202 | t4_sge_start(adap); | 2202 | t4_sge_start(adap); |
2203 | t4_intr_enable(adap); | 2203 | t4_intr_enable(adap); |
@@ -2210,13 +2210,15 @@ static int cxgb_up(struct adapter *adap) | |||
2210 | #endif | 2210 | #endif |
2211 | /* Initialize hash mac addr list*/ | 2211 | /* Initialize hash mac addr list*/ |
2212 | INIT_LIST_HEAD(&adap->mac_hlist); | 2212 | INIT_LIST_HEAD(&adap->mac_hlist); |
2213 | out: | ||
2214 | return err; | 2213 | return err; |
2214 | |||
2215 | irq_err: | 2215 | irq_err: |
2216 | dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); | 2216 | dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); |
2217 | freeq: | 2217 | freeq: |
2218 | t4_free_sge_resources(adap); | 2218 | t4_free_sge_resources(adap); |
2219 | goto out; | 2219 | rel_lock: |
2220 | mutex_unlock(&uld_mutex); | ||
2221 | return err; | ||
2220 | } | 2222 | } |
2221 | 2223 | ||
2222 | static void cxgb_down(struct adapter *adapter) | 2224 | static void cxgb_down(struct adapter *adapter) |
@@ -4525,7 +4527,7 @@ static void dummy_setup(struct net_device *dev) | |||
4525 | /* Initialize the device structure. */ | 4527 | /* Initialize the device structure. */ |
4526 | dev->netdev_ops = &cxgb4_mgmt_netdev_ops; | 4528 | dev->netdev_ops = &cxgb4_mgmt_netdev_ops; |
4527 | dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; | 4529 | dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; |
4528 | dev->destructor = free_netdev; | 4530 | dev->needs_free_netdev = true; |
4529 | } | 4531 | } |
4530 | 4532 | ||
4531 | static int config_mgmt_dev(struct pci_dev *pdev) | 4533 | static int config_mgmt_dev(struct pci_dev *pdev) |
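The cxgb_up() hunks above move mutex_lock(&uld_mutex) to the start of the function and add a rel_lock label, so the lock covers queue setup and is dropped exactly once on every failure path. A small pthread sketch of that structure, with hypothetical names and not cxgb4 code: acquire the lock at entry and funnel all exits through labels that unlock.

```c
/* Sketch only: one lock taken at entry, released on every exit path. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uld_lock = PTHREAD_MUTEX_INITIALIZER;

static int setup_queues(int fail) { return fail ? -1 : 0; }
static int setup_rss(void)        { return 0; }
static void free_queues(void)     { puts("queues freed"); }

static int bring_up(int fail)
{
	int err;

	pthread_mutex_lock(&uld_lock);

	err = setup_queues(fail);
	if (err)
		goto rel_lock;

	err = setup_rss();
	if (err)
		goto freeq;

	pthread_mutex_unlock(&uld_lock);
	return 0;

freeq:
	free_queues();
rel_lock:
	pthread_mutex_unlock(&uld_lock);
	return err;
}

int main(void)
{
	printf("ok path: %d\n", bring_up(0));
	printf("error path: %d\n", bring_up(1));
	return 0;
}
```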
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 9a520e4f0df9..290ad0563320 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | |||
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) | |||
2647 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ | 2647 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ |
2648 | 2648 | ||
2649 | /* device used for DMA mapping */ | 2649 | /* device used for DMA mapping */ |
2650 | arch_setup_dma_ops(dev, 0, 0, NULL, false); | 2650 | set_dma_ops(dev, get_dma_ops(&pdev->dev)); |
2651 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); | 2651 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); |
2652 | if (err) { | 2652 | if (err) { |
2653 | dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); | 2653 | dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); |
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 0b31f8502ada..6e67d22fd0d5 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c | |||
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, | |||
623 | goto no_mem; | 623 | goto no_mem; |
624 | } | 624 | } |
625 | 625 | ||
626 | set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); | ||
627 | |||
626 | ret = platform_device_add_data(pdev, &data, sizeof(data)); | 628 | ret = platform_device_add_data(pdev, &data, sizeof(data)); |
627 | if (ret) | 629 | if (ret) |
628 | goto err; | 630 | goto err; |
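Both the dpaa_eth and fman MAC hunks above stop calling arch_setup_dma_ops() on devices they create and instead copy the DMA ops already configured on the probed parent, via set_dma_ops(dev, get_dma_ops(parent)). The underlying shape is simply "a child device inherits its parent's ops table"; a hedged, non-kernel sketch of that shape (the struct and function names are invented, not the kernel's dma_map_ops API):

```c
/* Sketch only: a child device inherits the parent's ops table instead of
 * being reconfigured from scratch. */
#include <stdio.h>

struct dev_ops {
	const char *name;
};

struct device {
	const struct dev_ops *ops;
};

static const struct dev_ops iommu_ops = { "iommu-backed DMA" };

static const struct dev_ops *get_ops(const struct device *d) { return d->ops; }
static void set_ops(struct device *d, const struct dev_ops *ops) { d->ops = ops; }

int main(void)
{
	struct device parent = { &iommu_ops };
	struct device child = { NULL };

	/* Equivalent in spirit to set_dma_ops(&child, get_dma_ops(&parent)):
	 * the child maps DMA the same way its already-probed parent does. */
	set_ops(&child, get_ops(&parent));

	printf("child uses: %s\n", child.ops->name);
	return 0;
}
```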
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index b8fab149690f..e95795b3c841 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) | |||
288 | 288 | ||
289 | /* Force 1000M Link, Default is 0x0200 */ | 289 | /* Force 1000M Link, Default is 0x0200 */ |
290 | phy_write(phy_dev, 7, 0x20C); | 290 | phy_write(phy_dev, 7, 0x20C); |
291 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); | ||
292 | 291 | ||
293 | /* Enable PHY loop-back */ | 292 | /* Powerup Fiber */ |
293 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); | ||
294 | val = phy_read(phy_dev, COPPER_CONTROL_REG); | ||
295 | val &= ~PHY_POWER_DOWN; | ||
296 | phy_write(phy_dev, COPPER_CONTROL_REG, val); | ||
297 | |||
298 | /* Enable Phy Loopback */ | ||
299 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); | ||
294 | val = phy_read(phy_dev, COPPER_CONTROL_REG); | 300 | val = phy_read(phy_dev, COPPER_CONTROL_REG); |
295 | val |= PHY_LOOP_BACK; | 301 | val |= PHY_LOOP_BACK; |
296 | val &= ~PHY_POWER_DOWN; | 302 | val &= ~PHY_POWER_DOWN; |
@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) | |||
299 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); | 305 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); |
300 | phy_write(phy_dev, 1, 0x400); | 306 | phy_write(phy_dev, 1, 0x400); |
301 | phy_write(phy_dev, 7, 0x200); | 307 | phy_write(phy_dev, 7, 0x200); |
308 | |||
309 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); | ||
310 | val = phy_read(phy_dev, COPPER_CONTROL_REG); | ||
311 | val |= PHY_POWER_DOWN; | ||
312 | phy_write(phy_dev, COPPER_CONTROL_REG, val); | ||
313 | |||
302 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); | 314 | phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); |
303 | phy_write(phy_dev, 9, 0xF00); | 315 | phy_write(phy_dev, 9, 0xF00); |
304 | 316 | ||
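The hns loopback hunks above select the fiber page (page 1), clear the power-down bit in the control register there, and return to page 0 before enabling loopback; teardown reverses the sequence. The general "select page, read-modify-write, restore page" access pattern can be sketched against a fake register file; the page and register numbers below are illustrative only, not values from the hns driver or any PHY datasheet.

```c
/* Sketch only: paged PHY register access simulated with an array. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_REG     22
#define CTRL_REG      0
#define POWER_DOWN  (1u << 11)
#define NUM_PAGES     4
#define NUM_REGS     32

static uint16_t regs[NUM_PAGES][NUM_REGS];
static unsigned cur_page;

static void phy_write(unsigned reg, uint16_t val)
{
	if (reg == PAGE_REG)
		cur_page = val % NUM_PAGES;
	else
		regs[cur_page][reg] = val;
}

static uint16_t phy_read(unsigned reg)
{
	return regs[cur_page][reg];
}

int main(void)
{
	uint16_t val;

	regs[1][CTRL_REG] = POWER_DOWN;   /* fiber side starts powered down */

	phy_write(PAGE_REG, 1);           /* select the fiber page */
	val = phy_read(CTRL_REG);
	val &= ~POWER_DOWN;               /* power the fiber side up */
	phy_write(CTRL_REG, val);
	phy_write(PAGE_REG, 0);           /* always restore the default page */

	printf("fiber ctrl = 0x%04x\n", regs[1][CTRL_REG]);
	return 0;
}
```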
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 508923f39ccf..259e69a52ec5 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
@@ -343,6 +343,7 @@ static int emac_reset(struct emac_instance *dev) | |||
343 | { | 343 | { |
344 | struct emac_regs __iomem *p = dev->emacp; | 344 | struct emac_regs __iomem *p = dev->emacp; |
345 | int n = 20; | 345 | int n = 20; |
346 | bool __maybe_unused try_internal_clock = false; | ||
346 | 347 | ||
347 | DBG(dev, "reset" NL); | 348 | DBG(dev, "reset" NL); |
348 | 349 | ||
@@ -355,6 +356,7 @@ static int emac_reset(struct emac_instance *dev) | |||
355 | } | 356 | } |
356 | 357 | ||
357 | #ifdef CONFIG_PPC_DCR_NATIVE | 358 | #ifdef CONFIG_PPC_DCR_NATIVE |
359 | do_retry: | ||
358 | /* | 360 | /* |
359 | * PPC460EX/GT Embedded Processor Advanced User's Manual | 361 | * PPC460EX/GT Embedded Processor Advanced User's Manual |
360 | * section 28.10.1 Mode Register 0 (EMACx_MR0) states: | 362 | * section 28.10.1 Mode Register 0 (EMACx_MR0) states: |
@@ -362,10 +364,19 @@ static int emac_reset(struct emac_instance *dev) | |||
362 | * of the EMAC. If none is present, select the internal clock | 364 | * of the EMAC. If none is present, select the internal clock |
363 | * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). | 365 | * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). |
364 | * After a soft reset, select the external clock. | 366 | * After a soft reset, select the external clock. |
367 | * | ||
368 | * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the | ||
369 | * ethernet cable is not attached. This causes the reset to timeout | ||
370 | * and the PHY detection code in emac_init_phy() is unable to | ||
371 | * communicate and detect the AR8035-A PHY. As a result, the emac | ||
372 | * driver bails out early and the user has no ethernet. | ||
373 | * In order to stay compatible with existing configurations, the | ||
374 | * driver will temporarily switch to the internal clock, after | ||
375 | * the first reset fails. | ||
365 | */ | 376 | */ |
366 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { | 377 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { |
367 | if (dev->phy_address == 0xffffffff && | 378 | if (try_internal_clock || (dev->phy_address == 0xffffffff && |
368 | dev->phy_map == 0xffffffff) { | 379 | dev->phy_map == 0xffffffff)) { |
369 | /* No PHY: select internal loop clock before reset */ | 380 | /* No PHY: select internal loop clock before reset */ |
370 | dcri_clrset(SDR0, SDR0_ETH_CFG, | 381 | dcri_clrset(SDR0, SDR0_ETH_CFG, |
371 | 0, SDR0_ETH_CFG_ECS << dev->cell_index); | 382 | 0, SDR0_ETH_CFG_ECS << dev->cell_index); |
@@ -383,8 +394,15 @@ static int emac_reset(struct emac_instance *dev) | |||
383 | 394 | ||
384 | #ifdef CONFIG_PPC_DCR_NATIVE | 395 | #ifdef CONFIG_PPC_DCR_NATIVE |
385 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { | 396 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { |
386 | if (dev->phy_address == 0xffffffff && | 397 | if (!n && !try_internal_clock) { |
387 | dev->phy_map == 0xffffffff) { | 398 | /* first attempt has timed out. */ |
399 | n = 20; | ||
400 | try_internal_clock = true; | ||
401 | goto do_retry; | ||
402 | } | ||
403 | |||
404 | if (try_internal_clock || (dev->phy_address == 0xffffffff && | ||
405 | dev->phy_map == 0xffffffff)) { | ||
388 | /* No PHY: restore external clock source after reset */ | 406 | /* No PHY: restore external clock source after reset */ |
389 | dcri_clrset(SDR0, SDR0_ETH_CFG, | 407 | dcri_clrset(SDR0, SDR0_ETH_CFG, |
390 | SDR0_ETH_CFG_ECS << dev->cell_index, 0); | 408 | SDR0_ETH_CFG_ECS << dev->cell_index, 0); |
@@ -2460,20 +2478,24 @@ static int emac_mii_bus_reset(struct mii_bus *bus) | |||
2460 | return emac_reset(dev); | 2478 | return emac_reset(dev); |
2461 | } | 2479 | } |
2462 | 2480 | ||
2481 | static int emac_mdio_phy_start_aneg(struct mii_phy *phy, | ||
2482 | struct phy_device *phy_dev) | ||
2483 | { | ||
2484 | phy_dev->autoneg = phy->autoneg; | ||
2485 | phy_dev->speed = phy->speed; | ||
2486 | phy_dev->duplex = phy->duplex; | ||
2487 | phy_dev->advertising = phy->advertising; | ||
2488 | return phy_start_aneg(phy_dev); | ||
2489 | } | ||
2490 | |||
2463 | static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) | 2491 | static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) |
2464 | { | 2492 | { |
2465 | struct net_device *ndev = phy->dev; | 2493 | struct net_device *ndev = phy->dev; |
2466 | struct emac_instance *dev = netdev_priv(ndev); | 2494 | struct emac_instance *dev = netdev_priv(ndev); |
2467 | 2495 | ||
2468 | dev->phy.autoneg = AUTONEG_ENABLE; | ||
2469 | dev->phy.speed = SPEED_1000; | ||
2470 | dev->phy.duplex = DUPLEX_FULL; | ||
2471 | dev->phy.advertising = advertise; | ||
2472 | phy->autoneg = AUTONEG_ENABLE; | 2496 | phy->autoneg = AUTONEG_ENABLE; |
2473 | phy->speed = dev->phy.speed; | ||
2474 | phy->duplex = dev->phy.duplex; | ||
2475 | phy->advertising = advertise; | 2497 | phy->advertising = advertise; |
2476 | return phy_start_aneg(dev->phy_dev); | 2498 | return emac_mdio_phy_start_aneg(phy, dev->phy_dev); |
2477 | } | 2499 | } |
2478 | 2500 | ||
2479 | static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) | 2501 | static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) |
@@ -2481,13 +2503,10 @@ static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) | |||
2481 | struct net_device *ndev = phy->dev; | 2503 | struct net_device *ndev = phy->dev; |
2482 | struct emac_instance *dev = netdev_priv(ndev); | 2504 | struct emac_instance *dev = netdev_priv(ndev); |
2483 | 2505 | ||
2484 | dev->phy.autoneg = AUTONEG_DISABLE; | ||
2485 | dev->phy.speed = speed; | ||
2486 | dev->phy.duplex = fd; | ||
2487 | phy->autoneg = AUTONEG_DISABLE; | 2506 | phy->autoneg = AUTONEG_DISABLE; |
2488 | phy->speed = speed; | 2507 | phy->speed = speed; |
2489 | phy->duplex = fd; | 2508 | phy->duplex = fd; |
2490 | return phy_start_aneg(dev->phy_dev); | 2509 | return emac_mdio_phy_start_aneg(phy, dev->phy_dev); |
2491 | } | 2510 | } |
2492 | 2511 | ||
2493 | static int emac_mdio_poll_link(struct mii_phy *phy) | 2512 | static int emac_mdio_poll_link(struct mii_phy *phy) |
@@ -2509,16 +2528,17 @@ static int emac_mdio_read_link(struct mii_phy *phy) | |||
2509 | { | 2528 | { |
2510 | struct net_device *ndev = phy->dev; | 2529 | struct net_device *ndev = phy->dev; |
2511 | struct emac_instance *dev = netdev_priv(ndev); | 2530 | struct emac_instance *dev = netdev_priv(ndev); |
2531 | struct phy_device *phy_dev = dev->phy_dev; | ||
2512 | int res; | 2532 | int res; |
2513 | 2533 | ||
2514 | res = phy_read_status(dev->phy_dev); | 2534 | res = phy_read_status(phy_dev); |
2515 | if (res) | 2535 | if (res) |
2516 | return res; | 2536 | return res; |
2517 | 2537 | ||
2518 | dev->phy.speed = phy->speed; | 2538 | phy->speed = phy_dev->speed; |
2519 | dev->phy.duplex = phy->duplex; | 2539 | phy->duplex = phy_dev->duplex; |
2520 | dev->phy.pause = phy->pause; | 2540 | phy->pause = phy_dev->pause; |
2521 | dev->phy.asym_pause = phy->asym_pause; | 2541 | phy->asym_pause = phy_dev->asym_pause; |
2522 | return 0; | 2542 | return 0; |
2523 | } | 2543 | } |
2524 | 2544 | ||
@@ -2528,13 +2548,6 @@ static int emac_mdio_init_phy(struct mii_phy *phy) | |||
2528 | struct emac_instance *dev = netdev_priv(ndev); | 2548 | struct emac_instance *dev = netdev_priv(ndev); |
2529 | 2549 | ||
2530 | phy_start(dev->phy_dev); | 2550 | phy_start(dev->phy_dev); |
2531 | dev->phy.autoneg = phy->autoneg; | ||
2532 | dev->phy.speed = phy->speed; | ||
2533 | dev->phy.duplex = phy->duplex; | ||
2534 | dev->phy.advertising = phy->advertising; | ||
2535 | dev->phy.pause = phy->pause; | ||
2536 | dev->phy.asym_pause = phy->asym_pause; | ||
2537 | |||
2538 | return phy_init_hw(dev->phy_dev); | 2551 | return phy_init_hw(dev->phy_dev); |
2539 | } | 2552 | } |
2540 | 2553 | ||
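The EMAC reset change above retries the soft reset exactly once with the internal clock selected when the first attempt times out, since (per the added comment) the AR8035-A PHY on the Meraki MR24 supplies no TX clock while no cable is plugged in. The control flow is a bounded retry with an alternate configuration, sketched here with stand-in functions rather than the driver's DCR accessors:

```c
/* Sketch only: retry a timed-out operation once with a fallback setting. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the hardware: the reset only completes once the internal
 * clock is selected, i.e. no external TX clock is available. */
static bool do_reset(bool internal_clock)
{
	return internal_clock;
}

static int reset_with_fallback(void)
{
	bool try_internal_clock = false;

retry:
	if (do_reset(try_internal_clock)) {
		printf("reset ok (%s clock)\n",
		       try_internal_clock ? "internal" : "external");
		return 0;
	}

	if (!try_internal_clock) {
		/* first attempt timed out: switch clock source, retry once */
		try_internal_clock = true;
		goto retry;
	}

	return -1; /* both attempts failed */
}

int main(void)
{
	return reset_with_fallback() ? 1 : 0;
}
```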
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a93757c255f7..c0fbeb387db4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -1468,6 +1468,11 @@ static void ibmvnic_netpoll_controller(struct net_device *dev) | |||
1468 | } | 1468 | } |
1469 | #endif | 1469 | #endif |
1470 | 1470 | ||
1471 | static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) | ||
1472 | { | ||
1473 | return -EOPNOTSUPP; | ||
1474 | } | ||
1475 | |||
1471 | static const struct net_device_ops ibmvnic_netdev_ops = { | 1476 | static const struct net_device_ops ibmvnic_netdev_ops = { |
1472 | .ndo_open = ibmvnic_open, | 1477 | .ndo_open = ibmvnic_open, |
1473 | .ndo_stop = ibmvnic_close, | 1478 | .ndo_stop = ibmvnic_close, |
@@ -1479,6 +1484,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = { | |||
1479 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1484 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1480 | .ndo_poll_controller = ibmvnic_netpoll_controller, | 1485 | .ndo_poll_controller = ibmvnic_netpoll_controller, |
1481 | #endif | 1486 | #endif |
1487 | .ndo_change_mtu = ibmvnic_change_mtu, | ||
1482 | }; | 1488 | }; |
1483 | 1489 | ||
1484 | /* ethtool functions */ | 1490 | /* ethtool functions */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index cdde3cc28fb5..44d9610f7a15 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
@@ -399,6 +399,7 @@ struct i40e_pf { | |||
399 | #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) | 399 | #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) |
400 | #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) | 400 | #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) |
401 | #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) | 401 | #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) |
402 | #define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4) | ||
402 | #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) | 403 | #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) |
403 | #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) | 404 | #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) |
404 | #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) | 405 | #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7a8eb486b9ea..894c8e57ba00 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
@@ -224,7 +224,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { | |||
224 | I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), | 224 | I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), |
225 | I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), | 225 | I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), |
226 | I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), | 226 | I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), |
227 | I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), | 227 | I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), |
228 | I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), | 228 | I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), |
229 | }; | 229 | }; |
230 | 230 | ||
@@ -4092,7 +4092,7 @@ flags_complete: | |||
4092 | 4092 | ||
4093 | /* Only allow ATR evict on hardware that is capable of handling it */ | 4093 | /* Only allow ATR evict on hardware that is capable of handling it */ |
4094 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) | 4094 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) |
4095 | pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; | 4095 | pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED; |
4096 | 4096 | ||
4097 | if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { | 4097 | if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { |
4098 | u16 sw_flags = 0, valid_flags = 0; | 4098 | u16 sw_flags = 0, valid_flags = 0; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 150caf6ca2b4..a7a4b28b4144 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -8821,11 +8821,12 @@ static int i40e_sw_init(struct i40e_pf *pf) | |||
8821 | (pf->hw.aq.api_min_ver > 4))) { | 8821 | (pf->hw.aq.api_min_ver > 4))) { |
8822 | /* Supported in FW API version higher than 1.4 */ | 8822 | /* Supported in FW API version higher than 1.4 */ |
8823 | pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; | 8823 | pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; |
8824 | pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; | ||
8825 | } else { | ||
8826 | pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; | ||
8827 | } | 8824 | } |
8828 | 8825 | ||
8826 | /* Enable HW ATR eviction if possible */ | ||
8827 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) | ||
8828 | pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; | ||
8829 | |||
8829 | pf->eeprom_version = 0xDEAD; | 8830 | pf->eeprom_version = 0xDEAD; |
8830 | pf->lan_veb = I40E_NO_VEB; | 8831 | pf->lan_veb = I40E_NO_VEB; |
8831 | pf->lan_vsi = I40E_NO_VSI; | 8832 | pf->lan_vsi = I40E_NO_VSI; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index cd894f4023b1..77115c25d96f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -2341,7 +2341,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
2341 | /* Due to lack of space, no more new filters can be programmed */ | 2341 | /* Due to lack of space, no more new filters can be programmed */ |
2342 | if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) | 2342 | if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) |
2343 | return; | 2343 | return; |
2344 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { | 2344 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { |
2345 | /* HW ATR eviction will take care of removing filters on FIN | 2345 | /* HW ATR eviction will take care of removing filters on FIN |
2346 | * and RST packets. | 2346 | * and RST packets. |
2347 | */ | 2347 | */ |
@@ -2403,7 +2403,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
2403 | I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & | 2403 | I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & |
2404 | I40E_TXD_FLTR_QW1_CNTINDEX_MASK; | 2404 | I40E_TXD_FLTR_QW1_CNTINDEX_MASK; |
2405 | 2405 | ||
2406 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) | 2406 | if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) |
2407 | dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; | 2407 | dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; |
2408 | 2408 | ||
2409 | fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); | 2409 | fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); |
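The i40e hunks above split I40E_FLAG_HW_ATR_EVICT_CAPABLE (what the hardware can do, set once at probe) from I40E_FLAG_HW_ATR_EVICT_ENABLED (what is currently switched on), so the ethtool private flag toggles the latter without touching the capability; the removed plain assignment pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE had also been wiping every other flag. A small sketch of the capable/enabled split, with made-up flag names:

```c
/* Sketch only: separate "hardware can do it" from "feature is on". */
#include <stdio.h>
#include <stdint.h>

#define FLAG_EVICT_CAPABLE (1u << 0)   /* set once at probe, never toggled */
#define FLAG_EVICT_ENABLED (1u << 1)   /* toggled at runtime */

static void set_feature(uint32_t *flags, int on)
{
	if (!(*flags & FLAG_EVICT_CAPABLE))
		return;                        /* unsupported: ignore request */
	if (on)
		*flags |= FLAG_EVICT_ENABLED;  /* |=, never '=', keeps other bits */
	else
		*flags &= ~FLAG_EVICT_ENABLED;
}

int main(void)
{
	uint32_t flags = FLAG_EVICT_CAPABLE;

	set_feature(&flags, 1);
	printf("enabled: %d\n", !!(flags & FLAG_EVICT_ENABLED));
	set_feature(&flags, 0);
	printf("enabled: %d, still capable: %d\n",
	       !!(flags & FLAG_EVICT_ENABLED), !!(flags & FLAG_EVICT_CAPABLE));
	return 0;
}
```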
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 95c23fbaa211..0fb38ca78900 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |||
@@ -3017,10 +3017,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, | |||
3017 | VLAN_VID_MASK)); | 3017 | VLAN_VID_MASK)); |
3018 | } | 3018 | } |
3019 | 3019 | ||
3020 | spin_unlock_bh(&vsi->mac_filter_hash_lock); | ||
3020 | if (vlan_id || qos) | 3021 | if (vlan_id || qos) |
3021 | ret = i40e_vsi_add_pvid(vsi, vlanprio); | 3022 | ret = i40e_vsi_add_pvid(vsi, vlanprio); |
3022 | else | 3023 | else |
3023 | i40e_vsi_remove_pvid(vsi); | 3024 | i40e_vsi_remove_pvid(vsi); |
3025 | spin_lock_bh(&vsi->mac_filter_hash_lock); | ||
3024 | 3026 | ||
3025 | if (vlan_id) { | 3027 | if (vlan_id) { |
3026 | dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", | 3028 | dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", |
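The i40e_ndo_set_vf_port_vlan() hunk above releases mac_filter_hash_lock around the i40e_vsi_add_pvid()/i40e_vsi_remove_pvid() calls and re-takes it afterwards; a spinlock holder must not block, and those calls can. The "drop the lock around the blocking step, reacquire it after" shape is sketched below with a pthread mutex and a sleep standing in for the firmware command; none of the names are the driver's.

```c
/* Sketch only: drop a lock before a call that can block, re-take it after.
 * In the kernel this matters because a spinlock holder must not sleep. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_filters_locked(void) { puts("filters updated"); }

static int add_port_vlan(void)
{
	usleep(1000);        /* stands in for a firmware command that sleeps */
	return 0;
}

static int set_vf_port_vlan(void)
{
	int ret;

	pthread_mutex_lock(&filter_lock);
	update_filters_locked();      /* work that genuinely needs the lock */
	pthread_mutex_unlock(&filter_lock);

	ret = add_port_vlan();        /* may block: must run unlocked */

	pthread_mutex_lock(&filter_lock);
	/* further filter-table work continues under the lock */
	pthread_mutex_unlock(&filter_lock);
	return ret;
}

int main(void)
{
	return set_vf_port_vlan();
}
```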
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 9b875d776b29..33c901622ed5 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -3719,7 +3719,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, | |||
3719 | dma_addr_t *dma_addr, | 3719 | dma_addr_t *dma_addr, |
3720 | phys_addr_t *phys_addr) | 3720 | phys_addr_t *phys_addr) |
3721 | { | 3721 | { |
3722 | int cpu = smp_processor_id(); | 3722 | int cpu = get_cpu(); |
3723 | 3723 | ||
3724 | *dma_addr = mvpp2_percpu_read(priv, cpu, | 3724 | *dma_addr = mvpp2_percpu_read(priv, cpu, |
3725 | MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); | 3725 | MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); |
@@ -3740,6 +3740,8 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, | |||
3740 | if (sizeof(phys_addr_t) == 8) | 3740 | if (sizeof(phys_addr_t) == 8) |
3741 | *phys_addr |= (u64)phys_addr_highbits << 32; | 3741 | *phys_addr |= (u64)phys_addr_highbits << 32; |
3742 | } | 3742 | } |
3743 | |||
3744 | put_cpu(); | ||
3743 | } | 3745 | } |
3744 | 3746 | ||
3745 | /* Free all buffers from the pool */ | 3747 | /* Free all buffers from the pool */ |
@@ -3920,18 +3922,12 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) | |||
3920 | return bm; | 3922 | return bm; |
3921 | } | 3923 | } |
3922 | 3924 | ||
3923 | /* Get pool number from a BM cookie */ | ||
3924 | static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) | ||
3925 | { | ||
3926 | return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; | ||
3927 | } | ||
3928 | |||
3929 | /* Release buffer to BM */ | 3925 | /* Release buffer to BM */ |
3930 | static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, | 3926 | static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, |
3931 | dma_addr_t buf_dma_addr, | 3927 | dma_addr_t buf_dma_addr, |
3932 | phys_addr_t buf_phys_addr) | 3928 | phys_addr_t buf_phys_addr) |
3933 | { | 3929 | { |
3934 | int cpu = smp_processor_id(); | 3930 | int cpu = get_cpu(); |
3935 | 3931 | ||
3936 | if (port->priv->hw_version == MVPP22) { | 3932 | if (port->priv->hw_version == MVPP22) { |
3937 | u32 val = 0; | 3933 | u32 val = 0; |
@@ -3958,15 +3954,15 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, | |||
3958 | MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); | 3954 | MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); |
3959 | mvpp2_percpu_write(port->priv, cpu, | 3955 | mvpp2_percpu_write(port->priv, cpu, |
3960 | MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); | 3956 | MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); |
3957 | |||
3958 | put_cpu(); | ||
3961 | } | 3959 | } |
3962 | 3960 | ||
3963 | /* Refill BM pool */ | 3961 | /* Refill BM pool */ |
3964 | static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, | 3962 | static void mvpp2_pool_refill(struct mvpp2_port *port, int pool, |
3965 | dma_addr_t dma_addr, | 3963 | dma_addr_t dma_addr, |
3966 | phys_addr_t phys_addr) | 3964 | phys_addr_t phys_addr) |
3967 | { | 3965 | { |
3968 | int pool = mvpp2_bm_cookie_pool_get(bm); | ||
3969 | |||
3970 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); | 3966 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); |
3971 | } | 3967 | } |
3972 | 3968 | ||
@@ -4186,8 +4182,6 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port) | |||
4186 | { | 4182 | { |
4187 | u32 val; | 4183 | u32 val; |
4188 | 4184 | ||
4189 | return; | ||
4190 | |||
4191 | /* Only GOP port 0 has an XLG MAC */ | 4185 | /* Only GOP port 0 has an XLG MAC */ |
4192 | if (port->gop_id == 0) { | 4186 | if (port->gop_id == 0) { |
4193 | val = readl(port->base + MVPP22_XLG_CTRL3_REG); | 4187 | val = readl(port->base + MVPP22_XLG_CTRL3_REG); |
@@ -4515,21 +4509,6 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port, | |||
4515 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); | 4509 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); |
4516 | } | 4510 | } |
4517 | 4511 | ||
4518 | /* Obtain BM cookie information from descriptor */ | ||
4519 | static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, | ||
4520 | struct mvpp2_rx_desc *rx_desc) | ||
4521 | { | ||
4522 | int cpu = smp_processor_id(); | ||
4523 | int pool; | ||
4524 | |||
4525 | pool = (mvpp2_rxdesc_status_get(port, rx_desc) & | ||
4526 | MVPP2_RXD_BM_POOL_ID_MASK) >> | ||
4527 | MVPP2_RXD_BM_POOL_ID_OFFS; | ||
4528 | |||
4529 | return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | | ||
4530 | ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); | ||
4531 | } | ||
4532 | |||
4533 | /* Tx descriptors helper methods */ | 4512 | /* Tx descriptors helper methods */ |
4534 | 4513 | ||
4535 | /* Get pointer to next Tx descriptor to be processed (send) by HW */ | 4514 | /* Get pointer to next Tx descriptor to be processed (send) by HW */ |
@@ -4757,7 +4736,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) | |||
4757 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, | 4736 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, |
4758 | struct mvpp2_rx_queue *rxq) | 4737 | struct mvpp2_rx_queue *rxq) |
4759 | { | 4738 | { |
4760 | int cpu = smp_processor_id(); | 4739 | int cpu = get_cpu(); |
4761 | 4740 | ||
4762 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) | 4741 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) |
4763 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; | 4742 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; |
@@ -4765,6 +4744,8 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, | |||
4765 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); | 4744 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
4766 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, | 4745 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, |
4767 | rxq->pkts_coal); | 4746 | rxq->pkts_coal); |
4747 | |||
4748 | put_cpu(); | ||
4768 | } | 4749 | } |
4769 | 4750 | ||
4770 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) | 4751 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) |
@@ -4945,7 +4926,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, | |||
4945 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | 4926 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); |
4946 | 4927 | ||
4947 | /* Set Rx descriptors queue starting address - indirect access */ | 4928 | /* Set Rx descriptors queue starting address - indirect access */ |
4948 | cpu = smp_processor_id(); | 4929 | cpu = get_cpu(); |
4949 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); | 4930 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
4950 | if (port->priv->hw_version == MVPP21) | 4931 | if (port->priv->hw_version == MVPP21) |
4951 | rxq_dma = rxq->descs_dma; | 4932 | rxq_dma = rxq->descs_dma; |
@@ -4954,6 +4935,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, | |||
4954 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); | 4935 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); |
4955 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); | 4936 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); |
4956 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); | 4937 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); |
4938 | put_cpu(); | ||
4957 | 4939 | ||
4958 | /* Set Offset */ | 4940 | /* Set Offset */ |
4959 | mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); | 4941 | mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); |
@@ -4980,9 +4962,13 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, | |||
4980 | 4962 | ||
4981 | for (i = 0; i < rx_received; i++) { | 4963 | for (i = 0; i < rx_received; i++) { |
4982 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); | 4964 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
4983 | u32 bm = mvpp2_bm_cookie_build(port, rx_desc); | 4965 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc); |
4966 | int pool; | ||
4967 | |||
4968 | pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> | ||
4969 | MVPP2_RXD_BM_POOL_ID_OFFS; | ||
4984 | 4970 | ||
4985 | mvpp2_pool_refill(port, bm, | 4971 | mvpp2_pool_refill(port, pool, |
4986 | mvpp2_rxdesc_dma_addr_get(port, rx_desc), | 4972 | mvpp2_rxdesc_dma_addr_get(port, rx_desc), |
4987 | mvpp2_rxdesc_cookie_get(port, rx_desc)); | 4973 | mvpp2_rxdesc_cookie_get(port, rx_desc)); |
4988 | } | 4974 | } |
@@ -5012,10 +4998,11 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port, | |||
5012 | * free descriptor number | 4998 | * free descriptor number |
5013 | */ | 4999 | */ |
5014 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | 5000 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); |
5015 | cpu = smp_processor_id(); | 5001 | cpu = get_cpu(); |
5016 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); | 5002 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); |
5017 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); | 5003 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); |
5018 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); | 5004 | mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); |
5005 | put_cpu(); | ||
5019 | } | 5006 | } |
5020 | 5007 | ||
5021 | /* Create and initialize a Tx queue */ | 5008 | /* Create and initialize a Tx queue */ |
@@ -5038,7 +5025,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, | |||
5038 | txq->last_desc = txq->size - 1; | 5025 | txq->last_desc = txq->size - 1; |
5039 | 5026 | ||
5040 | /* Set Tx descriptors queue starting address - indirect access */ | 5027 | /* Set Tx descriptors queue starting address - indirect access */ |
5041 | cpu = smp_processor_id(); | 5028 | cpu = get_cpu(); |
5042 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | 5029 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
5043 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, | 5030 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, |
5044 | txq->descs_dma); | 5031 | txq->descs_dma); |
@@ -5063,6 +5050,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, | |||
5063 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, | 5050 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, |
5064 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | | 5051 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | |
5065 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); | 5052 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); |
5053 | put_cpu(); | ||
5066 | 5054 | ||
5067 | /* WRR / EJP configuration - indirect access */ | 5055 | /* WRR / EJP configuration - indirect access */ |
5068 | tx_port_num = mvpp2_egress_port(port); | 5056 | tx_port_num = mvpp2_egress_port(port); |
@@ -5133,10 +5121,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, | |||
5133 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); | 5121 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); |
5134 | 5122 | ||
5135 | /* Set Tx descriptors queue starting address and size */ | 5123 | /* Set Tx descriptors queue starting address and size */ |
5136 | cpu = smp_processor_id(); | 5124 | cpu = get_cpu(); |
5137 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | 5125 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
5138 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); | 5126 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); |
5139 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); | 5127 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); |
5128 | put_cpu(); | ||
5140 | } | 5129 | } |
5141 | 5130 | ||
5142 | /* Cleanup Tx ports */ | 5131 | /* Cleanup Tx ports */ |
@@ -5146,7 +5135,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) | |||
5146 | int delay, pending, cpu; | 5135 | int delay, pending, cpu; |
5147 | u32 val; | 5136 | u32 val; |
5148 | 5137 | ||
5149 | cpu = smp_processor_id(); | 5138 | cpu = get_cpu(); |
5150 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); | 5139 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); |
5151 | val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); | 5140 | val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); |
5152 | val |= MVPP2_TXQ_DRAIN_EN_MASK; | 5141 | val |= MVPP2_TXQ_DRAIN_EN_MASK; |
@@ -5173,6 +5162,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) | |||
5173 | 5162 | ||
5174 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; | 5163 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; |
5175 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); | 5164 | mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); |
5165 | put_cpu(); | ||
5176 | 5166 | ||
5177 | for_each_present_cpu(cpu) { | 5167 | for_each_present_cpu(cpu) { |
5178 | txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); | 5168 | txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); |
@@ -5420,7 +5410,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, | |||
5420 | 5410 | ||
5421 | /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ | 5411 | /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ |
5422 | static int mvpp2_rx_refill(struct mvpp2_port *port, | 5412 | static int mvpp2_rx_refill(struct mvpp2_port *port, |
5423 | struct mvpp2_bm_pool *bm_pool, u32 bm) | 5413 | struct mvpp2_bm_pool *bm_pool, int pool) |
5424 | { | 5414 | { |
5425 | dma_addr_t dma_addr; | 5415 | dma_addr_t dma_addr; |
5426 | phys_addr_t phys_addr; | 5416 | phys_addr_t phys_addr; |
@@ -5432,7 +5422,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port, | |||
5432 | if (!buf) | 5422 | if (!buf) |
5433 | return -ENOMEM; | 5423 | return -ENOMEM; |
5434 | 5424 | ||
5435 | mvpp2_pool_refill(port, bm, dma_addr, phys_addr); | 5425 | mvpp2_pool_refill(port, pool, dma_addr, phys_addr); |
5436 | 5426 | ||
5437 | return 0; | 5427 | return 0; |
5438 | } | 5428 | } |
@@ -5490,7 +5480,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
5490 | unsigned int frag_size; | 5480 | unsigned int frag_size; |
5491 | dma_addr_t dma_addr; | 5481 | dma_addr_t dma_addr; |
5492 | phys_addr_t phys_addr; | 5482 | phys_addr_t phys_addr; |
5493 | u32 bm, rx_status; | 5483 | u32 rx_status; |
5494 | int pool, rx_bytes, err; | 5484 | int pool, rx_bytes, err; |
5495 | void *data; | 5485 | void *data; |
5496 | 5486 | ||
@@ -5502,8 +5492,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
5502 | phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); | 5492 | phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); |
5503 | data = (void *)phys_to_virt(phys_addr); | 5493 | data = (void *)phys_to_virt(phys_addr); |
5504 | 5494 | ||
5505 | bm = mvpp2_bm_cookie_build(port, rx_desc); | 5495 | pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> |
5506 | pool = mvpp2_bm_cookie_pool_get(bm); | 5496 | MVPP2_RXD_BM_POOL_ID_OFFS; |
5507 | bm_pool = &port->priv->bm_pools[pool]; | 5497 | bm_pool = &port->priv->bm_pools[pool]; |
5508 | 5498 | ||
5509 | /* In case of an error, release the requested buffer pointer | 5499 | /* In case of an error, release the requested buffer pointer |
@@ -5516,7 +5506,7 @@ err_drop_frame: | |||
5516 | dev->stats.rx_errors++; | 5506 | dev->stats.rx_errors++; |
5517 | mvpp2_rx_error(port, rx_desc); | 5507 | mvpp2_rx_error(port, rx_desc); |
5518 | /* Return the buffer to the pool */ | 5508 | /* Return the buffer to the pool */ |
5519 | mvpp2_pool_refill(port, bm, dma_addr, phys_addr); | 5509 | mvpp2_pool_refill(port, pool, dma_addr, phys_addr); |
5520 | continue; | 5510 | continue; |
5521 | } | 5511 | } |
5522 | 5512 | ||
@@ -5531,7 +5521,7 @@ err_drop_frame: | |||
5531 | goto err_drop_frame; | 5521 | goto err_drop_frame; |
5532 | } | 5522 | } |
5533 | 5523 | ||
5534 | err = mvpp2_rx_refill(port, bm_pool, bm); | 5524 | err = mvpp2_rx_refill(port, bm_pool, pool); |
5535 | if (err) { | 5525 | if (err) { |
5536 | netdev_err(port->dev, "failed to refill BM pools\n"); | 5526 | netdev_err(port->dev, "failed to refill BM pools\n"); |
5537 | goto err_drop_frame; | 5527 | goto err_drop_frame; |
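Throughout the mvpp2 hunks above smp_processor_id() becomes get_cpu()/put_cpu(): the indirect register sequences (write the queue number, then several data registers) must all land in the same per-CPU register window, so preemption has to stay disabled until the sequence finishes. Those primitives cannot run in userspace; the sketch below only models the "claim an index, keep it stable across a multi-step access, release it" shape, with a mutex per slot standing in for disabled preemption.

```c
/* Sketch only: models get_cpu()/put_cpu() as claim/release of a per-CPU
 * slot so a multi-step indirect access stays on one window. Not kernel code. */
#include <pthread.h>
#include <stdio.h>

#define NUM_CPUS 4

struct cpu_window { int num_reg; int addr_reg; int size_reg; };

static struct cpu_window windows[NUM_CPUS];
static pthread_mutex_t window_lock[NUM_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static int get_cpu_slot(void)            /* analogous to get_cpu() */
{
	int cpu = 0;                      /* the kernel uses the running CPU */
	pthread_mutex_lock(&window_lock[cpu]);
	return cpu;
}

static void put_cpu_slot(int cpu)         /* analogous to put_cpu() */
{
	pthread_mutex_unlock(&window_lock[cpu]);
}

static void rxq_init(int rxq_id, int addr, int size)
{
	int cpu = get_cpu_slot();

	/* all three writes must hit the same window: keep the slot claimed
	 * (preemption disabled, in the kernel) until the sequence is done */
	windows[cpu].num_reg  = rxq_id;
	windows[cpu].addr_reg = addr;
	windows[cpu].size_reg = size;

	put_cpu_slot(cpu);
}

int main(void)
{
	rxq_init(3, 0x1000, 256);
	printf("window0: q=%d addr=0x%x size=%d\n",
	       windows[0].num_reg, windows[0].addr_reg, windows[0].size_reg);
	return 0;
}
```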
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2fd044b23875..944fc1742464 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -458,13 +458,15 @@ struct mlx5e_mpw_info { | |||
458 | 458 | ||
459 | struct mlx5e_rx_am_stats { | 459 | struct mlx5e_rx_am_stats { |
460 | int ppms; /* packets per msec */ | 460 | int ppms; /* packets per msec */ |
461 | int bpms; /* bytes per msec */ | ||
461 | int epms; /* events per msec */ | 462 | int epms; /* events per msec */ |
462 | }; | 463 | }; |
463 | 464 | ||
464 | struct mlx5e_rx_am_sample { | 465 | struct mlx5e_rx_am_sample { |
465 | ktime_t time; | 466 | ktime_t time; |
466 | unsigned int pkt_ctr; | 467 | u32 pkt_ctr; |
467 | u16 event_ctr; | 468 | u32 byte_ctr; |
469 | u16 event_ctr; | ||
468 | }; | 470 | }; |
469 | 471 | ||
470 | struct mlx5e_rx_am { /* Adaptive Moderation */ | 472 | struct mlx5e_rx_am { /* Adaptive Moderation */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 8209affa75c3..16486dff1493 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev, | |||
1242 | SOF_TIMESTAMPING_RX_HARDWARE | | 1242 | SOF_TIMESTAMPING_RX_HARDWARE | |
1243 | SOF_TIMESTAMPING_RAW_HARDWARE; | 1243 | SOF_TIMESTAMPING_RAW_HARDWARE; |
1244 | 1244 | ||
1245 | info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | | 1245 | info->tx_types = BIT(HWTSTAMP_TX_OFF) | |
1246 | (BIT(1) << HWTSTAMP_TX_ON); | 1246 | BIT(HWTSTAMP_TX_ON); |
1247 | 1247 | ||
1248 | info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | | 1248 | info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | |
1249 | (BIT(1) << HWTSTAMP_FILTER_ALL); | 1249 | BIT(HWTSTAMP_FILTER_ALL); |
1250 | 1250 | ||
1251 | return 0; | 1251 | return 0; |
1252 | } | 1252 | } |
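The timestamping fix above replaces (BIT(1) << HWTSTAMP_TX_ON) with BIT(HWTSTAMP_TX_ON). The original expression is 2 << n, i.e. bit n+1, so every advertised mode landed one bit too high. A two-line demonstration:

```c
/* Sketch only: (BIT(1) << n) sets bit n+1, BIT(n) sets bit n. */
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned n = 3;

	printf("BIT(1) << %u = 0x%x\n", n, BIT(1) << n); /* 0x10: wrong bit */
	printf("BIT(%u)      = 0x%x\n", n, BIT(n));      /* 0x08: intended  */
	return 0;
}
```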
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 41cd22a223dc..277f4de30375 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, | |||
4241 | return netdev; | 4241 | return netdev; |
4242 | 4242 | ||
4243 | err_cleanup_nic: | 4243 | err_cleanup_nic: |
4244 | profile->cleanup(priv); | 4244 | if (profile->cleanup) |
4245 | profile->cleanup(priv); | ||
4245 | free_netdev(netdev); | 4246 | free_netdev(netdev); |
4246 | 4247 | ||
4247 | return NULL; | 4248 | return NULL; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 79462c0368a0..46984a52a94b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, | |||
791 | params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); | 791 | params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); |
792 | params->num_tc = 1; | 792 | params->num_tc = 1; |
793 | params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; | 793 | params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; |
794 | |||
795 | mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); | ||
794 | } | 796 | } |
795 | 797 | ||
796 | static void mlx5e_build_rep_netdev(struct net_device *netdev) | 798 | static void mlx5e_build_rep_netdev(struct net_device *netdev) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 02dd3a95ed8f..acf32fe952cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | |||
@@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) | |||
183 | mlx5e_am_step(am); | 183 | mlx5e_am_step(am); |
184 | } | 184 | } |
185 | 185 | ||
186 | #define IS_SIGNIFICANT_DIFF(val, ref) \ | ||
187 | (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ | ||
188 | |||
186 | static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, | 189 | static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, |
187 | struct mlx5e_rx_am_stats *prev) | 190 | struct mlx5e_rx_am_stats *prev) |
188 | { | 191 | { |
189 | int diff; | 192 | if (!prev->bpms) |
190 | 193 | return curr->bpms ? MLX5E_AM_STATS_BETTER : | |
191 | if (!prev->ppms) | ||
192 | return curr->ppms ? MLX5E_AM_STATS_BETTER : | ||
193 | MLX5E_AM_STATS_SAME; | 194 | MLX5E_AM_STATS_SAME; |
194 | 195 | ||
195 | diff = curr->ppms - prev->ppms; | 196 | if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) |
196 | if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ | 197 | return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : |
197 | return (diff > 0) ? MLX5E_AM_STATS_BETTER : | 198 | MLX5E_AM_STATS_WORSE; |
198 | MLX5E_AM_STATS_WORSE; | ||
199 | 199 | ||
200 | if (!prev->epms) | 200 | if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) |
201 | return curr->epms ? MLX5E_AM_STATS_WORSE : | 201 | return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : |
202 | MLX5E_AM_STATS_SAME; | 202 | MLX5E_AM_STATS_WORSE; |
203 | 203 | ||
204 | diff = curr->epms - prev->epms; | 204 | if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) |
205 | if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ | 205 | return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : |
206 | return (diff < 0) ? MLX5E_AM_STATS_BETTER : | 206 | MLX5E_AM_STATS_WORSE; |
207 | MLX5E_AM_STATS_WORSE; | ||
208 | 207 | ||
209 | return MLX5E_AM_STATS_SAME; | 208 | return MLX5E_AM_STATS_SAME; |
210 | } | 209 | } |
@@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, | |||
266 | { | 265 | { |
267 | s->time = ktime_get(); | 266 | s->time = ktime_get(); |
268 | s->pkt_ctr = rq->stats.packets; | 267 | s->pkt_ctr = rq->stats.packets; |
268 | s->byte_ctr = rq->stats.bytes; | ||
269 | s->event_ctr = rq->cq.event_ctr; | 269 | s->event_ctr = rq->cq.event_ctr; |
270 | } | 270 | } |
271 | 271 | ||
272 | #define MLX5E_AM_NEVENTS 64 | 272 | #define MLX5E_AM_NEVENTS 64 |
273 | #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) | ||
274 | #define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) | ||
273 | 275 | ||
274 | static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, | 276 | static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, |
275 | struct mlx5e_rx_am_sample *end, | 277 | struct mlx5e_rx_am_sample *end, |
@@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, | |||
277 | { | 279 | { |
278 | /* u32 holds up to 71 minutes, should be enough */ | 280 | /* u32 holds up to 71 minutes, should be enough */ |
279 | u32 delta_us = ktime_us_delta(end->time, start->time); | 281 | u32 delta_us = ktime_us_delta(end->time, start->time); |
280 | unsigned int npkts = end->pkt_ctr - start->pkt_ctr; | 282 | u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); |
283 | u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, | ||
284 | start->byte_ctr); | ||
281 | 285 | ||
282 | if (!delta_us) | 286 | if (!delta_us) |
283 | return; | 287 | return; |
284 | 288 | ||
285 | curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; | 289 | curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); |
286 | curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; | 290 | curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); |
291 | curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, | ||
292 | delta_us); | ||
287 | } | 293 | } |
288 | 294 | ||
289 | void mlx5e_rx_am_work(struct work_struct *work) | 295 | void mlx5e_rx_am_work(struct work_struct *work) |
@@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq) | |||
308 | 314 | ||
309 | switch (am->state) { | 315 | switch (am->state) { |
310 | case MLX5E_AM_MEASURE_IN_PROGRESS: | 316 | case MLX5E_AM_MEASURE_IN_PROGRESS: |
311 | nevents = rq->cq.event_ctr - am->start_sample.event_ctr; | 317 | nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, |
318 | am->start_sample.event_ctr); | ||
312 | if (nevents < MLX5E_AM_NEVENTS) | 319 | if (nevents < MLX5E_AM_NEVENTS) |
313 | break; | 320 | break; |
314 | mlx5e_am_sample(rq, &end_sample); | 321 | mlx5e_am_sample(rq, &end_sample); |
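The adaptive-moderation rework above compares byte, packet and event rates, and its BIT_GAP() macro computes counter deltas that stay correct when a u16 or u32 hardware counter wraps between samples. A short demonstration of the modular delta, using the same macro shape applied to made-up sample values:

```c
/* Sketch only: wrap-safe delta of a 16-bit counter, same shape as BIT_GAP(). */
#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))
#define BIT_GAP(bits, end, start) \
	((((uint64_t)(end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))

int main(void)
{
	uint16_t start = 65530, end = 5;   /* counter wrapped between samples */

	printf("naive delta (int): %d\n", end - start);           /* -65525 */
	printf("wrap-safe delta  : %llu\n",
	       (unsigned long long)BIT_GAP(16, end, start));      /* 11 */
	return 0;
}
```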
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 53e4992d6511..f81c3aa60b46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | |||
@@ -417,20 +417,13 @@ struct mlx5e_stats { | |||
417 | }; | 417 | }; |
418 | 418 | ||
419 | static const struct counter_desc mlx5e_pme_status_desc[] = { | 419 | static const struct counter_desc mlx5e_pme_status_desc[] = { |
420 | { "module_plug", 0 }, | ||
421 | { "module_unplug", 8 }, | 420 | { "module_unplug", 8 }, |
422 | }; | 421 | }; |
423 | 422 | ||
424 | static const struct counter_desc mlx5e_pme_error_desc[] = { | 423 | static const struct counter_desc mlx5e_pme_error_desc[] = { |
425 | { "module_pwr_budget_exd", 0 }, /* power budget exceed */ | 424 | { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ |
426 | { "module_long_range", 8 }, /* long range for non MLNX cable */ | 425 | { "module_high_temp", 48 }, /* high temperature */ |
427 | { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ | ||
428 | { "module_no_eeprom", 24 }, /* no eeprom/retry time out */ | ||
429 | { "module_enforce_part", 32 }, /* enforce part number list */ | ||
430 | { "module_unknown_id", 40 }, /* unknown identifier */ | ||
431 | { "module_high_temp", 48 }, /* high temperature */ | ||
432 | { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ | 426 | { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ |
433 | { "module_unknown_status", 64 }, | ||
434 | }; | 427 | }; |
435 | 428 | ||
436 | #endif /* __MLX5_EN_STATS_H__ */ | 429 | #endif /* __MLX5_EN_STATS_H__ */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ec63158ab643..9df9fc0d26f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = { | |||
895 | {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, | 895 | {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, |
896 | {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, | 896 | {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, |
897 | 897 | ||
898 | {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)}, | ||
899 | {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, | 898 | {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, |
900 | {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, | 899 | {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, |
901 | {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, | 900 | {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f991f669047e..a53e982a6863 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) | |||
906 | return 0; | 906 | return 0; |
907 | } | 907 | } |
908 | 908 | ||
909 | int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) | 909 | static int mlx5_devlink_eswitch_check(struct devlink *devlink) |
910 | { | 910 | { |
911 | struct mlx5_core_dev *dev; | 911 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
912 | u16 cur_mlx5_mode, mlx5_mode = 0; | ||
913 | 912 | ||
914 | dev = devlink_priv(devlink); | 913 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) |
914 | return -EOPNOTSUPP; | ||
915 | 915 | ||
916 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 916 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) |
917 | return -EOPNOTSUPP; | 917 | return -EOPNOTSUPP; |
918 | 918 | ||
919 | cur_mlx5_mode = dev->priv.eswitch->mode; | 919 | if (dev->priv.eswitch->mode == SRIOV_NONE) |
920 | |||
921 | if (cur_mlx5_mode == SRIOV_NONE) | ||
922 | return -EOPNOTSUPP; | 920 | return -EOPNOTSUPP; |
923 | 921 | ||
922 | return 0; | ||
923 | } | ||
924 | |||
925 | int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) | ||
926 | { | ||
927 | struct mlx5_core_dev *dev = devlink_priv(devlink); | ||
928 | u16 cur_mlx5_mode, mlx5_mode = 0; | ||
929 | int err; | ||
930 | |||
931 | err = mlx5_devlink_eswitch_check(devlink); | ||
932 | if (err) | ||
933 | return err; | ||
934 | |||
935 | cur_mlx5_mode = dev->priv.eswitch->mode; | ||
936 | |||
924 | if (esw_mode_from_devlink(mode, &mlx5_mode)) | 937 | if (esw_mode_from_devlink(mode, &mlx5_mode)) |
925 | return -EINVAL; | 938 | return -EINVAL; |
926 | 939 | ||
@@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) | |||
937 | 950 | ||
938 | int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) | 951 | int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) |
939 | { | 952 | { |
940 | struct mlx5_core_dev *dev; | 953 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
941 | 954 | int err; | |
942 | dev = devlink_priv(devlink); | ||
943 | |||
944 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | ||
945 | return -EOPNOTSUPP; | ||
946 | 955 | ||
947 | if (dev->priv.eswitch->mode == SRIOV_NONE) | 956 | err = mlx5_devlink_eswitch_check(devlink); |
948 | return -EOPNOTSUPP; | 957 | if (err) |
958 | return err; | ||
949 | 959 | ||
950 | return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); | 960 | return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); |
951 | } | 961 | } |
@@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) | |||
954 | { | 964 | { |
955 | struct mlx5_core_dev *dev = devlink_priv(devlink); | 965 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
956 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 966 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
957 | int num_vports = esw->enabled_vports; | ||
958 | int err, vport; | 967 | int err, vport; |
959 | u8 mlx5_mode; | 968 | u8 mlx5_mode; |
960 | 969 | ||
961 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 970 | err = mlx5_devlink_eswitch_check(devlink); |
962 | return -EOPNOTSUPP; | 971 | if (err) |
963 | 972 | return err; | |
964 | if (esw->mode == SRIOV_NONE) | ||
965 | return -EOPNOTSUPP; | ||
966 | 973 | ||
967 | switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { | 974 | switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { |
968 | case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: | 975 | case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: |
@@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) | |||
985 | if (err) | 992 | if (err) |
986 | goto out; | 993 | goto out; |
987 | 994 | ||
988 | for (vport = 1; vport < num_vports; vport++) { | 995 | for (vport = 1; vport < esw->enabled_vports; vport++) { |
989 | err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); | 996 | err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); |
990 | if (err) { | 997 | if (err) { |
991 | esw_warn(dev, "Failed to set min inline on vport %d\n", | 998 | esw_warn(dev, "Failed to set min inline on vport %d\n", |
@@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) | |||
1010 | { | 1017 | { |
1011 | struct mlx5_core_dev *dev = devlink_priv(devlink); | 1018 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
1012 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1019 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
1020 | int err; | ||
1013 | 1021 | ||
1014 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 1022 | err = mlx5_devlink_eswitch_check(devlink); |
1015 | return -EOPNOTSUPP; | 1023 | if (err) |
1016 | 1024 | return err; | |
1017 | if (esw->mode == SRIOV_NONE) | ||
1018 | return -EOPNOTSUPP; | ||
1019 | 1025 | ||
1020 | return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); | 1026 | return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); |
1021 | } | 1027 | } |
@@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) | |||
1062 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1068 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
1063 | int err; | 1069 | int err; |
1064 | 1070 | ||
1065 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 1071 | err = mlx5_devlink_eswitch_check(devlink); |
1066 | return -EOPNOTSUPP; | 1072 | if (err) |
1067 | 1073 | return err; | |
1068 | if (esw->mode == SRIOV_NONE) | ||
1069 | return -EOPNOTSUPP; | ||
1070 | 1074 | ||
1071 | if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && | 1075 | if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && |
1072 | (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || | 1076 | (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || |
@@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) | |||
1105 | { | 1109 | { |
1106 | struct mlx5_core_dev *dev = devlink_priv(devlink); | 1110 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
1107 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1111 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
1112 | int err; | ||
1108 | 1113 | ||
1109 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 1114 | err = mlx5_devlink_eswitch_check(devlink); |
1110 | return -EOPNOTSUPP; | 1115 | if (err) |
1111 | 1116 | return err; | |
1112 | if (esw->mode == SRIOV_NONE) | ||
1113 | return -EOPNOTSUPP; | ||
1114 | 1117 | ||
1115 | *encap = esw->offloads.encap; | 1118 | *encap = esw->offloads.encap; |
1116 | return 0; | 1119 | return 0; |
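The eswitch_offloads changes above fold the repeated "is the device a vport group manager, is SR-IOV active" checks from the devlink get/set callbacks into one mlx5_devlink_eswitch_check() helper, and add an Ethernet port-type check at the same time. The refactor pattern, a shared precondition helper reused by every entry point, sketched with invented names:

```c
/* Sketch only: shared precondition check reused by several entry points. */
#include <errno.h>
#include <stdio.h>

struct dev { int is_eth; int is_manager; int sriov_on; };

static int eswitch_check(const struct dev *d)
{
	if (!d->is_eth || !d->is_manager || !d->sriov_on)
		return -EOPNOTSUPP;
	return 0;
}

static int mode_get(const struct dev *d)
{
	int err = eswitch_check(d);

	if (err)
		return err;
	return 0;            /* ... actual work ... */
}

static int mode_set(const struct dev *d)
{
	int err = eswitch_check(d);

	if (err)
		return err;
	return 0;            /* ... actual work ... */
}

int main(void)
{
	struct dev d = { 1, 1, 0 };

	printf("mode_get: %d, mode_set: %d\n", mode_get(&d), mode_set(&d));
	return 0;
}
```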
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0e487e8ca634..8f5125ccd8d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -862,7 +862,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace | |||
862 | ft_attr.level = level; | 862 | ft_attr.level = level; |
863 | ft_attr.prio = prio; | 863 | ft_attr.prio = prio; |
864 | 864 | ||
865 | return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); | 865 | return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport); |
866 | } | 866 | } |
867 | 867 | ||
868 | struct mlx5_flow_table* | 868 | struct mlx5_flow_table* |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 44f59b1d6f0f..f27f84ffbc85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -275,10 +275,8 @@ static void poll_health(unsigned long data) | |||
275 | struct mlx5_core_health *health = &dev->priv.health; | 275 | struct mlx5_core_health *health = &dev->priv.health; |
276 | u32 count; | 276 | u32 count; |
277 | 277 | ||
278 | if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { | 278 | if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) |
279 | mod_timer(&health->timer, get_next_poll_jiffies()); | 279 | goto out; |
280 | return; | ||
281 | } | ||
282 | 280 | ||
283 | count = ioread32be(health->health_counter); | 281 | count = ioread32be(health->health_counter); |
284 | if (count == health->prev) | 282 | if (count == health->prev) |
@@ -290,8 +288,6 @@ static void poll_health(unsigned long data) | |||
290 | if (health->miss_counter == MAX_MISSES) { | 288 | if (health->miss_counter == MAX_MISSES) { |
291 | dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); | 289 | dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); |
292 | print_health_info(dev); | 290 | print_health_info(dev); |
293 | } else { | ||
294 | mod_timer(&health->timer, get_next_poll_jiffies()); | ||
295 | } | 291 | } |
296 | 292 | ||
297 | if (in_fatal(dev) && !health->sick) { | 293 | if (in_fatal(dev) && !health->sick) { |
@@ -305,6 +301,9 @@ static void poll_health(unsigned long data) | |||
305 | "new health works are not permitted at this stage\n"); | 301 | "new health works are not permitted at this stage\n"); |
306 | spin_unlock(&health->wq_lock); | 302 | spin_unlock(&health->wq_lock); |
307 | } | 303 | } |
304 | |||
305 | out: | ||
306 | mod_timer(&health->timer, get_next_poll_jiffies()); | ||
308 | } | 307 | } |
309 | 308 | ||
310 | void mlx5_start_health_poll(struct mlx5_core_dev *dev) | 309 | void mlx5_start_health_poll(struct mlx5_core_dev *dev) |
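The health.c hunks centralize the timer re-arm: instead of each early branch calling mod_timer() itself, every path now falls through to a single out: label, so the poll timer is re-armed exactly once per invocation. Condensed shape of poll_health() after the change (names as in the diff, the counter and fatal-error handling elided):

static void poll_health(unsigned long data)
{
	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_health *health = &dev->priv.health;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	/* ... read the health counter, track misses, report fatal errors ... */

out:
	mod_timer(&health->timer, get_next_poll_jiffies());
}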
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index af945edfee19..13be264587f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = { | |||
175 | }, | 175 | }, |
176 | }; | 176 | }; |
177 | 177 | ||
178 | #define FW_INIT_TIMEOUT_MILI 2000 | 178 | #define FW_INIT_TIMEOUT_MILI 2000 |
179 | #define FW_INIT_WAIT_MS 2 | 179 | #define FW_INIT_WAIT_MS 2 |
180 | #define FW_PRE_INIT_TIMEOUT_MILI 10000 | ||
180 | 181 | ||
181 | static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) | 182 | static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) |
182 | { | 183 | { |
@@ -537,8 +538,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) | |||
537 | /* disable cmdif checksum */ | 538 | /* disable cmdif checksum */ |
538 | MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); | 539 | MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); |
539 | 540 | ||
540 | /* If the HCA supports 4K UARs use it */ | 541 | /* Enable 4K UAR only when HCA supports it and page size is bigger |
541 | if (MLX5_CAP_GEN_MAX(dev, uar_4k)) | 542 | * than 4K. |
543 | */ | ||
544 | if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096) | ||
542 | MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); | 545 | MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); |
543 | 546 | ||
544 | MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); | 547 | MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); |
@@ -1011,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
1011 | */ | 1014 | */ |
1012 | dev->state = MLX5_DEVICE_STATE_UP; | 1015 | dev->state = MLX5_DEVICE_STATE_UP; |
1013 | 1016 | ||
1017 | /* wait for firmware to accept initialization segments configurations | ||
1018 | */ | ||
1019 | err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); | ||
1020 | if (err) { | ||
1021 | dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", | ||
1022 | FW_PRE_INIT_TIMEOUT_MILI); | ||
1023 | goto out; | ||
1024 | } | ||
1025 | |||
1014 | err = mlx5_cmd_init(dev); | 1026 | err = mlx5_cmd_init(dev); |
1015 | if (err) { | 1027 | if (err) { |
1016 | dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); | 1028 | dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); |
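The main.c hunks add a 10-second pre-initialization wait (FW_PRE_INIT_TIMEOUT_MILI) before mlx5_cmd_init(), so the command interface is not brought up while the firmware is still booting. wait_fw_init() already exists in this file; a sketch of the polling loop it is assumed to implement, spinning on the firmware's initializing bit in FW_INIT_WAIT_MS steps:

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}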
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 483241b4b05d..a672f6a860dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c | |||
@@ -2956,7 +2956,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, | |||
2956 | qed_wr(p_hwfn, | 2956 | qed_wr(p_hwfn, |
2957 | p_ptt, | 2957 | p_ptt, |
2958 | s_storm_defs[storm_id].cm_ctx_wr_addr, | 2958 | s_storm_defs[storm_id].cm_ctx_wr_addr, |
2959 | BIT(9) | lid); | 2959 | (i << 9) | lid); |
2960 | *(dump_buf + offset) = qed_rd(p_hwfn, | 2960 | *(dump_buf + offset) = qed_rd(p_hwfn, |
2961 | p_ptt, | 2961 | p_ptt, |
2962 | rd_reg_addr); | 2962 | rd_reg_addr); |
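The qed fix is a bit-versus-field confusion: BIT(9) hard-codes the selector above the line id to 1 regardless of the loop counter i, so the value written never varied with the iteration. The fix encodes i in those upper bits instead (illustration only; field widths are taken from the hunk, not re-verified):

u32 wr_val = (i << 9) | lid;	/* was BIT(9) | lid, i.e. (1 << 9) | lid for every i */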
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index b7e4345c990d..019cef1d3cf7 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c | |||
@@ -661,8 +661,6 @@ restore_filters: | |||
661 | up_write(&vf->efx->filter_sem); | 661 | up_write(&vf->efx->filter_sem); |
662 | mutex_unlock(&vf->efx->mac_lock); | 662 | mutex_unlock(&vf->efx->mac_lock); |
663 | 663 | ||
664 | up_write(&vf->efx->filter_sem); | ||
665 | |||
666 | rc2 = efx_net_open(vf->efx->net_dev); | 664 | rc2 = efx_net_open(vf->efx->net_dev); |
667 | if (rc2) | 665 | if (rc2) |
668 | goto reset_nic; | 666 | goto reset_nic; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index aa6476439aee..e0ef02f9503b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
@@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) | |||
214 | { | 214 | { |
215 | /* Context type from W/B descriptor must be zero */ | 215 | /* Context type from W/B descriptor must be zero */ |
216 | if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) | 216 | if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) |
217 | return -EINVAL; | 217 | return 0; |
218 | 218 | ||
219 | /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ | 219 | /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ |
220 | if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) | 220 | if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) |
221 | return 0; | 221 | return 1; |
222 | 222 | ||
223 | return 1; | 223 | return 0; |
224 | } | 224 | } |
225 | 225 | ||
226 | static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) | 226 | static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) |
@@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) | |||
282 | } | 282 | } |
283 | } | 283 | } |
284 | exit: | 284 | exit: |
285 | return ret; | 285 | if (likely(ret == 0)) |
286 | return 1; | ||
287 | |||
288 | return 0; | ||
286 | } | 289 | } |
287 | 290 | ||
288 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 291 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
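These dwmac4 hunks invert the return convention: the timestamp-status helpers now return 1 when a valid timestamp can be read and 0 otherwise, instead of 0-on-success. The stmmac_main.c hunks below update the callers to match; a condensed illustration of the resulting caller shape (based on the diff, error handling elided):

if (priv->hw->desc->get_tx_timestamp_status(p)) {
	u64 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

	/* ... fill skb_shared_hwtstamps from ns and pass it to the stack ... */
}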
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 12236daf7bb6..6e4cbc6ce0ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -434,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, | |||
434 | return; | 434 | return; |
435 | 435 | ||
436 | /* check tx tstamp status */ | 436 | /* check tx tstamp status */ |
437 | if (!priv->hw->desc->get_tx_timestamp_status(p)) { | 437 | if (priv->hw->desc->get_tx_timestamp_status(p)) { |
438 | /* get the valid tstamp */ | 438 | /* get the valid tstamp */ |
439 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); | 439 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); |
440 | 440 | ||
441 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | 441 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
442 | shhwtstamp.hwtstamp = ns_to_ktime(ns); | 442 | shhwtstamp.hwtstamp = ns_to_ktime(ns); |
443 | 443 | ||
444 | netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); | 444 | netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); |
445 | /* pass tstamp to stack */ | 445 | /* pass tstamp to stack */ |
446 | skb_tstamp_tx(skb, &shhwtstamp); | 446 | skb_tstamp_tx(skb, &shhwtstamp); |
447 | } | 447 | } |
@@ -468,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, | |||
468 | return; | 468 | return; |
469 | 469 | ||
470 | /* Check if timestamp is available */ | 470 | /* Check if timestamp is available */ |
471 | if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { | 471 | if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { |
472 | /* For GMAC4, the valid timestamp is from CTX next desc. */ | 472 | /* For GMAC4, the valid timestamp is from CTX next desc. */ |
473 | if (priv->plat->has_gmac4) | 473 | if (priv->plat->has_gmac4) |
474 | ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); | 474 | ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); |
475 | else | 475 | else |
476 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); | 476 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); |
477 | 477 | ||
478 | netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); | 478 | netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
479 | shhwtstamp = skb_hwtstamps(skb); | 479 | shhwtstamp = skb_hwtstamps(skb); |
480 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | 480 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
481 | shhwtstamp->hwtstamp = ns_to_ktime(ns); | 481 | shhwtstamp->hwtstamp = ns_to_ktime(ns); |
482 | } else { | 482 | } else { |
483 | netdev_err(priv->dev, "cannot get RX hw timestamp\n"); | 483 | netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); |
484 | } | 484 | } |
485 | } | 485 | } |
486 | 486 | ||
@@ -546,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
546 | /* PTP v1, UDP, any kind of event packet */ | 546 | /* PTP v1, UDP, any kind of event packet */ |
547 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; | 547 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; |
548 | /* take time stamp for all event messages */ | 548 | /* take time stamp for all event messages */ |
549 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | 549 | if (priv->plat->has_gmac4) |
550 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | ||
551 | else | ||
552 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | ||
550 | 553 | ||
551 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | 554 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
552 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | 555 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
@@ -578,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
578 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; | 581 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; |
579 | ptp_v2 = PTP_TCR_TSVER2ENA; | 582 | ptp_v2 = PTP_TCR_TSVER2ENA; |
580 | /* take time stamp for all event messages */ | 583 | /* take time stamp for all event messages */ |
581 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | 584 | if (priv->plat->has_gmac4) |
585 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | ||
586 | else | ||
587 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | ||
582 | 588 | ||
583 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | 589 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
584 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | 590 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
@@ -612,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
612 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; | 618 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; |
613 | ptp_v2 = PTP_TCR_TSVER2ENA; | 619 | ptp_v2 = PTP_TCR_TSVER2ENA; |
614 | /* take time stamp for all event messages */ | 620 | /* take time stamp for all event messages */ |
615 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | 621 | if (priv->plat->has_gmac4) |
622 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | ||
623 | else | ||
624 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | ||
616 | 625 | ||
617 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | 626 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
618 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | 627 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
@@ -2822,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2822 | 2831 | ||
2823 | tx_q->tx_skbuff_dma[first_entry].buf = des; | 2832 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
2824 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); | 2833 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); |
2825 | tx_q->tx_skbuff[first_entry] = skb; | ||
2826 | 2834 | ||
2827 | first->des0 = cpu_to_le32(des); | 2835 | first->des0 = cpu_to_le32(des); |
2828 | 2836 | ||
@@ -2856,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2856 | 2864 | ||
2857 | tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; | 2865 | tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; |
2858 | 2866 | ||
2867 | /* Only the last descriptor gets to point to the skb. */ | ||
2868 | tx_q->tx_skbuff[tx_q->cur_tx] = skb; | ||
2869 | |||
2870 | /* We've used all descriptors we need for this skb, however, | ||
2871 | * advance cur_tx so that it references a fresh descriptor. | ||
2872 | * ndo_start_xmit will fill this descriptor the next time it's | ||
2873 | * called and stmmac_tx_clean may clean up to this descriptor. | ||
2874 | */ | ||
2859 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); | 2875 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
2860 | 2876 | ||
2861 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { | 2877 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
@@ -2989,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2989 | 3005 | ||
2990 | first = desc; | 3006 | first = desc; |
2991 | 3007 | ||
2992 | tx_q->tx_skbuff[first_entry] = skb; | ||
2993 | |||
2994 | enh_desc = priv->plat->enh_desc; | 3008 | enh_desc = priv->plat->enh_desc; |
2995 | /* To program the descriptors according to the size of the frame */ | 3009 | /* To program the descriptors according to the size of the frame */ |
2996 | if (enh_desc) | 3010 | if (enh_desc) |
@@ -3038,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3038 | skb->len); | 3052 | skb->len); |
3039 | } | 3053 | } |
3040 | 3054 | ||
3041 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); | 3055 | /* Only the last descriptor gets to point to the skb. */ |
3056 | tx_q->tx_skbuff[entry] = skb; | ||
3042 | 3057 | ||
3058 | /* We've used all descriptors we need for this skb, however, | ||
3059 | * advance cur_tx so that it references a fresh descriptor. | ||
3060 | * ndo_start_xmit will fill this descriptor the next time it's | ||
3061 | * called and stmmac_tx_clean may clean up to this descriptor. | ||
3062 | */ | ||
3063 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); | ||
3043 | tx_q->cur_tx = entry; | 3064 | tx_q->cur_tx = entry; |
3044 | 3065 | ||
3045 | if (netif_msg_pktdata(priv)) { | 3066 | if (netif_msg_pktdata(priv)) { |
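In both stmmac_tso_xmit() and stmmac_xmit() the skb pointer now hangs off the frame's last descriptor rather than the first. A sketch of the cleanup-side invariant this relies on (this is not the in-tree stmmac_tx_clean(), only the behaviour assumed of it): the ring cleaner frees whatever skb it finds on a completed entry, so attaching the skb to the first descriptor could free it while later descriptors of the same frame are still owned by the DMA engine.

static void tx_clean_sketch(struct stmmac_tx_queue *tx_q,
			    unsigned int dirty, unsigned int cur)
{
	unsigned int entry;

	for (entry = dirty; entry != cur;
	     entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];

		/* only the last descriptor of a frame carries the skb now */
		if (skb) {
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}
	}
}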
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 48fb72fc423c..f4b31d69f60e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | |||
@@ -59,7 +59,8 @@ | |||
59 | /* Enable Snapshot for Messages Relevant to Master */ | 59 | /* Enable Snapshot for Messages Relevant to Master */ |
60 | #define PTP_TCR_TSMSTRENA BIT(15) | 60 | #define PTP_TCR_TSMSTRENA BIT(15) |
61 | /* Select PTP packets for Taking Snapshots */ | 61 | /* Select PTP packets for Taking Snapshots */ |
62 | #define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) | 62 | #define PTP_TCR_SNAPTYPSEL_1 BIT(16) |
63 | #define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) | ||
63 | /* Enable MAC address for PTP Frame Filtering */ | 64 | /* Enable MAC address for PTP Frame Filtering */ |
64 | #define PTP_TCR_TSENMACADDR BIT(18) | 65 | #define PTP_TCR_TSENMACADDR BIT(18) |
65 | 66 | ||
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6ebb0f559a42..199459bd6961 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev) | |||
1007 | 1007 | ||
1008 | dev->netdev_ops = &geneve_netdev_ops; | 1008 | dev->netdev_ops = &geneve_netdev_ops; |
1009 | dev->ethtool_ops = &geneve_ethtool_ops; | 1009 | dev->ethtool_ops = &geneve_ethtool_ops; |
1010 | dev->destructor = free_netdev; | 1010 | dev->needs_free_netdev = true; |
1011 | 1011 | ||
1012 | SET_NETDEV_DEVTYPE(dev, &geneve_type); | 1012 | SET_NETDEV_DEVTYPE(dev, &geneve_type); |
1013 | 1013 | ||
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 7b652bb7ebe4..ca110cd2a4e4 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c | |||
@@ -611,7 +611,7 @@ static const struct net_device_ops gtp_netdev_ops = { | |||
611 | static void gtp_link_setup(struct net_device *dev) | 611 | static void gtp_link_setup(struct net_device *dev) |
612 | { | 612 | { |
613 | dev->netdev_ops = &gtp_netdev_ops; | 613 | dev->netdev_ops = &gtp_netdev_ops; |
614 | dev->destructor = free_netdev; | 614 | dev->needs_free_netdev = true; |
615 | 615 | ||
616 | dev->hard_header_len = 0; | 616 | dev->hard_header_len = 0; |
617 | dev->addr_len = 0; | 617 | dev->addr_len = 0; |
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 922bf440e9f1..021a8ec411ab 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c | |||
@@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev) | |||
311 | { | 311 | { |
312 | /* Finish setting up the DEVICE info. */ | 312 | /* Finish setting up the DEVICE info. */ |
313 | dev->netdev_ops = &sp_netdev_ops; | 313 | dev->netdev_ops = &sp_netdev_ops; |
314 | dev->destructor = free_netdev; | 314 | dev->needs_free_netdev = true; |
315 | dev->mtu = SIXP_MTU; | 315 | dev->mtu = SIXP_MTU; |
316 | dev->hard_header_len = AX25_MAX_HEADER_LEN; | 316 | dev->hard_header_len = AX25_MAX_HEADER_LEN; |
317 | dev->header_ops = &ax25_header_ops; | 317 | dev->header_ops = &ax25_header_ops; |
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index f62e7f325cf9..78a6414c5fd9 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
@@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = { | |||
476 | static void bpq_setup(struct net_device *dev) | 476 | static void bpq_setup(struct net_device *dev) |
477 | { | 477 | { |
478 | dev->netdev_ops = &bpq_netdev_ops; | 478 | dev->netdev_ops = &bpq_netdev_ops; |
479 | dev->destructor = free_netdev; | 479 | dev->needs_free_netdev = true; |
480 | 480 | ||
481 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); | 481 | memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); |
482 | memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); | 482 | memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 262b2ea576a3..6066f1bcaf2d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -171,6 +171,8 @@ struct rndis_device { | |||
171 | spinlock_t request_lock; | 171 | spinlock_t request_lock; |
172 | struct list_head req_list; | 172 | struct list_head req_list; |
173 | 173 | ||
174 | struct work_struct mcast_work; | ||
175 | |||
174 | u8 hw_mac_adr[ETH_ALEN]; | 176 | u8 hw_mac_adr[ETH_ALEN]; |
175 | u8 rss_key[NETVSC_HASH_KEYLEN]; | 177 | u8 rss_key[NETVSC_HASH_KEYLEN]; |
176 | u16 ind_table[ITAB_NUM]; | 178 | u16 ind_table[ITAB_NUM]; |
@@ -201,6 +203,7 @@ int rndis_filter_open(struct netvsc_device *nvdev); | |||
201 | int rndis_filter_close(struct netvsc_device *nvdev); | 203 | int rndis_filter_close(struct netvsc_device *nvdev); |
202 | int rndis_filter_device_add(struct hv_device *dev, | 204 | int rndis_filter_device_add(struct hv_device *dev, |
203 | struct netvsc_device_info *info); | 205 | struct netvsc_device_info *info); |
206 | void rndis_filter_update(struct netvsc_device *nvdev); | ||
204 | void rndis_filter_device_remove(struct hv_device *dev, | 207 | void rndis_filter_device_remove(struct hv_device *dev, |
205 | struct netvsc_device *nvdev); | 208 | struct netvsc_device *nvdev); |
206 | int rndis_filter_set_rss_param(struct rndis_device *rdev, | 209 | int rndis_filter_set_rss_param(struct rndis_device *rdev, |
@@ -211,7 +214,6 @@ int rndis_filter_receive(struct net_device *ndev, | |||
211 | struct vmbus_channel *channel, | 214 | struct vmbus_channel *channel, |
212 | void *data, u32 buflen); | 215 | void *data, u32 buflen); |
213 | 216 | ||
214 | int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); | ||
215 | int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); | 217 | int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); |
216 | 218 | ||
217 | void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); | 219 | void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); |
@@ -696,7 +698,6 @@ struct net_device_context { | |||
696 | /* list protection */ | 698 | /* list protection */ |
697 | spinlock_t lock; | 699 | spinlock_t lock; |
698 | 700 | ||
699 | struct work_struct work; | ||
700 | u32 msg_enable; /* debug level */ | 701 | u32 msg_enable; /* debug level */ |
701 | 702 | ||
702 | u32 tx_checksum_mask; | 703 | u32 tx_checksum_mask; |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 4421a6d00375..82d6c022ca85 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -56,37 +56,12 @@ static int debug = -1; | |||
56 | module_param(debug, int, S_IRUGO); | 56 | module_param(debug, int, S_IRUGO); |
57 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | 57 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
58 | 58 | ||
59 | static void do_set_multicast(struct work_struct *w) | ||
60 | { | ||
61 | struct net_device_context *ndevctx = | ||
62 | container_of(w, struct net_device_context, work); | ||
63 | struct hv_device *device_obj = ndevctx->device_ctx; | ||
64 | struct net_device *ndev = hv_get_drvdata(device_obj); | ||
65 | struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev); | ||
66 | struct rndis_device *rdev; | ||
67 | |||
68 | if (!nvdev) | ||
69 | return; | ||
70 | |||
71 | rdev = nvdev->extension; | ||
72 | if (rdev == NULL) | ||
73 | return; | ||
74 | |||
75 | if (ndev->flags & IFF_PROMISC) | ||
76 | rndis_filter_set_packet_filter(rdev, | ||
77 | NDIS_PACKET_TYPE_PROMISCUOUS); | ||
78 | else | ||
79 | rndis_filter_set_packet_filter(rdev, | ||
80 | NDIS_PACKET_TYPE_BROADCAST | | ||
81 | NDIS_PACKET_TYPE_ALL_MULTICAST | | ||
82 | NDIS_PACKET_TYPE_DIRECTED); | ||
83 | } | ||
84 | |||
85 | static void netvsc_set_multicast_list(struct net_device *net) | 59 | static void netvsc_set_multicast_list(struct net_device *net) |
86 | { | 60 | { |
87 | struct net_device_context *net_device_ctx = netdev_priv(net); | 61 | struct net_device_context *net_device_ctx = netdev_priv(net); |
62 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | ||
88 | 63 | ||
89 | schedule_work(&net_device_ctx->work); | 64 | rndis_filter_update(nvdev); |
90 | } | 65 | } |
91 | 66 | ||
92 | static int netvsc_open(struct net_device *net) | 67 | static int netvsc_open(struct net_device *net) |
@@ -123,8 +98,6 @@ static int netvsc_close(struct net_device *net) | |||
123 | 98 | ||
124 | netif_tx_disable(net); | 99 | netif_tx_disable(net); |
125 | 100 | ||
126 | /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ | ||
127 | cancel_work_sync(&net_device_ctx->work); | ||
128 | ret = rndis_filter_close(nvdev); | 101 | ret = rndis_filter_close(nvdev); |
129 | if (ret != 0) { | 102 | if (ret != 0) { |
130 | netdev_err(net, "unable to close device (ret %d).\n", ret); | 103 | netdev_err(net, "unable to close device (ret %d).\n", ret); |
@@ -1028,7 +1001,7 @@ static const struct { | |||
1028 | static int netvsc_get_sset_count(struct net_device *dev, int string_set) | 1001 | static int netvsc_get_sset_count(struct net_device *dev, int string_set) |
1029 | { | 1002 | { |
1030 | struct net_device_context *ndc = netdev_priv(dev); | 1003 | struct net_device_context *ndc = netdev_priv(dev); |
1031 | struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); | 1004 | struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); |
1032 | 1005 | ||
1033 | if (!nvdev) | 1006 | if (!nvdev) |
1034 | return -ENODEV; | 1007 | return -ENODEV; |
@@ -1158,11 +1131,22 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, | |||
1158 | } | 1131 | } |
1159 | 1132 | ||
1160 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1133 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1161 | static void netvsc_poll_controller(struct net_device *net) | 1134 | static void netvsc_poll_controller(struct net_device *dev) |
1162 | { | 1135 | { |
1163 | /* As netvsc_start_xmit() works synchronous we don't have to | 1136 | struct net_device_context *ndc = netdev_priv(dev); |
1164 | * trigger anything here. | 1137 | struct netvsc_device *ndev; |
1165 | */ | 1138 | int i; |
1139 | |||
1140 | rcu_read_lock(); | ||
1141 | ndev = rcu_dereference(ndc->nvdev); | ||
1142 | if (ndev) { | ||
1143 | for (i = 0; i < ndev->num_chn; i++) { | ||
1144 | struct netvsc_channel *nvchan = &ndev->chan_table[i]; | ||
1145 | |||
1146 | napi_schedule(&nvchan->napi); | ||
1147 | } | ||
1148 | } | ||
1149 | rcu_read_unlock(); | ||
1166 | } | 1150 | } |
1167 | #endif | 1151 | #endif |
1168 | 1152 | ||
@@ -1552,7 +1536,6 @@ static int netvsc_probe(struct hv_device *dev, | |||
1552 | hv_set_drvdata(dev, net); | 1536 | hv_set_drvdata(dev, net); |
1553 | 1537 | ||
1554 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); | 1538 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
1555 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | ||
1556 | 1539 | ||
1557 | spin_lock_init(&net_device_ctx->lock); | 1540 | spin_lock_init(&net_device_ctx->lock); |
1558 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); | 1541 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); |
@@ -1622,7 +1605,6 @@ static int netvsc_remove(struct hv_device *dev) | |||
1622 | netif_device_detach(net); | 1605 | netif_device_detach(net); |
1623 | 1606 | ||
1624 | cancel_delayed_work_sync(&ndev_ctx->dwork); | 1607 | cancel_delayed_work_sync(&ndev_ctx->dwork); |
1625 | cancel_work_sync(&ndev_ctx->work); | ||
1626 | 1608 | ||
1627 | /* | 1609 | /* |
1628 | * Call to the vsc driver to let it know that the device is being | 1610 | * Call to the vsc driver to let it know that the device is being |
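The netvsc hunks also adjust how the RCU-protected nvdev pointer is read: ethtool callbacks run under RTNL and switch to rtnl_dereference(), while the new poll_controller path takes the RCU read lock around rcu_dereference(). A hedged illustration of the two access patterns (hypothetical foo_* wrappers, not driver code):

static int foo_under_rtnl(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	return nvdev ? 0 : -ENODEV;
}

static void foo_lockless(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev;

	rcu_read_lock();
	nvdev = rcu_dereference(ndc->nvdev);
	if (nvdev) {
		/* ... use nvdev only while the read lock is held ... */
	}
	rcu_read_unlock();
}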
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index f9d5b0b8209a..cb79cd081f42 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include "hyperv_net.h" | 32 | #include "hyperv_net.h" |
33 | 33 | ||
34 | static void rndis_set_multicast(struct work_struct *w); | ||
34 | 35 | ||
35 | #define RNDIS_EXT_LEN PAGE_SIZE | 36 | #define RNDIS_EXT_LEN PAGE_SIZE |
36 | struct rndis_request { | 37 | struct rndis_request { |
@@ -76,6 +77,7 @@ static struct rndis_device *get_rndis_device(void) | |||
76 | spin_lock_init(&device->request_lock); | 77 | spin_lock_init(&device->request_lock); |
77 | 78 | ||
78 | INIT_LIST_HEAD(&device->req_list); | 79 | INIT_LIST_HEAD(&device->req_list); |
80 | INIT_WORK(&device->mcast_work, rndis_set_multicast); | ||
79 | 81 | ||
80 | device->state = RNDIS_DEV_UNINITIALIZED; | 82 | device->state = RNDIS_DEV_UNINITIALIZED; |
81 | 83 | ||
@@ -815,7 +817,8 @@ static int rndis_filter_query_link_speed(struct rndis_device *dev) | |||
815 | return ret; | 817 | return ret; |
816 | } | 818 | } |
817 | 819 | ||
818 | int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) | 820 | static int rndis_filter_set_packet_filter(struct rndis_device *dev, |
821 | u32 new_filter) | ||
819 | { | 822 | { |
820 | struct rndis_request *request; | 823 | struct rndis_request *request; |
821 | struct rndis_set_request *set; | 824 | struct rndis_set_request *set; |
@@ -846,6 +849,28 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) | |||
846 | return ret; | 849 | return ret; |
847 | } | 850 | } |
848 | 851 | ||
852 | static void rndis_set_multicast(struct work_struct *w) | ||
853 | { | ||
854 | struct rndis_device *rdev | ||
855 | = container_of(w, struct rndis_device, mcast_work); | ||
856 | |||
857 | if (rdev->ndev->flags & IFF_PROMISC) | ||
858 | rndis_filter_set_packet_filter(rdev, | ||
859 | NDIS_PACKET_TYPE_PROMISCUOUS); | ||
860 | else | ||
861 | rndis_filter_set_packet_filter(rdev, | ||
862 | NDIS_PACKET_TYPE_BROADCAST | | ||
863 | NDIS_PACKET_TYPE_ALL_MULTICAST | | ||
864 | NDIS_PACKET_TYPE_DIRECTED); | ||
865 | } | ||
866 | |||
867 | void rndis_filter_update(struct netvsc_device *nvdev) | ||
868 | { | ||
869 | struct rndis_device *rdev = nvdev->extension; | ||
870 | |||
871 | schedule_work(&rdev->mcast_work); | ||
872 | } | ||
873 | |||
849 | static int rndis_filter_init_device(struct rndis_device *dev) | 874 | static int rndis_filter_init_device(struct rndis_device *dev) |
850 | { | 875 | { |
851 | struct rndis_request *request; | 876 | struct rndis_request *request; |
@@ -973,6 +998,9 @@ static int rndis_filter_close_device(struct rndis_device *dev) | |||
973 | if (dev->state != RNDIS_DEV_DATAINITIALIZED) | 998 | if (dev->state != RNDIS_DEV_DATAINITIALIZED) |
974 | return 0; | 999 | return 0; |
975 | 1000 | ||
1001 | /* Make sure rndis_set_multicast doesn't re-enable filter! */ | ||
1002 | cancel_work_sync(&dev->mcast_work); | ||
1003 | |||
976 | ret = rndis_filter_set_packet_filter(dev, 0); | 1004 | ret = rndis_filter_set_packet_filter(dev, 0); |
977 | if (ret == -ENODEV) | 1005 | if (ret == -ENODEV) |
978 | ret = 0; | 1006 | ret = 0; |
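Taken together, the hyperv hunks move the multicast-filter work item from net_device_context into rndis_device, so the RNDIS layer owns both its scheduling and its cancellation. Condensed call flow after the change (every function named here appears in the hunks above):

/*
 * ndo_set_rx_mode
 *   -> netvsc_set_multicast_list()
 *   -> rndis_filter_update(nvdev)
 *   -> schedule_work(&rdev->mcast_work)
 *        ... worker runs rndis_set_multicast() ...
 *   -> rndis_filter_set_packet_filter(rdev, <filter bits>)
 *
 * rndis_filter_close_device() cancels mcast_work before writing a zero
 * filter, so a late worker cannot re-enable reception on a closing device.
 */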
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 312fce7302d3..144ea5ae8ab4 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
@@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev) | |||
207 | __skb_queue_purge(&txp->tq); | 207 | __skb_queue_purge(&txp->tq); |
208 | } | 208 | } |
209 | kfree(dp->tx_private); | 209 | kfree(dp->tx_private); |
210 | free_netdev(dev); | ||
211 | } | 210 | } |
212 | 211 | ||
213 | static void ifb_setup(struct net_device *dev) | 212 | static void ifb_setup(struct net_device *dev) |
@@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev) | |||
230 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 229 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
231 | netif_keep_dst(dev); | 230 | netif_keep_dst(dev); |
232 | eth_hw_addr_random(dev); | 231 | eth_hw_addr_random(dev); |
233 | dev->destructor = ifb_dev_free; | 232 | dev->needs_free_netdev = true; |
233 | dev->priv_destructor = ifb_dev_free; | ||
234 | } | 234 | } |
235 | 235 | ||
236 | static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) | 236 | static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) |
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 618ed88fad0f..7c7680c8f0e3 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -632,7 +632,7 @@ void ipvlan_link_setup(struct net_device *dev) | |||
632 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); | 632 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); |
633 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; | 633 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; |
634 | dev->netdev_ops = &ipvlan_netdev_ops; | 634 | dev->netdev_ops = &ipvlan_netdev_ops; |
635 | dev->destructor = free_netdev; | 635 | dev->needs_free_netdev = true; |
636 | dev->header_ops = &ipvlan_header_ops; | 636 | dev->header_ops = &ipvlan_header_ops; |
637 | dev->ethtool_ops = &ipvlan_ethtool_ops; | 637 | dev->ethtool_ops = &ipvlan_ethtool_ops; |
638 | } | 638 | } |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 224f65cb576b..30612497643c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -159,7 +159,6 @@ static void loopback_dev_free(struct net_device *dev) | |||
159 | { | 159 | { |
160 | dev_net(dev)->loopback_dev = NULL; | 160 | dev_net(dev)->loopback_dev = NULL; |
161 | free_percpu(dev->lstats); | 161 | free_percpu(dev->lstats); |
162 | free_netdev(dev); | ||
163 | } | 162 | } |
164 | 163 | ||
165 | static const struct net_device_ops loopback_ops = { | 164 | static const struct net_device_ops loopback_ops = { |
@@ -196,7 +195,8 @@ static void loopback_setup(struct net_device *dev) | |||
196 | dev->ethtool_ops = &loopback_ethtool_ops; | 195 | dev->ethtool_ops = &loopback_ethtool_ops; |
197 | dev->header_ops = &eth_header_ops; | 196 | dev->header_ops = &eth_header_ops; |
198 | dev->netdev_ops = &loopback_ops; | 197 | dev->netdev_ops = &loopback_ops; |
199 | dev->destructor = loopback_dev_free; | 198 | dev->needs_free_netdev = true; |
199 | dev->priv_destructor = loopback_dev_free; | ||
200 | } | 200 | } |
201 | 201 | ||
202 | /* Setup and register the loopback device. */ | 202 | /* Setup and register the loopback device. */ |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index cdc347be68f2..79411675f0e6 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
@@ -2996,7 +2996,6 @@ static void macsec_free_netdev(struct net_device *dev) | |||
2996 | free_percpu(macsec->secy.tx_sc.stats); | 2996 | free_percpu(macsec->secy.tx_sc.stats); |
2997 | 2997 | ||
2998 | dev_put(real_dev); | 2998 | dev_put(real_dev); |
2999 | free_netdev(dev); | ||
3000 | } | 2999 | } |
3001 | 3000 | ||
3002 | static void macsec_setup(struct net_device *dev) | 3001 | static void macsec_setup(struct net_device *dev) |
@@ -3006,7 +3005,8 @@ static void macsec_setup(struct net_device *dev) | |||
3006 | dev->max_mtu = ETH_MAX_MTU; | 3005 | dev->max_mtu = ETH_MAX_MTU; |
3007 | dev->priv_flags |= IFF_NO_QUEUE; | 3006 | dev->priv_flags |= IFF_NO_QUEUE; |
3008 | dev->netdev_ops = &macsec_netdev_ops; | 3007 | dev->netdev_ops = &macsec_netdev_ops; |
3009 | dev->destructor = macsec_free_netdev; | 3008 | dev->needs_free_netdev = true; |
3009 | dev->priv_destructor = macsec_free_netdev; | ||
3010 | SET_NETDEV_DEVTYPE(dev, &macsec_type); | 3010 | SET_NETDEV_DEVTYPE(dev, &macsec_type); |
3011 | 3011 | ||
3012 | eth_zero_addr(dev->broadcast); | 3012 | eth_zero_addr(dev->broadcast); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 346ad2ff3998..67bf7ebae5c6 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -1092,7 +1092,7 @@ void macvlan_common_setup(struct net_device *dev) | |||
1092 | netif_keep_dst(dev); | 1092 | netif_keep_dst(dev); |
1093 | dev->priv_flags |= IFF_UNICAST_FLT; | 1093 | dev->priv_flags |= IFF_UNICAST_FLT; |
1094 | dev->netdev_ops = &macvlan_netdev_ops; | 1094 | dev->netdev_ops = &macvlan_netdev_ops; |
1095 | dev->destructor = free_netdev; | 1095 | dev->needs_free_netdev = true; |
1096 | dev->header_ops = &macvlan_hard_header_ops; | 1096 | dev->header_ops = &macvlan_hard_header_ops; |
1097 | dev->ethtool_ops = &macvlan_ethtool_ops; | 1097 | dev->ethtool_ops = &macvlan_ethtool_ops; |
1098 | } | 1098 | } |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 06ee6395117f..0e27920c2b6b 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
@@ -358,7 +358,7 @@ static ssize_t enabled_store(struct config_item *item, | |||
358 | if (err) | 358 | if (err) |
359 | goto out_unlock; | 359 | goto out_unlock; |
360 | 360 | ||
361 | pr_info("netconsole: network logging started\n"); | 361 | pr_info("network logging started\n"); |
362 | } else { /* false */ | 362 | } else { /* false */ |
363 | /* We need to disable the netconsole before cleaning it up | 363 | /* We need to disable the netconsole before cleaning it up |
364 | * otherwise we might end up in write_msg() with | 364 | * otherwise we might end up in write_msg() with |
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index b91603835d26..c4b3362da4a2 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c | |||
@@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev) | |||
113 | 113 | ||
114 | dev->netdev_ops = &nlmon_ops; | 114 | dev->netdev_ops = &nlmon_ops; |
115 | dev->ethtool_ops = &nlmon_ethtool_ops; | 115 | dev->ethtool_ops = &nlmon_ethtool_ops; |
116 | dev->destructor = free_netdev; | 116 | dev->needs_free_netdev = true; |
117 | 117 | ||
118 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | | 118 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | |
119 | NETIF_F_HIGHDMA | NETIF_F_LLTX; | 119 | NETIF_F_HIGHDMA | NETIF_F_LLTX; |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c360dd6ead22..3ab6c58d4be6 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -127,6 +127,7 @@ config MDIO_THUNDER | |||
127 | tristate "ThunderX SOCs MDIO buses" | 127 | tristate "ThunderX SOCs MDIO buses" |
128 | depends on 64BIT | 128 | depends on 64BIT |
129 | depends on PCI | 129 | depends on PCI |
130 | depends on !(MDIO_DEVICE=y && PHYLIB=m) | ||
130 | select MDIO_CAVIUM | 131 | select MDIO_CAVIUM |
131 | help | 132 | help |
132 | This driver supports the MDIO interfaces found on Cavium | 133 | This driver supports the MDIO interfaces found on Cavium |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7524caa0f29d..eebb0e1c70ff 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -54,6 +54,8 @@ static const char *phy_speed_to_str(int speed) | |||
54 | return "5Gbps"; | 54 | return "5Gbps"; |
55 | case SPEED_10000: | 55 | case SPEED_10000: |
56 | return "10Gbps"; | 56 | return "10Gbps"; |
57 | case SPEED_14000: | ||
58 | return "14Gbps"; | ||
57 | case SPEED_20000: | 59 | case SPEED_20000: |
58 | return "20Gbps"; | 60 | return "20Gbps"; |
59 | case SPEED_25000: | 61 | case SPEED_25000: |
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 1da31dc47f86..74b907206aa7 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c | |||
@@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev) | |||
629 | static void sl_free_netdev(struct net_device *dev) | 629 | static void sl_free_netdev(struct net_device *dev) |
630 | { | 630 | { |
631 | int i = dev->base_addr; | 631 | int i = dev->base_addr; |
632 | free_netdev(dev); | 632 | |
633 | slip_devs[i] = NULL; | 633 | slip_devs[i] = NULL; |
634 | } | 634 | } |
635 | 635 | ||
@@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = { | |||
651 | static void sl_setup(struct net_device *dev) | 651 | static void sl_setup(struct net_device *dev) |
652 | { | 652 | { |
653 | dev->netdev_ops = &sl_netdev_ops; | 653 | dev->netdev_ops = &sl_netdev_ops; |
654 | dev->destructor = sl_free_netdev; | 654 | dev->needs_free_netdev = true; |
655 | dev->priv_destructor = sl_free_netdev; | ||
655 | 656 | ||
656 | dev->hard_header_len = 0; | 657 | dev->hard_header_len = 0; |
657 | dev->addr_len = 0; | 658 | dev->addr_len = 0; |
@@ -1369,8 +1370,6 @@ static void __exit slip_exit(void) | |||
1369 | if (sl->tty) { | 1370 | if (sl->tty) { |
1370 | printk(KERN_ERR "%s: tty discipline still running\n", | 1371 | printk(KERN_ERR "%s: tty discipline still running\n", |
1371 | dev->name); | 1372 | dev->name); |
1372 | /* Intentionally leak the control block. */ | ||
1373 | dev->destructor = NULL; | ||
1374 | } | 1373 | } |
1375 | 1374 | ||
1376 | unregister_netdev(dev); | 1375 | unregister_netdev(dev); |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6c5d5ef46f75..fba8c136aa7c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev) | |||
1643 | struct team *team = netdev_priv(dev); | 1643 | struct team *team = netdev_priv(dev); |
1644 | 1644 | ||
1645 | free_percpu(team->pcpu_stats); | 1645 | free_percpu(team->pcpu_stats); |
1646 | free_netdev(dev); | ||
1647 | } | 1646 | } |
1648 | 1647 | ||
1649 | static int team_open(struct net_device *dev) | 1648 | static int team_open(struct net_device *dev) |
@@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev) | |||
2079 | 2078 | ||
2080 | dev->netdev_ops = &team_netdev_ops; | 2079 | dev->netdev_ops = &team_netdev_ops; |
2081 | dev->ethtool_ops = &team_ethtool_ops; | 2080 | dev->ethtool_ops = &team_ethtool_ops; |
2082 | dev->destructor = team_destructor; | 2081 | dev->needs_free_netdev = true; |
2082 | dev->priv_destructor = team_destructor; | ||
2083 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); | 2083 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); |
2084 | dev->priv_flags |= IFF_NO_QUEUE; | 2084 | dev->priv_flags |= IFF_NO_QUEUE; |
2085 | dev->priv_flags |= IFF_TEAM; | 2085 | dev->priv_flags |= IFF_TEAM; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bbd707b9ef7a..9ee7d4275640 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev) | |||
1560 | free_percpu(tun->pcpu_stats); | 1560 | free_percpu(tun->pcpu_stats); |
1561 | tun_flow_uninit(tun); | 1561 | tun_flow_uninit(tun); |
1562 | security_tun_dev_free_security(tun->security); | 1562 | security_tun_dev_free_security(tun->security); |
1563 | free_netdev(dev); | ||
1564 | } | 1563 | } |
1565 | 1564 | ||
1566 | static void tun_setup(struct net_device *dev) | 1565 | static void tun_setup(struct net_device *dev) |
@@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev) | |||
1571 | tun->group = INVALID_GID; | 1570 | tun->group = INVALID_GID; |
1572 | 1571 | ||
1573 | dev->ethtool_ops = &tun_ethtool_ops; | 1572 | dev->ethtool_ops = &tun_ethtool_ops; |
1574 | dev->destructor = tun_free_netdev; | 1573 | dev->needs_free_netdev = true; |
1574 | dev->priv_destructor = tun_free_netdev; | ||
1575 | /* We prefer our own queue length */ | 1575 | /* We prefer our own queue length */ |
1576 | dev->tx_queue_len = TUN_READQ_SIZE; | 1576 | dev->tx_queue_len = TUN_READQ_SIZE; |
1577 | } | 1577 | } |
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index eb52de8205f0..c7a350bbaaa7 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c | |||
@@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev) | |||
298 | dev->addr_len = 1; | 298 | dev->addr_len = 1; |
299 | dev->tx_queue_len = 3; | 299 | dev->tx_queue_len = 3; |
300 | 300 | ||
301 | dev->destructor = free_netdev; | 301 | dev->needs_free_netdev = true; |
302 | } | 302 | } |
303 | 303 | ||
304 | /* | 304 | /* |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8f923a147fa9..32a22f4e8356 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -123,7 +123,7 @@ static void qmimux_setup(struct net_device *dev) | |||
123 | dev->addr_len = 0; | 123 | dev->addr_len = 0; |
124 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; | 124 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
125 | dev->netdev_ops = &qmimux_netdev_ops; | 125 | dev->netdev_ops = &qmimux_netdev_ops; |
126 | dev->destructor = free_netdev; | 126 | dev->needs_free_netdev = true; |
127 | } | 127 | } |
128 | 128 | ||
129 | static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) | 129 | static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) |
@@ -1192,6 +1192,8 @@ static const struct usb_device_id products[] = { | |||
1192 | {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ | 1192 | {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ |
1193 | {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, | 1193 | {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, |
1194 | {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ | 1194 | {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ |
1195 | {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ | ||
1196 | {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ | ||
1195 | {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ | 1197 | {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ |
1196 | {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ | 1198 | {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ |
1197 | {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ | 1199 | {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ |
@@ -1206,6 +1208,8 @@ static const struct usb_device_id products[] = { | |||
1206 | {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ | 1208 | {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ |
1207 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ | 1209 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
1208 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ | 1210 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ |
1211 | {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ | ||
1212 | {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ | ||
1209 | {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ | 1213 | {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ |
1210 | {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ | 1214 | {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ |
1211 | {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ | 1215 | {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ddc62cb69be8..1a419a45e2a2 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -4368,6 +4368,8 @@ static u8 rtl_get_version(struct usb_interface *intf) | |||
4368 | break; | 4368 | break; |
4369 | } | 4369 | } |
4370 | 4370 | ||
4371 | dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); | ||
4372 | |||
4371 | return version; | 4373 | return version; |
4372 | } | 4374 | } |
4373 | 4375 | ||
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 38f0f03a29c8..0156fe8cac17 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
@@ -222,7 +222,6 @@ static int veth_dev_init(struct net_device *dev) | |||
222 | static void veth_dev_free(struct net_device *dev) | 222 | static void veth_dev_free(struct net_device *dev) |
223 | { | 223 | { |
224 | free_percpu(dev->vstats); | 224 | free_percpu(dev->vstats); |
225 | free_netdev(dev); | ||
226 | } | 225 | } |
227 | 226 | ||
228 | #ifdef CONFIG_NET_POLL_CONTROLLER | 227 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -317,7 +316,8 @@ static void veth_setup(struct net_device *dev) | |||
317 | NETIF_F_HW_VLAN_STAG_TX | | 316 | NETIF_F_HW_VLAN_STAG_TX | |
318 | NETIF_F_HW_VLAN_CTAG_RX | | 317 | NETIF_F_HW_VLAN_CTAG_RX | |
319 | NETIF_F_HW_VLAN_STAG_RX); | 318 | NETIF_F_HW_VLAN_STAG_RX); |
320 | dev->destructor = veth_dev_free; | 319 | dev->needs_free_netdev = true; |
320 | dev->priv_destructor = veth_dev_free; | ||
321 | dev->max_mtu = ETH_MAX_MTU; | 321 | dev->max_mtu = ETH_MAX_MTU; |
322 | 322 | ||
323 | dev->hw_features = VETH_FEATURES; | 323 | dev->hw_features = VETH_FEATURES; |
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index db882493875c..022c0b5f9844 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
@@ -36,12 +36,14 @@ | |||
36 | #include <net/addrconf.h> | 36 | #include <net/addrconf.h> |
37 | #include <net/l3mdev.h> | 37 | #include <net/l3mdev.h> |
38 | #include <net/fib_rules.h> | 38 | #include <net/fib_rules.h> |
39 | #include <net/netns/generic.h> | ||
39 | 40 | ||
40 | #define DRV_NAME "vrf" | 41 | #define DRV_NAME "vrf" |
41 | #define DRV_VERSION "1.0" | 42 | #define DRV_VERSION "1.0" |
42 | 43 | ||
43 | #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ | 44 | #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ |
44 | static bool add_fib_rules = true; | 45 | |
46 | static unsigned int vrf_net_id; | ||
45 | 47 | ||
46 | struct net_vrf { | 48 | struct net_vrf { |
47 | struct rtable __rcu *rth; | 49 | struct rtable __rcu *rth; |
@@ -1348,7 +1350,7 @@ static void vrf_setup(struct net_device *dev) | |||
1348 | dev->netdev_ops = &vrf_netdev_ops; | 1350 | dev->netdev_ops = &vrf_netdev_ops; |
1349 | dev->l3mdev_ops = &vrf_l3mdev_ops; | 1351 | dev->l3mdev_ops = &vrf_l3mdev_ops; |
1350 | dev->ethtool_ops = &vrf_ethtool_ops; | 1352 | dev->ethtool_ops = &vrf_ethtool_ops; |
1351 | dev->destructor = free_netdev; | 1353 | dev->needs_free_netdev = true; |
1352 | 1354 | ||
1353 | /* Fill in device structure with ethernet-generic values. */ | 1355 | /* Fill in device structure with ethernet-generic values. */ |
1354 | eth_hw_addr_random(dev); | 1356 | eth_hw_addr_random(dev); |
@@ -1394,6 +1396,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, | |||
1394 | struct nlattr *tb[], struct nlattr *data[]) | 1396 | struct nlattr *tb[], struct nlattr *data[]) |
1395 | { | 1397 | { |
1396 | struct net_vrf *vrf = netdev_priv(dev); | 1398 | struct net_vrf *vrf = netdev_priv(dev); |
1399 | bool *add_fib_rules; | ||
1400 | struct net *net; | ||
1397 | int err; | 1401 | int err; |
1398 | 1402 | ||
1399 | if (!data || !data[IFLA_VRF_TABLE]) | 1403 | if (!data || !data[IFLA_VRF_TABLE]) |
@@ -1409,13 +1413,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, | |||
1409 | if (err) | 1413 | if (err) |
1410 | goto out; | 1414 | goto out; |
1411 | 1415 | ||
1412 | if (add_fib_rules) { | 1416 | net = dev_net(dev); |
1417 | add_fib_rules = net_generic(net, vrf_net_id); | ||
1418 | if (*add_fib_rules) { | ||
1413 | err = vrf_add_fib_rules(dev); | 1419 | err = vrf_add_fib_rules(dev); |
1414 | if (err) { | 1420 | if (err) { |
1415 | unregister_netdevice(dev); | 1421 | unregister_netdevice(dev); |
1416 | goto out; | 1422 | goto out; |
1417 | } | 1423 | } |
1418 | add_fib_rules = false; | 1424 | *add_fib_rules = false; |
1419 | } | 1425 | } |
1420 | 1426 | ||
1421 | out: | 1427 | out: |
@@ -1498,16 +1504,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = { | |||
1498 | .notifier_call = vrf_device_event, | 1504 | .notifier_call = vrf_device_event, |
1499 | }; | 1505 | }; |
1500 | 1506 | ||
1507 | /* Initialize per network namespace state */ | ||
1508 | static int __net_init vrf_netns_init(struct net *net) | ||
1509 | { | ||
1510 | bool *add_fib_rules = net_generic(net, vrf_net_id); | ||
1511 | |||
1512 | *add_fib_rules = true; | ||
1513 | |||
1514 | return 0; | ||
1515 | } | ||
1516 | |||
1517 | static struct pernet_operations vrf_net_ops __net_initdata = { | ||
1518 | .init = vrf_netns_init, | ||
1519 | .id = &vrf_net_id, | ||
1520 | .size = sizeof(bool), | ||
1521 | }; | ||
1522 | |||
1501 | static int __init vrf_init_module(void) | 1523 | static int __init vrf_init_module(void) |
1502 | { | 1524 | { |
1503 | int rc; | 1525 | int rc; |
1504 | 1526 | ||
1505 | register_netdevice_notifier(&vrf_notifier_block); | 1527 | register_netdevice_notifier(&vrf_notifier_block); |
1506 | 1528 | ||
1507 | rc = rtnl_link_register(&vrf_link_ops); | 1529 | rc = register_pernet_subsys(&vrf_net_ops); |
1508 | if (rc < 0) | 1530 | if (rc < 0) |
1509 | goto error; | 1531 | goto error; |
1510 | 1532 | ||
1533 | rc = rtnl_link_register(&vrf_link_ops); | ||
1534 | if (rc < 0) { | ||
1535 | unregister_pernet_subsys(&vrf_net_ops); | ||
1536 | goto error; | ||
1537 | } | ||
1538 | |||
1511 | return 0; | 1539 | return 0; |
1512 | 1540 | ||
1513 | error: | 1541 | error: |
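The vrf change converts the module-global add_fib_rules flag into per-network-namespace state: with a single global bool, the l3mdev FIB rules were installed only for the first VRF created anywhere on the system, so VRFs in other namespaces never got them. With net_generic() storage, each namespace installs the rules on its first VRF. General shape of the pernet pattern being adopted, sketched for a hypothetical module "foo" (vrf stores a single bool the same way, as the hunk shows):

static unsigned int foo_net_id;

struct foo_net {
	bool add_fib_rules;
};

static int __net_init foo_netns_init(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->add_fib_rules = true;	/* first foo device in this netns installs rules */
	return 0;
}

static struct pernet_operations foo_net_ops = {
	.init = foo_netns_init,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};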
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index 7f0136f2dd9d..c28bdce14fd5 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c | |||
@@ -135,7 +135,7 @@ static void vsockmon_setup(struct net_device *dev) | |||
135 | 135 | ||
136 | dev->netdev_ops = &vsockmon_ops; | 136 | dev->netdev_ops = &vsockmon_ops; |
137 | dev->ethtool_ops = &vsockmon_ethtool_ops; | 137 | dev->ethtool_ops = &vsockmon_ethtool_ops; |
138 | dev->destructor = free_netdev; | 138 | dev->needs_free_netdev = true; |
139 | 139 | ||
140 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | | 140 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | |
141 | NETIF_F_HIGHDMA | NETIF_F_LLTX; | 141 | NETIF_F_HIGHDMA | NETIF_F_LLTX; |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a6b5052c1d36..5fa798a5c9a6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2611,7 +2611,7 @@ static void vxlan_setup(struct net_device *dev) | |||
2611 | eth_hw_addr_random(dev); | 2611 | eth_hw_addr_random(dev); |
2612 | ether_setup(dev); | 2612 | ether_setup(dev); |
2613 | 2613 | ||
2614 | dev->destructor = free_netdev; | 2614 | dev->needs_free_netdev = true; |
2615 | SET_NETDEV_DEVTYPE(dev, &vxlan_type); | 2615 | SET_NETDEV_DEVTYPE(dev, &vxlan_type); |
2616 | 2616 | ||
2617 | dev->features |= NETIF_F_LLTX; | 2617 | dev->features |= NETIF_F_LLTX; |
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 65ee2a6f248c..a0d76f70c428 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c | |||
@@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev) | |||
475 | dev->flags = 0; | 475 | dev->flags = 0; |
476 | dev->header_ops = &dlci_header_ops; | 476 | dev->header_ops = &dlci_header_ops; |
477 | dev->netdev_ops = &dlci_netdev_ops; | 477 | dev->netdev_ops = &dlci_netdev_ops; |
478 | dev->destructor = free_netdev; | 478 | dev->needs_free_netdev = true; |
479 | 479 | ||
480 | dlp->receive = dlci_receive; | 480 | dlp->receive = dlci_receive; |
481 | 481 | ||
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index eb915281197e..78596e42a3f3 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) | |||
1106 | return -EIO; | 1106 | return -EIO; |
1107 | } | 1107 | } |
1108 | 1108 | ||
1109 | dev->destructor = free_netdev; | 1109 | dev->needs_free_netdev = true; |
1110 | *get_dev_p(pvc, type) = dev; | 1110 | *get_dev_p(pvc, type) = dev; |
1111 | if (!used) { | 1111 | if (!used) { |
1112 | state(hdlc)->dce_changed = 1; | 1112 | state(hdlc)->dce_changed = 1; |
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 9df9ed62beff..63f749078a1f 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c | |||
@@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = { | |||
306 | static void lapbeth_setup(struct net_device *dev) | 306 | static void lapbeth_setup(struct net_device *dev) |
307 | { | 307 | { |
308 | dev->netdev_ops = &lapbeth_netdev_ops; | 308 | dev->netdev_ops = &lapbeth_netdev_ops; |
309 | dev->destructor = free_netdev; | 309 | dev->needs_free_netdev = true; |
310 | dev->type = ARPHRD_X25; | 310 | dev->type = ARPHRD_X25; |
311 | dev->hard_header_len = 3; | 311 | dev->hard_header_len = 3; |
312 | dev->mtu = 1000; | 312 | dev->mtu = 1000; |
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 91ee542de3d7..b90c77ef792e 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c | |||
@@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev) | |||
1287 | struct ath6kl *ar = ath6kl_priv(dev); | 1287 | struct ath6kl *ar = ath6kl_priv(dev); |
1288 | 1288 | ||
1289 | dev->netdev_ops = &ath6kl_netdev_ops; | 1289 | dev->netdev_ops = &ath6kl_netdev_ops; |
1290 | dev->destructor = free_netdev; | 1290 | dev->needs_free_netdev = true; |
1291 | dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; | 1291 | dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; |
1292 | 1292 | ||
1293 | dev->needed_headroom = ETH_HLEN; | 1293 | dev->needed_headroom = ETH_HLEN; |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index cd1d6730eab7..617199c0e5a0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
@@ -5225,7 +5225,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev) | |||
5225 | 5225 | ||
5226 | if (vif) | 5226 | if (vif) |
5227 | brcmf_free_vif(vif); | 5227 | brcmf_free_vif(vif); |
5228 | free_netdev(ndev); | ||
5229 | } | 5228 | } |
5230 | 5229 | ||
5231 | static bool brcmf_is_linkup(const struct brcmf_event_msg *e) | 5230 | static bool brcmf_is_linkup(const struct brcmf_event_msg *e) |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index a3d82368f1a9..511d190c6cca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | |||
@@ -624,7 +624,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, | |||
624 | if (!ndev) | 624 | if (!ndev) |
625 | return ERR_PTR(-ENOMEM); | 625 | return ERR_PTR(-ENOMEM); |
626 | 626 | ||
627 | ndev->destructor = brcmf_cfg80211_free_netdev; | 627 | ndev->needs_free_netdev = true; |
628 | ndev->priv_destructor = brcmf_cfg80211_free_netdev; | ||
628 | ifp = netdev_priv(ndev); | 629 | ifp = netdev_priv(ndev); |
629 | ifp->ndev = ndev; | 630 | ifp->ndev = ndev; |
630 | /* store mapping ifidx to bsscfgidx */ | 631 | /* store mapping ifidx to bsscfgidx */ |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index c7c1e9906500..d231042f19d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | |||
@@ -442,7 +442,7 @@ struct brcmf_fw { | |||
442 | const char *nvram_name; | 442 | const char *nvram_name; |
443 | u16 domain_nr; | 443 | u16 domain_nr; |
444 | u16 bus_nr; | 444 | u16 bus_nr; |
445 | void (*done)(struct device *dev, const struct firmware *fw, | 445 | void (*done)(struct device *dev, int err, const struct firmware *fw, |
446 | void *nvram_image, u32 nvram_len); | 446 | void *nvram_image, u32 nvram_len); |
447 | }; | 447 | }; |
448 | 448 | ||
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) | |||
477 | if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) | 477 | if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) |
478 | goto fail; | 478 | goto fail; |
479 | 479 | ||
480 | fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); | 480 | fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length); |
481 | kfree(fwctx); | 481 | kfree(fwctx); |
482 | return; | 482 | return; |
483 | 483 | ||
484 | fail: | 484 | fail: |
485 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); | 485 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); |
486 | release_firmware(fwctx->code); | 486 | release_firmware(fwctx->code); |
487 | device_release_driver(fwctx->dev); | 487 | fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0); |
488 | kfree(fwctx); | 488 | kfree(fwctx); |
489 | } | 489 | } |
490 | 490 | ||
491 | static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) | 491 | static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) |
492 | { | 492 | { |
493 | struct brcmf_fw *fwctx = ctx; | 493 | struct brcmf_fw *fwctx = ctx; |
494 | int ret; | 494 | int ret = 0; |
495 | 495 | ||
496 | brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); | 496 | brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); |
497 | if (!fw) | 497 | if (!fw) { |
498 | ret = -ENOENT; | ||
498 | goto fail; | 499 | goto fail; |
499 | |||
500 | /* only requested code so done here */ | ||
501 | if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) { | ||
502 | fwctx->done(fwctx->dev, fw, NULL, 0); | ||
503 | kfree(fwctx); | ||
504 | return; | ||
505 | } | 500 | } |
501 | /* only requested code so done here */ | ||
502 | if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) | ||
503 | goto done; | ||
504 | |||
506 | fwctx->code = fw; | 505 | fwctx->code = fw; |
507 | ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, | 506 | ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, |
508 | fwctx->dev, GFP_KERNEL, fwctx, | 507 | fwctx->dev, GFP_KERNEL, fwctx, |
509 | brcmf_fw_request_nvram_done); | 508 | brcmf_fw_request_nvram_done); |
510 | 509 | ||
511 | if (!ret) | 510 | /* pass NULL to nvram callback for bcm47xx fallback */ |
512 | return; | 511 | if (ret) |
513 | 512 | brcmf_fw_request_nvram_done(NULL, fwctx); | |
514 | brcmf_fw_request_nvram_done(NULL, fwctx); | ||
515 | return; | 513 | return; |
516 | 514 | ||
517 | fail: | 515 | fail: |
518 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); | 516 | brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); |
519 | device_release_driver(fwctx->dev); | 517 | done: |
518 | fwctx->done(fwctx->dev, ret, fw, NULL, 0); | ||
520 | kfree(fwctx); | 519 | kfree(fwctx); |
521 | } | 520 | } |
522 | 521 | ||
523 | int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, | 522 | int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, |
524 | const char *code, const char *nvram, | 523 | const char *code, const char *nvram, |
525 | void (*fw_cb)(struct device *dev, | 524 | void (*fw_cb)(struct device *dev, int err, |
526 | const struct firmware *fw, | 525 | const struct firmware *fw, |
527 | void *nvram_image, u32 nvram_len), | 526 | void *nvram_image, u32 nvram_len), |
528 | u16 domain_nr, u16 bus_nr) | 527 | u16 domain_nr, u16 bus_nr) |
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, | |||
555 | 554 | ||
556 | int brcmf_fw_get_firmwares(struct device *dev, u16 flags, | 555 | int brcmf_fw_get_firmwares(struct device *dev, u16 flags, |
557 | const char *code, const char *nvram, | 556 | const char *code, const char *nvram, |
558 | void (*fw_cb)(struct device *dev, | 557 | void (*fw_cb)(struct device *dev, int err, |
559 | const struct firmware *fw, | 558 | const struct firmware *fw, |
560 | void *nvram_image, u32 nvram_len)) | 559 | void *nvram_image, u32 nvram_len)) |
561 | { | 560 | { |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index d3c9f0d52ae3..8fa4b7e1ab3d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h | |||
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram); | |||
73 | */ | 73 | */ |
74 | int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, | 74 | int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, |
75 | const char *code, const char *nvram, | 75 | const char *code, const char *nvram, |
76 | void (*fw_cb)(struct device *dev, | 76 | void (*fw_cb)(struct device *dev, int err, |
77 | const struct firmware *fw, | 77 | const struct firmware *fw, |
78 | void *nvram_image, u32 nvram_len), | 78 | void *nvram_image, u32 nvram_len), |
79 | u16 domain_nr, u16 bus_nr); | 79 | u16 domain_nr, u16 bus_nr); |
80 | int brcmf_fw_get_firmwares(struct device *dev, u16 flags, | 80 | int brcmf_fw_get_firmwares(struct device *dev, u16 flags, |
81 | const char *code, const char *nvram, | 81 | const char *code, const char *nvram, |
82 | void (*fw_cb)(struct device *dev, | 82 | void (*fw_cb)(struct device *dev, int err, |
83 | const struct firmware *fw, | 83 | const struct firmware *fw, |
84 | void *nvram_image, u32 nvram_len)); | 84 | void *nvram_image, u32 nvram_len)); |
85 | 85 | ||
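
The firmware.c and firmware.h hunks thread a result code through the completion callback, so the bus driver learns that a request failed instead of the firmware layer calling device_release_driver() on its behalf. A hedged sketch of a consumer written against the updated prototype; the example name, the dev_err message, and the download step are placeholders, while brcmf_fw_nvram_free() is the helper declared in this driver's firmware.h:

#include <linux/device.h>
#include <linux/firmware.h>

static void example_fw_callback(struct device *dev, int err,
				const struct firmware *fw,
				void *nvram_image, u32 nvram_len)
{
	if (err) {
		/* request failed: fw / nvram_image are not usable, and the
		 * bus driver decides how to unwind from here */
		dev_err(dev, "firmware request failed: %d\n", err);
		return;
	}

	/* download fw->data and nvram_image to the device here */

	release_firmware(fw);
	brcmf_fw_nvram_free(nvram_image);
}
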
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 72373e59308e..f59642b2c935 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | |||
@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp) | |||
2145 | struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); | 2145 | struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); |
2146 | struct brcmf_fws_mac_descriptor *entry; | 2146 | struct brcmf_fws_mac_descriptor *entry; |
2147 | 2147 | ||
2148 | if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE) | 2148 | if (!ifp->ndev || !brcmf_fws_queue_skbs(fws)) |
2149 | return; | 2149 | return; |
2150 | 2150 | ||
2151 | entry = &fws->desc.iface[ifp->ifidx]; | 2151 | entry = &fws->desc.iface[ifp->ifidx]; |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index f36b96dc6acd..f878706613e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | |||
@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { | |||
1650 | .write32 = brcmf_pcie_buscore_write32, | 1650 | .write32 = brcmf_pcie_buscore_write32, |
1651 | }; | 1651 | }; |
1652 | 1652 | ||
1653 | static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, | 1653 | static void brcmf_pcie_setup(struct device *dev, int ret, |
1654 | const struct firmware *fw, | ||
1654 | void *nvram, u32 nvram_len) | 1655 | void *nvram, u32 nvram_len) |
1655 | { | 1656 | { |
1656 | struct brcmf_bus *bus = dev_get_drvdata(dev); | 1657 | struct brcmf_bus *bus; |
1657 | struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; | 1658 | struct brcmf_pciedev *pcie_bus_dev; |
1658 | struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; | 1659 | struct brcmf_pciedev_info *devinfo; |
1659 | struct brcmf_commonring **flowrings; | 1660 | struct brcmf_commonring **flowrings; |
1660 | int ret; | ||
1661 | u32 i; | 1661 | u32 i; |
1662 | 1662 | ||
1663 | /* check firmware loading result */ | ||
1664 | if (ret) | ||
1665 | goto fail; | ||
1666 | |||
1667 | bus = dev_get_drvdata(dev); | ||
1668 | pcie_bus_dev = bus->bus_priv.pcie; | ||
1669 | devinfo = pcie_bus_dev->devinfo; | ||
1663 | brcmf_pcie_attach(devinfo); | 1670 | brcmf_pcie_attach(devinfo); |
1664 | 1671 | ||
1665 | /* Some of the firmwares have the size of the memory of the device | 1672 | /* Some of the firmwares have the size of the memory of the device |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index e03450059b06..5653d6dd38f6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | |||
@@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { | |||
3982 | .get_memdump = brcmf_sdio_bus_get_memdump, | 3982 | .get_memdump = brcmf_sdio_bus_get_memdump, |
3983 | }; | 3983 | }; |
3984 | 3984 | ||
3985 | static void brcmf_sdio_firmware_callback(struct device *dev, | 3985 | static void brcmf_sdio_firmware_callback(struct device *dev, int err, |
3986 | const struct firmware *code, | 3986 | const struct firmware *code, |
3987 | void *nvram, u32 nvram_len) | 3987 | void *nvram, u32 nvram_len) |
3988 | { | 3988 | { |
3989 | struct brcmf_bus *bus_if = dev_get_drvdata(dev); | 3989 | struct brcmf_bus *bus_if; |
3990 | struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; | 3990 | struct brcmf_sdio_dev *sdiodev; |
3991 | struct brcmf_sdio *bus = sdiodev->bus; | 3991 | struct brcmf_sdio *bus; |
3992 | int err = 0; | ||
3993 | u8 saveclk; | 3992 | u8 saveclk; |
3994 | 3993 | ||
3995 | brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); | 3994 | brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); |
3995 | bus_if = dev_get_drvdata(dev); | ||
3996 | sdiodev = bus_if->bus_priv.sdio; | ||
3997 | if (err) | ||
3998 | goto fail; | ||
3996 | 3999 | ||
3997 | if (!bus_if->drvr) | 4000 | if (!bus_if->drvr) |
3998 | return; | 4001 | return; |
3999 | 4002 | ||
4003 | bus = sdiodev->bus; | ||
4004 | |||
4000 | /* try to download image and nvram to the dongle */ | 4005 | /* try to download image and nvram to the dongle */ |
4001 | bus->alp_only = true; | 4006 | bus->alp_only = true; |
4002 | err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); | 4007 | err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); |
@@ -4083,6 +4088,7 @@ release: | |||
4083 | fail: | 4088 | fail: |
4084 | brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); | 4089 | brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); |
4085 | device_release_driver(dev); | 4090 | device_release_driver(dev); |
4091 | device_release_driver(&sdiodev->func[2]->dev); | ||
4086 | } | 4092 | } |
4087 | 4093 | ||
4088 | struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) | 4094 | struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index e4d545f9edee..0eea48e73331 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | |||
@@ -1159,17 +1159,18 @@ fail: | |||
1159 | return ret; | 1159 | return ret; |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | static void brcmf_usb_probe_phase2(struct device *dev, | 1162 | static void brcmf_usb_probe_phase2(struct device *dev, int ret, |
1163 | const struct firmware *fw, | 1163 | const struct firmware *fw, |
1164 | void *nvram, u32 nvlen) | 1164 | void *nvram, u32 nvlen) |
1165 | { | 1165 | { |
1166 | struct brcmf_bus *bus = dev_get_drvdata(dev); | 1166 | struct brcmf_bus *bus = dev_get_drvdata(dev); |
1167 | struct brcmf_usbdev_info *devinfo; | 1167 | struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo; |
1168 | int ret; | 1168 | |
1169 | if (ret) | ||
1170 | goto error; | ||
1169 | 1171 | ||
1170 | brcmf_dbg(USB, "Start fw downloading\n"); | 1172 | brcmf_dbg(USB, "Start fw downloading\n"); |
1171 | 1173 | ||
1172 | devinfo = bus->bus_priv.usb->devinfo; | ||
1173 | ret = check_file(fw->data); | 1174 | ret = check_file(fw->data); |
1174 | if (ret < 0) { | 1175 | if (ret < 0) { |
1175 | brcmf_err("invalid firmware\n"); | 1176 | brcmf_err("invalid firmware\n"); |
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 544fc09dcb62..1372b20f931e 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c | |||
@@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local, | |||
73 | dev->mem_end = mdev->mem_end; | 73 | dev->mem_end = mdev->mem_end; |
74 | 74 | ||
75 | hostap_setup_dev(dev, local, type); | 75 | hostap_setup_dev(dev, local, type); |
76 | dev->destructor = free_netdev; | 76 | dev->needs_free_netdev = true; |
77 | 77 | ||
78 | sprintf(dev->name, "%s%s", prefix, name); | 78 | sprintf(dev->name, "%s%s", prefix, name); |
79 | if (!rtnl_locked) | 79 | if (!rtnl_locked) |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 002b25cff5b6..c854a557998b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -2861,7 +2861,7 @@ static const struct net_device_ops hwsim_netdev_ops = { | |||
2861 | static void hwsim_mon_setup(struct net_device *dev) | 2861 | static void hwsim_mon_setup(struct net_device *dev) |
2862 | { | 2862 | { |
2863 | dev->netdev_ops = &hwsim_netdev_ops; | 2863 | dev->netdev_ops = &hwsim_netdev_ops; |
2864 | dev->destructor = free_netdev; | 2864 | dev->needs_free_netdev = true; |
2865 | ether_setup(dev); | 2865 | ether_setup(dev); |
2866 | dev->priv_flags |= IFF_NO_QUEUE; | 2866 | dev->priv_flags |= IFF_NO_QUEUE; |
2867 | dev->type = ARPHRD_IEEE80211_RADIOTAP; | 2867 | dev->type = ARPHRD_IEEE80211_RADIOTAP; |
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index dd87b9ff64c3..39b6b5e3f6e0 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c | |||
@@ -1280,7 +1280,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, | |||
1280 | struct net_device *dev) | 1280 | struct net_device *dev) |
1281 | { | 1281 | { |
1282 | dev->netdev_ops = &mwifiex_netdev_ops; | 1282 | dev->netdev_ops = &mwifiex_netdev_ops; |
1283 | dev->destructor = free_netdev; | 1283 | dev->needs_free_netdev = true; |
1284 | /* Initialize private structure */ | 1284 | /* Initialize private structure */ |
1285 | priv->current_key_index = 0; | 1285 | priv->current_key_index = 0; |
1286 | priv->media_connected = false; | 1286 | priv->media_connected = false; |
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index c00238491673..7b3b6fd63d7d 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c | |||
@@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = { | |||
2878 | .link_is_up = xeon_link_is_up, | 2878 | .link_is_up = xeon_link_is_up, |
2879 | .db_ioread = skx_db_ioread, | 2879 | .db_ioread = skx_db_ioread, |
2880 | .db_iowrite = skx_db_iowrite, | 2880 | .db_iowrite = skx_db_iowrite, |
2881 | .db_size = sizeof(u64), | 2881 | .db_size = sizeof(u32), |
2882 | .ntb_ctl = SKX_NTBCNTL_OFFSET, | 2882 | .ntb_ctl = SKX_NTBCNTL_OFFSET, |
2883 | .mw_bar = {2, 4}, | 2883 | .mw_bar = {2, 4}, |
2884 | }; | 2884 | }; |
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 02ca45fdd892..10e5bf460139 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
@@ -177,14 +177,12 @@ struct ntb_transport_qp { | |||
177 | u64 rx_err_ver; | 177 | u64 rx_err_ver; |
178 | u64 rx_memcpy; | 178 | u64 rx_memcpy; |
179 | u64 rx_async; | 179 | u64 rx_async; |
180 | u64 dma_rx_prep_err; | ||
181 | u64 tx_bytes; | 180 | u64 tx_bytes; |
182 | u64 tx_pkts; | 181 | u64 tx_pkts; |
183 | u64 tx_ring_full; | 182 | u64 tx_ring_full; |
184 | u64 tx_err_no_buf; | 183 | u64 tx_err_no_buf; |
185 | u64 tx_memcpy; | 184 | u64 tx_memcpy; |
186 | u64 tx_async; | 185 | u64 tx_async; |
187 | u64 dma_tx_prep_err; | ||
188 | }; | 186 | }; |
189 | 187 | ||
190 | struct ntb_transport_mw { | 188 | struct ntb_transport_mw { |
@@ -254,8 +252,6 @@ enum { | |||
254 | #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) | 252 | #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) |
255 | #define NTB_QP_DEF_NUM_ENTRIES 100 | 253 | #define NTB_QP_DEF_NUM_ENTRIES 100 |
256 | #define NTB_LINK_DOWN_TIMEOUT 10 | 254 | #define NTB_LINK_DOWN_TIMEOUT 10 |
257 | #define DMA_RETRIES 20 | ||
258 | #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) | ||
259 | 255 | ||
260 | static void ntb_transport_rxc_db(unsigned long data); | 256 | static void ntb_transport_rxc_db(unsigned long data); |
261 | static const struct ntb_ctx_ops ntb_transport_ops; | 257 | static const struct ntb_ctx_ops ntb_transport_ops; |
@@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, | |||
516 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 512 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
517 | "free tx - \t%u\n", | 513 | "free tx - \t%u\n", |
518 | ntb_transport_tx_free_entry(qp)); | 514 | ntb_transport_tx_free_entry(qp)); |
519 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
520 | "DMA tx prep err - \t%llu\n", | ||
521 | qp->dma_tx_prep_err); | ||
522 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
523 | "DMA rx prep err - \t%llu\n", | ||
524 | qp->dma_rx_prep_err); | ||
525 | 515 | ||
526 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 516 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
527 | "\n"); | 517 | "\n"); |
@@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, | |||
623 | if (!mw->virt_addr) | 613 | if (!mw->virt_addr) |
624 | return -ENOMEM; | 614 | return -ENOMEM; |
625 | 615 | ||
626 | if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) | 616 | if (mw_num < qp_count % mw_count) |
627 | num_qps_mw = qp_count / mw_count + 1; | 617 | num_qps_mw = qp_count / mw_count + 1; |
628 | else | 618 | else |
629 | num_qps_mw = qp_count / mw_count; | 619 | num_qps_mw = qp_count / mw_count; |
@@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) | |||
768 | qp->tx_err_no_buf = 0; | 758 | qp->tx_err_no_buf = 0; |
769 | qp->tx_memcpy = 0; | 759 | qp->tx_memcpy = 0; |
770 | qp->tx_async = 0; | 760 | qp->tx_async = 0; |
771 | qp->dma_tx_prep_err = 0; | ||
772 | qp->dma_rx_prep_err = 0; | ||
773 | } | 761 | } |
774 | 762 | ||
775 | static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) | 763 | static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) |
@@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, | |||
1000 | qp->event_handler = NULL; | 988 | qp->event_handler = NULL; |
1001 | ntb_qp_link_down_reset(qp); | 989 | ntb_qp_link_down_reset(qp); |
1002 | 990 | ||
1003 | if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) | 991 | if (mw_num < qp_count % mw_count) |
1004 | num_qps_mw = qp_count / mw_count + 1; | 992 | num_qps_mw = qp_count / mw_count + 1; |
1005 | else | 993 | else |
1006 | num_qps_mw = qp_count / mw_count; | 994 | num_qps_mw = qp_count / mw_count; |
@@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) | |||
1128 | qp_count = ilog2(qp_bitmap); | 1116 | qp_count = ilog2(qp_bitmap); |
1129 | if (max_num_clients && max_num_clients < qp_count) | 1117 | if (max_num_clients && max_num_clients < qp_count) |
1130 | qp_count = max_num_clients; | 1118 | qp_count = max_num_clients; |
1131 | else if (mw_count < qp_count) | 1119 | else if (nt->mw_count < qp_count) |
1132 | qp_count = mw_count; | 1120 | qp_count = nt->mw_count; |
1133 | 1121 | ||
1134 | qp_bitmap &= BIT_ULL(qp_count) - 1; | 1122 | qp_bitmap &= BIT_ULL(qp_count) - 1; |
1135 | 1123 | ||
@@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) | |||
1317 | struct dmaengine_unmap_data *unmap; | 1305 | struct dmaengine_unmap_data *unmap; |
1318 | dma_cookie_t cookie; | 1306 | dma_cookie_t cookie; |
1319 | void *buf = entry->buf; | 1307 | void *buf = entry->buf; |
1320 | int retries = 0; | ||
1321 | 1308 | ||
1322 | len = entry->len; | 1309 | len = entry->len; |
1323 | device = chan->device; | 1310 | device = chan->device; |
@@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) | |||
1346 | 1333 | ||
1347 | unmap->from_cnt = 1; | 1334 | unmap->from_cnt = 1; |
1348 | 1335 | ||
1349 | for (retries = 0; retries < DMA_RETRIES; retries++) { | 1336 | txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], |
1350 | txd = device->device_prep_dma_memcpy(chan, | 1337 | unmap->addr[0], len, |
1351 | unmap->addr[1], | 1338 | DMA_PREP_INTERRUPT); |
1352 | unmap->addr[0], len, | 1339 | if (!txd) |
1353 | DMA_PREP_INTERRUPT); | ||
1354 | if (txd) | ||
1355 | break; | ||
1356 | |||
1357 | set_current_state(TASK_INTERRUPTIBLE); | ||
1358 | schedule_timeout(DMA_OUT_RESOURCE_TO); | ||
1359 | } | ||
1360 | |||
1361 | if (!txd) { | ||
1362 | qp->dma_rx_prep_err++; | ||
1363 | goto err_get_unmap; | 1340 | goto err_get_unmap; |
1364 | } | ||
1365 | 1341 | ||
1366 | txd->callback_result = ntb_rx_copy_callback; | 1342 | txd->callback_result = ntb_rx_copy_callback; |
1367 | txd->callback_param = entry; | 1343 | txd->callback_param = entry; |
@@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, | |||
1606 | struct dmaengine_unmap_data *unmap; | 1582 | struct dmaengine_unmap_data *unmap; |
1607 | dma_addr_t dest; | 1583 | dma_addr_t dest; |
1608 | dma_cookie_t cookie; | 1584 | dma_cookie_t cookie; |
1609 | int retries = 0; | ||
1610 | 1585 | ||
1611 | device = chan->device; | 1586 | device = chan->device; |
1612 | dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; | 1587 | dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; |
@@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp, | |||
1628 | 1603 | ||
1629 | unmap->to_cnt = 1; | 1604 | unmap->to_cnt = 1; |
1630 | 1605 | ||
1631 | for (retries = 0; retries < DMA_RETRIES; retries++) { | 1606 | txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, |
1632 | txd = device->device_prep_dma_memcpy(chan, dest, | 1607 | DMA_PREP_INTERRUPT); |
1633 | unmap->addr[0], len, | 1608 | if (!txd) |
1634 | DMA_PREP_INTERRUPT); | ||
1635 | if (txd) | ||
1636 | break; | ||
1637 | |||
1638 | set_current_state(TASK_INTERRUPTIBLE); | ||
1639 | schedule_timeout(DMA_OUT_RESOURCE_TO); | ||
1640 | } | ||
1641 | |||
1642 | if (!txd) { | ||
1643 | qp->dma_tx_prep_err++; | ||
1644 | goto err_get_unmap; | 1609 | goto err_get_unmap; |
1645 | } | ||
1646 | 1610 | ||
1647 | txd->callback_result = ntb_tx_copy_callback; | 1611 | txd->callback_result = ntb_tx_copy_callback; |
1648 | txd->callback_param = entry; | 1612 | txd->callback_param = entry; |
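
Both ntb_transport.c hunks replace the old distribution test with mw_num < qp_count % mw_count, which hands the remainder queue pairs to the lowest-numbered memory windows. A small stand-alone illustration of the arithmetic (plain user-space C, not kernel code):

#include <stdio.h>

/* Distribute qp_count queue pairs across mw_count memory windows the way the
 * fixed logic does: the first (qp_count % mw_count) windows take one extra. */
int main(void)
{
	unsigned int qp_count = 8, mw_count = 3, mw_num;

	for (mw_num = 0; mw_num < mw_count; mw_num++) {
		unsigned int num_qps_mw;

		if (mw_num < qp_count % mw_count)
			num_qps_mw = qp_count / mw_count + 1;
		else
			num_qps_mw = qp_count / mw_count;

		printf("mw %u -> %u qps\n", mw_num, num_qps_mw);
	}
	return 0;
}

With qp_count = 8 and mw_count = 3 this prints 3, 3, 2, which sums to 8; the old expression evaluated to 3, 2, 2 for the same inputs, leaving one queue pair unaccounted for.
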
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 434e1d474f33..5cab2831ce99 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c | |||
@@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows"); | |||
90 | 90 | ||
91 | static unsigned int seg_order = 19; /* 512K */ | 91 | static unsigned int seg_order = 19; /* 512K */ |
92 | module_param(seg_order, uint, 0644); | 92 | module_param(seg_order, uint, 0644); |
93 | MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing"); | 93 | MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing"); |
94 | 94 | ||
95 | static unsigned int run_order = 32; /* 4G */ | 95 | static unsigned int run_order = 32; /* 4G */ |
96 | module_param(run_order, uint, 0644); | 96 | module_param(run_order, uint, 0644); |
97 | MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer"); | 97 | MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer"); |
98 | 98 | ||
99 | static bool use_dma; /* default to 0 */ | 99 | static bool use_dma; /* default to 0 */ |
100 | module_param(use_dma, bool, 0644); | 100 | module_param(use_dma, bool, 0644); |
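
The ntb_perf hunk only corrects the parameter descriptions: seg_order and run_order are exponents, so the sizes are 2^n bytes rather than n^2. Purely illustrative:

/* Illustrative only: the orders are exponents of two. */
u64 seg_bytes = 1ULL << seg_order;	/* seg_order = 19  ->  512 KiB per segment  */
u64 run_bytes = 1ULL << run_order;	/* run_order = 32  ->  4 GiB total transfer */
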
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 74cf5fffb1e1..c80e37a69305 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c | |||
@@ -896,7 +896,7 @@ int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) | |||
896 | { | 896 | { |
897 | if (pci_dev_is_disconnected(dev)) { | 897 | if (pci_dev_is_disconnected(dev)) { |
898 | *val = ~0; | 898 | *val = ~0; |
899 | return -ENODEV; | 899 | return PCIBIOS_DEVICE_NOT_FOUND; |
900 | } | 900 | } |
901 | return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); | 901 | return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); |
902 | } | 902 | } |
@@ -906,7 +906,7 @@ int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) | |||
906 | { | 906 | { |
907 | if (pci_dev_is_disconnected(dev)) { | 907 | if (pci_dev_is_disconnected(dev)) { |
908 | *val = ~0; | 908 | *val = ~0; |
909 | return -ENODEV; | 909 | return PCIBIOS_DEVICE_NOT_FOUND; |
910 | } | 910 | } |
911 | return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); | 911 | return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); |
912 | } | 912 | } |
@@ -917,7 +917,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, | |||
917 | { | 917 | { |
918 | if (pci_dev_is_disconnected(dev)) { | 918 | if (pci_dev_is_disconnected(dev)) { |
919 | *val = ~0; | 919 | *val = ~0; |
920 | return -ENODEV; | 920 | return PCIBIOS_DEVICE_NOT_FOUND; |
921 | } | 921 | } |
922 | return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); | 922 | return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); |
923 | } | 923 | } |
@@ -926,7 +926,7 @@ EXPORT_SYMBOL(pci_read_config_dword); | |||
926 | int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) | 926 | int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) |
927 | { | 927 | { |
928 | if (pci_dev_is_disconnected(dev)) | 928 | if (pci_dev_is_disconnected(dev)) |
929 | return -ENODEV; | 929 | return PCIBIOS_DEVICE_NOT_FOUND; |
930 | return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); | 930 | return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); |
931 | } | 931 | } |
932 | EXPORT_SYMBOL(pci_write_config_byte); | 932 | EXPORT_SYMBOL(pci_write_config_byte); |
@@ -934,7 +934,7 @@ EXPORT_SYMBOL(pci_write_config_byte); | |||
934 | int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) | 934 | int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) |
935 | { | 935 | { |
936 | if (pci_dev_is_disconnected(dev)) | 936 | if (pci_dev_is_disconnected(dev)) |
937 | return -ENODEV; | 937 | return PCIBIOS_DEVICE_NOT_FOUND; |
938 | return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); | 938 | return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); |
939 | } | 939 | } |
940 | EXPORT_SYMBOL(pci_write_config_word); | 940 | EXPORT_SYMBOL(pci_write_config_word); |
@@ -943,7 +943,7 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, | |||
943 | u32 val) | 943 | u32 val) |
944 | { | 944 | { |
945 | if (pci_dev_is_disconnected(dev)) | 945 | if (pci_dev_is_disconnected(dev)) |
946 | return -ENODEV; | 946 | return PCIBIOS_DEVICE_NOT_FOUND; |
947 | return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); | 947 | return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); |
948 | } | 948 | } |
949 | EXPORT_SYMBOL(pci_write_config_dword); | 949 | EXPORT_SYMBOL(pci_write_config_dword); |
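
After this change the pci_{read,write}_config_*() wrappers report a surprise-removed device with the same PCIBIOS error code the underlying bus accessors already use, rather than a raw -ENODEV. A hedged sketch of a caller; the function name is made up, the constants and pcibios_err_to_errno() come from <linux/pci.h>:

#include <linux/pci.h>

static int example_read_vendor(struct pci_dev *pdev, u16 *vendor)
{
	int rc = pci_read_config_word(pdev, PCI_VENDOR_ID, vendor);

	if (rc != PCIBIOS_SUCCESSFUL)
		return pcibios_err_to_errno(rc);  /* PCIBIOS_DEVICE_NOT_FOUND -> -ENODEV */
	return 0;
}
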
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig index 175edad42d2f..2942066607e0 100644 --- a/drivers/pci/endpoint/functions/Kconfig +++ b/drivers/pci/endpoint/functions/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | config PCI_EPF_TEST | 5 | config PCI_EPF_TEST |
6 | tristate "PCI Endpoint Test driver" | 6 | tristate "PCI Endpoint Test driver" |
7 | depends on PCI_ENDPOINT | 7 | depends on PCI_ENDPOINT |
8 | select CRC32 | ||
8 | help | 9 | help |
9 | Enable this configuration option to enable the test driver | 10 | Enable this configuration option to enable the test driver |
10 | for PCI Endpoint. | 11 | for PCI Endpoint. |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 1482d132fbb8..e432ec887479 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = { | |||
495 | .flags = IRQCHIP_SKIP_SET_WAKE, | 495 | .flags = IRQCHIP_SKIP_SET_WAKE, |
496 | }; | 496 | }; |
497 | 497 | ||
498 | static void amd_gpio_irq_handler(struct irq_desc *desc) | 498 | #define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF)) |
499 | |||
500 | static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) | ||
499 | { | 501 | { |
500 | u32 i; | 502 | struct amd_gpio *gpio_dev = dev_id; |
501 | u32 off; | 503 | struct gpio_chip *gc = &gpio_dev->gc; |
502 | u32 reg; | 504 | irqreturn_t ret = IRQ_NONE; |
503 | u32 pin_reg; | 505 | unsigned int i, irqnr; |
504 | u64 reg64; | ||
505 | int handled = 0; | ||
506 | unsigned int irq; | ||
507 | unsigned long flags; | 506 | unsigned long flags; |
508 | struct irq_chip *chip = irq_desc_get_chip(desc); | 507 | u32 *regs, regval; |
509 | struct gpio_chip *gc = irq_desc_get_handler_data(desc); | 508 | u64 status, mask; |
510 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); | ||
511 | 509 | ||
512 | chained_irq_enter(chip, desc); | 510 | /* Read the wake status */ |
513 | /*enable GPIO interrupt again*/ | ||
514 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); | 511 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); |
515 | reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); | 512 | status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); |
516 | reg64 = reg; | 513 | status <<= 32; |
517 | reg64 = reg64 << 32; | 514 | status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0); |
518 | |||
519 | reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0); | ||
520 | reg64 |= reg; | ||
521 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | 515 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); |
522 | 516 | ||
523 | /* | 517 | /* Bit 0-45 contain the relevant status bits */ |
524 | * first 46 bits indicates interrupt status. | 518 | status &= (1ULL << 46) - 1; |
525 | * one bit represents four interrupt sources. | 519 | regs = gpio_dev->base; |
526 | */ | 520 | for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) { |
527 | for (off = 0; off < 46 ; off++) { | 521 | if (!(status & mask)) |
528 | if (reg64 & BIT(off)) { | 522 | continue; |
529 | for (i = 0; i < 4; i++) { | 523 | status &= ~mask; |
530 | pin_reg = readl(gpio_dev->base + | 524 | |
531 | (off * 4 + i) * 4); | 525 | /* Each status bit covers four pins */ |
532 | if ((pin_reg & BIT(INTERRUPT_STS_OFF)) || | 526 | for (i = 0; i < 4; i++) { |
533 | (pin_reg & BIT(WAKE_STS_OFF))) { | 527 | regval = readl(regs + i); |
534 | irq = irq_find_mapping(gc->irqdomain, | 528 | if (!(regval & PIN_IRQ_PENDING)) |
535 | off * 4 + i); | 529 | continue; |
536 | generic_handle_irq(irq); | 530 | irq = irq_find_mapping(gc->irqdomain, irqnr + i); |
537 | writel(pin_reg, | 531 | generic_handle_irq(irq); |
538 | gpio_dev->base | 532 | /* Clear interrupt */ |
539 | + (off * 4 + i) * 4); | 533 | writel(regval, regs + i); |
540 | handled++; | 534 | ret = IRQ_HANDLED; |
541 | } | ||
542 | } | ||
543 | } | 535 | } |
544 | } | 536 | } |
545 | 537 | ||
546 | if (handled == 0) | 538 | /* Signal EOI to the GPIO unit */ |
547 | handle_bad_irq(desc); | ||
548 | |||
549 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); | 539 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); |
550 | reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); | 540 | regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG); |
551 | reg |= EOI_MASK; | 541 | regval |= EOI_MASK; |
552 | writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG); | 542 | writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG); |
553 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | 543 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); |
554 | 544 | ||
555 | chained_irq_exit(chip, desc); | 545 | return ret; |
556 | } | 546 | } |
557 | 547 | ||
558 | static int amd_get_groups_count(struct pinctrl_dev *pctldev) | 548 | static int amd_get_groups_count(struct pinctrl_dev *pctldev) |
@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev) | |||
821 | goto out2; | 811 | goto out2; |
822 | } | 812 | } |
823 | 813 | ||
824 | gpiochip_set_chained_irqchip(&gpio_dev->gc, | 814 | ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0, |
825 | &amd_gpio_irqchip, | 815 | KBUILD_MODNAME, gpio_dev); |
826 | irq_base, | 816 | if (ret) |
827 | amd_gpio_irq_handler); | 817 | goto out2; |
818 | |||
828 | platform_set_drvdata(pdev, gpio_dev); | 819 | platform_set_drvdata(pdev, gpio_dev); |
829 | 820 | ||
830 | dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); | 821 | dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); |
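
The pinctrl-amd hunks replace the chained flow handler with an ordinary interrupt handler registered through devm_request_irq(); it returns IRQ_HANDLED only when a pending pin status was actually serviced and IRQ_NONE otherwise, so the core can account for spurious interrupts. A minimal sketch of that shape; the example_* structure and register layout are assumptions, only the registration call mirrors the driver:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Illustrative device state; the real driver keeps this in struct amd_gpio. */
struct example_gpio {
	void __iomem *status_reg;
};

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	struct example_gpio *gpio = dev_id;
	u32 pending = readl(gpio->status_reg);	/* hypothetical status register */

	if (!pending)
		return IRQ_NONE;		/* not ours: nothing was pending */

	writel(pending, gpio->status_reg);	/* write-1-to-clear, as in the driver */
	return IRQ_HANDLED;
}

static int example_register(struct platform_device *pdev,
			    struct example_gpio *gpio, int irq)
{
	return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
				KBUILD_MODNAME, gpio);
}
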
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index d3c5f5dfbbd7..222b6685b09f 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c | |||
@@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev, | |||
798 | break; | 798 | break; |
799 | case PIN_CONFIG_OUTPUT: | 799 | case PIN_CONFIG_OUTPUT: |
800 | __stm32_gpio_set(bank, offset, arg); | 800 | __stm32_gpio_set(bank, offset, arg); |
801 | ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false); | 801 | ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false); |
802 | break; | 802 | break; |
803 | default: | 803 | default: |
804 | ret = -EINVAL; | 804 | ret = -EINVAL; |
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index ef29f18b1951..4cc2f4ea0a25 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c | |||
@@ -97,11 +97,9 @@ | |||
97 | } \ | 97 | } \ |
98 | } | 98 | } |
99 | 99 | ||
100 | #ifdef CONFIG_PM_SLEEP | ||
101 | static u8 suspend_prep_ok; | 100 | static u8 suspend_prep_ok; |
102 | static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp; | 101 | static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp; |
103 | static u64 suspend_shlw_res_temp, suspend_deep_res_temp; | 102 | static u64 suspend_shlw_res_temp, suspend_deep_res_temp; |
104 | #endif | ||
105 | 103 | ||
106 | struct telemetry_susp_stats { | 104 | struct telemetry_susp_stats { |
107 | u32 shlw_swake_ctr; | 105 | u32 shlw_swake_ctr; |
@@ -807,7 +805,6 @@ static const struct file_operations telem_ioss_trc_verb_ops = { | |||
807 | .release = single_release, | 805 | .release = single_release, |
808 | }; | 806 | }; |
809 | 807 | ||
810 | #ifdef CONFIG_PM_SLEEP | ||
811 | static int pm_suspend_prep_cb(void) | 808 | static int pm_suspend_prep_cb(void) |
812 | { | 809 | { |
813 | struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS]; | 810 | struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS]; |
@@ -937,7 +934,6 @@ static int pm_notification(struct notifier_block *this, | |||
937 | static struct notifier_block pm_notifier = { | 934 | static struct notifier_block pm_notifier = { |
938 | .notifier_call = pm_notification, | 935 | .notifier_call = pm_notification, |
939 | }; | 936 | }; |
940 | #endif /* CONFIG_PM_SLEEP */ | ||
941 | 937 | ||
942 | static int __init telemetry_debugfs_init(void) | 938 | static int __init telemetry_debugfs_init(void) |
943 | { | 939 | { |
@@ -960,14 +956,13 @@ static int __init telemetry_debugfs_init(void) | |||
960 | if (err < 0) | 956 | if (err < 0) |
961 | return -EINVAL; | 957 | return -EINVAL; |
962 | 958 | ||
963 | |||
964 | #ifdef CONFIG_PM_SLEEP | ||
965 | register_pm_notifier(&pm_notifier); | 959 | register_pm_notifier(&pm_notifier); |
966 | #endif /* CONFIG_PM_SLEEP */ | ||
967 | 960 | ||
968 | debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); | 961 | debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); |
969 | if (!debugfs_conf->telemetry_dbg_dir) | 962 | if (!debugfs_conf->telemetry_dbg_dir) { |
970 | return -ENOMEM; | 963 | err = -ENOMEM; |
964 | goto out_pm; | ||
965 | } | ||
971 | 966 | ||
972 | f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, | 967 | f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, |
973 | debugfs_conf->telemetry_dbg_dir, NULL, | 968 | debugfs_conf->telemetry_dbg_dir, NULL, |
@@ -1014,6 +1009,8 @@ static int __init telemetry_debugfs_init(void) | |||
1014 | out: | 1009 | out: |
1015 | debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); | 1010 | debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); |
1016 | debugfs_conf->telemetry_dbg_dir = NULL; | 1011 | debugfs_conf->telemetry_dbg_dir = NULL; |
1012 | out_pm: | ||
1013 | unregister_pm_notifier(&pm_notifier); | ||
1017 | 1014 | ||
1018 | return err; | 1015 | return err; |
1019 | } | 1016 | } |
@@ -1022,6 +1019,7 @@ static void __exit telemetry_debugfs_exit(void) | |||
1022 | { | 1019 | { |
1023 | debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); | 1020 | debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); |
1024 | debugfs_conf->telemetry_dbg_dir = NULL; | 1021 | debugfs_conf->telemetry_dbg_dir = NULL; |
1022 | unregister_pm_notifier(&pm_notifier); | ||
1025 | } | 1023 | } |
1026 | 1024 | ||
1027 | late_initcall(telemetry_debugfs_init); | 1025 | late_initcall(telemetry_debugfs_init); |
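
The telemetry hunks drop the CONFIG_PM_SLEEP guards and make the init path unwind the PM notifier when debugfs setup fails, with the exit path now unregistering it as well. A compact sketch of the same goto-based unwind; everything named example_* is illustrative:

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static struct dentry *example_dir;

static int example_pm_cb(struct notifier_block *nb, unsigned long mode, void *p)
{
	return NOTIFY_OK;
}

static struct notifier_block example_pm_notifier = {
	.notifier_call = example_pm_cb,
};

static int __init example_debugfs_init(void)
{
	int err;

	register_pm_notifier(&example_pm_notifier);

	example_dir = debugfs_create_dir("example", NULL);
	if (!example_dir) {
		err = -ENOMEM;
		goto out_pm;
	}
	return 0;

out_pm:
	unregister_pm_notifier(&example_pm_notifier);
	return err;
}

static void __exit example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_dir);
	unregister_pm_notifier(&example_pm_notifier);
}
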
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index e72abbc18ee3..a66a317f3e4f 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c | |||
@@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf) | |||
70 | { | 70 | { |
71 | return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); | 71 | return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); |
72 | } | 72 | } |
73 | MDEV_TYPE_ATTR_RO(name); | 73 | static MDEV_TYPE_ATTR_RO(name); |
74 | 74 | ||
75 | static ssize_t device_api_show(struct kobject *kobj, struct device *dev, | 75 | static ssize_t device_api_show(struct kobject *kobj, struct device *dev, |
76 | char *buf) | 76 | char *buf) |
77 | { | 77 | { |
78 | return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); | 78 | return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); |
79 | } | 79 | } |
80 | MDEV_TYPE_ATTR_RO(device_api); | 80 | static MDEV_TYPE_ATTR_RO(device_api); |
81 | 81 | ||
82 | static ssize_t available_instances_show(struct kobject *kobj, | 82 | static ssize_t available_instances_show(struct kobject *kobj, |
83 | struct device *dev, char *buf) | 83 | struct device *dev, char *buf) |
@@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj, | |||
86 | 86 | ||
87 | return sprintf(buf, "%d\n", atomic_read(&private->avail)); | 87 | return sprintf(buf, "%d\n", atomic_read(&private->avail)); |
88 | } | 88 | } |
89 | MDEV_TYPE_ATTR_RO(available_instances); | 89 | static MDEV_TYPE_ATTR_RO(available_instances); |
90 | 90 | ||
91 | static struct attribute *mdev_types_attrs[] = { | 91 | static struct attribute *mdev_types_attrs[] = { |
92 | &mdev_type_attr_name.attr, | 92 | &mdev_type_attr_name.attr, |
@@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = { | |||
100 | .attrs = mdev_types_attrs, | 100 | .attrs = mdev_types_attrs, |
101 | }; | 101 | }; |
102 | 102 | ||
103 | struct attribute_group *mdev_type_groups[] = { | 103 | static struct attribute_group *mdev_type_groups[] = { |
104 | &mdev_type_group, | 104 | &mdev_type_group, |
105 | NULL, | 105 | NULL, |
106 | }; | 106 | }; |
@@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev) | |||
152 | &events, &private->nb); | 152 | &events, &private->nb); |
153 | } | 153 | } |
154 | 154 | ||
155 | void vfio_ccw_mdev_release(struct mdev_device *mdev) | 155 | static void vfio_ccw_mdev_release(struct mdev_device *mdev) |
156 | { | 156 | { |
157 | struct vfio_ccw_private *private = | 157 | struct vfio_ccw_private *private = |
158 | dev_get_drvdata(mdev_parent_dev(mdev)); | 158 | dev_get_drvdata(mdev_parent_dev(mdev)); |
@@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, | |||
233 | } | 233 | } |
234 | } | 234 | } |
235 | 235 | ||
236 | int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) | 236 | static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) |
237 | { | 237 | { |
238 | if (info->index != VFIO_CCW_IO_IRQ_INDEX) | 238 | if (info->index != VFIO_CCW_IO_IRQ_INDEX) |
239 | return -EINVAL; | 239 | return -EINVAL; |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 9be4596d8a08..ea099910b4e9 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev) | |||
668 | struct ap_driver *ap_drv = to_ap_drv(dev->driver); | 668 | struct ap_driver *ap_drv = to_ap_drv(dev->driver); |
669 | int rc; | 669 | int rc; |
670 | 670 | ||
671 | /* Add queue/card to list of active queues/cards */ | ||
672 | spin_lock_bh(&ap_list_lock); | ||
673 | if (is_card_dev(dev)) | ||
674 | list_add(&to_ap_card(dev)->list, &ap_card_list); | ||
675 | else | ||
676 | list_add(&to_ap_queue(dev)->list, | ||
677 | &to_ap_queue(dev)->card->queues); | ||
678 | spin_unlock_bh(&ap_list_lock); | ||
679 | |||
671 | ap_dev->drv = ap_drv; | 680 | ap_dev->drv = ap_drv; |
672 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; | 681 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; |
673 | if (rc) | 682 | |
683 | if (rc) { | ||
684 | spin_lock_bh(&ap_list_lock); | ||
685 | if (is_card_dev(dev)) | ||
686 | list_del_init(&to_ap_card(dev)->list); | ||
687 | else | ||
688 | list_del_init(&to_ap_queue(dev)->list); | ||
689 | spin_unlock_bh(&ap_list_lock); | ||
674 | ap_dev->drv = NULL; | 690 | ap_dev->drv = NULL; |
691 | } | ||
692 | |||
675 | return rc; | 693 | return rc; |
676 | } | 694 | } |
677 | 695 | ||
@@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev) | |||
680 | struct ap_device *ap_dev = to_ap_dev(dev); | 698 | struct ap_device *ap_dev = to_ap_dev(dev); |
681 | struct ap_driver *ap_drv = ap_dev->drv; | 699 | struct ap_driver *ap_drv = ap_dev->drv; |
682 | 700 | ||
701 | if (ap_drv->remove) | ||
702 | ap_drv->remove(ap_dev); | ||
703 | |||
704 | /* Remove queue/card from list of active queues/cards */ | ||
683 | spin_lock_bh(&ap_list_lock); | 705 | spin_lock_bh(&ap_list_lock); |
684 | if (is_card_dev(dev)) | 706 | if (is_card_dev(dev)) |
685 | list_del_init(&to_ap_card(dev)->list); | 707 | list_del_init(&to_ap_card(dev)->list); |
686 | else | 708 | else |
687 | list_del_init(&to_ap_queue(dev)->list); | 709 | list_del_init(&to_ap_queue(dev)->list); |
688 | spin_unlock_bh(&ap_list_lock); | 710 | spin_unlock_bh(&ap_list_lock); |
689 | if (ap_drv->remove) | 711 | |
690 | ap_drv->remove(ap_dev); | ||
691 | return 0; | 712 | return 0; |
692 | } | 713 | } |
693 | 714 | ||
@@ -1056,10 +1077,6 @@ static void ap_scan_bus(struct work_struct *unused) | |||
1056 | } | 1077 | } |
1057 | /* get it and thus adjust reference counter */ | 1078 | /* get it and thus adjust reference counter */ |
1058 | get_device(&ac->ap_dev.device); | 1079 | get_device(&ac->ap_dev.device); |
1059 | /* Add card device to card list */ | ||
1060 | spin_lock_bh(&ap_list_lock); | ||
1061 | list_add(&ac->list, &ap_card_list); | ||
1062 | spin_unlock_bh(&ap_list_lock); | ||
1063 | } | 1080 | } |
1064 | /* now create the new queue device */ | 1081 | /* now create the new queue device */ |
1065 | aq = ap_queue_create(qid, type); | 1082 | aq = ap_queue_create(qid, type); |
@@ -1070,10 +1087,6 @@ static void ap_scan_bus(struct work_struct *unused) | |||
1070 | aq->ap_dev.device.parent = &ac->ap_dev.device; | 1087 | aq->ap_dev.device.parent = &ac->ap_dev.device; |
1071 | dev_set_name(&aq->ap_dev.device, | 1088 | dev_set_name(&aq->ap_dev.device, |
1072 | "%02x.%04x", id, dom); | 1089 | "%02x.%04x", id, dom); |
1073 | /* Add queue device to card queue list */ | ||
1074 | spin_lock_bh(&ap_list_lock); | ||
1075 | list_add(&aq->list, &ac->queues); | ||
1076 | spin_unlock_bh(&ap_list_lock); | ||
1077 | /* Start with a device reset */ | 1090 | /* Start with a device reset */ |
1078 | spin_lock_bh(&aq->lock); | 1091 | spin_lock_bh(&aq->lock); |
1079 | ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); | 1092 | ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); |
@@ -1081,9 +1094,6 @@ static void ap_scan_bus(struct work_struct *unused) | |||
1081 | /* Register device */ | 1094 | /* Register device */ |
1082 | rc = device_register(&aq->ap_dev.device); | 1095 | rc = device_register(&aq->ap_dev.device); |
1083 | if (rc) { | 1096 | if (rc) { |
1084 | spin_lock_bh(&ap_list_lock); | ||
1085 | list_del_init(&aq->list); | ||
1086 | spin_unlock_bh(&ap_list_lock); | ||
1087 | put_device(&aq->ap_dev.device); | 1097 | put_device(&aq->ap_dev.device); |
1088 | continue; | 1098 | continue; |
1089 | } | 1099 | } |
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index cfa161ccc74e..836efac96813 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c | |||
@@ -160,7 +160,14 @@ static struct device_type ap_card_type = { | |||
160 | 160 | ||
161 | static void ap_card_device_release(struct device *dev) | 161 | static void ap_card_device_release(struct device *dev) |
162 | { | 162 | { |
163 | kfree(to_ap_card(dev)); | 163 | struct ap_card *ac = to_ap_card(dev); |
164 | |||
165 | if (!list_empty(&ac->list)) { | ||
166 | spin_lock_bh(&ap_list_lock); | ||
167 | list_del_init(&ac->list); | ||
168 | spin_unlock_bh(&ap_list_lock); | ||
169 | } | ||
170 | kfree(ac); | ||
164 | } | 171 | } |
165 | 172 | ||
166 | struct ap_card *ap_card_create(int id, int queue_depth, int device_type, | 173 | struct ap_card *ap_card_create(int id, int queue_depth, int device_type, |
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 480c58a63769..0f1a5d02acb0 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c | |||
@@ -584,7 +584,14 @@ static struct device_type ap_queue_type = { | |||
584 | 584 | ||
585 | static void ap_queue_device_release(struct device *dev) | 585 | static void ap_queue_device_release(struct device *dev) |
586 | { | 586 | { |
587 | kfree(to_ap_queue(dev)); | 587 | struct ap_queue *aq = to_ap_queue(dev); |
588 | |||
589 | if (!list_empty(&aq->list)) { | ||
590 | spin_lock_bh(&ap_list_lock); | ||
591 | list_del_init(&aq->list); | ||
592 | spin_unlock_bh(&ap_list_lock); | ||
593 | } | ||
594 | kfree(aq); | ||
588 | } | 595 | } |
589 | 596 | ||
590 | struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) | 597 | struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) |
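
The ap_card.c and ap_queue.c hunks give the release callbacks a defensive unlink: if the object is still on the active list when its last reference drops, it is removed under the list lock before being freed. A generic sketch of that release pattern; the example_* names and lock are stand-ins for the ap bus objects:

#include <linux/device.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative stand-ins for the ap bus objects and their list lock. */
struct example_obj {
	struct device dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(example_list_lock);
#define to_example_obj(d) container_of(d, struct example_obj, dev)

static void example_device_release(struct device *dev)
{
	struct example_obj *obj = to_example_obj(dev);

	/* The normal path already unlinked in the bus remove callback; this
	 * covers objects released without ever having been bound. */
	if (!list_empty(&obj->list)) {
		spin_lock_bh(&example_list_lock);
		list_del_init(&obj->list);
		spin_unlock_bh(&example_list_lock);
	}
	kfree(obj);
}
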
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index dba94b486f05..fa732bd86729 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -1954,7 +1954,6 @@ static void netiucv_free_netdevice(struct net_device *dev) | |||
1954 | privptr->conn = NULL; privptr->fsm = NULL; | 1954 | privptr->conn = NULL; privptr->fsm = NULL; |
1955 | /* privptr gets freed by free_netdev() */ | 1955 | /* privptr gets freed by free_netdev() */ |
1956 | } | 1956 | } |
1957 | free_netdev(dev); | ||
1958 | } | 1957 | } |
1959 | 1958 | ||
1960 | /** | 1959 | /** |
@@ -1972,7 +1971,8 @@ static void netiucv_setup_netdevice(struct net_device *dev) | |||
1972 | dev->mtu = NETIUCV_MTU_DEFAULT; | 1971 | dev->mtu = NETIUCV_MTU_DEFAULT; |
1973 | dev->min_mtu = 576; | 1972 | dev->min_mtu = 576; |
1974 | dev->max_mtu = NETIUCV_MTU_MAX; | 1973 | dev->max_mtu = NETIUCV_MTU_MAX; |
1975 | dev->destructor = netiucv_free_netdevice; | 1974 | dev->needs_free_netdev = true; |
1975 | dev->priv_destructor = netiucv_free_netdevice; | ||
1976 | dev->hard_header_len = NETIUCV_HDRLEN; | 1976 | dev->hard_header_len = NETIUCV_HDRLEN; |
1977 | dev->addr_len = 0; | 1977 | dev->addr_len = 0; |
1978 | dev->type = ARPHRD_SLIP; | 1978 | dev->type = ARPHRD_SLIP; |
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 8bc7ee1a8ca8..507512cc478b 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c | |||
@@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, | |||
870 | QEDI_ERR(&qedi->dbg_ctx, | 870 | QEDI_ERR(&qedi->dbg_ctx, |
871 | "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", | 871 | "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", |
872 | protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); | 872 | protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); |
873 | WARN_ON(1); | ||
874 | } | 873 | } |
875 | } | 874 | } |
876 | 875 | ||
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 09a294634bc7..879d3b7462f9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
@@ -1499,11 +1499,9 @@ err_idx: | |||
1499 | 1499 | ||
1500 | void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) | 1500 | void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) |
1501 | { | 1501 | { |
1502 | if (!test_and_clear_bit(idx, qedi->task_idx_map)) { | 1502 | if (!test_and_clear_bit(idx, qedi->task_idx_map)) |
1503 | QEDI_ERR(&qedi->dbg_ctx, | 1503 | QEDI_ERR(&qedi->dbg_ctx, |
1504 | "FW task context, already cleared, tid=0x%x\n", idx); | 1504 | "FW task context, already cleared, tid=0x%x\n", idx); |
1505 | WARN_ON(1); | ||
1506 | } | ||
1507 | } | 1505 | } |
1508 | 1506 | ||
1509 | void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, | 1507 | void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, |
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c index dc6ecd824365..ff10d1f0a7e4 100644 --- a/drivers/staging/iio/cdc/ad7152.c +++ b/drivers/staging/iio/cdc/ad7152.c | |||
@@ -231,16 +231,12 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val) | |||
231 | if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) | 231 | if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) |
232 | i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; | 232 | i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; |
233 | 233 | ||
234 | mutex_lock(&chip->state_lock); | ||
235 | ret = i2c_smbus_write_byte_data(chip->client, | 234 | ret = i2c_smbus_write_byte_data(chip->client, |
236 | AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); | 235 | AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); |
237 | if (ret < 0) { | 236 | if (ret < 0) |
238 | mutex_unlock(&chip->state_lock); | ||
239 | return ret; | 237 | return ret; |
240 | } | ||
241 | 238 | ||
242 | chip->filter_rate_setup = i; | 239 | chip->filter_rate_setup = i; |
243 | mutex_unlock(&chip->state_lock); | ||
244 | 240 | ||
245 | return ret; | 241 | return ret; |
246 | } | 242 | } |
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index cfe37eb026d6..859d0d6051cd 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c | |||
@@ -152,7 +152,7 @@ static const struct net_device_ops mon_netdev_ops = { | |||
152 | static void mon_setup(struct net_device *dev) | 152 | static void mon_setup(struct net_device *dev) |
153 | { | 153 | { |
154 | dev->netdev_ops = &mon_netdev_ops; | 154 | dev->netdev_ops = &mon_netdev_ops; |
155 | dev->destructor = free_netdev; | 155 | dev->needs_free_netdev = true; |
156 | ether_setup(dev); | 156 | ether_setup(dev); |
157 | dev->priv_flags |= IFF_NO_QUEUE; | 157 | dev->priv_flags |= IFF_NO_QUEUE; |
158 | dev->type = ARPHRD_IEEE80211; | 158 | dev->type = ARPHRD_IEEE80211; |
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 36c3189fc4b7..bd4352fe2de3 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | |||
@@ -2667,7 +2667,8 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st | |||
2667 | mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP; | 2667 | mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP; |
2668 | strncpy(mon_ndev->name, name, IFNAMSIZ); | 2668 | strncpy(mon_ndev->name, name, IFNAMSIZ); |
2669 | mon_ndev->name[IFNAMSIZ - 1] = 0; | 2669 | mon_ndev->name[IFNAMSIZ - 1] = 0; |
2670 | mon_ndev->destructor = rtw_ndev_destructor; | 2670 | mon_ndev->needs_free_netdev = true; |
2671 | mon_ndev->priv_destructor = rtw_ndev_destructor; | ||
2671 | 2672 | ||
2672 | mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops; | 2673 | mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops; |
2673 | 2674 | ||
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c index f83cfc76505c..021589913681 100644 --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c | |||
@@ -1207,8 +1207,6 @@ void rtw_ndev_destructor(struct net_device *ndev) | |||
1207 | 1207 | ||
1208 | if (ndev->ieee80211_ptr) | 1208 | if (ndev->ieee80211_ptr) |
1209 | kfree((u8 *)ndev->ieee80211_ptr); | 1209 | kfree((u8 *)ndev->ieee80211_ptr); |
1210 | |||
1211 | free_netdev(ndev); | ||
1212 | } | 1210 | } |
1213 | 1211 | ||
1214 | void rtw_dev_unload(struct adapter *padapter) | 1212 | void rtw_dev_unload(struct adapter *padapter) |
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c index 02db59e8b593..aa16d1ab955b 100644 --- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c +++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c | |||
@@ -160,7 +160,7 @@ static int isFileReadable(char *path) | |||
160 | oldfs = get_fs(); set_fs(get_ds()); | 160 | oldfs = get_fs(); set_fs(get_ds()); |
161 | 161 | ||
162 | if (1!=readFile(fp, &buf, 1)) | 162 | if (1!=readFile(fp, &buf, 1)) |
163 | ret = PTR_ERR(fp); | 163 | ret = -EINVAL; |
164 | 164 | ||
165 | set_fs(oldfs); | 165 | set_fs(oldfs); |
166 | filp_close(fp, NULL); | 166 | filp_close(fp, NULL); |
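
The osdep_service.c hunk fixes a misuse of PTR_ERR(): readFile() returns a byte count and fp is a valid file pointer at that point, so PTR_ERR(fp) produced a meaningless value; the failure is now reported as a plain -EINVAL. As a reminder of the intended idiom (illustrative, not the staging driver's code):

#include <linux/err.h>
#include <linux/fs.h>

static int example_is_readable(const char *path)
{
	struct file *fp = filp_open(path, O_RDONLY, 0);

	if (IS_ERR(fp))
		return PTR_ERR(fp);	/* PTR_ERR() is only valid on IS_ERR() pointers */

	/* fp is a real pointer from here on; a short read is reported with a
	 * plain errno, never with PTR_ERR(fp). */
	filp_close(fp, NULL);
	return 0;
}
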
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 0d8f81591bed..3fdca2cdd8da 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, | |||
1279 | */ | 1279 | */ |
1280 | if (dump_payload) | 1280 | if (dump_payload) |
1281 | goto after_immediate_data; | 1281 | goto after_immediate_data; |
1282 | /* | ||
1283 | * Check for underflow case where both EDTL and immediate data payload | ||
1284 | * exceeds what is presented by CDB's TRANSFER LENGTH, and what has | ||
1285 | * already been set in target_cmd_size_check() as se_cmd->data_length. | ||
1286 | * | ||
1287 | * For this special case, fail the command and dump the immediate data | ||
1288 | * payload. | ||
1289 | */ | ||
1290 | if (cmd->first_burst_len > cmd->se_cmd.data_length) { | ||
1291 | cmd->sense_reason = TCM_INVALID_CDB_FIELD; | ||
1292 | goto after_immediate_data; | ||
1293 | } | ||
1282 | 1294 | ||
1283 | immed_ret = iscsit_handle_immediate_data(cmd, hdr, | 1295 | immed_ret = iscsit_handle_immediate_data(cmd, hdr, |
1284 | cmd->first_burst_len); | 1296 | cmd->first_burst_len); |
@@ -4423,8 +4435,11 @@ static void iscsit_logout_post_handler_closesession( | |||
4423 | * always sleep waiting for RX/TX thread shutdown to complete | 4435 | * always sleep waiting for RX/TX thread shutdown to complete |
4424 | * within iscsit_close_connection(). | 4436 | * within iscsit_close_connection(). |
4425 | */ | 4437 | */ |
4426 | if (!conn->conn_transport->rdma_shutdown) | 4438 | if (!conn->conn_transport->rdma_shutdown) { |
4427 | sleep = cmpxchg(&conn->tx_thread_active, true, false); | 4439 | sleep = cmpxchg(&conn->tx_thread_active, true, false); |
4440 | if (!sleep) | ||
4441 | return; | ||
4442 | } | ||
4428 | 4443 | ||
4429 | atomic_set(&conn->conn_logout_remove, 0); | 4444 | atomic_set(&conn->conn_logout_remove, 0); |
4430 | complete(&conn->conn_logout_comp); | 4445 | complete(&conn->conn_logout_comp); |
@@ -4440,8 +4455,11 @@ static void iscsit_logout_post_handler_samecid( | |||
4440 | { | 4455 | { |
4441 | int sleep = 1; | 4456 | int sleep = 1; |
4442 | 4457 | ||
4443 | if (!conn->conn_transport->rdma_shutdown) | 4458 | if (!conn->conn_transport->rdma_shutdown) { |
4444 | sleep = cmpxchg(&conn->tx_thread_active, true, false); | 4459 | sleep = cmpxchg(&conn->tx_thread_active, true, false); |
4460 | if (!sleep) | ||
4461 | return; | ||
4462 | } | ||
4445 | 4463 | ||
4446 | atomic_set(&conn->conn_logout_remove, 0); | 4464 | atomic_set(&conn->conn_logout_remove, 0); |
4447 | complete(&conn->conn_logout_comp); | 4465 | complete(&conn->conn_logout_comp); |
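In both logout post-handlers above, cmpxchg() atomically flips tx_thread_active from true to false and reports the old value; when the flag was already clear, another path owns the remaining teardown, so the handler now returns instead of completing conn_logout_comp again. A standalone sketch of the same claim-the-flag idea in plain C11 (illustrative only, not the target code):

/* Exactly one of two racing callers sees the old value 'true' and proceeds. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static atomic_bool active = true;

static void *teardown_path(void *name)
{
	/* atomic_exchange returns the previous value; only the winner tears down. */
	if (atomic_exchange(&active, false))
		printf("%s: flag was set, performing teardown\n", (char *)name);
	else
		printf("%s: flag already cleared, backing off\n", (char *)name);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, teardown_path, "path A");
	pthread_create(&b, NULL, teardown_path, "path B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Build with -pthread; exactly one of the two threads prints the teardown message.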
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 9ab7090f7c83..0912de7c0cf8 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
@@ -136,7 +136,7 @@ int init_se_kmem_caches(void); | |||
136 | void release_se_kmem_caches(void); | 136 | void release_se_kmem_caches(void); |
137 | u32 scsi_get_new_index(scsi_index_t); | 137 | u32 scsi_get_new_index(scsi_index_t); |
138 | void transport_subsystem_check_init(void); | 138 | void transport_subsystem_check_init(void); |
139 | void transport_cmd_finish_abort(struct se_cmd *, int); | 139 | int transport_cmd_finish_abort(struct se_cmd *, int); |
140 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); | 140 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); |
141 | void transport_dump_dev_state(struct se_device *, char *, int *); | 141 | void transport_dump_dev_state(struct se_device *, char *, int *); |
142 | void transport_dump_dev_info(struct se_device *, struct se_lun *, | 142 | void transport_dump_dev_info(struct se_device *, struct se_lun *, |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index dce1e1b47316..13f47bf4d16b 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr) | |||
75 | kfree(tmr); | 75 | kfree(tmr); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) | 78 | static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) |
79 | { | 79 | { |
80 | unsigned long flags; | 80 | unsigned long flags; |
81 | bool remove = true, send_tas; | 81 | bool remove = true, send_tas; |
@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) | |||
91 | transport_send_task_abort(cmd); | 91 | transport_send_task_abort(cmd); |
92 | } | 92 | } |
93 | 93 | ||
94 | transport_cmd_finish_abort(cmd, remove); | 94 | return transport_cmd_finish_abort(cmd, remove); |
95 | } | 95 | } |
96 | 96 | ||
97 | static int target_check_cdb_and_preempt(struct list_head *list, | 97 | static int target_check_cdb_and_preempt(struct list_head *list, |
@@ -184,8 +184,8 @@ void core_tmr_abort_task( | |||
184 | cancel_work_sync(&se_cmd->work); | 184 | cancel_work_sync(&se_cmd->work); |
185 | transport_wait_for_tasks(se_cmd); | 185 | transport_wait_for_tasks(se_cmd); |
186 | 186 | ||
187 | transport_cmd_finish_abort(se_cmd, true); | 187 | if (!transport_cmd_finish_abort(se_cmd, true)) |
188 | target_put_sess_cmd(se_cmd); | 188 | target_put_sess_cmd(se_cmd); |
189 | 189 | ||
190 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" | 190 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" |
191 | " ref_tag: %llu\n", ref_tag); | 191 | " ref_tag: %llu\n", ref_tag); |
@@ -281,8 +281,8 @@ static void core_tmr_drain_tmr_list( | |||
281 | cancel_work_sync(&cmd->work); | 281 | cancel_work_sync(&cmd->work); |
282 | transport_wait_for_tasks(cmd); | 282 | transport_wait_for_tasks(cmd); |
283 | 283 | ||
284 | transport_cmd_finish_abort(cmd, 1); | 284 | if (!transport_cmd_finish_abort(cmd, 1)) |
285 | target_put_sess_cmd(cmd); | 285 | target_put_sess_cmd(cmd); |
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
@@ -380,8 +380,8 @@ static void core_tmr_drain_state_list( | |||
380 | cancel_work_sync(&cmd->work); | 380 | cancel_work_sync(&cmd->work); |
381 | transport_wait_for_tasks(cmd); | 381 | transport_wait_for_tasks(cmd); |
382 | 382 | ||
383 | core_tmr_handle_tas_abort(cmd, tas); | 383 | if (!core_tmr_handle_tas_abort(cmd, tas)) |
384 | target_put_sess_cmd(cmd); | 384 | target_put_sess_cmd(cmd); |
385 | } | 385 | } |
386 | } | 386 | } |
387 | 387 | ||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 6025935036c9..f1b3a46bdcaf 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) | |||
651 | percpu_ref_put(&lun->lun_ref); | 651 | percpu_ref_put(&lun->lun_ref); |
652 | } | 652 | } |
653 | 653 | ||
654 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 654 | int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) |
655 | { | 655 | { |
656 | bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); | 656 | bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); |
657 | int ret = 0; | ||
657 | 658 | ||
658 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) | 659 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) |
659 | transport_lun_remove_cmd(cmd); | 660 | transport_lun_remove_cmd(cmd); |
@@ -665,9 +666,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |||
665 | cmd->se_tfo->aborted_task(cmd); | 666 | cmd->se_tfo->aborted_task(cmd); |
666 | 667 | ||
667 | if (transport_cmd_check_stop_to_fabric(cmd)) | 668 | if (transport_cmd_check_stop_to_fabric(cmd)) |
668 | return; | 669 | return 1; |
669 | if (remove && ack_kref) | 670 | if (remove && ack_kref) |
670 | transport_put_cmd(cmd); | 671 | ret = transport_put_cmd(cmd); |
672 | |||
673 | return ret; | ||
671 | } | 674 | } |
672 | 675 | ||
673 | static void target_complete_failure_work(struct work_struct *work) | 676 | static void target_complete_failure_work(struct work_struct *work) |
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 49d685ad0da9..45b554032332 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
@@ -315,6 +315,9 @@ void usb_remove_function(struct usb_configuration *c, struct usb_function *f) | |||
315 | list_del(&f->list); | 315 | list_del(&f->list); |
316 | if (f->unbind) | 316 | if (f->unbind) |
317 | f->unbind(c, f); | 317 | f->unbind(c, f); |
318 | |||
319 | if (f->bind_deactivated) | ||
320 | usb_function_activate(f); | ||
318 | } | 321 | } |
319 | EXPORT_SYMBOL_GPL(usb_remove_function); | 322 | EXPORT_SYMBOL_GPL(usb_remove_function); |
320 | 323 | ||
@@ -956,12 +959,8 @@ static void remove_config(struct usb_composite_dev *cdev, | |||
956 | 959 | ||
957 | f = list_first_entry(&config->functions, | 960 | f = list_first_entry(&config->functions, |
958 | struct usb_function, list); | 961 | struct usb_function, list); |
959 | list_del(&f->list); | 962 | |
960 | if (f->unbind) { | 963 | usb_remove_function(config, f); |
961 | DBG(cdev, "unbind function '%s'/%p\n", f->name, f); | ||
962 | f->unbind(config, f); | ||
963 | /* may free memory for "f" */ | ||
964 | } | ||
965 | } | 964 | } |
966 | list_del(&config->list); | 965 | list_del(&config->list); |
967 | if (config->unbind) { | 966 | if (config->unbind) { |
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index b4058f0000e4..6a1ce6a55158 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c | |||
@@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev) | |||
281 | dev->tx_queue_len = 1; | 281 | dev->tx_queue_len = 1; |
282 | 282 | ||
283 | dev->netdev_ops = &pn_netdev_ops; | 283 | dev->netdev_ops = &pn_netdev_ops; |
284 | dev->destructor = free_netdev; | 284 | dev->needs_free_netdev = true; |
285 | dev->header_ops = &phonet_header_ops; | 285 | dev->header_ops = &phonet_header_ops; |
286 | } | 286 | } |
287 | 287 | ||
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index b9ca0a26cbd9..684900fcfe24 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd) | |||
1183 | 1183 | ||
1184 | /* closing ep0 === shutdown all */ | 1184 | /* closing ep0 === shutdown all */ |
1185 | 1185 | ||
1186 | if (dev->gadget_registered) | 1186 | if (dev->gadget_registered) { |
1187 | usb_gadget_unregister_driver (&gadgetfs_driver); | 1187 | usb_gadget_unregister_driver (&gadgetfs_driver); |
1188 | dev->gadget_registered = false; | ||
1189 | } | ||
1188 | 1190 | ||
1189 | /* at this point "good" hardware has disconnected the | 1191 | /* at this point "good" hardware has disconnected the |
1190 | * device from USB; the host won't see it any more. | 1192 | * device from USB; the host won't see it any more. |
@@ -1677,9 +1679,10 @@ static void | |||
1677 | gadgetfs_suspend (struct usb_gadget *gadget) | 1679 | gadgetfs_suspend (struct usb_gadget *gadget) |
1678 | { | 1680 | { |
1679 | struct dev_data *dev = get_gadget_data (gadget); | 1681 | struct dev_data *dev = get_gadget_data (gadget); |
1682 | unsigned long flags; | ||
1680 | 1683 | ||
1681 | INFO (dev, "suspended from state %d\n", dev->state); | 1684 | INFO (dev, "suspended from state %d\n", dev->state); |
1682 | spin_lock (&dev->lock); | 1685 | spin_lock_irqsave(&dev->lock, flags); |
1683 | switch (dev->state) { | 1686 | switch (dev->state) { |
1684 | case STATE_DEV_SETUP: // VERY odd... host died?? | 1687 | case STATE_DEV_SETUP: // VERY odd... host died?? |
1685 | case STATE_DEV_CONNECTED: | 1688 | case STATE_DEV_CONNECTED: |
@@ -1690,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget) | |||
1690 | default: | 1693 | default: |
1691 | break; | 1694 | break; |
1692 | } | 1695 | } |
1693 | spin_unlock (&dev->lock); | 1696 | spin_unlock_irqrestore(&dev->lock, flags); |
1694 | } | 1697 | } |
1695 | 1698 | ||
1696 | static struct usb_gadget_driver gadgetfs_driver = { | 1699 | static struct usb_gadget_driver gadgetfs_driver = { |
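The gadgetfs_suspend() hunk above swaps plain spin_lock() for spin_lock_irqsave(), the usual conversion when a path can be entered from interrupt context or with local interrupts already disabled, so the lock must save and restore the interrupt state rather than assume it. A minimal sketch of that pattern, with made-up names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_state;

static void example_event(int new_state)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* saves and disables local IRQs */
	example_state = new_state;
	spin_unlock_irqrestore(&example_lock, flags);	/* restores the saved state */
}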
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index ccabb51cb98d..7635fd7cc328 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd) | |||
442 | /* Report reset and disconnect events to the driver */ | 442 | /* Report reset and disconnect events to the driver */ |
443 | if (dum->driver && (disconnect || reset)) { | 443 | if (dum->driver && (disconnect || reset)) { |
444 | stop_activity(dum); | 444 | stop_activity(dum); |
445 | spin_unlock(&dum->lock); | ||
446 | if (reset) | 445 | if (reset) |
447 | usb_gadget_udc_reset(&dum->gadget, dum->driver); | 446 | usb_gadget_udc_reset(&dum->gadget, dum->driver); |
448 | else | 447 | else |
449 | dum->driver->disconnect(&dum->gadget); | 448 | dum->driver->disconnect(&dum->gadget); |
450 | spin_lock(&dum->lock); | ||
451 | } | 449 | } |
452 | } else if (dum_hcd->active != dum_hcd->old_active) { | 450 | } else if (dum_hcd->active != dum_hcd->old_active) { |
453 | if (dum_hcd->old_active && dum->driver->suspend) { | 451 | if (dum_hcd->old_active && dum->driver->suspend) |
454 | spin_unlock(&dum->lock); | ||
455 | dum->driver->suspend(&dum->gadget); | 452 | dum->driver->suspend(&dum->gadget); |
456 | spin_lock(&dum->lock); | 453 | else if (!dum_hcd->old_active && dum->driver->resume) |
457 | } else if (!dum_hcd->old_active && dum->driver->resume) { | ||
458 | spin_unlock(&dum->lock); | ||
459 | dum->driver->resume(&dum->gadget); | 454 | dum->driver->resume(&dum->gadget); |
460 | spin_lock(&dum->lock); | ||
461 | } | ||
462 | } | 455 | } |
463 | 456 | ||
464 | dum_hcd->old_status = dum_hcd->port_status; | 457 | dum_hcd->old_status = dum_hcd->port_status; |
@@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g) | |||
983 | struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); | 976 | struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); |
984 | struct dummy *dum = dum_hcd->dum; | 977 | struct dummy *dum = dum_hcd->dum; |
985 | 978 | ||
979 | spin_lock_irq(&dum->lock); | ||
986 | dum->driver = NULL; | 980 | dum->driver = NULL; |
981 | spin_unlock_irq(&dum->lock); | ||
987 | 982 | ||
988 | return 0; | 983 | return 0; |
989 | } | 984 | } |
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 6cf07857eaca..f2cbd7f8005e 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c | |||
@@ -2470,11 +2470,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) | |||
2470 | nuke(&dev->ep[i]); | 2470 | nuke(&dev->ep[i]); |
2471 | 2471 | ||
2472 | /* report disconnect; the driver is already quiesced */ | 2472 | /* report disconnect; the driver is already quiesced */ |
2473 | if (driver) { | 2473 | if (driver) |
2474 | spin_unlock(&dev->lock); | ||
2475 | driver->disconnect(&dev->gadget); | 2474 | driver->disconnect(&dev->gadget); |
2476 | spin_lock(&dev->lock); | ||
2477 | } | ||
2478 | 2475 | ||
2479 | usb_reinit(dev); | 2476 | usb_reinit(dev); |
2480 | } | 2477 | } |
@@ -3348,8 +3345,6 @@ next_endpoints: | |||
3348 | BIT(PCI_RETRY_ABORT_INTERRUPT)) | 3345 | BIT(PCI_RETRY_ABORT_INTERRUPT)) |
3349 | 3346 | ||
3350 | static void handle_stat1_irqs(struct net2280 *dev, u32 stat) | 3347 | static void handle_stat1_irqs(struct net2280 *dev, u32 stat) |
3351 | __releases(dev->lock) | ||
3352 | __acquires(dev->lock) | ||
3353 | { | 3348 | { |
3354 | struct net2280_ep *ep; | 3349 | struct net2280_ep *ep; |
3355 | u32 tmp, num, mask, scratch; | 3350 | u32 tmp, num, mask, scratch; |
@@ -3390,14 +3385,12 @@ __acquires(dev->lock) | |||
3390 | if (disconnect || reset) { | 3385 | if (disconnect || reset) { |
3391 | stop_activity(dev, dev->driver); | 3386 | stop_activity(dev, dev->driver); |
3392 | ep0_start(dev); | 3387 | ep0_start(dev); |
3393 | spin_unlock(&dev->lock); | ||
3394 | if (reset) | 3388 | if (reset) |
3395 | usb_gadget_udc_reset | 3389 | usb_gadget_udc_reset |
3396 | (&dev->gadget, dev->driver); | 3390 | (&dev->gadget, dev->driver); |
3397 | else | 3391 | else |
3398 | (dev->driver->disconnect) | 3392 | (dev->driver->disconnect) |
3399 | (&dev->gadget); | 3393 | (&dev->gadget); |
3400 | spin_lock(&dev->lock); | ||
3401 | return; | 3394 | return; |
3402 | } | 3395 | } |
3403 | } | 3396 | } |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 1f1687e888d6..fddf2731f798 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -2119,11 +2119,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, | |||
2119 | { | 2119 | { |
2120 | u32 temp, port_offset, port_count; | 2120 | u32 temp, port_offset, port_count; |
2121 | int i; | 2121 | int i; |
2122 | u8 major_revision; | 2122 | u8 major_revision, minor_revision; |
2123 | struct xhci_hub *rhub; | 2123 | struct xhci_hub *rhub; |
2124 | 2124 | ||
2125 | temp = readl(addr); | 2125 | temp = readl(addr); |
2126 | major_revision = XHCI_EXT_PORT_MAJOR(temp); | 2126 | major_revision = XHCI_EXT_PORT_MAJOR(temp); |
2127 | minor_revision = XHCI_EXT_PORT_MINOR(temp); | ||
2127 | 2128 | ||
2128 | if (major_revision == 0x03) { | 2129 | if (major_revision == 0x03) { |
2129 | rhub = &xhci->usb3_rhub; | 2130 | rhub = &xhci->usb3_rhub; |
@@ -2137,7 +2138,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, | |||
2137 | return; | 2138 | return; |
2138 | } | 2139 | } |
2139 | rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); | 2140 | rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); |
2140 | rhub->min_rev = XHCI_EXT_PORT_MINOR(temp); | 2141 | |
2142 | if (rhub->min_rev < minor_revision) | ||
2143 | rhub->min_rev = minor_revision; | ||
2141 | 2144 | ||
2142 | /* Port offset and count in the third dword, see section 7.2 */ | 2145 | /* Port offset and count in the third dword, see section 7.2 */ |
2143 | temp = readl(addr + 2); | 2146 | temp = readl(addr + 2); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index fcf1f3f63e7a..1bcf971141c0 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
201 | if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && | 201 | if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && |
202 | pdev->device == 0x1042) | 202 | pdev->device == 0x1042) |
203 | xhci->quirks |= XHCI_BROKEN_STREAMS; | 203 | xhci->quirks |= XHCI_BROKEN_STREAMS; |
204 | if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && | ||
205 | pdev->device == 0x1142) | ||
206 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | ||
204 | 207 | ||
205 | if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) | 208 | if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) |
206 | xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; | 209 | xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; |
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 687ebb053438..41d7979d81c5 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c | |||
@@ -1048,7 +1048,7 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs) | |||
1048 | 1048 | ||
1049 | for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; | 1049 | for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; |
1050 | i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) | 1050 | i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) |
1051 | if (PIXEL_CLOCK) | 1051 | if (PIXEL_CLOCK != 0) |
1052 | edt[num++] = block - edid; | 1052 | edt[num++] = block - edid; |
1053 | 1053 | ||
1054 | /* Yikes, EDID data is totally useless */ | 1054 | /* Yikes, EDID data is totally useless */ |
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index ec2e7e353685..449fceaf79d5 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c | |||
@@ -1646,8 +1646,9 @@ static int ufx_usb_probe(struct usb_interface *interface, | |||
1646 | dev_dbg(dev->gdev, "%s %s - serial #%s\n", | 1646 | dev_dbg(dev->gdev, "%s %s - serial #%s\n", |
1647 | usbdev->manufacturer, usbdev->product, usbdev->serial); | 1647 | usbdev->manufacturer, usbdev->product, usbdev->serial); |
1648 | dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", | 1648 | dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", |
1649 | usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, | 1649 | le16_to_cpu(usbdev->descriptor.idVendor), |
1650 | usbdev->descriptor.bcdDevice, dev); | 1650 | le16_to_cpu(usbdev->descriptor.idProduct), |
1651 | le16_to_cpu(usbdev->descriptor.bcdDevice), dev); | ||
1651 | dev_dbg(dev->gdev, "console enable=%d\n", console); | 1652 | dev_dbg(dev->gdev, "console enable=%d\n", console); |
1652 | dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); | 1653 | dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); |
1653 | 1654 | ||
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 6a3c353de7c3..05ef657235df 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c | |||
@@ -1105,8 +1105,8 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info) | |||
1105 | char *bufptr; | 1105 | char *bufptr; |
1106 | struct urb *urb; | 1106 | struct urb *urb; |
1107 | 1107 | ||
1108 | pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n", | 1108 | pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n", |
1109 | info->node, dev->blank_mode, blank_mode); | 1109 | info->node, dev->blank_mode, blank_mode); |
1110 | 1110 | ||
1111 | if ((dev->blank_mode == FB_BLANK_POWERDOWN) && | 1111 | if ((dev->blank_mode == FB_BLANK_POWERDOWN) && |
1112 | (blank_mode != FB_BLANK_POWERDOWN)) { | 1112 | (blank_mode != FB_BLANK_POWERDOWN)) { |
@@ -1613,8 +1613,9 @@ static int dlfb_usb_probe(struct usb_interface *interface, | |||
1613 | pr_info("%s %s - serial #%s\n", | 1613 | pr_info("%s %s - serial #%s\n", |
1614 | usbdev->manufacturer, usbdev->product, usbdev->serial); | 1614 | usbdev->manufacturer, usbdev->product, usbdev->serial); |
1615 | pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", | 1615 | pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", |
1616 | usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, | 1616 | le16_to_cpu(usbdev->descriptor.idVendor), |
1617 | usbdev->descriptor.bcdDevice, dev); | 1617 | le16_to_cpu(usbdev->descriptor.idProduct), |
1618 | le16_to_cpu(usbdev->descriptor.bcdDevice), dev); | ||
1618 | pr_info("console enable=%d\n", console); | 1619 | pr_info("console enable=%d\n", console); |
1619 | pr_info("fb_defio enable=%d\n", fb_defio); | 1620 | pr_info("fb_defio enable=%d\n", fb_defio); |
1620 | pr_info("shadow enable=%d\n", shadow); | 1621 | pr_info("shadow enable=%d\n", shadow); |
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c index f9718f012aae..badee04ef496 100644 --- a/drivers/video/fbdev/via/viafbdev.c +++ b/drivers/video/fbdev/via/viafbdev.c | |||
@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared) | |||
1630 | } | 1630 | } |
1631 | static void viafb_remove_proc(struct viafb_shared *shared) | 1631 | static void viafb_remove_proc(struct viafb_shared *shared) |
1632 | { | 1632 | { |
1633 | struct proc_dir_entry *viafb_entry = shared->proc_entry, | 1633 | struct proc_dir_entry *viafb_entry = shared->proc_entry; |
1634 | *iga1_entry = shared->iga1_proc_entry, | ||
1635 | *iga2_entry = shared->iga2_proc_entry; | ||
1636 | 1634 | ||
1637 | if (!viafb_entry) | 1635 | if (!viafb_entry) |
1638 | return; | 1636 | return; |
1639 | 1637 | ||
1640 | remove_proc_entry("output_devices", iga2_entry); | 1638 | remove_proc_entry("output_devices", shared->iga2_proc_entry); |
1641 | remove_proc_entry("iga2", viafb_entry); | 1639 | remove_proc_entry("iga2", viafb_entry); |
1642 | remove_proc_entry("output_devices", iga1_entry); | 1640 | remove_proc_entry("output_devices", shared->iga1_proc_entry); |
1643 | remove_proc_entry("iga1", viafb_entry); | 1641 | remove_proc_entry("iga1", viafb_entry); |
1644 | remove_proc_entry("supported_output_devices", viafb_entry); | 1642 | remove_proc_entry("supported_output_devices", viafb_entry); |
1645 | 1643 | ||
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 408c174ef0d5..22caf808bfab 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -663,6 +663,12 @@ static int virtballoon_restore(struct virtio_device *vdev) | |||
663 | } | 663 | } |
664 | #endif | 664 | #endif |
665 | 665 | ||
666 | static int virtballoon_validate(struct virtio_device *vdev) | ||
667 | { | ||
668 | __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); | ||
669 | return 0; | ||
670 | } | ||
671 | |||
666 | static unsigned int features[] = { | 672 | static unsigned int features[] = { |
667 | VIRTIO_BALLOON_F_MUST_TELL_HOST, | 673 | VIRTIO_BALLOON_F_MUST_TELL_HOST, |
668 | VIRTIO_BALLOON_F_STATS_VQ, | 674 | VIRTIO_BALLOON_F_STATS_VQ, |
@@ -675,6 +681,7 @@ static struct virtio_driver virtio_balloon_driver = { | |||
675 | .driver.name = KBUILD_MODNAME, | 681 | .driver.name = KBUILD_MODNAME, |
676 | .driver.owner = THIS_MODULE, | 682 | .driver.owner = THIS_MODULE, |
677 | .id_table = id_table, | 683 | .id_table = id_table, |
684 | .validate = virtballoon_validate, | ||
678 | .probe = virtballoon_probe, | 685 | .probe = virtballoon_probe, |
679 | .remove = virtballoon_remove, | 686 | .remove = virtballoon_remove, |
680 | .config_changed = virtballoon_changed, | 687 | .config_changed = virtballoon_changed, |
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 734cbf8d9676..dd9f1bebb5a3 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c | |||
@@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp, | |||
344 | int status; | 344 | int status; |
345 | 345 | ||
346 | token = (autofs_wqt_t) param->fail.token; | 346 | token = (autofs_wqt_t) param->fail.token; |
347 | status = param->fail.status ? param->fail.status : -ENOENT; | 347 | status = param->fail.status < 0 ? param->fail.status : -ENOENT; |
348 | return autofs4_wait_release(sbi, token, status); | 348 | return autofs4_wait_release(sbi, token, status); |
349 | } | 349 | } |
350 | 350 | ||
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c index a97fdc156a03..baacc1866861 100644 --- a/fs/btrfs/hash.c +++ b/fs/btrfs/hash.c | |||
@@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length) | |||
38 | { | 38 | { |
39 | SHASH_DESC_ON_STACK(shash, tfm); | 39 | SHASH_DESC_ON_STACK(shash, tfm); |
40 | u32 *ctx = (u32 *)shash_desc_ctx(shash); | 40 | u32 *ctx = (u32 *)shash_desc_ctx(shash); |
41 | u32 retval; | ||
41 | int err; | 42 | int err; |
42 | 43 | ||
43 | shash->tfm = tfm; | 44 | shash->tfm = tfm; |
@@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length) | |||
47 | err = crypto_shash_update(shash, address, length); | 48 | err = crypto_shash_update(shash, address, length); |
48 | BUG_ON(err); | 49 | BUG_ON(err); |
49 | 50 | ||
50 | return *ctx; | 51 | retval = *ctx; |
52 | barrier_data(ctx); | ||
53 | return retval; | ||
51 | } | 54 | } |
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 987044bca1c2..59cb307b15fb 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c | |||
@@ -131,6 +131,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
131 | } | 131 | } |
132 | 132 | ||
133 | if (new_mode != old_mode) { | 133 | if (new_mode != old_mode) { |
134 | newattrs.ia_ctime = current_time(inode); | ||
134 | newattrs.ia_mode = new_mode; | 135 | newattrs.ia_mode = new_mode; |
135 | newattrs.ia_valid = ATTR_MODE; | 136 | newattrs.ia_valid = ATTR_MODE; |
136 | ret = __ceph_setattr(inode, &newattrs); | 137 | ret = __ceph_setattr(inode, &newattrs); |
diff --git a/fs/ceph/export.c b/fs/ceph/export.c index e8f11fa565c5..7df550c13d7f 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c | |||
@@ -91,6 +91,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) | |||
91 | ceph_mdsc_put_request(req); | 91 | ceph_mdsc_put_request(req); |
92 | if (!inode) | 92 | if (!inode) |
93 | return ERR_PTR(-ESTALE); | 93 | return ERR_PTR(-ESTALE); |
94 | if (inode->i_nlink == 0) { | ||
95 | iput(inode); | ||
96 | return ERR_PTR(-ESTALE); | ||
97 | } | ||
94 | } | 98 | } |
95 | 99 | ||
96 | return d_obtain_alias(inode); | 100 | return d_obtain_alias(inode); |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index dcce79b84406..4de6cdddf059 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -2022,7 +2022,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) | |||
2022 | attr->ia_size > inode->i_size) { | 2022 | attr->ia_size > inode->i_size) { |
2023 | i_size_write(inode, attr->ia_size); | 2023 | i_size_write(inode, attr->ia_size); |
2024 | inode->i_blocks = calc_inode_blocks(attr->ia_size); | 2024 | inode->i_blocks = calc_inode_blocks(attr->ia_size); |
2025 | inode->i_ctime = attr->ia_ctime; | ||
2026 | ci->i_reported_size = attr->ia_size; | 2025 | ci->i_reported_size = attr->ia_size; |
2027 | dirtied |= CEPH_CAP_FILE_EXCL; | 2026 | dirtied |= CEPH_CAP_FILE_EXCL; |
2028 | } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || | 2027 | } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || |
@@ -2044,7 +2043,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) | |||
2044 | inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, | 2043 | inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, |
2045 | attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, | 2044 | attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, |
2046 | only ? "ctime only" : "ignored"); | 2045 | only ? "ctime only" : "ignored"); |
2047 | inode->i_ctime = attr->ia_ctime; | ||
2048 | if (only) { | 2046 | if (only) { |
2049 | /* | 2047 | /* |
2050 | * if kernel wants to dirty ctime but nothing else, | 2048 | * if kernel wants to dirty ctime but nothing else, |
@@ -2067,7 +2065,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) | |||
2067 | if (dirtied) { | 2065 | if (dirtied) { |
2068 | inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, | 2066 | inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, |
2069 | &prealloc_cf); | 2067 | &prealloc_cf); |
2070 | inode->i_ctime = current_time(inode); | 2068 | inode->i_ctime = attr->ia_ctime; |
2071 | } | 2069 | } |
2072 | 2070 | ||
2073 | release &= issued; | 2071 | release &= issued; |
@@ -2085,6 +2083,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) | |||
2085 | req->r_inode_drop = release; | 2083 | req->r_inode_drop = release; |
2086 | req->r_args.setattr.mask = cpu_to_le32(mask); | 2084 | req->r_args.setattr.mask = cpu_to_le32(mask); |
2087 | req->r_num_caps = 1; | 2085 | req->r_num_caps = 1; |
2086 | req->r_stamp = attr->ia_ctime; | ||
2088 | err = ceph_mdsc_do_request(mdsc, NULL, req); | 2087 | err = ceph_mdsc_do_request(mdsc, NULL, req); |
2089 | } | 2088 | } |
2090 | dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, | 2089 | dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f38e56fa9712..0c05df44cc6c 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -1687,7 +1687,6 @@ struct ceph_mds_request * | |||
1687 | ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | 1687 | ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) |
1688 | { | 1688 | { |
1689 | struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); | 1689 | struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); |
1690 | struct timespec ts; | ||
1691 | 1690 | ||
1692 | if (!req) | 1691 | if (!req) |
1693 | return ERR_PTR(-ENOMEM); | 1692 | return ERR_PTR(-ENOMEM); |
@@ -1706,8 +1705,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | |||
1706 | init_completion(&req->r_safe_completion); | 1705 | init_completion(&req->r_safe_completion); |
1707 | INIT_LIST_HEAD(&req->r_unsafe_item); | 1706 | INIT_LIST_HEAD(&req->r_unsafe_item); |
1708 | 1707 | ||
1709 | ktime_get_real_ts(&ts); | 1708 | req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran); |
1710 | req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran); | ||
1711 | 1709 | ||
1712 | req->r_op = op; | 1710 | req->r_op = op; |
1713 | req->r_direct_mode = mode; | 1711 | req->r_direct_mode = mode; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0fd081bd2a2f..fcef70602b27 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) | |||
3271 | if (!is_sync_kiocb(iocb)) | 3271 | if (!is_sync_kiocb(iocb)) |
3272 | ctx->iocb = iocb; | 3272 | ctx->iocb = iocb; |
3273 | 3273 | ||
3274 | if (to->type & ITER_IOVEC) | 3274 | if (to->type == ITER_IOVEC) |
3275 | ctx->should_dirty = true; | 3275 | ctx->should_dirty = true; |
3276 | 3276 | ||
3277 | rc = setup_aio_ctx_iter(ctx, to, READ); | 3277 | rc = setup_aio_ctx_iter(ctx, to, READ); |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index b08531977daa..3b147dc6af63 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) | |||
810 | 810 | ||
811 | if (!pages) { | 811 | if (!pages) { |
812 | pages = vmalloc(max_pages * sizeof(struct page *)); | 812 | pages = vmalloc(max_pages * sizeof(struct page *)); |
813 | if (!bv) { | 813 | if (!pages) { |
814 | kvfree(bv); | 814 | kvfree(bv); |
815 | return -ENOMEM; | 815 | return -ENOMEM; |
816 | } | 816 | } |
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 27bc360c7ffd..a723df3e0197 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, | |||
849 | struct cifs_fid *fid, __u16 search_flags, | 849 | struct cifs_fid *fid, __u16 search_flags, |
850 | struct cifs_search_info *srch_inf) | 850 | struct cifs_search_info *srch_inf) |
851 | { | 851 | { |
852 | return CIFSFindFirst(xid, tcon, path, cifs_sb, | 852 | int rc; |
853 | &fid->netfid, search_flags, srch_inf, true); | 853 | |
854 | rc = CIFSFindFirst(xid, tcon, path, cifs_sb, | ||
855 | &fid->netfid, search_flags, srch_inf, true); | ||
856 | if (rc) | ||
857 | cifs_dbg(FYI, "find first failed=%d\n", rc); | ||
858 | return rc; | ||
854 | } | 859 | } |
855 | 860 | ||
856 | static int | 861 | static int |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index c58691834eb2..7e48561abd29 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, | |||
982 | rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); | 982 | rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); |
983 | kfree(utf16_path); | 983 | kfree(utf16_path); |
984 | if (rc) { | 984 | if (rc) { |
985 | cifs_dbg(VFS, "open dir failed\n"); | 985 | cifs_dbg(FYI, "open dir failed rc=%d\n", rc); |
986 | return rc; | 986 | return rc; |
987 | } | 987 | } |
988 | 988 | ||
@@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, | |||
992 | rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, | 992 | rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, |
993 | fid->volatile_fid, 0, srch_inf); | 993 | fid->volatile_fid, 0, srch_inf); |
994 | if (rc) { | 994 | if (rc) { |
995 | cifs_dbg(VFS, "query directory failed\n"); | 995 | cifs_dbg(FYI, "query directory failed rc=%d\n", rc); |
996 | SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); | 996 | SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); |
997 | } | 997 | } |
998 | return rc; | 998 | return rc; |
@@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) | |||
1809 | 1809 | ||
1810 | sg = init_sg(rqst, sign); | 1810 | sg = init_sg(rqst, sign); |
1811 | if (!sg) { | 1811 | if (!sg) { |
1812 | cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc); | 1812 | cifs_dbg(VFS, "%s: Failed to init sg", __func__); |
1813 | rc = -ENOMEM; | ||
1813 | goto free_req; | 1814 | goto free_req; |
1814 | } | 1815 | } |
1815 | 1816 | ||
@@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) | |||
1817 | iv = kzalloc(iv_len, GFP_KERNEL); | 1818 | iv = kzalloc(iv_len, GFP_KERNEL); |
1818 | if (!iv) { | 1819 | if (!iv) { |
1819 | cifs_dbg(VFS, "%s: Failed to alloc IV", __func__); | 1820 | cifs_dbg(VFS, "%s: Failed to alloc IV", __func__); |
1821 | rc = -ENOMEM; | ||
1820 | goto free_sg; | 1822 | goto free_sg; |
1821 | } | 1823 | } |
1822 | iv[0] = 3; | 1824 | iv[0] = 3; |
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 3cb5c9e2d4e7..de50e749ff05 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode, | |||
188 | pcreatetime = (__u64 *)value; | 188 | pcreatetime = (__u64 *)value; |
189 | *pcreatetime = CIFS_I(inode)->createtime; | 189 | *pcreatetime = CIFS_I(inode)->createtime; |
190 | return sizeof(__u64); | 190 | return sizeof(__u64); |
191 | |||
192 | return rc; | ||
193 | } | 191 | } |
194 | 192 | ||
195 | 193 | ||
diff --git a/fs/configfs/item.c b/fs/configfs/item.c index 8b2a994042dd..a66f6624d899 100644 --- a/fs/configfs/item.c +++ b/fs/configfs/item.c | |||
@@ -138,6 +138,14 @@ struct config_item *config_item_get(struct config_item *item) | |||
138 | } | 138 | } |
139 | EXPORT_SYMBOL(config_item_get); | 139 | EXPORT_SYMBOL(config_item_get); |
140 | 140 | ||
141 | struct config_item *config_item_get_unless_zero(struct config_item *item) | ||
142 | { | ||
143 | if (item && kref_get_unless_zero(&item->ci_kref)) | ||
144 | return item; | ||
145 | return NULL; | ||
146 | } | ||
147 | EXPORT_SYMBOL(config_item_get_unless_zero); | ||
148 | |||
141 | static void config_item_cleanup(struct config_item *item) | 149 | static void config_item_cleanup(struct config_item *item) |
142 | { | 150 | { |
143 | struct config_item_type *t = item->ci_type; | 151 | struct config_item_type *t = item->ci_type; |
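The config_item_get_unless_zero() helper added above pairs kref_get_unless_zero() with the caller's locking: in the companion symlink.c hunk below, the reference is taken under configfs_dirent_lock only after the target is known not to be going away. A generic sketch of that lookup pattern, using made-up registry_* names rather than configfs internals:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct registry_entry {
	struct kref ref;
	struct list_head node;
	const char *name;
};

static LIST_HEAD(registry);
static DEFINE_SPINLOCK(registry_lock);

static struct registry_entry *registry_find_get(const char *name)
{
	struct registry_entry *e, *found = NULL;

	spin_lock(&registry_lock);
	list_for_each_entry(e, &registry, node) {
		/* Only take a reference while the entry is still live. */
		if (!strcmp(e->name, name) && kref_get_unless_zero(&e->ref)) {
			found = e;
			break;
		}
	}
	spin_unlock(&registry_lock);
	return found;
}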
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index a6ab012a2c6a..c8aabba502f6 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c | |||
@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item, | |||
83 | ret = -ENOMEM; | 83 | ret = -ENOMEM; |
84 | sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); | 84 | sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); |
85 | if (sl) { | 85 | if (sl) { |
86 | sl->sl_target = config_item_get(item); | ||
87 | spin_lock(&configfs_dirent_lock); | 86 | spin_lock(&configfs_dirent_lock); |
88 | if (target_sd->s_type & CONFIGFS_USET_DROPPING) { | 87 | if (target_sd->s_type & CONFIGFS_USET_DROPPING) { |
89 | spin_unlock(&configfs_dirent_lock); | 88 | spin_unlock(&configfs_dirent_lock); |
90 | config_item_put(item); | ||
91 | kfree(sl); | 89 | kfree(sl); |
92 | return -ENOENT; | 90 | return -ENOENT; |
93 | } | 91 | } |
92 | sl->sl_target = config_item_get(item); | ||
94 | list_add(&sl->sl_list, &target_sd->s_links); | 93 | list_add(&sl->sl_list, &target_sd->s_links); |
95 | spin_unlock(&configfs_dirent_lock); | 94 | spin_unlock(&configfs_dirent_lock); |
96 | ret = configfs_create_link(sl, parent_item->ci_dentry, | 95 | ret = configfs_create_link(sl, parent_item->ci_dentry, |
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
@@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping, | |||
859 | if (ret < 0) | 859 | if (ret < 0) |
860 | goto out; | 860 | goto out; |
861 | } | 861 | } |
862 | start_index = indices[pvec.nr - 1] + 1; | ||
862 | } | 863 | } |
863 | out: | 864 | out: |
864 | put_dax(dax_dev); | 865 | put_dax(dax_dev); |
diff --git a/fs/dcache.c b/fs/dcache.c index cddf39777835..a9f995f6859e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -1494,7 +1494,7 @@ static void check_and_drop(void *_data) | |||
1494 | { | 1494 | { |
1495 | struct detach_data *data = _data; | 1495 | struct detach_data *data = _data; |
1496 | 1496 | ||
1497 | if (!data->mountpoint && !data->select.found) | 1497 | if (!data->mountpoint && list_empty(&data->select.dispose)) |
1498 | __d_drop(data->select.start); | 1498 | __d_drop(data->select.start); |
1499 | } | 1499 | } |
1500 | 1500 | ||
@@ -1536,17 +1536,15 @@ void d_invalidate(struct dentry *dentry) | |||
1536 | 1536 | ||
1537 | d_walk(dentry, &data, detach_and_collect, check_and_drop); | 1537 | d_walk(dentry, &data, detach_and_collect, check_and_drop); |
1538 | 1538 | ||
1539 | if (data.select.found) | 1539 | if (!list_empty(&data.select.dispose)) |
1540 | shrink_dentry_list(&data.select.dispose); | 1540 | shrink_dentry_list(&data.select.dispose); |
1541 | else if (!data.mountpoint) | ||
1542 | return; | ||
1541 | 1543 | ||
1542 | if (data.mountpoint) { | 1544 | if (data.mountpoint) { |
1543 | detach_mounts(data.mountpoint); | 1545 | detach_mounts(data.mountpoint); |
1544 | dput(data.mountpoint); | 1546 | dput(data.mountpoint); |
1545 | } | 1547 | } |
1546 | |||
1547 | if (!data.mountpoint && !data.select.found) | ||
1548 | break; | ||
1549 | |||
1550 | cond_resched(); | 1548 | cond_resched(); |
1551 | } | 1549 | } |
1552 | } | 1550 | } |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
220 | 220 | ||
221 | if (write) { | 221 | if (write) { |
222 | unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; | 222 | unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; |
223 | unsigned long ptr_size; | ||
223 | struct rlimit *rlim; | 224 | struct rlimit *rlim; |
224 | 225 | ||
226 | /* | ||
227 | * Since the stack will hold pointers to the strings, we | ||
228 | * must account for them as well. | ||
229 | * | ||
230 | * The size calculation is the entire vma while each arg page is | ||
231 | * built, so each time we get here it's calculating how far it | ||
232 | * is currently (rather than each call being just the newly | ||
233 | * added size from the arg page). As a result, we need to | ||
234 | * always add the entire size of the pointers, so that on the | ||
235 | * last call to get_arg_page() we'll actually have the entire | ||
236 | * correct size. | ||
237 | */ | ||
238 | ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); | ||
239 | if (ptr_size > ULONG_MAX - size) | ||
240 | goto fail; | ||
241 | size += ptr_size; | ||
242 | |||
225 | acct_arg_size(bprm, size / PAGE_SIZE); | 243 | acct_arg_size(bprm, size / PAGE_SIZE); |
226 | 244 | ||
227 | /* | 245 | /* |
@@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, | |||
239 | * to work from. | 257 | * to work from. |
240 | */ | 258 | */ |
241 | rlim = current->signal->rlim; | 259 | rlim = current->signal->rlim; |
242 | if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { | 260 | if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) |
243 | put_page(page); | 261 | goto fail; |
244 | return NULL; | ||
245 | } | ||
246 | } | 262 | } |
247 | 263 | ||
248 | return page; | 264 | return page; |
265 | |||
266 | fail: | ||
267 | put_page(page); | ||
268 | return NULL; | ||
249 | } | 269 | } |
250 | 270 | ||
251 | static void put_arg_page(struct page *page) | 271 | static void put_arg_page(struct page *page) |
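The get_arg_page() hunk explains why the argv/envp pointer array must be charged against the stack limit, and guards the addition with "ptr_size > ULONG_MAX - size" so the sum cannot wrap. The same overflow-check idiom in standalone form:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* True if a + b would exceed ULONG_MAX, checked without actually wrapping. */
static bool add_would_overflow(unsigned long a, unsigned long b)
{
	return a > ULONG_MAX - b;
}

int main(void)
{
	unsigned long size = ULONG_MAX - 8;
	unsigned long extra = 16;

	if (add_would_overflow(size, extra))
		printf("rejecting: %lu + %lu would wrap\n", size, extra);
	else
		printf("ok: %lu\n", size + extra);
	return 0;
}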
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 2185c7a040a1..fd2e651bad6d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -1078,6 +1078,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, | |||
1078 | { | 1078 | { |
1079 | SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver); | 1079 | SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver); |
1080 | u32 *ctx = (u32 *)shash_desc_ctx(shash); | 1080 | u32 *ctx = (u32 *)shash_desc_ctx(shash); |
1081 | u32 retval; | ||
1081 | int err; | 1082 | int err; |
1082 | 1083 | ||
1083 | shash->tfm = sbi->s_chksum_driver; | 1084 | shash->tfm = sbi->s_chksum_driver; |
@@ -1087,7 +1088,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, | |||
1087 | err = crypto_shash_update(shash, address, length); | 1088 | err = crypto_shash_update(shash, address, length); |
1088 | BUG_ON(err); | 1089 | BUG_ON(err); |
1089 | 1090 | ||
1090 | return *ctx; | 1091 | retval = *ctx; |
1092 | barrier_data(ctx); | ||
1093 | return retval; | ||
1091 | } | 1094 | } |
1092 | 1095 | ||
1093 | static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, | 1096 | static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index dde861387a40..d44f5456eb9b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -200,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
200 | addr = ALIGN(addr, huge_page_size(h)); | 200 | addr = ALIGN(addr, huge_page_size(h)); |
201 | vma = find_vma(mm, addr); | 201 | vma = find_vma(mm, addr); |
202 | if (TASK_SIZE - len >= addr && | 202 | if (TASK_SIZE - len >= addr && |
203 | (!vma || addr + len <= vma->vm_start)) | 203 | (!vma || addr + len <= vm_start_gap(vma))) |
204 | return addr; | 204 | return addr; |
205 | } | 205 | } |
206 | 206 | ||
diff --git a/fs/namespace.c b/fs/namespace.c index 8bd3e4d448b9..5a4438445bf7 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -3488,6 +3488,8 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns) | |||
3488 | return err; | 3488 | return err; |
3489 | } | 3489 | } |
3490 | 3490 | ||
3491 | put_mnt_ns(old_mnt_ns); | ||
3492 | |||
3491 | /* Update the pwd and root */ | 3493 | /* Update the pwd and root */ |
3492 | set_fs_pwd(fs, &root); | 3494 | set_fs_pwd(fs, &root); |
3493 | set_fs_root(fs, &root); | 3495 | set_fs_root(fs, &root); |
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 3b7c937a36b5..4689940a953c 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode, | |||
2591 | struct ocfs2_lock_res *lockres; | 2591 | struct ocfs2_lock_res *lockres; |
2592 | 2592 | ||
2593 | lockres = &OCFS2_I(inode)->ip_inode_lockres; | 2593 | lockres = &OCFS2_I(inode)->ip_inode_lockres; |
2594 | /* had_lock means that the currect process already takes the cluster | ||
2595 | * lock previously. If had_lock is 1, we have nothing to do here, and | ||
2596 | * it will get unlocked where we got the lock. | ||
2597 | */ | ||
2594 | if (!had_lock) { | 2598 | if (!had_lock) { |
2595 | ocfs2_remove_holder(lockres, oh); | 2599 | ocfs2_remove_holder(lockres, oh); |
2596 | ocfs2_inode_unlock(inode, ex); | 2600 | ocfs2_inode_unlock(inode, ex); |
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3c5384d9b3a5..f70c3778d600 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode, | |||
1328 | void *buffer, | 1328 | void *buffer, |
1329 | size_t buffer_size) | 1329 | size_t buffer_size) |
1330 | { | 1330 | { |
1331 | int ret; | 1331 | int ret, had_lock; |
1332 | struct buffer_head *di_bh = NULL; | 1332 | struct buffer_head *di_bh = NULL; |
1333 | struct ocfs2_lock_holder oh; | ||
1333 | 1334 | ||
1334 | ret = ocfs2_inode_lock(inode, &di_bh, 0); | 1335 | had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh); |
1335 | if (ret < 0) { | 1336 | if (had_lock < 0) { |
1336 | mlog_errno(ret); | 1337 | mlog_errno(had_lock); |
1337 | return ret; | 1338 | return had_lock; |
1338 | } | 1339 | } |
1339 | down_read(&OCFS2_I(inode)->ip_xattr_sem); | 1340 | down_read(&OCFS2_I(inode)->ip_xattr_sem); |
1340 | ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, | 1341 | ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, |
1341 | name, buffer, buffer_size); | 1342 | name, buffer, buffer_size); |
1342 | up_read(&OCFS2_I(inode)->ip_xattr_sem); | 1343 | up_read(&OCFS2_I(inode)->ip_xattr_sem); |
1343 | 1344 | ||
1344 | ocfs2_inode_unlock(inode, 0); | 1345 | ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock); |
1345 | 1346 | ||
1346 | brelse(di_bh); | 1347 | brelse(di_bh); |
1347 | 1348 | ||
@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode, | |||
3537 | { | 3538 | { |
3538 | struct buffer_head *di_bh = NULL; | 3539 | struct buffer_head *di_bh = NULL; |
3539 | struct ocfs2_dinode *di; | 3540 | struct ocfs2_dinode *di; |
3540 | int ret, credits, ref_meta = 0, ref_credits = 0; | 3541 | int ret, credits, had_lock, ref_meta = 0, ref_credits = 0; |
3541 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 3542 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
3542 | struct inode *tl_inode = osb->osb_tl_inode; | 3543 | struct inode *tl_inode = osb->osb_tl_inode; |
3543 | struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; | 3544 | struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; |
3544 | struct ocfs2_refcount_tree *ref_tree = NULL; | 3545 | struct ocfs2_refcount_tree *ref_tree = NULL; |
3546 | struct ocfs2_lock_holder oh; | ||
3545 | 3547 | ||
3546 | struct ocfs2_xattr_info xi = { | 3548 | struct ocfs2_xattr_info xi = { |
3547 | .xi_name_index = name_index, | 3549 | .xi_name_index = name_index, |
@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode, | |||
3572 | return -ENOMEM; | 3574 | return -ENOMEM; |
3573 | } | 3575 | } |
3574 | 3576 | ||
3575 | ret = ocfs2_inode_lock(inode, &di_bh, 1); | 3577 | had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh); |
3576 | if (ret < 0) { | 3578 | if (had_lock < 0) { |
3579 | ret = had_lock; | ||
3577 | mlog_errno(ret); | 3580 | mlog_errno(ret); |
3578 | goto cleanup_nolock; | 3581 | goto cleanup_nolock; |
3579 | } | 3582 | } |
@@ -3670,7 +3673,7 @@ cleanup: | |||
3670 | if (ret) | 3673 | if (ret) |
3671 | mlog_errno(ret); | 3674 | mlog_errno(ret); |
3672 | } | 3675 | } |
3673 | ocfs2_inode_unlock(inode, 1); | 3676 | ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); |
3674 | cleanup_nolock: | 3677 | cleanup_nolock: |
3675 | brelse(di_bh); | 3678 | brelse(di_bh); |
3676 | brelse(xbs.xattr_bh); | 3679 | brelse(xbs.xattr_bh); |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f0c8b33d99b1..520802da059c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -300,11 +300,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) | |||
300 | 300 | ||
301 | /* We don't show the stack guard page in /proc/maps */ | 301 | /* We don't show the stack guard page in /proc/maps */ |
302 | start = vma->vm_start; | 302 | start = vma->vm_start; |
303 | if (stack_guard_page_start(vma, start)) | ||
304 | start += PAGE_SIZE; | ||
305 | end = vma->vm_end; | 303 | end = vma->vm_end; |
306 | if (stack_guard_page_end(vma, end)) | ||
307 | end -= PAGE_SIZE; | ||
308 | 304 | ||
309 | seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); | 305 | seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); |
310 | seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", | 306 | seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", |
diff --git a/fs/read_write.c b/fs/read_write.c index 47c1d4484df9..19d4d88fa285 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -1285,7 +1285,7 @@ static size_t compat_writev(struct file *file, | |||
1285 | if (!(file->f_mode & FMODE_CAN_WRITE)) | 1285 | if (!(file->f_mode & FMODE_CAN_WRITE)) |
1286 | goto out; | 1286 | goto out; |
1287 | 1287 | ||
1288 | ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0); | 1288 | ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags); |
1289 | 1289 | ||
1290 | out: | 1290 | out: |
1291 | if (ret > 0) | 1291 | if (ret > 0) |
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index d642cc0a8271..f80be4c5df9d 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
@@ -400,10 +400,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
400 | /* | 400 | /* |
401 | * There is not enough space for user on the device | 401 | * There is not enough space for user on the device |
402 | */ | 402 | */ |
403 | if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) { | 403 | if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) { |
404 | mutex_unlock(&UFS_SB(sb)->s_lock); | 404 | if (!capable(CAP_SYS_RESOURCE)) { |
405 | UFSD("EXIT (FAILED)\n"); | 405 | mutex_unlock(&UFS_SB(sb)->s_lock); |
406 | return 0; | 406 | UFSD("EXIT (FAILED)\n"); |
407 | return 0; | ||
408 | } | ||
407 | } | 409 | } |
408 | 410 | ||
409 | if (goal >= uspi->s_size) | 411 | if (goal >= uspi->s_size) |
@@ -421,12 +423,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
421 | if (result) { | 423 | if (result) { |
422 | ufs_clear_frags(inode, result + oldcount, | 424 | ufs_clear_frags(inode, result + oldcount, |
423 | newcount - oldcount, locked_page != NULL); | 425 | newcount - oldcount, locked_page != NULL); |
426 | *err = 0; | ||
424 | write_seqlock(&UFS_I(inode)->meta_lock); | 427 | write_seqlock(&UFS_I(inode)->meta_lock); |
425 | ufs_cpu_to_data_ptr(sb, p, result); | 428 | ufs_cpu_to_data_ptr(sb, p, result); |
426 | write_sequnlock(&UFS_I(inode)->meta_lock); | ||
427 | *err = 0; | ||
428 | UFS_I(inode)->i_lastfrag = | 429 | UFS_I(inode)->i_lastfrag = |
429 | max(UFS_I(inode)->i_lastfrag, fragment + count); | 430 | max(UFS_I(inode)->i_lastfrag, fragment + count); |
431 | write_sequnlock(&UFS_I(inode)->meta_lock); | ||
430 | } | 432 | } |
431 | mutex_unlock(&UFS_SB(sb)->s_lock); | 433 | mutex_unlock(&UFS_SB(sb)->s_lock); |
432 | UFSD("EXIT, result %llu\n", (unsigned long long)result); | 434 | UFSD("EXIT, result %llu\n", (unsigned long long)result); |
@@ -439,8 +441,10 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
439 | result = ufs_add_fragments(inode, tmp, oldcount, newcount); | 441 | result = ufs_add_fragments(inode, tmp, oldcount, newcount); |
440 | if (result) { | 442 | if (result) { |
441 | *err = 0; | 443 | *err = 0; |
444 | read_seqlock_excl(&UFS_I(inode)->meta_lock); | ||
442 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, | 445 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, |
443 | fragment + count); | 446 | fragment + count); |
447 | read_sequnlock_excl(&UFS_I(inode)->meta_lock); | ||
444 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, | 448 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, |
445 | locked_page != NULL); | 449 | locked_page != NULL); |
446 | mutex_unlock(&UFS_SB(sb)->s_lock); | 450 | mutex_unlock(&UFS_SB(sb)->s_lock); |
@@ -451,39 +455,29 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
451 | /* | 455 | /* |
452 | * allocate new block and move data | 456 | * allocate new block and move data |
453 | */ | 457 | */ |
454 | switch (fs32_to_cpu(sb, usb1->fs_optim)) { | 458 | if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) { |
455 | case UFS_OPTSPACE: | ||
456 | request = newcount; | 459 | request = newcount; |
457 | if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree | 460 | if (uspi->cs_total.cs_nffree < uspi->s_space_to_time) |
458 | > uspi->s_dsize * uspi->s_minfree / (2 * 100)) | 461 | usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); |
459 | break; | 462 | } else { |
460 | usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); | ||
461 | break; | ||
462 | default: | ||
463 | usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); | ||
464 | |||
465 | case UFS_OPTTIME: | ||
466 | request = uspi->s_fpb; | 463 | request = uspi->s_fpb; |
467 | if (uspi->cs_total.cs_nffree < uspi->s_dsize * | 464 | if (uspi->cs_total.cs_nffree > uspi->s_time_to_space) |
468 | (uspi->s_minfree - 2) / 100) | 465 | usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); |
469 | break; | ||
470 | usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); | ||
471 | break; | ||
472 | } | 466 | } |
473 | result = ufs_alloc_fragments (inode, cgno, goal, request, err); | 467 | result = ufs_alloc_fragments (inode, cgno, goal, request, err); |
474 | if (result) { | 468 | if (result) { |
475 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, | 469 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, |
476 | locked_page != NULL); | 470 | locked_page != NULL); |
471 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
477 | ufs_change_blocknr(inode, fragment - oldcount, oldcount, | 472 | ufs_change_blocknr(inode, fragment - oldcount, oldcount, |
478 | uspi->s_sbbase + tmp, | 473 | uspi->s_sbbase + tmp, |
479 | uspi->s_sbbase + result, locked_page); | 474 | uspi->s_sbbase + result, locked_page); |
475 | *err = 0; | ||
480 | write_seqlock(&UFS_I(inode)->meta_lock); | 476 | write_seqlock(&UFS_I(inode)->meta_lock); |
481 | ufs_cpu_to_data_ptr(sb, p, result); | 477 | ufs_cpu_to_data_ptr(sb, p, result); |
482 | write_sequnlock(&UFS_I(inode)->meta_lock); | ||
483 | *err = 0; | ||
484 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, | 478 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, |
485 | fragment + count); | 479 | fragment + count); |
486 | mutex_unlock(&UFS_SB(sb)->s_lock); | 480 | write_sequnlock(&UFS_I(inode)->meta_lock); |
487 | if (newcount < request) | 481 | if (newcount < request) |
488 | ufs_free_fragments (inode, result + newcount, request - newcount); | 482 | ufs_free_fragments (inode, result + newcount, request - newcount); |
489 | ufs_free_fragments (inode, tmp, oldcount); | 483 | ufs_free_fragments (inode, tmp, oldcount); |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index da553ffec85b..f36d6a53687d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -401,13 +401,20 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff | |||
401 | u64 phys64 = 0; | 401 | u64 phys64 = 0; |
402 | unsigned frag = fragment & uspi->s_fpbmask; | 402 | unsigned frag = fragment & uspi->s_fpbmask; |
403 | 403 | ||
404 | if (!create) { | 404 | phys64 = ufs_frag_map(inode, offsets, depth); |
405 | phys64 = ufs_frag_map(inode, offsets, depth); | 405 | if (!create) |
406 | if (phys64) | 406 | goto done; |
407 | map_bh(bh_result, sb, phys64 + frag); | ||
408 | return 0; | ||
409 | } | ||
410 | 407 | ||
408 | if (phys64) { | ||
409 | if (fragment >= UFS_NDIR_FRAGMENT) | ||
410 | goto done; | ||
411 | read_seqlock_excl(&UFS_I(inode)->meta_lock); | ||
412 | if (fragment < UFS_I(inode)->i_lastfrag) { | ||
413 | read_sequnlock_excl(&UFS_I(inode)->meta_lock); | ||
414 | goto done; | ||
415 | } | ||
416 | read_sequnlock_excl(&UFS_I(inode)->meta_lock); | ||
417 | } | ||
411 | /* This code entered only while writing ....? */ | 418 | /* This code entered only while writing ....? */ |
412 | 419 | ||
413 | mutex_lock(&UFS_I(inode)->truncate_mutex); | 420 | mutex_lock(&UFS_I(inode)->truncate_mutex); |
@@ -451,6 +458,11 @@ out: | |||
451 | } | 458 | } |
452 | mutex_unlock(&UFS_I(inode)->truncate_mutex); | 459 | mutex_unlock(&UFS_I(inode)->truncate_mutex); |
453 | return err; | 460 | return err; |
461 | |||
462 | done: | ||
463 | if (phys64) | ||
464 | map_bh(bh_result, sb, phys64 + frag); | ||
465 | return 0; | ||
454 | } | 466 | } |
455 | 467 | ||
456 | static int ufs_writepage(struct page *page, struct writeback_control *wbc) | 468 | static int ufs_writepage(struct page *page, struct writeback_control *wbc) |
@@ -554,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) | |||
554 | */ | 566 | */ |
555 | inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); | 567 | inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); |
556 | set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); | 568 | set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); |
557 | if (inode->i_nlink == 0) { | 569 | if (inode->i_nlink == 0) |
558 | ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); | 570 | return -ESTALE; |
559 | return -1; | ||
560 | } | ||
561 | 571 | ||
562 | /* | 572 | /* |
563 | * Linux now has 32-bit uid and gid, so we can support EFT. | 573 | * Linux now has 32-bit uid and gid, so we can support EFT. |
@@ -566,9 +576,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) | |||
566 | i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); | 576 | i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); |
567 | 577 | ||
568 | inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); | 578 | inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); |
569 | inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); | 579 | inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); |
570 | inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); | 580 | inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); |
571 | inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); | 581 | inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); |
572 | inode->i_mtime.tv_nsec = 0; | 582 | inode->i_mtime.tv_nsec = 0; |
573 | inode->i_atime.tv_nsec = 0; | 583 | inode->i_atime.tv_nsec = 0; |
574 | inode->i_ctime.tv_nsec = 0; | 584 | inode->i_ctime.tv_nsec = 0; |
@@ -602,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) | |||
602 | */ | 612 | */ |
603 | inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); | 613 | inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); |
604 | set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); | 614 | set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); |
605 | if (inode->i_nlink == 0) { | 615 | if (inode->i_nlink == 0) |
606 | ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); | 616 | return -ESTALE; |
607 | return -1; | ||
608 | } | ||
609 | 617 | ||
610 | /* | 618 | /* |
611 | * Linux now has 32-bit uid and gid, so we can support EFT. | 619 | * Linux now has 32-bit uid and gid, so we can support EFT. |
@@ -645,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino) | |||
645 | struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; | 653 | struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; |
646 | struct buffer_head * bh; | 654 | struct buffer_head * bh; |
647 | struct inode *inode; | 655 | struct inode *inode; |
648 | int err; | 656 | int err = -EIO; |
649 | 657 | ||
650 | UFSD("ENTER, ino %lu\n", ino); | 658 | UFSD("ENTER, ino %lu\n", ino); |
651 | 659 | ||
@@ -680,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino) | |||
680 | err = ufs1_read_inode(inode, | 688 | err = ufs1_read_inode(inode, |
681 | ufs_inode + ufs_inotofsbo(inode->i_ino)); | 689 | ufs_inode + ufs_inotofsbo(inode->i_ino)); |
682 | } | 690 | } |
683 | 691 | brelse(bh); | |
684 | if (err) | 692 | if (err) |
685 | goto bad_inode; | 693 | goto bad_inode; |
694 | |||
686 | inode->i_version++; | 695 | inode->i_version++; |
687 | ufsi->i_lastfrag = | 696 | ufsi->i_lastfrag = |
688 | (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; | 697 | (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; |
@@ -691,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino) | |||
691 | 700 | ||
692 | ufs_set_inode_ops(inode); | 701 | ufs_set_inode_ops(inode); |
693 | 702 | ||
694 | brelse(bh); | ||
695 | |||
696 | UFSD("EXIT\n"); | 703 | UFSD("EXIT\n"); |
697 | unlock_new_inode(inode); | 704 | unlock_new_inode(inode); |
698 | return inode; | 705 | return inode; |
699 | 706 | ||
700 | bad_inode: | 707 | bad_inode: |
701 | iget_failed(inode); | 708 | iget_failed(inode); |
702 | return ERR_PTR(-EIO); | 709 | return ERR_PTR(err); |
703 | } | 710 | } |
704 | 711 | ||
705 | static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) | 712 | static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) |
@@ -874,7 +881,6 @@ static inline void free_data(struct to_free *ctx, u64 from, unsigned count) | |||
874 | ctx->to = from + count; | 881 | ctx->to = from + count; |
875 | } | 882 | } |
876 | 883 | ||
877 | #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) | ||
878 | #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) | 884 | #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) |
879 | 885 | ||
880 | static void ufs_trunc_direct(struct inode *inode) | 886 | static void ufs_trunc_direct(struct inode *inode) |
@@ -1112,19 +1118,24 @@ static void ufs_truncate_blocks(struct inode *inode) | |||
1112 | struct super_block *sb = inode->i_sb; | 1118 | struct super_block *sb = inode->i_sb; |
1113 | struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; | 1119 | struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; |
1114 | unsigned offsets[4]; | 1120 | unsigned offsets[4]; |
1115 | int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets); | 1121 | int depth; |
1116 | int depth2; | 1122 | int depth2; |
1117 | unsigned i; | 1123 | unsigned i; |
1118 | struct ufs_buffer_head *ubh[3]; | 1124 | struct ufs_buffer_head *ubh[3]; |
1119 | void *p; | 1125 | void *p; |
1120 | u64 block; | 1126 | u64 block; |
1121 | 1127 | ||
1122 | if (!depth) | 1128 | if (inode->i_size) { |
1123 | return; | 1129 | sector_t last = (inode->i_size - 1) >> uspi->s_bshift; |
1130 | depth = ufs_block_to_path(inode, last, offsets); | ||
1131 | if (!depth) | ||
1132 | return; | ||
1133 | } else { | ||
1134 | depth = 1; | ||
1135 | } | ||
1124 | 1136 | ||
1125 | /* find the last non-zero in offsets[] */ | ||
1126 | for (depth2 = depth - 1; depth2; depth2--) | 1137 | for (depth2 = depth - 1; depth2; depth2--) |
1127 | if (offsets[depth2]) | 1138 | if (offsets[depth2] != uspi->s_apb - 1) |
1128 | break; | 1139 | break; |
1129 | 1140 | ||
1130 | mutex_lock(&ufsi->truncate_mutex); | 1141 | mutex_lock(&ufsi->truncate_mutex); |
@@ -1133,9 +1144,8 @@ static void ufs_truncate_blocks(struct inode *inode) | |||
1133 | offsets[0] = UFS_IND_BLOCK; | 1144 | offsets[0] = UFS_IND_BLOCK; |
1134 | } else { | 1145 | } else { |
1135 | /* get the blocks that should be partially emptied */ | 1146 | /* get the blocks that should be partially emptied */ |
1136 | p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]); | 1147 | p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++); |
1137 | for (i = 0; i < depth2; i++) { | 1148 | for (i = 0; i < depth2; i++) { |
1138 | offsets[i]++; /* next branch is fully freed */ | ||
1139 | block = ufs_data_ptr_to_cpu(sb, p); | 1149 | block = ufs_data_ptr_to_cpu(sb, p); |
1140 | if (!block) | 1150 | if (!block) |
1141 | break; | 1151 | break; |
@@ -1146,7 +1156,7 @@ static void ufs_truncate_blocks(struct inode *inode) | |||
1146 | write_sequnlock(&ufsi->meta_lock); | 1156 | write_sequnlock(&ufsi->meta_lock); |
1147 | break; | 1157 | break; |
1148 | } | 1158 | } |
1149 | p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]); | 1159 | p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++); |
1150 | } | 1160 | } |
1151 | while (i--) | 1161 | while (i--) |
1152 | free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); | 1162 | free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); |
@@ -1161,7 +1171,9 @@ static void ufs_truncate_blocks(struct inode *inode) | |||
1161 | free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); | 1171 | free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); |
1162 | } | 1172 | } |
1163 | } | 1173 | } |
1174 | read_seqlock_excl(&ufsi->meta_lock); | ||
1164 | ufsi->i_lastfrag = DIRECT_FRAGMENT; | 1175 | ufsi->i_lastfrag = DIRECT_FRAGMENT; |
1176 | read_sequnlock_excl(&ufsi->meta_lock); | ||
1165 | mark_inode_dirty(inode); | 1177 | mark_inode_dirty(inode); |
1166 | mutex_unlock(&ufsi->truncate_mutex); | 1178 | mutex_unlock(&ufsi->truncate_mutex); |
1167 | } | 1179 | } |
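A worked example (not from the commit) of why the ufs_truncate_blocks() hunk above switches from the rounded-up DIRECT_BLOCK macro to (i_size - 1) >> s_bshift: truncation needs the index of the last block still in use, and the rounded-up count points one block too far whenever i_size is an exact multiple of the block size; a zero size has no last block at all, hence the new special case. The block size below is an assumption.

    /* Illustrative arithmetic only, not kernel code. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long bsize = 4096, bshift = 12;   /* assumed block size */
        unsigned long long sizes[] = { 1, 4095, 4096, 4097, 8192 };

        for (int i = 0; i < 5; i++) {
            unsigned long long sz = sizes[i];
            unsigned long long rounded_up = (sz + bsize - 1) >> bshift; /* old DIRECT_BLOCK */
            unsigned long long last_index = (sz - 1) >> bshift;         /* new computation */
            printf("i_size=%-5llu rounded-up blocks=%llu last block index=%llu\n",
                   sz, rounded_up, last_index);
        }
        return 0;
    }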
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 878cc6264f1a..0a4f58a5073c 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -480,7 +480,7 @@ static void ufs_setup_cstotal(struct super_block *sb) | |||
480 | usb3 = ubh_get_usb_third(uspi); | 480 | usb3 = ubh_get_usb_third(uspi); |
481 | 481 | ||
482 | if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && | 482 | if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && |
483 | (usb1->fs_flags & UFS_FLAGS_UPDATED)) || | 483 | (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) || |
484 | mtype == UFS_MOUNT_UFSTYPE_UFS2) { | 484 | mtype == UFS_MOUNT_UFSTYPE_UFS2) { |
485 | /*we have statistic in different place, then usual*/ | 485 | /*we have statistic in different place, then usual*/ |
486 | uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir); | 486 | uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir); |
@@ -596,9 +596,7 @@ static void ufs_put_cstotal(struct super_block *sb) | |||
596 | usb2 = ubh_get_usb_second(uspi); | 596 | usb2 = ubh_get_usb_second(uspi); |
597 | usb3 = ubh_get_usb_third(uspi); | 597 | usb3 = ubh_get_usb_third(uspi); |
598 | 598 | ||
599 | if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && | 599 | if (mtype == UFS_MOUNT_UFSTYPE_UFS2) { |
600 | (usb1->fs_flags & UFS_FLAGS_UPDATED)) || | ||
601 | mtype == UFS_MOUNT_UFSTYPE_UFS2) { | ||
602 | /*we have statistic in different place, then usual*/ | 600 | /*we have statistic in different place, then usual*/ |
603 | usb2->fs_un.fs_u2.cs_ndir = | 601 | usb2->fs_un.fs_u2.cs_ndir = |
604 | cpu_to_fs64(sb, uspi->cs_total.cs_ndir); | 602 | cpu_to_fs64(sb, uspi->cs_total.cs_ndir); |
@@ -608,16 +606,26 @@ static void ufs_put_cstotal(struct super_block *sb) | |||
608 | cpu_to_fs64(sb, uspi->cs_total.cs_nifree); | 606 | cpu_to_fs64(sb, uspi->cs_total.cs_nifree); |
609 | usb3->fs_un1.fs_u2.cs_nffree = | 607 | usb3->fs_un1.fs_u2.cs_nffree = |
610 | cpu_to_fs64(sb, uspi->cs_total.cs_nffree); | 608 | cpu_to_fs64(sb, uspi->cs_total.cs_nffree); |
611 | } else { | 609 | goto out; |
612 | usb1->fs_cstotal.cs_ndir = | 610 | } |
613 | cpu_to_fs32(sb, uspi->cs_total.cs_ndir); | 611 | |
614 | usb1->fs_cstotal.cs_nbfree = | 612 | if (mtype == UFS_MOUNT_UFSTYPE_44BSD && |
615 | cpu_to_fs32(sb, uspi->cs_total.cs_nbfree); | 613 | (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) { |
616 | usb1->fs_cstotal.cs_nifree = | 614 | /* store stats in both old and new places */ |
617 | cpu_to_fs32(sb, uspi->cs_total.cs_nifree); | 615 | usb2->fs_un.fs_u2.cs_ndir = |
618 | usb1->fs_cstotal.cs_nffree = | 616 | cpu_to_fs64(sb, uspi->cs_total.cs_ndir); |
619 | cpu_to_fs32(sb, uspi->cs_total.cs_nffree); | 617 | usb2->fs_un.fs_u2.cs_nbfree = |
618 | cpu_to_fs64(sb, uspi->cs_total.cs_nbfree); | ||
619 | usb3->fs_un1.fs_u2.cs_nifree = | ||
620 | cpu_to_fs64(sb, uspi->cs_total.cs_nifree); | ||
621 | usb3->fs_un1.fs_u2.cs_nffree = | ||
622 | cpu_to_fs64(sb, uspi->cs_total.cs_nffree); | ||
620 | } | 623 | } |
624 | usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir); | ||
625 | usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree); | ||
626 | usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree); | ||
627 | usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree); | ||
628 | out: | ||
621 | ubh_mark_buffer_dirty(USPI_UBH(uspi)); | 629 | ubh_mark_buffer_dirty(USPI_UBH(uspi)); |
622 | ufs_print_super_stuff(sb, usb1, usb2, usb3); | 630 | ufs_print_super_stuff(sb, usb1, usb2, usb3); |
623 | UFSD("EXIT\n"); | 631 | UFSD("EXIT\n"); |
@@ -996,6 +1004,13 @@ again: | |||
996 | flags |= UFS_ST_SUN; | 1004 | flags |= UFS_ST_SUN; |
997 | } | 1005 | } |
998 | 1006 | ||
1007 | if ((flags & UFS_ST_MASK) == UFS_ST_44BSD && | ||
1008 | uspi->s_postblformat == UFS_42POSTBLFMT) { | ||
1009 | if (!silent) | ||
1010 | pr_err("this is not a 44bsd filesystem"); | ||
1011 | goto failed; | ||
1012 | } | ||
1013 | |||
999 | /* | 1014 | /* |
1000 | * Check ufs magic number | 1015 | * Check ufs magic number |
1001 | */ | 1016 | */ |
@@ -1143,8 +1158,8 @@ magic_found: | |||
1143 | uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask); | 1158 | uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask); |
1144 | 1159 | ||
1145 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { | 1160 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { |
1146 | uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); | 1161 | uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); |
1147 | uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); | 1162 | uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); |
1148 | } else { | 1163 | } else { |
1149 | uspi->s_size = fs32_to_cpu(sb, usb1->fs_size); | 1164 | uspi->s_size = fs32_to_cpu(sb, usb1->fs_size); |
1150 | uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize); | 1165 | uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize); |
@@ -1193,6 +1208,18 @@ magic_found: | |||
1193 | uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff); | 1208 | uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff); |
1194 | uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff); | 1209 | uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff); |
1195 | 1210 | ||
1211 | uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, | ||
1212 | uspi->s_minfree, 100); | ||
1213 | if (uspi->s_minfree <= 5) { | ||
1214 | uspi->s_time_to_space = ~0ULL; | ||
1215 | uspi->s_space_to_time = 0; | ||
1216 | usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); | ||
1217 | } else { | ||
1218 | uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1; | ||
1219 | uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize, | ||
1220 | uspi->s_minfree - 2, 100) - 1; | ||
1221 | } | ||
1222 | |||
1196 | /* | 1223 | /* |
1197 | * Compute other frequently used values | 1224 | * Compute other frequently used values |
1198 | */ | 1225 | */ |
@@ -1382,19 +1409,17 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
1382 | mutex_lock(&UFS_SB(sb)->s_lock); | 1409 | mutex_lock(&UFS_SB(sb)->s_lock); |
1383 | usb3 = ubh_get_usb_third(uspi); | 1410 | usb3 = ubh_get_usb_third(uspi); |
1384 | 1411 | ||
1385 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { | 1412 | if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) |
1386 | buf->f_type = UFS2_MAGIC; | 1413 | buf->f_type = UFS2_MAGIC; |
1387 | buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); | 1414 | else |
1388 | } else { | ||
1389 | buf->f_type = UFS_MAGIC; | 1415 | buf->f_type = UFS_MAGIC; |
1390 | buf->f_blocks = uspi->s_dsize; | 1416 | |
1391 | } | 1417 | buf->f_blocks = uspi->s_dsize; |
1392 | buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) + | 1418 | buf->f_bfree = ufs_freefrags(uspi); |
1393 | uspi->cs_total.cs_nffree; | ||
1394 | buf->f_ffree = uspi->cs_total.cs_nifree; | 1419 | buf->f_ffree = uspi->cs_total.cs_nifree; |
1395 | buf->f_bsize = sb->s_blocksize; | 1420 | buf->f_bsize = sb->s_blocksize; |
1396 | buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree)) | 1421 | buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks) |
1397 | ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0; | 1422 | ? (buf->f_bfree - uspi->s_root_blocks) : 0; |
1398 | buf->f_files = uspi->s_ncg * uspi->s_ipg; | 1423 | buf->f_files = uspi->s_ncg * uspi->s_ipg; |
1399 | buf->f_namelen = UFS_MAXNAMLEN; | 1424 | buf->f_namelen = UFS_MAXNAMLEN; |
1400 | buf->f_fsid.val[0] = (u32)id; | 1425 | buf->f_fsid.val[0] = (u32)id; |
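An illustrative statfs() calculation (not part of the commit) showing what the ufs_statfs() hunk above now reports: s_root_blocks caches the root reserve, dsize * minfree / 100, at mount time, so f_bavail becomes a simple saturating subtraction instead of recomputing the percentage on every call. The kernel uses mul_u64_u32_div() for the multiply to avoid 64-bit overflow; the sketch below simply assumes the product fits.

    /* Illustrative only, not kernel code; the filesystem geometry is made up. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long dsize = 10000000;  /* data fragments in the fs */
        unsigned minfree = 8;                 /* reserved percentage */
        unsigned long long root_blocks = dsize * minfree / 100;  /* 800000 */
        unsigned long long bfree = 1200000;   /* free fragments right now */
        unsigned long long bavail = bfree > root_blocks ? bfree - root_blocks : 0;

        printf("f_blocks=%llu f_bfree=%llu reserve=%llu f_bavail=%llu\n",
               dsize, bfree, root_blocks, bavail);  /* f_bavail=400000 */
        return 0;
    }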
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h index 0cbd5d340b67..150eef6f1233 100644 --- a/fs/ufs/ufs_fs.h +++ b/fs/ufs/ufs_fs.h | |||
@@ -733,10 +733,8 @@ struct ufs_sb_private_info { | |||
733 | __u32 s_dblkno; /* offset of first data after cg */ | 733 | __u32 s_dblkno; /* offset of first data after cg */ |
734 | __u32 s_cgoffset; /* cylinder group offset in cylinder */ | 734 | __u32 s_cgoffset; /* cylinder group offset in cylinder */ |
735 | __u32 s_cgmask; /* used to calc mod fs_ntrak */ | 735 | __u32 s_cgmask; /* used to calc mod fs_ntrak */ |
736 | __u32 s_size; /* number of blocks (fragments) in fs */ | 736 | __u64 s_size; /* number of blocks (fragments) in fs */ |
737 | __u32 s_dsize; /* number of data blocks in fs */ | 737 | __u64 s_dsize; /* number of data blocks in fs */ |
738 | __u64 s_u2_size; /* ufs2: number of blocks (fragments) in fs */ | ||
739 | __u64 s_u2_dsize; /*ufs2: number of data blocks in fs */ | ||
740 | __u32 s_ncg; /* number of cylinder groups */ | 738 | __u32 s_ncg; /* number of cylinder groups */ |
741 | __u32 s_bsize; /* size of basic blocks */ | 739 | __u32 s_bsize; /* size of basic blocks */ |
742 | __u32 s_fsize; /* size of fragments */ | 740 | __u32 s_fsize; /* size of fragments */ |
@@ -793,6 +791,9 @@ struct ufs_sb_private_info { | |||
793 | __u32 s_maxsymlinklen;/* upper limit on fast symlinks' size */ | 791 | __u32 s_maxsymlinklen;/* upper limit on fast symlinks' size */ |
794 | __s32 fs_magic; /* filesystem magic */ | 792 | __s32 fs_magic; /* filesystem magic */ |
795 | unsigned int s_dirblksize; | 793 | unsigned int s_dirblksize; |
794 | __u64 s_root_blocks; | ||
795 | __u64 s_time_to_space; | ||
796 | __u64 s_space_to_time; | ||
796 | }; | 797 | }; |
797 | 798 | ||
798 | /* | 799 | /* |
diff --git a/fs/ufs/util.c b/fs/ufs/util.c index f41ad0a6106f..02497a492eb2 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c | |||
@@ -243,9 +243,8 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev | |||
243 | struct page *ufs_get_locked_page(struct address_space *mapping, | 243 | struct page *ufs_get_locked_page(struct address_space *mapping, |
244 | pgoff_t index) | 244 | pgoff_t index) |
245 | { | 245 | { |
246 | struct page *page; | 246 | struct inode *inode = mapping->host; |
247 | 247 | struct page *page = find_lock_page(mapping, index); | |
248 | page = find_lock_page(mapping, index); | ||
249 | if (!page) { | 248 | if (!page) { |
250 | page = read_mapping_page(mapping, index, NULL); | 249 | page = read_mapping_page(mapping, index, NULL); |
251 | 250 | ||
@@ -253,7 +252,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping, | |||
253 | printk(KERN_ERR "ufs_change_blocknr: " | 252 | printk(KERN_ERR "ufs_change_blocknr: " |
254 | "read_mapping_page error: ino %lu, index: %lu\n", | 253 | "read_mapping_page error: ino %lu, index: %lu\n", |
255 | mapping->host->i_ino, index); | 254 | mapping->host->i_ino, index); |
256 | goto out; | 255 | return page; |
257 | } | 256 | } |
258 | 257 | ||
259 | lock_page(page); | 258 | lock_page(page); |
@@ -262,8 +261,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping, | |||
262 | /* Truncate got there first */ | 261 | /* Truncate got there first */ |
263 | unlock_page(page); | 262 | unlock_page(page); |
264 | put_page(page); | 263 | put_page(page); |
265 | page = NULL; | 264 | return NULL; |
266 | goto out; | ||
267 | } | 265 | } |
268 | 266 | ||
269 | if (!PageUptodate(page) || PageError(page)) { | 267 | if (!PageUptodate(page) || PageError(page)) { |
@@ -272,11 +270,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping, | |||
272 | 270 | ||
273 | printk(KERN_ERR "ufs_change_blocknr: " | 271 | printk(KERN_ERR "ufs_change_blocknr: " |
274 | "can not read page: ino %lu, index: %lu\n", | 272 | "can not read page: ino %lu, index: %lu\n", |
275 | mapping->host->i_ino, index); | 273 | inode->i_ino, index); |
276 | 274 | ||
277 | page = ERR_PTR(-EIO); | 275 | return ERR_PTR(-EIO); |
278 | } | 276 | } |
279 | } | 277 | } |
280 | out: | 278 | if (!page_has_buffers(page)) |
279 | create_empty_buffers(page, 1 << inode->i_blkbits, 0); | ||
281 | return page; | 280 | return page; |
282 | } | 281 | } |
diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 398019fb1448..9fc7119a1551 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h | |||
@@ -350,16 +350,11 @@ static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi, | |||
350 | #define ubh_blkmap(ubh,begin,bit) \ | 350 | #define ubh_blkmap(ubh,begin,bit) \ |
351 | ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb))) | 351 | ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb))) |
352 | 352 | ||
353 | /* | ||
354 | * Determine the number of available frags given a | ||
355 | * percentage to hold in reserve. | ||
356 | */ | ||
357 | static inline u64 | 353 | static inline u64 |
358 | ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved) | 354 | ufs_freefrags(struct ufs_sb_private_info *uspi) |
359 | { | 355 | { |
360 | return ufs_blkstofrags(uspi->cs_total.cs_nbfree) + | 356 | return ufs_blkstofrags(uspi->cs_total.cs_nbfree) + |
361 | uspi->cs_total.cs_nffree - | 357 | uspi->cs_total.cs_nffree; |
362 | (uspi->s_dsize * (percentreserved) / 100); | ||
363 | } | 358 | } |
364 | 359 | ||
365 | /* | 360 | /* |
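For reference (not in the commit): the renamed helper simply converts whole free blocks into fragments and adds the loose free fragments; the reserve that ufs_freespace() used to subtract is now applied by the ufs_statfs() caller shown earlier. A tiny sketch of the arithmetic, with an assumed fragments-per-block shift:

    /* Illustrative only, not kernel code. */
    #include <stdio.h>

    int main(void)
    {
        unsigned fpbshift = 3;             /* 2^3 = 8 fragments per block (assumed) */
        unsigned long long nbfree = 100;   /* whole free blocks */
        unsigned long long nffree = 37;    /* loose free fragments */

        printf("free fragments = %llu\n", (nbfree << fpbshift) + nffree);  /* 837 */
        return 0;
    }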
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index f7555fc25877..1d622f276e3a 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -340,9 +340,28 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
340 | bool must_wait, return_to_userland; | 340 | bool must_wait, return_to_userland; |
341 | long blocking_state; | 341 | long blocking_state; |
342 | 342 | ||
343 | BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); | ||
344 | |||
345 | ret = VM_FAULT_SIGBUS; | 343 | ret = VM_FAULT_SIGBUS; |
344 | |||
345 | /* | ||
346 | * We don't do userfault handling for the final child pid update. | ||
347 | * | ||
348 | * We also don't do userfault handling during | ||
349 | * coredumping. hugetlbfs has the special | ||
350 | * follow_hugetlb_page() to skip missing pages in the | ||
351 | * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with | ||
352 | * the no_page_table() helper in follow_page_mask(), but the | ||
353 | * shmem_vm_ops->fault method is invoked even during | ||
354 | * coredumping without mmap_sem and it ends up here. | ||
355 | */ | ||
356 | if (current->flags & (PF_EXITING|PF_DUMPCORE)) | ||
357 | goto out; | ||
358 | |||
359 | /* | ||
360 | * Coredumping runs without mmap_sem so we can only check that | ||
361 | * the mmap_sem is held, if PF_DUMPCORE was not set. | ||
362 | */ | ||
363 | WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem)); | ||
364 | |||
346 | ctx = vmf->vma->vm_userfaultfd_ctx.ctx; | 365 | ctx = vmf->vma->vm_userfaultfd_ctx.ctx; |
347 | if (!ctx) | 366 | if (!ctx) |
348 | goto out; | 367 | goto out; |
@@ -361,12 +380,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
361 | goto out; | 380 | goto out; |
362 | 381 | ||
363 | /* | 382 | /* |
364 | * We don't do userfault handling for the final child pid update. | ||
365 | */ | ||
366 | if (current->flags & PF_EXITING) | ||
367 | goto out; | ||
368 | |||
369 | /* | ||
370 | * Check that we can return VM_FAULT_RETRY. | 383 | * Check that we can return VM_FAULT_RETRY. |
371 | * | 384 | * |
372 | * NOTE: it should become possible to return VM_FAULT_RETRY | 385 | * NOTE: it should become possible to return VM_FAULT_RETRY |
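Not from the patch: a minimal userspace model of the new early bail-out in handle_userfault(). The point of the hunk is ordering: the PF_EXITING/PF_DUMPCORE test now happens before anything that assumes mmap_sem is held, because the coredump path reaches the fault handler without it, and the old BUG_ON becomes a WARN_ON_ONCE guarded by that test. The flag values below match my understanding of include/linux/sched.h but are assumptions here.

    /* Standalone model of the bail-out test only; not kernel code. */
    #include <stdio.h>

    #define PF_EXITING  0x00000004u   /* assumed value */
    #define PF_DUMPCORE 0x00000200u   /* assumed value */

    static int skip_userfault(unsigned int task_flags)
    {
        /* final child pid update and coredumping never take the userfault path */
        return (task_flags & (PF_EXITING | PF_DUMPCORE)) != 0;
    }

    int main(void)
    {
        printf("normal fault: %d\n", skip_userfault(0));            /* 0 */
        printf("coredump:     %d\n", skip_userfault(PF_DUMPCORE));  /* 1 */
        printf("exiting task: %d\n", skip_userfault(PF_EXITING));   /* 1 */
        return 0;
    }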
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 09af0f7cd55e..3b91faacc1ba 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -1316,9 +1316,12 @@ xfs_vm_bmap( | |||
1316 | * The swap code (ab-)uses ->bmap to get a block mapping and then | 1316 | * The swap code (ab-)uses ->bmap to get a block mapping and then |
1317 | * bypasseѕ the file system for actual I/O. We really can't allow | 1317 | * bypasseѕ the file system for actual I/O. We really can't allow |
1318 | * that on reflinks inodes, so we have to skip out here. And yes, | 1318 | * that on reflinks inodes, so we have to skip out here. And yes, |
1319 | * 0 is the magic code for a bmap error.. | 1319 | * 0 is the magic code for a bmap error. |
1320 | * | ||
1321 | * Since we don't pass back blockdev info, we can't return bmap | ||
1322 | * information for rt files either. | ||
1320 | */ | 1323 | */ |
1321 | if (xfs_is_reflink_inode(ip)) | 1324 | if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip)) |
1322 | return 0; | 1325 | return 0; |
1323 | 1326 | ||
1324 | filemap_write_and_wait(mapping); | 1327 | filemap_write_and_wait(mapping); |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 07b77b73b024..16d6a578fc16 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -117,7 +117,7 @@ static inline void | |||
117 | __xfs_buf_ioacct_dec( | 117 | __xfs_buf_ioacct_dec( |
118 | struct xfs_buf *bp) | 118 | struct xfs_buf *bp) |
119 | { | 119 | { |
120 | ASSERT(spin_is_locked(&bp->b_lock)); | 120 | lockdep_assert_held(&bp->b_lock); |
121 | 121 | ||
122 | if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { | 122 | if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { |
123 | bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; | 123 | bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; |
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index f61c84f8e31a..990210fcb9c3 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c | |||
@@ -66,7 +66,6 @@ xfs_inode_alloc( | |||
66 | 66 | ||
67 | XFS_STATS_INC(mp, vn_active); | 67 | XFS_STATS_INC(mp, vn_active); |
68 | ASSERT(atomic_read(&ip->i_pincount) == 0); | 68 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
69 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); | ||
70 | ASSERT(!xfs_isiflocked(ip)); | 69 | ASSERT(!xfs_isiflocked(ip)); |
71 | ASSERT(ip->i_ino == 0); | 70 | ASSERT(ip->i_ino == 0); |
72 | 71 | ||
@@ -190,7 +189,7 @@ xfs_perag_set_reclaim_tag( | |||
190 | { | 189 | { |
191 | struct xfs_mount *mp = pag->pag_mount; | 190 | struct xfs_mount *mp = pag->pag_mount; |
192 | 191 | ||
193 | ASSERT(spin_is_locked(&pag->pag_ici_lock)); | 192 | lockdep_assert_held(&pag->pag_ici_lock); |
194 | if (pag->pag_ici_reclaimable++) | 193 | if (pag->pag_ici_reclaimable++) |
195 | return; | 194 | return; |
196 | 195 | ||
@@ -212,7 +211,7 @@ xfs_perag_clear_reclaim_tag( | |||
212 | { | 211 | { |
213 | struct xfs_mount *mp = pag->pag_mount; | 212 | struct xfs_mount *mp = pag->pag_mount; |
214 | 213 | ||
215 | ASSERT(spin_is_locked(&pag->pag_ici_lock)); | 214 | lockdep_assert_held(&pag->pag_ici_lock); |
216 | if (--pag->pag_ici_reclaimable) | 215 | if (--pag->pag_ici_reclaimable) |
217 | return; | 216 | return; |
218 | 217 | ||
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 197f3fffc9a7..408c7820e200 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -210,7 +210,8 @@ struct acpi_device_flags { | |||
210 | u32 of_compatible_ok:1; | 210 | u32 of_compatible_ok:1; |
211 | u32 coherent_dma:1; | 211 | u32 coherent_dma:1; |
212 | u32 cca_seen:1; | 212 | u32 cca_seen:1; |
213 | u32 reserved:20; | 213 | u32 spi_i2c_slave:1; |
214 | u32 reserved:19; | ||
214 | }; | 215 | }; |
215 | 216 | ||
216 | /* File System */ | 217 | /* File System */ |
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index d92543f3bbfd..bdc55c0da19c 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h | |||
@@ -374,6 +374,20 @@ struct acpi_table_desc { | |||
374 | u16 validation_count; | 374 | u16 validation_count; |
375 | }; | 375 | }; |
376 | 376 | ||
377 | /* | ||
378 | * Maximum value of the validation_count field in struct acpi_table_desc. | ||
379 | * When reached, validation_count cannot be changed any more and the table will | ||
380 | * be permanently regarded as validated. | ||
381 | * | ||
382 | * This is to prevent situations in which unbalanced table get/put operations | ||
383 | * may cause premature table unmapping in the OS to happen. | ||
384 | * | ||
385 | * The maximum validation count can be defined to any value, but should be | ||
386 | * greater than the maximum number of OS early stage mapping slots to avoid | ||
387 | * leaking early stage table mappings to the late stage. | ||
388 | */ | ||
389 | #define ACPI_MAX_TABLE_VALIDATIONS ACPI_UINT16_MAX | ||
390 | |||
377 | /* Masks for Flags field above */ | 391 | /* Masks for Flags field above */ |
378 | 392 | ||
379 | #define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, external maintained */ | 393 | #define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, external maintained */ |
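A standalone model (not the ACPICA implementation) of the saturating counter that the new ACPI_MAX_TABLE_VALIDATIONS comment describes: once validation_count reaches the ceiling, further get/put calls no longer change it, so an unbalanced put can never unmap a table that early-stage code may still reference.

    /* Model of a saturating get/put validation counter; illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VALIDATIONS UINT16_MAX

    static uint16_t validation_count;

    static void table_get(void)
    {
        if (validation_count < MAX_VALIDATIONS)
            validation_count++;            /* saturates at the ceiling */
    }

    static void table_put(void)
    {
        /* a saturated table is permanently regarded as validated */
        if (validation_count > 0 && validation_count < MAX_VALIDATIONS)
            validation_count--;
    }

    int main(void)
    {
        validation_count = MAX_VALIDATIONS - 1;
        table_get();          /* reaches the ceiling */
        table_put();
        table_put();          /* unbalanced puts no longer matter */
        printf("count=%u\n", validation_count);   /* still 65535 */
        return 0;
    }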
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h index 370c0a0473fc..d66432c6e675 100644 --- a/include/dt-bindings/clock/sun50i-a64-ccu.h +++ b/include/dt-bindings/clock/sun50i-a64-ccu.h | |||
@@ -43,6 +43,8 @@ | |||
43 | #ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ | 43 | #ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ |
44 | #define _DT_BINDINGS_CLK_SUN50I_A64_H_ | 44 | #define _DT_BINDINGS_CLK_SUN50I_A64_H_ |
45 | 45 | ||
46 | #define CLK_PLL_PERIPH0 11 | ||
47 | |||
46 | #define CLK_BUS_MIPI_DSI 28 | 48 | #define CLK_BUS_MIPI_DSI 28 |
47 | #define CLK_BUS_CE 29 | 49 | #define CLK_BUS_CE 29 |
48 | #define CLK_BUS_DMA 30 | 50 | #define CLK_BUS_DMA 30 |
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h index c2afc41d6964..e139fe5c62ec 100644 --- a/include/dt-bindings/clock/sun8i-h3-ccu.h +++ b/include/dt-bindings/clock/sun8i-h3-ccu.h | |||
@@ -43,6 +43,8 @@ | |||
43 | #ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ | 43 | #ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ |
44 | #define _DT_BINDINGS_CLK_SUN8I_H3_H_ | 44 | #define _DT_BINDINGS_CLK_SUN8I_H3_H_ |
45 | 45 | ||
46 | #define CLK_PLL_PERIPH0 9 | ||
47 | |||
46 | #define CLK_CPUX 14 | 48 | #define CLK_CPUX 14 |
47 | 49 | ||
48 | #define CLK_BUS_CE 20 | 50 | #define CLK_BUS_CE 20 |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ab92c4ea138b..1ddd36bd2173 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -391,6 +391,8 @@ struct request_queue { | |||
391 | int nr_rqs[2]; /* # allocated [a]sync rqs */ | 391 | int nr_rqs[2]; /* # allocated [a]sync rqs */ |
392 | int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ | 392 | int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ |
393 | 393 | ||
394 | atomic_t shared_hctx_restart; | ||
395 | |||
394 | struct blk_queue_stats *stats; | 396 | struct blk_queue_stats *stats; |
395 | struct rq_wb *rq_wb; | 397 | struct rq_wb *rq_wb; |
396 | 398 | ||
@@ -586,6 +588,8 @@ struct request_queue { | |||
586 | 588 | ||
587 | size_t cmd_size; | 589 | size_t cmd_size; |
588 | void *rq_alloc_data; | 590 | void *rq_alloc_data; |
591 | |||
592 | struct work_struct release_work; | ||
589 | }; | 593 | }; |
590 | 594 | ||
591 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 595 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 2319b8c108e8..c96709049683 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h | |||
@@ -74,7 +74,8 @@ extern void config_item_init_type_name(struct config_item *item, | |||
74 | const char *name, | 74 | const char *name, |
75 | struct config_item_type *type); | 75 | struct config_item_type *type); |
76 | 76 | ||
77 | extern struct config_item * config_item_get(struct config_item *); | 77 | extern struct config_item *config_item_get(struct config_item *); |
78 | extern struct config_item *config_item_get_unless_zero(struct config_item *); | ||
78 | extern void config_item_put(struct config_item *); | 79 | extern void config_item_put(struct config_item *); |
79 | 80 | ||
80 | struct config_item_type { | 81 | struct config_item_type { |
diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 5e9c74cf8894..9bbf21a516e4 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h | |||
@@ -136,7 +136,7 @@ static inline int dmi_name_in_vendors(const char *s) { return 0; } | |||
136 | static inline int dmi_name_in_serial(const char *s) { return 0; } | 136 | static inline int dmi_name_in_serial(const char *s) { return 0; } |
137 | #define dmi_available 0 | 137 | #define dmi_available 0 |
138 | static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), | 138 | static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), |
139 | void *private_data) { return -1; } | 139 | void *private_data) { return -ENXIO; } |
140 | static inline bool dmi_match(enum dmi_field f, const char *str) | 140 | static inline bool dmi_match(enum dmi_field f, const char *str) |
141 | { return false; } | 141 | { return false; } |
142 | static inline void dmi_memdev_name(u16 handle, const char **bank, | 142 | static inline void dmi_memdev_name(u16 handle, const char **bank, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index b892e95d4929..6f543a47fc92 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1393,12 +1393,6 @@ int clear_page_dirty_for_io(struct page *page); | |||
1393 | 1393 | ||
1394 | int get_cmdline(struct task_struct *task, char *buffer, int buflen); | 1394 | int get_cmdline(struct task_struct *task, char *buffer, int buflen); |
1395 | 1395 | ||
1396 | /* Is the vma a continuation of the stack vma above it? */ | ||
1397 | static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) | ||
1398 | { | ||
1399 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
1400 | } | ||
1401 | |||
1402 | static inline bool vma_is_anonymous(struct vm_area_struct *vma) | 1396 | static inline bool vma_is_anonymous(struct vm_area_struct *vma) |
1403 | { | 1397 | { |
1404 | return !vma->vm_ops; | 1398 | return !vma->vm_ops; |
@@ -1414,28 +1408,6 @@ bool vma_is_shmem(struct vm_area_struct *vma); | |||
1414 | static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } | 1408 | static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } |
1415 | #endif | 1409 | #endif |
1416 | 1410 | ||
1417 | static inline int stack_guard_page_start(struct vm_area_struct *vma, | ||
1418 | unsigned long addr) | ||
1419 | { | ||
1420 | return (vma->vm_flags & VM_GROWSDOWN) && | ||
1421 | (vma->vm_start == addr) && | ||
1422 | !vma_growsdown(vma->vm_prev, addr); | ||
1423 | } | ||
1424 | |||
1425 | /* Is the vma a continuation of the stack vma below it? */ | ||
1426 | static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) | ||
1427 | { | ||
1428 | return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); | ||
1429 | } | ||
1430 | |||
1431 | static inline int stack_guard_page_end(struct vm_area_struct *vma, | ||
1432 | unsigned long addr) | ||
1433 | { | ||
1434 | return (vma->vm_flags & VM_GROWSUP) && | ||
1435 | (vma->vm_end == addr) && | ||
1436 | !vma_growsup(vma->vm_next, addr); | ||
1437 | } | ||
1438 | |||
1439 | int vma_is_stack_for_current(struct vm_area_struct *vma); | 1411 | int vma_is_stack_for_current(struct vm_area_struct *vma); |
1440 | 1412 | ||
1441 | extern unsigned long move_page_tables(struct vm_area_struct *vma, | 1413 | extern unsigned long move_page_tables(struct vm_area_struct *vma, |
@@ -2222,6 +2194,7 @@ void page_cache_async_readahead(struct address_space *mapping, | |||
2222 | pgoff_t offset, | 2194 | pgoff_t offset, |
2223 | unsigned long size); | 2195 | unsigned long size); |
2224 | 2196 | ||
2197 | extern unsigned long stack_guard_gap; | ||
2225 | /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ | 2198 | /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ |
2226 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); | 2199 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); |
2227 | 2200 | ||
@@ -2250,6 +2223,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m | |||
2250 | return vma; | 2223 | return vma; |
2251 | } | 2224 | } |
2252 | 2225 | ||
2226 | static inline unsigned long vm_start_gap(struct vm_area_struct *vma) | ||
2227 | { | ||
2228 | unsigned long vm_start = vma->vm_start; | ||
2229 | |||
2230 | if (vma->vm_flags & VM_GROWSDOWN) { | ||
2231 | vm_start -= stack_guard_gap; | ||
2232 | if (vm_start > vma->vm_start) | ||
2233 | vm_start = 0; | ||
2234 | } | ||
2235 | return vm_start; | ||
2236 | } | ||
2237 | |||
2238 | static inline unsigned long vm_end_gap(struct vm_area_struct *vma) | ||
2239 | { | ||
2240 | unsigned long vm_end = vma->vm_end; | ||
2241 | |||
2242 | if (vma->vm_flags & VM_GROWSUP) { | ||
2243 | vm_end += stack_guard_gap; | ||
2244 | if (vm_end < vma->vm_end) | ||
2245 | vm_end = -PAGE_SIZE; | ||
2246 | } | ||
2247 | return vm_end; | ||
2248 | } | ||
2249 | |||
2253 | static inline unsigned long vma_pages(struct vm_area_struct *vma) | 2250 | static inline unsigned long vma_pages(struct vm_area_struct *vma) |
2254 | { | 2251 | { |
2255 | return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 2252 | return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
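A standalone sketch (not kernel code) of what the new vm_start_gap()/vm_end_gap() helpers compute: for a stack that grows down, the effective start of the mapping is pulled stack_guard_gap bytes lower so neighbouring mappings and the gap-aware VMA search keep that distance free, and an underflow past address zero is clamped. The gap size below assumes the usual default of 256 pages of 4 KiB.

    /* Illustrative model of the guard-gap arithmetic; not kernel code. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    static unsigned long stack_guard_gap = 256UL * PAGE_SIZE;   /* assumed default */

    static unsigned long start_gap(unsigned long vm_start, int grows_down)
    {
        if (grows_down) {
            unsigned long s = vm_start - stack_guard_gap;
            return s > vm_start ? 0 : s;    /* clamp if we wrapped below 0 */
        }
        return vm_start;
    }

    int main(void)
    {
        /* typical stack VMA: effective start is 1 MiB below vm_start */
        printf("%#lx\n", start_gap(0x7ffff7a00000UL, 1));
        /* a VMA starting near address 0: the subtraction wraps, so clamp to 0 */
        printf("%#lx\n", start_gap(0x1000UL, 1));
        return 0;
    }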
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3f39d27decf4..4ed952c17fc7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -914,8 +914,7 @@ struct xfrmdev_ops { | |||
914 | * | 914 | * |
915 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | 915 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); |
916 | * Called when a user wants to change the Maximum Transfer Unit | 916 | * Called when a user wants to change the Maximum Transfer Unit |
917 | * of a device. If not defined, any request to change MTU will | 917 | * of a device. |
918 | * will return an error. | ||
919 | * | 918 | * |
920 | * void (*ndo_tx_timeout)(struct net_device *dev); | 919 | * void (*ndo_tx_timeout)(struct net_device *dev); |
921 | * Callback used when the transmitter has not made any progress | 920 | * Callback used when the transmitter has not made any progress |
@@ -1596,8 +1595,8 @@ enum netdev_priv_flags { | |||
1596 | * @rtnl_link_state: This enum represents the phases of creating | 1595 | * @rtnl_link_state: This enum represents the phases of creating |
1597 | * a new link | 1596 | * a new link |
1598 | * | 1597 | * |
1599 | * @destructor: Called from unregister, | 1598 | * @needs_free_netdev: Should unregister perform free_netdev? |
1600 | * can be used to call free_netdev | 1599 | * @priv_destructor: Called from unregister |
1601 | * @npinfo: XXX: need comments on this one | 1600 | * @npinfo: XXX: need comments on this one |
1602 | * @nd_net: Network namespace this network device is inside | 1601 | * @nd_net: Network namespace this network device is inside |
1603 | * | 1602 | * |
@@ -1858,7 +1857,8 @@ struct net_device { | |||
1858 | RTNL_LINK_INITIALIZING, | 1857 | RTNL_LINK_INITIALIZING, |
1859 | } rtnl_link_state:16; | 1858 | } rtnl_link_state:16; |
1860 | 1859 | ||
1861 | void (*destructor)(struct net_device *dev); | 1860 | bool needs_free_netdev; |
1861 | void (*priv_destructor)(struct net_device *dev); | ||
1862 | 1862 | ||
1863 | #ifdef CONFIG_NETPOLL | 1863 | #ifdef CONFIG_NETPOLL |
1864 | struct netpoll_info __rcu *npinfo; | 1864 | struct netpoll_info __rcu *npinfo; |
@@ -4261,6 +4261,11 @@ static inline const char *netdev_name(const struct net_device *dev) | |||
4261 | return dev->name; | 4261 | return dev->name; |
4262 | } | 4262 | } |
4263 | 4263 | ||
4264 | static inline bool netdev_unregistering(const struct net_device *dev) | ||
4265 | { | ||
4266 | return dev->reg_state == NETREG_UNREGISTERING; | ||
4267 | } | ||
4268 | |||
4264 | static inline const char *netdev_reg_state(const struct net_device *dev) | 4269 | static inline const char *netdev_reg_state(const struct net_device *dev) |
4265 | { | 4270 | { |
4266 | switch (dev->reg_state) { | 4271 | switch (dev->reg_state) { |
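A hedged sketch (hypothetical driver, not from this commit) of how a virtual-device driver is expected to use the split introduced above: needs_free_netdev tells the core to call free_netdev() during unregistration, while priv_destructor only releases driver-private state, so the two concerns the old destructor callback mixed are now separate fields.

    /* Hypothetical driver fragment; names and fields beyond the diff are assumptions. */
    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/slab.h>

    struct mydrv_priv {
        void *ring;                      /* some driver-private allocation */
    };

    static void mydrv_priv_destructor(struct net_device *dev)
    {
        struct mydrv_priv *priv = netdev_priv(dev);

        kfree(priv->ring);               /* release private state only */
        /* no free_netdev() here: needs_free_netdev makes the core do it */
    }

    static void mydrv_setup(struct net_device *dev)
    {
        ether_setup(dev);
        dev->needs_free_netdev = true;
        dev->priv_destructor = mydrv_priv_destructor;
    }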
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 07ef550c6627..93315d6b21a8 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -84,6 +84,7 @@ struct kmem_cache { | |||
84 | int red_left_pad; /* Left redzone padding size */ | 84 | int red_left_pad; /* Left redzone padding size */ |
85 | #ifdef CONFIG_SYSFS | 85 | #ifdef CONFIG_SYSFS |
86 | struct kobject kobj; /* For sysfs */ | 86 | struct kobject kobj; /* For sysfs */ |
87 | struct work_struct kobj_remove_work; | ||
87 | #endif | 88 | #endif |
88 | #ifdef CONFIG_MEMCG | 89 | #ifdef CONFIG_MEMCG |
89 | struct memcg_cache_params memcg_params; | 90 | struct memcg_cache_params memcg_params; |
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 110f4532188c..f7043ccca81c 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h | |||
@@ -29,7 +29,6 @@ | |||
29 | */ | 29 | */ |
30 | struct tk_read_base { | 30 | struct tk_read_base { |
31 | struct clocksource *clock; | 31 | struct clocksource *clock; |
32 | u64 (*read)(struct clocksource *cs); | ||
33 | u64 mask; | 32 | u64 mask; |
34 | u64 cycle_last; | 33 | u64 cycle_last; |
35 | u32 mult; | 34 | u32 mult; |
@@ -58,7 +57,7 @@ struct tk_read_base { | |||
58 | * interval. | 57 | * interval. |
59 | * @xtime_remainder: Shifted nano seconds left over when rounding | 58 | * @xtime_remainder: Shifted nano seconds left over when rounding |
60 | * @cycle_interval | 59 | * @cycle_interval |
61 | * @raw_interval: Raw nano seconds accumulated per NTP interval. | 60 | * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. |
62 | * @ntp_error: Difference between accumulated time and NTP time in ntp | 61 | * @ntp_error: Difference between accumulated time and NTP time in ntp |
63 | * shifted nano seconds. | 62 | * shifted nano seconds. |
64 | * @ntp_error_shift: Shift conversion between clock shifted nano seconds and | 63 | * @ntp_error_shift: Shift conversion between clock shifted nano seconds and |
@@ -100,7 +99,7 @@ struct timekeeper { | |||
100 | u64 cycle_interval; | 99 | u64 cycle_interval; |
101 | u64 xtime_interval; | 100 | u64 xtime_interval; |
102 | s64 xtime_remainder; | 101 | s64 xtime_remainder; |
103 | u32 raw_interval; | 102 | u64 raw_interval; |
104 | /* The ntp_tick_length() value currently being used. | 103 | /* The ntp_tick_length() value currently being used. |
105 | * This cached copy ensures we consistently apply the tick | 104 | * This cached copy ensures we consistently apply the tick |
106 | * length for an entire tick, as ntp_tick_length may change | 105 | * length for an entire tick, as ntp_tick_length may change |
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h index 413335c8cb52..298f996969df 100644 --- a/include/media/cec-notifier.h +++ b/include/media/cec-notifier.h | |||
@@ -106,6 +106,16 @@ static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, | |||
106 | { | 106 | { |
107 | } | 107 | } |
108 | 108 | ||
109 | static inline void cec_notifier_register(struct cec_notifier *n, | ||
110 | struct cec_adapter *adap, | ||
111 | void (*callback)(struct cec_adapter *adap, u16 pa)) | ||
112 | { | ||
113 | } | ||
114 | |||
115 | static inline void cec_notifier_unregister(struct cec_notifier *n) | ||
116 | { | ||
117 | } | ||
118 | |||
109 | #endif | 119 | #endif |
110 | 120 | ||
111 | #endif | 121 | #endif |
diff --git a/include/media/cec.h b/include/media/cec.h index bfa88d4d67e1..201f060978da 100644 --- a/include/media/cec.h +++ b/include/media/cec.h | |||
@@ -206,7 +206,7 @@ static inline bool cec_is_sink(const struct cec_adapter *adap) | |||
206 | #define cec_phys_addr_exp(pa) \ | 206 | #define cec_phys_addr_exp(pa) \ |
207 | ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf | 207 | ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf |
208 | 208 | ||
209 | #if IS_ENABLED(CONFIG_CEC_CORE) | 209 | #if IS_REACHABLE(CONFIG_CEC_CORE) |
210 | struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, | 210 | struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, |
211 | void *priv, const char *name, u32 caps, u8 available_las); | 211 | void *priv, const char *name, u32 caps, u8 available_las); |
212 | int cec_register_adapter(struct cec_adapter *adap, struct device *parent); | 212 | int cec_register_adapter(struct cec_adapter *adap, struct device *parent); |
diff --git a/include/net/wext.h b/include/net/wext.h index 345911965dbb..454ff763eeba 100644 --- a/include/net/wext.h +++ b/include/net/wext.h | |||
@@ -6,7 +6,7 @@ | |||
6 | struct net; | 6 | struct net; |
7 | 7 | ||
8 | #ifdef CONFIG_WEXT_CORE | 8 | #ifdef CONFIG_WEXT_CORE |
9 | int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, | 9 | int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, |
10 | void __user *arg); | 10 | void __user *arg); |
11 | int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | 11 | int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, |
12 | unsigned long arg); | 12 | unsigned long arg); |
@@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | |||
14 | struct iw_statistics *get_wireless_stats(struct net_device *dev); | 14 | struct iw_statistics *get_wireless_stats(struct net_device *dev); |
15 | int call_commit_handler(struct net_device *dev); | 15 | int call_commit_handler(struct net_device *dev); |
16 | #else | 16 | #else |
17 | static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, | 17 | static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, |
18 | void __user *arg) | 18 | void __user *arg) |
19 | { | 19 | { |
20 | return -EINVAL; | 20 | return -EINVAL; |
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index d179d7767f51..7d4a594d5d58 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
@@ -1486,8 +1486,10 @@ enum ethtool_link_mode_bit_indices { | |||
1486 | * it was forced up into this mode or autonegotiated. | 1486 | * it was forced up into this mode or autonegotiated. |
1487 | */ | 1487 | */ |
1488 | 1488 | ||
1489 | /* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */ | 1489 | /* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. |
1490 | /* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */ | 1490 | * Update drivers/net/phy/phy.c:phy_speed_to_str() and |
1491 | * drivers/net/bonding/bond_3ad.c:__get_link_speed() when adding new values. | ||
1492 | */ | ||
1491 | #define SPEED_10 10 | 1493 | #define SPEED_10 10 |
1492 | #define SPEED_100 100 | 1494 | #define SPEED_100 100 |
1493 | #define SPEED_1000 1000 | 1495 | #define SPEED_1000 1000 |
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 61b7d36dfe34..156ee4cab82e 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h | |||
@@ -343,6 +343,7 @@ enum ovs_key_attr { | |||
343 | #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) | 343 | #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) |
344 | 344 | ||
345 | enum ovs_tunnel_key_attr { | 345 | enum ovs_tunnel_key_attr { |
346 | /* OVS_TUNNEL_KEY_ATTR_NONE, standard nl API requires this attribute! */ | ||
346 | OVS_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */ | 347 | OVS_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */ |
347 | OVS_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */ | 348 | OVS_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */ |
348 | OVS_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */ | 349 | OVS_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */ |
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 2831480c63a2..ee97196bb151 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -580,7 +580,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, | |||
580 | int ret = -ENOMEM, max_order = 0; | 580 | int ret = -ENOMEM, max_order = 0; |
581 | 581 | ||
582 | if (!has_aux(event)) | 582 | if (!has_aux(event)) |
583 | return -ENOTSUPP; | 583 | return -EOPNOTSUPP; |
584 | 584 | ||
585 | if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) { | 585 | if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) { |
586 | /* | 586 | /* |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 070be980c37a..425170d4439b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -1312,8 +1312,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1312 | ret = __irq_set_trigger(desc, | 1312 | ret = __irq_set_trigger(desc, |
1313 | new->flags & IRQF_TRIGGER_MASK); | 1313 | new->flags & IRQF_TRIGGER_MASK); |
1314 | 1314 | ||
1315 | if (ret) | 1315 | if (ret) { |
1316 | irq_release_resources(desc); | ||
1316 | goto out_mask; | 1317 | goto out_mask; |
1318 | } | ||
1317 | } | 1319 | } |
1318 | 1320 | ||
1319 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ | 1321 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index f8269036bf0b..52c4e907c14b 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c | |||
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip, | |||
59 | 59 | ||
60 | ops = container_of(fops, struct klp_ops, fops); | 60 | ops = container_of(fops, struct klp_ops, fops); |
61 | 61 | ||
62 | rcu_read_lock(); | 62 | /* |
63 | * A variant of synchronize_sched() is used to allow patching functions | ||
64 | * where RCU is not watching, see klp_synchronize_transition(). | ||
65 | */ | ||
66 | preempt_disable_notrace(); | ||
63 | 67 | ||
64 | func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, | 68 | func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, |
65 | stack_node); | 69 | stack_node); |
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip, | |||
115 | 119 | ||
116 | klp_arch_set_pc(regs, (unsigned long)func->new_func); | 120 | klp_arch_set_pc(regs, (unsigned long)func->new_func); |
117 | unlock: | 121 | unlock: |
118 | rcu_read_unlock(); | 122 | preempt_enable_notrace(); |
119 | } | 123 | } |
120 | 124 | ||
121 | /* | 125 | /* |
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index adc0cc64aa4b..b004a1fb6032 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c | |||
@@ -49,6 +49,28 @@ static void klp_transition_work_fn(struct work_struct *work) | |||
49 | static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); | 49 | static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * This function is just a stub to implement a hard force | ||
53 | * of synchronize_sched(). This requires synchronizing | ||
54 | * tasks even in userspace and idle. | ||
55 | */ | ||
56 | static void klp_sync(struct work_struct *work) | ||
57 | { | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * We also allow patching functions where RCU is not watching, | ||
62 | * e.g. before user_exit(). We cannot rely on the RCU infrastructure | ||
63 | * to do the synchronization. Instead, hard-force the sched synchronization. | ||
64 | * | ||
65 | * This approach allows RCU functions to be used safely for manipulating | ||
66 | * func_stack. | ||
67 | */ | ||
68 | static void klp_synchronize_transition(void) | ||
69 | { | ||
70 | schedule_on_each_cpu(klp_sync); | ||
71 | } | ||
72 | |||
73 | /* | ||
52 | * The transition to the target patch state is complete. Clean up the data | 74 | * The transition to the target patch state is complete. Clean up the data |
53 | * structures. | 75 | * structures. |
54 | */ | 76 | */ |
@@ -73,7 +95,7 @@ static void klp_complete_transition(void) | |||
73 | * func->transition gets cleared, the handler may choose a | 95 | * func->transition gets cleared, the handler may choose a |
74 | * removed function. | 96 | * removed function. |
75 | */ | 97 | */ |
76 | synchronize_rcu(); | 98 | klp_synchronize_transition(); |
77 | } | 99 | } |
78 | 100 | ||
79 | if (klp_transition_patch->immediate) | 101 | if (klp_transition_patch->immediate) |
@@ -92,7 +114,7 @@ static void klp_complete_transition(void) | |||
92 | 114 | ||
93 | /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ | 115 | /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ |
94 | if (klp_target_state == KLP_PATCHED) | 116 | if (klp_target_state == KLP_PATCHED) |
95 | synchronize_rcu(); | 117 | klp_synchronize_transition(); |
96 | 118 | ||
97 | read_lock(&tasklist_lock); | 119 | read_lock(&tasklist_lock); |
98 | for_each_process_thread(g, task) { | 120 | for_each_process_thread(g, task) { |
@@ -136,7 +158,11 @@ void klp_cancel_transition(void) | |||
136 | */ | 158 | */ |
137 | void klp_update_patch_state(struct task_struct *task) | 159 | void klp_update_patch_state(struct task_struct *task) |
138 | { | 160 | { |
139 | rcu_read_lock(); | 161 | /* |
162 | * A variant of synchronize_sched() is used to allow patching functions | ||
163 | * where RCU is not watching, see klp_synchronize_transition(). | ||
164 | */ | ||
165 | preempt_disable_notrace(); | ||
140 | 166 | ||
141 | /* | 167 | /* |
142 | * This test_and_clear_tsk_thread_flag() call also serves as a read | 168 | * This test_and_clear_tsk_thread_flag() call also serves as a read |
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task) | |||
153 | if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) | 179 | if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) |
154 | task->patch_state = READ_ONCE(klp_target_state); | 180 | task->patch_state = READ_ONCE(klp_target_state); |
155 | 181 | ||
156 | rcu_read_unlock(); | 182 | preempt_enable_notrace(); |
157 | } | 183 | } |
158 | 184 | ||
159 | /* | 185 | /* |
@@ -539,7 +565,7 @@ void klp_reverse_transition(void) | |||
539 | clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); | 565 | clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); |
540 | 566 | ||
541 | /* Let any remaining calls to klp_update_patch_state() complete */ | 567 | /* Let any remaining calls to klp_update_patch_state() complete */ |
542 | synchronize_rcu(); | 568 | klp_synchronize_transition(); |
543 | 569 | ||
544 | klp_start_transition(); | 570 | klp_start_transition(); |
545 | } | 571 | } |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 803c3bc274c4..326d4f88e2b1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -5605,7 +5605,7 @@ void idle_task_exit(void) | |||
5605 | BUG_ON(cpu_online(smp_processor_id())); | 5605 | BUG_ON(cpu_online(smp_processor_id())); |
5606 | 5606 | ||
5607 | if (mm != &init_mm) { | 5607 | if (mm != &init_mm) { |
5608 | switch_mm_irqs_off(mm, &init_mm, current); | 5608 | switch_mm(mm, &init_mm, current); |
5609 | finish_arch_post_lock_switch(); | 5609 | finish_arch_post_lock_switch(); |
5610 | } | 5610 | } |
5611 | mmdrop(mm); | 5611 | mmdrop(mm); |
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 622eed1b7658..076a2e31951c 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c | |||
@@ -101,9 +101,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, | |||
101 | if (sg_policy->next_freq == next_freq) | 101 | if (sg_policy->next_freq == next_freq) |
102 | return; | 102 | return; |
103 | 103 | ||
104 | if (sg_policy->next_freq > next_freq) | ||
105 | next_freq = (sg_policy->next_freq + next_freq) >> 1; | ||
106 | |||
107 | sg_policy->next_freq = next_freq; | 104 | sg_policy->next_freq = next_freq; |
108 | sg_policy->last_freq_update_time = time; | 105 | sg_policy->last_freq_update_time = time; |
109 | 106 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d71109321841..c77e4b1d51c0 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -3563,7 +3563,7 @@ static inline void check_schedstat_required(void) | |||
3563 | trace_sched_stat_runtime_enabled()) { | 3563 | trace_sched_stat_runtime_enabled()) { |
3564 | printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " | 3564 | printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " |
3565 | "stat_blocked and stat_runtime require the " | 3565 | "stat_blocked and stat_runtime require the " |
3566 | "kernel parameter schedstats=enabled or " | 3566 | "kernel parameter schedstats=enable or " |
3567 | "kernel.sched_schedstats=1\n"); | 3567 | "kernel.sched_schedstats=1\n"); |
3568 | } | 3568 | } |
3569 | #endif | 3569 | #endif |
diff --git a/kernel/signal.c b/kernel/signal.c index ca92bcfeb322..45b4c1ffe14e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -510,7 +510,8 @@ int unhandled_signal(struct task_struct *tsk, int sig) | |||
510 | return !tsk->ptrace; | 510 | return !tsk->ptrace; |
511 | } | 511 | } |
512 | 512 | ||
513 | static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) | 513 | static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, |
514 | bool *resched_timer) | ||
514 | { | 515 | { |
515 | struct sigqueue *q, *first = NULL; | 516 | struct sigqueue *q, *first = NULL; |
516 | 517 | ||
@@ -532,6 +533,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) | |||
532 | still_pending: | 533 | still_pending: |
533 | list_del_init(&first->list); | 534 | list_del_init(&first->list); |
534 | copy_siginfo(info, &first->info); | 535 | copy_siginfo(info, &first->info); |
536 | |||
537 | *resched_timer = | ||
538 | (first->flags & SIGQUEUE_PREALLOC) && | ||
539 | (info->si_code == SI_TIMER) && | ||
540 | (info->si_sys_private); | ||
541 | |||
535 | __sigqueue_free(first); | 542 | __sigqueue_free(first); |
536 | } else { | 543 | } else { |
537 | /* | 544 | /* |
@@ -548,12 +555,12 @@ still_pending: | |||
548 | } | 555 | } |
549 | 556 | ||
550 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | 557 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, |
551 | siginfo_t *info) | 558 | siginfo_t *info, bool *resched_timer) |
552 | { | 559 | { |
553 | int sig = next_signal(pending, mask); | 560 | int sig = next_signal(pending, mask); |
554 | 561 | ||
555 | if (sig) | 562 | if (sig) |
556 | collect_signal(sig, pending, info); | 563 | collect_signal(sig, pending, info, resched_timer); |
557 | return sig; | 564 | return sig; |
558 | } | 565 | } |
559 | 566 | ||
@@ -565,15 +572,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | |||
565 | */ | 572 | */ |
566 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | 573 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) |
567 | { | 574 | { |
575 | bool resched_timer = false; | ||
568 | int signr; | 576 | int signr; |
569 | 577 | ||
570 | /* We only dequeue private signals from ourselves, we don't let | 578 | /* We only dequeue private signals from ourselves, we don't let |
571 | * signalfd steal them | 579 | * signalfd steal them |
572 | */ | 580 | */ |
573 | signr = __dequeue_signal(&tsk->pending, mask, info); | 581 | signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); |
574 | if (!signr) { | 582 | if (!signr) { |
575 | signr = __dequeue_signal(&tsk->signal->shared_pending, | 583 | signr = __dequeue_signal(&tsk->signal->shared_pending, |
576 | mask, info); | 584 | mask, info, &resched_timer); |
577 | #ifdef CONFIG_POSIX_TIMERS | 585 | #ifdef CONFIG_POSIX_TIMERS |
578 | /* | 586 | /* |
579 | * itimer signal ? | 587 | * itimer signal ? |
@@ -621,7 +629,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
621 | current->jobctl |= JOBCTL_STOP_DEQUEUED; | 629 | current->jobctl |= JOBCTL_STOP_DEQUEUED; |
622 | } | 630 | } |
623 | #ifdef CONFIG_POSIX_TIMERS | 631 | #ifdef CONFIG_POSIX_TIMERS |
624 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { | 632 | if (resched_timer) { |
625 | /* | 633 | /* |
626 | * Release the siglock to ensure proper locking order | 634 | * Release the siglock to ensure proper locking order |
627 | * of timer locks outside of siglocks. Note, we leave | 635 | * of timer locks outside of siglocks. Note, we leave |
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 5cb5b0008d97..ee2f4202d82a 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
@@ -387,7 +387,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start) | |||
387 | { | 387 | { |
388 | struct alarm_base *base = &alarm_bases[alarm->type]; | 388 | struct alarm_base *base = &alarm_bases[alarm->type]; |
389 | 389 | ||
390 | start = ktime_add(start, base->gettime()); | 390 | start = ktime_add_safe(start, base->gettime()); |
391 | alarm_start(alarm, start); | 391 | alarm_start(alarm, start); |
392 | } | 392 | } |
393 | EXPORT_SYMBOL_GPL(alarm_start_relative); | 393 | EXPORT_SYMBOL_GPL(alarm_start_relative); |
@@ -475,7 +475,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) | |||
475 | overrun++; | 475 | overrun++; |
476 | } | 476 | } |
477 | 477 | ||
478 | alarm->node.expires = ktime_add(alarm->node.expires, interval); | 478 | alarm->node.expires = ktime_add_safe(alarm->node.expires, interval); |
479 | return overrun; | 479 | return overrun; |
480 | } | 480 | } |
481 | EXPORT_SYMBOL_GPL(alarm_forward); | 481 | EXPORT_SYMBOL_GPL(alarm_forward); |
@@ -660,13 +660,21 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, | |||
660 | 660 | ||
661 | /* start the timer */ | 661 | /* start the timer */ |
662 | timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval); | 662 | timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval); |
663 | |||
664 | /* | ||
665 | * Rate limit to the tick as a hot fix to prevent DOS. Will be | ||
666 | * mopped up later. | ||
667 | */ | ||
668 | if (timr->it.alarm.interval < TICK_NSEC) | ||
669 | timr->it.alarm.interval = TICK_NSEC; | ||
670 | |||
663 | exp = timespec64_to_ktime(new_setting->it_value); | 671 | exp = timespec64_to_ktime(new_setting->it_value); |
664 | /* Convert (if necessary) to absolute time */ | 672 | /* Convert (if necessary) to absolute time */ |
665 | if (flags != TIMER_ABSTIME) { | 673 | if (flags != TIMER_ABSTIME) { |
666 | ktime_t now; | 674 | ktime_t now; |
667 | 675 | ||
668 | now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); | 676 | now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); |
669 | exp = ktime_add(now, exp); | 677 | exp = ktime_add_safe(now, exp); |
670 | } | 678 | } |
671 | 679 | ||
672 | alarm_start(&timr->it.alarm.alarmtimer, exp); | 680 | alarm_start(&timr->it.alarm.alarmtimer, exp); |
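The alarmtimer hunks above switch from ktime_add() to ktime_add_safe() and clamp the interval to TICK_NSEC: a large relative value supplied by userspace must saturate at KTIME_MAX rather than overflow and wrap into the past. A hedged illustration (hypothetical demo_overflow() helper, not part of this diff):

    /* Illustrative only: contrast wrapping vs. saturating ktime addition. */
    static bool demo_overflow(void)
    {
            ktime_t now  = ktime_get();
            ktime_t huge = KTIME_MAX - 1;   /* e.g. bogus relative value from userspace */
            ktime_t wrapped = ktime_add(now, huge);         /* can overflow and go negative */
            ktime_t clamped = ktime_add_safe(now, huge);    /* saturates at KTIME_MAX */

            return ktime_before(wrapped, now) && clamped == KTIME_MAX;
    }
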
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 987e496bb51a..b398c2ea69b2 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -37,9 +37,11 @@ static int tick_broadcast_forced; | |||
37 | static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); | 37 | static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); |
38 | 38 | ||
39 | #ifdef CONFIG_TICK_ONESHOT | 39 | #ifdef CONFIG_TICK_ONESHOT |
40 | static void tick_broadcast_setup_oneshot(struct clock_event_device *bc); | ||
40 | static void tick_broadcast_clear_oneshot(int cpu); | 41 | static void tick_broadcast_clear_oneshot(int cpu); |
41 | static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 42 | static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
42 | #else | 43 | #else |
44 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } | ||
43 | static inline void tick_broadcast_clear_oneshot(int cpu) { } | 45 | static inline void tick_broadcast_clear_oneshot(int cpu) { } |
44 | static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } | 46 | static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } |
45 | #endif | 47 | #endif |
@@ -867,7 +869,7 @@ static void tick_broadcast_init_next_event(struct cpumask *mask, | |||
867 | /** | 869 | /** |
868 | * tick_broadcast_setup_oneshot - setup the broadcast device | 870 | * tick_broadcast_setup_oneshot - setup the broadcast device |
869 | */ | 871 | */ |
870 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 872 | static void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
871 | { | 873 | { |
872 | int cpu = smp_processor_id(); | 874 | int cpu = smp_processor_id(); |
873 | 875 | ||
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index f738251000fe..be0ac01f2e12 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -126,7 +126,6 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } | |||
126 | 126 | ||
127 | /* Functions related to oneshot broadcasting */ | 127 | /* Functions related to oneshot broadcasting */ |
128 | #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) | 128 | #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) |
129 | extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); | ||
130 | extern void tick_broadcast_switch_to_oneshot(void); | 129 | extern void tick_broadcast_switch_to_oneshot(void); |
131 | extern void tick_shutdown_broadcast_oneshot(unsigned int cpu); | 130 | extern void tick_shutdown_broadcast_oneshot(unsigned int cpu); |
132 | extern int tick_broadcast_oneshot_active(void); | 131 | extern int tick_broadcast_oneshot_active(void); |
@@ -134,7 +133,6 @@ extern void tick_check_oneshot_broadcast_this_cpu(void); | |||
134 | bool tick_broadcast_oneshot_available(void); | 133 | bool tick_broadcast_oneshot_available(void); |
135 | extern struct cpumask *tick_get_broadcast_oneshot_mask(void); | 134 | extern struct cpumask *tick_get_broadcast_oneshot_mask(void); |
136 | #else /* !(BROADCAST && ONESHOT): */ | 135 | #else /* !(BROADCAST && ONESHOT): */ |
137 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } | ||
138 | static inline void tick_broadcast_switch_to_oneshot(void) { } | 136 | static inline void tick_broadcast_switch_to_oneshot(void) { } |
139 | static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { } | 137 | static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { } |
140 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | 138 | static inline int tick_broadcast_oneshot_active(void) { return 0; } |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 9652bc57fd09..b602c48cb841 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -118,6 +118,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) | |||
118 | tk->offs_boot = ktime_add(tk->offs_boot, delta); | 118 | tk->offs_boot = ktime_add(tk->offs_boot, delta); |
119 | } | 119 | } |
120 | 120 | ||
121 | /* | ||
122 | * tk_clock_read - atomic clocksource read() helper | ||
123 | * | ||
124 | * This helper is necessary to use in the read paths because, while the | ||
125 | * seqlock ensures we don't return a bad value while structures are updated, | ||
126 | * it doesn't protect from potential crashes. There is the possibility that | ||
127 | * the tkr's clocksource may change between the read reference, and the | ||
128 | * clock reference passed to the read function. This can cause crashes if | ||
129 | * the wrong clocksource is passed to the wrong read function. | ||
130 | * This isn't necessary to use when holding the timekeeper_lock or doing | ||
131 | * a read of the fast-timekeeper tkrs (which is protected by its own locking | ||
132 | * and update logic). | ||
133 | */ | ||
134 | static inline u64 tk_clock_read(struct tk_read_base *tkr) | ||
135 | { | ||
136 | struct clocksource *clock = READ_ONCE(tkr->clock); | ||
137 | |||
138 | return clock->read(clock); | ||
139 | } | ||
140 | |||
121 | #ifdef CONFIG_DEBUG_TIMEKEEPING | 141 | #ifdef CONFIG_DEBUG_TIMEKEEPING |
122 | #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ | 142 | #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ |
123 | 143 | ||
@@ -175,7 +195,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) | |||
175 | */ | 195 | */ |
176 | do { | 196 | do { |
177 | seq = read_seqcount_begin(&tk_core.seq); | 197 | seq = read_seqcount_begin(&tk_core.seq); |
178 | now = tkr->read(tkr->clock); | 198 | now = tk_clock_read(tkr); |
179 | last = tkr->cycle_last; | 199 | last = tkr->cycle_last; |
180 | mask = tkr->mask; | 200 | mask = tkr->mask; |
181 | max = tkr->clock->max_cycles; | 201 | max = tkr->clock->max_cycles; |
@@ -209,7 +229,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) | |||
209 | u64 cycle_now, delta; | 229 | u64 cycle_now, delta; |
210 | 230 | ||
211 | /* read clocksource */ | 231 | /* read clocksource */ |
212 | cycle_now = tkr->read(tkr->clock); | 232 | cycle_now = tk_clock_read(tkr); |
213 | 233 | ||
214 | /* calculate the delta since the last update_wall_time */ | 234 | /* calculate the delta since the last update_wall_time */ |
215 | delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); | 235 | delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); |
@@ -238,12 +258,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) | |||
238 | ++tk->cs_was_changed_seq; | 258 | ++tk->cs_was_changed_seq; |
239 | old_clock = tk->tkr_mono.clock; | 259 | old_clock = tk->tkr_mono.clock; |
240 | tk->tkr_mono.clock = clock; | 260 | tk->tkr_mono.clock = clock; |
241 | tk->tkr_mono.read = clock->read; | ||
242 | tk->tkr_mono.mask = clock->mask; | 261 | tk->tkr_mono.mask = clock->mask; |
243 | tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); | 262 | tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); |
244 | 263 | ||
245 | tk->tkr_raw.clock = clock; | 264 | tk->tkr_raw.clock = clock; |
246 | tk->tkr_raw.read = clock->read; | ||
247 | tk->tkr_raw.mask = clock->mask; | 265 | tk->tkr_raw.mask = clock->mask; |
248 | tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; | 266 | tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; |
249 | 267 | ||
@@ -262,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) | |||
262 | /* Go back from cycles -> shifted ns */ | 280 | /* Go back from cycles -> shifted ns */ |
263 | tk->xtime_interval = interval * clock->mult; | 281 | tk->xtime_interval = interval * clock->mult; |
264 | tk->xtime_remainder = ntpinterval - tk->xtime_interval; | 282 | tk->xtime_remainder = ntpinterval - tk->xtime_interval; |
265 | tk->raw_interval = (interval * clock->mult) >> clock->shift; | 283 | tk->raw_interval = interval * clock->mult; |
266 | 284 | ||
267 | /* if changing clocks, convert xtime_nsec shift units */ | 285 | /* if changing clocks, convert xtime_nsec shift units */ |
268 | if (old_clock) { | 286 | if (old_clock) { |
@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) | |||
404 | 422 | ||
405 | now += timekeeping_delta_to_ns(tkr, | 423 | now += timekeeping_delta_to_ns(tkr, |
406 | clocksource_delta( | 424 | clocksource_delta( |
407 | tkr->read(tkr->clock), | 425 | tk_clock_read(tkr), |
408 | tkr->cycle_last, | 426 | tkr->cycle_last, |
409 | tkr->mask)); | 427 | tkr->mask)); |
410 | } while (read_seqcount_retry(&tkf->seq, seq)); | 428 | } while (read_seqcount_retry(&tkf->seq, seq)); |
@@ -461,6 +479,10 @@ static u64 dummy_clock_read(struct clocksource *cs) | |||
461 | return cycles_at_suspend; | 479 | return cycles_at_suspend; |
462 | } | 480 | } |
463 | 481 | ||
482 | static struct clocksource dummy_clock = { | ||
483 | .read = dummy_clock_read, | ||
484 | }; | ||
485 | |||
464 | /** | 486 | /** |
465 | * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. | 487 | * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. |
466 | * @tk: Timekeeper to snapshot. | 488 | * @tk: Timekeeper to snapshot. |
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk) | |||
477 | struct tk_read_base *tkr = &tk->tkr_mono; | 499 | struct tk_read_base *tkr = &tk->tkr_mono; |
478 | 500 | ||
479 | memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); | 501 | memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); |
480 | cycles_at_suspend = tkr->read(tkr->clock); | 502 | cycles_at_suspend = tk_clock_read(tkr); |
481 | tkr_dummy.read = dummy_clock_read; | 503 | tkr_dummy.clock = &dummy_clock; |
482 | update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); | 504 | update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); |
483 | 505 | ||
484 | tkr = &tk->tkr_raw; | 506 | tkr = &tk->tkr_raw; |
485 | memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); | 507 | memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); |
486 | tkr_dummy.read = dummy_clock_read; | 508 | tkr_dummy.clock = &dummy_clock; |
487 | update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); | 509 | update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); |
488 | } | 510 | } |
489 | 511 | ||
@@ -649,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) | |||
649 | */ | 671 | */ |
650 | static void timekeeping_forward_now(struct timekeeper *tk) | 672 | static void timekeeping_forward_now(struct timekeeper *tk) |
651 | { | 673 | { |
652 | struct clocksource *clock = tk->tkr_mono.clock; | ||
653 | u64 cycle_now, delta; | 674 | u64 cycle_now, delta; |
654 | u64 nsec; | 675 | u64 nsec; |
655 | 676 | ||
656 | cycle_now = tk->tkr_mono.read(clock); | 677 | cycle_now = tk_clock_read(&tk->tkr_mono); |
657 | delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); | 678 | delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); |
658 | tk->tkr_mono.cycle_last = cycle_now; | 679 | tk->tkr_mono.cycle_last = cycle_now; |
659 | tk->tkr_raw.cycle_last = cycle_now; | 680 | tk->tkr_raw.cycle_last = cycle_now; |
@@ -929,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) | |||
929 | 950 | ||
930 | do { | 951 | do { |
931 | seq = read_seqcount_begin(&tk_core.seq); | 952 | seq = read_seqcount_begin(&tk_core.seq); |
932 | 953 | now = tk_clock_read(&tk->tkr_mono); | |
933 | now = tk->tkr_mono.read(tk->tkr_mono.clock); | ||
934 | systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; | 954 | systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; |
935 | systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; | 955 | systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; |
936 | base_real = ktime_add(tk->tkr_mono.base, | 956 | base_real = ktime_add(tk->tkr_mono.base, |
@@ -1108,7 +1128,7 @@ int get_device_system_crosststamp(int (*get_time_fn) | |||
1108 | * Check whether the system counter value provided by the | 1128 | * Check whether the system counter value provided by the |
1109 | * device driver is on the current timekeeping interval. | 1129 | * device driver is on the current timekeeping interval. |
1110 | */ | 1130 | */ |
1111 | now = tk->tkr_mono.read(tk->tkr_mono.clock); | 1131 | now = tk_clock_read(&tk->tkr_mono); |
1112 | interval_start = tk->tkr_mono.cycle_last; | 1132 | interval_start = tk->tkr_mono.cycle_last; |
1113 | if (!cycle_between(interval_start, cycles, now)) { | 1133 | if (!cycle_between(interval_start, cycles, now)) { |
1114 | clock_was_set_seq = tk->clock_was_set_seq; | 1134 | clock_was_set_seq = tk->clock_was_set_seq; |
@@ -1629,7 +1649,7 @@ void timekeeping_resume(void) | |||
1629 | * The less preferred source will only be tried if there is no better | 1649 | * The less preferred source will only be tried if there is no better |
1630 | * usable source. The rtc part is handled separately in rtc core code. | 1650 | * usable source. The rtc part is handled separately in rtc core code. |
1631 | */ | 1651 | */ |
1632 | cycle_now = tk->tkr_mono.read(clock); | 1652 | cycle_now = tk_clock_read(&tk->tkr_mono); |
1633 | if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && | 1653 | if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && |
1634 | cycle_now > tk->tkr_mono.cycle_last) { | 1654 | cycle_now > tk->tkr_mono.cycle_last) { |
1635 | u64 nsec, cyc_delta; | 1655 | u64 nsec, cyc_delta; |
@@ -1976,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, | |||
1976 | u32 shift, unsigned int *clock_set) | 1996 | u32 shift, unsigned int *clock_set) |
1977 | { | 1997 | { |
1978 | u64 interval = tk->cycle_interval << shift; | 1998 | u64 interval = tk->cycle_interval << shift; |
1979 | u64 raw_nsecs; | 1999 | u64 snsec_per_sec; |
1980 | 2000 | ||
1981 | /* If the offset is smaller than a shifted interval, do nothing */ | 2001 | /* If the offset is smaller than a shifted interval, do nothing */ |
1982 | if (offset < interval) | 2002 | if (offset < interval) |
@@ -1991,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, | |||
1991 | *clock_set |= accumulate_nsecs_to_secs(tk); | 2011 | *clock_set |= accumulate_nsecs_to_secs(tk); |
1992 | 2012 | ||
1993 | /* Accumulate raw time */ | 2013 | /* Accumulate raw time */ |
1994 | raw_nsecs = (u64)tk->raw_interval << shift; | 2014 | tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; |
1995 | raw_nsecs += tk->raw_time.tv_nsec; | 2015 | tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; |
1996 | if (raw_nsecs >= NSEC_PER_SEC) { | 2016 | snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; |
1997 | u64 raw_secs = raw_nsecs; | 2017 | while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { |
1998 | raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); | 2018 | tk->tkr_raw.xtime_nsec -= snsec_per_sec; |
1999 | tk->raw_time.tv_sec += raw_secs; | 2019 | tk->raw_time.tv_sec++; |
2000 | } | 2020 | } |
2001 | tk->raw_time.tv_nsec = raw_nsecs; | 2021 | tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift; |
2022 | tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; | ||
2002 | 2023 | ||
2003 | /* Accumulate error between NTP and clock interval */ | 2024 | /* Accumulate error between NTP and clock interval */ |
2004 | tk->ntp_error += tk->ntp_tick << shift; | 2025 | tk->ntp_error += tk->ntp_tick << shift; |
@@ -2030,7 +2051,7 @@ void update_wall_time(void) | |||
2030 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | 2051 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
2031 | offset = real_tk->cycle_interval; | 2052 | offset = real_tk->cycle_interval; |
2032 | #else | 2053 | #else |
2033 | offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), | 2054 | offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), |
2034 | tk->tkr_mono.cycle_last, tk->tkr_mono.mask); | 2055 | tk->tkr_mono.cycle_last, tk->tkr_mono.mask); |
2035 | #endif | 2056 | #endif |
2036 | 2057 | ||
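The timekeeping hunks replace every open-coded tkr->read(tkr->clock) with the tk_clock_read() helper added above: the cached ->read and ->clock fields are updated separately during a clocksource change, so loading them independently could pair one clocksource's read() with another's state. A compressed before/after view (restating the helper, for illustration only):

    /* Old pattern: two separately cached fields, which a concurrent
     * clocksource switch can leave briefly inconsistent:
     *
     *      cycle_now = tkr->read(tkr->clock);
     *
     * New pattern (tk_clock_read()): one READ_ONCE() snapshot of the
     * clocksource pointer, then call through that snapshot:
     *
     *      struct clocksource *clock = READ_ONCE(tkr->clock);
     *      cycle_now = clock->read(clock);
     */
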
diff --git a/lib/cmdline.c b/lib/cmdline.c index 3c6432df7e63..4c0888c4a68d 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c | |||
@@ -23,14 +23,14 @@ | |||
23 | * the values[M, M+1, ..., N] into the ints array in get_options. | 23 | * the values[M, M+1, ..., N] into the ints array in get_options. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | static int get_range(char **str, int *pint) | 26 | static int get_range(char **str, int *pint, int n) |
27 | { | 27 | { |
28 | int x, inc_counter, upper_range; | 28 | int x, inc_counter, upper_range; |
29 | 29 | ||
30 | (*str)++; | 30 | (*str)++; |
31 | upper_range = simple_strtol((*str), NULL, 0); | 31 | upper_range = simple_strtol((*str), NULL, 0); |
32 | inc_counter = upper_range - *pint; | 32 | inc_counter = upper_range - *pint; |
33 | for (x = *pint; x < upper_range; x++) | 33 | for (x = *pint; n && x < upper_range; x++, n--) |
34 | *pint++ = x; | 34 | *pint++ = x; |
35 | return inc_counter; | 35 | return inc_counter; |
36 | } | 36 | } |
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints) | |||
97 | break; | 97 | break; |
98 | if (res == 3) { | 98 | if (res == 3) { |
99 | int range_nums; | 99 | int range_nums; |
100 | range_nums = get_range((char **)&str, ints + i); | 100 | range_nums = get_range((char **)&str, ints + i, nints - i); |
101 | if (range_nums < 0) | 101 | if (range_nums < 0) |
102 | break; | 102 | break; |
103 | /* | 103 | /* |
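The lib/cmdline.c change threads the remaining capacity of the caller's array into get_range(), so a parameter such as "foo=1-100000" can no longer write past the buffer handed to get_options(). A usage sketch under the assumption of a hypothetical demo_list= parameter:

    /* Usage sketch: ints[0] receives the number of values parsed,
     * the parsed values start at ints[1].
     */
    static int __init demo_parse(char *str)
    {
            int ints[8];

            get_options(str, ARRAY_SIZE(ints), ints);  /* e.g. str = "1-3,7" */
            return 1;
    }
    __setup("demo_list=", demo_parse);
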
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c index 74a54b7f2562..9f79547d1b97 100644 --- a/lib/libcrc32c.c +++ b/lib/libcrc32c.c | |||
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm; | |||
43 | u32 crc32c(u32 crc, const void *address, unsigned int length) | 43 | u32 crc32c(u32 crc, const void *address, unsigned int length) |
44 | { | 44 | { |
45 | SHASH_DESC_ON_STACK(shash, tfm); | 45 | SHASH_DESC_ON_STACK(shash, tfm); |
46 | u32 *ctx = (u32 *)shash_desc_ctx(shash); | 46 | u32 ret, *ctx = (u32 *)shash_desc_ctx(shash); |
47 | int err; | 47 | int err; |
48 | 48 | ||
49 | shash->tfm = tfm; | 49 | shash->tfm = tfm; |
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length) | |||
53 | err = crypto_shash_update(shash, address, length); | 53 | err = crypto_shash_update(shash, address, length); |
54 | BUG_ON(err); | 54 | BUG_ON(err); |
55 | 55 | ||
56 | return *ctx; | 56 | ret = *ctx; |
57 | barrier_data(ctx); | ||
58 | return ret; | ||
57 | } | 59 | } |
58 | 60 | ||
59 | EXPORT_SYMBOL(crc32c); | 61 | EXPORT_SYMBOL(crc32c); |
diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c | |||
@@ -387,11 +387,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, | |||
387 | /* mlock all present pages, but do not fault in new pages */ | 387 | /* mlock all present pages, but do not fault in new pages */ |
388 | if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) | 388 | if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) |
389 | return -ENOENT; | 389 | return -ENOENT; |
390 | /* For mm_populate(), just skip the stack guard page. */ | ||
391 | if ((*flags & FOLL_POPULATE) && | ||
392 | (stack_guard_page_start(vma, address) || | ||
393 | stack_guard_page_end(vma, address + PAGE_SIZE))) | ||
394 | return -ENOENT; | ||
395 | if (*flags & FOLL_WRITE) | 390 | if (*flags & FOLL_WRITE) |
396 | fault_flags |= FAULT_FLAG_WRITE; | 391 | fault_flags |= FAULT_FLAG_WRITE; |
397 | if (*flags & FOLL_REMOTE) | 392 | if (*flags & FOLL_REMOTE) |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a84909cf20d3..88c6167f194d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1426,8 +1426,11 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) | |||
1426 | */ | 1426 | */ |
1427 | if (unlikely(pmd_trans_migrating(*vmf->pmd))) { | 1427 | if (unlikely(pmd_trans_migrating(*vmf->pmd))) { |
1428 | page = pmd_page(*vmf->pmd); | 1428 | page = pmd_page(*vmf->pmd); |
1429 | if (!get_page_unless_zero(page)) | ||
1430 | goto out_unlock; | ||
1429 | spin_unlock(vmf->ptl); | 1431 | spin_unlock(vmf->ptl); |
1430 | wait_on_page_locked(page); | 1432 | wait_on_page_locked(page); |
1433 | put_page(page); | ||
1431 | goto out; | 1434 | goto out; |
1432 | } | 1435 | } |
1433 | 1436 | ||
@@ -1459,9 +1462,12 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) | |||
1459 | 1462 | ||
1460 | /* Migration could have started since the pmd_trans_migrating check */ | 1463 | /* Migration could have started since the pmd_trans_migrating check */ |
1461 | if (!page_locked) { | 1464 | if (!page_locked) { |
1465 | page_nid = -1; | ||
1466 | if (!get_page_unless_zero(page)) | ||
1467 | goto out_unlock; | ||
1462 | spin_unlock(vmf->ptl); | 1468 | spin_unlock(vmf->ptl); |
1463 | wait_on_page_locked(page); | 1469 | wait_on_page_locked(page); |
1464 | page_nid = -1; | 1470 | put_page(page); |
1465 | goto out; | 1471 | goto out; |
1466 | } | 1472 | } |
1467 | 1473 | ||
diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 945fd1ca49b5..df4ebdb2b10a 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c | |||
@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, | |||
652 | spin_unlock(ptl); | 652 | spin_unlock(ptl); |
653 | free_page_and_swap_cache(src_page); | 653 | free_page_and_swap_cache(src_page); |
654 | } | 654 | } |
655 | cond_resched(); | ||
656 | } | 655 | } |
657 | } | 656 | } |
658 | 657 | ||
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 342fac9ba89b..ecc183fd94f3 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -1184,7 +1184,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) | |||
1184 | * page_remove_rmap() in try_to_unmap_one(). So to determine page status | 1184 | * page_remove_rmap() in try_to_unmap_one(). So to determine page status |
1185 | * correctly, we save a copy of the page flags at this time. | 1185 | * correctly, we save a copy of the page flags at this time. |
1186 | */ | 1186 | */ |
1187 | page_flags = p->flags; | 1187 | if (PageHuge(p)) |
1188 | page_flags = hpage->flags; | ||
1189 | else | ||
1190 | page_flags = p->flags; | ||
1188 | 1191 | ||
1189 | /* | 1192 | /* |
1190 | * unpoison always clear PG_hwpoison inside page lock | 1193 | * unpoison always clear PG_hwpoison inside page lock |
diff --git a/mm/memory.c b/mm/memory.c index 2e65df1831d9..bb11c474857e 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2855,40 +2855,6 @@ out_release: | |||
2855 | } | 2855 | } |
2856 | 2856 | ||
2857 | /* | 2857 | /* |
2858 | * This is like a special single-page "expand_{down|up}wards()", | ||
2859 | * except we must first make sure that 'address{-|+}PAGE_SIZE' | ||
2860 | * doesn't hit another vma. | ||
2861 | */ | ||
2862 | static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) | ||
2863 | { | ||
2864 | address &= PAGE_MASK; | ||
2865 | if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { | ||
2866 | struct vm_area_struct *prev = vma->vm_prev; | ||
2867 | |||
2868 | /* | ||
2869 | * Is there a mapping abutting this one below? | ||
2870 | * | ||
2871 | * That's only ok if it's the same stack mapping | ||
2872 | * that has gotten split.. | ||
2873 | */ | ||
2874 | if (prev && prev->vm_end == address) | ||
2875 | return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; | ||
2876 | |||
2877 | return expand_downwards(vma, address - PAGE_SIZE); | ||
2878 | } | ||
2879 | if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { | ||
2880 | struct vm_area_struct *next = vma->vm_next; | ||
2881 | |||
2882 | /* As VM_GROWSDOWN but s/below/above/ */ | ||
2883 | if (next && next->vm_start == address + PAGE_SIZE) | ||
2884 | return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; | ||
2885 | |||
2886 | return expand_upwards(vma, address + PAGE_SIZE); | ||
2887 | } | ||
2888 | return 0; | ||
2889 | } | ||
2890 | |||
2891 | /* | ||
2892 | * We enter with non-exclusive mmap_sem (to exclude vma changes, | 2858 | * We enter with non-exclusive mmap_sem (to exclude vma changes, |
2893 | * but allow concurrent faults), and pte mapped but not yet locked. | 2859 | * but allow concurrent faults), and pte mapped but not yet locked. |
2894 | * We return with mmap_sem still held, but pte unmapped and unlocked. | 2860 | * We return with mmap_sem still held, but pte unmapped and unlocked. |
@@ -2904,10 +2870,6 @@ static int do_anonymous_page(struct vm_fault *vmf) | |||
2904 | if (vma->vm_flags & VM_SHARED) | 2870 | if (vma->vm_flags & VM_SHARED) |
2905 | return VM_FAULT_SIGBUS; | 2871 | return VM_FAULT_SIGBUS; |
2906 | 2872 | ||
2907 | /* Check if we need to add a guard page to the stack */ | ||
2908 | if (check_stack_guard_page(vma, vmf->address) < 0) | ||
2909 | return VM_FAULT_SIGSEGV; | ||
2910 | |||
2911 | /* | 2873 | /* |
2912 | * Use pte_alloc() instead of pte_alloc_map(). We can't run | 2874 | * Use pte_alloc() instead of pte_alloc_map(). We can't run |
2913 | * pte_offset_map() on pmds where a huge pmd might be created | 2875 | * pte_offset_map() on pmds where a huge pmd might be created |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
183 | unsigned long retval; | 183 | unsigned long retval; |
184 | unsigned long newbrk, oldbrk; | 184 | unsigned long newbrk, oldbrk; |
185 | struct mm_struct *mm = current->mm; | 185 | struct mm_struct *mm = current->mm; |
186 | struct vm_area_struct *next; | ||
186 | unsigned long min_brk; | 187 | unsigned long min_brk; |
187 | bool populate; | 188 | bool populate; |
188 | LIST_HEAD(uf); | 189 | LIST_HEAD(uf); |
@@ -229,7 +230,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
229 | } | 230 | } |
230 | 231 | ||
231 | /* Check against existing mmap mappings. */ | 232 | /* Check against existing mmap mappings. */ |
232 | if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) | 233 | next = find_vma(mm, oldbrk); |
234 | if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) | ||
233 | goto out; | 235 | goto out; |
234 | 236 | ||
235 | /* Ok, looks good - let it rip. */ | 237 | /* Ok, looks good - let it rip. */ |
@@ -253,10 +255,22 @@ out: | |||
253 | 255 | ||
254 | static long vma_compute_subtree_gap(struct vm_area_struct *vma) | 256 | static long vma_compute_subtree_gap(struct vm_area_struct *vma) |
255 | { | 257 | { |
256 | unsigned long max, subtree_gap; | 258 | unsigned long max, prev_end, subtree_gap; |
257 | max = vma->vm_start; | 259 | |
258 | if (vma->vm_prev) | 260 | /* |
259 | max -= vma->vm_prev->vm_end; | 261 | * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we |
262 | * allow two stack_guard_gaps between them here, and when choosing | ||
263 | * an unmapped area; whereas when expanding we only require one. | ||
264 | * That's a little inconsistent, but keeps the code here simpler. | ||
265 | */ | ||
266 | max = vm_start_gap(vma); | ||
267 | if (vma->vm_prev) { | ||
268 | prev_end = vm_end_gap(vma->vm_prev); | ||
269 | if (max > prev_end) | ||
270 | max -= prev_end; | ||
271 | else | ||
272 | max = 0; | ||
273 | } | ||
260 | if (vma->vm_rb.rb_left) { | 274 | if (vma->vm_rb.rb_left) { |
261 | subtree_gap = rb_entry(vma->vm_rb.rb_left, | 275 | subtree_gap = rb_entry(vma->vm_rb.rb_left, |
262 | struct vm_area_struct, vm_rb)->rb_subtree_gap; | 276 | struct vm_area_struct, vm_rb)->rb_subtree_gap; |
@@ -352,7 +366,7 @@ static void validate_mm(struct mm_struct *mm) | |||
352 | anon_vma_unlock_read(anon_vma); | 366 | anon_vma_unlock_read(anon_vma); |
353 | } | 367 | } |
354 | 368 | ||
355 | highest_address = vma->vm_end; | 369 | highest_address = vm_end_gap(vma); |
356 | vma = vma->vm_next; | 370 | vma = vma->vm_next; |
357 | i++; | 371 | i++; |
358 | } | 372 | } |
@@ -541,7 +555,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, | |||
541 | if (vma->vm_next) | 555 | if (vma->vm_next) |
542 | vma_gap_update(vma->vm_next); | 556 | vma_gap_update(vma->vm_next); |
543 | else | 557 | else |
544 | mm->highest_vm_end = vma->vm_end; | 558 | mm->highest_vm_end = vm_end_gap(vma); |
545 | 559 | ||
546 | /* | 560 | /* |
547 | * vma->vm_prev wasn't known when we followed the rbtree to find the | 561 | * vma->vm_prev wasn't known when we followed the rbtree to find the |
@@ -856,7 +870,7 @@ again: | |||
856 | vma_gap_update(vma); | 870 | vma_gap_update(vma); |
857 | if (end_changed) { | 871 | if (end_changed) { |
858 | if (!next) | 872 | if (!next) |
859 | mm->highest_vm_end = end; | 873 | mm->highest_vm_end = vm_end_gap(vma); |
860 | else if (!adjust_next) | 874 | else if (!adjust_next) |
861 | vma_gap_update(next); | 875 | vma_gap_update(next); |
862 | } | 876 | } |
@@ -941,7 +955,7 @@ again: | |||
941 | * mm->highest_vm_end doesn't need any update | 955 | * mm->highest_vm_end doesn't need any update |
942 | * in remove_next == 1 case. | 956 | * in remove_next == 1 case. |
943 | */ | 957 | */ |
944 | VM_WARN_ON(mm->highest_vm_end != end); | 958 | VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); |
945 | } | 959 | } |
946 | } | 960 | } |
947 | if (insert && file) | 961 | if (insert && file) |
@@ -1787,7 +1801,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) | |||
1787 | 1801 | ||
1788 | while (true) { | 1802 | while (true) { |
1789 | /* Visit left subtree if it looks promising */ | 1803 | /* Visit left subtree if it looks promising */ |
1790 | gap_end = vma->vm_start; | 1804 | gap_end = vm_start_gap(vma); |
1791 | if (gap_end >= low_limit && vma->vm_rb.rb_left) { | 1805 | if (gap_end >= low_limit && vma->vm_rb.rb_left) { |
1792 | struct vm_area_struct *left = | 1806 | struct vm_area_struct *left = |
1793 | rb_entry(vma->vm_rb.rb_left, | 1807 | rb_entry(vma->vm_rb.rb_left, |
@@ -1798,12 +1812,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) | |||
1798 | } | 1812 | } |
1799 | } | 1813 | } |
1800 | 1814 | ||
1801 | gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; | 1815 | gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; |
1802 | check_current: | 1816 | check_current: |
1803 | /* Check if current node has a suitable gap */ | 1817 | /* Check if current node has a suitable gap */ |
1804 | if (gap_start > high_limit) | 1818 | if (gap_start > high_limit) |
1805 | return -ENOMEM; | 1819 | return -ENOMEM; |
1806 | if (gap_end >= low_limit && gap_end - gap_start >= length) | 1820 | if (gap_end >= low_limit && |
1821 | gap_end > gap_start && gap_end - gap_start >= length) | ||
1807 | goto found; | 1822 | goto found; |
1808 | 1823 | ||
1809 | /* Visit right subtree if it looks promising */ | 1824 | /* Visit right subtree if it looks promising */ |
@@ -1825,8 +1840,8 @@ check_current: | |||
1825 | vma = rb_entry(rb_parent(prev), | 1840 | vma = rb_entry(rb_parent(prev), |
1826 | struct vm_area_struct, vm_rb); | 1841 | struct vm_area_struct, vm_rb); |
1827 | if (prev == vma->vm_rb.rb_left) { | 1842 | if (prev == vma->vm_rb.rb_left) { |
1828 | gap_start = vma->vm_prev->vm_end; | 1843 | gap_start = vm_end_gap(vma->vm_prev); |
1829 | gap_end = vma->vm_start; | 1844 | gap_end = vm_start_gap(vma); |
1830 | goto check_current; | 1845 | goto check_current; |
1831 | } | 1846 | } |
1832 | } | 1847 | } |
@@ -1890,7 +1905,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) | |||
1890 | 1905 | ||
1891 | while (true) { | 1906 | while (true) { |
1892 | /* Visit right subtree if it looks promising */ | 1907 | /* Visit right subtree if it looks promising */ |
1893 | gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; | 1908 | gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; |
1894 | if (gap_start <= high_limit && vma->vm_rb.rb_right) { | 1909 | if (gap_start <= high_limit && vma->vm_rb.rb_right) { |
1895 | struct vm_area_struct *right = | 1910 | struct vm_area_struct *right = |
1896 | rb_entry(vma->vm_rb.rb_right, | 1911 | rb_entry(vma->vm_rb.rb_right, |
@@ -1903,10 +1918,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) | |||
1903 | 1918 | ||
1904 | check_current: | 1919 | check_current: |
1905 | /* Check if current node has a suitable gap */ | 1920 | /* Check if current node has a suitable gap */ |
1906 | gap_end = vma->vm_start; | 1921 | gap_end = vm_start_gap(vma); |
1907 | if (gap_end < low_limit) | 1922 | if (gap_end < low_limit) |
1908 | return -ENOMEM; | 1923 | return -ENOMEM; |
1909 | if (gap_start <= high_limit && gap_end - gap_start >= length) | 1924 | if (gap_start <= high_limit && |
1925 | gap_end > gap_start && gap_end - gap_start >= length) | ||
1910 | goto found; | 1926 | goto found; |
1911 | 1927 | ||
1912 | /* Visit left subtree if it looks promising */ | 1928 | /* Visit left subtree if it looks promising */ |
@@ -1929,7 +1945,7 @@ check_current: | |||
1929 | struct vm_area_struct, vm_rb); | 1945 | struct vm_area_struct, vm_rb); |
1930 | if (prev == vma->vm_rb.rb_right) { | 1946 | if (prev == vma->vm_rb.rb_right) { |
1931 | gap_start = vma->vm_prev ? | 1947 | gap_start = vma->vm_prev ? |
1932 | vma->vm_prev->vm_end : 0; | 1948 | vm_end_gap(vma->vm_prev) : 0; |
1933 | goto check_current; | 1949 | goto check_current; |
1934 | } | 1950 | } |
1935 | } | 1951 | } |
@@ -1967,7 +1983,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
1967 | unsigned long len, unsigned long pgoff, unsigned long flags) | 1983 | unsigned long len, unsigned long pgoff, unsigned long flags) |
1968 | { | 1984 | { |
1969 | struct mm_struct *mm = current->mm; | 1985 | struct mm_struct *mm = current->mm; |
1970 | struct vm_area_struct *vma; | 1986 | struct vm_area_struct *vma, *prev; |
1971 | struct vm_unmapped_area_info info; | 1987 | struct vm_unmapped_area_info info; |
1972 | 1988 | ||
1973 | if (len > TASK_SIZE - mmap_min_addr) | 1989 | if (len > TASK_SIZE - mmap_min_addr) |
@@ -1978,9 +1994,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
1978 | 1994 | ||
1979 | if (addr) { | 1995 | if (addr) { |
1980 | addr = PAGE_ALIGN(addr); | 1996 | addr = PAGE_ALIGN(addr); |
1981 | vma = find_vma(mm, addr); | 1997 | vma = find_vma_prev(mm, addr, &prev); |
1982 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && | 1998 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && |
1983 | (!vma || addr + len <= vma->vm_start)) | 1999 | (!vma || addr + len <= vm_start_gap(vma)) && |
2000 | (!prev || addr >= vm_end_gap(prev))) | ||
1984 | return addr; | 2001 | return addr; |
1985 | } | 2002 | } |
1986 | 2003 | ||
@@ -2003,7 +2020,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
2003 | const unsigned long len, const unsigned long pgoff, | 2020 | const unsigned long len, const unsigned long pgoff, |
2004 | const unsigned long flags) | 2021 | const unsigned long flags) |
2005 | { | 2022 | { |
2006 | struct vm_area_struct *vma; | 2023 | struct vm_area_struct *vma, *prev; |
2007 | struct mm_struct *mm = current->mm; | 2024 | struct mm_struct *mm = current->mm; |
2008 | unsigned long addr = addr0; | 2025 | unsigned long addr = addr0; |
2009 | struct vm_unmapped_area_info info; | 2026 | struct vm_unmapped_area_info info; |
@@ -2018,9 +2035,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
2018 | /* requesting a specific address */ | 2035 | /* requesting a specific address */ |
2019 | if (addr) { | 2036 | if (addr) { |
2020 | addr = PAGE_ALIGN(addr); | 2037 | addr = PAGE_ALIGN(addr); |
2021 | vma = find_vma(mm, addr); | 2038 | vma = find_vma_prev(mm, addr, &prev); |
2022 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && | 2039 | if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && |
2023 | (!vma || addr + len <= vma->vm_start)) | 2040 | (!vma || addr + len <= vm_start_gap(vma)) && |
2041 | (!prev || addr >= vm_end_gap(prev))) | ||
2024 | return addr; | 2042 | return addr; |
2025 | } | 2043 | } |
2026 | 2044 | ||
@@ -2155,21 +2173,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, | |||
2155 | * update accounting. This is shared with both the | 2173 | * update accounting. This is shared with both the |
2156 | * grow-up and grow-down cases. | 2174 | * grow-up and grow-down cases. |
2157 | */ | 2175 | */ |
2158 | static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) | 2176 | static int acct_stack_growth(struct vm_area_struct *vma, |
2177 | unsigned long size, unsigned long grow) | ||
2159 | { | 2178 | { |
2160 | struct mm_struct *mm = vma->vm_mm; | 2179 | struct mm_struct *mm = vma->vm_mm; |
2161 | struct rlimit *rlim = current->signal->rlim; | 2180 | struct rlimit *rlim = current->signal->rlim; |
2162 | unsigned long new_start, actual_size; | 2181 | unsigned long new_start; |
2163 | 2182 | ||
2164 | /* address space limit tests */ | 2183 | /* address space limit tests */ |
2165 | if (!may_expand_vm(mm, vma->vm_flags, grow)) | 2184 | if (!may_expand_vm(mm, vma->vm_flags, grow)) |
2166 | return -ENOMEM; | 2185 | return -ENOMEM; |
2167 | 2186 | ||
2168 | /* Stack limit test */ | 2187 | /* Stack limit test */ |
2169 | actual_size = size; | 2188 | if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) |
2170 | if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) | ||
2171 | actual_size -= PAGE_SIZE; | ||
2172 | if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) | ||
2173 | return -ENOMEM; | 2189 | return -ENOMEM; |
2174 | 2190 | ||
2175 | /* mlock limit tests */ | 2191 | /* mlock limit tests */ |
@@ -2207,16 +2223,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns | |||
2207 | int expand_upwards(struct vm_area_struct *vma, unsigned long address) | 2223 | int expand_upwards(struct vm_area_struct *vma, unsigned long address) |
2208 | { | 2224 | { |
2209 | struct mm_struct *mm = vma->vm_mm; | 2225 | struct mm_struct *mm = vma->vm_mm; |
2226 | struct vm_area_struct *next; | ||
2227 | unsigned long gap_addr; | ||
2210 | int error = 0; | 2228 | int error = 0; |
2211 | 2229 | ||
2212 | if (!(vma->vm_flags & VM_GROWSUP)) | 2230 | if (!(vma->vm_flags & VM_GROWSUP)) |
2213 | return -EFAULT; | 2231 | return -EFAULT; |
2214 | 2232 | ||
2215 | /* Guard against wrapping around to address 0. */ | 2233 | /* Guard against exceeding limits of the address space. */ |
2216 | if (address < PAGE_ALIGN(address+4)) | 2234 | address &= PAGE_MASK; |
2217 | address = PAGE_ALIGN(address+4); | 2235 | if (address >= TASK_SIZE) |
2218 | else | ||
2219 | return -ENOMEM; | 2236 | return -ENOMEM; |
2237 | address += PAGE_SIZE; | ||
2238 | |||
2239 | /* Enforce stack_guard_gap */ | ||
2240 | gap_addr = address + stack_guard_gap; | ||
2241 | |||
2242 | /* Guard against overflow */ | ||
2243 | if (gap_addr < address || gap_addr > TASK_SIZE) | ||
2244 | gap_addr = TASK_SIZE; | ||
2245 | |||
2246 | next = vma->vm_next; | ||
2247 | if (next && next->vm_start < gap_addr) { | ||
2248 | if (!(next->vm_flags & VM_GROWSUP)) | ||
2249 | return -ENOMEM; | ||
2250 | /* Check that both stack segments have the same anon_vma? */ | ||
2251 | } | ||
2220 | 2252 | ||
2221 | /* We must make sure the anon_vma is allocated. */ | 2253 | /* We must make sure the anon_vma is allocated. */ |
2222 | if (unlikely(anon_vma_prepare(vma))) | 2254 | if (unlikely(anon_vma_prepare(vma))) |
@@ -2261,7 +2293,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) | |||
2261 | if (vma->vm_next) | 2293 | if (vma->vm_next) |
2262 | vma_gap_update(vma->vm_next); | 2294 | vma_gap_update(vma->vm_next); |
2263 | else | 2295 | else |
2264 | mm->highest_vm_end = address; | 2296 | mm->highest_vm_end = vm_end_gap(vma); |
2265 | spin_unlock(&mm->page_table_lock); | 2297 | spin_unlock(&mm->page_table_lock); |
2266 | 2298 | ||
2267 | perf_event_mmap(vma); | 2299 | perf_event_mmap(vma); |
@@ -2282,6 +2314,8 @@ int expand_downwards(struct vm_area_struct *vma, | |||
2282 | unsigned long address) | 2314 | unsigned long address) |
2283 | { | 2315 | { |
2284 | struct mm_struct *mm = vma->vm_mm; | 2316 | struct mm_struct *mm = vma->vm_mm; |
2317 | struct vm_area_struct *prev; | ||
2318 | unsigned long gap_addr; | ||
2285 | int error; | 2319 | int error; |
2286 | 2320 | ||
2287 | address &= PAGE_MASK; | 2321 | address &= PAGE_MASK; |
@@ -2289,6 +2323,17 @@ int expand_downwards(struct vm_area_struct *vma, | |||
2289 | if (error) | 2323 | if (error) |
2290 | return error; | 2324 | return error; |
2291 | 2325 | ||
2326 | /* Enforce stack_guard_gap */ | ||
2327 | gap_addr = address - stack_guard_gap; | ||
2328 | if (gap_addr > address) | ||
2329 | return -ENOMEM; | ||
2330 | prev = vma->vm_prev; | ||
2331 | if (prev && prev->vm_end > gap_addr) { | ||
2332 | if (!(prev->vm_flags & VM_GROWSDOWN)) | ||
2333 | return -ENOMEM; | ||
2334 | /* Check that both stack segments have the same anon_vma? */ | ||
2335 | } | ||
2336 | |||
2292 | /* We must make sure the anon_vma is allocated. */ | 2337 | /* We must make sure the anon_vma is allocated. */ |
2293 | if (unlikely(anon_vma_prepare(vma))) | 2338 | if (unlikely(anon_vma_prepare(vma))) |
2294 | return -ENOMEM; | 2339 | return -ENOMEM; |
@@ -2343,28 +2388,25 @@ int expand_downwards(struct vm_area_struct *vma, | |||
2343 | return error; | 2388 | return error; |
2344 | } | 2389 | } |
2345 | 2390 | ||
2346 | /* | 2391 | /* enforced gap between the expanding stack and other mappings. */ |
2347 | * Note how expand_stack() refuses to expand the stack all the way to | 2392 | unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; |
2348 | * abut the next virtual mapping, *unless* that mapping itself is also | 2393 | |
2349 | * a stack mapping. We want to leave room for a guard page, after all | 2394 | static int __init cmdline_parse_stack_guard_gap(char *p) |
2350 | * (the guard page itself is not added here, that is done by the | 2395 | { |
2351 | * actual page faulting logic) | 2396 | unsigned long val; |
2352 | * | 2397 | char *endptr; |
2353 | * This matches the behavior of the guard page logic (see mm/memory.c: | 2398 | |
2354 | * check_stack_guard_page()), which only allows the guard page to be | 2399 | val = simple_strtoul(p, &endptr, 10); |
2355 | * removed under these circumstances. | 2400 | if (!*endptr) |
2356 | */ | 2401 | stack_guard_gap = val << PAGE_SHIFT; |
2402 | |||
2403 | return 0; | ||
2404 | } | ||
2405 | __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); | ||
2406 | |||
2357 | #ifdef CONFIG_STACK_GROWSUP | 2407 | #ifdef CONFIG_STACK_GROWSUP |
2358 | int expand_stack(struct vm_area_struct *vma, unsigned long address) | 2408 | int expand_stack(struct vm_area_struct *vma, unsigned long address) |
2359 | { | 2409 | { |
2360 | struct vm_area_struct *next; | ||
2361 | |||
2362 | address &= PAGE_MASK; | ||
2363 | next = vma->vm_next; | ||
2364 | if (next && next->vm_start == address + PAGE_SIZE) { | ||
2365 | if (!(next->vm_flags & VM_GROWSUP)) | ||
2366 | return -ENOMEM; | ||
2367 | } | ||
2368 | return expand_upwards(vma, address); | 2410 | return expand_upwards(vma, address); |
2369 | } | 2411 | } |
2370 | 2412 | ||
@@ -2386,14 +2428,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) | |||
2386 | #else | 2428 | #else |
2387 | int expand_stack(struct vm_area_struct *vma, unsigned long address) | 2429 | int expand_stack(struct vm_area_struct *vma, unsigned long address) |
2388 | { | 2430 | { |
2389 | struct vm_area_struct *prev; | ||
2390 | |||
2391 | address &= PAGE_MASK; | ||
2392 | prev = vma->vm_prev; | ||
2393 | if (prev && prev->vm_end == address) { | ||
2394 | if (!(prev->vm_flags & VM_GROWSDOWN)) | ||
2395 | return -ENOMEM; | ||
2396 | } | ||
2397 | return expand_downwards(vma, address); | 2431 | return expand_downwards(vma, address); |
2398 | } | 2432 | } |
2399 | 2433 | ||
@@ -2491,7 +2525,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2491 | vma->vm_prev = prev; | 2525 | vma->vm_prev = prev; |
2492 | vma_gap_update(vma); | 2526 | vma_gap_update(vma); |
2493 | } else | 2527 | } else |
2494 | mm->highest_vm_end = prev ? prev->vm_end : 0; | 2528 | mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; |
2495 | tail_vma->vm_next = NULL; | 2529 | tail_vma->vm_next = NULL; |
2496 | 2530 | ||
2497 | /* Kill the cache */ | 2531 | /* Kill the cache */ |
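The mm/mmap.c hunks above lean on two small helpers, vm_start_gap() and vm_end_gap(), which widen a stack VMA by stack_guard_gap in its growth direction whenever gaps are computed; they are added to include/linux/mm.h elsewhere in this merge and are not shown here. Roughly, as a sketch under that assumption:

    /* Sketch of the helpers used above; the in-tree versions may clamp the
     * over/underflow cases slightly differently.
     */
    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_start = vma->vm_start;

            if (vma->vm_flags & VM_GROWSDOWN) {
                    vm_start -= stack_guard_gap;
                    if (vm_start > vma->vm_start)   /* underflow */
                            vm_start = 0;
            }
            return vm_start;
    }

    static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_end = vma->vm_end;

            if (vma->vm_flags & VM_GROWSUP) {
                    vm_end += stack_guard_gap;
                    if (vm_end < vma->vm_end)       /* overflow */
                            vm_end = -PAGE_SIZE;
            }
            return vm_end;
    }
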
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s) | |||
5625 | return name; | 5625 | return name; |
5626 | } | 5626 | } |
5627 | 5627 | ||
5628 | static void sysfs_slab_remove_workfn(struct work_struct *work) | ||
5629 | { | ||
5630 | struct kmem_cache *s = | ||
5631 | container_of(work, struct kmem_cache, kobj_remove_work); | ||
5632 | |||
5633 | if (!s->kobj.state_in_sysfs) | ||
5634 | /* | ||
5635 | * For a memcg cache, this may be called during | ||
5636 | * deactivation and again on shutdown. Remove only once. | ||
5637 | * A cache is never shut down before deactivation is | ||
5638 | * complete, so no need to worry about synchronization. | ||
5639 | */ | ||
5640 | return; | ||
5641 | |||
5642 | #ifdef CONFIG_MEMCG | ||
5643 | kset_unregister(s->memcg_kset); | ||
5644 | #endif | ||
5645 | kobject_uevent(&s->kobj, KOBJ_REMOVE); | ||
5646 | kobject_del(&s->kobj); | ||
5647 | kobject_put(&s->kobj); | ||
5648 | } | ||
5649 | |||
5628 | static int sysfs_slab_add(struct kmem_cache *s) | 5650 | static int sysfs_slab_add(struct kmem_cache *s) |
5629 | { | 5651 | { |
5630 | int err; | 5652 | int err; |
@@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s) | |||
5632 | struct kset *kset = cache_kset(s); | 5654 | struct kset *kset = cache_kset(s); |
5633 | int unmergeable = slab_unmergeable(s); | 5655 | int unmergeable = slab_unmergeable(s); |
5634 | 5656 | ||
5657 | INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn); | ||
5658 | |||
5635 | if (!kset) { | 5659 | if (!kset) { |
5636 | kobject_init(&s->kobj, &slab_ktype); | 5660 | kobject_init(&s->kobj, &slab_ktype); |
5637 | return 0; | 5661 | return 0; |
@@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s) | |||
5695 | */ | 5719 | */ |
5696 | return; | 5720 | return; |
5697 | 5721 | ||
5698 | if (!s->kobj.state_in_sysfs) | 5722 | kobject_get(&s->kobj); |
5699 | /* | 5723 | schedule_work(&s->kobj_remove_work); |
5700 | * For a memcg cache, this may be called during | ||
5701 | * deactivation and again on shutdown. Remove only once. | ||
5702 | * A cache is never shut down before deactivation is | ||
5703 | * complete, so no need to worry about synchronization. | ||
5704 | */ | ||
5705 | return; | ||
5706 | |||
5707 | #ifdef CONFIG_MEMCG | ||
5708 | kset_unregister(s->memcg_kset); | ||
5709 | #endif | ||
5710 | kobject_uevent(&s->kobj, KOBJ_REMOVE); | ||
5711 | kobject_del(&s->kobj); | ||
5712 | } | 5724 | } |
5713 | 5725 | ||
5714 | void sysfs_slab_release(struct kmem_cache *s) | 5726 | void sysfs_slab_release(struct kmem_cache *s) |
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index ac6318a064d3..3405b4ee1757 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c | |||
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type) | |||
48 | if (!page) | 48 | if (!page) |
49 | goto not_enough_page; | 49 | goto not_enough_page; |
50 | ctrl->map[idx] = page; | 50 | ctrl->map[idx] = page; |
51 | |||
52 | if (!(idx % SWAP_CLUSTER_MAX)) | ||
53 | cond_resched(); | ||
51 | } | 54 | } |
52 | return 0; | 55 | return 0; |
53 | not_enough_page: | 56 | not_enough_page: |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 34a1c3e46ed7..ecc97f74ab18 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) | |||
287 | if (p4d_none(*p4d)) | 287 | if (p4d_none(*p4d)) |
288 | return NULL; | 288 | return NULL; |
289 | pud = pud_offset(p4d, addr); | 289 | pud = pud_offset(p4d, addr); |
290 | if (pud_none(*pud)) | 290 | |
291 | /* | ||
292 | * Don't dereference bad PUD or PMD (below) entries. This will also | ||
293 | * identify huge mappings, which we may encounter on architectures | ||
294 | * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be | ||
295 | * identified as vmalloc addresses by is_vmalloc_addr(), but are | ||
296 | * not [unambiguously] associated with a struct page, so there is | ||
297 | * no correct value to return for them. | ||
298 | */ | ||
299 | WARN_ON_ONCE(pud_bad(*pud)); | ||
300 | if (pud_none(*pud) || pud_bad(*pud)) | ||
291 | return NULL; | 301 | return NULL; |
292 | pmd = pmd_offset(pud, addr); | 302 | pmd = pmd_offset(pud, addr); |
293 | if (pmd_none(*pmd)) | 303 | WARN_ON_ONCE(pmd_bad(*pmd)); |
304 | if (pmd_none(*pmd) || pmd_bad(*pmd)) | ||
294 | return NULL; | 305 | return NULL; |
295 | 306 | ||
296 | ptep = pte_offset_map(pmd, addr); | 307 | ptep = pte_offset_map(pmd, addr); |
diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 6063581f705c..ce0618bfa8d0 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c | |||
@@ -115,9 +115,9 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, | |||
115 | unsigned long pressure = 0; | 115 | unsigned long pressure = 0; |
116 | 116 | ||
117 | /* | 117 | /* |
118 | * reclaimed can be greater than scanned in cases | 118 | * reclaimed can be greater than scanned for things such as reclaimed |
119 | * like THP, where the scanned is 1 and reclaimed | 119 | * slab pages. shrink_node() just adds reclaimed pages without a |
120 | * could be 512 | 120 | * related increment to scanned pages. |
121 | */ | 121 | */ |
122 | if (reclaimed >= scanned) | 122 | if (reclaimed >= scanned) |
123 | goto out; | 123 | goto out; |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 467069b73ce1..9649579b5b9f 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) | |||
277 | return 0; | 277 | return 0; |
278 | 278 | ||
279 | out_free_newdev: | 279 | out_free_newdev: |
280 | free_netdev(new_dev); | 280 | if (new_dev->reg_state == NETREG_UNINITIALIZED) |
281 | free_netdev(new_dev); | ||
281 | return err; | 282 | return err; |
282 | } | 283 | } |
283 | 284 | ||
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 953b6728bd00..abc5f400fc71 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -813,7 +813,6 @@ static void vlan_dev_free(struct net_device *dev) | |||
813 | 813 | ||
814 | free_percpu(vlan->vlan_pcpu_stats); | 814 | free_percpu(vlan->vlan_pcpu_stats); |
815 | vlan->vlan_pcpu_stats = NULL; | 815 | vlan->vlan_pcpu_stats = NULL; |
816 | free_netdev(dev); | ||
817 | } | 816 | } |
818 | 817 | ||
819 | void vlan_setup(struct net_device *dev) | 818 | void vlan_setup(struct net_device *dev) |
@@ -826,7 +825,8 @@ void vlan_setup(struct net_device *dev) | |||
826 | netif_keep_dst(dev); | 825 | netif_keep_dst(dev); |
827 | 826 | ||
828 | dev->netdev_ops = &vlan_netdev_ops; | 827 | dev->netdev_ops = &vlan_netdev_ops; |
829 | dev->destructor = vlan_dev_free; | 828 | dev->needs_free_netdev = true; |
829 | dev->priv_destructor = vlan_dev_free; | ||
830 | dev->ethtool_ops = &vlan_ethtool_ops; | 830 | dev->ethtool_ops = &vlan_ethtool_ops; |
831 | 831 | ||
832 | dev->min_mtu = 0; | 832 | dev->min_mtu = 0; |
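The vlan change above is one instance of a conversion that repeats throughout this merge (batman-adv, bridge, caif, hsr, the IPv4/IPv6 tunnels, l2tp and others below): the single dev->destructor hook, which drivers typically ended with free_netdev(), is split into a needs_free_netdev flag and a priv_destructor callback, so the core frees the netdev and the driver only tears down its private state. A before/after sketch of such a driver; foo_dev_free and the tstats cleanup are illustrative, not taken from any one file:

    /* Before: one hook did private cleanup and freed the netdev. */
    static void foo_dev_free(struct net_device *dev)
    {
            free_percpu(dev->tstats);       /* driver-private cleanup */
            free_netdev(dev);
    }
    /* ... in the driver's setup function ... */
            dev->destructor = foo_dev_free;

    /* After: the core frees the netdev when asked to; the driver
     * hook only releases private state and must not call
     * free_netdev() itself.
     */
    static void foo_dev_free(struct net_device *dev)
    {
            free_percpu(dev->tstats);
    }
    /* ... in the driver's setup function ... */
            dev->needs_free_netdev = true;
            dev->priv_destructor = foo_dev_free;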
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 013e970eff39..000ca2f113ab 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
@@ -1064,8 +1064,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, | |||
1064 | 1064 | ||
1065 | skb_new->protocol = eth_type_trans(skb_new, soft_iface); | 1065 | skb_new->protocol = eth_type_trans(skb_new, soft_iface); |
1066 | 1066 | ||
1067 | soft_iface->stats.rx_packets++; | 1067 | batadv_inc_counter(bat_priv, BATADV_CNT_RX); |
1068 | soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size; | 1068 | batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, |
1069 | skb->len + ETH_HLEN + hdr_size); | ||
1069 | 1070 | ||
1070 | netif_rx(skb_new); | 1071 | netif_rx(skb_new); |
1071 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); | 1072 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index e1ebe14ee2a6..ae9f4d37d34f 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -987,7 +987,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
987 | batadv_dbg(BATADV_DBG_BLA, bat_priv, | 987 | batadv_dbg(BATADV_DBG_BLA, bat_priv, |
988 | "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n", | 988 | "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n", |
989 | orig_addr_gw); | 989 | orig_addr_gw); |
990 | return NET_RX_DROP; | 990 | goto free_skb; |
991 | } | 991 | } |
992 | } | 992 | } |
993 | 993 | ||
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index b25789abf7b9..10f7edfb176e 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -1034,8 +1034,6 @@ static void batadv_softif_free(struct net_device *dev) | |||
1034 | * netdev and its private data (bat_priv) | 1034 | * netdev and its private data (bat_priv) |
1035 | */ | 1035 | */ |
1036 | rcu_barrier(); | 1036 | rcu_barrier(); |
1037 | |||
1038 | free_netdev(dev); | ||
1039 | } | 1037 | } |
1040 | 1038 | ||
1041 | /** | 1039 | /** |
@@ -1047,7 +1045,8 @@ static void batadv_softif_init_early(struct net_device *dev) | |||
1047 | ether_setup(dev); | 1045 | ether_setup(dev); |
1048 | 1046 | ||
1049 | dev->netdev_ops = &batadv_netdev_ops; | 1047 | dev->netdev_ops = &batadv_netdev_ops; |
1050 | dev->destructor = batadv_softif_free; | 1048 | dev->needs_free_netdev = true; |
1049 | dev->priv_destructor = batadv_softif_free; | ||
1051 | dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; | 1050 | dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; |
1052 | dev->priv_flags |= IFF_NO_QUEUE; | 1051 | dev->priv_flags |= IFF_NO_QUEUE; |
1053 | 1052 | ||
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 608959989f8e..ab3b654b05cc 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c | |||
@@ -598,7 +598,7 @@ static void netdev_setup(struct net_device *dev) | |||
598 | 598 | ||
599 | dev->netdev_ops = &netdev_ops; | 599 | dev->netdev_ops = &netdev_ops; |
600 | dev->header_ops = &header_ops; | 600 | dev->header_ops = &header_ops; |
601 | dev->destructor = free_netdev; | 601 | dev->needs_free_netdev = true; |
602 | } | 602 | } |
603 | 603 | ||
604 | static struct device_type bt_type = { | 604 | static struct device_type bt_type = { |
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 430b53e7d941..f0f3447e8aa4 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev) | |||
379 | ether_setup(dev); | 379 | ether_setup(dev); |
380 | 380 | ||
381 | dev->netdev_ops = &br_netdev_ops; | 381 | dev->netdev_ops = &br_netdev_ops; |
382 | dev->destructor = free_netdev; | 382 | dev->needs_free_netdev = true; |
383 | dev->ethtool_ops = &br_ethtool_ops; | 383 | dev->ethtool_ops = &br_ethtool_ops; |
384 | SET_NETDEV_DEVTYPE(dev, &br_type); | 384 | SET_NETDEV_DEVTYPE(dev, &br_type); |
385 | dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; | 385 | dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index adcad344c843..21f18ea2fce4 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -754,6 +754,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, | |||
754 | 754 | ||
755 | lock_sock(sk); | 755 | lock_sock(sk); |
756 | 756 | ||
757 | err = -EINVAL; | ||
758 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
759 | goto out; | ||
760 | |||
757 | err = -EAFNOSUPPORT; | 761 | err = -EAFNOSUPPORT; |
758 | if (uaddr->sa_family != AF_CAIF) | 762 | if (uaddr->sa_family != AF_CAIF) |
759 | goto out; | 763 | goto out; |
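The caif_connect() hunk validates the caller-supplied address length before reading sa_family. offsetofend() is the kernel helper used for that check; a small standalone illustration, where the wrapper function is made up for the example and the macro definition mirrors the kernel's:

    #include <errno.h>
    #include <stddef.h>
    #include <sys/socket.h>

    /* offset of the first byte past MEMBER, as in the kernel macro */
    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    static int check_connect_addr_len(int addr_len)
    {
            /* too short to even contain sa_family: reject early */
            if (addr_len < (int)offsetofend(struct sockaddr, sa_family))
                    return -EINVAL;
            return 0;
    }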
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 59ce1fcc220c..71b6ab240dea 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c | |||
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) | |||
81 | { | 81 | { |
82 | struct sk_buff *skb; | 82 | struct sk_buff *skb; |
83 | 83 | ||
84 | if (likely(in_interrupt())) | 84 | skb = alloc_skb(len + pfx, GFP_ATOMIC); |
85 | skb = alloc_skb(len + pfx, GFP_ATOMIC); | ||
86 | else | ||
87 | skb = alloc_skb(len + pfx, GFP_KERNEL); | ||
88 | |||
89 | if (unlikely(skb == NULL)) | 85 | if (unlikely(skb == NULL)) |
90 | return NULL; | 86 | return NULL; |
91 | 87 | ||
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 1816fc9f1ee7..fe3c53efb949 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c | |||
@@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev) | |||
392 | { | 392 | { |
393 | struct chnl_net *priv = netdev_priv(dev); | 393 | struct chnl_net *priv = netdev_priv(dev); |
394 | caif_free_client(&priv->chnl); | 394 | caif_free_client(&priv->chnl); |
395 | free_netdev(dev); | ||
396 | } | 395 | } |
397 | 396 | ||
398 | static void ipcaif_net_setup(struct net_device *dev) | 397 | static void ipcaif_net_setup(struct net_device *dev) |
399 | { | 398 | { |
400 | struct chnl_net *priv; | 399 | struct chnl_net *priv; |
401 | dev->netdev_ops = &netdev_ops; | 400 | dev->netdev_ops = &netdev_ops; |
402 | dev->destructor = chnl_net_destructor; | 401 | dev->needs_free_netdev = true; |
402 | dev->priv_destructor = chnl_net_destructor; | ||
403 | dev->flags |= IFF_NOARP; | 403 | dev->flags |= IFF_NOARP; |
404 | dev->flags |= IFF_POINTOPOINT; | 404 | dev->flags |= IFF_POINTOPOINT; |
405 | dev->mtu = GPRS_PDP_MTU; | 405 | dev->mtu = GPRS_PDP_MTU; |
diff --git a/net/can/af_can.c b/net/can/af_can.c index b6406fe33c76..88edac0f3e36 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -872,8 +872,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, | |||
872 | 872 | ||
873 | static int can_pernet_init(struct net *net) | 873 | static int can_pernet_init(struct net *net) |
874 | { | 874 | { |
875 | net->can.can_rcvlists_lock = | 875 | spin_lock_init(&net->can.can_rcvlists_lock); |
876 | __SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock); | ||
877 | net->can.can_rx_alldev_list = | 876 | net->can.can_rx_alldev_list = |
878 | kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); | 877 | kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); |
879 | 878 | ||
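In af_can.c the per-namespace receive-list lock is now set up with spin_lock_init() instead of assigning the __SPIN_LOCK_UNLOCKED() initializer to an already-allocated structure; the macro form is meant for static definitions, while locks embedded in runtime-allocated objects should be initialised at runtime (which also keeps lockdep happy). The shape of the change:

    /* a static lock can use the initializer macro */
    static DEFINE_SPINLOCK(some_static_lock);

    /* a lock living in per-netns (runtime-allocated) state is
     * initialised when that state is created
     */
    static int can_pernet_init(struct net *net)
    {
            spin_lock_init(&net->can.can_rcvlists_lock);
            /* remaining per-namespace setup continues as in the hunk */
            return 0;
    }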
diff --git a/net/core/dev.c b/net/core/dev.c index fca407b4a6ea..7243421c9783 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1253,8 +1253,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) | |||
1253 | if (!new_ifalias) | 1253 | if (!new_ifalias) |
1254 | return -ENOMEM; | 1254 | return -ENOMEM; |
1255 | dev->ifalias = new_ifalias; | 1255 | dev->ifalias = new_ifalias; |
1256 | memcpy(dev->ifalias, alias, len); | ||
1257 | dev->ifalias[len] = 0; | ||
1256 | 1258 | ||
1257 | strlcpy(dev->ifalias, alias, len+1); | ||
1258 | return len; | 1259 | return len; |
1259 | } | 1260 | } |
1260 | 1261 | ||
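The dev_set_alias() hunk stops using strlcpy() for the alias: the source arrives with an explicit length and is not guaranteed to be NUL-terminated, and strlcpy() scans the source for a terminator. Copying exactly len bytes and terminating by hand avoids reading past the supplied buffer:

    /* 'alias' is len bytes long and may not be NUL-terminated */
    memcpy(dev->ifalias, alias, len);
    dev->ifalias[len] = 0;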
@@ -4948,6 +4949,19 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) | |||
4948 | } | 4949 | } |
4949 | EXPORT_SYMBOL(__skb_gro_checksum_complete); | 4950 | EXPORT_SYMBOL(__skb_gro_checksum_complete); |
4950 | 4951 | ||
4952 | static void net_rps_send_ipi(struct softnet_data *remsd) | ||
4953 | { | ||
4954 | #ifdef CONFIG_RPS | ||
4955 | while (remsd) { | ||
4956 | struct softnet_data *next = remsd->rps_ipi_next; | ||
4957 | |||
4958 | if (cpu_online(remsd->cpu)) | ||
4959 | smp_call_function_single_async(remsd->cpu, &remsd->csd); | ||
4960 | remsd = next; | ||
4961 | } | ||
4962 | #endif | ||
4963 | } | ||
4964 | |||
4951 | /* | 4965 | /* |
4952 | * net_rps_action_and_irq_enable sends any pending IPI's for rps. | 4966 | * net_rps_action_and_irq_enable sends any pending IPI's for rps. |
4953 | * Note: called with local irq disabled, but exits with local irq enabled. | 4967 | * Note: called with local irq disabled, but exits with local irq enabled. |
@@ -4963,14 +4977,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) | |||
4963 | local_irq_enable(); | 4977 | local_irq_enable(); |
4964 | 4978 | ||
4965 | /* Send pending IPI's to kick RPS processing on remote cpus. */ | 4979 | /* Send pending IPI's to kick RPS processing on remote cpus. */ |
4966 | while (remsd) { | 4980 | net_rps_send_ipi(remsd); |
4967 | struct softnet_data *next = remsd->rps_ipi_next; | ||
4968 | |||
4969 | if (cpu_online(remsd->cpu)) | ||
4970 | smp_call_function_single_async(remsd->cpu, | ||
4971 | &remsd->csd); | ||
4972 | remsd = next; | ||
4973 | } | ||
4974 | } else | 4981 | } else |
4975 | #endif | 4982 | #endif |
4976 | local_irq_enable(); | 4983 | local_irq_enable(); |
@@ -5199,8 +5206,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) | |||
5199 | if (rc == BUSY_POLL_BUDGET) | 5206 | if (rc == BUSY_POLL_BUDGET) |
5200 | __napi_schedule(napi); | 5207 | __napi_schedule(napi); |
5201 | local_bh_enable(); | 5208 | local_bh_enable(); |
5202 | if (local_softirq_pending()) | ||
5203 | do_softirq(); | ||
5204 | } | 5209 | } |
5205 | 5210 | ||
5206 | void napi_busy_loop(unsigned int napi_id, | 5211 | void napi_busy_loop(unsigned int napi_id, |
@@ -7501,6 +7506,8 @@ out: | |||
7501 | err_uninit: | 7506 | err_uninit: |
7502 | if (dev->netdev_ops->ndo_uninit) | 7507 | if (dev->netdev_ops->ndo_uninit) |
7503 | dev->netdev_ops->ndo_uninit(dev); | 7508 | dev->netdev_ops->ndo_uninit(dev); |
7509 | if (dev->priv_destructor) | ||
7510 | dev->priv_destructor(dev); | ||
7504 | goto out; | 7511 | goto out; |
7505 | } | 7512 | } |
7506 | EXPORT_SYMBOL(register_netdevice); | 7513 | EXPORT_SYMBOL(register_netdevice); |
@@ -7708,8 +7715,10 @@ void netdev_run_todo(void) | |||
7708 | WARN_ON(rcu_access_pointer(dev->ip6_ptr)); | 7715 | WARN_ON(rcu_access_pointer(dev->ip6_ptr)); |
7709 | WARN_ON(dev->dn_ptr); | 7716 | WARN_ON(dev->dn_ptr); |
7710 | 7717 | ||
7711 | if (dev->destructor) | 7718 | if (dev->priv_destructor) |
7712 | dev->destructor(dev); | 7719 | dev->priv_destructor(dev); |
7720 | if (dev->needs_free_netdev) | ||
7721 | free_netdev(dev); | ||
7713 | 7722 | ||
7714 | /* Report a network device has been unregistered */ | 7723 | /* Report a network device has been unregistered */ |
7715 | rtnl_lock(); | 7724 | rtnl_lock(); |
@@ -8192,7 +8201,7 @@ static int dev_cpu_dead(unsigned int oldcpu) | |||
8192 | struct sk_buff **list_skb; | 8201 | struct sk_buff **list_skb; |
8193 | struct sk_buff *skb; | 8202 | struct sk_buff *skb; |
8194 | unsigned int cpu; | 8203 | unsigned int cpu; |
8195 | struct softnet_data *sd, *oldsd; | 8204 | struct softnet_data *sd, *oldsd, *remsd = NULL; |
8196 | 8205 | ||
8197 | local_irq_disable(); | 8206 | local_irq_disable(); |
8198 | cpu = smp_processor_id(); | 8207 | cpu = smp_processor_id(); |
@@ -8233,6 +8242,13 @@ static int dev_cpu_dead(unsigned int oldcpu) | |||
8233 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | 8242 | raise_softirq_irqoff(NET_TX_SOFTIRQ); |
8234 | local_irq_enable(); | 8243 | local_irq_enable(); |
8235 | 8244 | ||
8245 | #ifdef CONFIG_RPS | ||
8246 | remsd = oldsd->rps_ipi_list; | ||
8247 | oldsd->rps_ipi_list = NULL; | ||
8248 | #endif | ||
8249 | /* send out pending IPI's on offline CPU */ | ||
8250 | net_rps_send_ipi(remsd); | ||
8251 | |||
8236 | /* Process offline CPU's input_pkt_queue */ | 8252 | /* Process offline CPU's input_pkt_queue */ |
8237 | while ((skb = __skb_dequeue(&oldsd->process_queue))) { | 8253 | while ((skb = __skb_dequeue(&oldsd->process_queue))) { |
8238 | netif_rx_ni(skb); | 8254 | netif_rx_ni(skb); |
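Two themes run through the net/core/dev.c hunks: RPS IPI sending is factored out into net_rps_send_ipi() so the CPU-hotplug path in dev_cpu_dead() can flush IPIs left pending by an offlined CPU, and the core now performs the final teardown for the destructor split. The dispatch in netdev_run_todo() (the register_netdevice() error path runs only the priv_destructor half) is roughly:

    /* driver-private cleanup first, then the core frees the netdev
     * itself if the driver requested it via needs_free_netdev
     */
    if (dev->priv_destructor)
            dev->priv_destructor(dev);
    if (dev->needs_free_netdev)
            free_netdev(dev);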
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index b94b1d293506..27fad31784a8 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c | |||
@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
410 | if (cmd == SIOCGIFNAME) | 410 | if (cmd == SIOCGIFNAME) |
411 | return dev_ifname(net, (struct ifreq __user *)arg); | 411 | return dev_ifname(net, (struct ifreq __user *)arg); |
412 | 412 | ||
413 | /* | ||
414 | * Take care of Wireless Extensions. Unfortunately struct iwreq | ||
415 | * isn't a proper subset of struct ifreq (it's 8 bytes shorter) | ||
416 | * so we need to treat it specially, otherwise applications may | ||
417 | * fault if the struct they're passing happens to land at the | ||
418 | * end of a mapped page. | ||
419 | */ | ||
420 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { | ||
421 | struct iwreq iwr; | ||
422 | |||
423 | if (copy_from_user(&iwr, arg, sizeof(iwr))) | ||
424 | return -EFAULT; | ||
425 | |||
426 | return wext_handle_ioctl(net, &iwr, cmd, arg); | ||
427 | } | ||
428 | |||
413 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | 429 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) |
414 | return -EFAULT; | 430 | return -EFAULT; |
415 | 431 | ||
@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
559 | ret = -EFAULT; | 575 | ret = -EFAULT; |
560 | return ret; | 576 | return ret; |
561 | } | 577 | } |
562 | /* Take care of Wireless Extensions */ | ||
563 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) | ||
564 | return wext_handle_ioctl(net, &ifr, cmd, arg); | ||
565 | return -ENOTTY; | 578 | return -ENOTTY; |
566 | } | 579 | } |
567 | } | 580 | } |
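The dev_ioctl() change handles wireless-extension ioctls before the generic copy_from_user() of a full struct ifreq, because struct iwreq is smaller and the larger copy can run past the end of the caller's buffer. The size difference is easy to confirm from userspace (exact values depend on the ABI; on x86-64 iwreq comes out 8 bytes shorter):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/if.h>           /* struct ifreq */
    #include <linux/wireless.h>     /* struct iwreq */

    int main(void)
    {
            printf("sizeof(struct ifreq) = %zu\n", sizeof(struct ifreq));
            printf("sizeof(struct iwreq) = %zu\n", sizeof(struct iwreq));
            return 0;
    }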
diff --git a/net/core/dst.c b/net/core/dst.c index 6192f11beec9..13ba4a090c41 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -469,6 +469,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, | |||
469 | spin_lock_bh(&dst_garbage.lock); | 469 | spin_lock_bh(&dst_garbage.lock); |
470 | dst = dst_garbage.list; | 470 | dst = dst_garbage.list; |
471 | dst_garbage.list = NULL; | 471 | dst_garbage.list = NULL; |
472 | /* The code in dst_ifdown places a hold on the loopback device. | ||
473 | * If the gc entry processing is set to expire after a lengthy | ||
474 | * interval, this hold can cause netdev_wait_allrefs() to hang | ||
475 | * out and wait for a long time -- until the loopback | ||
476 | * interface is released. If we're really unlucky, it'll emit | ||
477 | * pr_emerg messages to console too. Reset the interval here, | ||
478 | * so dst cleanups occur in a more timely fashion. | ||
479 | */ | ||
480 | if (dst_garbage.timer_inc > DST_GC_INC) { | ||
481 | dst_garbage.timer_inc = DST_GC_INC; | ||
482 | dst_garbage.timer_expires = DST_GC_MIN; | ||
483 | mod_delayed_work(system_wq, &dst_gc_work, | ||
484 | dst_garbage.timer_expires); | ||
485 | } | ||
472 | spin_unlock_bh(&dst_garbage.lock); | 486 | spin_unlock_bh(&dst_garbage.lock); |
473 | 487 | ||
474 | if (last) | 488 | if (last) |
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index f21c4d3aeae0..3bba291c6c32 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
568 | struct net *net = sock_net(skb->sk); | 568 | struct net *net = sock_net(skb->sk); |
569 | struct fib_rule_hdr *frh = nlmsg_data(nlh); | 569 | struct fib_rule_hdr *frh = nlmsg_data(nlh); |
570 | struct fib_rules_ops *ops = NULL; | 570 | struct fib_rules_ops *ops = NULL; |
571 | struct fib_rule *rule, *tmp; | 571 | struct fib_rule *rule, *r; |
572 | struct nlattr *tb[FRA_MAX+1]; | 572 | struct nlattr *tb[FRA_MAX+1]; |
573 | struct fib_kuid_range range; | 573 | struct fib_kuid_range range; |
574 | int err = -EINVAL; | 574 | int err = -EINVAL; |
@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
668 | 668 | ||
669 | /* | 669 | /* |
670 | * Check if this rule is a target to any of them. If so, | 670 | * Check if this rule is a target to any of them. If so, |
671 | * adjust to the next one with the same preference or | ||
671 | * disable them. As this operation is eventually very | 672 | * disable them. As this operation is eventually very |
672 | * expensive, it is only performed if goto rules have | 673 | * expensive, it is only performed if goto rules, except |
673 | * actually been added. | 674 | * the current rule when it is itself a goto rule, have actually been added. |
674 | */ | 675 | */ |
675 | if (ops->nr_goto_rules > 0) { | 676 | if (ops->nr_goto_rules > 0) { |
676 | list_for_each_entry(tmp, &ops->rules_list, list) { | 677 | struct fib_rule *n; |
677 | if (rtnl_dereference(tmp->ctarget) == rule) { | 678 | |
678 | RCU_INIT_POINTER(tmp->ctarget, NULL); | 679 | n = list_next_entry(rule, list); |
680 | if (&n->list == &ops->rules_list || n->pref != rule->pref) | ||
681 | n = NULL; | ||
682 | list_for_each_entry(r, &ops->rules_list, list) { | ||
683 | if (rtnl_dereference(r->ctarget) != rule) | ||
684 | continue; | ||
685 | rcu_assign_pointer(r->ctarget, n); | ||
686 | if (!n) | ||
679 | ops->unresolved_rules++; | 687 | ops->unresolved_rules++; |
680 | } | ||
681 | } | 688 | } |
682 | } | 689 | } |
683 | 690 | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9e2c0a7cb325..467a2f4510a7 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -931,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, | |||
931 | + nla_total_size(1) /* IFLA_LINKMODE */ | 931 | + nla_total_size(1) /* IFLA_LINKMODE */ |
932 | + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ | 932 | + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ |
933 | + nla_total_size(4) /* IFLA_LINK_NETNSID */ | 933 | + nla_total_size(4) /* IFLA_LINK_NETNSID */ |
934 | + nla_total_size(4) /* IFLA_GROUP */ | ||
934 | + nla_total_size(ext_filter_mask | 935 | + nla_total_size(ext_filter_mask |
935 | & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ | 936 | & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ |
936 | + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ | 937 | + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ |
@@ -1124,6 +1125,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, | |||
1124 | struct ifla_vf_mac vf_mac; | 1125 | struct ifla_vf_mac vf_mac; |
1125 | struct ifla_vf_info ivi; | 1126 | struct ifla_vf_info ivi; |
1126 | 1127 | ||
1128 | memset(&ivi, 0, sizeof(ivi)); | ||
1129 | |||
1127 | /* Not all SR-IOV capable drivers support the | 1130 | /* Not all SR-IOV capable drivers support the |
1128 | * spoofcheck and "RSS query enable" query. Preset to | 1131 | * spoofcheck and "RSS query enable" query. Preset to |
1129 | * -1 so the user space tool can detect that the driver | 1132 | * -1 so the user space tool can detect that the driver |
@@ -1132,7 +1135,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, | |||
1132 | ivi.spoofchk = -1; | 1135 | ivi.spoofchk = -1; |
1133 | ivi.rss_query_en = -1; | 1136 | ivi.rss_query_en = -1; |
1134 | ivi.trusted = -1; | 1137 | ivi.trusted = -1; |
1135 | memset(ivi.mac, 0, sizeof(ivi.mac)); | ||
1136 | /* The default value for VF link state is "auto" | 1138 | /* The default value for VF link state is "auto" |
1137 | * IFLA_VF_LINK_STATE_AUTO which equals zero | 1139 | * IFLA_VF_LINK_STATE_AUTO which equals zero |
1138 | */ | 1140 | */ |
@@ -1467,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
1467 | [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, | 1469 | [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, |
1468 | [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, | 1470 | [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, |
1469 | [IFLA_XDP] = { .type = NLA_NESTED }, | 1471 | [IFLA_XDP] = { .type = NLA_NESTED }, |
1472 | [IFLA_GROUP] = { .type = NLA_U32 }, | ||
1470 | }; | 1473 | }; |
1471 | 1474 | ||
1472 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { | 1475 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { |
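The rtnl_fill_vfinfo() hunk zeroes the whole ifla_vf_info before the driver fills it, so any field a driver's ndo_get_vf_config() does not set is reported as zero instead of leftover stack data; the explicit "not supported" markers are then applied on top. The resulting initialisation order:

    struct ifla_vf_info ivi;

    memset(&ivi, 0, sizeof(ivi));
    /* preset fields not all drivers report, so userspace can tell */
    ivi.spoofchk = -1;
    ivi.rss_query_en = -1;
    ivi.trusted = -1;
    /* the driver's ndo_get_vf_config() fills in the rest */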
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 4b9518a0d248..6f95612b4d32 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt) | |||
188 | call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); | 188 | call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); |
189 | } | 189 | } |
190 | 190 | ||
191 | static inline void dnrt_drop(struct dn_route *rt) | ||
192 | { | ||
193 | dst_release(&rt->dst); | ||
194 | call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); | ||
195 | } | ||
196 | |||
197 | static void dn_dst_check_expire(unsigned long dummy) | 191 | static void dn_dst_check_expire(unsigned long dummy) |
198 | { | 192 | { |
199 | int i; | 193 | int i; |
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops) | |||
248 | } | 242 | } |
249 | *rtp = rt->dst.dn_next; | 243 | *rtp = rt->dst.dn_next; |
250 | rt->dst.dn_next = NULL; | 244 | rt->dst.dn_next = NULL; |
251 | dnrt_drop(rt); | 245 | dnrt_free(rt); |
252 | break; | 246 | break; |
253 | } | 247 | } |
254 | spin_unlock_bh(&dn_rt_hash_table[i].lock); | 248 | spin_unlock_bh(&dn_rt_hash_table[i].lock); |
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou | |||
350 | dst_use(&rth->dst, now); | 344 | dst_use(&rth->dst, now); |
351 | spin_unlock_bh(&dn_rt_hash_table[hash].lock); | 345 | spin_unlock_bh(&dn_rt_hash_table[hash].lock); |
352 | 346 | ||
353 | dnrt_drop(rt); | 347 | dst_free(&rt->dst); |
354 | *rp = rth; | 348 | *rp = rth; |
355 | return 0; | 349 | return 0; |
356 | } | 350 | } |
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy) | |||
380 | for(; rt; rt = next) { | 374 | for(; rt; rt = next) { |
381 | next = rcu_dereference_raw(rt->dst.dn_next); | 375 | next = rcu_dereference_raw(rt->dst.dn_next); |
382 | RCU_INIT_POINTER(rt->dst.dn_next, NULL); | 376 | RCU_INIT_POINTER(rt->dst.dn_next, NULL); |
383 | dst_free((struct dst_entry *)rt); | 377 | dnrt_free(rt); |
384 | } | 378 | } |
385 | 379 | ||
386 | nothing_to_declare: | 380 | nothing_to_declare: |
@@ -1187,7 +1181,7 @@ make_route: | |||
1187 | if (dev_out->flags & IFF_LOOPBACK) | 1181 | if (dev_out->flags & IFF_LOOPBACK) |
1188 | flags |= RTCF_LOCAL; | 1182 | flags |= RTCF_LOCAL; |
1189 | 1183 | ||
1190 | rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); | 1184 | rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST); |
1191 | if (rt == NULL) | 1185 | if (rt == NULL) |
1192 | goto e_nobufs; | 1186 | goto e_nobufs; |
1193 | 1187 | ||
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 1ed81ac6dd1a..aa8ffecc46a4 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c | |||
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) | |||
102 | { | 102 | { |
103 | struct nlmsghdr *nlh = nlmsg_hdr(skb); | 103 | struct nlmsghdr *nlh = nlmsg_hdr(skb); |
104 | 104 | ||
105 | if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) | 105 | if (skb->len < sizeof(*nlh) || |
106 | nlh->nlmsg_len < sizeof(*nlh) || | ||
107 | skb->len < nlh->nlmsg_len) | ||
106 | return; | 108 | return; |
107 | 109 | ||
108 | if (!netlink_capable(skb, CAP_NET_ADMIN)) | 110 | if (!netlink_capable(skb, CAP_NET_ADMIN)) |
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index c73160fb11e7..0a0a392dc2bd 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c | |||
@@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev) | |||
378 | del_timer_sync(&hsr->announce_timer); | 378 | del_timer_sync(&hsr->announce_timer); |
379 | 379 | ||
380 | synchronize_rcu(); | 380 | synchronize_rcu(); |
381 | free_netdev(hsr_dev); | ||
382 | } | 381 | } |
383 | 382 | ||
384 | static const struct net_device_ops hsr_device_ops = { | 383 | static const struct net_device_ops hsr_device_ops = { |
@@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev) | |||
404 | SET_NETDEV_DEVTYPE(dev, &hsr_type); | 403 | SET_NETDEV_DEVTYPE(dev, &hsr_type); |
405 | dev->priv_flags |= IFF_NO_QUEUE; | 404 | dev->priv_flags |= IFF_NO_QUEUE; |
406 | 405 | ||
407 | dev->destructor = hsr_dev_destroy; | 406 | dev->needs_free_netdev = true; |
407 | dev->priv_destructor = hsr_dev_destroy; | ||
408 | 408 | ||
409 | dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | | 409 | dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | |
410 | NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | | 410 | NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | |
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 4ebe2aa3e7d3..04b5450c5a55 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c | |||
@@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame, | |||
324 | unsigned long irqflags; | 324 | unsigned long irqflags; |
325 | 325 | ||
326 | frame->is_supervision = is_supervision_frame(port->hsr, skb); | 326 | frame->is_supervision = is_supervision_frame(port->hsr, skb); |
327 | frame->node_src = hsr_get_node(&port->hsr->node_db, skb, | 327 | frame->node_src = hsr_get_node(port, skb, frame->is_supervision); |
328 | frame->is_supervision); | ||
329 | if (frame->node_src == NULL) | 328 | if (frame->node_src == NULL) |
330 | return -1; /* Unknown node and !is_supervision, or no mem */ | 329 | return -1; /* Unknown node and !is_supervision, or no mem */ |
331 | 330 | ||
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 7ea925816f79..284a9b820df8 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c | |||
@@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], | |||
158 | 158 | ||
159 | /* Get the hsr_node from which 'skb' was sent. | 159 | /* Get the hsr_node from which 'skb' was sent. |
160 | */ | 160 | */ |
161 | struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, | 161 | struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, |
162 | bool is_sup) | 162 | bool is_sup) |
163 | { | 163 | { |
164 | struct list_head *node_db = &port->hsr->node_db; | ||
164 | struct hsr_node *node; | 165 | struct hsr_node *node; |
165 | struct ethhdr *ethhdr; | 166 | struct ethhdr *ethhdr; |
166 | u16 seq_out; | 167 | u16 seq_out; |
@@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, | |||
186 | */ | 187 | */ |
187 | seq_out = hsr_get_skb_sequence_nr(skb) - 1; | 188 | seq_out = hsr_get_skb_sequence_nr(skb) - 1; |
188 | } else { | 189 | } else { |
189 | WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); | 190 | /* this is also called for frames from the master port, |
191 | * so warn only for non-master ports | ||
192 | */ | ||
193 | if (port->type != HSR_PT_MASTER) | ||
194 | WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); | ||
190 | seq_out = HSR_SEQNR_START; | 195 | seq_out = HSR_SEQNR_START; |
191 | } | 196 | } |
192 | 197 | ||
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 438b40f98f5a..4e04f0e868e9 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h | |||
@@ -18,7 +18,7 @@ struct hsr_node; | |||
18 | 18 | ||
19 | struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], | 19 | struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], |
20 | u16 seq_out); | 20 | u16 seq_out); |
21 | struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, | 21 | struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, |
22 | bool is_sup); | 22 | bool is_sup); |
23 | void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, | 23 | void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, |
24 | struct hsr_port *port); | 24 | struct hsr_port *port); |
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index d7efbf0dad20..0a866f332290 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c | |||
@@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev) | |||
107 | 107 | ||
108 | ldev->netdev_ops = &lowpan_netdev_ops; | 108 | ldev->netdev_ops = &lowpan_netdev_ops; |
109 | ldev->header_ops = &lowpan_header_ops; | 109 | ldev->header_ops = &lowpan_header_ops; |
110 | ldev->destructor = free_netdev; | 110 | ldev->needs_free_netdev = true; |
111 | ldev->features |= NETIF_F_NETNS_LOCAL; | 111 | ldev->features |= NETIF_F_NETNS_LOCAL; |
112 | } | 112 | } |
113 | 113 | ||
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 43318b5f5647..9144fa7df2ad 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -657,8 +657,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
657 | /* Needed by both icmp_global_allow and icmp_xmit_lock */ | 657 | /* Needed by both icmp_global_allow and icmp_xmit_lock */ |
658 | local_bh_disable(); | 658 | local_bh_disable(); |
659 | 659 | ||
660 | /* Check global sysctl_icmp_msgs_per_sec ratelimit */ | 660 | /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless |
661 | if (!icmpv4_global_allow(net, type, code)) | 661 | * incoming dev is loopback. If outgoing dev changes to not be |
662 | * loopback, then peer ratelimit still works (in icmpv4_xrlim_allow) | ||
663 | */ | ||
664 | if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) && | ||
665 | !icmpv4_global_allow(net, type, code)) | ||
662 | goto out_bh_enable; | 666 | goto out_bh_enable; |
663 | 667 | ||
664 | sk = icmp_xmit_lock(net); | 668 | sk = icmp_xmit_lock(net); |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 44fd86de2823..ec9a396fa466 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) | |||
1112 | pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); | 1112 | pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); |
1113 | if (!pmc) | 1113 | if (!pmc) |
1114 | return; | 1114 | return; |
1115 | spin_lock_init(&pmc->lock); | ||
1115 | spin_lock_bh(&im->lock); | 1116 | spin_lock_bh(&im->lock); |
1116 | pmc->interface = im->interface; | 1117 | pmc->interface = im->interface; |
1117 | in_dev_hold(in_dev); | 1118 | in_dev_hold(in_dev); |
@@ -2071,21 +2072,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, | |||
2071 | 2072 | ||
2072 | static void ip_mc_clear_src(struct ip_mc_list *pmc) | 2073 | static void ip_mc_clear_src(struct ip_mc_list *pmc) |
2073 | { | 2074 | { |
2074 | struct ip_sf_list *psf, *nextpsf; | 2075 | struct ip_sf_list *psf, *nextpsf, *tomb, *sources; |
2075 | 2076 | ||
2076 | for (psf = pmc->tomb; psf; psf = nextpsf) { | 2077 | spin_lock_bh(&pmc->lock); |
2078 | tomb = pmc->tomb; | ||
2079 | pmc->tomb = NULL; | ||
2080 | sources = pmc->sources; | ||
2081 | pmc->sources = NULL; | ||
2082 | pmc->sfmode = MCAST_EXCLUDE; | ||
2083 | pmc->sfcount[MCAST_INCLUDE] = 0; | ||
2084 | pmc->sfcount[MCAST_EXCLUDE] = 1; | ||
2085 | spin_unlock_bh(&pmc->lock); | ||
2086 | |||
2087 | for (psf = tomb; psf; psf = nextpsf) { | ||
2077 | nextpsf = psf->sf_next; | 2088 | nextpsf = psf->sf_next; |
2078 | kfree(psf); | 2089 | kfree(psf); |
2079 | } | 2090 | } |
2080 | pmc->tomb = NULL; | 2091 | for (psf = sources; psf; psf = nextpsf) { |
2081 | for (psf = pmc->sources; psf; psf = nextpsf) { | ||
2082 | nextpsf = psf->sf_next; | 2092 | nextpsf = psf->sf_next; |
2083 | kfree(psf); | 2093 | kfree(psf); |
2084 | } | 2094 | } |
2085 | pmc->sources = NULL; | ||
2086 | pmc->sfmode = MCAST_EXCLUDE; | ||
2087 | pmc->sfcount[MCAST_INCLUDE] = 0; | ||
2088 | pmc->sfcount[MCAST_EXCLUDE] = 1; | ||
2089 | } | 2095 | } |
2090 | 2096 | ||
2091 | /* Join a multicast group | 2097 | /* Join a multicast group |
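ip_mc_clear_src() now holds pmc->lock while it unhooks the tomb and sources lists and resets the filter state, closing a race with other writers of those lists; the unhooked entries are freed only after the lock is dropped. In outline:

    spin_lock_bh(&pmc->lock);
    tomb = pmc->tomb;                       /* detach both lists ... */
    pmc->tomb = NULL;
    sources = pmc->sources;
    pmc->sources = NULL;
    pmc->sfmode = MCAST_EXCLUDE;            /* ... and reset filter state */
    pmc->sfcount[MCAST_INCLUDE] = 0;
    pmc->sfcount[MCAST_EXCLUDE] = 1;
    spin_unlock_bh(&pmc->lock);

    for (psf = tomb; psf; psf = nextpsf) {  /* free outside the lock */
            nextpsf = psf->sf_next;
            kfree(psf);
    }
    for (psf = sources; psf; psf = nextpsf) {
            nextpsf = psf->sf_next;
            kfree(psf);
    }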
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index b878ecbc0608..129d1a3616f8 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, | |||
446 | return 0; | 446 | return 0; |
447 | 447 | ||
448 | drop: | 448 | drop: |
449 | if (tun_dst) | ||
450 | dst_release((struct dst_entry *)tun_dst); | ||
449 | kfree_skb(skb); | 451 | kfree_skb(skb); |
450 | return 0; | 452 | return 0; |
451 | } | 453 | } |
@@ -967,7 +969,6 @@ static void ip_tunnel_dev_free(struct net_device *dev) | |||
967 | gro_cells_destroy(&tunnel->gro_cells); | 969 | gro_cells_destroy(&tunnel->gro_cells); |
968 | dst_cache_destroy(&tunnel->dst_cache); | 970 | dst_cache_destroy(&tunnel->dst_cache); |
969 | free_percpu(dev->tstats); | 971 | free_percpu(dev->tstats); |
970 | free_netdev(dev); | ||
971 | } | 972 | } |
972 | 973 | ||
973 | void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) | 974 | void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) |
@@ -1155,7 +1156,8 @@ int ip_tunnel_init(struct net_device *dev) | |||
1155 | struct iphdr *iph = &tunnel->parms.iph; | 1156 | struct iphdr *iph = &tunnel->parms.iph; |
1156 | int err; | 1157 | int err; |
1157 | 1158 | ||
1158 | dev->destructor = ip_tunnel_dev_free; | 1159 | dev->needs_free_netdev = true; |
1160 | dev->priv_destructor = ip_tunnel_dev_free; | ||
1159 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | 1161 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
1160 | if (!dev->tstats) | 1162 | if (!dev->tstats) |
1161 | return -ENOMEM; | 1163 | return -ENOMEM; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 551de4d023a8..8ae425cad818 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -101,8 +101,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id); | |||
101 | static void ipmr_free_table(struct mr_table *mrt); | 101 | static void ipmr_free_table(struct mr_table *mrt); |
102 | 102 | ||
103 | static void ip_mr_forward(struct net *net, struct mr_table *mrt, | 103 | static void ip_mr_forward(struct net *net, struct mr_table *mrt, |
104 | struct sk_buff *skb, struct mfc_cache *cache, | 104 | struct net_device *dev, struct sk_buff *skb, |
105 | int local); | 105 | struct mfc_cache *cache, int local); |
106 | static int ipmr_cache_report(struct mr_table *mrt, | 106 | static int ipmr_cache_report(struct mr_table *mrt, |
107 | struct sk_buff *pkt, vifi_t vifi, int assert); | 107 | struct sk_buff *pkt, vifi_t vifi, int assert); |
108 | static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | 108 | static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, |
@@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev) | |||
501 | dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; | 501 | dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; |
502 | dev->flags = IFF_NOARP; | 502 | dev->flags = IFF_NOARP; |
503 | dev->netdev_ops = ®_vif_netdev_ops; | 503 | dev->netdev_ops = ®_vif_netdev_ops; |
504 | dev->destructor = free_netdev; | 504 | dev->needs_free_netdev = true; |
505 | dev->features |= NETIF_F_NETNS_LOCAL; | 505 | dev->features |= NETIF_F_NETNS_LOCAL; |
506 | } | 506 | } |
507 | 507 | ||
@@ -988,7 +988,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, | |||
988 | 988 | ||
989 | rtnl_unicast(skb, net, NETLINK_CB(skb).portid); | 989 | rtnl_unicast(skb, net, NETLINK_CB(skb).portid); |
990 | } else { | 990 | } else { |
991 | ip_mr_forward(net, mrt, skb, c, 0); | 991 | ip_mr_forward(net, mrt, skb->dev, skb, c, 0); |
992 | } | 992 | } |
993 | } | 993 | } |
994 | } | 994 | } |
@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(struct mr_table *mrt, | |||
1073 | 1073 | ||
1074 | /* Queue a packet for resolution. It gets locked cache entry! */ | 1074 | /* Queue a packet for resolution. It gets locked cache entry! */ |
1075 | static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, | 1075 | static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, |
1076 | struct sk_buff *skb) | 1076 | struct sk_buff *skb, struct net_device *dev) |
1077 | { | 1077 | { |
1078 | const struct iphdr *iph = ip_hdr(skb); | 1078 | const struct iphdr *iph = ip_hdr(skb); |
1079 | struct mfc_cache *c; | 1079 | struct mfc_cache *c; |
@@ -1130,6 +1130,10 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, | |||
1130 | kfree_skb(skb); | 1130 | kfree_skb(skb); |
1131 | err = -ENOBUFS; | 1131 | err = -ENOBUFS; |
1132 | } else { | 1132 | } else { |
1133 | if (dev) { | ||
1134 | skb->dev = dev; | ||
1135 | skb->skb_iif = dev->ifindex; | ||
1136 | } | ||
1133 | skb_queue_tail(&c->mfc_un.unres.unresolved, skb); | 1137 | skb_queue_tail(&c->mfc_un.unres.unresolved, skb); |
1134 | err = 0; | 1138 | err = 0; |
1135 | } | 1139 | } |
@@ -1828,10 +1832,10 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) | |||
1828 | 1832 | ||
1829 | /* "local" means that we should preserve one skb (for local delivery) */ | 1833 | /* "local" means that we should preserve one skb (for local delivery) */ |
1830 | static void ip_mr_forward(struct net *net, struct mr_table *mrt, | 1834 | static void ip_mr_forward(struct net *net, struct mr_table *mrt, |
1831 | struct sk_buff *skb, struct mfc_cache *cache, | 1835 | struct net_device *dev, struct sk_buff *skb, |
1832 | int local) | 1836 | struct mfc_cache *cache, int local) |
1833 | { | 1837 | { |
1834 | int true_vifi = ipmr_find_vif(mrt, skb->dev); | 1838 | int true_vifi = ipmr_find_vif(mrt, dev); |
1835 | int psend = -1; | 1839 | int psend = -1; |
1836 | int vif, ct; | 1840 | int vif, ct; |
1837 | 1841 | ||
@@ -1853,13 +1857,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt, | |||
1853 | } | 1857 | } |
1854 | 1858 | ||
1855 | /* Wrong interface: drop packet and (maybe) send PIM assert. */ | 1859 | /* Wrong interface: drop packet and (maybe) send PIM assert. */ |
1856 | if (mrt->vif_table[vif].dev != skb->dev) { | 1860 | if (mrt->vif_table[vif].dev != dev) { |
1857 | struct net_device *mdev; | ||
1858 | |||
1859 | mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev); | ||
1860 | if (mdev == skb->dev) | ||
1861 | goto forward; | ||
1862 | |||
1863 | if (rt_is_output_route(skb_rtable(skb))) { | 1861 | if (rt_is_output_route(skb_rtable(skb))) { |
1864 | /* It is our own packet, looped back. | 1862 | /* It is our own packet, looped back. |
1865 | * Very complicated situation... | 1863 | * Very complicated situation... |
@@ -2053,7 +2051,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
2053 | read_lock(&mrt_lock); | 2051 | read_lock(&mrt_lock); |
2054 | vif = ipmr_find_vif(mrt, dev); | 2052 | vif = ipmr_find_vif(mrt, dev); |
2055 | if (vif >= 0) { | 2053 | if (vif >= 0) { |
2056 | int err2 = ipmr_cache_unresolved(mrt, vif, skb); | 2054 | int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev); |
2057 | read_unlock(&mrt_lock); | 2055 | read_unlock(&mrt_lock); |
2058 | 2056 | ||
2059 | return err2; | 2057 | return err2; |
@@ -2064,7 +2062,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
2064 | } | 2062 | } |
2065 | 2063 | ||
2066 | read_lock(&mrt_lock); | 2064 | read_lock(&mrt_lock); |
2067 | ip_mr_forward(net, mrt, skb, cache, local); | 2065 | ip_mr_forward(net, mrt, dev, skb, cache, local); |
2068 | read_unlock(&mrt_lock); | 2066 | read_unlock(&mrt_lock); |
2069 | 2067 | ||
2070 | if (local) | 2068 | if (local) |
@@ -2238,7 +2236,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, | |||
2238 | iph->saddr = saddr; | 2236 | iph->saddr = saddr; |
2239 | iph->daddr = daddr; | 2237 | iph->daddr = daddr; |
2240 | iph->version = 0; | 2238 | iph->version = 0; |
2241 | err = ipmr_cache_unresolved(mrt, vif, skb2); | 2239 | err = ipmr_cache_unresolved(mrt, vif, skb2, dev); |
2242 | read_unlock(&mrt_lock); | 2240 | read_unlock(&mrt_lock); |
2243 | rcu_read_unlock(); | 2241 | rcu_read_unlock(); |
2244 | return err; | 2242 | return err; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6a4fb1e629fb..686c92375e81 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev, | |||
332 | static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, | 332 | static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, |
333 | unsigned long delay) | 333 | unsigned long delay) |
334 | { | 334 | { |
335 | if (!delayed_work_pending(&ifp->dad_work)) | 335 | in6_ifa_hold(ifp); |
336 | in6_ifa_hold(ifp); | 336 | if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) |
337 | mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); | 337 | in6_ifa_put(ifp); |
338 | } | 338 | } |
339 | 339 | ||
340 | static int snmp6_alloc_dev(struct inet6_dev *idev) | 340 | static int snmp6_alloc_dev(struct inet6_dev *idev) |
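addrconf_mod_dad_work() used to take a reference only when no DAD work was pending, which could miscount if that check raced with the work itself. The new form is the usual pattern for delayed work that holds a reference on its object: take the reference unconditionally, then drop the extra one when mod_delayed_work() reports the work was already queued (and therefore already owns a reference):

    /* the queued dad_work owns one reference to ifp */
    in6_ifa_hold(ifp);
    if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
            in6_ifa_put(ifp);       /* already queued: drop the extra hold */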
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index eea23b57c6a5..ec849d88a662 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -32,7 +32,6 @@ struct fib6_rule { | |||
32 | struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, | 32 | struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, |
33 | int flags, pol_lookup_t lookup) | 33 | int flags, pol_lookup_t lookup) |
34 | { | 34 | { |
35 | struct rt6_info *rt; | ||
36 | struct fib_lookup_arg arg = { | 35 | struct fib_lookup_arg arg = { |
37 | .lookup_ptr = lookup, | 36 | .lookup_ptr = lookup, |
38 | .flags = FIB_LOOKUP_NOREF, | 37 | .flags = FIB_LOOKUP_NOREF, |
@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, | |||
44 | fib_rules_lookup(net->ipv6.fib6_rules_ops, | 43 | fib_rules_lookup(net->ipv6.fib6_rules_ops, |
45 | flowi6_to_flowi(fl6), flags, &arg); | 44 | flowi6_to_flowi(fl6), flags, &arg); |
46 | 45 | ||
47 | rt = arg.result; | 46 | if (arg.result) |
47 | return arg.result; | ||
48 | 48 | ||
49 | if (!rt) { | 49 | dst_hold(&net->ipv6.ip6_null_entry->dst); |
50 | dst_hold(&net->ipv6.ip6_null_entry->dst); | 50 | return &net->ipv6.ip6_null_entry->dst; |
51 | return &net->ipv6.ip6_null_entry->dst; | ||
52 | } | ||
53 | |||
54 | if (rt->rt6i_flags & RTF_REJECT && | ||
55 | rt->dst.error == -EAGAIN) { | ||
56 | ip6_rt_put(rt); | ||
57 | rt = net->ipv6.ip6_null_entry; | ||
58 | dst_hold(&rt->dst); | ||
59 | } | ||
60 | |||
61 | return &rt->dst; | ||
62 | } | 51 | } |
63 | 52 | ||
64 | static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | 53 | static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, |
@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
121 | flp6->saddr = saddr; | 110 | flp6->saddr = saddr; |
122 | } | 111 | } |
123 | err = rt->dst.error; | 112 | err = rt->dst.error; |
124 | goto out; | 113 | if (err != -EAGAIN) |
114 | goto out; | ||
125 | } | 115 | } |
126 | again: | 116 | again: |
127 | ip6_rt_put(rt); | 117 | ip6_rt_put(rt); |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 230b5aac9f03..8d7b113958b1 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -491,7 +491,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | |||
491 | local_bh_disable(); | 491 | local_bh_disable(); |
492 | 492 | ||
493 | /* Check global sysctl_icmp_msgs_per_sec ratelimit */ | 493 | /* Check global sysctl_icmp_msgs_per_sec ratelimit */ |
494 | if (!icmpv6_global_allow(type)) | 494 | if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type)) |
495 | goto out_bh_enable; | 495 | goto out_bh_enable; |
496 | 496 | ||
497 | mip6_addr_swap(skb); | 497 | mip6_addr_swap(skb); |
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 2fd5ca151dcf..77f7f8c7d93d 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c | |||
@@ -62,6 +62,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc) | |||
62 | { | 62 | { |
63 | u32 *v = (u32 *)loc.v32; | 63 | u32 *v = (u32 *)loc.v32; |
64 | 64 | ||
65 | __ila_hash_secret_init(); | ||
65 | return jhash_2words(v[0], v[1], hashrnd); | 66 | return jhash_2words(v[0], v[1], hashrnd); |
66 | } | 67 | } |
67 | 68 | ||
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index d4bf2c68a545..e6b78ba0e636 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, | |||
289 | struct rt6_info *rt; | 289 | struct rt6_info *rt; |
290 | 290 | ||
291 | rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); | 291 | rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); |
292 | if (rt->rt6i_flags & RTF_REJECT && | 292 | if (rt->dst.error == -EAGAIN) { |
293 | rt->dst.error == -EAGAIN) { | ||
294 | ip6_rt_put(rt); | 293 | ip6_rt_put(rt); |
295 | rt = net->ipv6.ip6_null_entry; | 294 | rt = net->ipv6.ip6_null_entry; |
296 | dst_hold(&rt->dst); | 295 | dst_hold(&rt->dst); |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 0c5b4caa1949..64eea3962733 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -991,13 +991,13 @@ static void ip6gre_dev_free(struct net_device *dev) | |||
991 | 991 | ||
992 | dst_cache_destroy(&t->dst_cache); | 992 | dst_cache_destroy(&t->dst_cache); |
993 | free_percpu(dev->tstats); | 993 | free_percpu(dev->tstats); |
994 | free_netdev(dev); | ||
995 | } | 994 | } |
996 | 995 | ||
997 | static void ip6gre_tunnel_setup(struct net_device *dev) | 996 | static void ip6gre_tunnel_setup(struct net_device *dev) |
998 | { | 997 | { |
999 | dev->netdev_ops = &ip6gre_netdev_ops; | 998 | dev->netdev_ops = &ip6gre_netdev_ops; |
1000 | dev->destructor = ip6gre_dev_free; | 999 | dev->needs_free_netdev = true; |
1000 | dev->priv_destructor = ip6gre_dev_free; | ||
1001 | 1001 | ||
1002 | dev->type = ARPHRD_IP6GRE; | 1002 | dev->type = ARPHRD_IP6GRE; |
1003 | 1003 | ||
@@ -1148,7 +1148,7 @@ static int __net_init ip6gre_init_net(struct net *net) | |||
1148 | return 0; | 1148 | return 0; |
1149 | 1149 | ||
1150 | err_reg_dev: | 1150 | err_reg_dev: |
1151 | ip6gre_dev_free(ign->fb_tunnel_dev); | 1151 | free_netdev(ign->fb_tunnel_dev); |
1152 | err_alloc_dev: | 1152 | err_alloc_dev: |
1153 | return err; | 1153 | return err; |
1154 | } | 1154 | } |
@@ -1300,7 +1300,8 @@ static void ip6gre_tap_setup(struct net_device *dev) | |||
1300 | ether_setup(dev); | 1300 | ether_setup(dev); |
1301 | 1301 | ||
1302 | dev->netdev_ops = &ip6gre_tap_netdev_ops; | 1302 | dev->netdev_ops = &ip6gre_tap_netdev_ops; |
1303 | dev->destructor = ip6gre_dev_free; | 1303 | dev->needs_free_netdev = true; |
1304 | dev->priv_destructor = ip6gre_dev_free; | ||
1304 | 1305 | ||
1305 | dev->features |= NETIF_F_NETNS_LOCAL; | 1306 | dev->features |= NETIF_F_NETNS_LOCAL; |
1306 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1307 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9b37f9747fc6..8c6c3c8e7eef 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev) | |||
254 | gro_cells_destroy(&t->gro_cells); | 254 | gro_cells_destroy(&t->gro_cells); |
255 | dst_cache_destroy(&t->dst_cache); | 255 | dst_cache_destroy(&t->dst_cache); |
256 | free_percpu(dev->tstats); | 256 | free_percpu(dev->tstats); |
257 | free_netdev(dev); | ||
258 | } | 257 | } |
259 | 258 | ||
260 | static int ip6_tnl_create2(struct net_device *dev) | 259 | static int ip6_tnl_create2(struct net_device *dev) |
@@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | |||
322 | return t; | 321 | return t; |
323 | 322 | ||
324 | failed_free: | 323 | failed_free: |
325 | ip6_dev_free(dev); | 324 | free_netdev(dev); |
326 | failed: | 325 | failed: |
327 | return ERR_PTR(err); | 326 | return ERR_PTR(err); |
328 | } | 327 | } |
@@ -859,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb, | |||
859 | return 0; | 858 | return 0; |
860 | 859 | ||
861 | drop: | 860 | drop: |
861 | if (tun_dst) | ||
862 | dst_release((struct dst_entry *)tun_dst); | ||
862 | kfree_skb(skb); | 863 | kfree_skb(skb); |
863 | return 0; | 864 | return 0; |
864 | } | 865 | } |
@@ -1247,7 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1247 | fl6.flowi6_proto = IPPROTO_IPIP; | 1248 | fl6.flowi6_proto = IPPROTO_IPIP; |
1248 | fl6.daddr = key->u.ipv6.dst; | 1249 | fl6.daddr = key->u.ipv6.dst; |
1249 | fl6.flowlabel = key->label; | 1250 | fl6.flowlabel = key->label; |
1250 | dsfield = ip6_tclass(key->label); | 1251 | dsfield = key->tos; |
1251 | } else { | 1252 | } else { |
1252 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) | 1253 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
1253 | encap_limit = t->parms.encap_limit; | 1254 | encap_limit = t->parms.encap_limit; |
@@ -1318,7 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1318 | fl6.flowi6_proto = IPPROTO_IPV6; | 1319 | fl6.flowi6_proto = IPPROTO_IPV6; |
1319 | fl6.daddr = key->u.ipv6.dst; | 1320 | fl6.daddr = key->u.ipv6.dst; |
1320 | fl6.flowlabel = key->label; | 1321 | fl6.flowlabel = key->label; |
1321 | dsfield = ip6_tclass(key->label); | 1322 | dsfield = key->tos; |
1322 | } else { | 1323 | } else { |
1323 | offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); | 1324 | offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); |
1324 | /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ | 1325 | /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ |
@@ -1777,7 +1778,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = { | |||
1777 | static void ip6_tnl_dev_setup(struct net_device *dev) | 1778 | static void ip6_tnl_dev_setup(struct net_device *dev) |
1778 | { | 1779 | { |
1779 | dev->netdev_ops = &ip6_tnl_netdev_ops; | 1780 | dev->netdev_ops = &ip6_tnl_netdev_ops; |
1780 | dev->destructor = ip6_dev_free; | 1781 | dev->needs_free_netdev = true; |
1782 | dev->priv_destructor = ip6_dev_free; | ||
1781 | 1783 | ||
1782 | dev->type = ARPHRD_TUNNEL6; | 1784 | dev->type = ARPHRD_TUNNEL6; |
1783 | dev->flags |= IFF_NOARP; | 1785 | dev->flags |= IFF_NOARP; |
@@ -2224,7 +2226,7 @@ static int __net_init ip6_tnl_init_net(struct net *net) | |||
2224 | return 0; | 2226 | return 0; |
2225 | 2227 | ||
2226 | err_register: | 2228 | err_register: |
2227 | ip6_dev_free(ip6n->fb_tnl_dev); | 2229 | free_netdev(ip6n->fb_tnl_dev); |
2228 | err_alloc_dev: | 2230 | err_alloc_dev: |
2229 | return err; | 2231 | return err; |
2230 | } | 2232 | } |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d67ef56454b2..837ea1eefe7f 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t) | |||
180 | static void vti6_dev_free(struct net_device *dev) | 180 | static void vti6_dev_free(struct net_device *dev) |
181 | { | 181 | { |
182 | free_percpu(dev->tstats); | 182 | free_percpu(dev->tstats); |
183 | free_netdev(dev); | ||
184 | } | 183 | } |
185 | 184 | ||
186 | static int vti6_tnl_create2(struct net_device *dev) | 185 | static int vti6_tnl_create2(struct net_device *dev) |
@@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p | |||
235 | return t; | 234 | return t; |
236 | 235 | ||
237 | failed_free: | 236 | failed_free: |
238 | vti6_dev_free(dev); | 237 | free_netdev(dev); |
239 | failed: | 238 | failed: |
240 | return NULL; | 239 | return NULL; |
241 | } | 240 | } |
@@ -842,7 +841,8 @@ static const struct net_device_ops vti6_netdev_ops = { | |||
842 | static void vti6_dev_setup(struct net_device *dev) | 841 | static void vti6_dev_setup(struct net_device *dev) |
843 | { | 842 | { |
844 | dev->netdev_ops = &vti6_netdev_ops; | 843 | dev->netdev_ops = &vti6_netdev_ops; |
845 | dev->destructor = vti6_dev_free; | 844 | dev->needs_free_netdev = true; |
845 | dev->priv_destructor = vti6_dev_free; | ||
846 | 846 | ||
847 | dev->type = ARPHRD_TUNNEL6; | 847 | dev->type = ARPHRD_TUNNEL6; |
848 | dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); | 848 | dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); |
@@ -1100,7 +1100,7 @@ static int __net_init vti6_init_net(struct net *net) | |||
1100 | return 0; | 1100 | return 0; |
1101 | 1101 | ||
1102 | err_register: | 1102 | err_register: |
1103 | vti6_dev_free(ip6n->fb_tnl_dev); | 1103 | free_netdev(ip6n->fb_tnl_dev); |
1104 | err_alloc_dev: | 1104 | err_alloc_dev: |
1105 | return err; | 1105 | return err; |
1106 | } | 1106 | } |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 374997d26488..2ecb39b943b5 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev) | |||
733 | dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; | 733 | dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; |
734 | dev->flags = IFF_NOARP; | 734 | dev->flags = IFF_NOARP; |
735 | dev->netdev_ops = ®_vif_netdev_ops; | 735 | dev->netdev_ops = ®_vif_netdev_ops; |
736 | dev->destructor = free_netdev; | 736 | dev->needs_free_netdev = true; |
737 | dev->features |= NETIF_F_NETNS_LOCAL; | 737 | dev->features |= NETIF_F_NETNS_LOCAL; |
738 | } | 738 | } |
739 | 739 | ||
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index cc8e3ae9ca73..e88bcb8ff0fd 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib, | |||
219 | u64 buff64[SNMP_MIB_MAX]; | 219 | u64 buff64[SNMP_MIB_MAX]; |
220 | int i; | 220 | int i; |
221 | 221 | ||
222 | memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX); | 222 | memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX); |
223 | 223 | ||
224 | snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff); | 224 | snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff); |
225 | for (i = 0; itemlist[i].name; i++) | 225 | for (i = 0; itemlist[i].name; i++) |
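The snmp6 fix corrects a memset that cleared sizeof(unsigned long) * SNMP_MIB_MAX bytes of a u64 buffer: on 32-bit targets unsigned long is 4 bytes while u64 is 8, so only half of buff64 was being zeroed. Sizing the memset from the element type, or from the array itself, avoids the mismatch; a standalone sketch with an illustrative SNMP_MIB_MAX:

    #include <stdint.h>
    #include <string.h>

    #define SNMP_MIB_MAX 128                /* illustrative value */

    void clear_buffer(void)
    {
            uint64_t buff64[SNMP_MIB_MAX];

            /* wrong on 32-bit: clears only half of buff64 */
            /* memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX); */

            /* size from the element type ... */
            memset(buff64, 0, sizeof(uint64_t) * SNMP_MIB_MAX);
            /* ... or simply from the array itself */
            memset(buff64, 0, sizeof(buff64));
    }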
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index dc61b0b5e64e..7cebd954d5bb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg) | |||
2804 | if ((rt->dst.dev == dev || !dev) && | 2804 | if ((rt->dst.dev == dev || !dev) && |
2805 | rt != adn->net->ipv6.ip6_null_entry && | 2805 | rt != adn->net->ipv6.ip6_null_entry && |
2806 | (rt->rt6i_nsiblings == 0 || | 2806 | (rt->rt6i_nsiblings == 0 || |
2807 | (dev && netdev_unregistering(dev)) || | ||
2807 | !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) | 2808 | !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) |
2808 | return -1; | 2809 | return -1; |
2809 | 2810 | ||
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 61e5902f0687..2378503577b0 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, | |||
265 | return nt; | 265 | return nt; |
266 | 266 | ||
267 | failed_free: | 267 | failed_free: |
268 | ipip6_dev_free(dev); | 268 | free_netdev(dev); |
269 | failed: | 269 | failed: |
270 | return NULL; | 270 | return NULL; |
271 | } | 271 | } |
@@ -1336,7 +1336,6 @@ static void ipip6_dev_free(struct net_device *dev) | |||
1336 | 1336 | ||
1337 | dst_cache_destroy(&tunnel->dst_cache); | 1337 | dst_cache_destroy(&tunnel->dst_cache); |
1338 | free_percpu(dev->tstats); | 1338 | free_percpu(dev->tstats); |
1339 | free_netdev(dev); | ||
1340 | } | 1339 | } |
1341 | 1340 | ||
1342 | #define SIT_FEATURES (NETIF_F_SG | \ | 1341 | #define SIT_FEATURES (NETIF_F_SG | \ |
@@ -1351,7 +1350,8 @@ static void ipip6_tunnel_setup(struct net_device *dev) | |||
1351 | int t_hlen = tunnel->hlen + sizeof(struct iphdr); | 1350 | int t_hlen = tunnel->hlen + sizeof(struct iphdr); |
1352 | 1351 | ||
1353 | dev->netdev_ops = &ipip6_netdev_ops; | 1352 | dev->netdev_ops = &ipip6_netdev_ops; |
1354 | dev->destructor = ipip6_dev_free; | 1353 | dev->needs_free_netdev = true; |
1354 | dev->priv_destructor = ipip6_dev_free; | ||
1355 | 1355 | ||
1356 | dev->type = ARPHRD_SIT; | 1356 | dev->type = ARPHRD_SIT; |
1357 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; | 1357 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; |
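The sit.c change above follows the same pattern as the other netdev destructor conversions in this merge: the driver's own destructor stops calling free_netdev(), dev->needs_free_netdev tells the core to free the device itself, and any remaining per-device cleanup is hooked up through dev->priv_destructor. A rough userspace model of that ownership split (the struct and function names below are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of the needs_free_netdev / priv_destructor split. */
struct toy_netdev {
    bool needs_free_netdev;                          /* core frees the device */
    void (*priv_destructor)(struct toy_netdev *);    /* driver-private cleanup only */
    void *tstats;                                    /* stands in for per-cpu stats */
};

static void toy_dev_free(struct toy_netdev *dev)
{
    /* Like ipip6_dev_free() after the patch: release private state,
     * but do NOT free the device itself. */
    free(dev->tstats);
    dev->tstats = NULL;
}

static void toy_unregister(struct toy_netdev *dev)
{
    /* Core teardown: run the driver hook, then free if asked to. */
    if (dev->priv_destructor)
        dev->priv_destructor(dev);
    if (dev->needs_free_netdev)
        free(dev);
}

int main(void)
{
    struct toy_netdev *dev = calloc(1, sizeof(*dev));

    if (!dev)
        return 1;
    dev->tstats = malloc(64);
    dev->needs_free_netdev = true;
    dev->priv_destructor = toy_dev_free;
    toy_unregister(dev);
    printf("device torn down\n");
    return 0;
}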
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 74d09f91709e..3be852808a9d 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c | |||
@@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev) | |||
65 | ether_setup(dev); | 65 | ether_setup(dev); |
66 | 66 | ||
67 | dev->netdev_ops = &irlan_eth_netdev_ops; | 67 | dev->netdev_ops = &irlan_eth_netdev_ops; |
68 | dev->destructor = free_netdev; | 68 | dev->needs_free_netdev = true; |
69 | dev->min_mtu = 0; | 69 | dev->min_mtu = 0; |
70 | dev->max_mtu = ETH_MAX_MTU; | 70 | dev->max_mtu = ETH_MAX_MTU; |
71 | 71 | ||
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 8b21af7321b9..4de2ec94b08c 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
@@ -114,12 +114,13 @@ static void l2tp_eth_get_stats64(struct net_device *dev, | |||
114 | { | 114 | { |
115 | struct l2tp_eth *priv = netdev_priv(dev); | 115 | struct l2tp_eth *priv = netdev_priv(dev); |
116 | 116 | ||
117 | stats->tx_bytes = atomic_long_read(&priv->tx_bytes); | 117 | stats->tx_bytes = (unsigned long) atomic_long_read(&priv->tx_bytes); |
118 | stats->tx_packets = atomic_long_read(&priv->tx_packets); | 118 | stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets); |
119 | stats->tx_dropped = atomic_long_read(&priv->tx_dropped); | 119 | stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped); |
120 | stats->rx_bytes = atomic_long_read(&priv->rx_bytes); | 120 | stats->rx_bytes = (unsigned long) atomic_long_read(&priv->rx_bytes); |
121 | stats->rx_packets = atomic_long_read(&priv->rx_packets); | 121 | stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets); |
122 | stats->rx_errors = atomic_long_read(&priv->rx_errors); | 122 | stats->rx_errors = (unsigned long) atomic_long_read(&priv->rx_errors); |
123 | |||
123 | } | 124 | } |
124 | 125 | ||
125 | static const struct net_device_ops l2tp_eth_netdev_ops = { | 126 | static const struct net_device_ops l2tp_eth_netdev_ops = { |
@@ -141,7 +142,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev) | |||
141 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 142 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
142 | dev->features |= NETIF_F_LLTX; | 143 | dev->features |= NETIF_F_LLTX; |
143 | dev->netdev_ops = &l2tp_eth_netdev_ops; | 144 | dev->netdev_ops = &l2tp_eth_netdev_ops; |
144 | dev->destructor = free_netdev; | 145 | dev->needs_free_netdev = true; |
145 | } | 146 | } |
146 | 147 | ||
147 | static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) | 148 | static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 6c2e6060cd54..4a388fe8c2d1 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -902,6 +902,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
902 | default: | 902 | default: |
903 | return -EINVAL; | 903 | return -EINVAL; |
904 | } | 904 | } |
905 | sdata->u.ap.req_smps = sdata->smps_mode; | ||
906 | |||
905 | sdata->needed_rx_chains = sdata->local->rx_chains; | 907 | sdata->needed_rx_chains = sdata->local->rx_chains; |
906 | 908 | ||
907 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; | 909 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 665501ac358f..5e002f62c235 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1531,7 +1531,7 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) | |||
1531 | return true; | 1531 | return true; |
1532 | /* can't handle non-legacy preamble yet */ | 1532 | /* can't handle non-legacy preamble yet */ |
1533 | if (status->flag & RX_FLAG_MACTIME_PLCP_START && | 1533 | if (status->flag & RX_FLAG_MACTIME_PLCP_START && |
1534 | status->encoding != RX_ENC_LEGACY) | 1534 | status->encoding == RX_ENC_LEGACY) |
1535 | return true; | 1535 | return true; |
1536 | return false; | 1536 | return false; |
1537 | } | 1537 | } |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8fae1a72e6a7..f5f50150ba1c 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1213,7 +1213,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = { | |||
1213 | static void ieee80211_if_free(struct net_device *dev) | 1213 | static void ieee80211_if_free(struct net_device *dev) |
1214 | { | 1214 | { |
1215 | free_percpu(dev->tstats); | 1215 | free_percpu(dev->tstats); |
1216 | free_netdev(dev); | ||
1217 | } | 1216 | } |
1218 | 1217 | ||
1219 | static void ieee80211_if_setup(struct net_device *dev) | 1218 | static void ieee80211_if_setup(struct net_device *dev) |
@@ -1221,7 +1220,8 @@ static void ieee80211_if_setup(struct net_device *dev) | |||
1221 | ether_setup(dev); | 1220 | ether_setup(dev); |
1222 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1221 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
1223 | dev->netdev_ops = &ieee80211_dataif_ops; | 1222 | dev->netdev_ops = &ieee80211_dataif_ops; |
1224 | dev->destructor = ieee80211_if_free; | 1223 | dev->needs_free_netdev = true; |
1224 | dev->priv_destructor = ieee80211_if_free; | ||
1225 | } | 1225 | } |
1226 | 1226 | ||
1227 | static void ieee80211_if_setup_no_queue(struct net_device *dev) | 1227 | static void ieee80211_if_setup_no_queue(struct net_device *dev) |
@@ -1816,6 +1816,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
1816 | ret = dev_alloc_name(ndev, ndev->name); | 1816 | ret = dev_alloc_name(ndev, ndev->name); |
1817 | if (ret < 0) { | 1817 | if (ret < 0) { |
1818 | ieee80211_if_free(ndev); | 1818 | ieee80211_if_free(ndev); |
1819 | free_netdev(ndev); | ||
1819 | return ret; | 1820 | return ret; |
1820 | } | 1821 | } |
1821 | 1822 | ||
@@ -1905,7 +1906,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
1905 | 1906 | ||
1906 | ret = register_netdevice(ndev); | 1907 | ret = register_netdevice(ndev); |
1907 | if (ret) { | 1908 | if (ret) { |
1908 | ieee80211_if_free(ndev); | 1909 | free_netdev(ndev); |
1909 | return ret; | 1910 | return ret; |
1910 | } | 1911 | } |
1911 | } | 1912 | } |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0ea9712bd99e..cc8e6ea1b27e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -601,7 +601,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
601 | struct ieee80211_supported_band *sband; | 601 | struct ieee80211_supported_band *sband; |
602 | struct ieee80211_chanctx_conf *chanctx_conf; | 602 | struct ieee80211_chanctx_conf *chanctx_conf; |
603 | struct ieee80211_channel *chan; | 603 | struct ieee80211_channel *chan; |
604 | u32 rate_flags, rates = 0; | 604 | u32 rates = 0; |
605 | 605 | ||
606 | sdata_assert_lock(sdata); | 606 | sdata_assert_lock(sdata); |
607 | 607 | ||
@@ -612,7 +612,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
612 | return; | 612 | return; |
613 | } | 613 | } |
614 | chan = chanctx_conf->def.chan; | 614 | chan = chanctx_conf->def.chan; |
615 | rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); | ||
616 | rcu_read_unlock(); | 615 | rcu_read_unlock(); |
617 | sband = local->hw.wiphy->bands[chan->band]; | 616 | sband = local->hw.wiphy->bands[chan->band]; |
618 | shift = ieee80211_vif_get_shift(&sdata->vif); | 617 | shift = ieee80211_vif_get_shift(&sdata->vif); |
@@ -636,9 +635,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
636 | */ | 635 | */ |
637 | rates_len = 0; | 636 | rates_len = 0; |
638 | for (i = 0; i < sband->n_bitrates; i++) { | 637 | for (i = 0; i < sband->n_bitrates; i++) { |
639 | if ((rate_flags & sband->bitrates[i].flags) | ||
640 | != rate_flags) | ||
641 | continue; | ||
642 | rates |= BIT(i); | 638 | rates |= BIT(i); |
643 | rates_len++; | 639 | rates_len++; |
644 | } | 640 | } |
@@ -2818,7 +2814,7 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, | |||
2818 | u32 *rates, u32 *basic_rates, | 2814 | u32 *rates, u32 *basic_rates, |
2819 | bool *have_higher_than_11mbit, | 2815 | bool *have_higher_than_11mbit, |
2820 | int *min_rate, int *min_rate_index, | 2816 | int *min_rate, int *min_rate_index, |
2821 | int shift, u32 rate_flags) | 2817 | int shift) |
2822 | { | 2818 | { |
2823 | int i, j; | 2819 | int i, j; |
2824 | 2820 | ||
@@ -2846,8 +2842,6 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, | |||
2846 | int brate; | 2842 | int brate; |
2847 | 2843 | ||
2848 | br = &sband->bitrates[j]; | 2844 | br = &sband->bitrates[j]; |
2849 | if ((rate_flags & br->flags) != rate_flags) | ||
2850 | continue; | ||
2851 | 2845 | ||
2852 | brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); | 2846 | brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); |
2853 | if (brate == rate) { | 2847 | if (brate == rate) { |
@@ -4398,40 +4392,32 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | |||
4398 | return -ENOMEM; | 4392 | return -ENOMEM; |
4399 | } | 4393 | } |
4400 | 4394 | ||
4401 | if (new_sta || override) { | 4395 | /* |
4402 | err = ieee80211_prep_channel(sdata, cbss); | 4396 | * Set up the information for the new channel before setting the |
4403 | if (err) { | 4397 | * new channel. We can't - completely race-free - change the basic |
4404 | if (new_sta) | 4398 | * rates bitmap and the channel (sband) that it refers to, but if |
4405 | sta_info_free(local, new_sta); | 4399 | * we set it up before we at least avoid calling into the driver's |
4406 | return -EINVAL; | 4400 | * bss_info_changed() method with invalid information (since we do |
4407 | } | 4401 | * call that from changing the channel - only for IDLE and perhaps |
4408 | } | 4402 | * some others, but ...). |
4409 | 4403 | * | |
4404 | * So to avoid that, just set up all the new information before the | ||
4405 | * channel, but tell the driver to apply it only afterwards, since | ||
4406 | * it might need the new channel for that. | ||
4407 | */ | ||
4410 | if (new_sta) { | 4408 | if (new_sta) { |
4411 | u32 rates = 0, basic_rates = 0; | 4409 | u32 rates = 0, basic_rates = 0; |
4412 | bool have_higher_than_11mbit; | 4410 | bool have_higher_than_11mbit; |
4413 | int min_rate = INT_MAX, min_rate_index = -1; | 4411 | int min_rate = INT_MAX, min_rate_index = -1; |
4414 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
4415 | const struct cfg80211_bss_ies *ies; | 4412 | const struct cfg80211_bss_ies *ies; |
4416 | int shift = ieee80211_vif_get_shift(&sdata->vif); | 4413 | int shift = ieee80211_vif_get_shift(&sdata->vif); |
4417 | u32 rate_flags; | ||
4418 | |||
4419 | rcu_read_lock(); | ||
4420 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
4421 | if (WARN_ON(!chanctx_conf)) { | ||
4422 | rcu_read_unlock(); | ||
4423 | sta_info_free(local, new_sta); | ||
4424 | return -EINVAL; | ||
4425 | } | ||
4426 | rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); | ||
4427 | rcu_read_unlock(); | ||
4428 | 4414 | ||
4429 | ieee80211_get_rates(sband, bss->supp_rates, | 4415 | ieee80211_get_rates(sband, bss->supp_rates, |
4430 | bss->supp_rates_len, | 4416 | bss->supp_rates_len, |
4431 | &rates, &basic_rates, | 4417 | &rates, &basic_rates, |
4432 | &have_higher_than_11mbit, | 4418 | &have_higher_than_11mbit, |
4433 | &min_rate, &min_rate_index, | 4419 | &min_rate, &min_rate_index, |
4434 | shift, rate_flags); | 4420 | shift); |
4435 | 4421 | ||
4436 | /* | 4422 | /* |
4437 | * This used to be a workaround for basic rates missing | 4423 | * This used to be a workaround for basic rates missing |
@@ -4489,8 +4475,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | |||
4489 | sdata->vif.bss_conf.sync_dtim_count = 0; | 4475 | sdata->vif.bss_conf.sync_dtim_count = 0; |
4490 | } | 4476 | } |
4491 | rcu_read_unlock(); | 4477 | rcu_read_unlock(); |
4478 | } | ||
4492 | 4479 | ||
4493 | /* tell driver about BSSID, basic rates and timing */ | 4480 | if (new_sta || override) { |
4481 | err = ieee80211_prep_channel(sdata, cbss); | ||
4482 | if (err) { | ||
4483 | if (new_sta) | ||
4484 | sta_info_free(local, new_sta); | ||
4485 | return -EINVAL; | ||
4486 | } | ||
4487 | } | ||
4488 | |||
4489 | if (new_sta) { | ||
4490 | /* | ||
4491 | * tell driver about BSSID, basic rates and timing | ||
4492 | * this was set up above, before setting the channel | ||
4493 | */ | ||
4494 | ieee80211_bss_info_change_notify(sdata, | 4494 | ieee80211_bss_info_change_notify(sdata, |
4495 | BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | | 4495 | BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | |
4496 | BSS_CHANGED_BEACON_INT); | 4496 | BSS_CHANGED_BEACON_INT); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1f75280ba26c..3674fe3d67dc 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1613,12 +1613,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1613 | */ | 1613 | */ |
1614 | if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && | 1614 | if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && |
1615 | !ieee80211_has_morefrags(hdr->frame_control) && | 1615 | !ieee80211_has_morefrags(hdr->frame_control) && |
1616 | !ieee80211_is_back_req(hdr->frame_control) && | ||
1616 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && | 1617 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && |
1617 | (rx->sdata->vif.type == NL80211_IFTYPE_AP || | 1618 | (rx->sdata->vif.type == NL80211_IFTYPE_AP || |
1618 | rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && | 1619 | rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && |
1619 | /* PM bit is only checked in frames where it isn't reserved, | 1620 | /* |
1621 | * PM bit is only checked in frames where it isn't reserved, | ||
1620 | * in AP mode it's reserved in non-bufferable management frames | 1622 | * in AP mode it's reserved in non-bufferable management frames |
1621 | * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) | 1623 | * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) |
1624 | * BAR frames should be ignored as specified in | ||
1625 | * IEEE 802.11-2012 10.2.1.2. | ||
1622 | */ | 1626 | */ |
1623 | (!ieee80211_is_mgmt(hdr->frame_control) || | 1627 | (!ieee80211_is_mgmt(hdr->frame_control) || |
1624 | ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { | 1628 | ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index c1ef22df865f..cc19614ff4e6 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/unaligned.h> | 17 | #include <asm/unaligned.h> |
18 | #include <net/mac80211.h> | 18 | #include <net/mac80211.h> |
19 | #include <crypto/aes.h> | 19 | #include <crypto/aes.h> |
20 | #include <crypto/algapi.h> | ||
20 | 21 | ||
21 | #include "ieee80211_i.h" | 22 | #include "ieee80211_i.h" |
22 | #include "michael.h" | 23 | #include "michael.h" |
@@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
153 | data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; | 154 | data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; |
154 | key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; | 155 | key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; |
155 | michael_mic(key, hdr, data, data_len, mic); | 156 | michael_mic(key, hdr, data, data_len, mic); |
156 | if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) | 157 | if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN)) |
157 | goto mic_fail; | 158 | goto mic_fail; |
158 | 159 | ||
159 | /* remove Michael MIC from payload */ | 160 | /* remove Michael MIC from payload */ |
@@ -1048,7 +1049,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) | |||
1048 | bip_aad(skb, aad); | 1049 | bip_aad(skb, aad); |
1049 | ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, | 1050 | ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, |
1050 | skb->data + 24, skb->len - 24, mic); | 1051 | skb->data + 24, skb->len - 24, mic); |
1051 | if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { | 1052 | if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { |
1052 | key->u.aes_cmac.icverrors++; | 1053 | key->u.aes_cmac.icverrors++; |
1053 | return RX_DROP_UNUSABLE; | 1054 | return RX_DROP_UNUSABLE; |
1054 | } | 1055 | } |
@@ -1098,7 +1099,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx) | |||
1098 | bip_aad(skb, aad); | 1099 | bip_aad(skb, aad); |
1099 | ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, | 1100 | ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, |
1100 | skb->data + 24, skb->len - 24, mic); | 1101 | skb->data + 24, skb->len - 24, mic); |
1101 | if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { | 1102 | if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { |
1102 | key->u.aes_cmac.icverrors++; | 1103 | key->u.aes_cmac.icverrors++; |
1103 | return RX_DROP_UNUSABLE; | 1104 | return RX_DROP_UNUSABLE; |
1104 | } | 1105 | } |
@@ -1202,7 +1203,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) | |||
1202 | if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, | 1203 | if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, |
1203 | skb->data + 24, skb->len - 24, | 1204 | skb->data + 24, skb->len - 24, |
1204 | mic) < 0 || | 1205 | mic) < 0 || |
1205 | memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { | 1206 | crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { |
1206 | key->u.aes_gmac.icverrors++; | 1207 | key->u.aes_gmac.icverrors++; |
1207 | return RX_DROP_UNUSABLE; | 1208 | return RX_DROP_UNUSABLE; |
1208 | } | 1209 | } |
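The wpa.c hunks above replace memcmp() with crypto_memneq() when verifying Michael MIC, CMAC and GMAC tags: memcmp() may return as soon as the first differing byte is found, so its timing can reveal how much of a forged MIC was correct, while crypto_memneq() touches every byte regardless. A small constant-time comparison in the same spirit (a sketch of the idea, not the kernel's crypto_memneq() implementation):

#include <stddef.h>
#include <stdio.h>

/* Compare two buffers without an early exit: the run time does not
 * depend on where the first mismatch is.  Returns 0 when equal. */
static unsigned int ct_memneq(const void *a, const void *b, size_t len)
{
    const unsigned char *pa = a, *pb = b;
    unsigned int diff = 0;
    size_t i;

    for (i = 0; i < len; i++)
        diff |= pa[i] ^ pb[i];
    return diff;
}

int main(void)
{
    unsigned char mic[8]      = { 1, 2, 3, 4, 5, 6, 7, 8 };
    unsigned char received[8] = { 1, 2, 3, 4, 5, 6, 7, 9 };

    if (ct_memneq(mic, received, sizeof(mic)))
        printf("MIC mismatch, drop frame\n");
    else
        printf("MIC ok\n");
    return 0;
}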
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 06019dba4b10..bd88a9b80773 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c | |||
@@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev) | |||
526 | struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); | 526 | struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); |
527 | 527 | ||
528 | mac802154_llsec_destroy(&sdata->sec); | 528 | mac802154_llsec_destroy(&sdata->sec); |
529 | |||
530 | free_netdev(dev); | ||
531 | } | 529 | } |
532 | 530 | ||
533 | static void ieee802154_if_setup(struct net_device *dev) | 531 | static void ieee802154_if_setup(struct net_device *dev) |
@@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, | |||
593 | sdata->dev->dev_addr); | 591 | sdata->dev->dev_addr); |
594 | 592 | ||
595 | sdata->dev->header_ops = &mac802154_header_ops; | 593 | sdata->dev->header_ops = &mac802154_header_ops; |
596 | sdata->dev->destructor = mac802154_wpan_free; | 594 | sdata->dev->needs_free_netdev = true; |
595 | sdata->dev->priv_destructor = mac802154_wpan_free; | ||
597 | sdata->dev->netdev_ops = &mac802154_wpan_ops; | 596 | sdata->dev->netdev_ops = &mac802154_wpan_ops; |
598 | sdata->dev->ml_priv = &mac802154_mlme_wpan; | 597 | sdata->dev->ml_priv = &mac802154_mlme_wpan; |
599 | wpan_dev->promiscuous_mode = false; | 598 | wpan_dev->promiscuous_mode = false; |
@@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, | |||
608 | 607 | ||
609 | break; | 608 | break; |
610 | case NL802154_IFTYPE_MONITOR: | 609 | case NL802154_IFTYPE_MONITOR: |
611 | sdata->dev->destructor = free_netdev; | 610 | sdata->dev->needs_free_netdev = true; |
612 | sdata->dev->netdev_ops = &mac802154_monitor_ops; | 611 | sdata->dev->netdev_ops = &mac802154_monitor_ops; |
613 | wpan_dev->promiscuous_mode = true; | 612 | wpan_dev->promiscuous_mode = true; |
614 | break; | 613 | break; |
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 89193a634da4..04a3128adcf0 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c | |||
@@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev) | |||
94 | struct vport *vport = ovs_internal_dev_get_vport(dev); | 94 | struct vport *vport = ovs_internal_dev_get_vport(dev); |
95 | 95 | ||
96 | ovs_vport_free(vport); | 96 | ovs_vport_free(vport); |
97 | free_netdev(dev); | ||
98 | } | 97 | } |
99 | 98 | ||
100 | static void | 99 | static void |
@@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev) | |||
156 | netdev->priv_flags &= ~IFF_TX_SKB_SHARING; | 155 | netdev->priv_flags &= ~IFF_TX_SKB_SHARING; |
157 | netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | | 156 | netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | |
158 | IFF_PHONY_HEADROOM | IFF_NO_QUEUE; | 157 | IFF_PHONY_HEADROOM | IFF_NO_QUEUE; |
159 | netdev->destructor = internal_dev_destructor; | 158 | netdev->needs_free_netdev = true; |
159 | netdev->priv_destructor = internal_dev_destructor; | ||
160 | netdev->ethtool_ops = &internal_dev_ethtool_ops; | 160 | netdev->ethtool_ops = &internal_dev_ethtool_ops; |
161 | netdev->rtnl_link_ops = &internal_dev_link_ops; | 161 | netdev->rtnl_link_ops = &internal_dev_link_ops; |
162 | 162 | ||
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index 21c28b51be94..2c9337946e30 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c | |||
@@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev) | |||
236 | dev->tx_queue_len = 10; | 236 | dev->tx_queue_len = 10; |
237 | 237 | ||
238 | dev->netdev_ops = &gprs_netdev_ops; | 238 | dev->netdev_ops = &gprs_netdev_ops; |
239 | dev->destructor = free_netdev; | 239 | dev->needs_free_netdev = true; |
240 | } | 240 | } |
241 | 241 | ||
242 | /* | 242 | /* |
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 0a4e28477ad9..54369225766e 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c | |||
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, | |||
217 | unsigned int *_toklen) | 217 | unsigned int *_toklen) |
218 | { | 218 | { |
219 | const __be32 *xdr = *_xdr; | 219 | const __be32 *xdr = *_xdr; |
220 | unsigned int toklen = *_toklen, n_parts, loop, tmp; | 220 | unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen; |
221 | 221 | ||
222 | /* there must be at least one name, and at least #names+1 length | 222 | /* there must be at least one name, and at least #names+1 length |
223 | * words */ | 223 | * words */ |
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, | |||
247 | toklen -= 4; | 247 | toklen -= 4; |
248 | if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) | 248 | if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) |
249 | return -EINVAL; | 249 | return -EINVAL; |
250 | if (tmp > toklen) | 250 | paddedlen = (tmp + 3) & ~3; |
251 | if (paddedlen > toklen) | ||
251 | return -EINVAL; | 252 | return -EINVAL; |
252 | princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); | 253 | princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); |
253 | if (!princ->name_parts[loop]) | 254 | if (!princ->name_parts[loop]) |
254 | return -ENOMEM; | 255 | return -ENOMEM; |
255 | memcpy(princ->name_parts[loop], xdr, tmp); | 256 | memcpy(princ->name_parts[loop], xdr, tmp); |
256 | princ->name_parts[loop][tmp] = 0; | 257 | princ->name_parts[loop][tmp] = 0; |
257 | tmp = (tmp + 3) & ~3; | 258 | toklen -= paddedlen; |
258 | toklen -= tmp; | 259 | xdr += paddedlen >> 2; |
259 | xdr += tmp >> 2; | ||
260 | } | 260 | } |
261 | 261 | ||
262 | if (toklen < 4) | 262 | if (toklen < 4) |
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, | |||
265 | toklen -= 4; | 265 | toklen -= 4; |
266 | if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) | 266 | if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) |
267 | return -EINVAL; | 267 | return -EINVAL; |
268 | if (tmp > toklen) | 268 | paddedlen = (tmp + 3) & ~3; |
269 | if (paddedlen > toklen) | ||
269 | return -EINVAL; | 270 | return -EINVAL; |
270 | princ->realm = kmalloc(tmp + 1, GFP_KERNEL); | 271 | princ->realm = kmalloc(tmp + 1, GFP_KERNEL); |
271 | if (!princ->realm) | 272 | if (!princ->realm) |
272 | return -ENOMEM; | 273 | return -ENOMEM; |
273 | memcpy(princ->realm, xdr, tmp); | 274 | memcpy(princ->realm, xdr, tmp); |
274 | princ->realm[tmp] = 0; | 275 | princ->realm[tmp] = 0; |
275 | tmp = (tmp + 3) & ~3; | 276 | toklen -= paddedlen; |
276 | toklen -= tmp; | 277 | xdr += paddedlen >> 2; |
277 | xdr += tmp >> 2; | ||
278 | 278 | ||
279 | _debug("%s/...@%s", princ->name_parts[0], princ->realm); | 279 | _debug("%s/...@%s", princ->name_parts[0], princ->realm); |
280 | 280 | ||
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, | |||
293 | unsigned int *_toklen) | 293 | unsigned int *_toklen) |
294 | { | 294 | { |
295 | const __be32 *xdr = *_xdr; | 295 | const __be32 *xdr = *_xdr; |
296 | unsigned int toklen = *_toklen, len; | 296 | unsigned int toklen = *_toklen, len, paddedlen; |
297 | 297 | ||
298 | /* there must be at least one tag and one length word */ | 298 | /* there must be at least one tag and one length word */ |
299 | if (toklen <= 8) | 299 | if (toklen <= 8) |
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, | |||
307 | toklen -= 8; | 307 | toklen -= 8; |
308 | if (len > max_data_size) | 308 | if (len > max_data_size) |
309 | return -EINVAL; | 309 | return -EINVAL; |
310 | paddedlen = (len + 3) & ~3; | ||
311 | if (paddedlen > toklen) | ||
312 | return -EINVAL; | ||
310 | td->data_len = len; | 313 | td->data_len = len; |
311 | 314 | ||
312 | if (len > 0) { | 315 | if (len > 0) { |
313 | td->data = kmemdup(xdr, len, GFP_KERNEL); | 316 | td->data = kmemdup(xdr, len, GFP_KERNEL); |
314 | if (!td->data) | 317 | if (!td->data) |
315 | return -ENOMEM; | 318 | return -ENOMEM; |
316 | len = (len + 3) & ~3; | 319 | toklen -= paddedlen; |
317 | toklen -= len; | 320 | xdr += paddedlen >> 2; |
318 | xdr += len >> 2; | ||
319 | } | 321 | } |
320 | 322 | ||
321 | _debug("tag %x len %x", td->tag, td->data_len); | 323 | _debug("tag %x len %x", td->tag, td->data_len); |
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, | |||
387 | const __be32 **_xdr, unsigned int *_toklen) | 389 | const __be32 **_xdr, unsigned int *_toklen) |
388 | { | 390 | { |
389 | const __be32 *xdr = *_xdr; | 391 | const __be32 *xdr = *_xdr; |
390 | unsigned int toklen = *_toklen, len; | 392 | unsigned int toklen = *_toklen, len, paddedlen; |
391 | 393 | ||
392 | /* there must be at least one length word */ | 394 | /* there must be at least one length word */ |
393 | if (toklen <= 4) | 395 | if (toklen <= 4) |
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, | |||
399 | toklen -= 4; | 401 | toklen -= 4; |
400 | if (len > AFSTOKEN_K5_TIX_MAX) | 402 | if (len > AFSTOKEN_K5_TIX_MAX) |
401 | return -EINVAL; | 403 | return -EINVAL; |
404 | paddedlen = (len + 3) & ~3; | ||
405 | if (paddedlen > toklen) | ||
406 | return -EINVAL; | ||
402 | *_tktlen = len; | 407 | *_tktlen = len; |
403 | 408 | ||
404 | _debug("ticket len %u", len); | 409 | _debug("ticket len %u", len); |
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, | |||
407 | *_ticket = kmemdup(xdr, len, GFP_KERNEL); | 412 | *_ticket = kmemdup(xdr, len, GFP_KERNEL); |
408 | if (!*_ticket) | 413 | if (!*_ticket) |
409 | return -ENOMEM; | 414 | return -ENOMEM; |
410 | len = (len + 3) & ~3; | 415 | toklen -= paddedlen; |
411 | toklen -= len; | 416 | xdr += paddedlen >> 2; |
412 | xdr += len >> 2; | ||
413 | } | 417 | } |
414 | 418 | ||
415 | *_xdr = xdr; | 419 | *_xdr = xdr; |
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) | |||
552 | { | 556 | { |
553 | const __be32 *xdr = prep->data, *token; | 557 | const __be32 *xdr = prep->data, *token; |
554 | const char *cp; | 558 | const char *cp; |
555 | unsigned int len, tmp, loop, ntoken, toklen, sec_ix; | 559 | unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix; |
556 | size_t datalen = prep->datalen; | 560 | size_t datalen = prep->datalen; |
557 | int ret; | 561 | int ret; |
558 | 562 | ||
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) | |||
578 | if (len < 1 || len > AFSTOKEN_CELL_MAX) | 582 | if (len < 1 || len > AFSTOKEN_CELL_MAX) |
579 | goto not_xdr; | 583 | goto not_xdr; |
580 | datalen -= 4; | 584 | datalen -= 4; |
581 | tmp = (len + 3) & ~3; | 585 | paddedlen = (len + 3) & ~3; |
582 | if (tmp > datalen) | 586 | if (paddedlen > datalen) |
583 | goto not_xdr; | 587 | goto not_xdr; |
584 | 588 | ||
585 | cp = (const char *) xdr; | 589 | cp = (const char *) xdr; |
586 | for (loop = 0; loop < len; loop++) | 590 | for (loop = 0; loop < len; loop++) |
587 | if (!isprint(cp[loop])) | 591 | if (!isprint(cp[loop])) |
588 | goto not_xdr; | 592 | goto not_xdr; |
589 | if (len < tmp) | 593 | for (; loop < paddedlen; loop++) |
590 | for (; loop < tmp; loop++) | 594 | if (cp[loop]) |
591 | if (cp[loop]) | 595 | goto not_xdr; |
592 | goto not_xdr; | ||
593 | _debug("cellname: [%u/%u] '%*.*s'", | 596 | _debug("cellname: [%u/%u] '%*.*s'", |
594 | len, tmp, len, len, (const char *) xdr); | 597 | len, paddedlen, len, len, (const char *) xdr); |
595 | datalen -= tmp; | 598 | datalen -= paddedlen; |
596 | xdr += tmp >> 2; | 599 | xdr += paddedlen >> 2; |
597 | 600 | ||
598 | /* get the token count */ | 601 | /* get the token count */ |
599 | if (datalen < 12) | 602 | if (datalen < 12) |
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) | |||
614 | sec_ix = ntohl(*xdr); | 617 | sec_ix = ntohl(*xdr); |
615 | datalen -= 4; | 618 | datalen -= 4; |
616 | _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); | 619 | _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); |
617 | if (toklen < 20 || toklen > datalen) | 620 | paddedlen = (toklen + 3) & ~3; |
621 | if (toklen < 20 || toklen > datalen || paddedlen > datalen) | ||
618 | goto not_xdr; | 622 | goto not_xdr; |
619 | datalen -= (toklen + 3) & ~3; | 623 | datalen -= paddedlen; |
620 | xdr += (toklen + 3) >> 2; | 624 | xdr += paddedlen >> 2; |
621 | 625 | ||
622 | } while (--loop > 0); | 626 | } while (--loop > 0); |
623 | 627 | ||
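The rxrpc/key.c hunks above all apply the same bounds check: an attacker-supplied XDR length is first rounded up to its 4-byte padded size, and that padded size is compared against the bytes remaining in the token, so the later "toklen -= paddedlen; xdr += paddedlen >> 2;" can neither walk past the buffer nor wrap toklen. A standalone sketch of the check (all names are illustrative):

#include <stdio.h>
#include <string.h>

/* Consume one length-prefixed, 4-byte padded field from a token buffer.
 * Returns 0 on success, -1 if the declared length does not fit. */
static int pull_padded(const unsigned char **p, unsigned int *toklen,
                       unsigned int len, char *out, size_t outsz)
{
    unsigned int paddedlen = (len + 3) & ~3u;    /* round up to a 4-byte unit */

    if (len == 0 || len >= outsz || paddedlen > *toklen)
        return -1;                               /* reject before consuming */
    memcpy(out, *p, len);
    out[len] = '\0';
    *toklen -= paddedlen;                        /* account for the padding too */
    *p += paddedlen;
    return 0;
}

int main(void)
{
    const unsigned char token[8] = { 'a', 'b', 'c', 'd', 'e', 0, 0, 0 };
    const unsigned char *p = token;
    unsigned int toklen = sizeof(token);
    char name[16];

    if (pull_padded(&p, &toklen, 5, name, sizeof(name)) == 0)
        printf("pulled '%s', %u bytes left\n", name, toklen);
    if (pull_padded(&p, &toklen, 5, name, sizeof(name)) != 0)
        printf("second pull correctly rejected (%u bytes left)\n", toklen);
    return 0;
}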
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 164b5ac094be..7dc5892671c8 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -94,8 +94,10 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, | |||
94 | k++; | 94 | k++; |
95 | } | 95 | } |
96 | 96 | ||
97 | if (n) | 97 | if (n) { |
98 | err = -EINVAL; | ||
98 | goto err_out; | 99 | goto err_out; |
100 | } | ||
99 | 101 | ||
100 | return keys_ex; | 102 | return keys_ex; |
101 | 103 | ||
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index f42008b29311..b062bc80c7cb 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -132,21 +132,21 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, | |||
132 | } | 132 | } |
133 | } | 133 | } |
134 | 134 | ||
135 | spin_lock_bh(&police->tcf_lock); | ||
136 | if (est) { | 135 | if (est) { |
137 | err = gen_replace_estimator(&police->tcf_bstats, NULL, | 136 | err = gen_replace_estimator(&police->tcf_bstats, NULL, |
138 | &police->tcf_rate_est, | 137 | &police->tcf_rate_est, |
139 | &police->tcf_lock, | 138 | &police->tcf_lock, |
140 | NULL, est); | 139 | NULL, est); |
141 | if (err) | 140 | if (err) |
142 | goto failure_unlock; | 141 | goto failure; |
143 | } else if (tb[TCA_POLICE_AVRATE] && | 142 | } else if (tb[TCA_POLICE_AVRATE] && |
144 | (ret == ACT_P_CREATED || | 143 | (ret == ACT_P_CREATED || |
145 | !gen_estimator_active(&police->tcf_rate_est))) { | 144 | !gen_estimator_active(&police->tcf_rate_est))) { |
146 | err = -EINVAL; | 145 | err = -EINVAL; |
147 | goto failure_unlock; | 146 | goto failure; |
148 | } | 147 | } |
149 | 148 | ||
149 | spin_lock_bh(&police->tcf_lock); | ||
150 | /* No failure allowed after this point */ | 150 | /* No failure allowed after this point */ |
151 | police->tcfp_mtu = parm->mtu; | 151 | police->tcfp_mtu = parm->mtu; |
152 | if (police->tcfp_mtu == 0) { | 152 | if (police->tcfp_mtu == 0) { |
@@ -192,8 +192,6 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, | |||
192 | 192 | ||
193 | return ret; | 193 | return ret; |
194 | 194 | ||
195 | failure_unlock: | ||
196 | spin_unlock_bh(&police->tcf_lock); | ||
197 | failure: | 195 | failure: |
198 | qdisc_put_rtab(P_tab); | 196 | qdisc_put_rtab(P_tab); |
199 | qdisc_put_rtab(R_tab); | 197 | qdisc_put_rtab(R_tab); |
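The act_police hunk above pulls gen_replace_estimator() and the TCA_POLICE_AVRATE check in front of spin_lock_bh(), presumably because the estimator setup allocates memory and may sleep, which is not allowed while holding the BH-disabling tcf_lock; only the "no failure allowed" section stays under the lock, which also lets the failure_unlock label disappear. Sketched as a generic pattern (pthread mutex standing in for the spinlock, names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: do the work that can fail or block before taking
 * the lock, and keep only the infallible updates inside it. */
struct toy_policer {
    pthread_mutex_t lock;
    long rate;
    void *estimator;
};

static int toy_police_init(struct toy_policer *p, long rate)
{
    void *est = malloc(128);     /* may fail or block: done outside the lock */

    if (!est)
        return -1;

    pthread_mutex_lock(&p->lock);
    /* No failure allowed after this point. */
    free(p->estimator);
    p->estimator = est;
    p->rate = rate;
    pthread_mutex_unlock(&p->lock);
    return 0;
}

int main(void)
{
    struct toy_policer p = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };

    if (toy_police_init(&p, 1000) == 0)
        printf("policer configured, rate %ld\n", p.rate);
    free(p.estimator);
    return 0;
}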
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 8c589230794f..3dcd0ecf3d99 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) | |||
275 | if (sctp_sk(sk)->bind_hash) | 275 | if (sctp_sk(sk)->bind_hash) |
276 | sctp_put_port(sk); | 276 | sctp_put_port(sk); |
277 | 277 | ||
278 | sctp_sk(sk)->ep = NULL; | ||
278 | sock_put(sk); | 279 | sock_put(sk); |
279 | } | 280 | } |
280 | 281 | ||
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 048954eee984..9a647214a91e 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c | |||
@@ -278,7 +278,6 @@ out: | |||
278 | 278 | ||
279 | static int sctp_sock_dump(struct sock *sk, void *p) | 279 | static int sctp_sock_dump(struct sock *sk, void *p) |
280 | { | 280 | { |
281 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; | ||
282 | struct sctp_comm_param *commp = p; | 281 | struct sctp_comm_param *commp = p; |
283 | struct sk_buff *skb = commp->skb; | 282 | struct sk_buff *skb = commp->skb; |
284 | struct netlink_callback *cb = commp->cb; | 283 | struct netlink_callback *cb = commp->cb; |
@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p) | |||
287 | int err = 0; | 286 | int err = 0; |
288 | 287 | ||
289 | lock_sock(sk); | 288 | lock_sock(sk); |
290 | list_for_each_entry(assoc, &ep->asocs, asocs) { | 289 | if (!sctp_sk(sk)->ep) |
290 | goto release; | ||
291 | list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) { | ||
291 | if (cb->args[4] < cb->args[1]) | 292 | if (cb->args[4] < cb->args[1]) |
292 | goto next; | 293 | goto next; |
293 | 294 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f16c8d97b7f3..3a8318e518f1 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -4622,13 +4622,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), | |||
4622 | 4622 | ||
4623 | for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; | 4623 | for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; |
4624 | hash++, head++) { | 4624 | hash++, head++) { |
4625 | read_lock(&head->lock); | 4625 | read_lock_bh(&head->lock); |
4626 | sctp_for_each_hentry(epb, &head->chain) { | 4626 | sctp_for_each_hentry(epb, &head->chain) { |
4627 | err = cb(sctp_ep(epb), p); | 4627 | err = cb(sctp_ep(epb), p); |
4628 | if (err) | 4628 | if (err) |
4629 | break; | 4629 | break; |
4630 | } | 4630 | } |
4631 | read_unlock(&head->lock); | 4631 | read_unlock_bh(&head->lock); |
4632 | } | 4632 | } |
4633 | 4633 | ||
4634 | return err; | 4634 | return err; |
@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), | |||
4666 | if (err) | 4666 | if (err) |
4667 | return err; | 4667 | return err; |
4668 | 4668 | ||
4669 | sctp_transport_get_idx(net, &hti, pos); | 4669 | obj = sctp_transport_get_idx(net, &hti, pos + 1); |
4670 | obj = sctp_transport_get_next(net, &hti); | 4670 | for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) { |
4671 | for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { | ||
4672 | struct sctp_transport *transport = obj; | 4671 | struct sctp_transport *transport = obj; |
4673 | 4672 | ||
4674 | if (!sctp_transport_hold(transport)) | 4673 | if (!sctp_transport_hold(transport)) |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 312ef7de57d7..ab3087687a32 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) | |||
508 | } | 508 | } |
509 | 509 | ||
510 | if (skb_cloned(_skb) && | 510 | if (skb_cloned(_skb) && |
511 | pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL)) | 511 | pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) |
512 | goto exit; | 512 | goto exit; |
513 | 513 | ||
514 | /* Now reverse the concerned fields */ | 514 | /* Now reverse the concerned fields */ |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 6a7fe7660551..1a0c961f4ffe 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -999,7 +999,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
999 | struct path path = { }; | 999 | struct path path = { }; |
1000 | 1000 | ||
1001 | err = -EINVAL; | 1001 | err = -EINVAL; |
1002 | if (sunaddr->sun_family != AF_UNIX) | 1002 | if (addr_len < offsetofend(struct sockaddr_un, sun_family) || |
1003 | sunaddr->sun_family != AF_UNIX) | ||
1003 | goto out; | 1004 | goto out; |
1004 | 1005 | ||
1005 | if (addr_len == sizeof(short)) { | 1006 | if (addr_len == sizeof(short)) { |
@@ -1110,6 +1111,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, | |||
1110 | unsigned int hash; | 1111 | unsigned int hash; |
1111 | int err; | 1112 | int err; |
1112 | 1113 | ||
1114 | err = -EINVAL; | ||
1115 | if (alen < offsetofend(struct sockaddr, sa_family)) | ||
1116 | goto out; | ||
1117 | |||
1113 | if (addr->sa_family != AF_UNSPEC) { | 1118 | if (addr->sa_family != AF_UNSPEC) { |
1114 | err = unix_mkname(sunaddr, alen, &hash); | 1119 | err = unix_mkname(sunaddr, alen, &hash); |
1115 | if (err < 0) | 1120 | if (err < 0) |
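The af_unix hunks above guard both bind() and datagram connect() with an explicit length check: before sun_family (or sa_family) is read, the caller-supplied address length must cover at least the bytes up to the end of that field, which is exactly what offsetofend() expresses. A userspace sketch of the same validation (offsetofend() is redefined here for illustration; in the kernel it comes from linux/stddef.h):

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Same idea as the kernel macro: offset of the first byte past MEMBER. */
#define offsetofend(TYPE, MEMBER) \
    (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

static int check_unix_addr(const struct sockaddr_un *sun, socklen_t addr_len)
{
    /* Reject short addresses before reading sun_family at all. */
    if (addr_len < offsetofend(struct sockaddr_un, sun_family))
        return -1;
    if (sun->sun_family != AF_UNIX)
        return -1;
    return 0;
}

int main(void)
{
    struct sockaddr_un sun;

    memset(&sun, 0, sizeof(sun));
    sun.sun_family = AF_UNIX;
    strcpy(sun.sun_path, "/tmp/example.sock");

    printf("full address:  %s\n",
           check_unix_addr(&sun, sizeof(sun)) == 0 ? "ok" : "rejected");
    printf("1-byte address: %s\n",
           check_unix_addr(&sun, 1) == 0 ? "ok" : "rejected");
    return 0;
}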
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 1a4db6790e20..6cdb054484d6 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c | |||
@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev) | |||
914 | * Main IOCTl dispatcher. | 914 | * Main IOCTl dispatcher. |
915 | * Check the type of IOCTL and call the appropriate wrapper... | 915 | * Check the type of IOCTL and call the appropriate wrapper... |
916 | */ | 916 | */ |
917 | static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | 917 | static int wireless_process_ioctl(struct net *net, struct iwreq *iwr, |
918 | unsigned int cmd, | 918 | unsigned int cmd, |
919 | struct iw_request_info *info, | 919 | struct iw_request_info *info, |
920 | wext_ioctl_func standard, | 920 | wext_ioctl_func standard, |
921 | wext_ioctl_func private) | 921 | wext_ioctl_func private) |
922 | { | 922 | { |
923 | struct iwreq *iwr = (struct iwreq *) ifr; | ||
924 | struct net_device *dev; | 923 | struct net_device *dev; |
925 | iw_handler handler; | 924 | iw_handler handler; |
926 | 925 | ||
@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
928 | * The copy_to/from_user() of ifr is also dealt with in there */ | 927 | * The copy_to/from_user() of ifr is also dealt with in there */ |
929 | 928 | ||
930 | /* Make sure the device exist */ | 929 | /* Make sure the device exist */ |
931 | if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL) | 930 | if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL) |
932 | return -ENODEV; | 931 | return -ENODEV; |
933 | 932 | ||
934 | /* A bunch of special cases, then the generic case... | 933 | /* A bunch of special cases, then the generic case... |
@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
957 | else if (private) | 956 | else if (private) |
958 | return private(dev, iwr, cmd, info, handler); | 957 | return private(dev, iwr, cmd, info, handler); |
959 | } | 958 | } |
960 | /* Old driver API : call driver ioctl handler */ | ||
961 | if (dev->netdev_ops->ndo_do_ioctl) | ||
962 | return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); | ||
963 | return -EOPNOTSUPP; | 959 | return -EOPNOTSUPP; |
964 | } | 960 | } |
965 | 961 | ||
@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd) | |||
977 | } | 973 | } |
978 | 974 | ||
979 | /* entry point from dev ioctl */ | 975 | /* entry point from dev ioctl */ |
980 | static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, | 976 | static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr, |
981 | unsigned int cmd, struct iw_request_info *info, | 977 | unsigned int cmd, struct iw_request_info *info, |
982 | wext_ioctl_func standard, | 978 | wext_ioctl_func standard, |
983 | wext_ioctl_func private) | 979 | wext_ioctl_func private) |
@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, | |||
987 | if (ret) | 983 | if (ret) |
988 | return ret; | 984 | return ret; |
989 | 985 | ||
990 | dev_load(net, ifr->ifr_name); | 986 | dev_load(net, iwr->ifr_name); |
991 | rtnl_lock(); | 987 | rtnl_lock(); |
992 | ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private); | 988 | ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private); |
993 | rtnl_unlock(); | 989 | rtnl_unlock(); |
994 | 990 | ||
995 | return ret; | 991 | return ret; |
@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device * dev, | |||
1039 | } | 1035 | } |
1040 | 1036 | ||
1041 | 1037 | ||
1042 | int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, | 1038 | int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, |
1043 | void __user *arg) | 1039 | void __user *arg) |
1044 | { | 1040 | { |
1045 | struct iw_request_info info = { .cmd = cmd, .flags = 0 }; | 1041 | struct iw_request_info info = { .cmd = cmd, .flags = 0 }; |
1046 | int ret; | 1042 | int ret; |
1047 | 1043 | ||
1048 | ret = wext_ioctl_dispatch(net, ifr, cmd, &info, | 1044 | ret = wext_ioctl_dispatch(net, iwr, cmd, &info, |
1049 | ioctl_standard_call, | 1045 | ioctl_standard_call, |
1050 | ioctl_private_call); | 1046 | ioctl_private_call); |
1051 | if (ret >= 0 && | 1047 | if (ret >= 0 && |
1052 | IW_IS_GET(cmd) && | 1048 | IW_IS_GET(cmd) && |
1053 | copy_to_user(arg, ifr, sizeof(struct iwreq))) | 1049 | copy_to_user(arg, iwr, sizeof(struct iwreq))) |
1054 | return -EFAULT; | 1050 | return -EFAULT; |
1055 | 1051 | ||
1056 | return ret; | 1052 | return ret; |
@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | |||
1107 | info.cmd = cmd; | 1103 | info.cmd = cmd; |
1108 | info.flags = IW_REQUEST_FLAG_COMPAT; | 1104 | info.flags = IW_REQUEST_FLAG_COMPAT; |
1109 | 1105 | ||
1110 | ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info, | 1106 | ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, |
1111 | compat_standard_call, | 1107 | compat_standard_call, |
1112 | compat_private_call); | 1108 | compat_private_call); |
1113 | 1109 | ||
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index ce753a408c56..c583a1e1bd3c 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst | |||
@@ -14,7 +14,15 @@ __headers: | |||
14 | include scripts/Kbuild.include | 14 | include scripts/Kbuild.include |
15 | 15 | ||
16 | srcdir := $(srctree)/$(obj) | 16 | srcdir := $(srctree)/$(obj) |
17 | subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) | 17 | |
18 | # When make is run under a fakechroot environment, the function | ||
19 | # $(wildcard $(srcdir)/*/.) doesn't only return directories, but also regular | ||
20 | # files. So, we are using a combination of sort/dir/wildcard which works | ||
21 | # with fakechroot. | ||
22 | subdirs := $(patsubst $(srcdir)/%/,%,\ | ||
23 | $(filter-out $(srcdir)/,\ | ||
24 | $(sort $(dir $(wildcard $(srcdir)/*/))))) | ||
25 | |||
18 | # caller may set destination dir (when installing to asm/) | 26 | # caller may set destination dir (when installing to asm/) |
19 | _dst := $(if $(dst),$(dst),$(obj)) | 27 | _dst := $(if $(dst),$(dst),$(obj)) |
20 | 28 | ||
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h index 3bffdcaaa274..b724a0290c75 100644 --- a/scripts/genksyms/genksyms.h +++ b/scripts/genksyms/genksyms.h | |||
@@ -75,7 +75,7 @@ struct string_list *copy_list_range(struct string_list *start, | |||
75 | int yylex(void); | 75 | int yylex(void); |
76 | int yyparse(void); | 76 | int yyparse(void); |
77 | 77 | ||
78 | void error_with_pos(const char *, ...); | 78 | void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2))); |
79 | 79 | ||
80 | /*----------------------------------------------------------------------*/ | 80 | /*----------------------------------------------------------------------*/ |
81 | #define xmalloc(size) ({ void *__ptr = malloc(size); \ | 81 | #define xmalloc(size) ({ void *__ptr = malloc(size); \ |
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 90a091b6ae4d..eb8144643b78 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile | |||
@@ -196,7 +196,7 @@ clean-files += config.pot linux.pot | |||
196 | 196 | ||
197 | # Check that we have the required ncurses stuff installed for lxdialog (menuconfig) | 197 | # Check that we have the required ncurses stuff installed for lxdialog (menuconfig) |
198 | PHONY += $(obj)/dochecklxdialog | 198 | PHONY += $(obj)/dochecklxdialog |
199 | $(addprefix $(obj)/,$(lxdialog)): $(obj)/dochecklxdialog | 199 | $(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/dochecklxdialog |
200 | $(obj)/dochecklxdialog: | 200 | $(obj)/dochecklxdialog: |
201 | $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf) | 201 | $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf) |
202 | 202 | ||
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index a9bc5334a478..003114779815 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c | |||
@@ -271,7 +271,7 @@ static struct mitem k_menu_items[MAX_MENU_ITEMS]; | |||
271 | static int items_num; | 271 | static int items_num; |
272 | static int global_exit; | 272 | static int global_exit; |
273 | /* the currently selected button */ | 273 | /* the currently selected button */ |
274 | const char *current_instructions = menu_instructions; | 274 | static const char *current_instructions = menu_instructions; |
275 | 275 | ||
276 | static char *dialog_input_result; | 276 | static char *dialog_input_result; |
277 | static int dialog_input_result_len; | 277 | static int dialog_input_result_len; |
@@ -305,7 +305,7 @@ struct function_keys { | |||
305 | }; | 305 | }; |
306 | 306 | ||
307 | static const int function_keys_num = 9; | 307 | static const int function_keys_num = 9; |
308 | struct function_keys function_keys[] = { | 308 | static struct function_keys function_keys[] = { |
309 | { | 309 | { |
310 | .key_str = "F1", | 310 | .key_str = "F1", |
311 | .func = "Help", | 311 | .func = "Help", |
@@ -508,7 +508,7 @@ static int get_mext_match(const char *match_str, match_f flag) | |||
508 | index = (index + items_num) % items_num; | 508 | index = (index + items_num) % items_num; |
509 | while (true) { | 509 | while (true) { |
510 | char *str = k_menu_items[index].str; | 510 | char *str = k_menu_items[index].str; |
511 | if (strcasestr(str, match_str) != 0) | 511 | if (strcasestr(str, match_str) != NULL) |
512 | return index; | 512 | return index; |
513 | if (flag == FIND_NEXT_MATCH_UP || | 513 | if (flag == FIND_NEXT_MATCH_UP || |
514 | flag == MATCH_TINKER_PATTERN_UP) | 514 | flag == MATCH_TINKER_PATTERN_UP) |
@@ -1067,7 +1067,7 @@ static int do_match(int key, struct match_state *state, int *ans) | |||
1067 | 1067 | ||
1068 | static void conf(struct menu *menu) | 1068 | static void conf(struct menu *menu) |
1069 | { | 1069 | { |
1070 | struct menu *submenu = 0; | 1070 | struct menu *submenu = NULL; |
1071 | const char *prompt = menu_get_prompt(menu); | 1071 | const char *prompt = menu_get_prompt(menu); |
1072 | struct symbol *sym; | 1072 | struct symbol *sym; |
1073 | int res; | 1073 | int res; |
@@ -1234,7 +1234,7 @@ static void show_help(struct menu *menu) | |||
1234 | static void conf_choice(struct menu *menu) | 1234 | static void conf_choice(struct menu *menu) |
1235 | { | 1235 | { |
1236 | const char *prompt = _(menu_get_prompt(menu)); | 1236 | const char *prompt = _(menu_get_prompt(menu)); |
1237 | struct menu *child = 0; | 1237 | struct menu *child = NULL; |
1238 | struct symbol *active; | 1238 | struct symbol *active; |
1239 | int selected_index = 0; | 1239 | int selected_index = 0; |
1240 | int last_top_row = 0; | 1240 | int last_top_row = 0; |
@@ -1456,7 +1456,7 @@ static void conf_save(void) | |||
1456 | } | 1456 | } |
1457 | } | 1457 | } |
1458 | 1458 | ||
1459 | void setup_windows(void) | 1459 | static void setup_windows(void) |
1460 | { | 1460 | { |
1461 | int lines, columns; | 1461 | int lines, columns; |
1462 | 1462 | ||
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c index 4b2f44c20caf..a64b1c31253e 100644 --- a/scripts/kconfig/nconf.gui.c +++ b/scripts/kconfig/nconf.gui.c | |||
@@ -129,7 +129,7 @@ static void no_colors_theme(void) | |||
129 | mkattrn(FUNCTION_TEXT, A_REVERSE); | 129 | mkattrn(FUNCTION_TEXT, A_REVERSE); |
130 | } | 130 | } |
131 | 131 | ||
132 | void set_colors() | 132 | void set_colors(void) |
133 | { | 133 | { |
134 | start_color(); | 134 | start_color(); |
135 | use_default_colors(); | 135 | use_default_colors(); |
@@ -192,7 +192,7 @@ const char *get_line(const char *text, int line_no) | |||
192 | int lines = 0; | 192 | int lines = 0; |
193 | 193 | ||
194 | if (!text) | 194 | if (!text) |
195 | return 0; | 195 | return NULL; |
196 | 196 | ||
197 | for (i = 0; text[i] != '\0' && lines < line_no; i++) | 197 | for (i = 0; text[i] != '\0' && lines < line_no; i++) |
198 | if (text[i] == '\n') | 198 | if (text[i] == '\n') |
diff --git a/scripts/tags.sh b/scripts/tags.sh index d661f2f3ef61..d23dcbf17457 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh | |||
@@ -106,6 +106,7 @@ all_compiled_sources() | |||
106 | case "$i" in | 106 | case "$i" in |
107 | *.[cS]) | 107 | *.[cS]) |
108 | j=${i/\.[cS]/\.o} | 108 | j=${i/\.[cS]/\.o} |
109 | j="${j#$tree}" | ||
109 | if [ -e $j ]; then | 110 | if [ -e $j ]; then |
110 | echo $i | 111 | echo $i |
111 | fi | 112 | fi |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index e67a526d1f30..819fd6858b49 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -1106,10 +1106,8 @@ static int selinux_parse_opts_str(char *options, | |||
1106 | 1106 | ||
1107 | opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), | 1107 | opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), |
1108 | GFP_KERNEL); | 1108 | GFP_KERNEL); |
1109 | if (!opts->mnt_opts_flags) { | 1109 | if (!opts->mnt_opts_flags) |
1110 | kfree(opts->mnt_opts); | ||
1111 | goto out_err; | 1110 | goto out_err; |
1112 | } | ||
1113 | 1111 | ||
1114 | if (fscontext) { | 1112 | if (fscontext) { |
1115 | opts->mnt_opts[num_mnt_opts] = fscontext; | 1113 | opts->mnt_opts[num_mnt_opts] = fscontext; |
@@ -1132,6 +1130,7 @@ static int selinux_parse_opts_str(char *options, | |||
1132 | return 0; | 1130 | return 0; |
1133 | 1131 | ||
1134 | out_err: | 1132 | out_err: |
1133 | security_free_mnt_opts(opts); | ||
1135 | kfree(context); | 1134 | kfree(context); |
1136 | kfree(defcontext); | 1135 | kfree(defcontext); |
1137 | kfree(fscontext); | 1136 | kfree(fscontext); |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 5088d4b8db22..009e6c98754e 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol, | |||
2492 | struct snd_pcm_substream *substream; | 2492 | struct snd_pcm_substream *substream; |
2493 | const struct snd_pcm_chmap_elem *map; | 2493 | const struct snd_pcm_chmap_elem *map; |
2494 | 2494 | ||
2495 | if (snd_BUG_ON(!info->chmap)) | 2495 | if (!info->chmap) |
2496 | return -EINVAL; | 2496 | return -EINVAL; |
2497 | substream = snd_pcm_chmap_substream(info, idx); | 2497 | substream = snd_pcm_chmap_substream(info, idx); |
2498 | if (!substream) | 2498 | if (!substream) |
@@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag, | |||
2524 | unsigned int __user *dst; | 2524 | unsigned int __user *dst; |
2525 | int c, count = 0; | 2525 | int c, count = 0; |
2526 | 2526 | ||
2527 | if (snd_BUG_ON(!info->chmap)) | 2527 | if (!info->chmap) |
2528 | return -EINVAL; | 2528 | return -EINVAL; |
2529 | if (size < 8) | 2529 | if (size < 8) |
2530 | return -ENOMEM; | 2530 | return -ENOMEM; |
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index 9e6f54f8c45d..1e26854b3425 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c | |||
@@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp, | |||
682 | cycle = increment_cycle_count(cycle, 1); | 682 | cycle = increment_cycle_count(cycle, 1); |
683 | if (s->handle_packet(s, 0, cycle, i) < 0) { | 683 | if (s->handle_packet(s, 0, cycle, i) < 0) { |
684 | s->packet_index = -1; | 684 | s->packet_index = -1; |
685 | amdtp_stream_pcm_abort(s); | 685 | if (in_interrupt()) |
686 | amdtp_stream_pcm_abort(s); | ||
687 | WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); | ||
686 | return; | 688 | return; |
687 | } | 689 | } |
688 | } | 690 | } |
@@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp, | |||
734 | /* Queueing error or detecting invalid payload. */ | 736 | /* Queueing error or detecting invalid payload. */ |
735 | if (i < packets) { | 737 | if (i < packets) { |
736 | s->packet_index = -1; | 738 | s->packet_index = -1; |
737 | amdtp_stream_pcm_abort(s); | 739 | if (in_interrupt()) |
740 | amdtp_stream_pcm_abort(s); | ||
741 | WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); | ||
738 | return; | 742 | return; |
739 | } | 743 | } |
740 | 744 | ||
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h index 7e8831722821..ea1a91e99875 100644 --- a/sound/firewire/amdtp-stream.h +++ b/sound/firewire/amdtp-stream.h | |||
@@ -135,7 +135,7 @@ struct amdtp_stream { | |||
135 | /* For a PCM substream processing. */ | 135 | /* For a PCM substream processing. */ |
136 | struct snd_pcm_substream *pcm; | 136 | struct snd_pcm_substream *pcm; |
137 | struct tasklet_struct period_tasklet; | 137 | struct tasklet_struct period_tasklet; |
138 | unsigned int pcm_buffer_pointer; | 138 | snd_pcm_uframes_t pcm_buffer_pointer; |
139 | unsigned int pcm_period_pointer; | 139 | unsigned int pcm_period_pointer; |
140 | 140 | ||
141 | /* To wait for first packet. */ | 141 | /* To wait for first packet. */ |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1770f085c2a6..01eb1dc7b5b3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -370,10 +370,12 @@ enum { | |||
370 | #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) | 370 | #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) |
371 | #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) | 371 | #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) |
372 | #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) | 372 | #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) |
373 | #define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98) | ||
373 | #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198) | 374 | #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198) |
374 | #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ | 375 | #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348) |
375 | IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \ | 376 | #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \ |
376 | IS_GLK(pci) | 377 | IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \ |
378 | IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci)) | ||
377 | 379 | ||
378 | static char *driver_short_names[] = { | 380 | static char *driver_short_names[] = { |
379 | [AZX_DRIVER_ICH] = "HDA Intel", | 381 | [AZX_DRIVER_ICH] = "HDA Intel", |
@@ -2378,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = { | |||
2378 | /* Kabylake-H */ | 2380 | /* Kabylake-H */ |
2379 | { PCI_DEVICE(0x8086, 0xa2f0), | 2381 | { PCI_DEVICE(0x8086, 0xa2f0), |
2380 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | 2382 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
2383 | /* Coffelake */ | ||
2384 | { PCI_DEVICE(0x8086, 0xa348), | ||
2385 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE}, | ||
2381 | /* Broxton-P(Apollolake) */ | 2386 | /* Broxton-P(Apollolake) */ |
2382 | { PCI_DEVICE(0x8086, 0x5a98), | 2387 | { PCI_DEVICE(0x8086, 0x5a98), |
2383 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, | 2388 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, |
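Besides adding the Broxton-T and Coffee Lake PCI IDs, the hda_intel.c hunk fixes the parenthesization of IS_SKL_PLUS(): the old definition closed its parentheses after IS_BXT(pci), so any use of the macro inside a larger expression only grouped the first three checks. A reduced illustration (generic A/B/C macros, not the driver's):

	#define OLD_PLUS(x)	(A(x) || B(x)) || C(x)	/* parentheses close too early */
	#define NEW_PLUS(x)	(A(x) || B(x) || C(x))

	/* if (!OLD_PLUS(x))  expands to  if (!(A(x) || B(x)) || C(x))  -- C(x) escapes the '!' */
	/* if (!NEW_PLUS(x))  expands to  if (!(A(x) || B(x) || C(x)))  -- intended grouping    */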
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 282a60368b14..5f66697fe1e0 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c | |||
@@ -192,7 +192,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, | |||
192 | "complete_and_exit", | 192 | "complete_and_exit", |
193 | "kvm_spurious_fault", | 193 | "kvm_spurious_fault", |
194 | "__reiserfs_panic", | 194 | "__reiserfs_panic", |
195 | "lbug_with_loc" | 195 | "lbug_with_loc", |
196 | "fortify_panic", | ||
196 | }; | 197 | }; |
197 | 198 | ||
198 | if (func->bind == STB_WEAK) | 199 | if (func->bind == STB_WEAK) |
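The objtool hunk appends fortify_panic() to the table of functions __dead_end_function() treats as never returning, and adds a trailing comma to the former last entry "lbug_with_loc" so the new name stays a separate array element. Without that comma the two adjacent string literals would concatenate into one bogus entry; an illustration (not objtool code):

	static const char *wrong[] = { "lbug_with_loc" "fortify_panic" };	/* 1 entry: "lbug_with_locfortify_panic" */
	static const char *right[] = { "lbug_with_loc", "fortify_panic" };	/* 2 entries, as intended */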
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 8354d04b392f..1f4fbc9a3292 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config | |||
@@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS) | |||
19 | 19 | ||
20 | include $(srctree)/tools/scripts/Makefile.arch | 20 | include $(srctree)/tools/scripts/Makefile.arch |
21 | 21 | ||
22 | $(call detected_var,ARCH) | 22 | $(call detected_var,SRCARCH) |
23 | 23 | ||
24 | NO_PERF_REGS := 1 | 24 | NO_PERF_REGS := 1 |
25 | 25 | ||
26 | # Additional ARCH settings for ppc | 26 | # Additional ARCH settings for ppc |
27 | ifeq ($(ARCH),powerpc) | 27 | ifeq ($(SRCARCH),powerpc) |
28 | NO_PERF_REGS := 0 | 28 | NO_PERF_REGS := 0 |
29 | LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 | 29 | LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 |
30 | endif | 30 | endif |
31 | 31 | ||
32 | # Additional ARCH settings for x86 | 32 | # Additional ARCH settings for x86 |
33 | ifeq ($(ARCH),x86) | 33 | ifeq ($(SRCARCH),x86) |
34 | $(call detected,CONFIG_X86) | 34 | $(call detected,CONFIG_X86) |
35 | ifeq (${IS_64_BIT}, 1) | 35 | ifeq (${IS_64_BIT}, 1) |
36 | CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated | 36 | CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated |
@@ -43,12 +43,12 @@ ifeq ($(ARCH),x86) | |||
43 | NO_PERF_REGS := 0 | 43 | NO_PERF_REGS := 0 |
44 | endif | 44 | endif |
45 | 45 | ||
46 | ifeq ($(ARCH),arm) | 46 | ifeq ($(SRCARCH),arm) |
47 | NO_PERF_REGS := 0 | 47 | NO_PERF_REGS := 0 |
48 | LIBUNWIND_LIBS = -lunwind -lunwind-arm | 48 | LIBUNWIND_LIBS = -lunwind -lunwind-arm |
49 | endif | 49 | endif |
50 | 50 | ||
51 | ifeq ($(ARCH),arm64) | 51 | ifeq ($(SRCARCH),arm64) |
52 | NO_PERF_REGS := 0 | 52 | NO_PERF_REGS := 0 |
53 | LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 | 53 | LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 |
54 | endif | 54 | endif |
@@ -61,7 +61,7 @@ endif | |||
61 | # Disable it on all other architectures in case libdw unwind | 61 | # Disable it on all other architectures in case libdw unwind |
62 | # support is detected in system. Add supported architectures | 62 | # support is detected in system. Add supported architectures |
63 | # to the check. | 63 | # to the check. |
64 | ifneq ($(ARCH),$(filter $(ARCH),x86 arm)) | 64 | ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm)) |
65 | NO_LIBDW_DWARF_UNWIND := 1 | 65 | NO_LIBDW_DWARF_UNWIND := 1 |
66 | endif | 66 | endif |
67 | 67 | ||
@@ -115,9 +115,9 @@ endif | |||
115 | FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) | 115 | FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) |
116 | FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf | 116 | FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf |
117 | 117 | ||
118 | FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi | 118 | FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi |
119 | # include ARCH specific config | 119 | # include ARCH specific config |
120 | -include $(src-perf)/arch/$(ARCH)/Makefile | 120 | -include $(src-perf)/arch/$(SRCARCH)/Makefile |
121 | 121 | ||
122 | ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET | 122 | ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET |
123 | CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET | 123 | CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET |
@@ -228,12 +228,12 @@ ifeq ($(DEBUG),0) | |||
228 | endif | 228 | endif |
229 | 229 | ||
230 | INC_FLAGS += -I$(src-perf)/util/include | 230 | INC_FLAGS += -I$(src-perf)/util/include |
231 | INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include | 231 | INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include |
232 | INC_FLAGS += -I$(srctree)/tools/include/uapi | 232 | INC_FLAGS += -I$(srctree)/tools/include/uapi |
233 | INC_FLAGS += -I$(srctree)/tools/include/ | 233 | INC_FLAGS += -I$(srctree)/tools/include/ |
234 | INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi | 234 | INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi |
235 | INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/ | 235 | INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/ |
236 | INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/ | 236 | INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/ |
237 | 237 | ||
238 | # $(obj-perf) for generated common-cmds.h | 238 | # $(obj-perf) for generated common-cmds.h |
239 | # $(obj-perf)/util for generated bison/flex headers | 239 | # $(obj-perf)/util for generated bison/flex headers |
@@ -355,7 +355,7 @@ ifndef NO_LIBELF | |||
355 | 355 | ||
356 | ifndef NO_DWARF | 356 | ifndef NO_DWARF |
357 | ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) | 357 | ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) |
358 | msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); | 358 | msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled); |
359 | NO_DWARF := 1 | 359 | NO_DWARF := 1 |
360 | else | 360 | else |
361 | CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) | 361 | CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) |
@@ -380,7 +380,7 @@ ifndef NO_LIBELF | |||
380 | CFLAGS += -DHAVE_BPF_PROLOGUE | 380 | CFLAGS += -DHAVE_BPF_PROLOGUE |
381 | $(call detected,CONFIG_BPF_PROLOGUE) | 381 | $(call detected,CONFIG_BPF_PROLOGUE) |
382 | else | 382 | else |
383 | msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset()); | 383 | msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset()); |
384 | endif | 384 | endif |
385 | else | 385 | else |
386 | msg := $(warning DWARF support is off, BPF prologue is disabled); | 386 | msg := $(warning DWARF support is off, BPF prologue is disabled); |
@@ -406,7 +406,7 @@ ifdef PERF_HAVE_JITDUMP | |||
406 | endif | 406 | endif |
407 | endif | 407 | endif |
408 | 408 | ||
409 | ifeq ($(ARCH),powerpc) | 409 | ifeq ($(SRCARCH),powerpc) |
410 | ifndef NO_DWARF | 410 | ifndef NO_DWARF |
411 | CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX | 411 | CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX |
412 | endif | 412 | endif |
@@ -487,7 +487,7 @@ else | |||
487 | endif | 487 | endif |
488 | 488 | ||
489 | ifndef NO_LOCAL_LIBUNWIND | 489 | ifndef NO_LOCAL_LIBUNWIND |
490 | ifeq ($(ARCH),$(filter $(ARCH),arm arm64)) | 490 | ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64)) |
491 | $(call feature_check,libunwind-debug-frame) | 491 | $(call feature_check,libunwind-debug-frame) |
492 | ifneq ($(feature-libunwind-debug-frame), 1) | 492 | ifneq ($(feature-libunwind-debug-frame), 1) |
493 | msg := $(warning No debug_frame support found in libunwind); | 493 | msg := $(warning No debug_frame support found in libunwind); |
@@ -740,7 +740,7 @@ ifeq (${IS_64_BIT}, 1) | |||
740 | NO_PERF_READ_VDSO32 := 1 | 740 | NO_PERF_READ_VDSO32 := 1 |
741 | endif | 741 | endif |
742 | endif | 742 | endif |
743 | ifneq ($(ARCH), x86) | 743 | ifneq ($(SRCARCH), x86) |
744 | NO_PERF_READ_VDSOX32 := 1 | 744 | NO_PERF_READ_VDSOX32 := 1 |
745 | endif | 745 | endif |
746 | ifndef NO_PERF_READ_VDSOX32 | 746 | ifndef NO_PERF_READ_VDSOX32 |
@@ -769,7 +769,7 @@ ifdef LIBBABELTRACE | |||
769 | endif | 769 | endif |
770 | 770 | ||
771 | ifndef NO_AUXTRACE | 771 | ifndef NO_AUXTRACE |
772 | ifeq ($(ARCH),x86) | 772 | ifeq ($(SRCARCH),x86) |
773 | ifeq ($(feature-get_cpuid), 0) | 773 | ifeq ($(feature-get_cpuid), 0) |
774 | msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); | 774 | msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); |
775 | NO_AUXTRACE := 1 | 775 | NO_AUXTRACE := 1 |
@@ -872,7 +872,7 @@ sysconfdir = $(prefix)/etc | |||
872 | ETC_PERFCONFIG = etc/perfconfig | 872 | ETC_PERFCONFIG = etc/perfconfig |
873 | endif | 873 | endif |
874 | ifndef lib | 874 | ifndef lib |
875 | ifeq ($(ARCH)$(IS_64_BIT), x861) | 875 | ifeq ($(SRCARCH)$(IS_64_BIT), x861) |
876 | lib = lib64 | 876 | lib = lib64 |
877 | else | 877 | else |
878 | lib = lib | 878 | lib = lib |
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 79fe31f20a17..5008f51a08a2 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf | |||
@@ -226,7 +226,7 @@ endif | |||
226 | 226 | ||
227 | ifeq ($(config),0) | 227 | ifeq ($(config),0) |
228 | include $(srctree)/tools/scripts/Makefile.arch | 228 | include $(srctree)/tools/scripts/Makefile.arch |
229 | -include arch/$(ARCH)/Makefile | 229 | -include arch/$(SRCARCH)/Makefile |
230 | endif | 230 | endif |
231 | 231 | ||
232 | # The FEATURE_DUMP_EXPORT holds location of the actual | 232 | # The FEATURE_DUMP_EXPORT holds location of the actual |
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build index 109eb75cf7de..d9b6af837c7d 100644 --- a/tools/perf/arch/Build +++ b/tools/perf/arch/Build | |||
@@ -1,2 +1,2 @@ | |||
1 | libperf-y += common.o | 1 | libperf-y += common.o |
2 | libperf-y += $(ARCH)/ | 2 | libperf-y += $(SRCARCH)/ |
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build index 9213a1273697..999a4e878162 100644 --- a/tools/perf/pmu-events/Build +++ b/tools/perf/pmu-events/Build | |||
@@ -2,7 +2,7 @@ hostprogs := jevents | |||
2 | 2 | ||
3 | jevents-y += json.o jsmn.o jevents.o | 3 | jevents-y += json.o jsmn.o jevents.o |
4 | pmu-events-y += pmu-events.o | 4 | pmu-events-y += pmu-events.o |
5 | JDIR = pmu-events/arch/$(ARCH) | 5 | JDIR = pmu-events/arch/$(SRCARCH) |
6 | JSON = $(shell [ -d $(JDIR) ] && \ | 6 | JSON = $(shell [ -d $(JDIR) ] && \ |
7 | find $(JDIR) -name '*.json' -o -name 'mapfile.csv') | 7 | find $(JDIR) -name '*.json' -o -name 'mapfile.csv') |
8 | # | 8 | # |
@@ -10,4 +10,4 @@ JSON = $(shell [ -d $(JDIR) ] && \ | |||
10 | # directory and create tables in pmu-events.c. | 10 | # directory and create tables in pmu-events.c. |
11 | # | 11 | # |
12 | $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS) | 12 | $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS) |
13 | $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) | 13 | $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) |
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index af58ebc243ef..84222bdb8689 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build | |||
@@ -75,7 +75,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B | |||
75 | $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ | 75 | $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ |
76 | $(Q)echo ';' >> $@ | 76 | $(Q)echo ';' >> $@ |
77 | 77 | ||
78 | ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc)) | 78 | ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc)) |
79 | perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o | 79 | perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o |
80 | endif | 80 | endif |
81 | 81 | ||
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index 32873ec91a4e..cf00ebad2ef5 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c | |||
@@ -83,7 +83,7 @@ int test__task_exit(int subtest __maybe_unused) | |||
83 | 83 | ||
84 | evsel = perf_evlist__first(evlist); | 84 | evsel = perf_evlist__first(evlist); |
85 | evsel->attr.task = 1; | 85 | evsel->attr.task = 1; |
86 | evsel->attr.sample_freq = 0; | 86 | evsel->attr.sample_freq = 1; |
87 | evsel->attr.inherit = 0; | 87 | evsel->attr.inherit = 0; |
88 | evsel->attr.watermark = 0; | 88 | evsel->attr.watermark = 0; |
89 | evsel->attr.wakeup_events = 1; | 89 | evsel->attr.wakeup_events = 1; |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index e4f7902d5afa..cda44b0e821c 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -273,8 +273,20 @@ struct perf_evsel *perf_evsel__new_cycles(void) | |||
273 | struct perf_evsel *evsel; | 273 | struct perf_evsel *evsel; |
274 | 274 | ||
275 | event_attr_init(&attr); | 275 | event_attr_init(&attr); |
276 | /* | ||
277 | * Unnamed union member, not supported as struct member named | ||
278 | * initializer in older compilers such as gcc 4.4.7 | ||
279 | * | ||
280 | * Just for probing the precise_ip: | ||
281 | */ | ||
282 | attr.sample_period = 1; | ||
276 | 283 | ||
277 | perf_event_attr__set_max_precise_ip(&attr); | 284 | perf_event_attr__set_max_precise_ip(&attr); |
285 | /* | ||
286 | * Now let the usual logic to set up the perf_event_attr defaults | ||
287 | * to kick in when we return and before perf_evsel__open() is called. | ||
288 | */ | ||
289 | attr.sample_period = 0; | ||
278 | 290 | ||
279 | evsel = perf_evsel__new(&attr); | 291 | evsel = perf_evsel__new(&attr); |
280 | if (evsel == NULL) | 292 | if (evsel == NULL) |
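The comments added in the evsel.c hunk refer to the layout of struct perf_event_attr: sample_period and sample_freq share an anonymous union, which older compilers such as gcc 4.4.7 cannot target with a designated initializer, so the field is set to 1 only while perf_event_attr__set_max_precise_ip() probes the precise_ip level and is then reset to 0. Simplified excerpt of that union (trimmed from the perf_event UAPI header):

	struct perf_event_attr {
		/* ... */
		union {
			__u64	sample_period;
			__u64	sample_freq;
		};
		/* ... */
	};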
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 5cac8d5e009a..b5baff3007bb 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -841,7 +841,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused, | |||
841 | 841 | ||
842 | /* | 842 | /* |
843 | * default get_cpuid(): nothing gets recorded | 843 | * default get_cpuid(): nothing gets recorded |
844 | * actual implementation must be in arch/$(ARCH)/util/header.c | 844 | * actual implementation must be in arch/$(SRCARCH)/util/header.c |
845 | */ | 845 | */ |
846 | int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) | 846 | int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) |
847 | { | 847 | { |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 84e7e698411e..a2670e9d652d 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -619,7 +619,7 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp, | |||
619 | struct map *map, unsigned long offs) | 619 | struct map *map, unsigned long offs) |
620 | { | 620 | { |
621 | struct symbol *sym; | 621 | struct symbol *sym; |
622 | u64 addr = tp->address + tp->offset - offs; | 622 | u64 addr = tp->address - offs; |
623 | 623 | ||
624 | sym = map__find_symbol(map, addr); | 624 | sym = map__find_symbol(map, addr); |
625 | if (!sym) | 625 | if (!sym) |
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index da45c4be5fb3..7755a5e0fe5e 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c | |||
@@ -178,6 +178,14 @@ frame_callback(Dwfl_Frame *state, void *arg) | |||
178 | Dwarf_Addr pc; | 178 | Dwarf_Addr pc; |
179 | bool isactivation; | 179 | bool isactivation; |
180 | 180 | ||
181 | if (!dwfl_frame_pc(state, &pc, NULL)) { | ||
182 | pr_err("%s", dwfl_errmsg(-1)); | ||
183 | return DWARF_CB_ABORT; | ||
184 | } | ||
185 | |||
186 | // report the module before we query for isactivation | ||
187 | report_module(pc, ui); | ||
188 | |||
181 | if (!dwfl_frame_pc(state, &pc, &isactivation)) { | 189 | if (!dwfl_frame_pc(state, &pc, &isactivation)) { |
182 | pr_err("%s", dwfl_errmsg(-1)); | 190 | pr_err("%s", dwfl_errmsg(-1)); |
183 | return DWARF_CB_ABORT; | 191 | return DWARF_CB_ABORT; |
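The unwind-libdw.c hunk makes frame_callback() fetch the PC once without the isactivation flag, report the module containing that address, and only then repeat the dwfl_frame_pc() query with isactivation, as the comment added in the hunk states. Roughly, the callback ends up shaped as below (ui's declaration and the elided tail are assumptions based on the visible context, not part of the patch):

	static int frame_callback(Dwfl_Frame *state, void *arg)
	{
		struct unwind_info *ui = arg;			/* assumed, not shown in the hunk */
		Dwarf_Addr pc;
		bool isactivation;

		if (!dwfl_frame_pc(state, &pc, NULL)) {		/* first pass: PC only */
			pr_err("%s", dwfl_errmsg(-1));
			return DWARF_CB_ABORT;
		}

		report_module(pc, ui);		/* module must be known before asking for isactivation */

		if (!dwfl_frame_pc(state, &pc, &isactivation)) {
			pr_err("%s", dwfl_errmsg(-1));
			return DWARF_CB_ABORT;
		}
		/* ... rest of the callback unchanged ... */
	}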
diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h index 19d0604f8694..487cbfb89beb 100644 --- a/tools/testing/selftests/bpf/bpf_endian.h +++ b/tools/testing/selftests/bpf/bpf_endian.h | |||
@@ -1,23 +1,42 @@ | |||
1 | #ifndef __BPF_ENDIAN__ | 1 | #ifndef __BPF_ENDIAN__ |
2 | #define __BPF_ENDIAN__ | 2 | #define __BPF_ENDIAN__ |
3 | 3 | ||
4 | #include <asm/byteorder.h> | 4 | #include <linux/swab.h> |
5 | 5 | ||
6 | #if __BYTE_ORDER == __LITTLE_ENDIAN | 6 | /* LLVM's BPF target selects the endianness of the CPU |
7 | # define __bpf_ntohs(x) __builtin_bswap16(x) | 7 | * it compiles on, or the user specifies (bpfel/bpfeb), |
8 | # define __bpf_htons(x) __builtin_bswap16(x) | 8 | * respectively. The used __BYTE_ORDER__ is defined by |
9 | #elif __BYTE_ORDER == __BIG_ENDIAN | 9 | * the compiler, we cannot rely on __BYTE_ORDER from |
10 | # define __bpf_ntohs(x) (x) | 10 | * libc headers, since it doesn't reflect the actual |
11 | # define __bpf_htons(x) (x) | 11 | * requested byte order. |
12 | * | ||
13 | * Note, LLVM's BPF target has different __builtin_bswapX() | ||
14 | * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE | ||
15 | * in bpfel and bpfeb case, which means below, that we map | ||
16 | * to cpu_to_be16(). We could use it unconditionally in BPF | ||
17 | * case, but better not rely on it, so that this header here | ||
18 | * can be used from application and BPF program side, which | ||
19 | * use different targets. | ||
20 | */ | ||
21 | #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ | ||
22 | # define __bpf_ntohs(x) __builtin_bswap16(x) | ||
23 | # define __bpf_htons(x) __builtin_bswap16(x) | ||
24 | # define __bpf_constant_ntohs(x) ___constant_swab16(x) | ||
25 | # define __bpf_constant_htons(x) ___constant_swab16(x) | ||
26 | #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ | ||
27 | # define __bpf_ntohs(x) (x) | ||
28 | # define __bpf_htons(x) (x) | ||
29 | # define __bpf_constant_ntohs(x) (x) | ||
30 | # define __bpf_constant_htons(x) (x) | ||
12 | #else | 31 | #else |
13 | # error "Fix your __BYTE_ORDER?!" | 32 | # error "Fix your compiler's __BYTE_ORDER__?!" |
14 | #endif | 33 | #endif |
15 | 34 | ||
16 | #define bpf_htons(x) \ | 35 | #define bpf_htons(x) \ |
17 | (__builtin_constant_p(x) ? \ | 36 | (__builtin_constant_p(x) ? \ |
18 | __constant_htons(x) : __bpf_htons(x)) | 37 | __bpf_constant_htons(x) : __bpf_htons(x)) |
19 | #define bpf_ntohs(x) \ | 38 | #define bpf_ntohs(x) \ |
20 | (__builtin_constant_p(x) ? \ | 39 | (__builtin_constant_p(x) ? \ |
21 | __constant_ntohs(x) : __bpf_ntohs(x)) | 40 | __bpf_constant_ntohs(x) : __bpf_ntohs(x)) |
22 | 41 | ||
23 | #endif | 42 | #endif /* __BPF_ENDIAN__ */ |
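With this rewrite bpf_endian.h keys off the compiler-provided __BYTE_ORDER__ macro and routes compile-time constants through the ___constant_swab16() helpers from <linux/swab.h>, so, as the new comment explains, the same header can be included from a BPF program and from the user-space side of a test. A small usage sketch (hypothetical function, not part of the selftests):

	#include "bpf_endian.h"

	/* 80 is a compile-time constant, so bpf_htons() expands to
	 * __bpf_constant_htons(80); a non-constant port would go through
	 * __bpf_htons() instead (a bswap16 on little-endian targets). */
	static int is_http(unsigned short dport_be)
	{
		return dport_be == bpf_htons(80);
	}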
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh index a676d3eefefb..13f5198ba0ee 100755 --- a/tools/testing/selftests/ntb/ntb_test.sh +++ b/tools/testing/selftests/ntb/ntb_test.sh | |||
@@ -305,7 +305,7 @@ function perf_test() | |||
305 | echo "Running remote perf test $WITH DMA" | 305 | echo "Running remote perf test $WITH DMA" |
306 | write_file "" $REMOTE_PERF/run | 306 | write_file "" $REMOTE_PERF/run |
307 | echo -n " " | 307 | echo -n " " |
308 | read_file $LOCAL_PERF/run | 308 | read_file $REMOTE_PERF/run |
309 | echo " Passed" | 309 | echo " Passed" |
310 | 310 | ||
311 | _modprobe -r ntb_perf | 311 | _modprobe -r ntb_perf |