author     David S. Miller <davem@davemloft.net>  2018-06-03 09:31:58 -0400
committer  David S. Miller <davem@davemloft.net>  2018-06-03 09:31:58 -0400
commit     9c54aeb03a6d65a5834bd92376e921cbac6dfb8f (patch)
tree       70441095d58678711d68cfef4934765251425d1f
parent     eaf47b17a77fda841a1102d76c15161ee438b347 (diff)
parent     918fe1b3157978ada4267468008c5f89ef101e7d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Filling in the padding slot in the bpf structure as a bug fix in 'net'
overlapped with actually using that padding area for something in
'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
145 files changed, 1054 insertions, 565 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index 8acf51a4dfa8..47a6a7fe0b86 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -10,6 +10,7 @@ Required properties:
       "brcm,bcm53128"
       "brcm,bcm5365"
       "brcm,bcm5395"
+      "brcm,bcm5389"
       "brcm,bcm5397"
       "brcm,bcm5398"

diff --git a/Documentation/i2c/busses/i2c-ocores b/Documentation/i2c/busses/i2c-ocores
index c269aaa2f26a..9e1dfe7553ad 100644
--- a/Documentation/i2c/busses/i2c-ocores
+++ b/Documentation/i2c/busses/i2c-ocores
@@ -2,7 +2,7 @@ Kernel driver i2c-ocores

 Supported adapters:
   * OpenCores.org I2C controller by Richard Herveille (see datasheet link)
-    Datasheet: http://www.opencores.org/projects.cgi/web/i2c/overview
+    https://opencores.org/project/i2c/overview

 Author: Peter Korsgaard <jacmet@sunsite.dk>

diff --git a/MAINTAINERS b/MAINTAINERS
index e7396119ce58..0ae0dbf0e15e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15554,6 +15554,14 @@ L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/char/xillybus/

+XLP9XX I2C DRIVER
+M:	George Cherian <george.cherian@cavium.com>
+M:	Jan Glauber <jglauber@cavium.com>
+L:	linux-i2c@vger.kernel.org
+W:	http://www.cavium.com
+S:	Supported
+F:	drivers/i2c/busses/i2c-xlp9xx.c
+
 XRA1403 GPIO EXPANDER
 M:	Nandor Han <nandor.han@ge.com>
 M:	Semi Malinen <semi.malinen@ge.com>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Merciless Moray

 # *DOCUMENTATION*
@@ -500,6 +500,9 @@ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
 RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
 export RETPOLINE_CFLAGS

+KBUILD_CFLAGS	+= $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS	+= $(call cc-option,-fno-PIE)
+
 # check for 'asm goto'
 ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
   CC_HAVE_ASM_GOTO := 1
@@ -621,9 +624,9 @@ endif # $(dot-config)
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux

-KBUILD_CFLAGS	+= $(call cc-option,-fno-PIE)
-KBUILD_AFLAGS	+= $(call cc-option,-fno-PIE)
-CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
+CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage \
+	$(call cc-option,-fno-tree-loop-im) \
+	$(call cc-disable-warning,maybe-uninitialized,)
 export CFLAGS_GCOV CFLAGS_KCOV

 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 77e8436beed4..3a1c6b45c9a1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -76,7 +76,7 @@
 			allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
 			clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>,
 				 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
-				 <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
+				 <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
 				 <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>,
 				 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
 			status = "disabled";
@@ -88,7 +88,7 @@
 			allwinner,pipeline = "de_fe0-de_be0-lcd0";
 			clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>,
 				 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>,
-				 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_TCON0_CH0>,
+				 <&ccu CLK_DE_FE0>, <&ccu CLK_TCON0_CH0>,
 				 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
 			status = "disabled";
 		};
@@ -99,7 +99,7 @@
 			allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
 			clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>,
 				 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
-				 <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
+				 <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
 				 <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>,
 				 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
 			status = "disabled";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 3328fe583c9b..232f124ce62c 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -117,6 +117,7 @@
 	phy-handle = <&int_mii_phy>;
 	phy-mode = "mii";
 	allwinner,leds-active-low;
+	status = "okay";
 };

 &hdmi {
diff --git a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
index d1311098ea45..ad173605b1b8 100644
--- a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
+++ b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
@@ -51,7 +51,7 @@

 	leds {
 		/* The LEDs use PG0~2 pins, which conflict with MMC1 */
-		status = "disbaled";
+		status = "disabled";
 	};
 };

diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index e70feec6fad5..0581ffbedddd 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -323,7 +323,7 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)

 /* All EP93xx devices use the same two GPIO pins for I2C bit-banging */
 static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		/* Use local offsets on gpiochip/port "G" */
 		GPIO_LOOKUP_IDX("G", 1, NULL, 0,
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 77def6169f50..44cbbce6bda6 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -51,7 +51,7 @@ static struct platform_device avila_flash = {
 };

 static struct gpiod_lookup_table avila_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 0f5c99941a7d..397190f3a8da 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -70,7 +70,7 @@ static struct platform_device dsmg600_flash = {
 };

 static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 033f79b35d51..f0a152e365b1 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -56,7 +56,7 @@ static struct platform_device fsg_flash = {
 };

 static struct gpiod_lookup_table fsg_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index b168e2fbdbeb..3ec829d52cdd 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -124,7 +124,7 @@ static struct platform_device ixdp425_flash_nand = {
 #endif	/* CONFIG_MTD_NAND_PLATFORM */

 static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 76dfff03cb71..4138d6aa4c52 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -102,7 +102,7 @@ static struct platform_device nas100d_leds = {
 };

 static struct gpiod_lookup_table nas100d_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index 91da63a7d7b5..341b263482ef 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -70,7 +70,7 @@ static struct platform_device nslu2_flash = {
 };

 static struct gpiod_lookup_table nslu2_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 5877e547cecd..0adb1bd6208e 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -322,7 +322,7 @@ static struct soc_camera_link palmz72_iclink = {
 };

 static struct gpiod_lookup_table palmz72_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0,
 				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 90d0f277de55..207dcc2e94e7 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -460,7 +460,7 @@ static struct platform_device smc91x_device = {

 /* i2c */
 static struct gpiod_lookup_table viper_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.1",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -789,7 +789,7 @@ static int __init viper_tpm_setup(char *str)
 __setup("tpm=", viper_tpm_setup);

 struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.2",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index ace010479eb6..f45aed2519ba 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -327,7 +327,7 @@ static struct platform_device simpad_gpio_leds = {
  * i2c
  */
 static struct gpiod_lookup_table simpad_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("gpio", 21, NULL, 0,
 				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 724a0d3b7683..edb4ee0b8896 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -299,7 +299,6 @@
 	/* GPIO blocks 16 thru 19 do not appear to be routed to pins */

 	dwmmc_0: dwmmc0@f723d000 {
-		max-frequency = <150000000>;
 		cap-mmc-highspeed;
 		mmc-hs200-1_8v;
 		non-removable;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b9e9bf628849..3775a8d694fb 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	if (value & ~known_bits)
 		return -EOPNOTSUPP;

+	/* Setting FRE without FR is not supported.  */
+	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
+		return -EOPNOTSUPP;
+
 	/* Avoid inadvertently triggering emulation */
 	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
 	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 8d098b9f395c..0c0c23c9c9f5 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -818,7 +818,7 @@ long arch_ptrace(struct task_struct *child, long request,
 				break;
 			}
 #endif
-			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
+			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
 			break;
 		case PC:
 			tmp = regs->cp0_epc;
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 656a137c1fe2..f30c381d3e1c 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -109,7 +109,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 						addr & 1);
 				break;
 			}
-			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
+			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
 			break;
 		case PC:
 			tmp = regs->cp0_epc;
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 249f38d3388f..b7404f2dcf5b 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -9,6 +9,12 @@ config NDS32
 	select CLKSRC_MMIO
 	select CLONE_BACKWARDS
 	select COMMON_CLK
+	select GENERIC_ASHLDI3
+	select GENERIC_ASHRDI3
+	select GENERIC_LSHRDI3
+	select GENERIC_CMPDI2
+	select GENERIC_MULDI3
+	select GENERIC_UCMPDI2
 	select GENERIC_ATOMIC64
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
@@ -82,6 +88,7 @@ endmenu

 menu "Kernel Features"
 source "kernel/Kconfig.preempt"
+source "kernel/Kconfig.freezer"
 source "mm/Kconfig"
 source "kernel/Kconfig.hz"
 endmenu
diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu
index ba44cc539da9..b8c8984d1456 100644
--- a/arch/nds32/Kconfig.cpu
+++ b/arch/nds32/Kconfig.cpu
@@ -1,10 +1,11 @@
 comment "Processor Features"

 config CPU_BIG_ENDIAN
-	bool "Big endian"
+	def_bool !CPU_LITTLE_ENDIAN

 config CPU_LITTLE_ENDIAN
-	def_bool !CPU_BIG_ENDIAN
+	bool "Little endian"
+	default y

 config HWZOL
 	bool "hardware zero overhead loop support"
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 91f933d5a962..513bb2e9baf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -23,9 +23,6 @@ export	TEXTADDR
 # If we have a machine-specific directory, then include it in the build.
 core-y				+= arch/nds32/kernel/ arch/nds32/mm/
 libs-y				+= arch/nds32/lib/
-LIBGCC_PATH     := \
-  $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
-libs-y += $(LIBGCC_PATH)

 ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
 BUILTIN_DTB := y
@@ -35,8 +32,12 @@ endif

 ifdef CONFIG_CPU_LITTLE_ENDIAN
 KBUILD_CFLAGS   += $(call cc-option, -EL)
+KBUILD_AFLAGS   += $(call cc-option, -EL)
+LDFLAGS         += $(call cc-option, -EL)
 else
 KBUILD_CFLAGS   += $(call cc-option, -EB)
+KBUILD_AFLAGS   += $(call cc-option, -EB)
+LDFLAGS         += $(call cc-option, -EB)
 endif

 boot := arch/nds32/boot
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 06bdf8167f5a..142e612aa639 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += export.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += ftrace.h
@@ -49,6 +50,7 @@ generic-y += switch_to.h
 generic-y += timex.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += xor.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
index c73f71d67744..8e84fc385b94 100644
--- a/arch/nds32/include/asm/bitfield.h
+++ b/arch/nds32/include/asm/bitfield.h
@@ -336,7 +336,7 @@
 #define INT_MASK_mskIDIVZE	( 0x1  << INT_MASK_offIDIVZE )
 #define INT_MASK_mskDSSIM	( 0x1  << INT_MASK_offDSSIM )

-#define INT_MASK_INITAIAL_VAL	0x10003
+#define INT_MASK_INITAIAL_VAL	(INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE)

 /******************************************************************************
  * ir15: INT_PEND (Interrupt Pending Register)
@@ -396,6 +396,7 @@
 #define MMU_CTL_D8KB		1
 #define MMU_CTL_UNA		( 0x1  << MMU_CTL_offUNA )

+#define MMU_CTL_CACHEABLE_NON   0
 #define MMU_CTL_CACHEABLE_WB	2
 #define MMU_CTL_CACHEABLE_WT	3

diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 1240f148ec0f..10b48f0d8e85 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -32,6 +32,8 @@ void flush_anon_page(struct vm_area_struct *vma,

 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 void flush_kernel_dcache_page(struct page *page);
+void flush_kernel_vmap_range(void *addr, int size);
+void invalidate_kernel_vmap_range(void *addr, int size);
 void flush_icache_range(unsigned long start, unsigned long end);
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
index 966e71b3c960..71cd226d6863 100644
--- a/arch/nds32/include/asm/io.h
+++ b/arch/nds32/include/asm/io.h
@@ -4,6 +4,8 @@
 #ifndef __ASM_NDS32_IO_H
 #define __ASM_NDS32_IO_H

+#include <linux/types.h>
+
 extern void iounmap(volatile void __iomem *addr);
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h
index e27365c097b6..947f0491c9a7 100644
--- a/arch/nds32/include/asm/page.h
+++ b/arch/nds32/include/asm/page.h
@@ -27,6 +27,9 @@ extern void copy_user_highpage(struct page *to, struct page *from,
 			       unsigned long vaddr, struct vm_area_struct *vma);
 extern void clear_user_highpage(struct page *page, unsigned long vaddr);

+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *to);
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define clear_user_highpage	clear_user_highpage
 #else
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index 6783937edbeb..d3e19a55cf53 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -152,6 +152,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_CACHE_L1	__pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE)
 #define PAGE_MEMORY	__pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
 #define PAGE_KERNEL	__pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
+#define PAGE_SHARED	__pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD)
 #define PAGE_DEVICE    __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
 #endif /* __ASSEMBLY__ */

diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S
index a72e83d804f5..b8ae4e9a6b93 100644
--- a/arch/nds32/kernel/ex-entry.S
+++ b/arch/nds32/kernel/ex-entry.S
@@ -118,7 +118,7 @@ common_exception_handler:
 	/* interrupt */
 2:
 #ifdef CONFIG_TRACE_IRQFLAGS
-	jal	arch_trace_hardirqs_off
+	jal	trace_hardirqs_off
 #endif
 	move	$r0, $sp
 	sethi	$lp, hi20(ret_from_intr)
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
index 71f57bd70f3b..c5fdae174ced 100644
--- a/arch/nds32/kernel/head.S
+++ b/arch/nds32/kernel/head.S
@@ -57,14 +57,32 @@ _nodtb:
 	isb
 	mtsr    $r4, $L1_PPTB       ! load page table pointer\n"

-/* set NTC0 cacheable/writeback, mutliple page size in use */
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON
+#else
+	#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+		#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT
+	#else
+		#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB
+	#endif
+#endif
+
+/* set NTC cacheability, mutliple page size in use */
 	mfsr    $r3, $MMU_CTL
-	li      $r0, #~MMU_CTL_mskNTC0
-	and     $r3, $r3, $r0
+#if CONFIG_MEMORY_START >= 0xc0000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3)
+#elif CONFIG_MEMORY_START >= 0x80000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2)
+#elif CONFIG_MEMORY_START >= 0x40000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1)
+#else
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0)
+#endif
+
 #ifdef CONFIG_ANDES_PAGE_SIZE_4KB
-	ori     $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0))
+	ori     $r3, $r3, #(MMU_CTL_mskMPZIU)
#else
-	ori     $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB)
+	ori     $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB)
 #endif
 #ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS
 	li      $r0, #MMU_CTL_UNA
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index ba910e9e4ecb..2f5b2ccebe47 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -293,6 +293,9 @@ void __init setup_arch(char **cmdline_p)
 	/* paging_init() sets up the MMU and marks all pages as reserved */
 	paging_init();

+	/* invalidate all TLB entries because the new mapping is created */
+	__nds32__tlbop_flua();
+
 	/* use generic way to parse */
 	parse_early_param();

diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c
index bc70113c0e84..8b231e910ea6 100644
--- a/arch/nds32/kernel/stacktrace.c
+++ b/arch/nds32/kernel/stacktrace.c
@@ -9,6 +9,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
 	save_stack_trace_tsk(current, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);

 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
@@ -45,3 +46,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 		fpn = (unsigned long *)fpp;
 	}
 }
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index f1198d7a5654..016f15891f6d 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -23,7 +23,7 @@
 #include <asm/vdso_timer_info.h>
 #include <asm/cache_info.h>
 extern struct cache_info L1_cache_info[2];
-extern char vdso_start, vdso_end;
+extern char vdso_start[], vdso_end[];
 static unsigned long vdso_pages __ro_after_init;
 static unsigned long timer_mapping_base;

@@ -66,16 +66,16 @@ static int __init vdso_init(void)
 	int i;
 	struct page **vdso_pagelist;

-	if (memcmp(&vdso_start, "\177ELF", 4)) {
+	if (memcmp(vdso_start, "\177ELF", 4)) {
 		pr_err("vDSO is not a valid ELF object!\n");
 		return -EINVAL;
 	}
 	/* Creat a timer io mapping to get clock cycles counter */
 	get_timer_node_info();

-	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
 	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
-		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
+		vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);

 	/* Allocate the vDSO pagelist */
 	vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
@@ -83,7 +83,7 @@ static int __init vdso_init(void)
 		return -ENOMEM;

 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE);
 	vdso_spec[1].pages = &vdso_pagelist[0];

 	return 0;
diff --git a/arch/nds32/lib/copy_page.S b/arch/nds32/lib/copy_page.S
index 4a2ff85f17ee..f8701ed161a8 100644
--- a/arch/nds32/lib/copy_page.S
+++ b/arch/nds32/lib/copy_page.S
@@ -2,6 +2,7 @@
 // Copyright (C) 2005-2017 Andes Technology Corporation

 #include <linux/linkage.h>
+#include <asm/export.h>
 #include <asm/page.h>

 	.text
@@ -16,6 +17,7 @@ ENTRY(copy_page)
 	popm	$r2, $r10
 	ret
 ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)

 ENTRY(clear_page)
 	pushm	$r1, $r9
@@ -35,3 +37,4 @@ ENTRY(clear_page)
 	popm	$r1, $r9
 	ret
 ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c
index b96a01b10ca7..e1aed9dc692d 100644
--- a/arch/nds32/mm/alignment.c
+++ b/arch/nds32/mm/alignment.c
@@ -19,7 +19,7 @@
 #define RA(inst)	(((inst) >> 15) & 0x1FUL)
 #define RB(inst)	(((inst) >> 10) & 0x1FUL)
 #define SV(inst)	(((inst) >> 8) & 0x3UL)
-#define IMM(inst)	(((inst) >> 0) & 0x3FFFUL)
+#define IMM(inst)	(((inst) >> 0) & 0x7FFFUL)

 #define RA3(inst)	(((inst) >> 3) & 0x7UL)
 #define RT3(inst)	(((inst) >> 6) & 0x7UL)
@@ -28,6 +28,9 @@
 #define RA5(inst)	(((inst) >> 0) & 0x1FUL)
 #define RT4(inst)	(((inst) >> 5) & 0xFUL)

+#define GET_IMMSVAL(imm_value) \
+	(((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value)
+
 #define __get8_data(val,addr,err)	\
 	__asm__(					\
 	"1:	lbi.bi	%1, [%2], #1\n"			\
@@ -467,7 +470,7 @@ static inline int do_32(unsigned long inst, struct pt_regs *regs)
 	}

 	if (imm)
-		shift = IMM(inst) * len;
+		shift = GET_IMMSVAL(IMM(inst)) * len;
 	else
 		shift = *idx_to_addr(regs, RB(inst)) << SV(inst);

@@ -552,7 +555,7 @@ static struct ctl_table alignment_tbl[3] = {

 static struct ctl_table nds32_sysctl_table[2] = {
 	{
-	 .procname = "unaligned_acess",
+	 .procname = "unaligned_access",
 	 .mode = 0555,
 	 .child = alignment_tbl},
 	{}
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index 6eb786a399a2..ce8fd34497bf 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -147,6 +147,25 @@ void flush_cache_vunmap(unsigned long start, unsigned long end)
 	cpu_icache_inval_all();
 }

+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *to)
+{
+	cpu_dcache_wbinval_page((unsigned long)vaddr);
+	cpu_icache_inval_page((unsigned long)vaddr);
+	copy_page(vto, vfrom);
+	cpu_dcache_wbinval_page((unsigned long)vto);
+	cpu_icache_inval_page((unsigned long)vto);
+}
+
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
+{
+	cpu_dcache_wbinval_page((unsigned long)vaddr);
+	cpu_icache_inval_page((unsigned long)vaddr);
+	clear_page(addr);
+	cpu_dcache_wbinval_page((unsigned long)addr);
+	cpu_icache_inval_page((unsigned long)addr);
+}
+
 void copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -156,11 +175,9 @@ void copy_user_highpage(struct page *to, struct page *from,
 	pto = page_to_phys(to);
 	pfrom = page_to_phys(from);

+	local_irq_save(flags);
 	if (aliasing(vaddr, (unsigned long)kfrom))
 		cpu_dcache_wb_page((unsigned long)kfrom);
-	if (aliasing(vaddr, (unsigned long)kto))
-		cpu_dcache_inval_page((unsigned long)kto);
-	local_irq_save(flags);
 	vto = kremap0(vaddr, pto);
 	vfrom = kremap1(vaddr, pfrom);
 	copy_page((void *)vto, (void *)vfrom);
@@ -198,21 +215,25 @@ void flush_dcache_page(struct page *page)
 	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else {
-		int i, pc;
-		unsigned long vto, kaddr, flags;
+		unsigned long kaddr, flags;
+
 		kaddr = (unsigned long)page_address(page);
-		cpu_dcache_wbinval_page(kaddr);
-		pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
 		local_irq_save(flags);
-		for (i = 0; i < pc; i++) {
-			vto =
-			    kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page));
-			cpu_dcache_wbinval_page(vto);
-			kunmap01(vto);
+		cpu_dcache_wbinval_page(kaddr);
+		if (mapping) {
+			unsigned long vaddr, kto;
+
+			vaddr = page->index << PAGE_SHIFT;
+			if (aliasing(vaddr, kaddr)) {
+				kto = kremap0(vaddr, page_to_phys(page));
+				cpu_dcache_wbinval_page(kto);
+				kunmap01(kto);
+			}
 		}
 		local_irq_restore(flags);
 	}
 }
+EXPORT_SYMBOL(flush_dcache_page);

 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, void *src, int len)
@@ -251,7 +272,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 void flush_anon_page(struct vm_area_struct *vma,
 		     struct page *page, unsigned long vaddr)
 {
-	unsigned long flags;
+	unsigned long kaddr, flags, ktmp;
 	if (!PageAnon(page))
 		return;

@@ -261,7 +282,12 @@ void flush_anon_page(struct vm_area_struct *vma,
 	local_irq_save(flags);
 	if (vma->vm_flags & VM_EXEC)
 		cpu_icache_inval_page(vaddr & PAGE_MASK);
-	cpu_dcache_wbinval_page((unsigned long)page_address(page));
+	kaddr = (unsigned long)page_address(page);
+	if (aliasing(vaddr, kaddr)) {
+		ktmp = kremap0(vaddr, page_to_phys(page));
+		cpu_dcache_wbinval_page(ktmp);
+		kunmap01(ktmp);
+	}
 	local_irq_restore(flags);
 }

@@ -272,6 +298,25 @@ void flush_kernel_dcache_page(struct page *page)
 	cpu_dcache_wbinval_page((unsigned long)page_address(page));
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+void flush_kernel_vmap_range(void *addr, int size)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);

 void flush_icache_range(unsigned long start, unsigned long end)
 {
@@ -283,6 +328,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	cpu_cache_wbinval_range(start, end, 1);
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(flush_icache_range);

 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 93ee0160720b..c713d2ad55dc 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -30,6 +30,7 @@ extern unsigned long phys_initrd_size;
  * zero-initialized data and COW.
  */
 struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);

 static void __init zone_sizes_init(void)
 {
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4c02a7378d06..e7377b73cfec 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -96,6 +96,7 @@ struct kvmppc_vcore {
 	struct kvm_vcpu *runner;
 	struct kvm *kvm;
 	u64 tb_offset;		/* guest timebase - host timebase */
+	u64 tb_offset_applied;	/* timebase offset currently in force */
 	ulong lpcr;
 	u32 arch_compat;
 	ulong pcr;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6bee65f3cfd3..373dc1d6ef44 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -562,6 +562,7 @@ int main(void)
 	OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
 	OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
 	OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset);
+	OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied);
 	OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr);
 	OFFSET(VCORE_PCR, kvmppc_vcore, pcr);
 	OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index a57eafec4dc2..361f42c8c73e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -162,7 +162,7 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
 		asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
 			     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
-	asm volatile("ptesync": : :"memory");
+	asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
 }

 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
@@ -173,7 +173,7 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
 	/* RIC=1 PRS=0 R=1 IS=2 */
 	asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1)
 		     : : "r" (rb), "r" (kvm->arch.lpid) : "memory");
-	asm volatile("ptesync": : :"memory");
+	asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
 }

 unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
@@ -584,7 +584,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,

 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
 	if (ptep && pte_present(*ptep)) {
-		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
+		old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
 					      gpa, shift);
 		kvmppc_radix_tlbie_page(kvm, gpa, shift);
 		if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 4d07fca5121c..9963f65c212b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2441,6 +2441,7 @@ static void init_vcore_to_run(struct kvmppc_vcore *vc)
 	vc->in_guest = 0;
 	vc->napping_threads = 0;
 	vc->conferring_threads = 0;
+	vc->tb_offset_applied = 0;
 }

 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bd63fa8a08b5..07ca1b2a7966 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -692,6 +692,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 22:	ld	r8,VCORE_TB_OFFSET(r5)
 	cmpdi	r8,0
 	beq	37f
+	std	r8, VCORE_TB_OFFSET_APPL(r5)
 	mftb	r6		/* current host timebase */
 	add	r8,r8,r6
 	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
@@ -940,18 +941,6 @@ FTR_SECTION_ELSE
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 8:

-	/*
-	 * Set the decrementer to the guest decrementer.
-	 */
-	ld	r8,VCPU_DEC_EXPIRES(r4)
-	/* r8 is a host timebase value here, convert to guest TB */
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	ld	r6,VCORE_TB_OFFSET(r5)
-	add	r8,r8,r6
-	mftb	r7
-	subf	r3,r7,r8
-	mtspr	SPRN_DEC,r3
-
 	ld	r5, VCPU_SPRG0(r4)
 	ld	r6, VCPU_SPRG1(r4)
 	ld	r7, VCPU_SPRG2(r4)
@@ -1005,6 +994,18 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	mtspr	SPRN_LPCR,r8
 	isync

+	/*
+	 * Set the decrementer to the guest decrementer.
+	 */
+	ld	r8,VCPU_DEC_EXPIRES(r4)
+	/* r8 is a host timebase value here, convert to guest TB */
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r6,VCORE_TB_OFFSET_APPL(r5)
+	add	r8,r8,r6
+	mftb	r7
+	subf	r3,r7,r8
+	mtspr	SPRN_DEC,r3
+
 	/* Check if HDEC expires soon */
 	mfspr	r3, SPRN_HDEC
 	EXTEND_HDEC(r3)
@@ -1597,8 +1598,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

 guest_bypass:
 	stw	r12, STACK_SLOT_TRAP(r1)
-	mr	r3, r12
+
+	/* Save DEC */
+	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
+	ld	r3, HSTATE_KVM_VCORE(r13)
+	mfspr	r5,SPRN_DEC
+	mftb	r6
+	/* On P9, if the guest has large decr enabled, don't sign extend */
+BEGIN_FTR_SECTION
+	ld	r4, VCORE_LPCR(r3)
+	andis.	r4, r4, LPCR_LD@h
+	bne	16f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+	extsw	r5,r5
+16:	add	r5,r5,r6
+	/* r5 is a guest timebase value here, convert to host TB */
+	ld	r4,VCORE_TB_OFFSET_APPL(r3)
+	subf	r5,r4,r5
+	std	r5,VCPU_DEC_EXPIRES(r9)
+
 	/* Increment exit count, poke other threads to exit */
+	mr	r3, r12
 	bl	kvmhv_commence_exit
 	nop
 	ld	r9, HSTATE_KVM_VCPU(r13)
@@ -1639,23 +1659,6 @@ guest_bypass:
 	mtspr	SPRN_PURR,r3
 	mtspr	SPRN_SPURR,r4

-	/* Save DEC */
-	ld	r3, HSTATE_KVM_VCORE(r13)
-	mfspr	r5,SPRN_DEC
-	mftb	r6
-	/* On P9, if the guest has large decr enabled, don't sign extend */
-BEGIN_FTR_SECTION
-	ld	r4, VCORE_LPCR(r3)
-	andis.	r4, r4, LPCR_LD@h
-	bne	16f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-	extsw	r5,r5
-16:	add	r5,r5,r6
-	/* r5 is a guest timebase value here, convert to host TB */
-	ld	r4,VCORE_TB_OFFSET(r3)
-	subf	r5,r4,r5
-	std	r5,VCPU_DEC_EXPIRES(r9)
-
 BEGIN_FTR_SECTION
 	b	8f
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
@@ -1905,6 +1908,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	cmpwi	cr2, r0, 0
 	beq	cr2, 4f

+	/*
+	 * Radix: do eieio; tlbsync; ptesync sequence in case we
+	 * interrupted the guest between a tlbie and a ptesync.
+	 */
+	eieio
+	tlbsync
+	ptesync
+
 	/* Radix: Handle the case where the guest used an illegal PID */
 	LOAD_REG_ADDR(r4, mmu_base_pid)
 	lwz	r3, VCPU_GUEST_PID(r9)
@@ -2017,9 +2028,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

 27:
 	/* Subtract timebase offset from timebase */
-	ld	r8,VCORE_TB_OFFSET(r5)
+	ld	r8, VCORE_TB_OFFSET_APPL(r5)
 	cmpdi	r8,0
 	beq	17f
+	li	r0, 0
+	std	r0, VCORE_TB_OFFSET_APPL(r5)
 	mftb	r6			/* current guest timebase */
 	subf	r8,r8,r6
 	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
@@ -2700,7 +2713,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
2700 | add r3, r3, r5 | 2713 | add r3, r3, r5 |
2701 | ld r4, HSTATE_KVM_VCPU(r13) | 2714 | ld r4, HSTATE_KVM_VCPU(r13) |
2702 | ld r5, HSTATE_KVM_VCORE(r13) | 2715 | ld r5, HSTATE_KVM_VCORE(r13) |
2703 | ld r6, VCORE_TB_OFFSET(r5) | 2716 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
2704 | subf r3, r6, r3 /* convert to host TB value */ | 2717 | subf r3, r6, r3 /* convert to host TB value */ |
2705 | std r3, VCPU_DEC_EXPIRES(r4) | 2718 | std r3, VCPU_DEC_EXPIRES(r4) |
2706 | 2719 | ||
@@ -2799,7 +2812,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
2799 | /* Restore guest decrementer */ | 2812 | /* Restore guest decrementer */ |
2800 | ld r3, VCPU_DEC_EXPIRES(r4) | 2813 | ld r3, VCPU_DEC_EXPIRES(r4) |
2801 | ld r5, HSTATE_KVM_VCORE(r13) | 2814 | ld r5, HSTATE_KVM_VCORE(r13) |
2802 | ld r6, VCORE_TB_OFFSET(r5) | 2815 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
2803 | add r3, r3, r6 /* convert host TB to guest TB value */ | 2816 | add r3, r3, r6 /* convert host TB to guest TB value */ |
2804 | mftb r7 | 2817 | mftb r7 |
2805 | subf r3, r7, r3 | 2818 | subf r3, r7, r3 |
@@ -3606,12 +3619,9 @@ kvmppc_fix_pmao: | |||
3606 | */ | 3619 | */ |
3607 | kvmhv_start_timing: | 3620 | kvmhv_start_timing: |
3608 | ld r5, HSTATE_KVM_VCORE(r13) | 3621 | ld r5, HSTATE_KVM_VCORE(r13) |
3609 | lbz r6, VCORE_IN_GUEST(r5) | 3622 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
3610 | cmpwi r6, 0 | 3623 | mftb r5 |
3611 | beq 5f /* if in guest, need to */ | 3624 | subf r5, r6, r5 /* subtract current timebase offset */ |
3612 | ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ | ||
3613 | 5: mftb r5 | ||
3614 | subf r5, r6, r5 | ||
3615 | std r3, VCPU_CUR_ACTIVITY(r4) | 3625 | std r3, VCPU_CUR_ACTIVITY(r4) |
3616 | std r5, VCPU_ACTIVITY_START(r4) | 3626 | std r5, VCPU_ACTIVITY_START(r4) |
3617 | blr | 3627 | blr |
@@ -3622,15 +3632,12 @@ kvmhv_start_timing: | |||
3622 | */ | 3632 | */ |
3623 | kvmhv_accumulate_time: | 3633 | kvmhv_accumulate_time: |
3624 | ld r5, HSTATE_KVM_VCORE(r13) | 3634 | ld r5, HSTATE_KVM_VCORE(r13) |
3625 | lbz r8, VCORE_IN_GUEST(r5) | 3635 | ld r8, VCORE_TB_OFFSET_APPL(r5) |
3626 | cmpwi r8, 0 | 3636 | ld r5, VCPU_CUR_ACTIVITY(r4) |
3627 | beq 4f /* if in guest, need to */ | ||
3628 | ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ | ||
3629 | 4: ld r5, VCPU_CUR_ACTIVITY(r4) | ||
3630 | ld r6, VCPU_ACTIVITY_START(r4) | 3637 | ld r6, VCPU_ACTIVITY_START(r4) |
3631 | std r3, VCPU_CUR_ACTIVITY(r4) | 3638 | std r3, VCPU_CUR_ACTIVITY(r4) |
3632 | mftb r7 | 3639 | mftb r7 |
3633 | subf r7, r8, r7 | 3640 | subf r7, r8, r7 /* subtract current timebase offset */ |
3634 | std r7, VCPU_ACTIVITY_START(r4) | 3641 | std r7, VCPU_ACTIVITY_START(r4) |
3635 | cmpdi r5, 0 | 3642 | cmpdi r5, 0 |
3636 | beqlr | 3643 | beqlr |
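Throughout this file the conversions now use VCORE_TB_OFFSET_APPL instead of VCORE_TB_OFFSET, and kvmhv_start_timing/kvmhv_accumulate_time drop their VCORE_IN_GUEST checks. As the name suggests, the new field evidently records the offset that is currently applied to the hardware timebase: the hunk at the "Subtract timebase offset" site clears it at the same moment TBU40 is wound back, so readers can subtract it unconditionally on either side of guest entry/exit. A minimal C model of that bookkeeping, with hypothetical names:

#include <stdint.h>

struct vcore_model {
    uint64_t tb_offset;        /* offset requested for the guest          */
    uint64_t tb_offset_appl;   /* offset currently applied to the TB      */
};

static uint64_t host_tb_now(const struct vcore_model *vc, uint64_t mftb)
{
    return mftb - vc->tb_offset_appl;   /* correct in or out of the guest */
}

static void apply_guest_offset(struct vcore_model *vc)
{
    vc->tb_offset_appl = vc->tb_offset; /* set when TB is advanced...     */
}

static void remove_guest_offset(struct vcore_model *vc)
{
    vc->tb_offset_appl = 0;             /* ...cleared when it is wound back */
}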
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index c7a5deadd1cc..99c3620b40d9 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c | |||
@@ -11,6 +11,9 @@ | |||
11 | #define XGLUE(a,b) a##b | 11 | #define XGLUE(a,b) a##b |
12 | #define GLUE(a,b) XGLUE(a,b) | 12 | #define GLUE(a,b) XGLUE(a,b) |
13 | 13 | ||
14 | /* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */ | ||
15 | #define XICS_DUMMY 1 | ||
16 | |||
14 | static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) | 17 | static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) |
15 | { | 18 | { |
16 | u8 cppr; | 19 | u8 cppr; |
@@ -205,6 +208,10 @@ skip_ipi: | |||
205 | goto skip_ipi; | 208 | goto skip_ipi; |
206 | } | 209 | } |
207 | 210 | ||
211 | /* If it's the dummy interrupt, continue searching */ | ||
212 | if (hirq == XICS_DUMMY) | ||
213 | goto skip_ipi; | ||
214 | |||
208 | /* If fetching, update queue pointers */ | 215 | /* If fetching, update queue pointers */ |
209 | if (scan_type == scan_fetch) { | 216 | if (scan_type == scan_fetch) { |
210 | q->idx = idx; | 217 | q->idx = idx; |
@@ -385,9 +392,76 @@ static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc) | |||
385 | __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); | 392 | __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); |
386 | } | 393 | } |
387 | 394 | ||
395 | static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive, | ||
396 | struct kvmppc_xive_vcpu *xc) | ||
397 | { | ||
398 | unsigned int prio; | ||
399 | |||
400 | /* For each priority that is now masked */ | ||
401 | for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { | ||
402 | struct xive_q *q = &xc->queues[prio]; | ||
403 | struct kvmppc_xive_irq_state *state; | ||
404 | struct kvmppc_xive_src_block *sb; | ||
405 | u32 idx, toggle, entry, irq, hw_num; | ||
406 | struct xive_irq_data *xd; | ||
407 | __be32 *qpage; | ||
408 | u16 src; | ||
409 | |||
410 | idx = q->idx; | ||
411 | toggle = q->toggle; | ||
412 | qpage = READ_ONCE(q->qpage); | ||
413 | if (!qpage) | ||
414 | continue; | ||
415 | |||
416 | /* For each interrupt in the queue */ | ||
417 | for (;;) { | ||
418 | entry = be32_to_cpup(qpage + idx); | ||
419 | |||
420 | /* No more ? */ | ||
421 | if ((entry >> 31) == toggle) | ||
422 | break; | ||
423 | irq = entry & 0x7fffffff; | ||
424 | |||
425 | /* Skip dummies and IPIs */ | ||
426 | if (irq == XICS_DUMMY || irq == XICS_IPI) | ||
427 | goto next; | ||
428 | sb = kvmppc_xive_find_source(xive, irq, &src); | ||
429 | if (!sb) | ||
430 | goto next; | ||
431 | state = &sb->irq_state[src]; | ||
432 | |||
433 | /* Has it been rerouted ? */ | ||
434 | if (xc->server_num == state->act_server) | ||
435 | goto next; | ||
436 | |||
437 | /* | ||
437 | * Alright, it *has* been re-routed, kill it from | ||
439 | * the queue. | ||
440 | */ | ||
441 | qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); | ||
442 | |||
443 | /* Find the HW interrupt */ | ||
444 | kvmppc_xive_select_irq(state, &hw_num, &xd); | ||
445 | |||
446 | /* If it's not an LSI, set PQ to 11 so the EOI will force a resend */ | ||
447 | if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) | ||
448 | GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11); | ||
449 | |||
450 | /* EOI the source */ | ||
451 | GLUE(X_PFX,source_eoi)(hw_num, xd); | ||
452 | |||
453 | next: | ||
454 | idx = (idx + 1) & q->msk; | ||
455 | if (idx == 0) | ||
456 | toggle ^= 1; | ||
457 | } | ||
458 | } | ||
459 | } | ||
460 | |||
388 | X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) | 461 | X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) |
389 | { | 462 | { |
390 | struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; | 463 | struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; |
464 | struct kvmppc_xive *xive = vcpu->kvm->arch.xive; | ||
391 | u8 old_cppr; | 465 | u8 old_cppr; |
392 | 466 | ||
393 | pr_devel("H_CPPR(cppr=%ld)\n", cppr); | 467 | pr_devel("H_CPPR(cppr=%ld)\n", cppr); |
@@ -407,14 +481,34 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) | |||
407 | */ | 481 | */ |
408 | smp_mb(); | 482 | smp_mb(); |
409 | 483 | ||
410 | /* | 484 | if (cppr > old_cppr) { |
411 | * We are masking less, we need to look for pending things | 485 | /* |
412 | * to deliver and set VP pending bits accordingly to trigger | 486 | * We are masking less, we need to look for pending things |
413 | * a new interrupt otherwise we might miss MFRR changes for | 487 | * to deliver and set VP pending bits accordingly to trigger |
414 | * which we have optimized out sending an IPI signal. | 488 | * a new interrupt otherwise we might miss MFRR changes for |
415 | */ | 489 | * which we have optimized out sending an IPI signal. |
416 | if (cppr > old_cppr) | 490 | */ |
417 | GLUE(X_PFX,push_pending_to_hw)(xc); | 491 | GLUE(X_PFX,push_pending_to_hw)(xc); |
492 | } else { | ||
493 | /* | ||
494 | * We are masking more, we need to check the queue for any | ||
495 | * interrupt that has been routed to another CPU, take | ||
496 | * it out (replace it with the dummy) and retrigger it. | ||
497 | * | ||
498 | * This is necessary since those interrupts may otherwise | ||
499 | * never be processed, at least not until this CPU restores | ||
500 | * its CPPR. | ||
501 | * | ||
502 | * This is in theory racy vs. HW adding new interrupts to | ||
503 | * the queue. In practice this works because the interesting | ||
504 | * cases are when the guest has done a set_xive() to move the | ||
505 | * interrupt away, which flushes the xive, followed by the | ||
506 | * target CPU doing a H_CPPR. So any new interrupt coming into | ||
507 | * the queue must still be routed to us and isn't a source | ||
508 | * of concern. | ||
509 | */ | ||
510 | GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc); | ||
511 | } | ||
418 | 512 | ||
419 | /* Apply new CPPR */ | 513 | /* Apply new CPPR */ |
420 | xc->hw_cppr = cppr; | 514 | xc->hw_cppr = cppr; |
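The H_CPPR change above handles the masking-more case: the now-masked queues are scanned, and any entry that has since been routed to another vCPU is overwritten with the XICS_DUMMY marker and EOId so the source retriggers on its new target. A simplified, self-contained sketch of that ring walk (host-endian copy, hypothetical names; the driver works on a big-endian queue page and the retrigger step is "set PQ to 11 unless LSI, then EOI"):

#include <stdint.h>

#define DUMMY_ENTRY 1u                   /* stand-in for XICS_DUMMY */

struct queue_model {
    uint32_t *ring;                      /* queue page              */
    uint32_t  mask;                      /* ring size - 1           */
    uint32_t  idx;                       /* next entry to consume   */
    uint32_t  toggle;                    /* generation bit          */
};

static void scan_rerouted(struct queue_model *q, uint32_t my_server,
                          uint32_t (*owner_of)(uint32_t irq),
                          void (*retrigger)(uint32_t irq))
{
    uint32_t idx = q->idx, toggle = q->toggle;

    for (;;) {
        uint32_t entry = q->ring[idx];

        /* Same end-of-queue test as the driver: stop at the first entry
         * whose generation bit matches the consumer's toggle. */
        if ((entry >> 31) == toggle)
            break;

        uint32_t irq = entry & 0x7fffffff;
        if (irq != DUMMY_ENTRY && owner_of(irq) != my_server) {
            /* Keep the generation bit, replace the IRQ with the dummy,
             * then force the source to resend to its new target. */
            q->ring[idx] = (entry & 0x80000000) | DUMMY_ENTRY;
            retrigger(irq);
        }

        idx = (idx + 1) & q->mask;
        if (idx == 0)
            toggle ^= 1;                 /* wrapped: flip generation */
    }
}

The dummy entries are later skipped by the fetch path (the "If it's the dummy interrupt, continue searching" hunk above), so a masked queue never delivers an interrupt that now belongs to another server.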
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 8961e3970901..969882b54266 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c | |||
@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
578 | 578 | ||
579 | gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; | 579 | gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; |
580 | if (gpa && (scb_s->ecb & ECB_TE)) { | 580 | if (gpa && (scb_s->ecb & ECB_TE)) { |
581 | if (!(gpa & ~0x1fffU)) { | 581 | if (!(gpa & ~0x1fffUL)) { |
582 | rc = set_validity_icpt(scb_s, 0x0080U); | 582 | rc = set_validity_icpt(scb_s, 0x0080U); |
583 | goto unpin; | 583 | goto unpin; |
584 | } | 584 | } |
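The one-character vsie.c change (0x1fffU to 0x1fffUL) matters because gpa is 64-bit: the complement of a 32-bit constant is computed in 32 bits and then zero-extended, so the old test silently ignored bits 32-63 of the guest address. A minimal illustration (assuming an LP64 target such as s390x, where unsigned long is 64 bits):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t gpa = 0x100000000000ULL;                 /* only high bits set */

    /* ~0x1fffU is 0xffffe000 and zero-extends, so the high half of gpa
     * is masked away and the "below 8K" check passes bogusly. */
    printf("32-bit mask: %d\n", !(gpa & ~0x1fffU));   /* prints 1 (wrong)   */

    /* ~0x1fffUL is a full 64-bit mask, 0xffffffffffffe000. */
    printf("64-bit mask: %d\n", !(gpa & ~0x1fffUL));  /* prints 0 (correct) */
    return 0;
}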
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index e9525bc1b4a6..1ace023cbdce 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile | |||
@@ -21,7 +21,7 @@ LDFLAGS_purgatory.ro += -z nodefaultlib | |||
21 | KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes | 21 | KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes |
22 | KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare | 22 | KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare |
23 | KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding | 23 | KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding |
24 | KBUILD_CFLAGS += -c -MD -Os -m64 | 24 | KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float |
25 | KBUILD_CFLAGS += $(call cc-option,-fno-PIE) | 25 | KBUILD_CFLAGS += $(call cc-option,-fno-PIE) |
26 | 26 | ||
27 | $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE | 27 | $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 78decc3e3067..38276f58d3bf 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -942,12 +942,8 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { | |||
942 | {} | 942 | {} |
943 | }; | 943 | }; |
944 | 944 | ||
945 | /* Only list CPUs which speculate but are not susceptible to SSB */ | ||
945 | static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { | 946 | static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { |
946 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW }, | ||
947 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT }, | ||
948 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL }, | ||
949 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW }, | ||
950 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW }, | ||
951 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, | 947 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, |
952 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, | 948 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, |
953 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, | 949 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, |
@@ -955,14 +951,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { | |||
955 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, | 951 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, |
956 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, | 952 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, |
957 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, | 953 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, |
958 | { X86_VENDOR_CENTAUR, 5, }, | ||
959 | { X86_VENDOR_INTEL, 5, }, | ||
960 | { X86_VENDOR_NSC, 5, }, | ||
961 | { X86_VENDOR_AMD, 0x12, }, | 954 | { X86_VENDOR_AMD, 0x12, }, |
962 | { X86_VENDOR_AMD, 0x11, }, | 955 | { X86_VENDOR_AMD, 0x11, }, |
963 | { X86_VENDOR_AMD, 0x10, }, | 956 | { X86_VENDOR_AMD, 0x10, }, |
964 | { X86_VENDOR_AMD, 0xf, }, | 957 | { X86_VENDOR_AMD, 0xf, }, |
965 | { X86_VENDOR_ANY, 4, }, | ||
966 | {} | 958 | {} |
967 | }; | 959 | }; |
968 | 960 | ||
@@ -970,6 +962,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) | |||
970 | { | 962 | { |
971 | u64 ia32_cap = 0; | 963 | u64 ia32_cap = 0; |
972 | 964 | ||
965 | if (x86_match_cpu(cpu_no_speculation)) | ||
966 | return; | ||
967 | |||
968 | setup_force_cpu_bug(X86_BUG_SPECTRE_V1); | ||
969 | setup_force_cpu_bug(X86_BUG_SPECTRE_V2); | ||
970 | |||
973 | if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) | 971 | if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) |
974 | rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); | 972 | rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); |
975 | 973 | ||
@@ -977,12 +975,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) | |||
977 | !(ia32_cap & ARCH_CAP_SSB_NO)) | 975 | !(ia32_cap & ARCH_CAP_SSB_NO)) |
978 | setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); | 976 | setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); |
979 | 977 | ||
980 | if (x86_match_cpu(cpu_no_speculation)) | ||
981 | return; | ||
982 | |||
983 | setup_force_cpu_bug(X86_BUG_SPECTRE_V1); | ||
984 | setup_force_cpu_bug(X86_BUG_SPECTRE_V2); | ||
985 | |||
986 | if (x86_match_cpu(cpu_no_meltdown)) | 978 | if (x86_match_cpu(cpu_no_meltdown)) |
987 | return; | 979 | return; |
988 | 980 | ||
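The common.c reordering makes cpu_set_bug_bits() bail out for non-speculating CPUs before any bug bit (including SPEC_STORE_BYPASS) is set, so the entries dropped from cpu_no_spec_store_bypass appear to be the ones already covered by that early return; the list is left with parts that do speculate but are not susceptible to SSB. A hypothetical, self-contained model of the resulting control flow (the real tables and helpers live in arch/x86/kernel/cpu/common.c):

#include <stdbool.h>
#include <stdio.h>

struct cpu_model {
    bool no_speculation;    /* matches cpu_no_speculation         */
    bool no_ssb;            /* matches cpu_no_spec_store_bypass   */
    bool arch_cap_ssb_no;   /* ARCH_CAPABILITIES reports SSB_NO   */
    bool no_meltdown;       /* matches cpu_no_meltdown            */
};

static void set_bug_bits(const struct cpu_model *c)
{
    if (c->no_speculation)
        return;                          /* in-order parts: nothing to mark */

    puts("SPECTRE_V1");
    puts("SPECTRE_V2");

    if (!c->no_ssb && !c->arch_cap_ssb_no)
        puts("SPEC_STORE_BYPASS");

    if (c->no_meltdown)
        return;

    puts("CPU_MELTDOWN");
}

int main(void)
{
    struct cpu_model in_order_atom = { .no_speculation = true };
    set_bug_bits(&in_order_atom);        /* prints nothing: early return */
    return 0;
}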
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index ced851169730..92bf2f2e7cdd 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
@@ -407,8 +407,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
407 | 407 | ||
408 | /* cpuid 7.0.edx*/ | 408 | /* cpuid 7.0.edx*/ |
409 | const u32 kvm_cpuid_7_0_edx_x86_features = | 409 | const u32 kvm_cpuid_7_0_edx_x86_features = |
410 | F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) | | 410 | F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | |
411 | F(ARCH_CAPABILITIES); | 411 | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); |
412 | 412 | ||
413 | /* all calls to cpuid_count() should be made on the same cpu */ | 413 | /* all calls to cpuid_count() should be made on the same cpu */ |
414 | get_cpu(); | 414 | get_cpu(); |
@@ -495,6 +495,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
495 | entry->ecx &= ~F(PKU); | 495 | entry->ecx &= ~F(PKU); |
496 | entry->edx &= kvm_cpuid_7_0_edx_x86_features; | 496 | entry->edx &= kvm_cpuid_7_0_edx_x86_features; |
497 | cpuid_mask(&entry->edx, CPUID_7_EDX); | 497 | cpuid_mask(&entry->edx, CPUID_7_EDX); |
498 | /* | ||
499 | * We emulate ARCH_CAPABILITIES in software even | ||
500 | * if the host doesn't support it. | ||
501 | */ | ||
502 | entry->edx |= F(ARCH_CAPABILITIES); | ||
498 | } else { | 503 | } else { |
499 | entry->ebx = 0; | 504 | entry->ebx = 0; |
500 | entry->ecx = 0; | 505 | entry->ecx = 0; |
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 5708e951a5c6..46ff64da44ca 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c | |||
@@ -1260,14 +1260,18 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) | |||
1260 | } | 1260 | } |
1261 | } | 1261 | } |
1262 | 1262 | ||
1263 | static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) | 1263 | static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) |
1264 | { | 1264 | { |
1265 | struct kvm_run *run = vcpu->run; | 1265 | kvm_hv_hypercall_set_result(vcpu, result); |
1266 | 1266 | ++vcpu->stat.hypercalls; | |
1267 | kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result); | ||
1268 | return kvm_skip_emulated_instruction(vcpu); | 1267 | return kvm_skip_emulated_instruction(vcpu); |
1269 | } | 1268 | } |
1270 | 1269 | ||
1270 | static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) | ||
1271 | { | ||
1272 | return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); | ||
1273 | } | ||
1274 | |||
1271 | static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) | 1275 | static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) |
1272 | { | 1276 | { |
1273 | struct eventfd_ctx *eventfd; | 1277 | struct eventfd_ctx *eventfd; |
@@ -1350,7 +1354,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
1350 | /* Hypercall continuation is not supported yet */ | 1354 | /* Hypercall continuation is not supported yet */ |
1351 | if (rep_cnt || rep_idx) { | 1355 | if (rep_cnt || rep_idx) { |
1352 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; | 1356 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; |
1353 | goto set_result; | 1357 | goto out; |
1354 | } | 1358 | } |
1355 | 1359 | ||
1356 | switch (code) { | 1360 | switch (code) { |
@@ -1381,9 +1385,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
1381 | break; | 1385 | break; |
1382 | } | 1386 | } |
1383 | 1387 | ||
1384 | set_result: | 1388 | out: |
1385 | kvm_hv_hypercall_set_result(vcpu, ret); | 1389 | return kvm_hv_hypercall_complete(vcpu, ret); |
1386 | return 1; | ||
1387 | } | 1390 | } |
1388 | 1391 | ||
1389 | void kvm_hv_init_vm(struct kvm *kvm) | 1392 | void kvm_hv_init_vm(struct kvm *kvm) |
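The hyperv.c change funnels both the in-kernel path and the userspace-completion path through kvm_hv_hypercall_complete(), which sets the result, bumps the hypercall statistic, and skips the emulated instruction; together with the x86.c hunk further down, kvm_emulate_hypercall() can then simply return kvm_hv_hypercall() instead of special-casing its return value. A simplified model of the single-exit-path shape, not the actual KVM signatures:

#include <stdint.h>

struct vcpu_model {
    uint64_t result;
    unsigned long stat_hypercalls;
    unsigned int rip_skipped;
};

/* Every completion goes through here, so the statistic is counted and the
 * guest RIP advanced exactly once per hypercall, regardless of the path. */
static int hypercall_complete(struct vcpu_model *v, uint64_t result)
{
    v->result = result;            /* kvm_hv_hypercall_set_result()      */
    v->stat_hypercalls++;
    v->rip_skipped++;              /* kvm_skip_emulated_instruction()    */
    return 1;                      /* handled, resume the guest          */
}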
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index b74c9c1405b9..3773c4625114 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1522,11 +1522,23 @@ static bool set_target_expiration(struct kvm_lapic *apic) | |||
1522 | 1522 | ||
1523 | static void advance_periodic_target_expiration(struct kvm_lapic *apic) | 1523 | static void advance_periodic_target_expiration(struct kvm_lapic *apic) |
1524 | { | 1524 | { |
1525 | apic->lapic_timer.tscdeadline += | 1525 | ktime_t now = ktime_get(); |
1526 | nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); | 1526 | u64 tscl = rdtsc(); |
1527 | ktime_t delta; | ||
1528 | |||
1529 | /* | ||
1530 | * Synchronize both deadlines to the same time source or | ||
1531 | * differences in the periods (caused by differences in the | ||
1532 | * underlying clocks or numerical approximation errors) will | ||
1533 | * cause the two to drift apart over time as the errors | ||
1534 | * accumulate. | ||
1535 | */ | ||
1527 | apic->lapic_timer.target_expiration = | 1536 | apic->lapic_timer.target_expiration = |
1528 | ktime_add_ns(apic->lapic_timer.target_expiration, | 1537 | ktime_add_ns(apic->lapic_timer.target_expiration, |
1529 | apic->lapic_timer.period); | 1538 | apic->lapic_timer.period); |
1539 | delta = ktime_sub(apic->lapic_timer.target_expiration, now); | ||
1540 | apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + | ||
1541 | nsec_to_cycles(apic->vcpu, delta); | ||
1530 | } | 1542 | } |
1531 | 1543 | ||
1532 | static void start_sw_period(struct kvm_lapic *apic) | 1544 | static void start_sw_period(struct kvm_lapic *apic) |
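The comment added to advance_periodic_target_expiration() names the bug: the ktime target and the TSC deadline were each advanced by "period" in their own clock domain, so rounding differences accumulated and the two drifted apart. The fix re-derives the TSC deadline from the ktime target every period. Roughly, with hypothetical conversion helpers standing in for nsec_to_cycles() and kvm_read_l1_tsc():

#include <stdint.h>

struct lapic_timer_model {
    uint64_t period_ns;
    int64_t  target_expiration_ns;   /* ktime target          */
    uint64_t tscdeadline;            /* guest TSC deadline    */
};

static void advance_periodic(struct lapic_timer_model *t,
                             int64_t now_ns, uint64_t guest_tsc_now,
                             uint64_t (*ns_to_tsc)(int64_t ns))
{
    t->target_expiration_ns += t->period_ns;

    /* Both deadlines are anchored to the same time source: the TSC value
     * is recomputed from the remaining ktime delta each period. */
    t->tscdeadline = guest_tsc_now +
                     ns_to_tsc(t->target_expiration_ns - now_ns);
}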
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 22a183aac1c6..71e7cda6d014 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -6671,11 +6671,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
6671 | unsigned long nr, a0, a1, a2, a3, ret; | 6671 | unsigned long nr, a0, a1, a2, a3, ret; |
6672 | int op_64_bit; | 6672 | int op_64_bit; |
6673 | 6673 | ||
6674 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) { | 6674 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) |
6675 | if (!kvm_hv_hypercall(vcpu)) | 6675 | return kvm_hv_hypercall(vcpu); |
6676 | return 0; | ||
6677 | goto out; | ||
6678 | } | ||
6679 | 6676 | ||
6680 | nr = kvm_register_read(vcpu, VCPU_REGS_RAX); | 6677 | nr = kvm_register_read(vcpu, VCPU_REGS_RAX); |
6681 | a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); | 6678 | a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); |
@@ -6696,7 +6693,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
6696 | 6693 | ||
6697 | if (kvm_x86_ops->get_cpl(vcpu) != 0) { | 6694 | if (kvm_x86_ops->get_cpl(vcpu) != 0) { |
6698 | ret = -KVM_EPERM; | 6695 | ret = -KVM_EPERM; |
6699 | goto out_error; | 6696 | goto out; |
6700 | } | 6697 | } |
6701 | 6698 | ||
6702 | switch (nr) { | 6699 | switch (nr) { |
@@ -6716,12 +6713,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
6716 | ret = -KVM_ENOSYS; | 6713 | ret = -KVM_ENOSYS; |
6717 | break; | 6714 | break; |
6718 | } | 6715 | } |
6719 | out_error: | 6716 | out: |
6720 | if (!op_64_bit) | 6717 | if (!op_64_bit) |
6721 | ret = (u32)ret; | 6718 | ret = (u32)ret; |
6722 | kvm_register_write(vcpu, VCPU_REGS_RAX, ret); | 6719 | kvm_register_write(vcpu, VCPU_REGS_RAX, ret); |
6723 | 6720 | ||
6724 | out: | ||
6725 | ++vcpu->stat.hypercalls; | 6721 | ++vcpu->stat.hypercalls; |
6726 | return kvm_skip_emulated_instruction(vcpu); | 6722 | return kvm_skip_emulated_instruction(vcpu); |
6727 | } | 6723 | } |
@@ -7980,6 +7976,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
7980 | { | 7976 | { |
7981 | struct msr_data apic_base_msr; | 7977 | struct msr_data apic_base_msr; |
7982 | int mmu_reset_needed = 0; | 7978 | int mmu_reset_needed = 0; |
7979 | int cpuid_update_needed = 0; | ||
7983 | int pending_vec, max_bits, idx; | 7980 | int pending_vec, max_bits, idx; |
7984 | struct desc_ptr dt; | 7981 | struct desc_ptr dt; |
7985 | int ret = -EINVAL; | 7982 | int ret = -EINVAL; |
@@ -8018,8 +8015,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
8018 | vcpu->arch.cr0 = sregs->cr0; | 8015 | vcpu->arch.cr0 = sregs->cr0; |
8019 | 8016 | ||
8020 | mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; | 8017 | mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; |
8018 | cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & | ||
8019 | (X86_CR4_OSXSAVE | X86_CR4_PKE)); | ||
8021 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); | 8020 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); |
8022 | if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) | 8021 | if (cpuid_update_needed) |
8023 | kvm_update_cpuid(vcpu); | 8022 | kvm_update_cpuid(vcpu); |
8024 | 8023 | ||
8025 | idx = srcu_read_lock(&vcpu->kvm->srcu); | 8024 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
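The __set_sregs() hunk stops calling kvm_update_cpuid() whenever OSXSAVE or PKE happen to be set and calls it only when those CR4 bits actually change, with the comparison taken against the CR4 value read before kvm_x86_ops->set_cr4() installs the new one. The changed-bits test is the usual XOR-and-mask pattern:

#include <stdint.h>
#include <stdbool.h>

#define CR4_OSXSAVE (1u << 18)
#define CR4_PKE     (1u << 22)

/* True when any CPUID-visible CR4 bit differs between the current value
 * and the one userspace is about to install. */
static bool cpuid_update_needed(uint64_t cr4_old, uint64_t cr4_new)
{
    return (cr4_old ^ cr4_new) & (CR4_OSXSAVE | CR4_PKE);
}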
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 9c9a22958717..a8d2eb0ceb8d 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c | |||
@@ -1151,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte, | |||
1151 | } | 1151 | } |
1152 | 1152 | ||
1153 | 1153 | ||
1154 | static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd, | 1154 | static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset, |
1155 | int offset, int swap) | 1155 | int swap) |
1156 | { | 1156 | { |
1157 | unsigned char buf[ZEPROM_SIZE]; | 1157 | unsigned char buf[ZEPROM_SIZE]; |
1158 | struct zatm_dev *zatm_dev; | 1158 | struct zatm_dev *zatm_dev; |
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index d4a81be0d7d2..b6be62025325 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c | |||
@@ -152,8 +152,8 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) | |||
152 | EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; | 152 | EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; |
153 | writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); | 153 | writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); |
154 | 154 | ||
155 | memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0, | 155 | memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0, |
156 | EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); | 156 | EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); |
157 | 157 | ||
158 | eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, | 158 | eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, |
159 | EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); | 159 | EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); |
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index dfbd894d5bb7..4e24e591ae74 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c | |||
@@ -147,7 +147,7 @@ static u32 smc(u32 cmd_addr) | |||
147 | "smc #0 @ switch to secure world\n" | 147 | "smc #0 @ switch to secure world\n" |
148 | : "=r" (r0) | 148 | : "=r" (r0) |
149 | : "r" (r0), "r" (r1), "r" (r2) | 149 | : "r" (r0), "r" (r1), "r" (r2) |
150 | : "r3"); | 150 | : "r3", "r12"); |
151 | } while (r0 == QCOM_SCM_INTERRUPTED); | 151 | } while (r0 == QCOM_SCM_INTERRUPTED); |
152 | 152 | ||
153 | return r0; | 153 | return r0; |
@@ -263,7 +263,7 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) | |||
263 | "smc #0 @ switch to secure world\n" | 263 | "smc #0 @ switch to secure world\n" |
264 | : "=r" (r0) | 264 | : "=r" (r0) |
265 | : "r" (r0), "r" (r1), "r" (r2) | 265 | : "r" (r0), "r" (r1), "r" (r2) |
266 | : "r3"); | 266 | : "r3", "r12"); |
267 | return r0; | 267 | return r0; |
268 | } | 268 | } |
269 | 269 | ||
@@ -298,7 +298,7 @@ static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2) | |||
298 | "smc #0 @ switch to secure world\n" | 298 | "smc #0 @ switch to secure world\n" |
299 | : "=r" (r0) | 299 | : "=r" (r0) |
300 | : "r" (r0), "r" (r1), "r" (r2), "r" (r3) | 300 | : "r" (r0), "r" (r1), "r" (r2), "r" (r3) |
301 | ); | 301 | : "r12"); |
302 | return r0; | 302 | return r0; |
303 | } | 303 | } |
304 | 304 | ||
@@ -328,7 +328,7 @@ u32 qcom_scm_get_version(void) | |||
328 | "smc #0 @ switch to secure world\n" | 328 | "smc #0 @ switch to secure world\n" |
329 | : "=r" (r0), "=r" (r1) | 329 | : "=r" (r0), "=r" (r1) |
330 | : "r" (r0), "r" (r1) | 330 | : "r" (r0), "r" (r1) |
331 | : "r2", "r3"); | 331 | : "r2", "r3", "r12"); |
332 | } while (r0 == QCOM_SCM_INTERRUPTED); | 332 | } while (r0 == QCOM_SCM_INTERRUPTED); |
333 | 333 | ||
334 | version = r1; | 334 | version = r1; |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1dd1142246c2..27579443cdc5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -4555,8 +4555,8 @@ static int dm_update_crtcs_state(struct dc *dc, | |||
4555 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 4555 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
4556 | struct amdgpu_crtc *acrtc = NULL; | 4556 | struct amdgpu_crtc *acrtc = NULL; |
4557 | struct amdgpu_dm_connector *aconnector = NULL; | 4557 | struct amdgpu_dm_connector *aconnector = NULL; |
4558 | struct drm_connector_state *new_con_state = NULL; | 4558 | struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; |
4559 | struct dm_connector_state *dm_conn_state = NULL; | 4559 | struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; |
4560 | struct drm_plane_state *new_plane_state = NULL; | 4560 | struct drm_plane_state *new_plane_state = NULL; |
4561 | 4561 | ||
4562 | new_stream = NULL; | 4562 | new_stream = NULL; |
@@ -4577,19 +4577,23 @@ static int dm_update_crtcs_state(struct dc *dc, | |||
4577 | /* TODO This hack should go away */ | 4577 | /* TODO This hack should go away */ |
4578 | if (aconnector && enable) { | 4578 | if (aconnector && enable) { |
4579 | // Make sure fake sink is created in plug-in scenario | 4579 | // Make sure fake sink is created in plug-in scenario |
4580 | new_con_state = drm_atomic_get_connector_state(state, | 4580 | drm_new_conn_state = drm_atomic_get_new_connector_state(state, |
4581 | &aconnector->base); | 4581 | &aconnector->base); |
4582 | drm_old_conn_state = drm_atomic_get_old_connector_state(state, | ||
4583 | &aconnector->base); | ||
4582 | 4584 | ||
4583 | if (IS_ERR(new_con_state)) { | 4585 | |
4584 | ret = PTR_ERR_OR_ZERO(new_con_state); | 4586 | if (IS_ERR(drm_new_conn_state)) { |
4587 | ret = PTR_ERR_OR_ZERO(drm_new_conn_state); | ||
4585 | break; | 4588 | break; |
4586 | } | 4589 | } |
4587 | 4590 | ||
4588 | dm_conn_state = to_dm_connector_state(new_con_state); | 4591 | dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); |
4592 | dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); | ||
4589 | 4593 | ||
4590 | new_stream = create_stream_for_sink(aconnector, | 4594 | new_stream = create_stream_for_sink(aconnector, |
4591 | &new_crtc_state->mode, | 4595 | &new_crtc_state->mode, |
4592 | dm_conn_state); | 4596 | dm_new_conn_state); |
4593 | 4597 | ||
4594 | /* | 4598 | /* |
4595 | * we can have no stream on ACTION_SET if a display | 4599 | * we can have no stream on ACTION_SET if a display |
@@ -4695,20 +4699,30 @@ next_crtc: | |||
4695 | * We want to do dc stream updates that do not require a | 4699 | * We want to do dc stream updates that do not require a |
4696 | * full modeset below. | 4700 | * full modeset below. |
4697 | */ | 4701 | */ |
4698 | if (!enable || !aconnector || modereset_required(new_crtc_state)) | 4702 | if (!(enable && aconnector && new_crtc_state->enable && |
4703 | new_crtc_state->active)) | ||
4699 | continue; | 4704 | continue; |
4700 | /* | 4705 | /* |
4701 | * Given above conditions, the dc state cannot be NULL because: | 4706 | * Given above conditions, the dc state cannot be NULL because: |
4702 | * 1. We're attempting to enable a CRTC. Which has a... | 4707 | * 1. We're in the process of enabling CRTCs (just been added |
4703 | * 2. Valid connector attached, and | 4708 | * to the dc context, or already is on the context) |
4704 | * 3. User does not want to reset it (disable or mark inactive, | 4709 | * 2. Has a valid connector attached, and |
4705 | * which can happen on a CRTC that's already disabled). | 4710 | * 3. Is currently active and enabled. |
4706 | * => It currently exists. | 4711 | * => The dc stream state currently exists. |
4707 | */ | 4712 | */ |
4708 | BUG_ON(dm_new_crtc_state->stream == NULL); | 4713 | BUG_ON(dm_new_crtc_state->stream == NULL); |
4709 | 4714 | ||
4710 | /* Color managment settings */ | 4715 | /* Scaling or underscan settings */ |
4711 | if (dm_new_crtc_state->base.color_mgmt_changed) { | 4716 | if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state)) |
4717 | update_stream_scaling_settings( | ||
4718 | &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); | ||
4719 | |||
4720 | /* | ||
4721 | * Color management settings. We also update color properties | ||
4722 | * when a modeset is needed, to ensure it gets reprogrammed. | ||
4723 | */ | ||
4724 | if (dm_new_crtc_state->base.color_mgmt_changed || | ||
4725 | drm_atomic_crtc_needs_modeset(new_crtc_state)) { | ||
4712 | ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state); | 4726 | ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state); |
4713 | if (ret) | 4727 | if (ret) |
4714 | goto fail; | 4728 | goto fail; |
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index ec8d0006ef7c..3c136f2b954f 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | |||
@@ -2077,7 +2077,7 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) | |||
2077 | return ret; | 2077 | return ret; |
2078 | } | 2078 | } |
2079 | 2079 | ||
2080 | void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense) | 2080 | void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense) |
2081 | { | 2081 | { |
2082 | mutex_lock(&hdmi->mutex); | 2082 | mutex_lock(&hdmi->mutex); |
2083 | 2083 | ||
@@ -2103,13 +2103,6 @@ void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense) | |||
2103 | } | 2103 | } |
2104 | mutex_unlock(&hdmi->mutex); | 2104 | mutex_unlock(&hdmi->mutex); |
2105 | } | 2105 | } |
2106 | |||
2107 | void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense) | ||
2108 | { | ||
2109 | struct dw_hdmi *hdmi = dev_get_drvdata(dev); | ||
2110 | |||
2111 | __dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense); | ||
2112 | } | ||
2113 | EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense); | 2106 | EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense); |
2114 | 2107 | ||
2115 | static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) | 2108 | static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) |
@@ -2145,9 +2138,9 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) | |||
2145 | */ | 2138 | */ |
2146 | if (intr_stat & | 2139 | if (intr_stat & |
2147 | (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) { | 2140 | (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) { |
2148 | __dw_hdmi_setup_rx_sense(hdmi, | 2141 | dw_hdmi_setup_rx_sense(hdmi, |
2149 | phy_stat & HDMI_PHY_HPD, | 2142 | phy_stat & HDMI_PHY_HPD, |
2150 | phy_stat & HDMI_PHY_RX_SENSE); | 2143 | phy_stat & HDMI_PHY_RX_SENSE); |
2151 | 2144 | ||
2152 | if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0) | 2145 | if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0) |
2153 | cec_notifier_set_phys_addr(hdmi->cec_notifier, | 2146 | cec_notifier_set_phys_addr(hdmi->cec_notifier, |
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index ffe14ec3e7f2..70ae1f232331 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
@@ -1145,6 +1145,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]) | |||
1145 | static const u16 psr_setup_time_us[] = { | 1145 | static const u16 psr_setup_time_us[] = { |
1146 | PSR_SETUP_TIME(330), | 1146 | PSR_SETUP_TIME(330), |
1147 | PSR_SETUP_TIME(275), | 1147 | PSR_SETUP_TIME(275), |
1148 | PSR_SETUP_TIME(220), | ||
1148 | PSR_SETUP_TIME(165), | 1149 | PSR_SETUP_TIME(165), |
1149 | PSR_SETUP_TIME(110), | 1150 | PSR_SETUP_TIME(110), |
1150 | PSR_SETUP_TIME(55), | 1151 | PSR_SETUP_TIME(55), |
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 3ace929dd90f..3f502eef2431 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c | |||
@@ -4,6 +4,8 @@ | |||
4 | * Copyright © 2018 Intel Corporation | 4 | * Copyright © 2018 Intel Corporation |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/nospec.h> | ||
8 | |||
7 | #include "i915_drv.h" | 9 | #include "i915_drv.h" |
8 | #include "i915_query.h" | 10 | #include "i915_query.h" |
9 | #include <uapi/drm/i915_drm.h> | 11 | #include <uapi/drm/i915_drm.h> |
@@ -100,7 +102,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
100 | 102 | ||
101 | for (i = 0; i < args->num_items; i++, user_item_ptr++) { | 103 | for (i = 0; i < args->num_items; i++, user_item_ptr++) { |
102 | struct drm_i915_query_item item; | 104 | struct drm_i915_query_item item; |
103 | u64 func_idx; | 105 | unsigned long func_idx; |
104 | int ret; | 106 | int ret; |
105 | 107 | ||
106 | if (copy_from_user(&item, user_item_ptr, sizeof(item))) | 108 | if (copy_from_user(&item, user_item_ptr, sizeof(item))) |
@@ -109,12 +111,17 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
109 | if (item.query_id == 0) | 111 | if (item.query_id == 0) |
110 | return -EINVAL; | 112 | return -EINVAL; |
111 | 113 | ||
114 | if (overflows_type(item.query_id - 1, unsigned long)) | ||
115 | return -EINVAL; | ||
116 | |||
112 | func_idx = item.query_id - 1; | 117 | func_idx = item.query_id - 1; |
113 | 118 | ||
114 | if (func_idx < ARRAY_SIZE(i915_query_funcs)) | 119 | ret = -EINVAL; |
120 | if (func_idx < ARRAY_SIZE(i915_query_funcs)) { | ||
121 | func_idx = array_index_nospec(func_idx, | ||
122 | ARRAY_SIZE(i915_query_funcs)); | ||
115 | ret = i915_query_funcs[func_idx](dev_priv, &item); | 123 | ret = i915_query_funcs[func_idx](dev_priv, &item); |
116 | else | 124 | } |
117 | ret = -EINVAL; | ||
118 | 125 | ||
119 | /* Only write the length back to userspace if they differ. */ | 126 | /* Only write the length back to userspace if they differ. */ |
120 | if (ret != item.length && put_user(ret, &user_item_ptr->length)) | 127 | if (ret != item.length && put_user(ret, &user_item_ptr->length)) |
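The i915_query hunk is standard Spectre-v1 hardening: the user-controlled index is bounds-checked and then clamped with array_index_nospec() inside the taken branch, so a speculatively out-of-bounds index cannot be used to load an arbitrary function pointer; the overflows_type() check additionally guards the u64-to-unsigned-long narrowing on 32-bit builds. A freestanding sketch of the check-then-clamp pattern, using a mask-based clamp in the same spirit as the kernel's generic helper (it relies on arithmetic right shift and assumes 0 < size with both values well below the sign bit):

#include <stdint.h>
#include <stddef.h>

static int q_example(void) { return 0; }
static int (*const funcs[])(void) = { q_example };   /* stand-in table */
#define N_FUNCS (sizeof(funcs) / sizeof(funcs[0]))

/* Yields idx when idx < size and 0 otherwise, without a conditional
 * branch, so a mispredicted bounds check cannot index out of range. */
static uint64_t index_nospec(uint64_t idx, uint64_t size)
{
    uint64_t mask = (uint64_t)(~(int64_t)(idx | (size - 1 - idx)) >> 63);
    return idx & mask;
}

static int dispatch(uint64_t query_id)
{
    uint64_t idx = query_id - 1;          /* query_id == 0 already rejected */

    if (idx >= N_FUNCS)
        return -22;                       /* -EINVAL                        */
    idx = index_nospec(idx, N_FUNCS);     /* clamp before the table load    */
    return funcs[idx]();
}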
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8691c86f579c..e125d16a1aa7 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -574,6 +574,36 @@ exit: | |||
574 | return NOTIFY_OK; | 574 | return NOTIFY_OK; |
575 | } | 575 | } |
576 | 576 | ||
577 | static int | ||
578 | intel_lvds_connector_register(struct drm_connector *connector) | ||
579 | { | ||
580 | struct intel_lvds_connector *lvds = to_lvds_connector(connector); | ||
581 | int ret; | ||
582 | |||
583 | ret = intel_connector_register(connector); | ||
584 | if (ret) | ||
585 | return ret; | ||
586 | |||
587 | lvds->lid_notifier.notifier_call = intel_lid_notify; | ||
588 | if (acpi_lid_notifier_register(&lvds->lid_notifier)) { | ||
589 | DRM_DEBUG_KMS("lid notifier registration failed\n"); | ||
590 | lvds->lid_notifier.notifier_call = NULL; | ||
591 | } | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static void | ||
597 | intel_lvds_connector_unregister(struct drm_connector *connector) | ||
598 | { | ||
599 | struct intel_lvds_connector *lvds = to_lvds_connector(connector); | ||
600 | |||
601 | if (lvds->lid_notifier.notifier_call) | ||
602 | acpi_lid_notifier_unregister(&lvds->lid_notifier); | ||
603 | |||
604 | intel_connector_unregister(connector); | ||
605 | } | ||
606 | |||
577 | /** | 607 | /** |
578 | * intel_lvds_destroy - unregister and free LVDS structures | 608 | * intel_lvds_destroy - unregister and free LVDS structures |
579 | * @connector: connector to free | 609 | * @connector: connector to free |
@@ -586,9 +616,6 @@ static void intel_lvds_destroy(struct drm_connector *connector) | |||
586 | struct intel_lvds_connector *lvds_connector = | 616 | struct intel_lvds_connector *lvds_connector = |
587 | to_lvds_connector(connector); | 617 | to_lvds_connector(connector); |
588 | 618 | ||
589 | if (lvds_connector->lid_notifier.notifier_call) | ||
590 | acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); | ||
591 | |||
592 | if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) | 619 | if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) |
593 | kfree(lvds_connector->base.edid); | 620 | kfree(lvds_connector->base.edid); |
594 | 621 | ||
@@ -609,8 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { | |||
609 | .fill_modes = drm_helper_probe_single_connector_modes, | 636 | .fill_modes = drm_helper_probe_single_connector_modes, |
610 | .atomic_get_property = intel_digital_connector_atomic_get_property, | 637 | .atomic_get_property = intel_digital_connector_atomic_get_property, |
611 | .atomic_set_property = intel_digital_connector_atomic_set_property, | 638 | .atomic_set_property = intel_digital_connector_atomic_set_property, |
612 | .late_register = intel_connector_register, | 639 | .late_register = intel_lvds_connector_register, |
613 | .early_unregister = intel_connector_unregister, | 640 | .early_unregister = intel_lvds_connector_unregister, |
614 | .destroy = intel_lvds_destroy, | 641 | .destroy = intel_lvds_destroy, |
615 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 642 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
616 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, | 643 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, |
@@ -827,6 +854,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
827 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), | 854 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), |
828 | }, | 855 | }, |
829 | }, | 856 | }, |
857 | { | ||
858 | .callback = intel_no_lvds_dmi_callback, | ||
859 | .ident = "Radiant P845", | ||
860 | .matches = { | ||
861 | DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"), | ||
862 | DMI_MATCH(DMI_PRODUCT_NAME, "P845"), | ||
863 | }, | ||
864 | }, | ||
830 | 865 | ||
831 | { } /* terminating entry */ | 866 | { } /* terminating entry */ |
832 | }; | 867 | }; |
@@ -1150,12 +1185,6 @@ out: | |||
1150 | 1185 | ||
1151 | lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; | 1186 | lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; |
1152 | 1187 | ||
1153 | lvds_connector->lid_notifier.notifier_call = intel_lid_notify; | ||
1154 | if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { | ||
1155 | DRM_DEBUG_KMS("lid notifier registration failed\n"); | ||
1156 | lvds_connector->lid_notifier.notifier_call = NULL; | ||
1157 | } | ||
1158 | |||
1159 | return; | 1188 | return; |
1160 | 1189 | ||
1161 | failed: | 1190 | failed: |
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index a393095aac1a..c9ad45686e7a 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c | |||
@@ -529,7 +529,7 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id) | |||
529 | if (stat & HDMITX_TOP_INTR_HPD_RISE) | 529 | if (stat & HDMITX_TOP_INTR_HPD_RISE) |
530 | hpd_connected = true; | 530 | hpd_connected = true; |
531 | 531 | ||
532 | dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected, | 532 | dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected, |
533 | hpd_connected); | 533 | hpd_connected); |
534 | 534 | ||
535 | drm_helper_hpd_irq_event(dw_hdmi->encoder.dev); | 535 | drm_helper_hpd_irq_event(dw_hdmi->encoder.dev); |
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 68a40ae26f5b..1e2c931f6acf 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c | |||
@@ -82,7 +82,7 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk, | |||
82 | struct dispc_clock_info *dispc_cinfo) | 82 | struct dispc_clock_info *dispc_cinfo) |
83 | { | 83 | { |
84 | int i; | 84 | int i; |
85 | struct sdi_clk_calc_ctx ctx = { .sdi = sdi }; | 85 | struct sdi_clk_calc_ctx ctx; |
86 | 86 | ||
87 | /* | 87 | /* |
88 | * DSS fclk gives us very few possibilities, so finding a good pixel | 88 | * DSS fclk gives us very few possibilities, so finding a good pixel |
@@ -95,6 +95,9 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk, | |||
95 | bool ok; | 95 | bool ok; |
96 | 96 | ||
97 | memset(&ctx, 0, sizeof(ctx)); | 97 | memset(&ctx, 0, sizeof(ctx)); |
98 | |||
99 | ctx.sdi = sdi; | ||
100 | |||
98 | if (pclk > 1000 * i * i * i) | 101 | if (pclk > 1000 * i * i * i) |
99 | ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu); | 102 | ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu); |
100 | else | 103 | else |
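The sdi_calc_clock_div() fix is a small lesson in initializer-versus-loop-reset ordering: ctx was given a designated initializer at declaration, but the retry loop begins with memset(&ctx, 0, sizeof(ctx)), wiping the .sdi pointer on every iteration; the fix assigns ctx.sdi after the memset. A minimal reproduction of the pattern:

#include <string.h>
#include <stdio.h>

struct ctx_model { const char *dev; long pck_min, pck_max; };

int main(void)
{
    const char *dev = "sdi0";
    struct ctx_model ctx = { .dev = dev };  /* initialized once...            */

    for (int i = 1; i <= 2; i++) {
        memset(&ctx, 0, sizeof(ctx));       /* ...but zeroed on every attempt */
        ctx.dev = dev;                      /* the fix: re-assign after memset */
        printf("attempt %d: dev=%s\n", i, ctx.dev ? ctx.dev : "(null)");
    }
    return 0;
}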
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index ede388309376..634f58042c77 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c | |||
@@ -733,8 +733,8 @@ err_nomem: | |||
733 | /* Reset the page to write-back before releasing */ | 733 | /* Reset the page to write-back before releasing */ |
734 | set_memory_wb((unsigned long)win->block[i].bdesc, 1); | 734 | set_memory_wb((unsigned long)win->block[i].bdesc, 1); |
735 | #endif | 735 | #endif |
736 | dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc, | 736 | dma_free_coherent(msc_dev(msc)->parent->parent, size, |
737 | win->block[i].addr); | 737 | win->block[i].bdesc, win->block[i].addr); |
738 | } | 738 | } |
739 | kfree(win); | 739 | kfree(win); |
740 | 740 | ||
@@ -769,7 +769,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) | |||
769 | /* Reset the page to write-back before releasing */ | 769 | /* Reset the page to write-back before releasing */ |
770 | set_memory_wb((unsigned long)win->block[i].bdesc, 1); | 770 | set_memory_wb((unsigned long)win->block[i].bdesc, 1); |
771 | #endif | 771 | #endif |
772 | dma_free_coherent(msc_dev(win->msc), PAGE_SIZE, | 772 | dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, |
773 | win->block[i].bdesc, win->block[i].addr); | 773 | win->block[i].bdesc, win->block[i].addr); |
774 | } | 774 | } |
775 | 775 | ||
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 05386b76465e..10bcb5d73f90 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/stm.h> | 19 | #include <linux/stm.h> |
20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/vmalloc.h> | ||
22 | #include "stm.h" | 23 | #include "stm.h" |
23 | 24 | ||
24 | #include <uapi/linux/stm.h> | 25 | #include <uapi/linux/stm.h> |
@@ -674,7 +675,7 @@ static void stm_device_release(struct device *dev) | |||
674 | { | 675 | { |
675 | struct stm_device *stm = to_stm_device(dev); | 676 | struct stm_device *stm = to_stm_device(dev); |
676 | 677 | ||
677 | kfree(stm); | 678 | vfree(stm); |
678 | } | 679 | } |
679 | 680 | ||
680 | int stm_register_device(struct device *parent, struct stm_data *stm_data, | 681 | int stm_register_device(struct device *parent, struct stm_data *stm_data, |
@@ -691,7 +692,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
691 | return -EINVAL; | 692 | return -EINVAL; |
692 | 693 | ||
693 | nmasters = stm_data->sw_end - stm_data->sw_start + 1; | 694 | nmasters = stm_data->sw_end - stm_data->sw_start + 1; |
694 | stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); | 695 | stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *)); |
695 | if (!stm) | 696 | if (!stm) |
696 | return -ENOMEM; | 697 | return -ENOMEM; |
697 | 698 | ||
@@ -744,7 +745,7 @@ err_device: | |||
744 | /* matches device_initialize() above */ | 745 | /* matches device_initialize() above */ |
745 | put_device(&stm->dev); | 746 | put_device(&stm->dev); |
746 | err_free: | 747 | err_free: |
747 | kfree(stm); | 748 | vfree(stm); |
748 | 749 | ||
749 | return err; | 750 | return err; |
750 | } | 751 | } |
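The stm core switch from kzalloc/kfree to vzalloc/vfree (error path and release callback included) presumably accommodates the trailing per-master pointer array: a large sw_start..sw_end range can make the allocation bigger than a physically contiguous kmalloc will reliably provide, and the structure is never used for DMA, so virtually contiguous memory suffices. The size arithmetic is the familiar "struct plus flexible array" pattern; a userspace sketch with calloc standing in for vzalloc:

#include <stdint.h>
#include <stdlib.h>

struct stm_model {
    uint32_t sw_start, sw_end;
    void *masters[];                      /* one slot per master ID */
};

static struct stm_model *alloc_stm(uint32_t sw_start, uint32_t sw_end)
{
    size_t nmasters = (size_t)sw_end - sw_start + 1;
    size_t size = sizeof(struct stm_model) + nmasters * sizeof(void *);
    struct stm_model *stm = calloc(1, size);   /* vzalloc() in the kernel */

    if (stm) {
        stm->sw_start = sw_start;
        stm->sw_end = sw_end;
    }
    return stm;
}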
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 8c42ca7107b2..45ae3c025bf6 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * i2c-ocores.c: I2C bus driver for OpenCores I2C controller | 2 | * i2c-ocores.c: I2C bus driver for OpenCores I2C controller |
3 | * (http://www.opencores.org/projects.cgi/web/i2c/overview). | 3 | * (https://opencores.org/project/i2c/overview) |
4 | * | 4 | * |
5 | * Peter Korsgaard <jacmet@sunsite.dk> | 5 | * Peter Korsgaard <jacmet@sunsite.dk> |
6 | * | 6 | * |
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 15606f237480..9da79070357c 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig | |||
@@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC | |||
158 | depends on ARCH_AT91 || COMPILE_TEST | 158 | depends on ARCH_AT91 || COMPILE_TEST |
159 | depends on HAS_IOMEM | 159 | depends on HAS_IOMEM |
160 | depends on HAS_DMA | 160 | depends on HAS_DMA |
161 | select IIO_BUFFER | ||
161 | select IIO_TRIGGERED_BUFFER | 162 | select IIO_TRIGGERED_BUFFER |
162 | help | 163 | help |
163 | Say yes here to build support for Atmel SAMA5D2 ADC which is | 164 | Say yes here to build support for Atmel SAMA5D2 ADC which is |
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index 801afb61310b..d4bbe5b53318 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c | |||
@@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39, | |||
348 | static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0, | 348 | static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0, |
349 | 33, 0, 17, 16, 12, 10, 8, 6, 4}; | 349 | 33, 0, 17, 16, 12, 10, 8, 6, 4}; |
350 | 350 | ||
351 | static ssize_t ad7793_read_frequency(struct device *dev, | ||
352 | struct device_attribute *attr, | ||
353 | char *buf) | ||
354 | { | ||
355 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | ||
356 | struct ad7793_state *st = iio_priv(indio_dev); | ||
357 | |||
358 | return sprintf(buf, "%d\n", | ||
359 | st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]); | ||
360 | } | ||
361 | |||
362 | static ssize_t ad7793_write_frequency(struct device *dev, | ||
363 | struct device_attribute *attr, | ||
364 | const char *buf, | ||
365 | size_t len) | ||
366 | { | ||
367 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | ||
368 | struct ad7793_state *st = iio_priv(indio_dev); | ||
369 | long lval; | ||
370 | int i, ret; | ||
371 | |||
372 | ret = kstrtol(buf, 10, &lval); | ||
373 | if (ret) | ||
374 | return ret; | ||
375 | |||
376 | if (lval == 0) | ||
377 | return -EINVAL; | ||
378 | |||
379 | for (i = 0; i < 16; i++) | ||
380 | if (lval == st->chip_info->sample_freq_avail[i]) | ||
381 | break; | ||
382 | if (i == 16) | ||
383 | return -EINVAL; | ||
384 | |||
385 | ret = iio_device_claim_direct_mode(indio_dev); | ||
386 | if (ret) | ||
387 | return ret; | ||
388 | st->mode &= ~AD7793_MODE_RATE(-1); | ||
389 | st->mode |= AD7793_MODE_RATE(i); | ||
390 | ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode); | ||
391 | iio_device_release_direct_mode(indio_dev); | ||
392 | |||
393 | return len; | ||
394 | } | ||
395 | |||
396 | static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, | ||
397 | ad7793_read_frequency, | ||
398 | ad7793_write_frequency); | ||
399 | |||
400 | static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( | 351 | static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( |
401 | "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4"); | 352 | "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4"); |
402 | 353 | ||
@@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, | |||
424 | ad7793_show_scale_available, NULL, 0); | 375 | ad7793_show_scale_available, NULL, 0); |
425 | 376 | ||
426 | static struct attribute *ad7793_attributes[] = { | 377 | static struct attribute *ad7793_attributes[] = { |
427 | &iio_dev_attr_sampling_frequency.dev_attr.attr, | ||
428 | &iio_const_attr_sampling_frequency_available.dev_attr.attr, | 378 | &iio_const_attr_sampling_frequency_available.dev_attr.attr, |
429 | &iio_dev_attr_in_m_in_scale_available.dev_attr.attr, | 379 | &iio_dev_attr_in_m_in_scale_available.dev_attr.attr, |
430 | NULL | 380 | NULL |
@@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = { | |||
435 | }; | 385 | }; |
436 | 386 | ||
437 | static struct attribute *ad7797_attributes[] = { | 387 | static struct attribute *ad7797_attributes[] = { |
438 | &iio_dev_attr_sampling_frequency.dev_attr.attr, | ||
439 | &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr, | 388 | &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr, |
440 | NULL | 389 | NULL |
441 | }; | 390 | }; |
@@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, | |||
505 | *val -= offset; | 454 | *val -= offset; |
506 | } | 455 | } |
507 | return IIO_VAL_INT; | 456 | return IIO_VAL_INT; |
457 | case IIO_CHAN_INFO_SAMP_FREQ: | ||
458 | *val = st->chip_info | ||
459 | ->sample_freq_avail[AD7793_MODE_RATE(st->mode)]; | ||
460 | return IIO_VAL_INT; | ||
508 | } | 461 | } |
509 | return -EINVAL; | 462 | return -EINVAL; |
510 | } | 463 | } |
@@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev, | |||
542 | break; | 495 | break; |
543 | } | 496 | } |
544 | break; | 497 | break; |
498 | case IIO_CHAN_INFO_SAMP_FREQ: | ||
499 | if (!val) { | ||
500 | ret = -EINVAL; | ||
501 | break; | ||
502 | } | ||
503 | |||
504 | for (i = 0; i < 16; i++) | ||
505 | if (val == st->chip_info->sample_freq_avail[i]) | ||
506 | break; | ||
507 | |||
508 | if (i == 16) { | ||
509 | ret = -EINVAL; | ||
510 | break; | ||
511 | } | ||
512 | |||
513 | st->mode &= ~AD7793_MODE_RATE(-1); | ||
514 | st->mode |= AD7793_MODE_RATE(i); | ||
515 | ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), | ||
516 | st->mode); | ||
517 | break; | ||
545 | default: | 518 | default: |
546 | ret = -EINVAL; | 519 | ret = -EINVAL; |
547 | } | 520 | } |
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 4eff8351ce29..8729d6524b4d 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c | |||
@@ -333,6 +333,27 @@ static const struct iio_chan_spec at91_adc_channels[] = { | |||
333 | + AT91_SAMA5D2_DIFF_CHAN_CNT + 1), | 333 | + AT91_SAMA5D2_DIFF_CHAN_CNT + 1), |
334 | }; | 334 | }; |
335 | 335 | ||
336 | static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan) | ||
337 | { | ||
338 | int i; | ||
339 | |||
340 | for (i = 0; i < indio_dev->num_channels; i++) { | ||
341 | if (indio_dev->channels[i].scan_index == chan) | ||
342 | return i; | ||
343 | } | ||
344 | return -EINVAL; | ||
345 | } | ||
346 | |||
347 | static inline struct iio_chan_spec const * | ||
348 | at91_adc_chan_get(struct iio_dev *indio_dev, int chan) | ||
349 | { | ||
350 | int index = at91_adc_chan_xlate(indio_dev, chan); | ||
351 | |||
352 | if (index < 0) | ||
353 | return NULL; | ||
354 | return indio_dev->channels + index; | ||
355 | } | ||
356 | |||
336 | static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) | 357 | static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) |
337 | { | 358 | { |
338 | struct iio_dev *indio = iio_trigger_get_drvdata(trig); | 359 | struct iio_dev *indio = iio_trigger_get_drvdata(trig); |
@@ -350,8 +371,10 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) | |||
350 | at91_adc_writel(st, AT91_SAMA5D2_TRGR, status); | 371 | at91_adc_writel(st, AT91_SAMA5D2_TRGR, status); |
351 | 372 | ||
352 | for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) { | 373 | for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) { |
353 | struct iio_chan_spec const *chan = indio->channels + bit; | 374 | struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit); |
354 | 375 | ||
376 | if (!chan) | ||
377 | continue; | ||
355 | if (state) { | 378 | if (state) { |
356 | at91_adc_writel(st, AT91_SAMA5D2_CHER, | 379 | at91_adc_writel(st, AT91_SAMA5D2_CHER, |
357 | BIT(chan->channel)); | 380 | BIT(chan->channel)); |
@@ -448,7 +471,11 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev) | |||
448 | 471 | ||
449 | for_each_set_bit(bit, indio_dev->active_scan_mask, | 472 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
450 | indio_dev->num_channels) { | 473 | indio_dev->num_channels) { |
451 | struct iio_chan_spec const *chan = indio_dev->channels + bit; | 474 | struct iio_chan_spec const *chan = |
475 | at91_adc_chan_get(indio_dev, bit); | ||
476 | |||
477 | if (!chan) | ||
478 | continue; | ||
452 | 479 | ||
453 | st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8; | 480 | st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8; |
454 | } | 481 | } |
@@ -526,8 +553,11 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev) | |||
526 | */ | 553 | */ |
527 | for_each_set_bit(bit, indio_dev->active_scan_mask, | 554 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
528 | indio_dev->num_channels) { | 555 | indio_dev->num_channels) { |
529 | struct iio_chan_spec const *chan = indio_dev->channels + bit; | 556 | struct iio_chan_spec const *chan = |
557 | at91_adc_chan_get(indio_dev, bit); | ||
530 | 558 | ||
559 | if (!chan) | ||
560 | continue; | ||
531 | if (st->dma_st.dma_chan) | 561 | if (st->dma_st.dma_chan) |
532 | at91_adc_readl(st, chan->address); | 562 | at91_adc_readl(st, chan->address); |
533 | } | 563 | } |
@@ -587,8 +617,11 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev, | |||
587 | 617 | ||
588 | for_each_set_bit(bit, indio_dev->active_scan_mask, | 618 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
589 | indio_dev->num_channels) { | 619 | indio_dev->num_channels) { |
590 | struct iio_chan_spec const *chan = indio_dev->channels + bit; | 620 | struct iio_chan_spec const *chan = |
621 | at91_adc_chan_get(indio_dev, bit); | ||
591 | 622 | ||
623 | if (!chan) | ||
624 | continue; | ||
592 | st->buffer[i] = at91_adc_readl(st, chan->address); | 625 | st->buffer[i] = at91_adc_readl(st, chan->address); |
593 | i++; | 626 | i++; |
594 | } | 627 | } |
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 01422d11753c..b28a716a23b2 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c | |||
@@ -144,6 +144,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl, | |||
144 | * Leave as soon as the exact resolution is reached. | 144 | * Leave as soon as the exact resolution is reached. |
145 | * Otherwise the higher resolution below 32 bits is kept. | 145 | * Otherwise the higher resolution below 32 bits is kept. |
146 | */ | 146 | */ |
147 | fl->res = 0; | ||
147 | for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) { | 148 | for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) { |
148 | for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) { | 149 | for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) { |
149 | if (fast) | 150 | if (fast) |
@@ -193,7 +194,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl, | |||
193 | } | 194 | } |
194 | } | 195 | } |
195 | 196 | ||
196 | if (!fl->fosr) | 197 | if (!fl->res) |
197 | return -EINVAL; | 198 | return -EINVAL; |
198 | 199 | ||
199 | return 0; | 200 | return 0; |
@@ -770,7 +771,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev, | |||
770 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); | 771 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); |
771 | struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; | 772 | struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; |
772 | struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel]; | 773 | struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel]; |
773 | unsigned int spi_freq = adc->spi_freq; | 774 | unsigned int spi_freq; |
774 | int ret = -EINVAL; | 775 | int ret = -EINVAL; |
775 | 776 | ||
776 | switch (mask) { | 777 | switch (mask) { |
@@ -784,8 +785,18 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev, | |||
784 | case IIO_CHAN_INFO_SAMP_FREQ: | 785 | case IIO_CHAN_INFO_SAMP_FREQ: |
785 | if (!val) | 786 | if (!val) |
786 | return -EINVAL; | 787 | return -EINVAL; |
787 | if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL) | 788 | |
789 | switch (ch->src) { | ||
790 | case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL: | ||
788 | spi_freq = adc->dfsdm->spi_master_freq; | 791 | spi_freq = adc->dfsdm->spi_master_freq; |
792 | break; | ||
793 | case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING: | ||
794 | case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING: | ||
795 | spi_freq = adc->dfsdm->spi_master_freq / 2; | ||
796 | break; | ||
797 | default: | ||
798 | spi_freq = adc->spi_freq; | ||
799 | } | ||
789 | 800 | ||
790 | if (spi_freq % val) | 801 | if (spi_freq % val) |
791 | dev_warn(&indio_dev->dev, | 802 | dev_warn(&indio_dev->dev, |
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index 05e0c353e089..b32bf57910ca 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c | |||
@@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum); | |||
587 | * Should be used as the set_length callback for iio_buffer_access_ops | 587 | * Should be used as the set_length callback for iio_buffer_access_ops |
588 | * struct for DMA buffers. | 588 | * struct for DMA buffers. |
589 | */ | 589 | */ |
590 | int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length) | 590 | int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length) |
591 | { | 591 | { |
592 | /* Avoid an invalid state */ | 592 | /* Avoid an invalid state */ |
593 | if (length < 2) | 593 | if (length < 2) |
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c index 047fe757ab97..70c302a93d7f 100644 --- a/drivers/iio/buffer/kfifo_buf.c +++ b/drivers/iio/buffer/kfifo_buf.c | |||
@@ -22,11 +22,18 @@ struct iio_kfifo { | |||
22 | #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) | 22 | #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) |
23 | 23 | ||
24 | static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, | 24 | static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, |
25 | int bytes_per_datum, int length) | 25 | size_t bytes_per_datum, unsigned int length) |
26 | { | 26 | { |
27 | if ((length == 0) || (bytes_per_datum == 0)) | 27 | if ((length == 0) || (bytes_per_datum == 0)) |
28 | return -EINVAL; | 28 | return -EINVAL; |
29 | 29 | ||
30 | /* | ||
31 | * Make sure we don't overflow an unsigned int after kfifo rounds up to | ||
32 | * the next power of 2. | ||
33 | */ | ||
34 | if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum) | ||
35 | return -EINVAL; | ||
36 | |||
30 | return __kfifo_alloc((struct __kfifo *)&buf->kf, length, | 37 | return __kfifo_alloc((struct __kfifo *)&buf->kf, length, |
31 | bytes_per_datum, GFP_KERNEL); | 38 | bytes_per_datum, GFP_KERNEL); |
32 | } | 39 | } |
@@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd) | |||
67 | return 0; | 74 | return 0; |
68 | } | 75 | } |
69 | 76 | ||
70 | static int iio_set_length_kfifo(struct iio_buffer *r, int length) | 77 | static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length) |
71 | { | 78 | { |
72 | /* Avoid an invalid state */ | 79 | /* Avoid an invalid state */ |
73 | if (length < 2) | 80 | if (length < 2) |
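
The kfifo change guards against an unsigned overflow once __kfifo_alloc rounds the requested length up to a power of two before multiplying by the element size. A small user-space sketch of the same check follows, with a hand-rolled helper standing in for the kernel's roundup_pow_of_two(); the function names are invented for the sketch.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <limits.h>

/* User-space stand-in for the kernel's roundup_pow_of_two(). */
static uint64_t roundup_pow_of_two_u64(uint64_t v)
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* kfifo rounds the element count up to a power of two before it
 * multiplies by the element size, so the overflow check has to be done
 * against the rounded-up count, not the requested one. */
static int kfifo_size_ok(unsigned int length, size_t bytes_per_datum)
{
	if (length == 0 || bytes_per_datum == 0)
		return 0;
	if (roundup_pow_of_two_u64(length) > UINT_MAX / bytes_per_datum)
		return 0;	/* would overflow an unsigned int */
	return 1;
}

int main(void)
{
	printf("%d\n", kfifo_size_ok(128, 8));		/* fine */
	printf("%d\n", kfifo_size_ok(0x30000000, 16));	/* rounds to 2^30, overflows */
	return 0;
}
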
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index cfb6588565ba..4905a997a7ec 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c | |||
@@ -178,14 +178,14 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) | |||
178 | #ifdef CONFIG_PM | 178 | #ifdef CONFIG_PM |
179 | int ret; | 179 | int ret; |
180 | 180 | ||
181 | atomic_set(&st->user_requested_state, state); | ||
182 | |||
183 | if (atomic_add_unless(&st->runtime_pm_enable, 1, 1)) | 181 | if (atomic_add_unless(&st->runtime_pm_enable, 1, 1)) |
184 | pm_runtime_enable(&st->pdev->dev); | 182 | pm_runtime_enable(&st->pdev->dev); |
185 | 183 | ||
186 | if (state) | 184 | if (state) { |
185 | atomic_inc(&st->user_requested_state); | ||
187 | ret = pm_runtime_get_sync(&st->pdev->dev); | 186 | ret = pm_runtime_get_sync(&st->pdev->dev); |
188 | else { | 187 | } else { |
188 | atomic_dec(&st->user_requested_state); | ||
189 | pm_runtime_mark_last_busy(&st->pdev->dev); | 189 | pm_runtime_mark_last_busy(&st->pdev->dev); |
190 | pm_runtime_use_autosuspend(&st->pdev->dev); | 190 | pm_runtime_use_autosuspend(&st->pdev->dev); |
191 | ret = pm_runtime_put_autosuspend(&st->pdev->dev); | 191 | ret = pm_runtime_put_autosuspend(&st->pdev->dev); |
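
The hid-sensor-trigger change turns user_requested_state from a flag that was overwritten on every call into a count that is incremented on power-up and decremented on power-down, so nested enable/disable requests balance out. A toy sketch of that reference-count style bookkeeping is below; it uses a plain int instead of atomic_t and an invented function name.

#include <stdio.h>

static int user_requested_state;

/* Count requests instead of recording only the last caller's wish. */
static void power_state(int on)
{
	if (on)
		user_requested_state++;
	else
		user_requested_state--;
	printf("state after %s: %d\n", on ? "enable" : "disable",
	       user_requested_state);
}

int main(void)
{
	power_state(1);		/* first user turns the sensor on */
	power_state(1);		/* second user piggybacks */
	power_state(0);		/* first user done: still requested */
	power_state(0);		/* last user done: back to zero */
	return 0;
}
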
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index fb2d347f760f..ecc55e98ddd3 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
@@ -502,7 +502,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index, | |||
502 | return -EINVAL; | 502 | return -EINVAL; |
503 | 503 | ||
504 | if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) | 504 | if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) |
505 | return -EAGAIN; | 505 | return -EINVAL; |
506 | 506 | ||
507 | memcpy(gid, &table->data_vec[index].gid, sizeof(*gid)); | 507 | memcpy(gid, &table->data_vec[index].gid, sizeof(*gid)); |
508 | if (attr) { | 508 | if (attr) { |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index f6c739ec8b62..20b9f31052bf 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p) | |||
185 | bnxt_re_ib_unreg(rdev, false); | 185 | bnxt_re_ib_unreg(rdev, false); |
186 | } | 186 | } |
187 | 187 | ||
188 | static void bnxt_re_stop_irq(void *handle) | ||
189 | { | ||
190 | struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; | ||
191 | struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw; | ||
192 | struct bnxt_qplib_nq *nq; | ||
193 | int indx; | ||
194 | |||
195 | for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) { | ||
196 | nq = &rdev->nq[indx - 1]; | ||
197 | bnxt_qplib_nq_stop_irq(nq, false); | ||
198 | } | ||
199 | |||
200 | bnxt_qplib_rcfw_stop_irq(rcfw, false); | ||
201 | } | ||
202 | |||
203 | static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) | ||
204 | { | ||
205 | struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; | ||
206 | struct bnxt_msix_entry *msix_ent = rdev->msix_entries; | ||
207 | struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw; | ||
208 | struct bnxt_qplib_nq *nq; | ||
209 | int indx, rc; | ||
210 | |||
211 | if (!ent) { | ||
212 | /* Not setting the f/w timeout bit in rcfw. | ||
213 | * During the driver unload the first command | ||
214 | * to f/w will timeout and that will set the | ||
215 | * timeout bit. | ||
216 | */ | ||
217 | dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n"); | ||
218 | return; | ||
219 | } | ||
220 | |||
221 | /* Vectors may change after restart, so update with new vectors | ||
222 | * in device structure. | ||
223 | */ | ||
224 | for (indx = 0; indx < rdev->num_msix; indx++) | ||
225 | rdev->msix_entries[indx].vector = ent[indx].vector; | ||
226 | |||
227 | bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector, | ||
228 | false); | ||
229 | for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) { | ||
230 | nq = &rdev->nq[indx - 1]; | ||
231 | rc = bnxt_qplib_nq_start_irq(nq, indx - 1, | ||
232 | msix_ent[indx].vector, false); | ||
233 | if (rc) | ||
234 | dev_warn(rdev_to_dev(rdev), | ||
235 | "Failed to reinit NQ index %d\n", indx - 1); | ||
236 | } | ||
237 | } | ||
238 | |||
188 | static struct bnxt_ulp_ops bnxt_re_ulp_ops = { | 239 | static struct bnxt_ulp_ops bnxt_re_ulp_ops = { |
189 | .ulp_async_notifier = NULL, | 240 | .ulp_async_notifier = NULL, |
190 | .ulp_stop = bnxt_re_stop, | 241 | .ulp_stop = bnxt_re_stop, |
191 | .ulp_start = bnxt_re_start, | 242 | .ulp_start = bnxt_re_start, |
192 | .ulp_sriov_config = bnxt_re_sriov_config, | 243 | .ulp_sriov_config = bnxt_re_sriov_config, |
193 | .ulp_shutdown = bnxt_re_shutdown | 244 | .ulp_shutdown = bnxt_re_shutdown, |
245 | .ulp_irq_stop = bnxt_re_stop_irq, | ||
246 | .ulp_irq_restart = bnxt_re_start_irq | ||
194 | }; | 247 | }; |
195 | 248 | ||
196 | /* RoCE -> Net driver */ | 249 | /* RoCE -> Net driver */ |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 3a78faba8d91..50d8f1fc98d5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c | |||
@@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) | |||
336 | return IRQ_HANDLED; | 336 | return IRQ_HANDLED; |
337 | } | 337 | } |
338 | 338 | ||
339 | void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill) | ||
340 | { | ||
341 | tasklet_disable(&nq->worker); | ||
342 | /* Mask h/w interrupt */ | ||
343 | NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements); | ||
344 | /* Sync with last running IRQ handler */ | ||
345 | synchronize_irq(nq->vector); | ||
346 | if (kill) | ||
347 | tasklet_kill(&nq->worker); | ||
348 | if (nq->requested) { | ||
349 | irq_set_affinity_hint(nq->vector, NULL); | ||
350 | free_irq(nq->vector, nq); | ||
351 | nq->requested = false; | ||
352 | } | ||
353 | } | ||
354 | |||
339 | void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) | 355 | void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) |
340 | { | 356 | { |
341 | if (nq->cqn_wq) { | 357 | if (nq->cqn_wq) { |
342 | destroy_workqueue(nq->cqn_wq); | 358 | destroy_workqueue(nq->cqn_wq); |
343 | nq->cqn_wq = NULL; | 359 | nq->cqn_wq = NULL; |
344 | } | 360 | } |
361 | |||
345 | /* Make sure the HW is stopped! */ | 362 | /* Make sure the HW is stopped! */ |
346 | synchronize_irq(nq->vector); | 363 | bnxt_qplib_nq_stop_irq(nq, true); |
347 | tasklet_disable(&nq->worker); | ||
348 | tasklet_kill(&nq->worker); | ||
349 | 364 | ||
350 | if (nq->requested) { | ||
351 | irq_set_affinity_hint(nq->vector, NULL); | ||
352 | free_irq(nq->vector, nq); | ||
353 | nq->requested = false; | ||
354 | } | ||
355 | if (nq->bar_reg_iomem) | 365 | if (nq->bar_reg_iomem) |
356 | iounmap(nq->bar_reg_iomem); | 366 | iounmap(nq->bar_reg_iomem); |
357 | nq->bar_reg_iomem = NULL; | 367 | nq->bar_reg_iomem = NULL; |
@@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) | |||
361 | nq->vector = 0; | 371 | nq->vector = 0; |
362 | } | 372 | } |
363 | 373 | ||
374 | int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, | ||
375 | int msix_vector, bool need_init) | ||
376 | { | ||
377 | int rc; | ||
378 | |||
379 | if (nq->requested) | ||
380 | return -EFAULT; | ||
381 | |||
382 | nq->vector = msix_vector; | ||
383 | if (need_init) | ||
384 | tasklet_init(&nq->worker, bnxt_qplib_service_nq, | ||
385 | (unsigned long)nq); | ||
386 | else | ||
387 | tasklet_enable(&nq->worker); | ||
388 | |||
389 | snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx); | ||
390 | rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq); | ||
391 | if (rc) | ||
392 | return rc; | ||
393 | |||
394 | cpumask_clear(&nq->mask); | ||
395 | cpumask_set_cpu(nq_indx, &nq->mask); | ||
396 | rc = irq_set_affinity_hint(nq->vector, &nq->mask); | ||
397 | if (rc) { | ||
398 | dev_warn(&nq->pdev->dev, | ||
399 | "QPLIB: set affinity failed; vector: %d nq_idx: %d\n", | ||
400 | nq->vector, nq_indx); | ||
401 | } | ||
402 | nq->requested = true; | ||
403 | NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements); | ||
404 | |||
405 | return rc; | ||
406 | } | ||
407 | |||
364 | int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, | 408 | int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, |
365 | int nq_idx, int msix_vector, int bar_reg_offset, | 409 | int nq_idx, int msix_vector, int bar_reg_offset, |
366 | int (*cqn_handler)(struct bnxt_qplib_nq *nq, | 410 | int (*cqn_handler)(struct bnxt_qplib_nq *nq, |
@@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, | |||
372 | resource_size_t nq_base; | 416 | resource_size_t nq_base; |
373 | int rc = -1; | 417 | int rc = -1; |
374 | 418 | ||
375 | nq->pdev = pdev; | ||
376 | nq->vector = msix_vector; | ||
377 | if (cqn_handler) | 419 | if (cqn_handler) |
378 | nq->cqn_handler = cqn_handler; | 420 | nq->cqn_handler = cqn_handler; |
379 | 421 | ||
380 | if (srqn_handler) | 422 | if (srqn_handler) |
381 | nq->srqn_handler = srqn_handler; | 423 | nq->srqn_handler = srqn_handler; |
382 | 424 | ||
383 | tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq); | ||
384 | |||
385 | /* Have a task to schedule CQ notifiers in post send case */ | 425 | /* Have a task to schedule CQ notifiers in post send case */ |
386 | nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq"); | 426 | nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq"); |
387 | if (!nq->cqn_wq) | 427 | if (!nq->cqn_wq) |
388 | goto fail; | 428 | return -ENOMEM; |
389 | |||
390 | nq->requested = false; | ||
391 | memset(nq->name, 0, 32); | ||
392 | sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx); | ||
393 | rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq); | ||
394 | if (rc) { | ||
395 | dev_err(&nq->pdev->dev, | ||
396 | "Failed to request IRQ for NQ: %#x", rc); | ||
397 | goto fail; | ||
398 | } | ||
399 | |||
400 | cpumask_clear(&nq->mask); | ||
401 | cpumask_set_cpu(nq_idx, &nq->mask); | ||
402 | rc = irq_set_affinity_hint(nq->vector, &nq->mask); | ||
403 | if (rc) { | ||
404 | dev_warn(&nq->pdev->dev, | ||
405 | "QPLIB: set affinity failed; vector: %d nq_idx: %d\n", | ||
406 | nq->vector, nq_idx); | ||
407 | } | ||
408 | 429 | ||
409 | nq->requested = true; | ||
410 | nq->bar_reg = NQ_CONS_PCI_BAR_REGION; | 430 | nq->bar_reg = NQ_CONS_PCI_BAR_REGION; |
411 | nq->bar_reg_off = bar_reg_offset; | 431 | nq->bar_reg_off = bar_reg_offset; |
412 | nq_base = pci_resource_start(pdev, nq->bar_reg); | 432 | nq_base = pci_resource_start(pdev, nq->bar_reg); |
@@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, | |||
419 | rc = -ENOMEM; | 439 | rc = -ENOMEM; |
420 | goto fail; | 440 | goto fail; |
421 | } | 441 | } |
422 | NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements); | 442 | |
443 | rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true); | ||
444 | if (rc) { | ||
445 | dev_err(&nq->pdev->dev, | ||
446 | "QPLIB: Failed to request irq for nq-idx %d", nq_idx); | ||
447 | goto fail; | ||
448 | } | ||
423 | 449 | ||
424 | return 0; | 450 | return 0; |
425 | fail: | 451 | fail: |
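
The qplib_fp.c rework splits IRQ setup and teardown out of enable/disable so interrupts can be stopped and restarted across a reset without a full teardown: the tasklet is only initialized on the first start (need_init) and merely re-enabled on later restarts. Below is a toy user-space model of that start/stop split; the struct and function names are invented, and request_irq()/free_irq()/tasklet calls are replaced by flag updates and prints.

#include <stdio.h>
#include <stdbool.h>

struct nq {
	bool requested;		/* IRQ currently requested */
	bool initialized;	/* tasklet already initialized */
	int vector;
};

static int nq_start_irq(struct nq *nq, int vector, bool need_init)
{
	if (nq->requested)
		return -1;	/* already running; the driver returns -EFAULT */

	nq->vector = vector;
	if (need_init)
		nq->initialized = true;	/* stands in for tasklet_init() */
	nq->requested = true;		/* stands in for request_irq() */
	printf("started on vector %d (init=%d)\n", vector, need_init);
	return 0;
}

static void nq_stop_irq(struct nq *nq, bool kill)
{
	if (nq->requested) {
		nq->requested = false;	/* stands in for free_irq() */
		printf("stopped%s\n", kill ? " and killed" : "");
	}
}

int main(void)
{
	struct nq nq = { 0 };

	nq_start_irq(&nq, 5, true);	/* first enable: full init */
	nq_stop_irq(&nq, false);	/* reset path: keep the tasklet */
	nq_start_irq(&nq, 9, false);	/* restart with a new vector */
	nq_stop_irq(&nq, true);		/* final teardown */
	return 0;
}
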
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index ade9f13c0fd1..72352ca80ace 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h | |||
@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work { | |||
467 | struct bnxt_qplib_cq *cq; | 467 | struct bnxt_qplib_cq *cq; |
468 | }; | 468 | }; |
469 | 469 | ||
470 | void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill); | ||
470 | void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq); | 471 | void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq); |
472 | int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx, | ||
473 | int msix_vector, bool need_init); | ||
471 | int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, | 474 | int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, |
472 | int nq_idx, int msix_vector, int bar_reg_offset, | 475 | int nq_idx, int msix_vector, int bar_reg_offset, |
473 | int (*cqn_handler)(struct bnxt_qplib_nq *nq, | 476 | int (*cqn_handler)(struct bnxt_qplib_nq *nq, |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 80027a494730..2852d350ada1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | |||
@@ -582,19 +582,29 @@ fail: | |||
582 | return -ENOMEM; | 582 | return -ENOMEM; |
583 | } | 583 | } |
584 | 584 | ||
585 | void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) | 585 | void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill) |
586 | { | 586 | { |
587 | unsigned long indx; | ||
588 | |||
589 | /* Make sure the HW channel is stopped! */ | ||
590 | synchronize_irq(rcfw->vector); | ||
591 | tasklet_disable(&rcfw->worker); | 587 | tasklet_disable(&rcfw->worker); |
592 | tasklet_kill(&rcfw->worker); | 588 | /* Mask h/w interrupts */ |
589 | CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons, | ||
590 | rcfw->creq.max_elements); | ||
591 | /* Sync with last running IRQ-handler */ | ||
592 | synchronize_irq(rcfw->vector); | ||
593 | if (kill) | ||
594 | tasklet_kill(&rcfw->worker); | ||
593 | 595 | ||
594 | if (rcfw->requested) { | 596 | if (rcfw->requested) { |
595 | free_irq(rcfw->vector, rcfw); | 597 | free_irq(rcfw->vector, rcfw); |
596 | rcfw->requested = false; | 598 | rcfw->requested = false; |
597 | } | 599 | } |
600 | } | ||
601 | |||
602 | void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) | ||
603 | { | ||
604 | unsigned long indx; | ||
605 | |||
606 | bnxt_qplib_rcfw_stop_irq(rcfw, true); | ||
607 | |||
598 | if (rcfw->cmdq_bar_reg_iomem) | 608 | if (rcfw->cmdq_bar_reg_iomem) |
599 | iounmap(rcfw->cmdq_bar_reg_iomem); | 609 | iounmap(rcfw->cmdq_bar_reg_iomem); |
600 | rcfw->cmdq_bar_reg_iomem = NULL; | 610 | rcfw->cmdq_bar_reg_iomem = NULL; |
@@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) | |||
614 | rcfw->vector = 0; | 624 | rcfw->vector = 0; |
615 | } | 625 | } |
616 | 626 | ||
627 | int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, | ||
628 | bool need_init) | ||
629 | { | ||
630 | int rc; | ||
631 | |||
632 | if (rcfw->requested) | ||
633 | return -EFAULT; | ||
634 | |||
635 | rcfw->vector = msix_vector; | ||
636 | if (need_init) | ||
637 | tasklet_init(&rcfw->worker, | ||
638 | bnxt_qplib_service_creq, (unsigned long)rcfw); | ||
639 | else | ||
640 | tasklet_enable(&rcfw->worker); | ||
641 | rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0, | ||
642 | "bnxt_qplib_creq", rcfw); | ||
643 | if (rc) | ||
644 | return rc; | ||
645 | rcfw->requested = true; | ||
646 | CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons, | ||
647 | rcfw->creq.max_elements); | ||
648 | |||
649 | return 0; | ||
650 | } | ||
651 | |||
617 | int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | 652 | int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, |
618 | struct bnxt_qplib_rcfw *rcfw, | 653 | struct bnxt_qplib_rcfw *rcfw, |
619 | int msix_vector, | 654 | int msix_vector, |
@@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | |||
675 | rcfw->creq_qp_event_processed = 0; | 710 | rcfw->creq_qp_event_processed = 0; |
676 | rcfw->creq_func_event_processed = 0; | 711 | rcfw->creq_func_event_processed = 0; |
677 | 712 | ||
678 | rcfw->vector = msix_vector; | ||
679 | if (aeq_handler) | 713 | if (aeq_handler) |
680 | rcfw->aeq_handler = aeq_handler; | 714 | rcfw->aeq_handler = aeq_handler; |
715 | init_waitqueue_head(&rcfw->waitq); | ||
681 | 716 | ||
682 | tasklet_init(&rcfw->worker, bnxt_qplib_service_creq, | 717 | rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true); |
683 | (unsigned long)rcfw); | ||
684 | |||
685 | rcfw->requested = false; | ||
686 | rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0, | ||
687 | "bnxt_qplib_creq", rcfw); | ||
688 | if (rc) { | 718 | if (rc) { |
689 | dev_err(&rcfw->pdev->dev, | 719 | dev_err(&rcfw->pdev->dev, |
690 | "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc); | 720 | "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc); |
691 | bnxt_qplib_disable_rcfw_channel(rcfw); | 721 | bnxt_qplib_disable_rcfw_channel(rcfw); |
692 | return rc; | 722 | return rc; |
693 | } | 723 | } |
694 | rcfw->requested = true; | ||
695 | |||
696 | init_waitqueue_head(&rcfw->waitq); | ||
697 | |||
698 | CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements); | ||
699 | 724 | ||
700 | init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]); | 725 | init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]); |
701 | init.cmdq_size_cmdq_lvl = cpu_to_le16( | 726 | init.cmdq_size_cmdq_lvl = cpu_to_le16( |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index c7cce2e4185e..46416dfe8830 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | |||
@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw { | |||
195 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); | 195 | void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); |
196 | int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, | 196 | int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, |
197 | struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz); | 197 | struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz); |
198 | void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill); | ||
198 | void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); | 199 | void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); |
200 | int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, | ||
201 | bool need_init); | ||
199 | int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, | 202 | int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, |
200 | struct bnxt_qplib_rcfw *rcfw, | 203 | struct bnxt_qplib_rcfw *rcfw, |
201 | int msix_vector, | 204 | int msix_vector, |
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig index 25bf6955b6d0..fb8b7182f05e 100644 --- a/drivers/infiniband/ulp/srpt/Kconfig +++ b/drivers/infiniband/ulp/srpt/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config INFINIBAND_SRPT | 1 | config INFINIBAND_SRPT |
2 | tristate "InfiniBand SCSI RDMA Protocol target support" | 2 | tristate "InfiniBand SCSI RDMA Protocol target support" |
3 | depends on INFINIBAND_ADDR_TRANS && TARGET_CORE | 3 | depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE |
4 | ---help--- | 4 | ---help--- |
5 | 5 | ||
6 | Support for the SCSI RDMA Protocol (SRP) Target driver. The | 6 | Support for the SCSI RDMA Protocol (SRP) Target driver. The |
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index 29f99529b187..cfcb32559925 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c | |||
@@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client, | |||
130 | bool max_baseline, u8 *value) | 130 | bool max_baseline, u8 *value) |
131 | { | 131 | { |
132 | int error; | 132 | int error; |
133 | u8 val[3]; | 133 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
134 | 134 | ||
135 | error = i2c_smbus_read_block_data(client, | 135 | error = i2c_smbus_read_block_data(client, |
136 | max_baseline ? | 136 | max_baseline ? |
@@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client, | |||
149 | bool iap, u8 *version) | 149 | bool iap, u8 *version) |
150 | { | 150 | { |
151 | int error; | 151 | int error; |
152 | u8 val[3]; | 152 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
153 | 153 | ||
154 | error = i2c_smbus_read_block_data(client, | 154 | error = i2c_smbus_read_block_data(client, |
155 | iap ? ETP_SMBUS_IAP_VERSION_CMD : | 155 | iap ? ETP_SMBUS_IAP_VERSION_CMD : |
@@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, | |||
170 | u8 *clickpad) | 170 | u8 *clickpad) |
171 | { | 171 | { |
172 | int error; | 172 | int error; |
173 | u8 val[3]; | 173 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
174 | 174 | ||
175 | error = i2c_smbus_read_block_data(client, | 175 | error = i2c_smbus_read_block_data(client, |
176 | ETP_SMBUS_SM_VERSION_CMD, val); | 176 | ETP_SMBUS_SM_VERSION_CMD, val); |
@@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, | |||
188 | static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) | 188 | static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) |
189 | { | 189 | { |
190 | int error; | 190 | int error; |
191 | u8 val[3]; | 191 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
192 | 192 | ||
193 | error = i2c_smbus_read_block_data(client, | 193 | error = i2c_smbus_read_block_data(client, |
194 | ETP_SMBUS_UNIQUEID_CMD, val); | 194 | ETP_SMBUS_UNIQUEID_CMD, val); |
@@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client, | |||
205 | bool iap, u16 *csum) | 205 | bool iap, u16 *csum) |
206 | { | 206 | { |
207 | int error; | 207 | int error; |
208 | u8 val[3]; | 208 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
209 | 209 | ||
210 | error = i2c_smbus_read_block_data(client, | 210 | error = i2c_smbus_read_block_data(client, |
211 | iap ? ETP_SMBUS_FW_CHECKSUM_CMD : | 211 | iap ? ETP_SMBUS_FW_CHECKSUM_CMD : |
@@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client, | |||
226 | { | 226 | { |
227 | int ret; | 227 | int ret; |
228 | int error; | 228 | int error; |
229 | u8 val[3]; | 229 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
230 | 230 | ||
231 | ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val); | 231 | ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val); |
232 | if (ret != 3) { | 232 | if (ret != 3) { |
@@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client, | |||
246 | { | 246 | { |
247 | int ret; | 247 | int ret; |
248 | int error; | 248 | int error; |
249 | u8 val[3]; | 249 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
250 | 250 | ||
251 | ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val); | 251 | ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val); |
252 | if (ret != 3) { | 252 | if (ret != 3) { |
@@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client, | |||
267 | { | 267 | { |
268 | int ret; | 268 | int ret; |
269 | int error; | 269 | int error; |
270 | u8 val[3]; | 270 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
271 | 271 | ||
272 | ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val); | 272 | ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val); |
273 | if (ret != 3) { | 273 | if (ret != 3) { |
@@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client, | |||
294 | { | 294 | { |
295 | int error; | 295 | int error; |
296 | u16 constant; | 296 | u16 constant; |
297 | u8 val[3]; | 297 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
298 | 298 | ||
299 | error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val); | 299 | error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val); |
300 | if (error < 0) { | 300 | if (error < 0) { |
@@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client) | |||
345 | int len; | 345 | int len; |
346 | int error; | 346 | int error; |
347 | enum tp_mode mode; | 347 | enum tp_mode mode; |
348 | u8 val[3]; | 348 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
349 | u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06}; | 349 | u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06}; |
350 | u16 password; | 350 | u16 password; |
351 | 351 | ||
@@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client, | |||
419 | struct device *dev = &client->dev; | 419 | struct device *dev = &client->dev; |
420 | int error; | 420 | int error; |
421 | u16 result; | 421 | u16 result; |
422 | u8 val[3]; | 422 | u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; |
423 | 423 | ||
424 | /* | 424 | /* |
425 | * Due to the limitation of smbus protocol limiting | 425 | * Due to the limitation of smbus protocol limiting |
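
Every elan_smbus_* helper above grows its local buffer from 3 bytes to I2C_SMBUS_BLOCK_MAX, because an SMBus block read lets the device decide how many bytes come back, up to 32. A stand-alone sketch of the sizing rule follows; the fake block-read function is an invented stand-in for i2c_smbus_read_block_data(), only the 32-byte constant matches linux/i2c.h.

#include <stdio.h>
#include <string.h>

#define I2C_SMBUS_BLOCK_MAX 32	/* same value as in linux/i2c.h */

/* The device, not the caller, decides how many bytes a block read
 * returns; a touchpad answering with more than 3 bytes would overrun a
 * u8 val[3] buffer on the stack. */
static int fake_block_read(unsigned char *buf)
{
	const unsigned char from_device[] = {
		0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
	};
	int len = sizeof(from_device);

	memcpy(buf, from_device, len);
	return len;	/* number of bytes the device returned */
}

int main(void)
{
	/* Size the buffer for the worst case the protocol allows, and
	 * zero it so short reads leave deterministic contents. */
	unsigned char val[I2C_SMBUS_BLOCK_MAX] = { 0 };
	int len = fake_block_read(val);

	printf("device returned %d bytes, first three: %02x %02x %02x\n",
	       len, val[0], val[1], val[2]);
	return 0;
}
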
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 60f2c463d1cc..a9591d278145 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = { | |||
172 | "LEN0048", /* X1 Carbon 3 */ | 172 | "LEN0048", /* X1 Carbon 3 */ |
173 | "LEN0046", /* X250 */ | 173 | "LEN0046", /* X250 */ |
174 | "LEN004a", /* W541 */ | 174 | "LEN004a", /* W541 */ |
175 | "LEN0071", /* T480 */ | ||
176 | "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ | ||
177 | "LEN0073", /* X1 Carbon G5 (Elantech) */ | ||
178 | "LEN0092", /* X1 Carbon 6 */ | ||
179 | "LEN0096", /* X280 */ | ||
180 | "LEN0097", /* X280 -> ALPS trackpoint */ | ||
175 | "LEN200f", /* T450s */ | 181 | "LEN200f", /* T450s */ |
176 | NULL | 182 | NULL |
177 | }; | 183 | }; |
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 9f561fe505cb..db9a80e1ee14 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c | |||
@@ -1768,6 +1768,18 @@ static const struct b53_chip_data b53_switch_chips[] = { | |||
1768 | .duplex_reg = B53_DUPLEX_STAT_FE, | 1768 | .duplex_reg = B53_DUPLEX_STAT_FE, |
1769 | }, | 1769 | }, |
1770 | { | 1770 | { |
1771 | .chip_id = BCM5389_DEVICE_ID, | ||
1772 | .dev_name = "BCM5389", | ||
1773 | .vlans = 4096, | ||
1774 | .enabled_ports = 0x1f, | ||
1775 | .arl_entries = 4, | ||
1776 | .cpu_port = B53_CPU_PORT, | ||
1777 | .vta_regs = B53_VTA_REGS, | ||
1778 | .duplex_reg = B53_DUPLEX_STAT_GE, | ||
1779 | .jumbo_pm_reg = B53_JUMBO_PORT_MASK, | ||
1780 | .jumbo_size_reg = B53_JUMBO_MAX_SIZE, | ||
1781 | }, | ||
1782 | { | ||
1771 | .chip_id = BCM5395_DEVICE_ID, | 1783 | .chip_id = BCM5395_DEVICE_ID, |
1772 | .dev_name = "BCM5395", | 1784 | .dev_name = "BCM5395", |
1773 | .vlans = 4096, | 1785 | .vlans = 4096, |
@@ -2099,6 +2111,7 @@ int b53_switch_detect(struct b53_device *dev) | |||
2099 | else | 2111 | else |
2100 | dev->chip_id = BCM5365_DEVICE_ID; | 2112 | dev->chip_id = BCM5365_DEVICE_ID; |
2101 | break; | 2113 | break; |
2114 | case BCM5389_DEVICE_ID: | ||
2102 | case BCM5395_DEVICE_ID: | 2115 | case BCM5395_DEVICE_ID: |
2103 | case BCM5397_DEVICE_ID: | 2116 | case BCM5397_DEVICE_ID: |
2104 | case BCM5398_DEVICE_ID: | 2117 | case BCM5398_DEVICE_ID: |
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index fa7556f5d4fb..a533a90e3904 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c | |||
@@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = { | |||
285 | #define B53_BRCM_OUI_1 0x0143bc00 | 285 | #define B53_BRCM_OUI_1 0x0143bc00 |
286 | #define B53_BRCM_OUI_2 0x03625c00 | 286 | #define B53_BRCM_OUI_2 0x03625c00 |
287 | #define B53_BRCM_OUI_3 0x00406000 | 287 | #define B53_BRCM_OUI_3 0x00406000 |
288 | #define B53_BRCM_OUI_4 0x01410c00 | ||
288 | 289 | ||
289 | static int b53_mdio_probe(struct mdio_device *mdiodev) | 290 | static int b53_mdio_probe(struct mdio_device *mdiodev) |
290 | { | 291 | { |
@@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev) | |||
311 | */ | 312 | */ |
312 | if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 && | 313 | if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 && |
313 | (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 && | 314 | (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 && |
314 | (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) { | 315 | (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 && |
316 | (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) { | ||
315 | dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id); | 317 | dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id); |
316 | return -ENODEV; | 318 | return -ENODEV; |
317 | } | 319 | } |
@@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = { | |||
360 | { .compatible = "brcm,bcm53125" }, | 362 | { .compatible = "brcm,bcm53125" }, |
361 | { .compatible = "brcm,bcm53128" }, | 363 | { .compatible = "brcm,bcm53128" }, |
362 | { .compatible = "brcm,bcm5365" }, | 364 | { .compatible = "brcm,bcm5365" }, |
365 | { .compatible = "brcm,bcm5389" }, | ||
363 | { .compatible = "brcm,bcm5395" }, | 366 | { .compatible = "brcm,bcm5395" }, |
364 | { .compatible = "brcm,bcm5397" }, | 367 | { .compatible = "brcm,bcm5397" }, |
365 | { .compatible = "brcm,bcm5398" }, | 368 | { .compatible = "brcm,bcm5398" }, |
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index cc284a514de9..75d9ffb75da8 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h | |||
@@ -48,6 +48,7 @@ struct b53_io_ops { | |||
48 | enum { | 48 | enum { |
49 | BCM5325_DEVICE_ID = 0x25, | 49 | BCM5325_DEVICE_ID = 0x25, |
50 | BCM5365_DEVICE_ID = 0x65, | 50 | BCM5365_DEVICE_ID = 0x65, |
51 | BCM5389_DEVICE_ID = 0x89, | ||
51 | BCM5395_DEVICE_ID = 0x95, | 52 | BCM5395_DEVICE_ID = 0x95, |
52 | BCM5397_DEVICE_ID = 0x97, | 53 | BCM5397_DEVICE_ID = 0x97, |
53 | BCM5398_DEVICE_ID = 0x98, | 54 | BCM5398_DEVICE_ID = 0x98, |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c697e79e491e..8f755009ff38 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter) | |||
3309 | if ((val & POST_STAGE_FAT_LOG_START) | 3309 | if ((val & POST_STAGE_FAT_LOG_START) |
3310 | != POST_STAGE_FAT_LOG_START && | 3310 | != POST_STAGE_FAT_LOG_START && |
3311 | (val & POST_STAGE_ARMFW_UE) | 3311 | (val & POST_STAGE_ARMFW_UE) |
3312 | != POST_STAGE_ARMFW_UE) | 3312 | != POST_STAGE_ARMFW_UE && |
3313 | (val & POST_STAGE_RECOVERABLE_ERR) | ||
3314 | != POST_STAGE_RECOVERABLE_ERR) | ||
3313 | return; | 3315 | return; |
3314 | } | 3316 | } |
3315 | 3317 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 031d65c4178d..ba3035c08572 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -9049,7 +9049,6 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, | |||
9049 | { | 9049 | { |
9050 | const struct tc_action *a; | 9050 | const struct tc_action *a; |
9051 | LIST_HEAD(actions); | 9051 | LIST_HEAD(actions); |
9052 | int err; | ||
9053 | 9052 | ||
9054 | if (!tcf_exts_has_actions(exts)) | 9053 | if (!tcf_exts_has_actions(exts)) |
9055 | return -EINVAL; | 9054 | return -EINVAL; |
@@ -9070,11 +9069,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, | |||
9070 | 9069 | ||
9071 | if (!dev) | 9070 | if (!dev) |
9072 | return -EINVAL; | 9071 | return -EINVAL; |
9073 | err = handle_redirect_action(adapter, dev->ifindex, queue, | 9072 | return handle_redirect_action(adapter, dev->ifindex, |
9074 | action); | 9073 | queue, action); |
9075 | if (err == 0) | ||
9076 | return err; | ||
9077 | } | 9074 | } |
9075 | |||
9076 | return -EINVAL; | ||
9078 | } | 9077 | } |
9079 | 9078 | ||
9080 | return -EINVAL; | 9079 | return -EINVAL; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index bb252b36994d..fc39f22e5c70 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -4422,6 +4422,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, | |||
4422 | NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); | 4422 | NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); |
4423 | return -EINVAL; | 4423 | return -EINVAL; |
4424 | } | 4424 | } |
4425 | if (is_vlan_dev(upper_dev) && | ||
4426 | vlan_dev_vlan_id(upper_dev) == 1) { | ||
4427 | NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic"); | ||
4428 | return -EINVAL; | ||
4429 | } | ||
4425 | break; | 4430 | break; |
4426 | case NETDEV_CHANGEUPPER: | 4431 | case NETDEV_CHANGEUPPER: |
4427 | upper_dev = info->upper_dev; | 4432 | upper_dev = info->upper_dev; |
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 7ed08486ae23..c805dcbebd02 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c | |||
@@ -84,7 +84,7 @@ static int sonic_open(struct net_device *dev) | |||
84 | for (i = 0; i < SONIC_NUM_RRS; i++) { | 84 | for (i = 0; i < SONIC_NUM_RRS; i++) { |
85 | dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), | 85 | dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), |
86 | SONIC_RBSIZE, DMA_FROM_DEVICE); | 86 | SONIC_RBSIZE, DMA_FROM_DEVICE); |
87 | if (!laddr) { | 87 | if (dma_mapping_error(lp->device, laddr)) { |
88 | while(i > 0) { /* free any that were mapped successfully */ | 88 | while(i > 0) { /* free any that were mapped successfully */ |
89 | i--; | 89 | i--; |
90 | dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); | 90 | dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); |
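
The sonic fix replaces the `!laddr` test with dma_mapping_error(), since a DMA handle of zero can be a perfectly valid mapping on some platforms. The user-space sketch below contrasts the two tests; the typedef, error value and helper names are invented stand-ins for the DMA API.

#include <stdio.h>

typedef unsigned long dma_addr_t;

#define FAKE_DMA_ERROR ((dma_addr_t)-1)

/* A successful mapping can legitimately return address 0, so drivers
 * must ask the DMA API whether the handle is an error instead of
 * testing it for zero. */
static dma_addr_t fake_dma_map(int make_it_fail)
{
	return make_it_fail ? FAKE_DMA_ERROR : 0;	/* 0 is a valid address here */
}

static int fake_dma_mapping_error(dma_addr_t addr)
{
	return addr == FAKE_DMA_ERROR;
}

int main(void)
{
	dma_addr_t ok = fake_dma_map(0);
	dma_addr_t bad = fake_dma_map(1);

	/* The old "!laddr" test would have treated the valid zero
	 * address as a failure. */
	printf("valid mapping at 0x%lx, error? %d\n", ok, fake_dma_mapping_error(ok));
	printf("failed mapping, error? %d\n", fake_dma_mapping_error(bad));
	return 0;
}
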
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index aa50331b7607..ce8071fc90c4 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c | |||
@@ -1681,8 +1681,8 @@ static int netsec_probe(struct platform_device *pdev) | |||
1681 | if (ret) | 1681 | if (ret) |
1682 | goto unreg_napi; | 1682 | goto unreg_napi; |
1683 | 1683 | ||
1684 | if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) | 1684 | if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) |
1685 | dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n"); | 1685 | dev_warn(&pdev->dev, "Failed to set DMA mask\n"); |
1686 | 1686 | ||
1687 | ret = register_netdev(ndev); | 1687 | ret = register_netdev(ndev); |
1688 | if (ret) { | 1688 | if (ret) { |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index be0fec17d95d..06d7c9e4dcda 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
1873 | if (IS_ERR(priv->txchan)) { | 1873 | if (IS_ERR(priv->txchan)) { |
1874 | dev_err(&pdev->dev, "error initializing tx dma channel\n"); | 1874 | dev_err(&pdev->dev, "error initializing tx dma channel\n"); |
1875 | rc = PTR_ERR(priv->txchan); | 1875 | rc = PTR_ERR(priv->txchan); |
1876 | goto no_cpdma_chan; | 1876 | goto err_free_dma; |
1877 | } | 1877 | } |
1878 | 1878 | ||
1879 | priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, | 1879 | priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, |
@@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
1881 | if (IS_ERR(priv->rxchan)) { | 1881 | if (IS_ERR(priv->rxchan)) { |
1882 | dev_err(&pdev->dev, "error initializing rx dma channel\n"); | 1882 | dev_err(&pdev->dev, "error initializing rx dma channel\n"); |
1883 | rc = PTR_ERR(priv->rxchan); | 1883 | rc = PTR_ERR(priv->rxchan); |
1884 | goto no_cpdma_chan; | 1884 | goto err_free_txchan; |
1885 | } | 1885 | } |
1886 | 1886 | ||
1887 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1887 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1888 | if (!res) { | 1888 | if (!res) { |
1889 | dev_err(&pdev->dev, "error getting irq res\n"); | 1889 | dev_err(&pdev->dev, "error getting irq res\n"); |
1890 | rc = -ENOENT; | 1890 | rc = -ENOENT; |
1891 | goto no_cpdma_chan; | 1891 | goto err_free_rxchan; |
1892 | } | 1892 | } |
1893 | ndev->irq = res->start; | 1893 | ndev->irq = res->start; |
1894 | 1894 | ||
@@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
1914 | pm_runtime_put_noidle(&pdev->dev); | 1914 | pm_runtime_put_noidle(&pdev->dev); |
1915 | dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", | 1915 | dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", |
1916 | __func__, rc); | 1916 | __func__, rc); |
1917 | goto no_cpdma_chan; | 1917 | goto err_napi_del; |
1918 | } | 1918 | } |
1919 | 1919 | ||
1920 | /* register the network device */ | 1920 | /* register the network device */ |
@@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
1924 | dev_err(&pdev->dev, "error in register_netdev\n"); | 1924 | dev_err(&pdev->dev, "error in register_netdev\n"); |
1925 | rc = -ENODEV; | 1925 | rc = -ENODEV; |
1926 | pm_runtime_put(&pdev->dev); | 1926 | pm_runtime_put(&pdev->dev); |
1927 | goto no_cpdma_chan; | 1927 | goto err_napi_del; |
1928 | } | 1928 | } |
1929 | 1929 | ||
1930 | 1930 | ||
@@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
1937 | 1937 | ||
1938 | return 0; | 1938 | return 0; |
1939 | 1939 | ||
1940 | no_cpdma_chan: | 1940 | err_napi_del: |
1941 | if (priv->txchan) | 1941 | netif_napi_del(&priv->napi); |
1942 | cpdma_chan_destroy(priv->txchan); | 1942 | err_free_rxchan: |
1943 | if (priv->rxchan) | 1943 | cpdma_chan_destroy(priv->rxchan); |
1944 | cpdma_chan_destroy(priv->rxchan); | 1944 | err_free_txchan: |
1945 | cpdma_chan_destroy(priv->txchan); | ||
1946 | err_free_dma: | ||
1945 | cpdma_ctlr_destroy(priv->dma); | 1947 | cpdma_ctlr_destroy(priv->dma); |
1946 | no_pdata: | 1948 | no_pdata: |
1947 | if (of_phy_is_fixed_link(np)) | 1949 | if (of_phy_is_fixed_link(np)) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2265d2ccea47..c94fffee5ea9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1663,7 +1663,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1663 | else | 1663 | else |
1664 | *skb_xdp = 0; | 1664 | *skb_xdp = 0; |
1665 | 1665 | ||
1666 | preempt_disable(); | 1666 | local_bh_disable(); |
1667 | rcu_read_lock(); | 1667 | rcu_read_lock(); |
1668 | xdp_prog = rcu_dereference(tun->xdp_prog); | 1668 | xdp_prog = rcu_dereference(tun->xdp_prog); |
1669 | if (xdp_prog && !*skb_xdp) { | 1669 | if (xdp_prog && !*skb_xdp) { |
@@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1688 | if (err) | 1688 | if (err) |
1689 | goto err_redirect; | 1689 | goto err_redirect; |
1690 | rcu_read_unlock(); | 1690 | rcu_read_unlock(); |
1691 | preempt_enable(); | 1691 | local_bh_enable(); |
1692 | return NULL; | 1692 | return NULL; |
1693 | case XDP_TX: | 1693 | case XDP_TX: |
1694 | get_page(alloc_frag->page); | 1694 | get_page(alloc_frag->page); |
@@ -1697,7 +1697,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1697 | goto err_redirect; | 1697 | goto err_redirect; |
1698 | tun_xdp_flush(tun->dev); | 1698 | tun_xdp_flush(tun->dev); |
1699 | rcu_read_unlock(); | 1699 | rcu_read_unlock(); |
1700 | preempt_enable(); | 1700 | local_bh_enable(); |
1701 | return NULL; | 1701 | return NULL; |
1702 | case XDP_PASS: | 1702 | case XDP_PASS: |
1703 | delta = orig_data - xdp.data; | 1703 | delta = orig_data - xdp.data; |
@@ -1717,7 +1717,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1717 | skb = build_skb(buf, buflen); | 1717 | skb = build_skb(buf, buflen); |
1718 | if (!skb) { | 1718 | if (!skb) { |
1719 | rcu_read_unlock(); | 1719 | rcu_read_unlock(); |
1720 | preempt_enable(); | 1720 | local_bh_enable(); |
1721 | return ERR_PTR(-ENOMEM); | 1721 | return ERR_PTR(-ENOMEM); |
1722 | } | 1722 | } |
1723 | 1723 | ||
@@ -1727,7 +1727,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1727 | alloc_frag->offset += buflen; | 1727 | alloc_frag->offset += buflen; |
1728 | 1728 | ||
1729 | rcu_read_unlock(); | 1729 | rcu_read_unlock(); |
1730 | preempt_enable(); | 1730 | local_bh_enable(); |
1731 | 1731 | ||
1732 | return skb; | 1732 | return skb; |
1733 | 1733 | ||
@@ -1735,7 +1735,7 @@ err_redirect: | |||
1735 | put_page(alloc_frag->page); | 1735 | put_page(alloc_frag->page); |
1736 | err_xdp: | 1736 | err_xdp: |
1737 | rcu_read_unlock(); | 1737 | rcu_read_unlock(); |
1738 | preempt_enable(); | 1738 | local_bh_enable(); |
1739 | this_cpu_inc(tun->pcpu_stats->rx_dropped); | 1739 | this_cpu_inc(tun->pcpu_stats->rx_dropped); |
1740 | return NULL; | 1740 | return NULL; |
1741 | } | 1741 | } |
@@ -1931,16 +1931,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1931 | struct bpf_prog *xdp_prog; | 1931 | struct bpf_prog *xdp_prog; |
1932 | int ret; | 1932 | int ret; |
1933 | 1933 | ||
1934 | local_bh_disable(); | ||
1934 | rcu_read_lock(); | 1935 | rcu_read_lock(); |
1935 | xdp_prog = rcu_dereference(tun->xdp_prog); | 1936 | xdp_prog = rcu_dereference(tun->xdp_prog); |
1936 | if (xdp_prog) { | 1937 | if (xdp_prog) { |
1937 | ret = do_xdp_generic(xdp_prog, skb); | 1938 | ret = do_xdp_generic(xdp_prog, skb); |
1938 | if (ret != XDP_PASS) { | 1939 | if (ret != XDP_PASS) { |
1939 | rcu_read_unlock(); | 1940 | rcu_read_unlock(); |
1941 | local_bh_enable(); | ||
1940 | return total_len; | 1942 | return total_len; |
1941 | } | 1943 | } |
1942 | } | 1944 | } |
1943 | rcu_read_unlock(); | 1945 | rcu_read_unlock(); |
1946 | local_bh_enable(); | ||
1944 | } | 1947 | } |
1945 | 1948 | ||
1946 | /* Compute the costly rx hash only if needed for flow updates. | 1949 | /* Compute the costly rx hash only if needed for flow updates. |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 7220cd620717..0362acd5cdca 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = { | |||
609 | */ | 609 | */ |
610 | static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = { | 610 | static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = { |
611 | .description = "CDC MBIM", | 611 | .description = "CDC MBIM", |
612 | .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, | 612 | .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP, |
613 | .bind = cdc_mbim_bind, | 613 | .bind = cdc_mbim_bind, |
614 | .unbind = cdc_mbim_unbind, | 614 | .unbind = cdc_mbim_unbind, |
615 | .manage_power = cdc_mbim_manage_power, | 615 | .manage_power = cdc_mbim_manage_power, |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 148e78f8b48c..8e8b51f171f4 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = { | |||
1103 | {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, | 1103 | {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, |
1104 | {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ | 1104 | {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ |
1105 | {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, | 1105 | {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, |
1106 | {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */ | ||
1106 | {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ | 1107 | {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ |
1107 | {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ | 1108 | {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ |
1108 | {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ | 1109 | {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 6e9a9ecfb11c..7229991ae70d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
@@ -1600,14 +1600,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, | |||
1600 | struct iwl_trans *trans) | 1600 | struct iwl_trans *trans) |
1601 | { | 1601 | { |
1602 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1602 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1603 | int max_irqs, num_irqs, i, ret, nr_online_cpus; | 1603 | int max_irqs, num_irqs, i, ret; |
1604 | u16 pci_cmd; | 1604 | u16 pci_cmd; |
1605 | 1605 | ||
1606 | if (!trans->cfg->mq_rx_supported) | 1606 | if (!trans->cfg->mq_rx_supported) |
1607 | goto enable_msi; | 1607 | goto enable_msi; |
1608 | 1608 | ||
1609 | nr_online_cpus = num_online_cpus(); | 1609 | max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES); |
1610 | max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES); | ||
1611 | for (i = 0; i < max_irqs; i++) | 1610 | for (i = 0; i < max_irqs; i++) |
1612 | trans_pcie->msix_entries[i].entry = i; | 1611 | trans_pcie->msix_entries[i].entry = i; |
1613 | 1612 | ||
@@ -1633,16 +1632,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, | |||
1633 | * Two interrupts less: non rx causes shared with FBQ and RSS. | 1632 | * Two interrupts less: non rx causes shared with FBQ and RSS. |
1634 | * More than two interrupts: we will use fewer RSS queues. | 1633 | * More than two interrupts: we will use fewer RSS queues. |
1635 | */ | 1634 | */ |
1636 | if (num_irqs <= nr_online_cpus) { | 1635 | if (num_irqs <= max_irqs - 2) { |
1637 | trans_pcie->trans->num_rx_queues = num_irqs + 1; | 1636 | trans_pcie->trans->num_rx_queues = num_irqs + 1; |
1638 | trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | | 1637 | trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | |
1639 | IWL_SHARED_IRQ_FIRST_RSS; | 1638 | IWL_SHARED_IRQ_FIRST_RSS; |
1640 | } else if (num_irqs == nr_online_cpus + 1) { | 1639 | } else if (num_irqs == max_irqs - 1) { |
1641 | trans_pcie->trans->num_rx_queues = num_irqs; | 1640 | trans_pcie->trans->num_rx_queues = num_irqs; |
1642 | trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; | 1641 | trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; |
1643 | } else { | 1642 | } else { |
1644 | trans_pcie->trans->num_rx_queues = num_irqs - 1; | 1643 | trans_pcie->trans->num_rx_queues = num_irqs - 1; |
1645 | } | 1644 | } |
1645 | WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); | ||
1646 | 1646 | ||
1647 | trans_pcie->alloc_vecs = num_irqs; | 1647 | trans_pcie->alloc_vecs = num_irqs; |
1648 | trans_pcie->msix_enabled = true; | 1648 | trans_pcie->msix_enabled = true; |
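
The iwlwifi change compares the number of granted MSI-X vectors against the count that was actually requested (max_irqs - 2 / max_irqs - 1) instead of a separately sampled CPU count, and warns if the derived RX queue count ever exceeds the hardware limit. A rough stand-alone sketch of that sizing logic is below; the queue cap value is illustrative rather than the driver's real constant, and the sketch clamps where the driver only warns.

#include <stdio.h>

#define MAX_RX_HW_QUEUES 16	/* illustrative cap, not the driver's value */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Derive the RX queue count from the vectors that were granted. */
static unsigned int rx_queues(unsigned int online_cpus, unsigned int granted_irqs)
{
	unsigned int max_irqs = min_u32(online_cpus + 2, MAX_RX_HW_QUEUES);
	unsigned int nr;

	if (granted_irqs <= max_irqs - 2)
		nr = granted_irqs + 1;	/* share non-RX and first-RSS causes */
	else if (granted_irqs == max_irqs - 1)
		nr = granted_irqs;	/* share non-RX causes only */
	else
		nr = granted_irqs - 1;	/* one vector reserved for non-RX */

	return min_u32(nr, MAX_RX_HW_QUEUES);
}

int main(void)
{
	printf("%u\n", rx_queues(4, 6));	/* all requested vectors granted */
	printf("%u\n", rx_queues(4, 3));	/* fewer granted than requested */
	return 0;
}
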
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 7c1f8f561d4a..710e9641552e 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | |||
@@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, | |||
372 | 372 | ||
373 | /* | 373 | /* |
374 | * Determine IFS values | 374 | * Determine IFS values |
375 | * - Use TXOP_BACKOFF for probe and management frames except beacons | 375 | * - Use TXOP_BACKOFF for management frames except beacons |
376 | * - Use TXOP_SIFS for fragment bursts | 376 | * - Use TXOP_SIFS for fragment bursts |
377 | * - Use TXOP_HTTXOP for everything else | 377 | * - Use TXOP_HTTXOP for everything else |
378 | * | 378 | * |
379 | * Note: rt2800 devices won't use CTS protection (if used) | 379 | * Note: rt2800 devices won't use CTS protection (if used) |
380 | * for frames not transmitted with TXOP_HTTXOP | 380 | * for frames not transmitted with TXOP_HTTXOP |
381 | */ | 381 | */ |
382 | if ((ieee80211_is_mgmt(hdr->frame_control) && | 382 | if (ieee80211_is_mgmt(hdr->frame_control) && |
383 | !ieee80211_is_beacon(hdr->frame_control)) || | 383 | !ieee80211_is_beacon(hdr->frame_control)) |
384 | (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) | ||
385 | txdesc->u.ht.txop = TXOP_BACKOFF; | 384 | txdesc->u.ht.txop = TXOP_BACKOFF; |
386 | else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) | 385 | else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) |
387 | txdesc->u.ht.txop = TXOP_SIFS; | 386 | txdesc->u.ht.txop = TXOP_SIFS; |
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index dbb7464c018c..88a8b5916624 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig | |||
@@ -27,7 +27,7 @@ config NVME_FABRICS | |||
27 | 27 | ||
28 | config NVME_RDMA | 28 | config NVME_RDMA |
29 | tristate "NVM Express over Fabrics RDMA host driver" | 29 | tristate "NVM Express over Fabrics RDMA host driver" |
30 | depends on INFINIBAND_ADDR_TRANS && BLOCK | 30 | depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK |
31 | select NVME_CORE | 31 | select NVME_CORE |
32 | select NVME_FABRICS | 32 | select NVME_FABRICS |
33 | select SG_POOL | 33 | select SG_POOL |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 99b857e5a7a9..b9ca782fe82d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -1447,8 +1447,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) | |||
1447 | if (ns->lba_shift == 0) | 1447 | if (ns->lba_shift == 0) |
1448 | ns->lba_shift = 9; | 1448 | ns->lba_shift = 9; |
1449 | ns->noiob = le16_to_cpu(id->noiob); | 1449 | ns->noiob = le16_to_cpu(id->noiob); |
1450 | ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); | ||
1451 | ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); | 1450 | ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); |
1451 | ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); | ||
1452 | /* the PI implementation requires metadata equal t10 pi tuple size */ | 1452 | /* the PI implementation requires metadata equal t10 pi tuple size */ |
1453 | if (ns->ms == sizeof(struct t10_pi_tuple)) | 1453 | if (ns->ms == sizeof(struct t10_pi_tuple)) |
1454 | ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; | 1454 | ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; |
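
The nvme fix only swaps two assignments: ns->ms has to be read out of the identify data before ns->ext is derived from it, otherwise the extended-LBA flag is computed from a stale metadata size. A minimal sketch of that ordering dependency is below; the struct and helper are invented for illustration.

#include <stdio.h>

struct ns {
	unsigned short ms;	/* metadata size from the selected LBA format */
	int ext;		/* metadata transferred as extended LBA */
};

/* ext is only meaningful when the format actually carries metadata, so
 * ms must be filled in first -- the patch simply reorders the two
 * assignments. */
static void revalidate(struct ns *ns, unsigned short lbaf_ms, int flbas_ext)
{
	ns->ms = lbaf_ms;
	ns->ext = ns->ms && flbas_ext;
}

int main(void)
{
	struct ns ns = { 0 };

	revalidate(&ns, 8, 1);
	printf("ms=%u ext=%d\n", ns.ms, ns.ext);
	revalidate(&ns, 0, 1);
	printf("ms=%u ext=%d\n", ns.ms, ns.ext);
	return 0;
}
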
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 7595664ee753..3c7b61ddb0d1 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig | |||
@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP | |||
27 | 27 | ||
28 | config NVME_TARGET_RDMA | 28 | config NVME_TARGET_RDMA |
29 | tristate "NVMe over Fabrics RDMA target support" | 29 | tristate "NVMe over Fabrics RDMA target support" |
30 | depends on INFINIBAND_ADDR_TRANS | 30 | depends on INFINIBAND && INFINIBAND_ADDR_TRANS |
31 | depends on NVME_TARGET | 31 | depends on NVME_TARGET |
32 | select SGL_ALLOC | 32 | select SGL_ALLOC |
33 | help | 33 | help |
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index a32c5c00e0e7..ffffb9909ae1 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c | |||
@@ -163,6 +163,16 @@ MODULE_LICENSE("GPL"); | |||
163 | 163 | ||
164 | static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; | 164 | static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; |
165 | 165 | ||
166 | static bool ashs_present(void) | ||
167 | { | ||
168 | int i = 0; | ||
169 | while (ashs_ids[i]) { | ||
170 | if (acpi_dev_found(ashs_ids[i++])) | ||
171 | return true; | ||
172 | } | ||
173 | return false; | ||
174 | } | ||
175 | |||
166 | struct bios_args { | 176 | struct bios_args { |
167 | u32 arg0; | 177 | u32 arg0; |
168 | u32 arg1; | 178 | u32 arg1; |
@@ -1025,6 +1035,9 @@ static int asus_new_rfkill(struct asus_wmi *asus, | |||
1025 | 1035 | ||
1026 | static void asus_wmi_rfkill_exit(struct asus_wmi *asus) | 1036 | static void asus_wmi_rfkill_exit(struct asus_wmi *asus) |
1027 | { | 1037 | { |
1038 | if (asus->driver->wlan_ctrl_by_user && ashs_present()) | ||
1039 | return; | ||
1040 | |||
1028 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); | 1041 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); |
1029 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); | 1042 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); |
1030 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); | 1043 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); |
@@ -2121,16 +2134,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus) | |||
2121 | return 0; | 2134 | return 0; |
2122 | } | 2135 | } |
2123 | 2136 | ||
2124 | static bool ashs_present(void) | ||
2125 | { | ||
2126 | int i = 0; | ||
2127 | while (ashs_ids[i]) { | ||
2128 | if (acpi_dev_found(ashs_ids[i++])) | ||
2129 | return true; | ||
2130 | } | ||
2131 | return false; | ||
2132 | } | ||
2133 | |||
2134 | /* | 2137 | /* |
2135 | * WMI Driver | 2138 | * WMI Driver |
2136 | */ | 2139 | */ |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 04143c08bd6e..02c03e418c27 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -3034,7 +3034,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, | |||
3034 | cqr->callback_data = req; | 3034 | cqr->callback_data = req; |
3035 | cqr->status = DASD_CQR_FILLED; | 3035 | cqr->status = DASD_CQR_FILLED; |
3036 | cqr->dq = dq; | 3036 | cqr->dq = dq; |
3037 | req->completion_data = cqr; | 3037 | *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr; |
3038 | |||
3038 | blk_mq_start_request(req); | 3039 | blk_mq_start_request(req); |
3039 | spin_lock(&block->queue_lock); | 3040 | spin_lock(&block->queue_lock); |
3040 | list_add_tail(&cqr->blocklist, &block->ccw_queue); | 3041 | list_add_tail(&cqr->blocklist, &block->ccw_queue); |
@@ -3058,12 +3059,13 @@ out: | |||
3058 | */ | 3059 | */ |
3059 | enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) | 3060 | enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) |
3060 | { | 3061 | { |
3061 | struct dasd_ccw_req *cqr = req->completion_data; | ||
3062 | struct dasd_block *block = req->q->queuedata; | 3062 | struct dasd_block *block = req->q->queuedata; |
3063 | struct dasd_device *device; | 3063 | struct dasd_device *device; |
3064 | struct dasd_ccw_req *cqr; | ||
3064 | unsigned long flags; | 3065 | unsigned long flags; |
3065 | int rc = 0; | 3066 | int rc = 0; |
3066 | 3067 | ||
3068 | cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)); | ||
3067 | if (!cqr) | 3069 | if (!cqr) |
3068 | return BLK_EH_NOT_HANDLED; | 3070 | return BLK_EH_NOT_HANDLED; |
3069 | 3071 | ||
@@ -3169,6 +3171,7 @@ static int dasd_alloc_queue(struct dasd_block *block) | |||
3169 | int rc; | 3171 | int rc; |
3170 | 3172 | ||
3171 | block->tag_set.ops = &dasd_mq_ops; | 3173 | block->tag_set.ops = &dasd_mq_ops; |
3174 | block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *); | ||
3172 | block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; | 3175 | block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; |
3173 | block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; | 3176 | block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; |
3174 | block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | 3177 | block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
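Note: the dasd hunks above stop stashing the ccw request pointer in req->completion_data and instead reserve per-request driver data through the blk-mq tag set. A minimal sketch of that blk-mq pattern, with illustrative struct and function names that are not taken from the driver:

#include <linux/blk-mq.h>

/* hypothetical per-request payload; dasd keeps a single pointer here */
struct my_cmd {
	void *driver_data;
};

static void my_init_tag_set(struct blk_mq_tag_set *set)
{
	/* blk-mq allocates this many extra bytes behind every request */
	set->cmd_size = sizeof(struct my_cmd);
}

static struct my_cmd *my_cmd_from_rq(struct request *rq)
{
	/* returns the memory reserved via cmd_size for this request */
	return blk_mq_rq_to_pdu(rq);
}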
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 36f6190931bc..456ce9f19569 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c | |||
@@ -51,6 +51,8 @@ struct srp_internal { | |||
51 | struct transport_container rport_attr_cont; | 51 | struct transport_container rport_attr_cont; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static int scsi_is_srp_rport(const struct device *dev); | ||
55 | |||
54 | #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) | 56 | #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) |
55 | 57 | ||
56 | #define dev_to_rport(d) container_of(d, struct srp_rport, dev) | 58 | #define dev_to_rport(d) container_of(d, struct srp_rport, dev) |
@@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) | |||
60 | return dev_to_shost(r->dev.parent); | 62 | return dev_to_shost(r->dev.parent); |
61 | } | 63 | } |
62 | 64 | ||
65 | static int find_child_rport(struct device *dev, void *data) | ||
66 | { | ||
67 | struct device **child = data; | ||
68 | |||
69 | if (scsi_is_srp_rport(dev)) { | ||
70 | WARN_ON_ONCE(*child); | ||
71 | *child = dev; | ||
72 | } | ||
73 | return 0; | ||
74 | } | ||
75 | |||
63 | static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) | 76 | static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) |
64 | { | 77 | { |
65 | return transport_class_to_srp_rport(&shost->shost_gendev); | 78 | struct device *child = NULL; |
79 | |||
80 | WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child, | ||
81 | find_child_rport) < 0); | ||
82 | return child ? dev_to_rport(child) : NULL; | ||
66 | } | 83 | } |
67 | 84 | ||
68 | /** | 85 | /** |
@@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) | |||
600 | struct srp_rport *rport = shost_to_rport(shost); | 617 | struct srp_rport *rport = shost_to_rport(shost); |
601 | 618 | ||
602 | pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); | 619 | pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); |
603 | return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && | 620 | return rport && rport->fast_io_fail_tmo < 0 && |
621 | rport->dev_loss_tmo < 0 && | ||
604 | i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? | 622 | i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? |
605 | BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; | 623 | BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; |
606 | } | 624 | } |
diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c index 8d8659463b3e..feeb17cebc25 100644 --- a/drivers/soc/lantiq/gphy.c +++ b/drivers/soc/lantiq/gphy.c | |||
@@ -30,7 +30,6 @@ struct xway_gphy_priv { | |||
30 | struct clk *gphy_clk_gate; | 30 | struct clk *gphy_clk_gate; |
31 | struct reset_control *gphy_reset; | 31 | struct reset_control *gphy_reset; |
32 | struct reset_control *gphy_reset2; | 32 | struct reset_control *gphy_reset2; |
33 | struct notifier_block gphy_reboot_nb; | ||
34 | void __iomem *membase; | 33 | void __iomem *membase; |
35 | char *fw_name; | 34 | char *fw_name; |
36 | }; | 35 | }; |
@@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = { | |||
64 | }; | 63 | }; |
65 | MODULE_DEVICE_TABLE(of, xway_gphy_match); | 64 | MODULE_DEVICE_TABLE(of, xway_gphy_match); |
66 | 65 | ||
67 | static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb) | ||
68 | { | ||
69 | return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb); | ||
70 | } | ||
71 | |||
72 | static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb, | ||
73 | unsigned long code, void *unused) | ||
74 | { | ||
75 | struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb); | ||
76 | |||
77 | if (priv) { | ||
78 | reset_control_assert(priv->gphy_reset); | ||
79 | reset_control_assert(priv->gphy_reset2); | ||
80 | } | ||
81 | |||
82 | return NOTIFY_DONE; | ||
83 | } | ||
84 | |||
85 | static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, | 66 | static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, |
86 | dma_addr_t *dev_addr) | 67 | dma_addr_t *dev_addr) |
87 | { | 68 | { |
@@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev) | |||
205 | reset_control_deassert(priv->gphy_reset); | 186 | reset_control_deassert(priv->gphy_reset); |
206 | reset_control_deassert(priv->gphy_reset2); | 187 | reset_control_deassert(priv->gphy_reset2); |
207 | 188 | ||
208 | /* assert the gphy reset because it can hang after a reboot: */ | ||
209 | priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify; | ||
210 | priv->gphy_reboot_nb.priority = -1; | ||
211 | |||
212 | ret = register_reboot_notifier(&priv->gphy_reboot_nb); | ||
213 | if (ret) | ||
214 | dev_warn(dev, "Failed to register reboot notifier\n"); | ||
215 | |||
216 | platform_set_drvdata(pdev, priv); | 189 | platform_set_drvdata(pdev, priv); |
217 | 190 | ||
218 | return ret; | 191 | return ret; |
@@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev) | |||
220 | 193 | ||
221 | static int xway_gphy_remove(struct platform_device *pdev) | 194 | static int xway_gphy_remove(struct platform_device *pdev) |
222 | { | 195 | { |
223 | struct device *dev = &pdev->dev; | ||
224 | struct xway_gphy_priv *priv = platform_get_drvdata(pdev); | 196 | struct xway_gphy_priv *priv = platform_get_drvdata(pdev); |
225 | int ret; | ||
226 | |||
227 | reset_control_assert(priv->gphy_reset); | ||
228 | reset_control_assert(priv->gphy_reset2); | ||
229 | 197 | ||
230 | iowrite32be(0, priv->membase); | 198 | iowrite32be(0, priv->membase); |
231 | 199 | ||
232 | clk_disable_unprepare(priv->gphy_clk_gate); | 200 | clk_disable_unprepare(priv->gphy_clk_gate); |
233 | 201 | ||
234 | ret = unregister_reboot_notifier(&priv->gphy_reboot_nb); | ||
235 | if (ret) | ||
236 | dev_warn(dev, "Failed to unregister reboot notifier\n"); | ||
237 | |||
238 | return 0; | 202 | return 0; |
239 | } | 203 | } |
240 | 204 | ||
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig index f3b1ad4bd3dc..ad049e6f24e4 100644 --- a/drivers/staging/lustre/lnet/Kconfig +++ b/drivers/staging/lustre/lnet/Kconfig | |||
@@ -34,7 +34,7 @@ config LNET_SELFTEST | |||
34 | 34 | ||
35 | config LNET_XPRT_IB | 35 | config LNET_XPRT_IB |
36 | tristate "LNET infiniband support" | 36 | tristate "LNET infiniband support" |
37 | depends on LNET && PCI && INFINIBAND_ADDR_TRANS | 37 | depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS |
38 | default LNET && INFINIBAND | 38 | default LNET && INFINIBAND |
39 | help | 39 | help |
40 | This option allows the LNET users to use infiniband as an | 40 | This option allows the LNET users to use infiniband as an |
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 2d2ceda9aa26..500911f16498 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c | |||
@@ -1255,7 +1255,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) | |||
1255 | /* Map empty entries to null UUID */ | 1255 | /* Map empty entries to null UUID */ |
1256 | uuid[0] = 0; | 1256 | uuid[0] = 0; |
1257 | uuid[1] = 0; | 1257 | uuid[1] = 0; |
1258 | } else { | 1258 | } else if (uuid[0] != 0 || uuid[1] != 0) { |
1259 | /* Upper two DWs are always one's */ | 1259 | /* Upper two DWs are always one's */ |
1260 | uuid[2] = 0xffffffff; | 1260 | uuid[2] = 0xffffffff; |
1261 | uuid[3] = 0xffffffff; | 1261 | uuid[3] = 0xffffffff; |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 5c212bf29640..3c082451ab1a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -404,6 +404,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, | |||
404 | { | 404 | { |
405 | unsigned long pfn = 0; | 405 | unsigned long pfn = 0; |
406 | long ret, pinned = 0, lock_acct = 0; | 406 | long ret, pinned = 0, lock_acct = 0; |
407 | bool rsvd; | ||
407 | dma_addr_t iova = vaddr - dma->vaddr + dma->iova; | 408 | dma_addr_t iova = vaddr - dma->vaddr + dma->iova; |
408 | 409 | ||
409 | /* This code path is only user initiated */ | 410 | /* This code path is only user initiated */ |
@@ -414,23 +415,14 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, | |||
414 | if (ret) | 415 | if (ret) |
415 | return ret; | 416 | return ret; |
416 | 417 | ||
417 | if (is_invalid_reserved_pfn(*pfn_base)) { | ||
418 | struct vm_area_struct *vma; | ||
419 | |||
420 | down_read(¤t->mm->mmap_sem); | ||
421 | vma = find_vma_intersection(current->mm, vaddr, vaddr + 1); | ||
422 | pinned = min_t(long, npage, vma_pages(vma)); | ||
423 | up_read(¤t->mm->mmap_sem); | ||
424 | return pinned; | ||
425 | } | ||
426 | |||
427 | pinned++; | 418 | pinned++; |
419 | rsvd = is_invalid_reserved_pfn(*pfn_base); | ||
428 | 420 | ||
429 | /* | 421 | /* |
430 | * Reserved pages aren't counted against the user, externally pinned | 422 | * Reserved pages aren't counted against the user, externally pinned |
431 | * pages are already counted against the user. | 423 | * pages are already counted against the user. |
432 | */ | 424 | */ |
433 | if (!vfio_find_vpfn(dma, iova)) { | 425 | if (!rsvd && !vfio_find_vpfn(dma, iova)) { |
434 | if (!lock_cap && current->mm->locked_vm + 1 > limit) { | 426 | if (!lock_cap && current->mm->locked_vm + 1 > limit) { |
435 | put_pfn(*pfn_base, dma->prot); | 427 | put_pfn(*pfn_base, dma->prot); |
436 | pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, | 428 | pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, |
@@ -450,12 +442,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, | |||
450 | if (ret) | 442 | if (ret) |
451 | break; | 443 | break; |
452 | 444 | ||
453 | if (pfn != *pfn_base + pinned) { | 445 | if (pfn != *pfn_base + pinned || |
446 | rsvd != is_invalid_reserved_pfn(pfn)) { | ||
454 | put_pfn(pfn, dma->prot); | 447 | put_pfn(pfn, dma->prot); |
455 | break; | 448 | break; |
456 | } | 449 | } |
457 | 450 | ||
458 | if (!vfio_find_vpfn(dma, iova)) { | 451 | if (!rsvd && !vfio_find_vpfn(dma, iova)) { |
459 | if (!lock_cap && | 452 | if (!lock_cap && |
460 | current->mm->locked_vm + lock_acct + 1 > limit) { | 453 | current->mm->locked_vm + lock_acct + 1 > limit) { |
461 | put_pfn(pfn, dma->prot); | 454 | put_pfn(pfn, dma->prot); |
@@ -473,8 +466,10 @@ out: | |||
473 | 466 | ||
474 | unpin_out: | 467 | unpin_out: |
475 | if (ret) { | 468 | if (ret) { |
476 | for (pfn = *pfn_base ; pinned ; pfn++, pinned--) | 469 | if (!rsvd) { |
477 | put_pfn(pfn, dma->prot); | 470 | for (pfn = *pfn_base ; pinned ; pfn++, pinned--) |
471 | put_pfn(pfn, dma->prot); | ||
472 | } | ||
478 | 473 | ||
479 | return ret; | 474 | return ret; |
480 | } | 475 | } |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index c4b49fca4871..e7cf7d21cfb5 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -108,7 +108,9 @@ struct vhost_net_virtqueue { | |||
108 | /* vhost zerocopy support fields below: */ | 108 | /* vhost zerocopy support fields below: */ |
109 | /* last used idx for outstanding DMA zerocopy buffers */ | 109 | /* last used idx for outstanding DMA zerocopy buffers */ |
110 | int upend_idx; | 110 | int upend_idx; |
111 | /* first used idx for DMA done zerocopy buffers */ | 111 | /* For TX, first used idx for DMA done zerocopy buffers |
112 | * For RX, number of batched heads | ||
113 | */ | ||
112 | int done_idx; | 114 | int done_idx; |
113 | /* an array of userspace buffers info */ | 115 | /* an array of userspace buffers info */ |
114 | struct ubuf_info *ubuf_info; | 116 | struct ubuf_info *ubuf_info; |
@@ -629,6 +631,18 @@ static int sk_has_rx_data(struct sock *sk) | |||
629 | return skb_queue_empty(&sk->sk_receive_queue); | 631 | return skb_queue_empty(&sk->sk_receive_queue); |
630 | } | 632 | } |
631 | 633 | ||
634 | static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq) | ||
635 | { | ||
636 | struct vhost_virtqueue *vq = &nvq->vq; | ||
637 | struct vhost_dev *dev = vq->dev; | ||
638 | |||
639 | if (!nvq->done_idx) | ||
640 | return; | ||
641 | |||
642 | vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); | ||
643 | nvq->done_idx = 0; | ||
644 | } | ||
645 | |||
632 | static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) | 646 | static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) |
633 | { | 647 | { |
634 | struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; | 648 | struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; |
@@ -638,6 +652,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) | |||
638 | int len = peek_head_len(rvq, sk); | 652 | int len = peek_head_len(rvq, sk); |
639 | 653 | ||
640 | if (!len && vq->busyloop_timeout) { | 654 | if (!len && vq->busyloop_timeout) { |
655 | /* Flush batched heads first */ | ||
656 | vhost_rx_signal_used(rvq); | ||
641 | /* Both tx vq and rx socket were polled here */ | 657 | /* Both tx vq and rx socket were polled here */ |
642 | mutex_lock_nested(&vq->mutex, 1); | 658 | mutex_lock_nested(&vq->mutex, 1); |
643 | vhost_disable_notify(&net->dev, vq); | 659 | vhost_disable_notify(&net->dev, vq); |
@@ -765,7 +781,7 @@ static void handle_rx(struct vhost_net *net) | |||
765 | }; | 781 | }; |
766 | size_t total_len = 0; | 782 | size_t total_len = 0; |
767 | int err, mergeable; | 783 | int err, mergeable; |
768 | s16 headcount, nheads = 0; | 784 | s16 headcount; |
769 | size_t vhost_hlen, sock_hlen; | 785 | size_t vhost_hlen, sock_hlen; |
770 | size_t vhost_len, sock_len; | 786 | size_t vhost_len, sock_len; |
771 | struct socket *sock; | 787 | struct socket *sock; |
@@ -794,8 +810,8 @@ static void handle_rx(struct vhost_net *net) | |||
794 | while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { | 810 | while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { |
795 | sock_len += sock_hlen; | 811 | sock_len += sock_hlen; |
796 | vhost_len = sock_len + vhost_hlen; | 812 | vhost_len = sock_len + vhost_hlen; |
797 | headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len, | 813 | headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, |
798 | &in, vq_log, &log, | 814 | vhost_len, &in, vq_log, &log, |
799 | likely(mergeable) ? UIO_MAXIOV : 1); | 815 | likely(mergeable) ? UIO_MAXIOV : 1); |
800 | /* On error, stop handling until the next kick. */ | 816 | /* On error, stop handling until the next kick. */ |
801 | if (unlikely(headcount < 0)) | 817 | if (unlikely(headcount < 0)) |
@@ -866,12 +882,9 @@ static void handle_rx(struct vhost_net *net) | |||
866 | vhost_discard_vq_desc(vq, headcount); | 882 | vhost_discard_vq_desc(vq, headcount); |
867 | goto out; | 883 | goto out; |
868 | } | 884 | } |
869 | nheads += headcount; | 885 | nvq->done_idx += headcount; |
870 | if (nheads > VHOST_RX_BATCH) { | 886 | if (nvq->done_idx > VHOST_RX_BATCH) |
871 | vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, | 887 | vhost_rx_signal_used(nvq); |
872 | nheads); | ||
873 | nheads = 0; | ||
874 | } | ||
875 | if (unlikely(vq_log)) | 888 | if (unlikely(vq_log)) |
876 | vhost_log_write(vq, vq_log, log, vhost_len); | 889 | vhost_log_write(vq, vq_log, log, vhost_len); |
877 | total_len += vhost_len; | 890 | total_len += vhost_len; |
@@ -883,9 +896,7 @@ static void handle_rx(struct vhost_net *net) | |||
883 | } | 896 | } |
884 | vhost_net_enable_vq(net, vq); | 897 | vhost_net_enable_vq(net, vq); |
885 | out: | 898 | out: |
886 | if (nheads) | 899 | vhost_rx_signal_used(nvq); |
887 | vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, | ||
888 | nheads); | ||
889 | mutex_unlock(&vq->mutex); | 900 | mutex_unlock(&vq->mutex); |
890 | } | 901 | } |
891 | 902 | ||
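Note: the vhost-net hunks reuse done_idx on the RX path to batch used-ring updates instead of tracking a separate nheads counter. A rough sketch of the flush helper's role, with an assumed batch constant and illustrative names:

#define RX_BATCH 64	/* illustrative threshold, not the driver's value */

static void flush_used(struct vhost_dev *dev, struct vhost_virtqueue *vq,
		       int *done_idx)
{
	if (!*done_idx)
		return;
	/* publish all batched heads to the used ring and signal the guest */
	vhost_add_used_and_signal_n(dev, vq, vq->heads, *done_idx);
	*done_idx = 0;
}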
diff --git a/fs/afs/security.c b/fs/afs/security.c index 1992b0ffa543..81dfedb7879f 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c | |||
@@ -372,18 +372,14 @@ int afs_permission(struct inode *inode, int mask) | |||
372 | mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file"); | 372 | mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file"); |
373 | 373 | ||
374 | if (S_ISDIR(inode->i_mode)) { | 374 | if (S_ISDIR(inode->i_mode)) { |
375 | if (mask & MAY_EXEC) { | 375 | if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) { |
376 | if (!(access & AFS_ACE_LOOKUP)) | 376 | if (!(access & AFS_ACE_LOOKUP)) |
377 | goto permission_denied; | 377 | goto permission_denied; |
378 | } else if (mask & MAY_READ) { | 378 | } |
379 | if (!(access & AFS_ACE_LOOKUP)) | 379 | if (mask & MAY_WRITE) { |
380 | goto permission_denied; | ||
381 | } else if (mask & MAY_WRITE) { | ||
382 | if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */ | 380 | if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */ |
383 | AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */ | 381 | AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */ |
384 | goto permission_denied; | 382 | goto permission_denied; |
385 | } else { | ||
386 | BUG(); | ||
387 | } | 383 | } |
388 | } else { | 384 | } else { |
389 | if (!(access & AFS_ACE_LOOKUP)) | 385 | if (!(access & AFS_ACE_LOOKUP)) |
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index 1ed7e2fd2f35..c3b740813fc7 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c | |||
@@ -23,7 +23,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) | |||
23 | struct afs_uvldbentry__xdr *uvldb; | 23 | struct afs_uvldbentry__xdr *uvldb; |
24 | struct afs_vldb_entry *entry; | 24 | struct afs_vldb_entry *entry; |
25 | bool new_only = false; | 25 | bool new_only = false; |
26 | u32 tmp, nr_servers; | 26 | u32 tmp, nr_servers, vlflags; |
27 | int i, ret; | 27 | int i, ret; |
28 | 28 | ||
29 | _enter(""); | 29 | _enter(""); |
@@ -55,6 +55,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) | |||
55 | new_only = true; | 55 | new_only = true; |
56 | } | 56 | } |
57 | 57 | ||
58 | vlflags = ntohl(uvldb->flags); | ||
58 | for (i = 0; i < nr_servers; i++) { | 59 | for (i = 0; i < nr_servers; i++) { |
59 | struct afs_uuid__xdr *xdr; | 60 | struct afs_uuid__xdr *xdr; |
60 | struct afs_uuid *uuid; | 61 | struct afs_uuid *uuid; |
@@ -64,12 +65,13 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) | |||
64 | if (tmp & AFS_VLSF_DONTUSE || | 65 | if (tmp & AFS_VLSF_DONTUSE || |
65 | (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) | 66 | (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) |
66 | continue; | 67 | continue; |
67 | if (tmp & AFS_VLSF_RWVOL) | 68 | if (tmp & AFS_VLSF_RWVOL) { |
68 | entry->fs_mask[i] |= AFS_VOL_VTM_RW; | 69 | entry->fs_mask[i] |= AFS_VOL_VTM_RW; |
70 | if (vlflags & AFS_VLF_BACKEXISTS) | ||
71 | entry->fs_mask[i] |= AFS_VOL_VTM_BAK; | ||
72 | } | ||
69 | if (tmp & AFS_VLSF_ROVOL) | 73 | if (tmp & AFS_VLSF_ROVOL) |
70 | entry->fs_mask[i] |= AFS_VOL_VTM_RO; | 74 | entry->fs_mask[i] |= AFS_VOL_VTM_RO; |
71 | if (tmp & AFS_VLSF_BACKVOL) | ||
72 | entry->fs_mask[i] |= AFS_VOL_VTM_BAK; | ||
73 | if (!entry->fs_mask[i]) | 75 | if (!entry->fs_mask[i]) |
74 | continue; | 76 | continue; |
75 | 77 | ||
@@ -89,15 +91,14 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) | |||
89 | for (i = 0; i < AFS_MAXTYPES; i++) | 91 | for (i = 0; i < AFS_MAXTYPES; i++) |
90 | entry->vid[i] = ntohl(uvldb->volumeId[i]); | 92 | entry->vid[i] = ntohl(uvldb->volumeId[i]); |
91 | 93 | ||
92 | tmp = ntohl(uvldb->flags); | 94 | if (vlflags & AFS_VLF_RWEXISTS) |
93 | if (tmp & AFS_VLF_RWEXISTS) | ||
94 | __set_bit(AFS_VLDB_HAS_RW, &entry->flags); | 95 | __set_bit(AFS_VLDB_HAS_RW, &entry->flags); |
95 | if (tmp & AFS_VLF_ROEXISTS) | 96 | if (vlflags & AFS_VLF_ROEXISTS) |
96 | __set_bit(AFS_VLDB_HAS_RO, &entry->flags); | 97 | __set_bit(AFS_VLDB_HAS_RO, &entry->flags); |
97 | if (tmp & AFS_VLF_BACKEXISTS) | 98 | if (vlflags & AFS_VLF_BACKEXISTS) |
98 | __set_bit(AFS_VLDB_HAS_BAK, &entry->flags); | 99 | __set_bit(AFS_VLDB_HAS_BAK, &entry->flags); |
99 | 100 | ||
100 | if (!(tmp & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) { | 101 | if (!(vlflags & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) { |
101 | entry->error = -ENOMEDIUM; | 102 | entry->error = -ENOMEDIUM; |
102 | __set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags); | 103 | __set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags); |
103 | } | 104 | } |
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index d61e2de8d0eb..5f132d59dfc2 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -197,7 +197,7 @@ config CIFS_SMB311 | |||
197 | 197 | ||
198 | config CIFS_SMB_DIRECT | 198 | config CIFS_SMB_DIRECT |
199 | bool "SMB Direct support (Experimental)" | 199 | bool "SMB Direct support (Experimental)" |
200 | depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y | 200 | depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y |
201 | help | 201 | help |
202 | Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1. | 202 | Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1. |
203 | SMB Direct allows transferring SMB packets over RDMA. If unsure, | 203 | SMB Direct allows transferring SMB packets over RDMA. If unsure, |
diff --git a/fs/inode.c b/fs/inode.c index 13ceb98c3bd3..3b55391072f3 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -178,6 +178,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) | |||
178 | mapping->a_ops = &empty_aops; | 178 | mapping->a_ops = &empty_aops; |
179 | mapping->host = inode; | 179 | mapping->host = inode; |
180 | mapping->flags = 0; | 180 | mapping->flags = 0; |
181 | mapping->wb_err = 0; | ||
181 | atomic_set(&mapping->i_mmap_writable, 0); | 182 | atomic_set(&mapping->i_mmap_writable, 0); |
182 | mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); | 183 | mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); |
183 | mapping->private_data = NULL; | 184 | mapping->private_data = NULL; |
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index dd2a8cf7d20b..ccb5aa8468e0 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h | |||
@@ -151,7 +151,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, | |||
151 | struct drm_encoder *encoder, | 151 | struct drm_encoder *encoder, |
152 | const struct dw_hdmi_plat_data *plat_data); | 152 | const struct dw_hdmi_plat_data *plat_data); |
153 | 153 | ||
154 | void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense); | 154 | void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); |
155 | 155 | ||
156 | void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); | 156 | void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); |
157 | void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); | 157 | void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); |
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index b9e22b7e2f28..d1171db23742 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h | |||
@@ -53,7 +53,7 @@ struct iio_buffer_access_funcs { | |||
53 | int (*request_update)(struct iio_buffer *buffer); | 53 | int (*request_update)(struct iio_buffer *buffer); |
54 | 54 | ||
55 | int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); | 55 | int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); |
56 | int (*set_length)(struct iio_buffer *buffer, int length); | 56 | int (*set_length)(struct iio_buffer *buffer, unsigned int length); |
57 | 57 | ||
58 | int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); | 58 | int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); |
59 | int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); | 59 | int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); |
@@ -72,10 +72,10 @@ struct iio_buffer_access_funcs { | |||
72 | */ | 72 | */ |
73 | struct iio_buffer { | 73 | struct iio_buffer { |
74 | /** @length: Number of datums in buffer. */ | 74 | /** @length: Number of datums in buffer. */ |
75 | int length; | 75 | unsigned int length; |
76 | 76 | ||
77 | /** @bytes_per_datum: Size of individual datum including timestamp. */ | 77 | /** @bytes_per_datum: Size of individual datum including timestamp. */ |
78 | int bytes_per_datum; | 78 | size_t bytes_per_datum; |
79 | 79 | ||
80 | /** | 80 | /** |
81 | * @access: Buffer access functions associated with the | 81 | * @access: Buffer access functions associated with the |
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index bc01e06bc716..0be866c91f62 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h | |||
@@ -435,7 +435,9 @@ TRACE_EVENT(sched_pi_setprio, | |||
435 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); | 435 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); |
436 | __entry->pid = tsk->pid; | 436 | __entry->pid = tsk->pid; |
437 | __entry->oldprio = tsk->prio; | 437 | __entry->oldprio = tsk->prio; |
438 | __entry->newprio = pi_task ? pi_task->prio : tsk->prio; | 438 | __entry->newprio = pi_task ? |
439 | min(tsk->normal_prio, pi_task->prio) : | ||
440 | tsk->normal_prio; | ||
439 | /* XXX SCHED_DEADLINE bits missing */ | 441 | /* XXX SCHED_DEADLINE bits missing */ |
440 | ), | 442 | ), |
441 | 443 | ||
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9b8c6e310e9a..671486133988 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -2332,6 +2332,7 @@ struct bpf_map_info { | |||
2332 | __u32 map_flags; | 2332 | __u32 map_flags; |
2333 | char name[BPF_OBJ_NAME_LEN]; | 2333 | char name[BPF_OBJ_NAME_LEN]; |
2334 | __u32 ifindex; | 2334 | __u32 ifindex; |
2335 | __u32 :32; | ||
2335 | __u64 netns_dev; | 2336 | __u64 netns_dev; |
2336 | __u64 netns_ino; | 2337 | __u64 netns_ino; |
2337 | __u32 btf_id; | 2338 | __u32 btf_id; |
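Note: the unnamed 32-bit bitfield makes the hole before netns_dev an explicit part of the UAPI layout; this is the padding slot the merge resolution refers to. A standalone sketch, using stdint types in place of the kernel's __u32/__u64, showing that the named pad and the implicit alignment padding put netns_dev at the same offset:

#include <stddef.h>
#include <stdint.h>

struct map_info_tail {
	uint32_t ifindex;
	unsigned int :32;	/* explicit pad, mirrors the added line */
	uint64_t netns_dev;
	uint64_t netns_ino;
};

_Static_assert(offsetof(struct map_info_tail, netns_dev) == 8,
	       "netns_dev sits on an 8-byte boundary with or without the pad");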
diff --git a/kernel/kthread.c b/kernel/kthread.c index 2017a39ab490..481951bf091d 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -193,7 +193,7 @@ EXPORT_SYMBOL_GPL(kthread_parkme); | |||
193 | 193 | ||
194 | void kthread_park_complete(struct task_struct *k) | 194 | void kthread_park_complete(struct task_struct *k) |
195 | { | 195 | { |
196 | complete(&to_kthread(k)->parked); | 196 | complete_all(&to_kthread(k)->parked); |
197 | } | 197 | } |
198 | 198 | ||
199 | static int kthread(void *_create) | 199 | static int kthread(void *_create) |
@@ -459,6 +459,7 @@ void kthread_unpark(struct task_struct *k) | |||
459 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) | 459 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) |
460 | __kthread_bind(k, kthread->cpu, TASK_PARKED); | 460 | __kthread_bind(k, kthread->cpu, TASK_PARKED); |
461 | 461 | ||
462 | reinit_completion(&kthread->parked); | ||
462 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 463 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
463 | wake_up_state(k, TASK_PARKED); | 464 | wake_up_state(k, TASK_PARKED); |
464 | } | 465 | } |
@@ -483,9 +484,6 @@ int kthread_park(struct task_struct *k) | |||
483 | if (WARN_ON(k->flags & PF_EXITING)) | 484 | if (WARN_ON(k->flags & PF_EXITING)) |
484 | return -ENOSYS; | 485 | return -ENOSYS; |
485 | 486 | ||
486 | if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))) | ||
487 | return -EBUSY; | ||
488 | |||
489 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 487 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
490 | if (k != current) { | 488 | if (k != current) { |
491 | wake_up_process(k); | 489 | wake_up_process(k); |
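Note: the kthread hunks switch the park notification to complete_all() and re-arm the completion on unpark. A hedged sketch of that completion life cycle, with illustrative names; the real code lives in kthread_park_complete()/kthread_unpark():

struct worker {
	struct completion parked;
};

static void worker_park_done(struct worker *w)
{
	complete_all(&w->parked);	/* release every waiter of this park */
}

static void worker_unpark(struct worker *w)
{
	reinit_completion(&w->parked);	/* arm for the next park cycle */
	/* ...clear the SHOULD_PARK flag and wake the thread here... */
}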
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 64cc564f5255..61a1125c1ae4 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c | |||
@@ -1708,7 +1708,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att | |||
1708 | rcu_read_unlock(); | 1708 | rcu_read_unlock(); |
1709 | 1709 | ||
1710 | if (rq && sched_debug_enabled) { | 1710 | if (rq && sched_debug_enabled) { |
1711 | pr_info("span: %*pbl (max cpu_capacity = %lu)\n", | 1711 | pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n", |
1712 | cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); | 1712 | cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); |
1713 | } | 1713 | } |
1714 | 1714 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 414d7210b2ec..bcd93031d042 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -893,7 +893,7 @@ int __trace_bputs(unsigned long ip, const char *str) | |||
893 | EXPORT_SYMBOL_GPL(__trace_bputs); | 893 | EXPORT_SYMBOL_GPL(__trace_bputs); |
894 | 894 | ||
895 | #ifdef CONFIG_TRACER_SNAPSHOT | 895 | #ifdef CONFIG_TRACER_SNAPSHOT |
896 | static void tracing_snapshot_instance(struct trace_array *tr) | 896 | void tracing_snapshot_instance(struct trace_array *tr) |
897 | { | 897 | { |
898 | struct tracer *tracer = tr->current_trace; | 898 | struct tracer *tracer = tr->current_trace; |
899 | unsigned long flags; | 899 | unsigned long flags; |
@@ -949,7 +949,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, | |||
949 | struct trace_buffer *size_buf, int cpu_id); | 949 | struct trace_buffer *size_buf, int cpu_id); |
950 | static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); | 950 | static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); |
951 | 951 | ||
952 | static int alloc_snapshot(struct trace_array *tr) | 952 | int tracing_alloc_snapshot_instance(struct trace_array *tr) |
953 | { | 953 | { |
954 | int ret; | 954 | int ret; |
955 | 955 | ||
@@ -995,7 +995,7 @@ int tracing_alloc_snapshot(void) | |||
995 | struct trace_array *tr = &global_trace; | 995 | struct trace_array *tr = &global_trace; |
996 | int ret; | 996 | int ret; |
997 | 997 | ||
998 | ret = alloc_snapshot(tr); | 998 | ret = tracing_alloc_snapshot_instance(tr); |
999 | WARN_ON(ret < 0); | 999 | WARN_ON(ret < 0); |
1000 | 1000 | ||
1001 | return ret; | 1001 | return ret; |
@@ -5408,7 +5408,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) | |||
5408 | 5408 | ||
5409 | #ifdef CONFIG_TRACER_MAX_TRACE | 5409 | #ifdef CONFIG_TRACER_MAX_TRACE |
5410 | if (t->use_max_tr && !had_max_tr) { | 5410 | if (t->use_max_tr && !had_max_tr) { |
5411 | ret = alloc_snapshot(tr); | 5411 | ret = tracing_alloc_snapshot_instance(tr); |
5412 | if (ret < 0) | 5412 | if (ret < 0) |
5413 | goto out; | 5413 | goto out; |
5414 | } | 5414 | } |
@@ -6451,7 +6451,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
6451 | } | 6451 | } |
6452 | #endif | 6452 | #endif |
6453 | if (!tr->allocated_snapshot) { | 6453 | if (!tr->allocated_snapshot) { |
6454 | ret = alloc_snapshot(tr); | 6454 | ret = tracing_alloc_snapshot_instance(tr); |
6455 | if (ret < 0) | 6455 | if (ret < 0) |
6456 | break; | 6456 | break; |
6457 | } | 6457 | } |
@@ -7179,7 +7179,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, | |||
7179 | return ret; | 7179 | return ret; |
7180 | 7180 | ||
7181 | out_reg: | 7181 | out_reg: |
7182 | ret = alloc_snapshot(tr); | 7182 | ret = tracing_alloc_snapshot_instance(tr); |
7183 | if (ret < 0) | 7183 | if (ret < 0) |
7184 | goto out; | 7184 | goto out; |
7185 | 7185 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6fb46a06c9dc..507954b4e058 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -1817,6 +1817,17 @@ static inline void __init trace_event_init(void) { } | |||
1817 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } | 1817 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } |
1818 | #endif | 1818 | #endif |
1819 | 1819 | ||
1820 | #ifdef CONFIG_TRACER_SNAPSHOT | ||
1821 | void tracing_snapshot_instance(struct trace_array *tr); | ||
1822 | int tracing_alloc_snapshot_instance(struct trace_array *tr); | ||
1823 | #else | ||
1824 | static inline void tracing_snapshot_instance(struct trace_array *tr) { } | ||
1825 | static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) | ||
1826 | { | ||
1827 | return 0; | ||
1828 | } | ||
1829 | #endif | ||
1830 | |||
1820 | extern struct trace_iterator *tracepoint_print_iter; | 1831 | extern struct trace_iterator *tracepoint_print_iter; |
1821 | 1832 | ||
1822 | #endif /* _LINUX_KERNEL_TRACE_H */ | 1833 | #endif /* _LINUX_KERNEL_TRACE_H */ |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index d251cabcf69a..8b5bdcf64871 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
@@ -483,9 +483,10 @@ clear_event_triggers(struct trace_array *tr) | |||
483 | struct trace_event_file *file; | 483 | struct trace_event_file *file; |
484 | 484 | ||
485 | list_for_each_entry(file, &tr->events, list) { | 485 | list_for_each_entry(file, &tr->events, list) { |
486 | struct event_trigger_data *data; | 486 | struct event_trigger_data *data, *n; |
487 | list_for_each_entry_rcu(data, &file->triggers, list) { | 487 | list_for_each_entry_safe(data, n, &file->triggers, list) { |
488 | trace_event_trigger_enable_disable(file, 0); | 488 | trace_event_trigger_enable_disable(file, 0); |
489 | list_del_rcu(&data->list); | ||
489 | if (data->ops->free) | 490 | if (data->ops->free) |
490 | data->ops->free(data->ops, data); | 491 | data->ops->free(data->ops, data); |
491 | } | 492 | } |
@@ -642,6 +643,7 @@ event_trigger_callback(struct event_command *cmd_ops, | |||
642 | trigger_data->count = -1; | 643 | trigger_data->count = -1; |
643 | trigger_data->ops = trigger_ops; | 644 | trigger_data->ops = trigger_ops; |
644 | trigger_data->cmd_ops = cmd_ops; | 645 | trigger_data->cmd_ops = cmd_ops; |
646 | trigger_data->private_data = file; | ||
645 | INIT_LIST_HEAD(&trigger_data->list); | 647 | INIT_LIST_HEAD(&trigger_data->list); |
646 | INIT_LIST_HEAD(&trigger_data->named_list); | 648 | INIT_LIST_HEAD(&trigger_data->named_list); |
647 | 649 | ||
@@ -1053,7 +1055,12 @@ static void | |||
1053 | snapshot_trigger(struct event_trigger_data *data, void *rec, | 1055 | snapshot_trigger(struct event_trigger_data *data, void *rec, |
1054 | struct ring_buffer_event *event) | 1056 | struct ring_buffer_event *event) |
1055 | { | 1057 | { |
1056 | tracing_snapshot(); | 1058 | struct trace_event_file *file = data->private_data; |
1059 | |||
1060 | if (file) | ||
1061 | tracing_snapshot_instance(file->tr); | ||
1062 | else | ||
1063 | tracing_snapshot(); | ||
1057 | } | 1064 | } |
1058 | 1065 | ||
1059 | static void | 1066 | static void |
@@ -1076,7 +1083,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, | |||
1076 | { | 1083 | { |
1077 | int ret = register_trigger(glob, ops, data, file); | 1084 | int ret = register_trigger(glob, ops, data, file); |
1078 | 1085 | ||
1079 | if (ret > 0 && tracing_alloc_snapshot() != 0) { | 1086 | if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) { |
1080 | unregister_trigger(glob, ops, data, file); | 1087 | unregister_trigger(glob, ops, data, file); |
1081 | ret = 0; | 1088 | ret = 0; |
1082 | } | 1089 | } |
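Note: clear_event_triggers() now walks the trigger list with the _safe variant because it unlinks entries as it goes. A generic sketch of that deletion-safe iteration, with illustrative types:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
};

static void drain(struct list_head *head)
{
	struct item *it, *next;

	/* "next" is captured before the body runs, so freeing "it" is safe */
	list_for_each_entry_safe(it, next, head, list) {
		list_del(&it->list);
		kfree(it);
	}
}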
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a3a1815f8e11..b9f3dbd885bd 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -2431,7 +2431,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, | |||
2431 | __split_huge_page_tail(head, i, lruvec, list); | 2431 | __split_huge_page_tail(head, i, lruvec, list); |
2432 | /* Some pages can be beyond i_size: drop them from page cache */ | 2432 | /* Some pages can be beyond i_size: drop them from page cache */ |
2433 | if (head[i].index >= end) { | 2433 | if (head[i].index >= end) { |
2434 | __ClearPageDirty(head + i); | 2434 | ClearPageDirty(head + i); |
2435 | __delete_from_page_cache(head + i, NULL); | 2435 | __delete_from_page_cache(head + i, NULL); |
2436 | if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) | 2436 | if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) |
2437 | shmem_uncharge(head->mapping->host, 1); | 2437 | shmem_uncharge(head->mapping->host, 1); |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 9b697323a88c..9270a4370d54 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1418,7 +1418,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode) | |||
1418 | return ret; | 1418 | return ret; |
1419 | 1419 | ||
1420 | mapping = page_mapping(page); | 1420 | mapping = page_mapping(page); |
1421 | migrate_dirty = mapping && mapping->a_ops->migratepage; | 1421 | migrate_dirty = !mapping || mapping->a_ops->migratepage; |
1422 | unlock_page(page); | 1422 | unlock_page(page); |
1423 | if (!migrate_dirty) | 1423 | if (!migrate_dirty) |
1424 | return ret; | 1424 | return ret; |
diff --git a/net/9p/Kconfig b/net/9p/Kconfig index 46c39f7da444..e6014e0e51f7 100644 --- a/net/9p/Kconfig +++ b/net/9p/Kconfig | |||
@@ -32,7 +32,7 @@ config NET_9P_XEN | |||
32 | 32 | ||
33 | 33 | ||
34 | config NET_9P_RDMA | 34 | config NET_9P_RDMA |
35 | depends on INET && INFINIBAND_ADDR_TRANS | 35 | depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS |
36 | tristate "9P RDMA Transport (Experimental)" | 36 | tristate "9P RDMA Transport (Experimental)" |
37 | help | 37 | help |
38 | This builds support for an RDMA transport. | 38 | This builds support for an RDMA transport. |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index b286ed5596c3..c2138e7e8263 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1949,7 +1949,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1949 | int off, pad = 0; | 1949 | int off, pad = 0; |
1950 | unsigned int size_kern, match_size = mwt->match_size; | 1950 | unsigned int size_kern, match_size = mwt->match_size; |
1951 | 1951 | ||
1952 | strlcpy(name, mwt->u.name, sizeof(name)); | 1952 | if (strscpy(name, mwt->u.name, sizeof(name)) < 0) |
1953 | return -EINVAL; | ||
1953 | 1954 | ||
1954 | if (state->buf_kern_start) | 1955 | if (state->buf_kern_start) |
1955 | dst = state->buf_kern_start + state->buf_kern_offset; | 1956 | dst = state->buf_kern_start + state->buf_kern_offset; |
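Note: the ebtables fix swaps strlcpy() for strscpy() so an over-long match name is rejected rather than silently truncated; strscpy() reports truncation as -E2BIG. A small sketch of the idiom, names illustrative:

#include <linux/string.h>
#include <linux/errno.h>

static int copy_bounded_name(char *dst, const char *src, size_t size)
{
	ssize_t n = strscpy(dst, src, size);

	return n < 0 ? -EINVAL : 0;	/* negative return means truncation */
}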
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index c476f0794132..bb7e80f4ced3 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -1214,9 +1214,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, | |||
1214 | cpumask_var_t mask; | 1214 | cpumask_var_t mask; |
1215 | unsigned long index; | 1215 | unsigned long index; |
1216 | 1216 | ||
1217 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) | ||
1218 | return -ENOMEM; | ||
1219 | |||
1220 | index = get_netdev_queue_index(queue); | 1217 | index = get_netdev_queue_index(queue); |
1221 | 1218 | ||
1222 | if (dev->num_tc) { | 1219 | if (dev->num_tc) { |
@@ -1226,6 +1223,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, | |||
1226 | return -EINVAL; | 1223 | return -EINVAL; |
1227 | } | 1224 | } |
1228 | 1225 | ||
1226 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) | ||
1227 | return -ENOMEM; | ||
1228 | |||
1229 | rcu_read_lock(); | 1229 | rcu_read_lock(); |
1230 | dev_maps = rcu_dereference(dev->xps_maps); | 1230 | dev_maps = rcu_dereference(dev->xps_maps); |
1231 | if (dev_maps) { | 1231 | if (dev_maps) { |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 6b0e362cc99b..38d906baf1df 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -328,7 +328,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) | |||
328 | 328 | ||
329 | if (tdev) { | 329 | if (tdev) { |
330 | hlen = tdev->hard_header_len + tdev->needed_headroom; | 330 | hlen = tdev->hard_header_len + tdev->needed_headroom; |
331 | mtu = tdev->mtu; | 331 | mtu = min(tdev->mtu, IP_MAX_MTU); |
332 | } | 332 | } |
333 | 333 | ||
334 | dev->needed_headroom = t_hlen + hlen; | 334 | dev->needed_headroom = t_hlen + hlen; |
@@ -362,7 +362,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, | |||
362 | nt = netdev_priv(dev); | 362 | nt = netdev_priv(dev); |
363 | t_hlen = nt->hlen + sizeof(struct iphdr); | 363 | t_hlen = nt->hlen + sizeof(struct iphdr); |
364 | dev->min_mtu = ETH_MIN_MTU; | 364 | dev->min_mtu = ETH_MIN_MTU; |
365 | dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; | 365 | dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; |
366 | ip_tunnel_add(itn, nt); | 366 | ip_tunnel_add(itn, nt); |
367 | return nt; | 367 | return nt; |
368 | 368 | ||
@@ -930,7 +930,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) | |||
930 | { | 930 | { |
931 | struct ip_tunnel *tunnel = netdev_priv(dev); | 931 | struct ip_tunnel *tunnel = netdev_priv(dev); |
932 | int t_hlen = tunnel->hlen + sizeof(struct iphdr); | 932 | int t_hlen = tunnel->hlen + sizeof(struct iphdr); |
933 | int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; | 933 | int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; |
934 | 934 | ||
935 | if (new_mtu < ETH_MIN_MTU) | 935 | if (new_mtu < ETH_MIN_MTU) |
936 | return -EINVAL; | 936 | return -EINVAL; |
@@ -1107,7 +1107,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], | |||
1107 | 1107 | ||
1108 | mtu = ip_tunnel_bind_dev(dev); | 1108 | mtu = ip_tunnel_bind_dev(dev); |
1109 | if (tb[IFLA_MTU]) { | 1109 | if (tb[IFLA_MTU]) { |
1110 | unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; | 1110 | unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen; |
1111 | 1111 | ||
1112 | mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, | 1112 | mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, |
1113 | (unsigned int)(max - sizeof(struct iphdr))); | 1113 | (unsigned int)(max - sizeof(struct iphdr))); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index da66aaac51ce..00e138a44cbb 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1692,8 +1692,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) | |||
1692 | if (new_mtu < ETH_MIN_MTU) | 1692 | if (new_mtu < ETH_MIN_MTU) |
1693 | return -EINVAL; | 1693 | return -EINVAL; |
1694 | } | 1694 | } |
1695 | if (new_mtu > 0xFFF8 - dev->hard_header_len) | 1695 | if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) { |
1696 | return -EINVAL; | 1696 | if (new_mtu > IP6_MAX_MTU - dev->hard_header_len) |
1697 | return -EINVAL; | ||
1698 | } else { | ||
1699 | if (new_mtu > IP_MAX_MTU - dev->hard_header_len) | ||
1700 | return -EINVAL; | ||
1701 | } | ||
1697 | dev->mtu = new_mtu; | 1702 | dev->mtu = new_mtu; |
1698 | return 0; | 1703 | return 0; |
1699 | } | 1704 | } |
@@ -1841,7 +1846,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev) | |||
1841 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) | 1846 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
1842 | dev->mtu -= 8; | 1847 | dev->mtu -= 8; |
1843 | dev->min_mtu = ETH_MIN_MTU; | 1848 | dev->min_mtu = ETH_MIN_MTU; |
1844 | dev->max_mtu = 0xFFF8 - dev->hard_header_len; | 1849 | dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len; |
1845 | 1850 | ||
1846 | return 0; | 1851 | return 0; |
1847 | 1852 | ||
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index eab39bd91548..19ccf0dc996c 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
@@ -122,7 +122,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) | |||
122 | hdrlen = (osrh->hdrlen + 1) << 3; | 122 | hdrlen = (osrh->hdrlen + 1) << 3; |
123 | tot_len = hdrlen + sizeof(*hdr); | 123 | tot_len = hdrlen + sizeof(*hdr); |
124 | 124 | ||
125 | err = skb_cow_head(skb, tot_len); | 125 | err = skb_cow_head(skb, tot_len + skb->mac_len); |
126 | if (unlikely(err)) | 126 | if (unlikely(err)) |
127 | return err; | 127 | return err; |
128 | 128 | ||
@@ -181,7 +181,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) | |||
181 | 181 | ||
182 | hdrlen = (osrh->hdrlen + 1) << 3; | 182 | hdrlen = (osrh->hdrlen + 1) << 3; |
183 | 183 | ||
184 | err = skb_cow_head(skb, hdrlen); | 184 | err = skb_cow_head(skb, hdrlen + skb->mac_len); |
185 | if (unlikely(err)) | 185 | if (unlikely(err)) |
186 | return err; | 186 | return err; |
187 | 187 | ||
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 2afce37a7177..e9400ffa7875 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -1371,7 +1371,7 @@ static void ipip6_tunnel_setup(struct net_device *dev) | |||
1371 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; | 1371 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; |
1372 | dev->mtu = ETH_DATA_LEN - t_hlen; | 1372 | dev->mtu = ETH_DATA_LEN - t_hlen; |
1373 | dev->min_mtu = IPV6_MIN_MTU; | 1373 | dev->min_mtu = IPV6_MIN_MTU; |
1374 | dev->max_mtu = 0xFFF8 - t_hlen; | 1374 | dev->max_mtu = IP6_MAX_MTU - t_hlen; |
1375 | dev->flags = IFF_NOARP; | 1375 | dev->flags = IFF_NOARP; |
1376 | netif_keep_dst(dev); | 1376 | netif_keep_dst(dev); |
1377 | dev->addr_len = 4; | 1377 | dev->addr_len = 4; |
@@ -1583,7 +1583,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, | |||
1583 | if (tb[IFLA_MTU]) { | 1583 | if (tb[IFLA_MTU]) { |
1584 | u32 mtu = nla_get_u32(tb[IFLA_MTU]); | 1584 | u32 mtu = nla_get_u32(tb[IFLA_MTU]); |
1585 | 1585 | ||
1586 | if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) | 1586 | if (mtu >= IPV6_MIN_MTU && |
1587 | mtu <= IP6_MAX_MTU - dev->hard_header_len) | ||
1587 | dev->mtu = mtu; | 1588 | dev->mtu = mtu; |
1588 | } | 1589 | } |
1589 | 1590 | ||
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 2cff209d0fc1..ef3defaf43b9 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -124,7 +124,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
124 | struct flowi6 *fl6 = &fl->u.ip6; | 124 | struct flowi6 *fl6 = &fl->u.ip6; |
125 | int onlyproto = 0; | 125 | int onlyproto = 0; |
126 | const struct ipv6hdr *hdr = ipv6_hdr(skb); | 126 | const struct ipv6hdr *hdr = ipv6_hdr(skb); |
127 | u16 offset = sizeof(*hdr); | 127 | u32 offset = sizeof(*hdr); |
128 | struct ipv6_opt_hdr *exthdr; | 128 | struct ipv6_opt_hdr *exthdr; |
129 | const unsigned char *nh = skb_network_header(skb); | 129 | const unsigned char *nh = skb_network_header(skb); |
130 | u16 nhoff = IP6CB(skb)->nhoff; | 130 | u16 nhoff = IP6CB(skb)->nhoff; |
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index dc76bc346829..d3601d421571 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c | |||
@@ -1671,7 +1671,7 @@ static struct file *kcm_clone(struct socket *osock) | |||
1671 | __module_get(newsock->ops->owner); | 1671 | __module_get(newsock->ops->owner); |
1672 | 1672 | ||
1673 | newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, | 1673 | newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, |
1674 | &kcm_proto, true); | 1674 | &kcm_proto, false); |
1675 | if (!newsk) { | 1675 | if (!newsk) { |
1676 | sock_release(newsock); | 1676 | sock_release(newsock); |
1677 | return ERR_PTR(-ENOMEM); | 1677 | return ERR_PTR(-ENOMEM); |
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index 99f4c22e2c8f..82e6edf9c5d9 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c | |||
@@ -208,7 +208,7 @@ err: | |||
208 | static int ncsi_pkg_info_all_nl(struct sk_buff *skb, | 208 | static int ncsi_pkg_info_all_nl(struct sk_buff *skb, |
209 | struct netlink_callback *cb) | 209 | struct netlink_callback *cb) |
210 | { | 210 | { |
211 | struct nlattr *attrs[NCSI_ATTR_MAX]; | 211 | struct nlattr *attrs[NCSI_ATTR_MAX + 1]; |
212 | struct ncsi_package *np, *package; | 212 | struct ncsi_package *np, *package; |
213 | struct ncsi_dev_priv *ndp; | 213 | struct ncsi_dev_priv *ndp; |
214 | unsigned int package_id; | 214 | unsigned int package_id; |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index d4f68d0f7df7..af89bb5ffac7 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -2385,8 +2385,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2385 | struct ipvs_sync_daemon_cfg cfg; | 2385 | struct ipvs_sync_daemon_cfg cfg; |
2386 | 2386 | ||
2387 | memset(&cfg, 0, sizeof(cfg)); | 2387 | memset(&cfg, 0, sizeof(cfg)); |
2388 | strlcpy(cfg.mcast_ifn, dm->mcast_ifn, | 2388 | ret = -EINVAL; |
2389 | sizeof(cfg.mcast_ifn)); | 2389 | if (strscpy(cfg.mcast_ifn, dm->mcast_ifn, |
2390 | sizeof(cfg.mcast_ifn)) <= 0) | ||
2391 | goto out_dec; | ||
2390 | cfg.syncid = dm->syncid; | 2392 | cfg.syncid = dm->syncid; |
2391 | ret = start_sync_thread(ipvs, &cfg, dm->state); | 2393 | ret = start_sync_thread(ipvs, &cfg, dm->state); |
2392 | } else { | 2394 | } else { |
@@ -2424,12 +2426,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2424 | } | 2426 | } |
2425 | } | 2427 | } |
2426 | 2428 | ||
2429 | if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) && | ||
2430 | strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) == | ||
2431 | IP_VS_SCHEDNAME_MAXLEN) { | ||
2432 | ret = -EINVAL; | ||
2433 | goto out_unlock; | ||
2434 | } | ||
2435 | |||
2427 | /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */ | 2436 | /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */ |
2428 | if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP && | 2437 | if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP && |
2429 | usvc.protocol != IPPROTO_SCTP) { | 2438 | usvc.protocol != IPPROTO_SCTP) { |
2430 | pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", | 2439 | pr_err("set_ctl: invalid protocol: %d %pI4:%d\n", |
2431 | usvc.protocol, &usvc.addr.ip, | 2440 | usvc.protocol, &usvc.addr.ip, |
2432 | ntohs(usvc.port), usvc.sched_name); | 2441 | ntohs(usvc.port)); |
2433 | ret = -EFAULT; | 2442 | ret = -EFAULT; |
2434 | goto out_unlock; | 2443 | goto out_unlock; |
2435 | } | 2444 | } |
@@ -2851,7 +2860,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = { | |||
2851 | static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { | 2860 | static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { |
2852 | [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, | 2861 | [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, |
2853 | [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, | 2862 | [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, |
2854 | .len = IP_VS_IFNAME_MAXLEN }, | 2863 | .len = IP_VS_IFNAME_MAXLEN - 1 }, |
2855 | [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, | 2864 | [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, |
2856 | [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 }, | 2865 | [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 }, |
2857 | [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 }, | 2866 | [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 }, |
@@ -2869,7 +2878,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = { | |||
2869 | [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, | 2878 | [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, |
2870 | [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, | 2879 | [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, |
2871 | [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, | 2880 | [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, |
2872 | .len = IP_VS_SCHEDNAME_MAXLEN }, | 2881 | .len = IP_VS_SCHEDNAME_MAXLEN - 1 }, |
2873 | [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING, | 2882 | [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING, |
2874 | .len = IP_VS_PENAME_MAXLEN }, | 2883 | .len = IP_VS_PENAME_MAXLEN }, |
2875 | [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, | 2884 | [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2e8fd961746d..ca4c4d994ddb 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -1336,8 +1336,10 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain, | |||
1336 | rcu_assign_pointer(chain->stats, newstats); | 1336 | rcu_assign_pointer(chain->stats, newstats); |
1337 | synchronize_rcu(); | 1337 | synchronize_rcu(); |
1338 | free_percpu(oldstats); | 1338 | free_percpu(oldstats); |
1339 | } else | 1339 | } else { |
1340 | rcu_assign_pointer(chain->stats, newstats); | 1340 | rcu_assign_pointer(chain->stats, newstats); |
1341 | static_branch_inc(&nft_counters_enabled); | ||
1342 | } | ||
1341 | } | 1343 | } |
1342 | 1344 | ||
1343 | static void nf_tables_chain_free_chain_rules(struct nft_chain *chain) | 1345 | static void nf_tables_chain_free_chain_rules(struct nft_chain *chain) |
@@ -4944,7 +4946,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) | |||
4944 | if (idx > s_idx) | 4946 | if (idx > s_idx) |
4945 | memset(&cb->args[1], 0, | 4947 | memset(&cb->args[1], 0, |
4946 | sizeof(cb->args) - sizeof(cb->args[0])); | 4948 | sizeof(cb->args) - sizeof(cb->args[0])); |
4947 | if (filter && filter->table[0] && | 4949 | if (filter && filter->table && |
4948 | strcmp(filter->table, table->name)) | 4950 | strcmp(filter->table, table->name)) |
4949 | goto cont; | 4951 | goto cont; |
4950 | if (filter && | 4952 | if (filter && |
@@ -5624,7 +5626,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb, | |||
5624 | if (idx > s_idx) | 5626 | if (idx > s_idx) |
5625 | memset(&cb->args[1], 0, | 5627 | memset(&cb->args[1], 0, |
5626 | sizeof(cb->args) - sizeof(cb->args[0])); | 5628 | sizeof(cb->args) - sizeof(cb->args[0])); |
5627 | if (filter && filter->table[0] && | 5629 | if (filter && filter->table && |
5628 | strcmp(filter->table, table->name)) | 5630 | strcmp(filter->table, table->name)) |
5629 | goto cont; | 5631 | goto cont; |
5630 | 5632 | ||
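In both dump paths the filter's table name is an optional string, so the presence test has to be on the pointer itself: with a NULL filter->table the old filter->table[0] dereference would oops. The intended check, sketched with hypothetical names:

	#include <linux/string.h>

	struct demo_dump_filter {
		char *table;		/* NULL when the dump is not restricted to one table */
	};

	static bool demo_table_matches(const struct demo_dump_filter *filter,
				       const char *table_name)
	{
		/* No filter, or no table restriction: everything matches. */
		if (!filter || !filter->table)
			return true;
		return strcmp(filter->table, table_name) == 0;
	}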
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 47cf667b15ca..deff10adef9c 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
@@ -104,15 +104,15 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain, | |||
104 | if (!base_chain->stats) | 104 | if (!base_chain->stats) |
105 | return; | 105 | return; |
106 | 106 | ||
107 | local_bh_disable(); | ||
107 | stats = this_cpu_ptr(rcu_dereference(base_chain->stats)); | 108 | stats = this_cpu_ptr(rcu_dereference(base_chain->stats)); |
108 | if (stats) { | 109 | if (stats) { |
109 | local_bh_disable(); | ||
110 | u64_stats_update_begin(&stats->syncp); | 110 | u64_stats_update_begin(&stats->syncp); |
111 | stats->pkts++; | 111 | stats->pkts++; |
112 | stats->bytes += pkt->skb->len; | 112 | stats->bytes += pkt->skb->len; |
113 | u64_stats_update_end(&stats->syncp); | 113 | u64_stats_update_end(&stats->syncp); |
114 | local_bh_enable(); | ||
115 | } | 114 | } |
115 | local_bh_enable(); | ||
116 | } | 116 | } |
117 | 117 | ||
118 | struct nft_jumpstack { | 118 | struct nft_jumpstack { |
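Moving local_bh_disable() ahead of this_cpu_ptr() keeps the whole per-CPU access, including the pointer lookup, pinned to one CPU, and the unconditional local_bh_enable() keeps the pair balanced even when stats is NULL. The general per-CPU u64_stats pattern, as a minimal sketch with hypothetical struct and field names:

	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	struct demo_stats {
		u64			pkts;
		u64			bytes;
		struct u64_stats_sync	syncp;
	};

	static void demo_count(struct demo_stats __percpu *all_stats, unsigned int len)
	{
		struct demo_stats *stats;

		/* Disable BH before touching this CPU's slot so the update cannot
		 * be migrated or re-entered from softirq halfway through.
		 */
		local_bh_disable();
		stats = this_cpu_ptr(all_stats);
		u64_stats_update_begin(&stats->syncp);
		stats->pkts++;
		stats->bytes += len;
		u64_stats_update_end(&stats->syncp);
		local_bh_enable();
	}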
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 6ddf89183e7b..a0e5adf0b3b6 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c | |||
@@ -115,7 +115,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl, | |||
115 | nfacct->flags = flags; | 115 | nfacct->flags = flags; |
116 | } | 116 | } |
117 | 117 | ||
118 | nla_strlcpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); | 118 | nla_strlcpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX); |
119 | 119 | ||
120 | if (tb[NFACCT_BYTES]) { | 120 | if (tb[NFACCT_BYTES]) { |
121 | atomic64_set(&nfacct->bytes, | 121 | atomic64_set(&nfacct->bytes, |
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index fa026b269b36..cb5b5f207777 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
@@ -150,7 +150,7 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy, | |||
150 | return -EINVAL; | 150 | return -EINVAL; |
151 | 151 | ||
152 | nla_strlcpy(expect_policy->name, | 152 | nla_strlcpy(expect_policy->name, |
153 | nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN); | 153 | tb[NFCTH_POLICY_NAME], NF_CT_HELPER_NAME_LEN); |
154 | expect_policy->max_expected = | 154 | expect_policy->max_expected = |
155 | ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); | 155 | ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); |
156 | if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) | 156 | if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) |
@@ -235,7 +235,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[], | |||
235 | goto err1; | 235 | goto err1; |
236 | 236 | ||
237 | nla_strlcpy(helper->name, | 237 | nla_strlcpy(helper->name, |
238 | nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); | 238 | tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN); |
239 | size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); | 239 | size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); |
240 | if (size > FIELD_SIZEOF(struct nf_conn_help, data)) { | 240 | if (size > FIELD_SIZEOF(struct nf_conn_help, data)) { |
241 | ret = -ENOMEM; | 241 | ret = -ENOMEM; |
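Both hunks fix the same misuse: nla_strlcpy() expects the struct nlattr itself, since it reads the payload length from the attribute header, not the nla_data() payload pointer. Correct usage, sketched with hypothetical names:

	#include <net/netlink.h>

	#define DEMO_NAME_LEN 32

	static int demo_copy_name(const struct nlattr *name_attr,
				  char *dst /* DEMO_NAME_LEN bytes */)
	{
		if (!name_attr)
			return -EINVAL;

		/* Pass the attribute, not nla_data(name_attr): nla_strlcpy()
		 * bounds the copy by both the attribute payload length and
		 * DEMO_NAME_LEN, and always NUL-terminates dst.
		 */
		nla_strlcpy(dst, name_attr, DEMO_NAME_LEN);
		return 0;
	}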
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index f8b19eacfa0c..1435ffc5f57e 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c | |||
@@ -881,22 +881,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb, | |||
881 | struct nft_object *obj, bool reset) | 881 | struct nft_object *obj, bool reset) |
882 | { | 882 | { |
883 | const struct nft_ct_helper_obj *priv = nft_obj_data(obj); | 883 | const struct nft_ct_helper_obj *priv = nft_obj_data(obj); |
884 | const struct nf_conntrack_helper *helper = priv->helper4; | 884 | const struct nf_conntrack_helper *helper; |
885 | u16 family; | 885 | u16 family; |
886 | 886 | ||
887 | if (priv->helper4 && priv->helper6) { | ||
888 | family = NFPROTO_INET; | ||
889 | helper = priv->helper4; | ||
890 | } else if (priv->helper6) { | ||
891 | family = NFPROTO_IPV6; | ||
892 | helper = priv->helper6; | ||
893 | } else { | ||
894 | family = NFPROTO_IPV4; | ||
895 | helper = priv->helper4; | ||
896 | } | ||
897 | |||
887 | if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) | 898 | if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) |
888 | return -1; | 899 | return -1; |
889 | 900 | ||
890 | if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) | 901 | if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) |
891 | return -1; | 902 | return -1; |
892 | 903 | ||
893 | if (priv->helper4 && priv->helper6) | ||
894 | family = NFPROTO_INET; | ||
895 | else if (priv->helper6) | ||
896 | family = NFPROTO_IPV6; | ||
897 | else | ||
898 | family = NFPROTO_IPV4; | ||
899 | |||
900 | if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) | 904 | if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) |
901 | return -1; | 905 | return -1; |
902 | 906 | ||
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c index a9fc298ef4c3..72f13a1144dd 100644 --- a/net/netfilter/nft_limit.c +++ b/net/netfilter/nft_limit.c | |||
@@ -51,10 +51,13 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost) | |||
51 | return !limit->invert; | 51 | return !limit->invert; |
52 | } | 52 | } |
53 | 53 | ||
54 | /* Use same default as in iptables. */ | ||
55 | #define NFT_LIMIT_PKT_BURST_DEFAULT 5 | ||
56 | |||
54 | static int nft_limit_init(struct nft_limit *limit, | 57 | static int nft_limit_init(struct nft_limit *limit, |
55 | const struct nlattr * const tb[]) | 58 | const struct nlattr * const tb[], bool pkts) |
56 | { | 59 | { |
57 | u64 unit; | 60 | u64 unit, tokens; |
58 | 61 | ||
59 | if (tb[NFTA_LIMIT_RATE] == NULL || | 62 | if (tb[NFTA_LIMIT_RATE] == NULL || |
60 | tb[NFTA_LIMIT_UNIT] == NULL) | 63 | tb[NFTA_LIMIT_UNIT] == NULL) |
@@ -68,18 +71,25 @@ static int nft_limit_init(struct nft_limit *limit, | |||
68 | 71 | ||
69 | if (tb[NFTA_LIMIT_BURST]) | 72 | if (tb[NFTA_LIMIT_BURST]) |
70 | limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); | 73 | limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); |
71 | else | 74 | |
72 | limit->burst = 0; | 75 | if (pkts && limit->burst == 0) |
76 | limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT; | ||
73 | 77 | ||
74 | if (limit->rate + limit->burst < limit->rate) | 78 | if (limit->rate + limit->burst < limit->rate) |
75 | return -EOVERFLOW; | 79 | return -EOVERFLOW; |
76 | 80 | ||
77 | /* The token bucket size limits the number of tokens can be | 81 | if (pkts) { |
78 | * accumulated. tokens_max specifies the bucket size. | 82 | tokens = div_u64(limit->nsecs, limit->rate) * limit->burst; |
79 | * tokens_max = unit * (rate + burst) / rate. | 83 | } else { |
80 | */ | 84 | /* The token bucket size limits the number of tokens can be |
81 | limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), | 85 | * accumulated. tokens_max specifies the bucket size. |
82 | limit->rate); | 86 | * tokens_max = unit * (rate + burst) / rate. |
87 | */ | ||
88 | tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), | ||
89 | limit->rate); | ||
90 | } | ||
91 | |||
92 | limit->tokens = tokens; | ||
83 | limit->tokens_max = limit->tokens; | 93 | limit->tokens_max = limit->tokens; |
84 | 94 | ||
85 | if (tb[NFTA_LIMIT_FLAGS]) { | 95 | if (tb[NFTA_LIMIT_FLAGS]) { |
@@ -144,7 +154,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx, | |||
144 | struct nft_limit_pkts *priv = nft_expr_priv(expr); | 154 | struct nft_limit_pkts *priv = nft_expr_priv(expr); |
145 | int err; | 155 | int err; |
146 | 156 | ||
147 | err = nft_limit_init(&priv->limit, tb); | 157 | err = nft_limit_init(&priv->limit, tb, true); |
148 | if (err < 0) | 158 | if (err < 0) |
149 | return err; | 159 | return err; |
150 | 160 | ||
@@ -185,7 +195,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx, | |||
185 | { | 195 | { |
186 | struct nft_limit *priv = nft_expr_priv(expr); | 196 | struct nft_limit *priv = nft_expr_priv(expr); |
187 | 197 | ||
188 | return nft_limit_init(priv, tb); | 198 | return nft_limit_init(priv, tb, false); |
189 | } | 199 | } |
190 | 200 | ||
191 | static int nft_limit_bytes_dump(struct sk_buff *skb, | 201 | static int nft_limit_bytes_dump(struct sk_buff *skb, |
@@ -246,7 +256,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx, | |||
246 | struct nft_limit_pkts *priv = nft_obj_data(obj); | 256 | struct nft_limit_pkts *priv = nft_obj_data(obj); |
247 | int err; | 257 | int err; |
248 | 258 | ||
249 | err = nft_limit_init(&priv->limit, tb); | 259 | err = nft_limit_init(&priv->limit, tb, true); |
250 | if (err < 0) | 260 | if (err < 0) |
251 | return err; | 261 | return err; |
252 | 262 | ||
@@ -289,7 +299,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx, | |||
289 | { | 299 | { |
290 | struct nft_limit *priv = nft_obj_data(obj); | 300 | struct nft_limit *priv = nft_obj_data(obj); |
291 | 301 | ||
292 | return nft_limit_init(priv, tb); | 302 | return nft_limit_init(priv, tb, false); |
293 | } | 303 | } |
294 | 304 | ||
295 | static int nft_limit_obj_bytes_dump(struct sk_buff *skb, | 305 | static int nft_limit_obj_bytes_dump(struct sk_buff *skb, |
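With the iptables-compatible default burst of 5, and assuming nsecs holds the rate unit expressed in nanoseconds (e.g. 10^9 for "per second"), a packet limit of rate = 10/second starts with tokens = (10^9 / 10) * 5, i.e. roughly five packets' worth of credit, while byte limits keep the old tokens_max = unit * (rate + burst) / rate formula. A rough userspace illustration of the two computations from the hunk above (not the kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define NFT_LIMIT_PKT_BURST_DEFAULT 5	/* same default as iptables */

	static uint64_t tokens_pkts(uint64_t nsecs, uint64_t rate, uint64_t burst)
	{
		/* packet mode: per-packet cost is nsecs/rate, bucket holds 'burst' packets */
		return (nsecs / rate) * burst;
	}

	static uint64_t tokens_bytes(uint64_t nsecs, uint64_t rate, uint64_t burst)
	{
		/* byte mode: tokens_max = unit * (rate + burst) / rate */
		return nsecs * (rate + burst) / rate;
	}

	int main(void)
	{
		uint64_t nsecs = 1000000000ULL;	/* "per second" */

		printf("pkts : %llu\n",
		       (unsigned long long)tokens_pkts(nsecs, 10, NFT_LIMIT_PKT_BURST_DEFAULT));
		printf("bytes: %llu\n",
		       (unsigned long long)tokens_bytes(nsecs, 1000, 1500));
		return 0;
	}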
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 5348bd058c88..1105a23bda5e 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c | |||
@@ -259,7 +259,7 @@ static void nft_meta_set_eval(const struct nft_expr *expr, | |||
259 | struct sk_buff *skb = pkt->skb; | 259 | struct sk_buff *skb = pkt->skb; |
260 | u32 *sreg = &regs->data[meta->sreg]; | 260 | u32 *sreg = &regs->data[meta->sreg]; |
261 | u32 value = *sreg; | 261 | u32 value = *sreg; |
262 | u8 pkt_type; | 262 | u8 value8; |
263 | 263 | ||
264 | switch (meta->key) { | 264 | switch (meta->key) { |
265 | case NFT_META_MARK: | 265 | case NFT_META_MARK: |
@@ -269,15 +269,17 @@ static void nft_meta_set_eval(const struct nft_expr *expr, | |||
269 | skb->priority = value; | 269 | skb->priority = value; |
270 | break; | 270 | break; |
271 | case NFT_META_PKTTYPE: | 271 | case NFT_META_PKTTYPE: |
272 | pkt_type = nft_reg_load8(sreg); | 272 | value8 = nft_reg_load8(sreg); |
273 | 273 | ||
274 | if (skb->pkt_type != pkt_type && | 274 | if (skb->pkt_type != value8 && |
275 | skb_pkt_type_ok(pkt_type) && | 275 | skb_pkt_type_ok(value8) && |
276 | skb_pkt_type_ok(skb->pkt_type)) | 276 | skb_pkt_type_ok(skb->pkt_type)) |
277 | skb->pkt_type = pkt_type; | 277 | skb->pkt_type = value8; |
278 | break; | 278 | break; |
279 | case NFT_META_NFTRACE: | 279 | case NFT_META_NFTRACE: |
280 | skb->nf_trace = !!value; | 280 | value8 = nft_reg_load8(sreg); |
281 | |||
282 | skb->nf_trace = !!value8; | ||
281 | break; | 283 | break; |
282 | default: | 284 | default: |
283 | WARN_ON(1); | 285 | WARN_ON(1); |
diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 1a31502ee7db..bffde4b46c5d 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig | |||
@@ -8,7 +8,7 @@ config RDS | |||
8 | 8 | ||
9 | config RDS_RDMA | 9 | config RDS_RDMA |
10 | tristate "RDS over Infiniband" | 10 | tristate "RDS over Infiniband" |
11 | depends on RDS && INFINIBAND_ADDR_TRANS | 11 | depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS |
12 | ---help--- | 12 | ---help--- |
13 | Allow RDS to use Infiniband as a transport. | 13 | Allow RDS to use Infiniband as a transport. |
14 | This transport supports RDMA operations. | 14 | This transport supports RDMA operations. |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 4e74508515f4..3786feab0b83 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -1028,7 +1028,7 @@ errout_mask: | |||
1028 | fl_mask_put(head, fnew->mask, false); | 1028 | fl_mask_put(head, fnew->mask, false); |
1029 | 1029 | ||
1030 | errout_idr: | 1030 | errout_idr: |
1031 | if (fnew->handle) | 1031 | if (!fold) |
1032 | idr_remove(&head->handle_idr, fnew->handle); | 1032 | idr_remove(&head->handle_idr, fnew->handle); |
1033 | errout: | 1033 | errout: |
1034 | tcf_exts_destroy(&fnew->exts); | 1034 | tcf_exts_destroy(&fnew->exts); |
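The error path should only release what this call allocated: when fold is set, the handle belongs to the existing filter being replaced, so removing it from the IDR on failure would strip the old filter of its entry. A sketch of that allocate/undo pairing, with hypothetical names and a stand-in setup step:

	#include <linux/idr.h>

	static DEFINE_IDR(demo_handles);

	static int demo_setup(void *obj);	/* stand-in for the real configuration work */

	static int demo_replace_or_create(void *new_obj, void *old_obj)
	{
		bool new_handle = false;
		int handle = 0;
		int err;

		if (!old_obj) {
			/* Only the create path takes a fresh IDR slot. */
			handle = idr_alloc(&demo_handles, new_obj, 1, 0, GFP_KERNEL);
			if (handle < 0)
				return handle;
			new_handle = true;
		}

		err = demo_setup(new_obj);
		if (err && new_handle) {
			/* Undo only what this call allocated; a replace must not
			 * drop the handle still owned by the object it replaces.
			 */
			idr_remove(&demo_handles, handle);
		}
		return err;
	}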
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index 6358e5271070..ac09ca803296 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig | |||
@@ -50,7 +50,7 @@ config SUNRPC_DEBUG | |||
50 | 50 | ||
51 | config SUNRPC_XPRT_RDMA | 51 | config SUNRPC_XPRT_RDMA |
52 | tristate "RPC-over-RDMA transport" | 52 | tristate "RPC-over-RDMA transport" |
53 | depends on SUNRPC && INFINIBAND_ADDR_TRANS | 53 | depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS |
54 | default SUNRPC && INFINIBAND | 54 | default SUNRPC && INFINIBAND |
55 | select SG_POOL | 55 | select SG_POOL |
56 | help | 56 | help |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 40b54cc64243..5f48251c1319 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1658,7 +1658,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1658 | trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; | 1658 | trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; |
1659 | } | 1659 | } |
1660 | 1660 | ||
1661 | out: | ||
1662 | return &xdst0->u.dst; | 1661 | return &xdst0->u.dst; |
1663 | 1662 | ||
1664 | put_states: | 1663 | put_states: |
@@ -1667,8 +1666,8 @@ put_states: | |||
1667 | free_dst: | 1666 | free_dst: |
1668 | if (xdst0) | 1667 | if (xdst0) |
1669 | dst_release_immediate(&xdst0->u.dst); | 1668 | dst_release_immediate(&xdst0->u.dst); |
1670 | xdst0 = ERR_PTR(err); | 1669 | |
1671 | goto out; | 1670 | return ERR_PTR(err); |
1672 | } | 1671 | } |
1673 | 1672 | ||
1674 | static int xfrm_expand_policies(const struct flowi *fl, u16 family, | 1673 | static int xfrm_expand_policies(const struct flowi *fl, u16 family, |
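The error path now returns ERR_PTR(err) directly instead of overloading xdst0 and jumping back through the (now removed) out: label. For reference, the ERR_PTR convention used here, in a minimal standalone sketch with hypothetical names:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct demo_obj {
		int dummy;
	};

	/* Encode an errno in the pointer return value... */
	static struct demo_obj *demo_create(void)
	{
		struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return ERR_PTR(-ENOMEM);
		return obj;
	}

	/* ...and decode it at the caller. */
	static int demo_use(void)
	{
		struct demo_obj *obj = demo_create();

		if (IS_ERR(obj))
			return PTR_ERR(obj);	/* recover the negative errno */
		/* ... use obj ... */
		kfree(obj);
		return 0;
	}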
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 8057e19dc15f..3ce225e3f142 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -1494,7 +1494,7 @@ static int security_context_to_sid_core(struct selinux_state *state, | |||
1494 | scontext_len, &context, def_sid); | 1494 | scontext_len, &context, def_sid); |
1495 | if (rc == -EINVAL && force) { | 1495 | if (rc == -EINVAL && force) { |
1496 | context.str = str; | 1496 | context.str = str; |
1497 | context.len = scontext_len; | 1497 | context.len = strlen(str) + 1; |
1498 | str = NULL; | 1498 | str = NULL; |
1499 | } else if (rc) | 1499 | } else if (rc) |
1500 | goto out_unlock; | 1500 | goto out_unlock; |
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9b8c6e310e9a..671486133988 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
@@ -2332,6 +2332,7 @@ struct bpf_map_info { | |||
2332 | __u32 map_flags; | 2332 | __u32 map_flags; |
2333 | char name[BPF_OBJ_NAME_LEN]; | 2333 | char name[BPF_OBJ_NAME_LEN]; |
2334 | __u32 ifindex; | 2334 | __u32 ifindex; |
2335 | __u32 :32; | ||
2335 | __u64 netns_dev; | 2336 | __u64 netns_dev; |
2336 | __u64 netns_ino; | 2337 | __u64 netns_ino; |
2337 | __u32 btf_id; | 2338 | __u32 btf_id; |