author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>		2018-07-16 03:09:24 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>		2018-07-16 03:09:24 -0400
commit		500f0716b5f7fd6b0ff3d045588c7588ce2eee1d
tree		05cfa2e77069af443f06bcfbf327f9aa2050eec7
parent		4eb44f69e77141992e305d9e75e021b196071cdd
parent		9d3cce1e8b8561fed5f383d22a4d6949db4eadbe
Merge 4.18-rc5 into usb-next
We need the USB fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
415 files changed, 3904 insertions, 2059 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index efc7aa7a0670..533ff5c68970 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4846,3 +4846,8 @@
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+
+	xhci-hcd.quirks		[USB,KNL]
+			A hex value specifying bitmask with supplemental xhci
+			host controller quirks. Meaning of each bit can be
+			consulted in header drivers/usb/host/xhci.h.
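
The new parameter is passed on the kernel command line like any other
built-in module parameter; a hypothetical invocation (the bit value below
is made up for illustration — the real bit meanings are defined in
drivers/usb/host/xhci.h) would be:

	xhci-hcd.quirks=0x40
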
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 6c9c69ec3986..114c7ce7b58d 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -50,6 +50,11 @@ LDFLAGS_MODULE
 --------------------------------------------------
 Additional options used for $(LD) when linking modules.
 
+KBUILD_KCONFIG
+--------------------------------------------------
+Set the top-level Kconfig file to the value of this environment
+variable. The default name is "Kconfig".
+
 KBUILD_VERBOSE
 --------------------------------------------------
 Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -88,7 +93,8 @@ In most cases the name of the architecture is the same as the
 directory name found in the arch/ directory.
 But some architectures such as x86 and sparc have aliases.
 x86: i386 for 32 bit, x86_64 for 64 bit
-sparc: sparc for 32 bit, sparc64 for 64 bit
+sh: sh for 32 bit, sh64 for 64 bit
+sparc: sparc32 for 32 bit, sparc64 for 64 bit
 
 CROSS_COMPILE
 --------------------------------------------------
@@ -148,15 +154,6 @@ stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
 the default option --strip-debug will be used. Otherwise,
 INSTALL_MOD_STRIP value will be used as the options to the strip command.
 
-INSTALL_FW_PATH
---------------------------------------------------
-INSTALL_FW_PATH specifies where to install the firmware blobs.
-The default value is:
-
-	$(INSTALL_MOD_PATH)/lib/firmware
-
-The value can be overridden in which case the default value is ignored.
-
 INSTALL_HDR_PATH
 --------------------------------------------------
 INSTALL_HDR_PATH specifies where to install user space headers when
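
As a quick illustration of the KBUILD_KCONFIG variable documented above, a
build can point kconfig at an alternative top-level file; the file name
Kconfig.custom here is a hypothetical example:

	make KBUILD_KCONFIG=Kconfig.custom defconfig
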
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 7233118f3a05..68c82914c0f3 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -2,9 +2,9 @@ This file contains some assistance for using "make *config".
 
 Use "make help" to list all of the possible configuration targets.
 
-The xconfig ('qconf') and menuconfig ('mconf') programs also
-have embedded help text. Be sure to check it for navigation,
-search, and other general help text.
+The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf')
+programs also have embedded help text. Be sure to check that for
+navigation, search, and other general help text.
 
 ======================================================================
 General
@@ -17,13 +17,16 @@ this happens, using a previously working .config file and running
 for you, so you may find that you need to see what NEW kernel
 symbols have been introduced.
 
-To see a list of new config symbols when using "make oldconfig", use
+To see a list of new config symbols, use
 
 	cp user/some/old.config .config
 	make listnewconfig
 
 and the config program will list any new symbols, one per line.
 
+Alternatively, you can use the brute force method:
+
+	make oldconfig
 	scripts/diffconfig .config.old .config | less
 
 ______________________________________________________________________
@@ -160,7 +163,7 @@ Searching in menuconfig:
 		This lists all config symbols that contain "hotplug",
 		e.g., HOTPLUG_CPU, MEMORY_HOTPLUG.
 
-	For search help, enter / followed TAB-TAB-TAB (to highlight
+	For search help, enter / followed by TAB-TAB (to highlight
 	<Help>) and Enter. This will tell you that you can also use
 	regular expressions (regexes) in the search string, so if you
 	are not interested in MEMORY_HOTPLUG, you could try
@@ -203,6 +206,39 @@ Example:
 
 
 ======================================================================
+nconfig
+--------------------------------------------------
+
+nconfig is an alternate text-based configurator.  It lists function
+keys across the bottom of the terminal (window) that execute commands.
+You can also just use the corresponding numeric key to execute the
+commands unless you are in a data entry window.  E.g., instead of F6
+for Save, you can just press 6.
+
+Use F1 for Global help or F3 for the Short help menu.
+
+Searching in nconfig:
+
+	You can search either in the menu entry "prompt" strings
+	or in the configuration symbols.
+
+	Use / to begin a search through the menu entries.  This does
+	not support regular expressions.  Use <Down> or <Up> for
+	Next hit and Previous hit, respectively.  Use <Esc> to
+	terminate the search mode.
+
+	F8 (SymSearch) searches the configuration symbols for the
+	given string or regular expression (regex).
+
+NCONFIG_MODE
+--------------------------------------------------
+This mode shows all sub-menus in one large tree.
+
+Example:
+	make NCONFIG_MODE=single_menu nconfig
+
+
+======================================================================
 xconfig
 --------------------------------------------------
 
@@ -230,8 +266,7 @@ gconfig
 
 Searching in gconfig:
 
-	None (gconfig isn't maintained as well as xconfig or menuconfig);
-	however, gconfig does have a few more viewing choices than
-	xconfig does.
+	There is no search command in gconfig.  However, gconfig does
+	have several different viewing choices, modes, and options.
 
 ###
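
Putting the two approaches described in the updated text side by side, a
typical upgrade workflow might look like this (the old config path is
illustrative):

	cp /boot/config-old .config
	make listnewconfig                             # list new symbols only
	make oldconfig                                 # or answer the prompts, then:
	scripts/diffconfig .config.old .config | less
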
diff --git a/MAINTAINERS b/MAINTAINERS
index f35f39f2072e..f8c87c3b98c1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -581,7 +581,7 @@ W:	https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
 M:	David Airlie <airlied@linux.ie>
-T:	git git://people.freedesktop.org/~airlied/linux (part of drm maint)
+T:	git git://anongit.freedesktop.org/drm/drm
 S:	Maintained
 F:	drivers/char/agp/
 F:	include/linux/agp*
@@ -4460,6 +4460,7 @@ F:	Documentation/blockdev/drbd/
 
 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+R:	"Rafael J. Wysocki" <rafael@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 S:	Supported
 F:	Documentation/kobject.txt
@@ -4630,7 +4631,7 @@ F:	include/uapi/drm/vmwgfx_drm.h
 DRM DRIVERS
 M:	David Airlie <airlied@linux.ie>
 L:	dri-devel@lists.freedesktop.org
-T:	git git://people.freedesktop.org/~airlied/linux
+T:	git git://anongit.freedesktop.org/drm/drm
 B:	https://bugs.freedesktop.org/
 C:	irc://chat.freenode.net/dri-devel
 S:	Maintained
@@ -10213,11 +10214,13 @@ F:	sound/soc/codecs/sgtl5000*
 
 NXP TDA998X DRM DRIVER
 M:	Russell King <linux@armlinux.org.uk>
-S:	Supported
+S:	Maintained
 T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
 T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
 F:	drivers/gpu/drm/i2c/tda998x_drv.c
 F:	include/drm/i2c/tda998x.h
+F:	include/dt-bindings/display/tda998x.h
+K:	"nxp,tda998x"
 
 NXP TFA9879 DRIVER
 M:	Peter Rosin <peda@axentia.se>
@@ -11835,7 +11838,7 @@ S:	Supported
 F:	arch/hexagon/
 
 QUALCOMM HIDMA DRIVER
-M:	Sinan Kaya <okaya@codeaurora.org>
+M:	Sinan Kaya <okaya@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org
 L:	linux-arm-msm@vger.kernel.org
 L:	dmaengine@vger.kernel.org
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -353,9 +353,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
	  else if [ -x /bin/bash ]; then echo /bin/bash; \
	  else echo sh; fi ; fi)
 
-HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
-HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
-HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
 HOSTCC = gcc
 HOSTCXX = g++
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
-  CC_CAN_LINK := y
-  export CC_CAN_LINK
-endif
-
 # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
 # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
 # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
@@ -1717,6 +1712,6 @@ endif # skip-makefile
 PHONY += FORCE
 FORCE:
 
-# Declare the contents of the .PHONY variable as phony. We keep that
+# Declare the contents of the PHONY variable as phony. We keep that
 # information in a variable so we can use it in if_changed and friends.
 .PHONY: $(PHONY)
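
The getconf hunk above only redirects stderr; on a host whose getconf does
not recognize the LFS_* variables (a hypothetical case), the difference can
be seen with:

	getconf LFS_CFLAGS               # may print an error message on such hosts
	getconf LFS_CFLAGS 2>/dev/null   # error text is discarded, the result stays empty
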
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index f9e8667f5886..73b514dddf65 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -168,7 +168,6 @@
 			AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_dat3.mmc0_dat3 */
 			AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_cmd.mmc0_cmd */
 			AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_clk.mmc0_clk */
-			AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4)		/* mcasp0_aclkr.mmc0_sdwp */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index ca294914bbb1..23ea381d363f 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -39,6 +39,8 @@
 			ti,davinci-ctrl-ram-size = <0x2000>;
 			ti,davinci-rmii-en = /bits/ 8 <1>;
 			local-mac-address = [ 00 00 00 00 00 00 ];
+			clocks = <&emac_ick>;
+			clock-names = "ick";
 		};
 
 		davinci_mdio: ethernet@5c030000 {
@@ -49,6 +51,8 @@
 			bus_freq = <1000000>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			clocks = <&emac_fck>;
+			clock-names = "fck";
 		};
 
 		uart4: serial@4809e000 {
@@ -87,6 +91,11 @@
 	};
 };
 
+/* Table Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+	status = "disabled";
+};
+
 &iva {
 	status = "disabled";
 };
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 440351ad0b80..d4be3fd0b6f4 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -610,6 +610,8 @@
 
 		touchscreen-size-x = <480>;
 		touchscreen-size-y = <272>;
+
+		wakeup-source;
 	};
 
 	tlv320aic3106: tlv320aic3106@1b {
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 18edc9bc7927..929459c42760 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -547,7 +547,7 @@
 
 			thermal: thermal@e8078 {
 				compatible = "marvell,armada380-thermal";
-				reg = <0xe4078 0x4>, <0xe4074 0x4>;
+				reg = <0xe4078 0x4>, <0xe4070 0x8>;
 				status = "okay";
 			};
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 9dcd14edc202..e03495a799ce 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1580,7 +1580,6 @@
 				dr_mode = "otg";
 				snps,dis_u3_susphy_quirk;
 				snps,dis_u2_susphy_quirk;
-				snps,dis_metastability_quirk;
 			};
 		};
 
@@ -1608,6 +1607,7 @@
 				dr_mode = "otg";
 				snps,dis_u3_susphy_quirk;
 				snps,dis_u2_susphy_quirk;
+				snps,dis_metastability_quirk;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index df9eca94d812..8a878687197b 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -770,7 +770,7 @@
 
 		pinctrl_ts: tsgrp {
 			fsl,pins = <
-				MX51_PAD_CSI1_D8__GPIO3_12		0x85
+				MX51_PAD_CSI1_D8__GPIO3_12		0x04
 				MX51_PAD_CSI1_D9__GPIO3_13		0x85
 			>;
 		};
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 054591dc9a00..4cd2f4a2bff4 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f70507ab91ee..200ebda47e0c 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m
 CONFIG_USB_FUNCTIONFS=m
 CONFIG_USB_MASS_STORAGE=m
 CONFIG_USB_G_SERIAL=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
index 3c1e203e53b9..57caa742016e 100644
--- a/arch/arm/crypto/speck-neon-core.S
+++ b/arch/arm/crypto/speck-neon-core.S
@@ -272,9 +272,11 @@
	 * Allocate stack space to store 128 bytes worth of tweaks.  For
	 * performance, this space is aligned to a 16-byte boundary so that we
	 * can use the load/store instructions that declare 16-byte alignment.
+	 * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
	 */
-	sub		sp, #128
-	bic		sp, #0xf
+	sub		r12, sp, #128
+	bic		r12, #0xf
+	mov		sp, r12
 
 .if \n == 64
	// Load first tweak
diff --git a/arch/arm/firmware/Makefile b/arch/arm/firmware/Makefile
index a71f16536b6c..6e41336b0bc4 100644
--- a/arch/arm/firmware/Makefile
+++ b/arch/arm/firmware/Makefile
@@ -1 +1,4 @@
 obj-$(CONFIG_TRUSTED_FOUNDATIONS)	+= trusted_foundations.o
+
+# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
+KCOV_INSTRUMENT		:= n
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index dd546d65a383..7a9b86978ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -177,7 +177,7 @@ M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])
	bic	r0, r0, #CR_I
 #endif
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
-	isb
+	instr_sync
 #elif defined (CONFIG_CPU_V7M)
 #ifdef CONFIG_ARM_MPU
	ldreq	r3, [r12, MPU_CTRL]
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 69df3620eca5..1c73694c871a 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
 static inline void omap5_erratum_workaround_801819(void) { }
 #endif
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround for secondary Core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (kernel will detect a misconfiguration and complain
+ * if this is not done).
+ *
+ * In General Purpose(GP) devices, ACR bit settings can only be done
+ * by ROM code in "secure world" using the smc call and there is no
+ * option to update the "firmware" on such devices. This also works for
+ * High security(HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+	u32 acr, acr_mask;
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+	/*
+	 * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+	 */
+	acr_mask = BIT(0);
+
+	/* Do we already have it done.. if yes, skip expensive smc */
+	if ((acr & acr_mask) == acr_mask)
+		return;
+
+	acr |= acr_mask;
+	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+	pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+		 __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
	/*
@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
		set_cntfreq();
		/* Configure ACR to disable streaming WA for 801819 */
		omap5_erratum_workaround_801819();
+		/* Enable ACR to allow for ICUALLU workaround */
+		omap5_secondary_harden_predictor();
	}
 
	/*
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 9c10248fadcc..4e8c2116808e 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
 {
	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
		void __iomem *base = irq_base(i);
 
		saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
 {
	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
		void __iomem *base = irq_base(i);
 
		__raw_writel(saved_icmr[i], base + ICMR);
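
The arithmetic behind the DIV_ROUND_UP() change above, as a minimal
user-space C sketch (the value 34 is a made-up IRQ count, not taken from
the driver):

	#include <stdio.h>

	/* same rounding-up idiom as the kernel's DIV_ROUND_UP() macro */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int irq_nr = 34;	/* hypothetical: not a multiple of 32 */

		/* truncating division covers only one 32-bit register bank... */
		printf("plain division: %u bank(s)\n", irq_nr / 32);
		/* ...rounding up also saves/restores the partial last bank */
		printf("DIV_ROUND_UP:   %u bank(s)\n", DIV_ROUND_UP(irq_nr, 32));
		return 0;
	}
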
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c186474422f3..0cc8e04295a4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused)
	return 0;
 }
 
+static int kernel_set_to_readonly __read_mostly;
+
 void mark_rodata_ro(void)
 {
+	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
 }
 
 void set_kernel_text_rw(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
 }
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6e8b71613039..f6a62ae44a65 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
		/* there are 2 passes here */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-	set_memory_ro((unsigned long)header, header->pages);
+	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 45272266dafb..e7101b19d590 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux	:=-p --no-undefined -X
+LDFLAGS_vmlinux	:=--no-undefined -X
 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS		:=-9
 
@@ -60,15 +60,15 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__AARCH64EB__
 AS		+= -EB
-LD		+= -EB
-LDFLAGS		+= -maarch64linuxb
+# We must use the linux target here, since distributions don't tend to package
+# the ELF linker scripts with binutils, and this results in a build failure.
+LDFLAGS		+= -EB -maarch64linuxb
 UTS_MACHINE	:= aarch64_be
 else
 KBUILD_CPPFLAGS	+= -mlittle-endian
 CHECKFLAGS	+= -D__AARCH64EL__
 AS		+= -EL
-LD		+= -EL
-LDFLAGS		+= -maarch64linux
+LDFLAGS		+= -EL -maarch64linux # See comment above
 UTS_MACHINE	:= aarch64
 endif
 
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index fa8b3fe932e6..6495cc51246f 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
	/*
-	 * The raw_cpu_read() is racy if called with preemption enabled.
-	 * This is not a bug: kernel_neon_busy is only set when
-	 * preemption is disabled, so we cannot migrate to another CPU
-	 * while it is set, nor can we migrate to a CPU where it is set.
-	 * So, if we find it clear on some CPU then we're guaranteed to
-	 * find it clear on any CPU we could migrate to.
-	 *
-	 * If we are in between kernel_neon_begin()...kernel_neon_end(),
-	 * the flag will be set, but preemption is also disabled, so we
-	 * can't migrate to another CPU and spuriously see it become
-	 * false.
+	 * kernel_neon_busy is only set while preemption is disabled,
+	 * and is clear whenever preemption is enabled. Since
+	 * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+	 * cannot change under our feet -- if it's set we cannot be
+	 * migrated, and if it's clear we cannot be migrated to a CPU
+	 * where it is set.
	 */
	return !in_irq() && !irqs_disabled() && !in_nmi() &&
-		!raw_cpu_read(kernel_neon_busy);
+		!this_cpu_read(kernel_neon_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 8b707c249026..12fe700632f4 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
				  unsigned long address)
 {
+	pgtable_page_dtor(page);
	__free_page(page);
 }
 
@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
	return page;
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+	pgtable_page_dtor(page);
	__free_page(page);
 }
 
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8d85046adcc8..9670e70139fd 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-	struct pt_regs *regs;
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-	regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
 
-	if (regs)
-		show_regs(regs);
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
 
-	dump_stack();
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d67fa74622ee..8d505a21396e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1986e09fb457..1601d90b087b 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
	return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+			       void *arg)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pfn_valid(start_pfn + i) &&
+		    !PageReserved(pfn_to_page(start_pfn + i)))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+	unsigned long offset, pfn, last_pfn;
	struct vm_struct * area;
-	unsigned long offset;
	phys_addr_t last_addr;
	void * addr;
 
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
		return (void __iomem *) CKSEG1ADDR(phys_addr);
 
	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
+	 * Don't allow anybody to remap RAM that may be allocated by the page
+	 * allocator, since that could lead to races & data clobbering.
	 */
-	if (phys_addr < virt_to_phys(high_memory)) {
-		char *t_addr, *t_end;
-		struct page *page;
-
-		t_addr = __va(phys_addr);
-		t_end = t_addr + (size - 1);
-
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
-				return NULL;
+	pfn = PFN_DOWN(phys_addr);
+	last_pfn = PFN_DOWN(last_addr);
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
+		return NULL;
	}
 
	/*
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 3e1a46615120..8999b9226512 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
	__free_page(pte);
 }
 
+#define __pte_free_tlb(tlb, pte, addr)	\
+do {					\
+	pgtable_page_dtor(pte);		\
+	tlb_remove_page((tlb), (pte));	\
+} while (0)
 
-#define __pte_free_tlb(tlb, pte, addr)	tlb_remove_page((tlb), (pte))
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 #define check_pgt_cache()          do { } while (0)
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 690d55272ba6..0c826ad6e994 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
	l.addi	r3,r1,0			// pt_regs
	/* r4 set be EXCEPTION_HANDLE */	// effective address of fault
 
-	/*
-	 * __PHX__: TODO
-	 *
-	 * all this can be written much simpler. look at
-	 * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
-	 */
 #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
	l.lwz	r6,PT_PC(r3)		// address of an offending insn
	l.lwz	r6,0(r6)		// instruction that caused pf
@@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
 
 #else
 
-	l.lwz	r6,PT_SR(r3)		// SR
+	l.mfspr	r6,r0,SPR_SR		// SR
	l.andi	r6,r6,SPR_SR_DSX	// check for delay slot exception
	l.sfne	r6,r0			// exception happened in delay slot
	l.bnf	7f
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index fb02b2a1d6f2..9fc6b60140f0 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -210,8 +210,7 @@
  * r4  - EEAR     exception EA
  * r10 - current  pointing to current_thread_info struct
  * r12 - syscall  0, since we didn't come from syscall
- * r13 - temp     it actually contains new SR, not needed anymore
- * r31 - handler  address of the handler we'll jump to
+ * r30 - handler  address of the handler we'll jump to
  *
  * handler has to save remaining registers to the exception
  * ksp frame *before* tainting them!
@@ -244,6 +243,7 @@
	/* r1 is KSP, r30 is __pa(KSP) */		;\
	tophys	(r30,r1)				;\
	l.sw	PT_GPR12(r30),r12			;\
+	/* r4 use for tmp before EA */			;\
	l.mfspr	r12,r0,SPR_EPCR_BASE			;\
	l.sw	PT_PC(r30),r12				;\
	l.mfspr	r12,r0,SPR_ESR_BASE			;\
@@ -263,7 +263,10 @@
	/* r12 == 1 if we come from syscall */		;\
	CLEAR_GPR(r12)					;\
	/* ----- turn on MMU ----- */			;\
-	l.ori	r30,r0,(EXCEPTION_SR)			;\
+	/* Carry DSX into exception SR */		;\
+	l.mfspr	r30,r0,SPR_SR				;\
+	l.andi	r30,r30,SPR_SR_DSX			;\
+	l.ori	r30,r30,(EXCEPTION_SR)			;\
	l.mtspr	r0,r30,SPR_ESR_BASE			;\
	/* r30:	EA address of handler */		;\
	LOAD_SYMBOL_2_GPR(r30,handler)			;\
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index fac246e6f37a..d8981cbb852a 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
	return 0;
 }
 #else
-	return regs->sr & SPR_SR_DSX;
+	return mfspr(SPR_SR) & SPR_SR_DSX;
 #endif
 }
 
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index f12680c9b947..4764fdeb4f1f 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -107,6 +107,7 @@ config ARCH_RV32I
	select GENERIC_LIB_ASHLDI3
	select GENERIC_LIB_ASHRDI3
	select GENERIC_LIB_LSHRDI3
+	select GENERIC_LIB_UCMPDI2
 
 config ARCH_RV64I
	bool "RV64I"
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
index 5cae4c30cd8e..1e0dfc36aab9 100644
--- a/arch/riscv/include/uapi/asm/elf.h
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t;
 
 typedef union __riscv_fp_state elf_fpregset_t;
 
-#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
-#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info)
+#endif
 
 /*
  * RISC-V relocation types
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index b74cbfbce2d0..7bcdaed15703 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -16,10 +16,6 @@
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 
-#ifdef CONFIG_RISCV_INTC
-#include <linux/irqchip/irq-riscv-intc.h>
-#endif
-
 void __init init_IRQ(void)
 {
	irqchip_init();
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 1d5e9b934b8c..3303ed2cd419 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
 static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
				     Elf_Addr v)
 {
-	s64 offset = (void *)v - (void *)location;
+	ptrdiff_t offset = (void *)v - (void *)location;
	u32 imm12 = (offset & 0x1000) << (31 - 12);
	u32 imm11 = (offset & 0x800) >> (11 - 7);
	u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
@@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
				  Elf_Addr v)
 {
-	s64 offset = (void *)v - (void *)location;
+	ptrdiff_t offset = (void *)v - (void *)location;
	u32 imm20 = (offset & 0x100000) << (31 - 20);
	u32 imm19_12 = (offset & 0xff000);
	u32 imm11 = (offset & 0x800) << (20 - 11);
@@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
					 Elf_Addr v)
 {
-	s64 offset = (void *)v - (void *)location;
+	ptrdiff_t offset = (void *)v - (void *)location;
	u16 imm8 = (offset & 0x100) << (12 - 8);
	u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
	u16 imm5 = (offset & 0x20) >> (5 - 2);
@@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
				       Elf_Addr v)
 {
-	s64 offset = (void *)v - (void *)location;
+	ptrdiff_t offset = (void *)v - (void *)location;
	u16 imm11 = (offset & 0x800) << (12 - 11);
	u16 imm10 = (offset & 0x400) >> (10 - 8);
	u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
 static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
					 Elf_Addr v)
 {
-	s64 offset = (void *)v - (void *)location;
+	ptrdiff_t offset = (void *)v - (void *)location;
	s32 hi20;
 
	if (offset != (s32)offset) {
@@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
 static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
				       Elf_Addr v)
 {
-	s64 offset = (void *)v - (void *)location;
+	ptrdiff_t offset = (void *)v - (void *)location;
	s32 hi20;
 
	/* Always emit the got entry */
@@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
				       Elf_Addr v)
 {
-	s64 offset = (void *)v - (void *)location;
+	ptrdiff_t offset = (void *)v - (void *)location;
	s32 fill_v = offset;
	u32 hi20, lo12;
 
@@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_rela(struct module *me, u32 *location,
				   Elf_Addr v)
 {
-	s64 offset = (void *)v - (void *)location;
+	ptrdiff_t offset = (void *)v - (void *)location;
	s32 fill_v = offset;
	u32 hi20, lo12;
 
@@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
 static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
				    Elf_Addr v)
 {
-	*(u32 *)location += (*(u32 *)v);
+	*(u32 *)location += (u32)v;
	return 0;
 }
 
 static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
				    Elf_Addr v)
 {
-	*(u32 *)location -= (*(u32 *)v);
+	*(u32 *)location -= (u32)v;
	return 0;
 }
 
@@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		unsigned int j;
 
		for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
-			u64 hi20_loc =
+			unsigned long hi20_loc =
				sechdrs[sechdrs[relsec].sh_info].sh_addr
				+ rel[j].r_offset;
			u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
@@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
			Elf_Sym *hi20_sym =
				(Elf_Sym *)sechdrs[symindex].sh_addr
				+ ELF_RISCV_R_SYM(rel[j].r_info);
-			u64 hi20_sym_val =
+			unsigned long hi20_sym_val =
				hi20_sym->st_value
				+ rel[j].r_addend;
 
			/* Calculate lo12 */
-			u64 offset = hi20_sym_val - hi20_loc;
+			size_t offset = hi20_sym_val - hi20_loc;
			if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
			    && hi20_type == R_RISCV_GOT_HI20) {
				offset = module_emit_got_entry(
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index ba3e80712797..9f82a7e34c64 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target,
	struct pt_regs *regs;
 
	regs = task_pt_regs(target);
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
	return ret;
 }
 
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index ee44a48faf79..f0d2070866d4 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p)
	riscv_fill_hwcap();
 }
 
-static int __init riscv_device_init(void)
-{
-	return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-subsys_initcall_sync(riscv_device_init);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index c77df8142be2..58a522f9bcc3 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c | |||
@@ -28,7 +28,9 @@ static void __init zone_sizes_init(void) | |||
28 | { | 28 | { |
29 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; | 29 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; |
30 | 30 | ||
31 | #ifdef CONFIG_ZONE_DMA32 | ||
31 | max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); | 32 | max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); |
33 | #endif | ||
32 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 34 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
33 | 35 | ||
34 | free_area_init_nodes(max_zone_pfns); | 36 | free_area_init_nodes(max_zone_pfns); |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index baed39772c84..e44bb2b2873e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -160,6 +160,7 @@ config S390 | |||
160 | select HAVE_OPROFILE | 160 | select HAVE_OPROFILE |
161 | select HAVE_PERF_EVENTS | 161 | select HAVE_PERF_EVENTS |
162 | select HAVE_REGS_AND_STACK_ACCESS_API | 162 | select HAVE_REGS_AND_STACK_ACCESS_API |
163 | select HAVE_RSEQ | ||
163 | select HAVE_SYSCALL_TRACEPOINTS | 164 | select HAVE_SYSCALL_TRACEPOINTS |
164 | select HAVE_VIRT_CPU_ACCOUNTING | 165 | select HAVE_VIRT_CPU_ACCOUNTING |
165 | select MODULES_USE_ELF_RELA | 166 | select MODULES_USE_ELF_RELA |
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 607c5e9fba3d..2ce28bf0c5ec 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c | |||
@@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); | |||
183 | COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); | 183 | COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); |
184 | COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); | 184 | COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); |
185 | COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) | 185 | COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) |
186 | COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig) | ||
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index f03402efab4b..150130c897c3 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -357,6 +357,10 @@ ENTRY(system_call) | |||
357 | stg %r2,__PT_R2(%r11) # store return value | 357 | stg %r2,__PT_R2(%r11) # store return value |
358 | 358 | ||
359 | .Lsysc_return: | 359 | .Lsysc_return: |
360 | #ifdef CONFIG_DEBUG_RSEQ | ||
361 | lgr %r2,%r11 | ||
362 | brasl %r14,rseq_syscall | ||
363 | #endif | ||
360 | LOCKDEP_SYS_EXIT | 364 | LOCKDEP_SYS_EXIT |
361 | .Lsysc_tif: | 365 | .Lsysc_tif: |
362 | TSTMSK __PT_FLAGS(%r11),_PIF_WORK | 366 | TSTMSK __PT_FLAGS(%r11),_PIF_WORK |
@@ -1265,7 +1269,7 @@ cleanup_critical: | |||
1265 | jl 0f | 1269 | jl 0f |
1266 | clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end | 1270 | clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end |
1267 | jl .Lcleanup_load_fpu_regs | 1271 | jl .Lcleanup_load_fpu_regs |
1268 | 0: BR_EX %r14 | 1272 | 0: BR_EX %r14,%r11 |
1269 | 1273 | ||
1270 | .align 8 | 1274 | .align 8 |
1271 | .Lcleanup_table: | 1275 | .Lcleanup_table: |
@@ -1301,7 +1305,7 @@ cleanup_critical: | |||
1301 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE | 1305 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE |
1302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 1306 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
1303 | larl %r9,sie_exit # skip forward to sie_exit | 1307 | larl %r9,sie_exit # skip forward to sie_exit |
1304 | BR_EX %r14 | 1308 | BR_EX %r14,%r11 |
1305 | #endif | 1309 | #endif |
1306 | 1310 | ||
1307 | .Lcleanup_system_call: | 1311 | .Lcleanup_system_call: |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 2d2960ab3e10..22f08245aa5d 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs) | |||
498 | } | 498 | } |
499 | /* No longer in a system call */ | 499 | /* No longer in a system call */ |
500 | clear_pt_regs_flag(regs, PIF_SYSCALL); | 500 | clear_pt_regs_flag(regs, PIF_SYSCALL); |
501 | 501 | rseq_signal_deliver(&ksig, regs); | |
502 | if (is_compat_task()) | 502 | if (is_compat_task()) |
503 | handle_signal32(&ksig, oldset, regs); | 503 | handle_signal32(&ksig, oldset, regs); |
504 | else | 504 | else |
@@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs) | |||
537 | { | 537 | { |
538 | clear_thread_flag(TIF_NOTIFY_RESUME); | 538 | clear_thread_flag(TIF_NOTIFY_RESUME); |
539 | tracehook_notify_resume(regs); | 539 | tracehook_notify_resume(regs); |
540 | rseq_handle_notify_resume(NULL, regs); | ||
540 | } | 541 | } |
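Wiring rseq into an architecture follows the same three-point pattern on every port: notify rseq when a signal is delivered, revalidate on TIF_NOTIFY_RESUME, and (with CONFIG_DEBUG_RSEQ) check the critical section on syscall exit, which is what the entry.S hunk above adds for s390. A schematic sketch of where the generic hooks sit (rseq_signal_deliver() and rseq_handle_notify_resume() are the real helpers from linux/sched.h; the surrounding function names are illustrative, not the s390 ones):

    #include <linux/sched.h>
    #include <linux/tracehook.h>

    /* Schematic arch glue; the real s390 code lives in signal.c and entry.S. */
    static void arch_deliver_signal(struct ksignal *ksig, struct pt_regs *regs)
    {
            rseq_signal_deliver(ksig, regs);   /* fix up rseq state before the handler runs */
            /* ... set up the signal frame ... */
    }

    static void arch_notify_resume(struct pt_regs *regs)
    {
            tracehook_notify_resume(regs);
            rseq_handle_notify_resume(NULL, regs); /* NULL: act on the current task */
    }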
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 8b210ead7956..022fc099b628 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl | |||
@@ -389,3 +389,5 @@ | |||
389 | 379 common statx sys_statx compat_sys_statx | 389 | 379 common statx sys_statx compat_sys_statx |
390 | 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi | 390 | 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi |
391 | 381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load | 391 | 381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load |
392 | 382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents | ||
393 | 383 common rseq sys_rseq compat_sys_rseq | ||
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 84bd6329a88d..e3bd5627afef 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c | |||
@@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
252 | spin_unlock_bh(&mm->context.lock); | 252 | spin_unlock_bh(&mm->context.lock); |
253 | if (mask != 0) | 253 | if (mask != 0) |
254 | return; | 254 | return; |
255 | } else { | ||
256 | atomic_xor_bits(&page->_refcount, 3U << 24); | ||
255 | } | 257 | } |
256 | 258 | ||
257 | pgtable_page_dtor(page); | 259 | pgtable_page_dtor(page); |
@@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table) | |||
304 | break; | 306 | break; |
305 | /* fallthrough */ | 307 | /* fallthrough */ |
306 | case 3: /* 4K page table with pgstes */ | 308 | case 3: /* 4K page table with pgstes */ |
309 | if (mask & 3) | ||
310 | atomic_xor_bits(&page->_refcount, 3 << 24); | ||
307 | pgtable_page_dtor(page); | 311 | pgtable_page_dtor(page); |
308 | __free_page(page); | 312 | __free_page(page); |
309 | break; | 313 | break; |
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index d2db8acb1a55..5f0234ec8038 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) | |||
1286 | goto free_addrs; | 1286 | goto free_addrs; |
1287 | } | 1287 | } |
1288 | if (bpf_jit_prog(&jit, fp)) { | 1288 | if (bpf_jit_prog(&jit, fp)) { |
1289 | bpf_jit_binary_free(header); | ||
1289 | fp = orig_fp; | 1290 | fp = orig_fp; |
1290 | goto free_addrs; | 1291 | goto free_addrs; |
1291 | } | 1292 | } |
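The one-line s390 BPF JIT change plugs a leak: once the binary header has been allocated, every later failure path must release it before falling back to the interpreter. The same error-path discipline in a generic, userspace-runnable form (allocation sizes and failure point are illustrative):

    #include <stdlib.h>

    struct image { char *buf; };

    static int build(struct image *img) { return -1; /* pretend codegen failed */ }

    int jit_compile(void)
    {
            struct image *img = malloc(sizeof(*img));

            if (!img)
                    return -1;
            img->buf = malloc(4096);
            if (!img->buf) {
                    free(img);
                    return -1;
            }
            if (build(img)) {
                    /* the fix: free what was already allocated before bailing out */
                    free(img->buf);
                    free(img);
                    return -1;
            }
            return 0;
    }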
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index e57665b4ba1c..e98522ea6f09 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -114,18 +114,12 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) | |||
114 | struct pci_setup_rom *rom = NULL; | 114 | struct pci_setup_rom *rom = NULL; |
115 | efi_status_t status; | 115 | efi_status_t status; |
116 | unsigned long size; | 116 | unsigned long size; |
117 | uint64_t attributes, romsize; | 117 | uint64_t romsize; |
118 | void *romimage; | 118 | void *romimage; |
119 | 119 | ||
120 | status = efi_call_proto(efi_pci_io_protocol, attributes, pci, | ||
121 | EfiPciIoAttributeOperationGet, 0ULL, | ||
122 | &attributes); | ||
123 | if (status != EFI_SUCCESS) | ||
124 | return status; | ||
125 | |||
126 | /* | 120 | /* |
127 | * Some firmware images contain EFI function pointers at the place where the | 121 | * Some firmware images contain EFI function pointers at the place where |
128 | * romimage and romsize fields are supposed to be. Typically the EFI | 122 | * the romimage and romsize fields are supposed to be. Typically the EFI |
129 | * code is mapped at high addresses, translating to an unrealistically | 123 | * code is mapped at high addresses, translating to an unrealistically |
130 | * large romsize. The UEFI spec limits the size of option ROMs to 16 | 124 | * large romsize. The UEFI spec limits the size of option ROMs to 16 |
131 | * MiB so we reject any ROMs over 16 MiB in size to catch this. | 125 | * MiB so we reject any ROMs over 16 MiB in size to catch this. |
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index 9254e0b6cc06..717bf0776421 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S | |||
@@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail) | |||
535 | movdqu STATE3, 0x40(STATEP) | 535 | movdqu STATE3, 0x40(STATEP) |
536 | 536 | ||
537 | FRAME_END | 537 | FRAME_END |
538 | ret | ||
538 | ENDPROC(crypto_aegis128_aesni_enc_tail) | 539 | ENDPROC(crypto_aegis128_aesni_enc_tail) |
539 | 540 | ||
540 | .macro decrypt_block a s0 s1 s2 s3 s4 i | 541 | .macro decrypt_block a s0 s1 s2 s3 s4 i |
diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S index 9263c344f2c7..4eda2b8db9e1 100644 --- a/arch/x86/crypto/aegis128l-aesni-asm.S +++ b/arch/x86/crypto/aegis128l-aesni-asm.S | |||
@@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail) | |||
645 | state_store0 | 645 | state_store0 |
646 | 646 | ||
647 | FRAME_END | 647 | FRAME_END |
648 | ret | ||
648 | ENDPROC(crypto_aegis128l_aesni_enc_tail) | 649 | ENDPROC(crypto_aegis128l_aesni_enc_tail) |
649 | 650 | ||
650 | /* | 651 | /* |
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S index 1d977d515bf9..32aae8397268 100644 --- a/arch/x86/crypto/aegis256-aesni-asm.S +++ b/arch/x86/crypto/aegis256-aesni-asm.S | |||
@@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail) | |||
543 | state_store0 | 543 | state_store0 |
544 | 544 | ||
545 | FRAME_END | 545 | FRAME_END |
546 | ret | ||
546 | ENDPROC(crypto_aegis256_aesni_enc_tail) | 547 | ENDPROC(crypto_aegis256_aesni_enc_tail) |
547 | 548 | ||
548 | /* | 549 | /* |
diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S index 37d422e77931..07653d4582a6 100644 --- a/arch/x86/crypto/morus1280-avx2-asm.S +++ b/arch/x86/crypto/morus1280-avx2-asm.S | |||
@@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail) | |||
453 | vmovdqu STATE4, (4 * 32)(%rdi) | 453 | vmovdqu STATE4, (4 * 32)(%rdi) |
454 | 454 | ||
455 | FRAME_END | 455 | FRAME_END |
456 | ret | ||
456 | ENDPROC(crypto_morus1280_avx2_enc_tail) | 457 | ENDPROC(crypto_morus1280_avx2_enc_tail) |
457 | 458 | ||
458 | /* | 459 | /* |
diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S index 1fe637c7be9d..bd1aa1b60869 100644 --- a/arch/x86/crypto/morus1280-sse2-asm.S +++ b/arch/x86/crypto/morus1280-sse2-asm.S | |||
@@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail) | |||
652 | movdqu STATE4_HI, (9 * 16)(%rdi) | 652 | movdqu STATE4_HI, (9 * 16)(%rdi) |
653 | 653 | ||
654 | FRAME_END | 654 | FRAME_END |
655 | ret | ||
655 | ENDPROC(crypto_morus1280_sse2_enc_tail) | 656 | ENDPROC(crypto_morus1280_sse2_enc_tail) |
656 | 657 | ||
657 | /* | 658 | /* |
diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S index 71c72a0a0862..efa02816d921 100644 --- a/arch/x86/crypto/morus640-sse2-asm.S +++ b/arch/x86/crypto/morus640-sse2-asm.S | |||
@@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail) | |||
437 | movdqu STATE4, (4 * 16)(%rdi) | 437 | movdqu STATE4, (4 * 16)(%rdi) |
438 | 438 | ||
439 | FRAME_END | 439 | FRAME_END |
440 | ret | ||
440 | ENDPROC(crypto_morus640_sse2_enc_tail) | 441 | ENDPROC(crypto_morus640_sse2_enc_tail) |
441 | 442 | ||
442 | /* | 443 | /* |
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index f68855499391..402338365651 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c | |||
@@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) | |||
114 | ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; | 114 | ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; |
115 | nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); | 115 | nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); |
116 | } | 116 | } |
117 | if (nr_bank < 0) | ||
118 | goto ipi_mask_ex_done; | ||
117 | if (!nr_bank) | 119 | if (!nr_bank) |
118 | ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; | 120 | ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; |
119 | 121 | ||
@@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) | |||
158 | 160 | ||
159 | for_each_cpu(cur_cpu, mask) { | 161 | for_each_cpu(cur_cpu, mask) { |
160 | vcpu = hv_cpu_number_to_vp_number(cur_cpu); | 162 | vcpu = hv_cpu_number_to_vp_number(cur_cpu); |
163 | if (vcpu == VP_INVAL) | ||
164 | goto ipi_mask_done; | ||
165 | |||
161 | /* | 166 | /* |
162 | * This particular version of the IPI hypercall can | 167 | * This particular version of the IPI hypercall can |
163 | * only target up to 64 CPUs. | 168 | * only target up to 64 CPUs. |
163 | * only target up to 64 CPUs. | 168 | * only target up to 64 CPUs. |
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 4c431e1c1eff..1ff420217298 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c | |||
@@ -265,7 +265,7 @@ void __init hyperv_init(void) | |||
265 | { | 265 | { |
266 | u64 guest_id, required_msrs; | 266 | u64 guest_id, required_msrs; |
267 | union hv_x64_msr_hypercall_contents hypercall_msr; | 267 | union hv_x64_msr_hypercall_contents hypercall_msr; |
268 | int cpuhp; | 268 | int cpuhp, i; |
269 | 269 | ||
270 | if (x86_hyper_type != X86_HYPER_MS_HYPERV) | 270 | if (x86_hyper_type != X86_HYPER_MS_HYPERV) |
271 | return; | 271 | return; |
@@ -293,6 +293,9 @@ void __init hyperv_init(void) | |||
293 | if (!hv_vp_index) | 293 | if (!hv_vp_index) |
294 | return; | 294 | return; |
295 | 295 | ||
296 | for (i = 0; i < num_possible_cpus(); i++) | ||
297 | hv_vp_index[i] = VP_INVAL; | ||
298 | |||
296 | hv_vp_assist_page = kcalloc(num_possible_cpus(), | 299 | hv_vp_assist_page = kcalloc(num_possible_cpus(), |
297 | sizeof(*hv_vp_assist_page), GFP_KERNEL); | 300 | sizeof(*hv_vp_assist_page), GFP_KERNEL); |
298 | if (!hv_vp_assist_page) { | 301 | if (!hv_vp_assist_page) { |
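The Hyper-V hunks introduce VP_INVAL (U32_MAX) as a sentinel: hv_vp_index is pre-filled with it, and both the IPI fast path and cpumask_to_vpset() bail out when they see it, so a CPU whose VP index has not yet been reported can no longer be turned into a bogus hypercall argument. The pattern in isolation, as a hedged userspace sketch (array name and sizing are illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    #define VP_INVAL UINT32_MAX

    static uint32_t *vp_index;
    static size_t nr_cpus;

    static int vp_table_init(size_t cpus)
    {
            size_t i;

            vp_index = malloc(cpus * sizeof(*vp_index));
            if (!vp_index)
                    return -1;
            nr_cpus = cpus;
            for (i = 0; i < cpus; i++)
                    vp_index[i] = VP_INVAL; /* "unknown" until the hypervisor reports it */
            return 0;
    }

    static int cpu_to_vp(size_t cpu, uint32_t *vp)
    {
            if (cpu >= nr_cpus || vp_index[cpu] == VP_INVAL)
                    return -1;      /* caller must fall back to the slow path */
            *vp = vp_index[cpu];
            return 0;
    }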
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 219faaec51df..990770f9e76b 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h | |||
@@ -46,6 +46,65 @@ | |||
46 | #define _ASM_SI __ASM_REG(si) | 46 | #define _ASM_SI __ASM_REG(si) |
47 | #define _ASM_DI __ASM_REG(di) | 47 | #define _ASM_DI __ASM_REG(di) |
48 | 48 | ||
49 | #ifndef __x86_64__ | ||
50 | /* 32 bit */ | ||
51 | |||
52 | #define _ASM_ARG1 _ASM_AX | ||
53 | #define _ASM_ARG2 _ASM_DX | ||
54 | #define _ASM_ARG3 _ASM_CX | ||
55 | |||
56 | #define _ASM_ARG1L eax | ||
57 | #define _ASM_ARG2L edx | ||
58 | #define _ASM_ARG3L ecx | ||
59 | |||
60 | #define _ASM_ARG1W ax | ||
61 | #define _ASM_ARG2W dx | ||
62 | #define _ASM_ARG3W cx | ||
63 | |||
64 | #define _ASM_ARG1B al | ||
65 | #define _ASM_ARG2B dl | ||
66 | #define _ASM_ARG3B cl | ||
67 | |||
68 | #else | ||
69 | /* 64 bit */ | ||
70 | |||
71 | #define _ASM_ARG1 _ASM_DI | ||
72 | #define _ASM_ARG2 _ASM_SI | ||
73 | #define _ASM_ARG3 _ASM_DX | ||
74 | #define _ASM_ARG4 _ASM_CX | ||
75 | #define _ASM_ARG5 r8 | ||
76 | #define _ASM_ARG6 r9 | ||
77 | |||
78 | #define _ASM_ARG1Q rdi | ||
79 | #define _ASM_ARG2Q rsi | ||
80 | #define _ASM_ARG3Q rdx | ||
81 | #define _ASM_ARG4Q rcx | ||
82 | #define _ASM_ARG5Q r8 | ||
83 | #define _ASM_ARG6Q r9 | ||
84 | |||
85 | #define _ASM_ARG1L edi | ||
86 | #define _ASM_ARG2L esi | ||
87 | #define _ASM_ARG3L edx | ||
88 | #define _ASM_ARG4L ecx | ||
89 | #define _ASM_ARG5L r8d | ||
90 | #define _ASM_ARG6L r9d | ||
91 | |||
92 | #define _ASM_ARG1W di | ||
93 | #define _ASM_ARG2W si | ||
94 | #define _ASM_ARG3W dx | ||
95 | #define _ASM_ARG4W cx | ||
96 | #define _ASM_ARG5W r8w | ||
97 | #define _ASM_ARG6W r9w | ||
98 | |||
99 | #define _ASM_ARG1B dil | ||
100 | #define _ASM_ARG2B sil | ||
101 | #define _ASM_ARG3B dl | ||
102 | #define _ASM_ARG4B cl | ||
103 | #define _ASM_ARG5B r8b | ||
104 | #define _ASM_ARG6B r9b | ||
105 | |||
106 | #endif | ||
107 | |||
49 | /* | 108 | /* |
50 | * Macros to generate condition code outputs from inline assembly, | 109 | * Macros to generate condition code outputs from inline assembly, |
51 | * The output operand must be type "bool". | 110 | * The output operand must be type "bool". |
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 89f08955fff7..c4fc17220df9 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
@@ -13,7 +13,7 @@ | |||
13 | * Interrupt control: | 13 | * Interrupt control: |
14 | */ | 14 | */ |
15 | 15 | ||
16 | static inline unsigned long native_save_fl(void) | 16 | extern inline unsigned long native_save_fl(void) |
17 | { | 17 | { |
18 | unsigned long flags; | 18 | unsigned long flags; |
19 | 19 | ||
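Turning native_save_fl() from static inline into extern inline relies on GNU inline semantics: the header body may still be inlined at call sites, but it never emits an out-of-line symbol, so a single external definition (the new arch/x86/kernel/irqflags.S added below) must supply a real, ABI-clean function that code taking its address, such as the paravirt ops, can call. A minimal two-translation-unit illustration of those semantics (file and function names hypothetical):

    /* flags.h -- may be inlined wherever it is included; emits no symbol */
    extern inline __attribute__((gnu_inline)) unsigned long read_flags(void)
    {
            return 0;       /* stand-in for the pushf/pop sequence */
    }

    /* flags.c -- does NOT include flags.h; it supplies the one real symbol
     * that the linker resolves for non-inlined calls and address-of uses. */
    unsigned long read_flags(void)
    {
            return 0;
    }

In the kernel the out-of-line copy is written in assembly so it carries no stack protector or instrumentation, which is why the Makefile later gains irqflags.o.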
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 3cd14311edfa..5a7375ed5f7c 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <asm/hyperv-tlfs.h> | 9 | #include <asm/hyperv-tlfs.h> |
10 | #include <asm/nospec-branch.h> | 10 | #include <asm/nospec-branch.h> |
11 | 11 | ||
12 | #define VP_INVAL U32_MAX | ||
13 | |||
12 | struct ms_hyperv_info { | 14 | struct ms_hyperv_info { |
13 | u32 features; | 15 | u32 features; |
14 | u32 misc_features; | 16 | u32 misc_features; |
@@ -20,7 +22,6 @@ struct ms_hyperv_info { | |||
20 | 22 | ||
21 | extern struct ms_hyperv_info ms_hyperv; | 23 | extern struct ms_hyperv_info ms_hyperv; |
22 | 24 | ||
23 | |||
24 | /* | 25 | /* |
25 | * Generate the guest ID. | 26 | * Generate the guest ID. |
26 | */ | 27 | */ |
@@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, | |||
281 | */ | 282 | */ |
282 | for_each_cpu(cpu, cpus) { | 283 | for_each_cpu(cpu, cpus) { |
283 | vcpu = hv_cpu_number_to_vp_number(cpu); | 284 | vcpu = hv_cpu_number_to_vp_number(cpu); |
285 | if (vcpu == VP_INVAL) | ||
286 | return -1; | ||
284 | vcpu_bank = vcpu / 64; | 287 | vcpu_bank = vcpu / 64; |
285 | vcpu_offset = vcpu % 64; | 288 | vcpu_offset = vcpu % 64; |
286 | __set_bit(vcpu_offset, (unsigned long *) | 289 | __set_bit(vcpu_offset, (unsigned long *) |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 02d6f5cf4e70..8824d01c0c35 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -61,6 +61,7 @@ obj-y += alternative.o i8253.o hw_breakpoint.o | |||
61 | obj-y += tsc.o tsc_msr.o io_delay.o rtc.o | 61 | obj-y += tsc.o tsc_msr.o io_delay.o rtc.o |
62 | obj-y += pci-iommu_table.o | 62 | obj-y += pci-iommu_table.o |
63 | obj-y += resource.o | 63 | obj-y += resource.o |
64 | obj-y += irqflags.o | ||
64 | 65 | ||
65 | obj-y += process.o | 66 | obj-y += process.o |
66 | obj-y += fpu/ | 67 | obj-y += fpu/ |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 082d7875cef8..38915fbfae73 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -543,7 +543,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) | |||
543 | nodes_per_socket = ((value >> 3) & 7) + 1; | 543 | nodes_per_socket = ((value >> 3) & 7) + 1; |
544 | } | 544 | } |
545 | 545 | ||
546 | if (c->x86 >= 0x15 && c->x86 <= 0x17) { | 546 | if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && |
547 | !boot_cpu_has(X86_FEATURE_VIRT_SSBD) && | ||
548 | c->x86 >= 0x15 && c->x86 <= 0x17) { | ||
547 | unsigned int bit; | 549 | unsigned int bit; |
548 | 550 | ||
549 | switch (c->x86) { | 551 | switch (c->x86) { |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 404df26b7de8..5c0ea39311fe 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -155,7 +155,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) | |||
155 | guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; | 155 | guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; |
156 | 156 | ||
157 | /* SSBD controlled in MSR_SPEC_CTRL */ | 157 | /* SSBD controlled in MSR_SPEC_CTRL */ |
158 | if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) | 158 | if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || |
159 | static_cpu_has(X86_FEATURE_AMD_SSBD)) | ||
159 | hostval |= ssbd_tif_to_spec_ctrl(ti->flags); | 160 | hostval |= ssbd_tif_to_spec_ctrl(ti->flags); |
160 | 161 | ||
161 | if (hostval != guestval) { | 162 | if (hostval != guestval) { |
@@ -533,9 +534,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) | |||
533 | * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may | 534 | * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may |
534 | * use a completely different MSR and bit dependent on family. | 535 | * use a completely different MSR and bit dependent on family. |
535 | */ | 536 | */ |
536 | if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) | 537 | if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && |
538 | !static_cpu_has(X86_FEATURE_AMD_SSBD)) { | ||
537 | x86_amd_ssb_disable(); | 539 | x86_amd_ssb_disable(); |
538 | else { | 540 | } else { |
539 | x86_spec_ctrl_base |= SPEC_CTRL_SSBD; | 541 | x86_spec_ctrl_base |= SPEC_CTRL_SSBD; |
540 | x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; | 542 | x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; |
541 | wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); | 543 | wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); |
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 4021d3859499..40eee6cc4124 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
106 | 106 | ||
107 | memset(line, 0, LINE_SIZE); | 107 | memset(line, 0, LINE_SIZE); |
108 | 108 | ||
109 | length = strncpy_from_user(line, buf, LINE_SIZE - 1); | 109 | len = min_t(size_t, len, LINE_SIZE - 1); |
110 | length = strncpy_from_user(line, buf, len); | ||
110 | if (length < 0) | 111 | if (length < 0) |
111 | return length; | 112 | return length; |
112 | 113 | ||
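The mtrr fix is the standard "clamp before copy" idiom: len comes straight from user space, so it has to be bounded by the destination buffer before being handed to strncpy_from_user(); the earlier memset() keeps the result NUL-terminated even when the clamp kicks in. The same idea in a self-contained sketch (the copy helper stands in for strncpy_from_user and is not the real API):

    #include <string.h>

    #define LINE_SIZE 80

    /* stand-in for strncpy_from_user(): copies at most n bytes, returns length */
    static long copy_line(char *dst, const char *src, size_t n)
    {
            size_t i;

            for (i = 0; i < n && src[i]; i++)
                    dst[i] = src[i];
            return (long)i;
    }

    long parse_user_line(const char *ubuf, size_t len)
    {
            char line[LINE_SIZE];

            memset(line, 0, LINE_SIZE);                       /* guarantees termination */
            len = len < LINE_SIZE - 1 ? len : LINE_SIZE - 1;  /* the fix: bound by the buffer */
            return copy_line(line, ubuf, len);
    }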
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S new file mode 100644 index 000000000000..ddeeaac8adda --- /dev/null +++ b/arch/x86/kernel/irqflags.S | |||
@@ -0,0 +1,26 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #include <asm/asm.h> | ||
4 | #include <asm/export.h> | ||
5 | #include <linux/linkage.h> | ||
6 | |||
7 | /* | ||
8 | * unsigned long native_save_fl(void) | ||
9 | */ | ||
10 | ENTRY(native_save_fl) | ||
11 | pushf | ||
12 | pop %_ASM_AX | ||
13 | ret | ||
14 | ENDPROC(native_save_fl) | ||
15 | EXPORT_SYMBOL(native_save_fl) | ||
16 | |||
17 | /* | ||
18 | * void native_restore_fl(unsigned long flags) | ||
19 | * %eax/%rdi: flags | ||
20 | */ | ||
21 | ENTRY(native_restore_fl) | ||
22 | push %_ASM_ARG1 | ||
23 | popf | ||
24 | ret | ||
25 | ENDPROC(native_restore_fl) | ||
26 | EXPORT_SYMBOL(native_restore_fl) | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c2f7d1d2a5c3..db9656e13ea0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused) | |||
221 | #ifdef CONFIG_X86_32 | 221 | #ifdef CONFIG_X86_32 |
222 | /* switch away from the initial page table */ | 222 | /* switch away from the initial page table */ |
223 | load_cr3(swapper_pg_dir); | 223 | load_cr3(swapper_pg_dir); |
224 | /* | ||
225 | * Initialize the CR4 shadow before doing anything that could | ||
226 | * try to read it. | ||
227 | */ | ||
228 | cr4_init_shadow(); | ||
224 | __flush_tlb_all(); | 229 | __flush_tlb_all(); |
225 | #endif | 230 | #endif |
226 | load_current_idt(); | 231 | load_current_idt(); |
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 2e9ee023e6bc..81a8e33115ad 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile | |||
@@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string | |||
6 | targets += $(purgatory-y) | 6 | targets += $(purgatory-y) |
7 | PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) | 7 | PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) |
8 | 8 | ||
9 | $(obj)/sha256.o: $(srctree)/lib/sha256.c | 9 | $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE |
10 | $(call if_changed_rule,cc_o_c) | 10 | $(call if_changed_rule,cc_o_c) |
11 | 11 | ||
12 | LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib | 12 | LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib |
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 8d4e2e1ae60b..439a94bf89ad 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c | |||
@@ -1207,12 +1207,20 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1207 | 1207 | ||
1208 | xen_setup_features(); | 1208 | xen_setup_features(); |
1209 | 1209 | ||
1210 | xen_setup_machphys_mapping(); | ||
1211 | |||
1212 | /* Install Xen paravirt ops */ | 1210 | /* Install Xen paravirt ops */ |
1213 | pv_info = xen_info; | 1211 | pv_info = xen_info; |
1214 | pv_init_ops.patch = paravirt_patch_default; | 1212 | pv_init_ops.patch = paravirt_patch_default; |
1215 | pv_cpu_ops = xen_cpu_ops; | 1213 | pv_cpu_ops = xen_cpu_ops; |
1214 | xen_init_irq_ops(); | ||
1215 | |||
1216 | /* | ||
1217 | * Setup xen_vcpu early because it is needed for | ||
1218 | * local_irq_disable(), irqs_disabled(), e.g. in printk(). | ||
1219 | * | ||
1220 | * Don't do the full vcpu_info placement stuff until we have | ||
1221 | * the cpu_possible_mask and a non-dummy shared_info. | ||
1222 | */ | ||
1223 | xen_vcpu_info_reset(0); | ||
1216 | 1224 | ||
1217 | x86_platform.get_nmi_reason = xen_get_nmi_reason; | 1225 | x86_platform.get_nmi_reason = xen_get_nmi_reason; |
1218 | 1226 | ||
@@ -1225,10 +1233,12 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1225 | * Set up some pagetable state before starting to set any ptes. | 1233 | * Set up some pagetable state before starting to set any ptes. |
1226 | */ | 1234 | */ |
1227 | 1235 | ||
1236 | xen_setup_machphys_mapping(); | ||
1228 | xen_init_mmu_ops(); | 1237 | xen_init_mmu_ops(); |
1229 | 1238 | ||
1230 | /* Prevent unwanted bits from being set in PTEs. */ | 1239 | /* Prevent unwanted bits from being set in PTEs. */ |
1231 | __supported_pte_mask &= ~_PAGE_GLOBAL; | 1240 | __supported_pte_mask &= ~_PAGE_GLOBAL; |
1241 | __default_kernel_pte_mask &= ~_PAGE_GLOBAL; | ||
1232 | 1242 | ||
1233 | /* | 1243 | /* |
1234 | * Prevent page tables from being allocated in highmem, even | 1244 | * Prevent page tables from being allocated in highmem, even |
@@ -1249,20 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1249 | get_cpu_cap(&boot_cpu_data); | 1259 | get_cpu_cap(&boot_cpu_data); |
1250 | x86_configure_nx(); | 1260 | x86_configure_nx(); |
1251 | 1261 | ||
1252 | xen_init_irq_ops(); | ||
1253 | |||
1254 | /* Let's presume PV guests always boot on vCPU with id 0. */ | 1262 | /* Let's presume PV guests always boot on vCPU with id 0. */ |
1255 | per_cpu(xen_vcpu_id, 0) = 0; | 1263 | per_cpu(xen_vcpu_id, 0) = 0; |
1256 | 1264 | ||
1257 | /* | ||
1258 | * Setup xen_vcpu early because idt_setup_early_handler needs it for | ||
1259 | * local_irq_disable(), irqs_disabled(). | ||
1260 | * | ||
1261 | * Don't do the full vcpu_info placement stuff until we have | ||
1262 | * the cpu_possible_mask and a non-dummy shared_info. | ||
1263 | */ | ||
1264 | xen_vcpu_info_reset(0); | ||
1265 | |||
1266 | idt_setup_early_handler(); | 1265 | idt_setup_early_handler(); |
1267 | 1266 | ||
1268 | xen_init_capabilities(); | 1267 | xen_init_capabilities(); |
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 74179852e46c..7515a19fd324 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c | |||
@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = { | |||
128 | 128 | ||
129 | void __init xen_init_irq_ops(void) | 129 | void __init xen_init_irq_ops(void) |
130 | { | 130 | { |
131 | /* For PVH we use default pv_irq_ops settings. */ | 131 | pv_irq_ops = xen_irq_ops; |
132 | if (!xen_feature(XENFEAT_hvm_callback_vector)) | ||
133 | pv_irq_ops = xen_irq_ops; | ||
134 | x86_init.irqs.intr_init = xen_init_IRQ; | 132 | x86_init.irqs.intr_init = xen_init_IRQ; |
135 | } | 133 | } |
diff --git a/block/bsg.c b/block/bsg.c index 66602c489956..3da540faf673 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
@@ -267,8 +267,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode) | |||
267 | } else if (hdr->din_xfer_len) { | 267 | } else if (hdr->din_xfer_len) { |
268 | ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp), | 268 | ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp), |
269 | hdr->din_xfer_len, GFP_KERNEL); | 269 | hdr->din_xfer_len, GFP_KERNEL); |
270 | } else { | ||
271 | ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL); | ||
272 | } | 270 | } |
273 | 271 | ||
274 | if (ret) | 272 | if (ret) |
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index fc0c2e2328cd..fe9d46d81750 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c | |||
@@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) | |||
51 | return_ACPI_STATUS(status); | 51 | return_ACPI_STATUS(status); |
52 | } | 52 | } |
53 | 53 | ||
54 | /* | 54 | /* Disable all GPEs */ |
55 | * 1) Disable all GPEs | ||
56 | * 2) Enable all wakeup GPEs | ||
57 | */ | ||
58 | status = acpi_hw_disable_all_gpes(); | 55 | status = acpi_hw_disable_all_gpes(); |
59 | if (ACPI_FAILURE(status)) { | 56 | if (ACPI_FAILURE(status)) { |
60 | return_ACPI_STATUS(status); | 57 | return_ACPI_STATUS(status); |
61 | } | 58 | } |
59 | /* | ||
60 | * If the target sleep state is S5, clear all GPEs and fixed events too | ||
61 | */ | ||
62 | if (sleep_state == ACPI_STATE_S5) { | ||
63 | status = acpi_hw_clear_acpi_status(); | ||
64 | if (ACPI_FAILURE(status)) { | ||
65 | return_ACPI_STATUS(status); | ||
66 | } | ||
67 | } | ||
62 | acpi_gbl_system_awake_and_running = FALSE; | 68 | acpi_gbl_system_awake_and_running = FALSE; |
63 | 69 | ||
70 | /* Enable all wakeup GPEs */ | ||
64 | status = acpi_hw_enable_all_wakeup_gpes(); | 71 | status = acpi_hw_enable_all_wakeup_gpes(); |
65 | if (ACPI_FAILURE(status)) { | 72 | if (ACPI_FAILURE(status)) { |
66 | return_ACPI_STATUS(status); | 73 | return_ACPI_STATUS(status); |
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index 5a64ddaed8a3..e47430272692 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c | |||
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name, | |||
182 | switch (lookup_status) { | 182 | switch (lookup_status) { |
183 | case AE_ALREADY_EXISTS: | 183 | case AE_ALREADY_EXISTS: |
184 | 184 | ||
185 | acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); | 185 | acpi_os_printf(ACPI_MSG_BIOS_ERROR); |
186 | message = "Failure creating"; | 186 | message = "Failure creating"; |
187 | break; | 187 | break; |
188 | 188 | ||
189 | case AE_NOT_FOUND: | 189 | case AE_NOT_FOUND: |
190 | 190 | ||
191 | acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); | 191 | acpi_os_printf(ACPI_MSG_BIOS_ERROR); |
192 | message = "Could not resolve"; | 192 | message = "Could not resolve"; |
193 | break; | 193 | break; |
194 | 194 | ||
195 | default: | 195 | default: |
196 | 196 | ||
197 | acpi_os_printf("\n" ACPI_MSG_ERROR); | 197 | acpi_os_printf(ACPI_MSG_ERROR); |
198 | message = "Failure resolving"; | 198 | message = "Failure resolving"; |
199 | break; | 199 | break; |
200 | } | 200 | } |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index b0113a5802a3..d79ad844c78f 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook) | |||
717 | */ | 717 | */ |
718 | pr_err("extension failed to load: %s", hook->name); | 718 | pr_err("extension failed to load: %s", hook->name); |
719 | __battery_hook_unregister(hook, 0); | 719 | __battery_hook_unregister(hook, 0); |
720 | return; | 720 | goto end; |
721 | } | 721 | } |
722 | } | 722 | } |
723 | pr_info("new extension: %s\n", hook->name); | 723 | pr_info("new extension: %s\n", hook->name); |
724 | end: | ||
724 | mutex_unlock(&hook_mutex); | 725 | mutex_unlock(&hook_mutex); |
725 | } | 726 | } |
726 | EXPORT_SYMBOL_GPL(battery_hook_register); | 727 | EXPORT_SYMBOL_GPL(battery_hook_register); |
@@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register); | |||
732 | */ | 733 | */ |
733 | static void battery_hook_add_battery(struct acpi_battery *battery) | 734 | static void battery_hook_add_battery(struct acpi_battery *battery) |
734 | { | 735 | { |
735 | struct acpi_battery_hook *hook_node; | 736 | struct acpi_battery_hook *hook_node, *tmp; |
736 | 737 | ||
737 | mutex_lock(&hook_mutex); | 738 | mutex_lock(&hook_mutex); |
738 | INIT_LIST_HEAD(&battery->list); | 739 | INIT_LIST_HEAD(&battery->list); |
@@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery) | |||
744 | * when a battery gets hotplugged or initialized | 745 | * when a battery gets hotplugged or initialized |
745 | * during the battery module initialization. | 746 | * during the battery module initialization. |
746 | */ | 747 | */ |
747 | list_for_each_entry(hook_node, &battery_hook_list, list) { | 748 | list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) { |
748 | if (hook_node->add_battery(battery->bat)) { | 749 | if (hook_node->add_battery(battery->bat)) { |
749 | /* | 750 | /* |
750 | * The notification of the extensions has failed, to | 751 | * The notification of the extensions has failed, to |
751 | * prevent further errors we will unload the extension. | 752 | * prevent further errors we will unload the extension. |
752 | */ | 753 | */ |
753 | __battery_hook_unregister(hook_node, 0); | ||
754 | pr_err("error in extension, unloading: %s", | 754 | pr_err("error in extension, unloading: %s", |
755 | hook_node->name); | 755 | hook_node->name); |
756 | __battery_hook_unregister(hook_node, 0); | ||
756 | } | 757 | } |
757 | } | 758 | } |
758 | mutex_unlock(&hook_mutex); | 759 | mutex_unlock(&hook_mutex); |
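Two things happen in the battery hook hunks: the early-failure path must still drop hook_mutex (hence the new goto end), and the add-battery walk switches to list_for_each_entry_safe() because __battery_hook_unregister() deletes the node the loop is currently standing on; with the plain iterator, advancing would dereference an unlinked entry. The unlink-while-iterating rule in miniature (node type and probe callback are illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct hook {
            struct list_head list;
            int (*probe)(void);
    };

    static LIST_HEAD(hooks);

    static void probe_all_hooks(void)
    {
            struct hook *h, *tmp;

            /* the _safe variant caches the next node, so deleting 'h' is fine */
            list_for_each_entry_safe(h, tmp, &hooks, list) {
                    if (h->probe()) {
                            list_del(&h->list);
                            kfree(h);
                    }
            }
    }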
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index d15814e1727f..7c479002e798 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | |||
408 | const guid_t *guid; | 408 | const guid_t *guid; |
409 | int rc, i; | 409 | int rc, i; |
410 | 410 | ||
411 | if (cmd_rc) | ||
412 | *cmd_rc = -EINVAL; | ||
411 | func = cmd; | 413 | func = cmd; |
412 | if (cmd == ND_CMD_CALL) { | 414 | if (cmd == ND_CMD_CALL) { |
413 | call_pkg = buf; | 415 | call_pkg = buf; |
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | |||
518 | * If we return an error (like elsewhere) then caller wouldn't | 520 | * If we return an error (like elsewhere) then caller wouldn't |
519 | * be able to rely upon data returned to make calculation. | 521 | * be able to rely upon data returned to make calculation. |
520 | */ | 522 | */ |
523 | if (cmd_rc) | ||
524 | *cmd_rc = 0; | ||
521 | return 0; | 525 | return 0; |
522 | } | 526 | } |
523 | 527 | ||
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev, | |||
1273 | 1277 | ||
1274 | mutex_lock(&acpi_desc->init_mutex); | 1278 | mutex_lock(&acpi_desc->init_mutex); |
1275 | rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, | 1279 | rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, |
1276 | work_busy(&acpi_desc->dwork.work) | 1280 | acpi_desc->scrub_busy |
1277 | && !acpi_desc->cancel ? "+\n" : "\n"); | 1281 | && !acpi_desc->cancel ? "+\n" : "\n"); |
1278 | mutex_unlock(&acpi_desc->init_mutex); | 1282 | mutex_unlock(&acpi_desc->init_mutex); |
1279 | } | 1283 | } |
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, | |||
2939 | return 0; | 2943 | return 0; |
2940 | } | 2944 | } |
2941 | 2945 | ||
2946 | static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) | ||
2947 | { | ||
2948 | lockdep_assert_held(&acpi_desc->init_mutex); | ||
2949 | |||
2950 | acpi_desc->scrub_busy = 1; | ||
2951 | /* note this should only be set from within the workqueue */ | ||
2952 | if (tmo) | ||
2953 | acpi_desc->scrub_tmo = tmo; | ||
2954 | queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); | ||
2955 | } | ||
2956 | |||
2957 | static void sched_ars(struct acpi_nfit_desc *acpi_desc) | ||
2958 | { | ||
2959 | __sched_ars(acpi_desc, 0); | ||
2960 | } | ||
2961 | |||
2962 | static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) | ||
2963 | { | ||
2964 | lockdep_assert_held(&acpi_desc->init_mutex); | ||
2965 | |||
2966 | acpi_desc->scrub_busy = 0; | ||
2967 | acpi_desc->scrub_count++; | ||
2968 | if (acpi_desc->scrub_count_state) | ||
2969 | sysfs_notify_dirent(acpi_desc->scrub_count_state); | ||
2970 | } | ||
2971 | |||
2942 | static void acpi_nfit_scrub(struct work_struct *work) | 2972 | static void acpi_nfit_scrub(struct work_struct *work) |
2943 | { | 2973 | { |
2944 | struct acpi_nfit_desc *acpi_desc; | 2974 | struct acpi_nfit_desc *acpi_desc; |
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work) | |||
2949 | mutex_lock(&acpi_desc->init_mutex); | 2979 | mutex_lock(&acpi_desc->init_mutex); |
2950 | query_rc = acpi_nfit_query_poison(acpi_desc); | 2980 | query_rc = acpi_nfit_query_poison(acpi_desc); |
2951 | tmo = __acpi_nfit_scrub(acpi_desc, query_rc); | 2981 | tmo = __acpi_nfit_scrub(acpi_desc, query_rc); |
2952 | if (tmo) { | 2982 | if (tmo) |
2953 | queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); | 2983 | __sched_ars(acpi_desc, tmo); |
2954 | acpi_desc->scrub_tmo = tmo; | 2984 | else |
2955 | } else { | 2985 | notify_ars_done(acpi_desc); |
2956 | acpi_desc->scrub_count++; | ||
2957 | if (acpi_desc->scrub_count_state) | ||
2958 | sysfs_notify_dirent(acpi_desc->scrub_count_state); | ||
2959 | } | ||
2960 | memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); | 2986 | memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); |
2961 | mutex_unlock(&acpi_desc->init_mutex); | 2987 | mutex_unlock(&acpi_desc->init_mutex); |
2962 | } | 2988 | } |
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) | |||
3037 | break; | 3063 | break; |
3038 | } | 3064 | } |
3039 | 3065 | ||
3040 | queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); | 3066 | sched_ars(acpi_desc); |
3041 | return 0; | 3067 | return 0; |
3042 | } | 3068 | } |
3043 | 3069 | ||
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) | |||
3239 | } | 3265 | } |
3240 | } | 3266 | } |
3241 | if (scheduled) { | 3267 | if (scheduled) { |
3242 | queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); | 3268 | sched_ars(acpi_desc); |
3243 | dev_dbg(dev, "ars_scan triggered\n"); | 3269 | dev_dbg(dev, "ars_scan triggered\n"); |
3244 | } | 3270 | } |
3245 | mutex_unlock(&acpi_desc->init_mutex); | 3271 | mutex_unlock(&acpi_desc->init_mutex); |
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 7d15856a739f..a97ff42fe311 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h | |||
@@ -203,6 +203,7 @@ struct acpi_nfit_desc { | |||
203 | unsigned int max_ars; | 203 | unsigned int max_ars; |
204 | unsigned int scrub_count; | 204 | unsigned int scrub_count; |
205 | unsigned int scrub_mode; | 205 | unsigned int scrub_mode; |
206 | unsigned int scrub_busy:1; | ||
206 | unsigned int cancel:1; | 207 | unsigned int cancel:1; |
207 | unsigned long dimm_cmd_force_en; | 208 | unsigned long dimm_cmd_force_en; |
208 | unsigned long bus_cmd_force_en; | 209 | unsigned long bus_cmd_force_en; |
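Two contracts are tightened in the NFIT changes: acpi_nfit_ctl() now sets *cmd_rc on every path (defaulting to -EINVAL at entry and to 0 on the "no result payload" success path), so callers can trust the out-parameter even after an early error return; and the new scrub_busy bit plus __sched_ars()/notify_ars_done() keep the sysfs scrub indicator in step with the delayed work instead of inferring it from work_busy(). The out-parameter rule on its own, as a hedged sketch (names illustrative):

    #include <errno.h>

    /* Every return path leaves *cmd_rc defined, so callers never read garbage. */
    static int do_ctl(int cmd, int *cmd_rc)
    {
            if (cmd_rc)
                    *cmd_rc = -EINVAL;      /* default before any early return */

            if (cmd < 0)
                    return -EINVAL;         /* *cmd_rc already says "invalid" */

            /* ... issue the command ... */

            if (cmd_rc)
                    *cmd_rc = 0;            /* explicit success, even with no payload */
            return 0;
    }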
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index e5ea1974d1e3..d1e26cb599bf 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c | |||
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, | |||
481 | if (cpu_node) { | 481 | if (cpu_node) { |
482 | cpu_node = acpi_find_processor_package_id(table, cpu_node, | 482 | cpu_node = acpi_find_processor_package_id(table, cpu_node, |
483 | level, flag); | 483 | level, flag); |
484 | /* Only the first level has a guaranteed id */ | 484 | /* |
485 | if (level == 0) | 485 | * As per specification if the processor structure represents |
486 | * an actual processor, then ACPI processor ID must be valid. | ||
487 | * For processor containers ACPI_PPTT_ACPI_PROCESSOR_ID_VALID | ||
488 | * should be set if the UID is valid | ||
489 | */ | ||
490 | if (level == 0 || | ||
491 | cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID) | ||
486 | return cpu_node->acpi_processor_id; | 492 | return cpu_node->acpi_processor_id; |
487 | return ACPI_PTR_DIFF(cpu_node, table); | 493 | return ACPI_PTR_DIFF(cpu_node, table); |
488 | } | 494 | } |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 2b16e7c8fff3..39b181d6bd0d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG | |||
398 | 398 | ||
399 | config SATA_HIGHBANK | 399 | config SATA_HIGHBANK |
400 | tristate "Calxeda Highbank SATA support" | 400 | tristate "Calxeda Highbank SATA support" |
401 | depends on HAS_DMA | ||
402 | depends on ARCH_HIGHBANK || COMPILE_TEST | 401 | depends on ARCH_HIGHBANK || COMPILE_TEST |
403 | help | 402 | help |
404 | This option enables support for the Calxeda Highbank SoC's | 403 | This option enables support for the Calxeda Highbank SoC's |
@@ -408,7 +407,6 @@ config SATA_HIGHBANK | |||
408 | 407 | ||
409 | config SATA_MV | 408 | config SATA_MV |
410 | tristate "Marvell SATA support" | 409 | tristate "Marvell SATA support" |
411 | depends on HAS_DMA | ||
412 | depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \ | 410 | depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \ |
413 | ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST | 411 | ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST |
414 | select GENERIC_PHY | 412 | select GENERIC_PHY |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 738fb22978dd..b2b9eba1d214 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
400 | { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */ | 400 | { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */ |
401 | { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */ | 401 | { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */ |
402 | { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */ | 402 | { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */ |
403 | { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */ | ||
403 | 404 | ||
404 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 405 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
405 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 406 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) | |||
1280 | return strcmp(buf, dmi->driver_data) < 0; | 1281 | return strcmp(buf, dmi->driver_data) < 0; |
1281 | } | 1282 | } |
1282 | 1283 | ||
1284 | static bool ahci_broken_lpm(struct pci_dev *pdev) | ||
1285 | { | ||
1286 | static const struct dmi_system_id sysids[] = { | ||
1287 | /* Various Lenovo 50 series have LPM issues with older BIOSen */ | ||
1288 | { | ||
1289 | .matches = { | ||
1290 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1291 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"), | ||
1292 | }, | ||
1293 | .driver_data = "20180406", /* 1.31 */ | ||
1294 | }, | ||
1295 | { | ||
1296 | .matches = { | ||
1297 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1298 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"), | ||
1299 | }, | ||
1300 | .driver_data = "20180420", /* 1.28 */ | ||
1301 | }, | ||
1302 | { | ||
1303 | .matches = { | ||
1304 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1305 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"), | ||
1306 | }, | ||
1307 | .driver_data = "20180315", /* 1.33 */ | ||
1308 | }, | ||
1309 | { | ||
1310 | .matches = { | ||
1311 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1312 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"), | ||
1313 | }, | ||
1314 | /* | ||
1315 | * Note date based on release notes, 2.35 has been | ||
1316 | * reported to be good, but I've been unable to get | ||
1317 | * a hold of the reporter to get the DMI BIOS date. | ||
1318 | * TODO: fix this. | ||
1319 | */ | ||
1320 | .driver_data = "20180310", /* 2.35 */ | ||
1321 | }, | ||
1322 | { } /* terminate list */ | ||
1323 | }; | ||
1324 | const struct dmi_system_id *dmi = dmi_first_match(sysids); | ||
1325 | int year, month, date; | ||
1326 | char buf[9]; | ||
1327 | |||
1328 | if (!dmi) | ||
1329 | return false; | ||
1330 | |||
1331 | dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); | ||
1332 | snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); | ||
1333 | |||
1334 | return strcmp(buf, dmi->driver_data) < 0; | ||
1335 | } | ||
1336 | |||
1283 | static bool ahci_broken_online(struct pci_dev *pdev) | 1337 | static bool ahci_broken_online(struct pci_dev *pdev) |
1284 | { | 1338 | { |
1285 | #define ENCODE_BUSDEVFN(bus, slot, func) \ | 1339 | #define ENCODE_BUSDEVFN(bus, slot, func) \ |
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1694 | "quirky BIOS, skipping spindown on poweroff\n"); | 1748 | "quirky BIOS, skipping spindown on poweroff\n"); |
1695 | } | 1749 | } |
1696 | 1750 | ||
1751 | if (ahci_broken_lpm(pdev)) { | ||
1752 | pi.flags |= ATA_FLAG_NO_LPM; | ||
1753 | dev_warn(&pdev->dev, | ||
1754 | "BIOS update required for Link Power Management support\n"); | ||
1755 | } | ||
1756 | |||
1697 | if (ahci_broken_suspend(pdev)) { | 1757 | if (ahci_broken_suspend(pdev)) { |
1698 | hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; | 1758 | hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; |
1699 | dev_warn(&pdev->dev, | 1759 | dev_warn(&pdev->dev, |
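ahci_broken_lpm() keys the new quirk off the firmware date rather than a version string: dmi_get_date() is flattened into a "YYYYMMDD" string, which makes a plain strcmp() a valid chronological comparison, and any BIOS older than the known-fixed date gets ATA_FLAG_NO_LPM. The comparison trick by itself (dates and the helper name are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Zero-padded YYYYMMDD strings compare lexicographically in date order. */
    static int bios_older_than(int year, int month, int day, const char *fixed)
    {
            char buf[9];

            snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, day);
            return strcmp(buf, fixed) < 0;
    }

    /* e.g. bios_older_than(2018, 3, 1, "20180406") is true -> apply the quirk */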
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index 0045dacd814b..72d90b4c3aae 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c | |||
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) | |||
82 | * | 82 | * |
83 | * Return: 0 on success; Error code otherwise. | 83 | * Return: 0 on success; Error code otherwise. |
84 | */ | 84 | */ |
85 | int ahci_mvebu_stop_engine(struct ata_port *ap) | 85 | static int ahci_mvebu_stop_engine(struct ata_port *ap) |
86 | { | 86 | { |
87 | void __iomem *port_mmio = ahci_port_base(ap); | 87 | void __iomem *port_mmio = ahci_port_base(ap); |
88 | u32 tmp, port_fbs; | 88 | u32 tmp, port_fbs; |
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 965842a08743..09620c2ffa0f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/gfp.h> | 36 | #include <linux/gfp.h> |
37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
38 | #include <linux/nospec.h> | ||
38 | #include <linux/blkdev.h> | 39 | #include <linux/blkdev.h> |
39 | #include <linux/delay.h> | 40 | #include <linux/delay.h> |
40 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, | |||
1146 | 1147 | ||
1147 | /* get the slot number from the message */ | 1148 | /* get the slot number from the message */ |
1148 | pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; | 1149 | pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; |
1149 | if (pmp < EM_MAX_SLOTS) | 1150 | if (pmp < EM_MAX_SLOTS) { |
1151 | pmp = array_index_nospec(pmp, EM_MAX_SLOTS); | ||
1150 | emp = &pp->em_priv[pmp]; | 1152 | emp = &pp->em_priv[pmp]; |
1151 | else | 1153 | } else { |
1152 | return -EINVAL; | 1154 | return -EINVAL; |
1155 | } | ||
1153 | 1156 | ||
1154 | /* mask off the activity bits if we are in sw_activity | 1157 | /* mask off the activity bits if we are in sw_activity |
1155 | * mode, user should turn off sw_activity before setting | 1158 | * mode, user should turn off sw_activity before setting |
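The libahci change is Spectre-v1 hardening: pmp comes from a user-controlled LED message, and although the bounds check rejects out-of-range values architecturally, the CPU may still speculate past it, so array_index_nospec() clamps the index to [0, EM_MAX_SLOTS) before it is used to index em_priv. The shape of the idiom, with array_index_nospec() being the real helper from linux/nospec.h and the array being illustrative:

    #include <linux/nospec.h>

    #define NR_SLOTS 8

    static int slot_state[NR_SLOTS];

    static int read_slot(unsigned int idx)
    {
            if (idx >= NR_SLOTS)
                    return -1;
            /* clamp under speculation as well, not just architecturally */
            idx = array_index_nospec(idx, NR_SLOTS);
            return slot_state[idx];
    }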
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 27d15ed7fa3d..cc71c63df381 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev) | |||
2493 | (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) | 2493 | (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) |
2494 | dev->horkage |= ATA_HORKAGE_NOLPM; | 2494 | dev->horkage |= ATA_HORKAGE_NOLPM; |
2495 | 2495 | ||
2496 | if (ap->flags & ATA_FLAG_NO_LPM) | ||
2497 | dev->horkage |= ATA_HORKAGE_NOLPM; | ||
2498 | |||
2496 | if (dev->horkage & ATA_HORKAGE_NOLPM) { | 2499 | if (dev->horkage & ATA_HORKAGE_NOLPM) { |
2497 | ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); | 2500 | ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); |
2498 | dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; | 2501 | dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index d5412145d76d..01306c018398 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, | |||
614 | list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { | 614 | list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { |
615 | struct ata_queued_cmd *qc; | 615 | struct ata_queued_cmd *qc; |
616 | 616 | ||
617 | for (i = 0; i < ATA_MAX_QUEUE; i++) { | 617 | ata_qc_for_each_raw(ap, qc, i) { |
618 | qc = __ata_qc_from_tag(ap, i); | ||
619 | if (qc->flags & ATA_QCFLAG_ACTIVE && | 618 | if (qc->flags & ATA_QCFLAG_ACTIVE && |
620 | qc->scsicmd == scmd) | 619 | qc->scsicmd == scmd) |
621 | break; | 620 | break; |
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh); | |||
818 | 817 | ||
819 | static int ata_eh_nr_in_flight(struct ata_port *ap) | 818 | static int ata_eh_nr_in_flight(struct ata_port *ap) |
820 | { | 819 | { |
820 | struct ata_queued_cmd *qc; | ||
821 | unsigned int tag; | 821 | unsigned int tag; |
822 | int nr = 0; | 822 | int nr = 0; |
823 | 823 | ||
824 | /* count only non-internal commands */ | 824 | /* count only non-internal commands */ |
825 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 825 | ata_qc_for_each(ap, qc, tag) { |
826 | if (ata_tag_internal(tag)) | 826 | if (qc) |
827 | continue; | ||
828 | if (ata_qc_from_tag(ap, tag)) | ||
829 | nr++; | 827 | nr++; |
830 | } | 828 | } |
831 | 829 | ||
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t) | |||
847 | goto out_unlock; | 845 | goto out_unlock; |
848 | 846 | ||
849 | if (cnt == ap->fastdrain_cnt) { | 847 | if (cnt == ap->fastdrain_cnt) { |
848 | struct ata_queued_cmd *qc; | ||
850 | unsigned int tag; | 849 | unsigned int tag; |
851 | 850 | ||
852 | /* No progress during the last interval, tag all | 851 | /* No progress during the last interval, tag all |
853 | * in-flight qcs as timed out and freeze the port. | 852 | * in-flight qcs as timed out and freeze the port. |
854 | */ | 853 | */ |
855 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 854 | ata_qc_for_each(ap, qc, tag) { |
856 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); | ||
857 | if (qc) | 855 | if (qc) |
858 | qc->err_mask |= AC_ERR_TIMEOUT; | 856 | qc->err_mask |= AC_ERR_TIMEOUT; |
859 | } | 857 | } |
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap) | |||
999 | 997 | ||
1000 | static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) | 998 | static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) |
1001 | { | 999 | { |
1000 | struct ata_queued_cmd *qc; | ||
1002 | int tag, nr_aborted = 0; | 1001 | int tag, nr_aborted = 0; |
1003 | 1002 | ||
1004 | WARN_ON(!ap->ops->error_handler); | 1003 | WARN_ON(!ap->ops->error_handler); |
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) | |||
1007 | ata_eh_set_pending(ap, 0); | 1006 | ata_eh_set_pending(ap, 0); |
1008 | 1007 | ||
1009 | /* include internal tag in iteration */ | 1008 | /* include internal tag in iteration */ |
1010 | for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) { | 1009 | ata_qc_for_each_with_internal(ap, qc, tag) { |
1011 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); | ||
1012 | |||
1013 | if (qc && (!link || qc->dev->link == link)) { | 1010 | if (qc && (!link || qc->dev->link == link)) { |
1014 | qc->flags |= ATA_QCFLAG_FAILED; | 1011 | qc->flags |= ATA_QCFLAG_FAILED; |
1015 | ata_qc_complete(qc); | 1012 | ata_qc_complete(qc); |
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) | |||
1712 | return; | 1709 | return; |
1713 | 1710 | ||
1714 | /* has LLDD analyzed already? */ | 1711 | /* has LLDD analyzed already? */ |
1715 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 1712 | ata_qc_for_each_raw(ap, qc, tag) { |
1716 | qc = __ata_qc_from_tag(ap, tag); | ||
1717 | |||
1718 | if (!(qc->flags & ATA_QCFLAG_FAILED)) | 1713 | if (!(qc->flags & ATA_QCFLAG_FAILED)) |
1719 | continue; | 1714 | continue; |
1720 | 1715 | ||
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) | |||
2136 | { | 2131 | { |
2137 | struct ata_port *ap = link->ap; | 2132 | struct ata_port *ap = link->ap; |
2138 | struct ata_eh_context *ehc = &link->eh_context; | 2133 | struct ata_eh_context *ehc = &link->eh_context; |
2134 | struct ata_queued_cmd *qc; | ||
2139 | struct ata_device *dev; | 2135 | struct ata_device *dev; |
2140 | unsigned int all_err_mask = 0, eflags = 0; | 2136 | unsigned int all_err_mask = 0, eflags = 0; |
2141 | int tag, nr_failed = 0, nr_quiet = 0; | 2137 | int tag, nr_failed = 0, nr_quiet = 0; |
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) | |||
2168 | 2164 | ||
2169 | all_err_mask |= ehc->i.err_mask; | 2165 | all_err_mask |= ehc->i.err_mask; |
2170 | 2166 | ||
2171 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 2167 | ata_qc_for_each_raw(ap, qc, tag) { |
2172 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); | ||
2173 | |||
2174 | if (!(qc->flags & ATA_QCFLAG_FAILED) || | 2168 | if (!(qc->flags & ATA_QCFLAG_FAILED) || |
2175 | ata_dev_phys_link(qc->dev) != link) | 2169 | ata_dev_phys_link(qc->dev) != link) |
2176 | continue; | 2170 | continue; |
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2436 | { | 2430 | { |
2437 | struct ata_port *ap = link->ap; | 2431 | struct ata_port *ap = link->ap; |
2438 | struct ata_eh_context *ehc = &link->eh_context; | 2432 | struct ata_eh_context *ehc = &link->eh_context; |
2433 | struct ata_queued_cmd *qc; | ||
2439 | const char *frozen, *desc; | 2434 | const char *frozen, *desc; |
2440 | char tries_buf[6] = ""; | 2435 | char tries_buf[6] = ""; |
2441 | int tag, nr_failed = 0; | 2436 | int tag, nr_failed = 0; |
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2447 | if (ehc->i.desc[0] != '\0') | 2442 | if (ehc->i.desc[0] != '\0') |
2448 | desc = ehc->i.desc; | 2443 | desc = ehc->i.desc; |
2449 | 2444 | ||
2450 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 2445 | ata_qc_for_each_raw(ap, qc, tag) { |
2451 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); | ||
2452 | |||
2453 | if (!(qc->flags & ATA_QCFLAG_FAILED) || | 2446 | if (!(qc->flags & ATA_QCFLAG_FAILED) || |
2454 | ata_dev_phys_link(qc->dev) != link || | 2447 | ata_dev_phys_link(qc->dev) != link || |
2455 | ((qc->flags & ATA_QCFLAG_QUIET) && | 2448 | ((qc->flags & ATA_QCFLAG_QUIET) && |
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link) | |||
2511 | ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); | 2504 | ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); |
2512 | #endif | 2505 | #endif |
2513 | 2506 | ||
2514 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 2507 | ata_qc_for_each_raw(ap, qc, tag) { |
2515 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); | ||
2516 | struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; | 2508 | struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; |
2517 | char data_buf[20] = ""; | 2509 | char data_buf[20] = ""; |
2518 | char cdb_buf[70] = ""; | 2510 | char cdb_buf[70] = ""; |
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
3992 | */ | 3984 | */ |
3993 | void ata_eh_finish(struct ata_port *ap) | 3985 | void ata_eh_finish(struct ata_port *ap) |
3994 | { | 3986 | { |
3987 | struct ata_queued_cmd *qc; | ||
3995 | int tag; | 3988 | int tag; |
3996 | 3989 | ||
3997 | /* retry or finish qcs */ | 3990 | /* retry or finish qcs */ |
3998 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 3991 | ata_qc_for_each_raw(ap, qc, tag) { |
3999 | struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); | ||
4000 | |||
4001 | if (!(qc->flags & ATA_QCFLAG_FAILED)) | 3992 | if (!(qc->flags & ATA_QCFLAG_FAILED)) |
4002 | continue; | 3993 | continue; |
4003 | 3994 | ||
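The libata-eh.c hunks above convert open-coded tag loops to the ata_qc_for_each_raw()/ata_qc_for_each_with_internal() iterators added alongside this series. A minimal sketch of the iterator shape and its use, assuming the helpers behave as the converted call sites suggest; example_qc_for_each() and example_finish() are illustrative names, and the upstream macro bodies may differ:

#include <linux/libata.h>

/*
 * Sketch: walk every command slot on a port, fetching the queued command
 * for each tag; 'max_tag' selects whether the internal tag is included.
 */
#define example_qc_for_each(ap, qc, tag, max_tag, get)			\
	for ((tag) = 0;							\
	     (tag) < (max_tag) && ({ (qc) = get((ap), (tag)); 1; });	\
	     (tag)++)

/* Usage mirroring ata_eh_finish() above: skip commands that did not fail. */
static void example_finish(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	int tag;

	example_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;
		/* retry or finish the failed command here */
	}
}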
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 6a91d04351d9..aad1b01447de 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) | |||
3805 | */ | 3805 | */ |
3806 | goto invalid_param_len; | 3806 | goto invalid_param_len; |
3807 | } | 3807 | } |
3808 | if (block > dev->n_sectors) | ||
3809 | goto out_of_range; | ||
3810 | 3808 | ||
3811 | all = cdb[14] & 0x1; | 3809 | all = cdb[14] & 0x1; |
3810 | if (all) { | ||
3811 | /* | ||
3812 | * Ignore the block address (zone ID) as defined by ZBC. | ||
3813 | */ | ||
3814 | block = 0; | ||
3815 | } else if (block >= dev->n_sectors) { | ||
3816 | /* | ||
3817 | * Block must be a valid zone ID (a zone start LBA). | ||
3818 | */ | ||
3819 | fp = 2; | ||
3820 | goto invalid_fld; | ||
3821 | } | ||
3812 | 3822 | ||
3813 | if (ata_ncq_enabled(qc->dev) && | 3823 | if (ata_ncq_enabled(qc->dev) && |
3814 | ata_fpdma_zac_mgmt_out_supported(qc->dev)) { | 3824 | ata_fpdma_zac_mgmt_out_supported(qc->dev)) { |
@@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) | |||
3837 | invalid_fld: | 3847 | invalid_fld: |
3838 | ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); | 3848 | ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); |
3839 | return 1; | 3849 | return 1; |
3840 | out_of_range: | ||
3841 | /* "Logical Block Address out of range" */ | ||
3842 | ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00); | ||
3843 | return 1; | ||
3844 | invalid_param_len: | 3850 | invalid_param_len: |
3845 | /* "Parameter list length error" */ | 3851 | /* "Parameter list length error" */ |
3846 | ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); | 3852 | ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); |
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index b8d9cfc60374..4dc528bf8e85 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag, | |||
395 | { | 395 | { |
396 | /* We let libATA core do actual (queue) tag allocation */ | 396 | /* We let libATA core do actual (queue) tag allocation */ |
397 | 397 | ||
398 | /* all non NCQ/queued commands should have tag#0 */ | ||
399 | if (ata_tag_internal(tag)) { | ||
400 | DPRINTK("mapping internal cmds to tag#0\n"); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) { | 398 | if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) { |
405 | DPRINTK("tag %d invalid : out of range\n", tag); | 399 | DPRINTK("tag %d invalid : out of range\n", tag); |
406 | return 0; | 400 | return 0; |
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) | |||
1229 | 1223 | ||
1230 | /* Workaround for data length mismatch errata */ | 1224 | /* Workaround for data length mismatch errata */ |
1231 | if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) { | 1225 | if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) { |
1232 | for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { | 1226 | ata_qc_for_each_with_internal(ap, qc, tag) { |
1233 | qc = ata_qc_from_tag(ap, tag); | ||
1234 | if (qc && ata_is_atapi(qc->tf.protocol)) { | 1227 | if (qc && ata_is_atapi(qc->tf.protocol)) { |
1235 | u32 hcontrol; | 1228 | u32 hcontrol; |
1236 | /* Set HControl[27] to clear error registers */ | 1229 | /* Set HControl[27] to clear error registers */ |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 10ae11aa1926..72c9b922a77b 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev) | |||
675 | struct ata_port *ap = ata_shost_to_port(sdev->host); | 675 | struct ata_port *ap = ata_shost_to_port(sdev->host); |
676 | struct nv_adma_port_priv *pp = ap->private_data; | 676 | struct nv_adma_port_priv *pp = ap->private_data; |
677 | struct nv_adma_port_priv *port0, *port1; | 677 | struct nv_adma_port_priv *port0, *port1; |
678 | struct scsi_device *sdev0, *sdev1; | ||
679 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 678 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
680 | unsigned long segment_boundary, flags; | 679 | unsigned long segment_boundary, flags; |
681 | unsigned short sg_tablesize; | 680 | unsigned short sg_tablesize; |
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev) | |||
736 | 735 | ||
737 | port0 = ap->host->ports[0]->private_data; | 736 | port0 = ap->host->ports[0]->private_data; |
738 | port1 = ap->host->ports[1]->private_data; | 737 | port1 = ap->host->ports[1]->private_data; |
739 | sdev0 = ap->host->ports[0]->link.device[0].sdev; | ||
740 | sdev1 = ap->host->ports[1]->link.device[0].sdev; | ||
741 | if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || | 738 | if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || |
742 | (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { | 739 | (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { |
743 | /* | 740 | /* |
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index ff81a576347e..82532c299bb5 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c | |||
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev) | |||
1618 | skb_queue_head_init(&iadev->rx_dma_q); | 1618 | skb_queue_head_init(&iadev->rx_dma_q); |
1619 | iadev->rx_free_desc_qhead = NULL; | 1619 | iadev->rx_free_desc_qhead = NULL; |
1620 | 1620 | ||
1621 | iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL); | 1621 | iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL); |
1622 | if (!iadev->rx_open) { | 1622 | if (!iadev->rx_open) { |
1623 | printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n", | 1623 | printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n", |
1624 | dev->number); | 1624 | dev->number); |
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index a8d2eb0ceb8d..2c288d1f42bb 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c | |||
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) | |||
1483 | return -EFAULT; | 1483 | return -EFAULT; |
1484 | if (pool < 0 || pool > ZATM_LAST_POOL) | 1484 | if (pool < 0 || pool > ZATM_LAST_POOL) |
1485 | return -EINVAL; | 1485 | return -EINVAL; |
1486 | pool = array_index_nospec(pool, | ||
1487 | ZATM_LAST_POOL + 1); | ||
1486 | if (copy_from_user(&info, | 1488 | if (copy_from_user(&info, |
1487 | &((struct zatm_pool_req __user *) arg)->info, | 1489 | &((struct zatm_pool_req __user *) arg)->info, |
1488 | sizeof(info))) return -EFAULT; | 1490 | sizeof(info))) return -EFAULT; |
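The zatm.c hunk above adds array_index_nospec() right after the bounds check on the user-supplied pool number, the standard Spectre-v1 hardening pattern: the index is clamped under speculation so a mispredicted branch cannot be used to read out of range. A minimal sketch of that pattern; example_lookup() and its table are illustrative, not driver code:

#include <linux/errno.h>
#include <linux/nospec.h>

/* Validate a user-controlled index, then clamp it under speculation
 * before using it to index the array.
 */
static int example_lookup(int idx, const int *table, int nr_entries)
{
	if (idx < 0 || idx >= nr_entries)
		return -EINVAL;
	idx = array_index_nospec(idx, nr_entries);
	return table[idx];
}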
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index c298de8a8308..9e8484189034 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev) | |||
2235 | } | 2235 | } |
2236 | 2236 | ||
2237 | static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, | 2237 | static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, |
2238 | unsigned int index) | 2238 | unsigned int index, bool power_on) |
2239 | { | 2239 | { |
2240 | struct of_phandle_args pd_args; | 2240 | struct of_phandle_args pd_args; |
2241 | struct generic_pm_domain *pd; | 2241 | struct generic_pm_domain *pd; |
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, | |||
2271 | dev->pm_domain->detach = genpd_dev_pm_detach; | 2271 | dev->pm_domain->detach = genpd_dev_pm_detach; |
2272 | dev->pm_domain->sync = genpd_dev_pm_sync; | 2272 | dev->pm_domain->sync = genpd_dev_pm_sync; |
2273 | 2273 | ||
2274 | genpd_lock(pd); | 2274 | if (power_on) { |
2275 | ret = genpd_power_on(pd, 0); | 2275 | genpd_lock(pd); |
2276 | genpd_unlock(pd); | 2276 | ret = genpd_power_on(pd, 0); |
2277 | genpd_unlock(pd); | ||
2278 | } | ||
2277 | 2279 | ||
2278 | if (ret) | 2280 | if (ret) |
2279 | genpd_remove_device(pd, dev); | 2281 | genpd_remove_device(pd, dev); |
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev) | |||
2307 | "#power-domain-cells") != 1) | 2309 | "#power-domain-cells") != 1) |
2308 | return 0; | 2310 | return 0; |
2309 | 2311 | ||
2310 | return __genpd_dev_pm_attach(dev, dev->of_node, 0); | 2312 | return __genpd_dev_pm_attach(dev, dev->of_node, 0, true); |
2311 | } | 2313 | } |
2312 | EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); | 2314 | EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); |
2313 | 2315 | ||
@@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, | |||
2359 | } | 2361 | } |
2360 | 2362 | ||
2361 | /* Try to attach the device to the PM domain at the specified index. */ | 2363 | /* Try to attach the device to the PM domain at the specified index. */ |
2362 | ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index); | 2364 | ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false); |
2363 | if (ret < 1) { | 2365 | if (ret < 1) { |
2364 | device_unregister(genpd_dev); | 2366 | device_unregister(genpd_dev); |
2365 | return ret ? ERR_PTR(ret) : NULL; | 2367 | return ret ? ERR_PTR(ret) : NULL; |
2366 | } | 2368 | } |
2367 | 2369 | ||
2368 | pm_runtime_set_active(genpd_dev); | ||
2369 | pm_runtime_enable(genpd_dev); | 2370 | pm_runtime_enable(genpd_dev); |
2371 | genpd_queue_power_off_work(dev_to_genpd(genpd_dev)); | ||
2370 | 2372 | ||
2371 | return genpd_dev; | 2373 | return genpd_dev; |
2372 | } | 2374 | } |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 1476cb3439f4..5e793dd7adfb 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio) | |||
282 | what = COMPLETED_OK; | 282 | what = COMPLETED_OK; |
283 | } | 283 | } |
284 | 284 | ||
285 | bio_put(req->private_bio); | ||
286 | req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); | 285 | req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); |
286 | bio_put(bio); | ||
287 | 287 | ||
288 | /* not req_mod(), we need irqsave here! */ | 288 | /* not req_mod(), we need irqsave here! */ |
289 | spin_lock_irqsave(&device->resource->req_lock, flags); | 289 | spin_lock_irqsave(&device->resource->req_lock, flags); |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index d6b6f434fd4b..4cb1d1be3cfb 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, | |||
1613 | arg = (unsigned long) compat_ptr(arg); | 1613 | arg = (unsigned long) compat_ptr(arg); |
1614 | case LOOP_SET_FD: | 1614 | case LOOP_SET_FD: |
1615 | case LOOP_CHANGE_FD: | 1615 | case LOOP_CHANGE_FD: |
1616 | case LOOP_SET_BLOCK_SIZE: | ||
1616 | err = lo_ioctl(bdev, mode, cmd, arg); | 1617 | err = lo_ioctl(bdev, mode, cmd, arg); |
1617 | break; | 1618 | break; |
1618 | default: | 1619 | default: |
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 1cc29629d238..80d60f43db56 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata) | |||
169 | const char *name; | 169 | const char *name; |
170 | int nr_fck = 0, nr_ick = 0, i, error = 0; | 170 | int nr_fck = 0, nr_ick = 0, i, error = 0; |
171 | 171 | ||
172 | ddata->clock_roles = devm_kzalloc(ddata->dev, | 172 | ddata->clock_roles = devm_kcalloc(ddata->dev, |
173 | sizeof(*ddata->clock_roles) * | ||
174 | SYSC_MAX_CLOCKS, | 173 | SYSC_MAX_CLOCKS, |
174 | sizeof(*ddata->clock_roles), | ||
175 | GFP_KERNEL); | 175 | GFP_KERNEL); |
176 | if (!ddata->clock_roles) | 176 | if (!ddata->clock_roles) |
177 | return -ENOMEM; | 177 | return -ENOMEM; |
@@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata) | |||
200 | return -EINVAL; | 200 | return -EINVAL; |
201 | } | 201 | } |
202 | 202 | ||
203 | ddata->clocks = devm_kzalloc(ddata->dev, | 203 | ddata->clocks = devm_kcalloc(ddata->dev, |
204 | sizeof(*ddata->clocks) * ddata->nr_clocks, | 204 | ddata->nr_clocks, sizeof(*ddata->clocks), |
205 | GFP_KERNEL); | 205 | GFP_KERNEL); |
206 | if (!ddata->clocks) | 206 | if (!ddata->clocks) |
207 | return -ENOMEM; | 207 | return -ENOMEM; |
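The ti-sysc.c hunk above swaps an open-coded devm_kzalloc(dev, sizeof(elem) * count, ...) for devm_kcalloc(dev, count, sizeof(elem), ...), which zeroes the buffer and refuses allocations whose count * size would overflow. A small sketch of the same pattern, with example_alloc_roles() and nr_roles as illustrative names:

#include <linux/device.h>
#include <linux/slab.h>

/* Overflow-checked, zeroed, device-managed array allocation; freed
 * automatically when the device is unbound.
 */
static const char **example_alloc_roles(struct device *dev, size_t nr_roles)
{
	return devm_kcalloc(dev, nr_roles, sizeof(const char *), GFP_KERNEL);
}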
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index ad353be871bf..90ec010bffbd 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2088 | return 0; | 2088 | return 0; |
2089 | 2089 | ||
2090 | out_err: | 2090 | out_err: |
2091 | ipmi_unregister_smi(new_smi->intf); | 2091 | if (new_smi->intf) { |
2092 | new_smi->intf = NULL; | 2092 | ipmi_unregister_smi(new_smi->intf); |
2093 | new_smi->intf = NULL; | ||
2094 | } | ||
2093 | 2095 | ||
2094 | kfree(init_name); | 2096 | kfree(init_name); |
2095 | 2097 | ||
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index fbfc05e3f3d1..bb882ab161fe 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c | |||
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc) | |||
210 | int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc) | 210 | int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc) |
211 | { | 211 | { |
212 | unsigned long flags; | 212 | unsigned long flags; |
213 | int ret = 0; | 213 | int ret = -ENODATA; |
214 | u8 status; | 214 | u8 status; |
215 | 215 | ||
216 | spin_lock_irqsave(&kcs_bmc->lock, flags); | 216 | spin_lock_irqsave(&kcs_bmc->lock, flags); |
217 | 217 | ||
218 | if (!kcs_bmc->running) { | 218 | status = read_status(kcs_bmc); |
219 | kcs_force_abort(kcs_bmc); | 219 | if (status & KCS_STATUS_IBF) { |
220 | ret = -ENODEV; | 220 | if (!kcs_bmc->running) |
221 | goto out_unlock; | 221 | kcs_force_abort(kcs_bmc); |
222 | } | 222 | else if (status & KCS_STATUS_CMD_DAT) |
223 | 223 | kcs_bmc_handle_cmd(kcs_bmc); | |
224 | status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT); | 224 | else |
225 | 225 | kcs_bmc_handle_data(kcs_bmc); | |
226 | switch (status) { | ||
227 | case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT: | ||
228 | kcs_bmc_handle_cmd(kcs_bmc); | ||
229 | break; | ||
230 | |||
231 | case KCS_STATUS_IBF: | ||
232 | kcs_bmc_handle_data(kcs_bmc); | ||
233 | break; | ||
234 | 226 | ||
235 | default: | 227 | ret = 0; |
236 | ret = -ENODATA; | ||
237 | break; | ||
238 | } | 228 | } |
239 | 229 | ||
240 | out_unlock: | ||
241 | spin_unlock_irqrestore(&kcs_bmc->lock, flags); | 230 | spin_unlock_irqrestore(&kcs_bmc->lock, flags); |
242 | 231 | ||
243 | return ret; | 232 | return ret; |
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index ae40cbe770f0..0bb25dd009d1 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile | |||
@@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/ | |||
96 | obj-$(CONFIG_ARCH_STI) += st/ | 96 | obj-$(CONFIG_ARCH_STI) += st/ |
97 | obj-$(CONFIG_ARCH_STRATIX10) += socfpga/ | 97 | obj-$(CONFIG_ARCH_STRATIX10) += socfpga/ |
98 | obj-$(CONFIG_ARCH_SUNXI) += sunxi/ | 98 | obj-$(CONFIG_ARCH_SUNXI) += sunxi/ |
99 | obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/ | 99 | obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/ |
100 | obj-$(CONFIG_ARCH_TEGRA) += tegra/ | 100 | obj-$(CONFIG_ARCH_TEGRA) += tegra/ |
101 | obj-y += ti/ | 101 | obj-y += ti/ |
102 | obj-$(CONFIG_CLK_UNIPHIER) += uniphier/ | 102 | obj-$(CONFIG_CLK_UNIPHIER) += uniphier/ |
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c index aae62a5b8734..d1bbee19ed0f 100644 --- a/drivers/clk/davinci/da8xx-cfgchip.c +++ b/drivers/clk/davinci/da8xx-cfgchip.c | |||
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap) | |||
672 | 672 | ||
673 | usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap); | 673 | usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap); |
674 | if (IS_ERR(usb1)) { | 674 | if (IS_ERR(usb1)) { |
675 | if (PTR_ERR(usb0) == -EPROBE_DEFER) | 675 | if (PTR_ERR(usb1) == -EPROBE_DEFER) |
676 | return -EPROBE_DEFER; | 676 | return -EPROBE_DEFER; |
677 | 677 | ||
678 | dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n", | 678 | dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n", |
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h index 6a42529d31a9..cc5614567a70 100644 --- a/drivers/clk/davinci/psc.h +++ b/drivers/clk/davinci/psc.h | |||
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data; | |||
107 | #ifdef CONFIG_ARCH_DAVINCI_DM355 | 107 | #ifdef CONFIG_ARCH_DAVINCI_DM355 |
108 | extern const struct davinci_psc_init_data dm355_psc_init_data; | 108 | extern const struct davinci_psc_init_data dm355_psc_init_data; |
109 | #endif | 109 | #endif |
110 | #ifdef CONFIG_ARCH_DAVINCI_DM356 | 110 | #ifdef CONFIG_ARCH_DAVINCI_DM365 |
111 | extern const struct davinci_psc_init_data dm365_psc_init_data; | 111 | extern const struct davinci_psc_init_data dm365_psc_init_data; |
112 | #endif | 112 | #endif |
113 | #ifdef CONFIG_ARCH_DAVINCI_DM644x | 113 | #ifdef CONFIG_ARCH_DAVINCI_DM644x |
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile index acaa14cfa25c..49454700f2e5 100644 --- a/drivers/clk/sunxi-ng/Makefile +++ b/drivers/clk/sunxi-ng/Makefile | |||
@@ -1,24 +1,24 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # Common objects | 2 | # Common objects |
3 | lib-$(CONFIG_SUNXI_CCU) += ccu_common.o | 3 | obj-y += ccu_common.o |
4 | lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o | 4 | obj-y += ccu_mmc_timing.o |
5 | lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o | 5 | obj-y += ccu_reset.o |
6 | 6 | ||
7 | # Base clock types | 7 | # Base clock types |
8 | lib-$(CONFIG_SUNXI_CCU) += ccu_div.o | 8 | obj-y += ccu_div.o |
9 | lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o | 9 | obj-y += ccu_frac.o |
10 | lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o | 10 | obj-y += ccu_gate.o |
11 | lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o | 11 | obj-y += ccu_mux.o |
12 | lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o | 12 | obj-y += ccu_mult.o |
13 | lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o | 13 | obj-y += ccu_phase.o |
14 | lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o | 14 | obj-y += ccu_sdm.o |
15 | 15 | ||
16 | # Multi-factor clocks | 16 | # Multi-factor clocks |
17 | lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o | 17 | obj-y += ccu_nk.o |
18 | lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o | 18 | obj-y += ccu_nkm.o |
19 | lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o | 19 | obj-y += ccu_nkmp.o |
20 | lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o | 20 | obj-y += ccu_nm.o |
21 | lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o | 21 | obj-y += ccu_mp.o |
22 | 22 | ||
23 | # SoC support | 23 | # SoC support |
24 | obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o | 24 | obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o |
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o | |||
38 | obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o | 38 | obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o |
39 | obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o | 39 | obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o |
40 | obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o | 40 | obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o |
41 | |||
42 | # The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our | ||
43 | # case, we want to use that goal, but even though lib.a will be properly | ||
44 | # generated, it will not be linked in, eventually resulting in a linker error | ||
45 | # for missing symbols. | ||
46 | # | ||
47 | # We can work around that by explicitly adding lib.a to the obj-y goal. This is | ||
48 | # an undocumented behaviour, but works well for now. | ||
49 | obj-$(CONFIG_SUNXI_CCU) += lib.a | ||
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 57cb2f00fc07..d8c7f5750cdb 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type, | |||
735 | clk->features |= CLOCK_EVT_FEAT_DYNIRQ; | 735 | clk->features |= CLOCK_EVT_FEAT_DYNIRQ; |
736 | clk->name = "arch_mem_timer"; | 736 | clk->name = "arch_mem_timer"; |
737 | clk->rating = 400; | 737 | clk->rating = 400; |
738 | clk->cpumask = cpu_all_mask; | 738 | clk->cpumask = cpu_possible_mask; |
739 | if (arch_timer_mem_use_virtual) { | 739 | if (arch_timer_mem_use_virtual) { |
740 | clk->set_state_shutdown = arch_timer_shutdown_virt_mem; | 740 | clk->set_state_shutdown = arch_timer_shutdown_virt_mem; |
741 | clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; | 741 | clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; |
diff --git a/drivers/dax/device.c b/drivers/dax/device.c index de2f8297a210..108c37fca782 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c | |||
@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, | |||
189 | 189 | ||
190 | /* prevent private mappings from being established */ | 190 | /* prevent private mappings from being established */ |
191 | if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { | 191 | if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { |
192 | dev_info(dev, "%s: %s: fail, attempted private mapping\n", | 192 | dev_info_ratelimited(dev, |
193 | "%s: %s: fail, attempted private mapping\n", | ||
193 | current->comm, func); | 194 | current->comm, func); |
194 | return -EINVAL; | 195 | return -EINVAL; |
195 | } | 196 | } |
196 | 197 | ||
197 | mask = dax_region->align - 1; | 198 | mask = dax_region->align - 1; |
198 | if (vma->vm_start & mask || vma->vm_end & mask) { | 199 | if (vma->vm_start & mask || vma->vm_end & mask) { |
199 | dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", | 200 | dev_info_ratelimited(dev, |
201 | "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", | ||
200 | current->comm, func, vma->vm_start, vma->vm_end, | 202 | current->comm, func, vma->vm_start, vma->vm_end, |
201 | mask); | 203 | mask); |
202 | return -EINVAL; | 204 | return -EINVAL; |
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, | |||
204 | 206 | ||
205 | if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV | 207 | if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV |
206 | && (vma->vm_flags & VM_DONTCOPY) == 0) { | 208 | && (vma->vm_flags & VM_DONTCOPY) == 0) { |
207 | dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n", | 209 | dev_info_ratelimited(dev, |
210 | "%s: %s: fail, dax range requires MADV_DONTFORK\n", | ||
208 | current->comm, func); | 211 | current->comm, func); |
209 | return -EINVAL; | 212 | return -EINVAL; |
210 | } | 213 | } |
211 | 214 | ||
212 | if (!vma_is_dax(vma)) { | 215 | if (!vma_is_dax(vma)) { |
213 | dev_info(dev, "%s: %s: fail, vma is not DAX capable\n", | 216 | dev_info_ratelimited(dev, |
217 | "%s: %s: fail, vma is not DAX capable\n", | ||
214 | current->comm, func); | 218 | current->comm, func); |
215 | return -EINVAL; | 219 | return -EINVAL; |
216 | } | 220 | } |
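The device-dax hunk above switches the mmap validation messages to dev_info_ratelimited(), so a process that keeps retrying a rejected mapping cannot flood the kernel log (the default ratelimit allows short bursts, roughly 10 messages per 5 seconds). A simplified sketch of the shape; example_check_shared() is an illustrative helper, not the driver function:

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Reject non-shared mappings, but rate-limit the diagnostic so repeated
 * attempts from userspace do not spam the log.
 */
static int example_check_shared(struct device *dev, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev, "%s: attempted private mapping\n",
				     current->comm);
		return -EINVAL;
	}
	return 0;
}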
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index fa31cccbe04f..6bfa217ed6d0 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec, | |||
794 | struct k3_dma_dev *d = ofdma->of_dma_data; | 794 | struct k3_dma_dev *d = ofdma->of_dma_data; |
795 | unsigned int request = dma_spec->args[0]; | 795 | unsigned int request = dma_spec->args[0]; |
796 | 796 | ||
797 | if (request > d->dma_requests) | 797 | if (request >= d->dma_requests) |
798 | return NULL; | 798 | return NULL; |
799 | 799 | ||
800 | return dma_get_slave_channel(&(d->chans[request].vc.chan)); | 800 | return dma_get_slave_channel(&(d->chans[request].vc.chan)); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index defcdde4d358..de0957fe9668 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
3033 | pd->src_addr_widths = PL330_DMA_BUSWIDTHS; | 3033 | pd->src_addr_widths = PL330_DMA_BUSWIDTHS; |
3034 | pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; | 3034 | pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; |
3035 | pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 3035 | pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
3036 | pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | 3036 | pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
3037 | pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? | 3037 | pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? |
3038 | 1 : PL330_MAX_BURST); | 3038 | 1 : PL330_MAX_BURST); |
3039 | 3039 | ||
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index 9b5ca8691f27..a4a931ddf6f6 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c | |||
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev) | |||
1485 | od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; | 1485 | od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; |
1486 | od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; | 1486 | od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; |
1487 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 1487 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
1488 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 1488 | if (__dma_omap15xx(od->plat->dma_attr)) |
1489 | od->ddev.residue_granularity = | ||
1490 | DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
1491 | else | ||
1492 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1489 | od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ | 1493 | od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ |
1490 | od->ddev.dev = &pdev->dev; | 1494 | od->ddev.dev = &pdev->dev; |
1491 | INIT_LIST_HEAD(&od->ddev.channels); | 1495 | INIT_LIST_HEAD(&od->ddev.channels); |
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index dd4edd8f22ce..7fa793672a7a 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c | |||
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev, | |||
455 | 455 | ||
456 | mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name, | 456 | mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name, |
457 | &altera_cvp_ops, conf); | 457 | &altera_cvp_ops, conf); |
458 | if (!mgr) | 458 | if (!mgr) { |
459 | return -ENOMEM; | 459 | ret = -ENOMEM; |
460 | goto err_unmap; | ||
461 | } | ||
460 | 462 | ||
461 | pci_set_drvdata(pdev, mgr); | 463 | pci_set_drvdata(pdev, mgr); |
462 | 464 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a59c07590cee..7dcbac8af9a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -190,6 +190,7 @@ struct amdgpu_job; | |||
190 | struct amdgpu_irq_src; | 190 | struct amdgpu_irq_src; |
191 | struct amdgpu_fpriv; | 191 | struct amdgpu_fpriv; |
192 | struct amdgpu_bo_va_mapping; | 192 | struct amdgpu_bo_va_mapping; |
193 | struct amdgpu_atif; | ||
193 | 194 | ||
194 | enum amdgpu_cp_irq { | 195 | enum amdgpu_cp_irq { |
195 | AMDGPU_CP_IRQ_GFX_EOP = 0, | 196 | AMDGPU_CP_IRQ_GFX_EOP = 0, |
@@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch { | |||
1269 | /* | 1270 | /* |
1270 | * ACPI | 1271 | * ACPI |
1271 | */ | 1272 | */ |
1272 | struct amdgpu_atif_notification_cfg { | ||
1273 | bool enabled; | ||
1274 | int command_code; | ||
1275 | }; | ||
1276 | |||
1277 | struct amdgpu_atif_notifications { | ||
1278 | bool display_switch; | ||
1279 | bool expansion_mode_change; | ||
1280 | bool thermal_state; | ||
1281 | bool forced_power_state; | ||
1282 | bool system_power_state; | ||
1283 | bool display_conf_change; | ||
1284 | bool px_gfx_switch; | ||
1285 | bool brightness_change; | ||
1286 | bool dgpu_display_event; | ||
1287 | }; | ||
1288 | |||
1289 | struct amdgpu_atif_functions { | ||
1290 | bool system_params; | ||
1291 | bool sbios_requests; | ||
1292 | bool select_active_disp; | ||
1293 | bool lid_state; | ||
1294 | bool get_tv_standard; | ||
1295 | bool set_tv_standard; | ||
1296 | bool get_panel_expansion_mode; | ||
1297 | bool set_panel_expansion_mode; | ||
1298 | bool temperature_change; | ||
1299 | bool graphics_device_types; | ||
1300 | }; | ||
1301 | |||
1302 | struct amdgpu_atif { | ||
1303 | struct amdgpu_atif_notifications notifications; | ||
1304 | struct amdgpu_atif_functions functions; | ||
1305 | struct amdgpu_atif_notification_cfg notification_cfg; | ||
1306 | struct amdgpu_encoder *encoder_for_bl; | ||
1307 | }; | ||
1308 | |||
1309 | struct amdgpu_atcs_functions { | 1273 | struct amdgpu_atcs_functions { |
1310 | bool get_ext_state; | 1274 | bool get_ext_state; |
1311 | bool pcie_perf_req; | 1275 | bool pcie_perf_req; |
@@ -1466,7 +1430,7 @@ struct amdgpu_device { | |||
1466 | #if defined(CONFIG_DEBUG_FS) | 1430 | #if defined(CONFIG_DEBUG_FS) |
1467 | struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; | 1431 | struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; |
1468 | #endif | 1432 | #endif |
1469 | struct amdgpu_atif atif; | 1433 | struct amdgpu_atif *atif; |
1470 | struct amdgpu_atcs atcs; | 1434 | struct amdgpu_atcs atcs; |
1471 | struct mutex srbm_mutex; | 1435 | struct mutex srbm_mutex; |
1472 | /* GRBM index mutex. Protects concurrent access to GRBM index */ | 1436 | /* GRBM index mutex. Protects concurrent access to GRBM index */ |
@@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; | |||
1894 | static inline bool amdgpu_has_atpx(void) { return false; } | 1858 | static inline bool amdgpu_has_atpx(void) { return false; } |
1895 | #endif | 1859 | #endif |
1896 | 1860 | ||
1861 | #if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) | ||
1862 | void *amdgpu_atpx_get_dhandle(void); | ||
1863 | #else | ||
1864 | static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } | ||
1865 | #endif | ||
1866 | |||
1897 | /* | 1867 | /* |
1898 | * KMS | 1868 | * KMS |
1899 | */ | 1869 | */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 8fa850a070e0..0d8c3fc6eace 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | |||
@@ -34,6 +34,45 @@ | |||
34 | #include "amd_acpi.h" | 34 | #include "amd_acpi.h" |
35 | #include "atom.h" | 35 | #include "atom.h" |
36 | 36 | ||
37 | struct amdgpu_atif_notification_cfg { | ||
38 | bool enabled; | ||
39 | int command_code; | ||
40 | }; | ||
41 | |||
42 | struct amdgpu_atif_notifications { | ||
43 | bool display_switch; | ||
44 | bool expansion_mode_change; | ||
45 | bool thermal_state; | ||
46 | bool forced_power_state; | ||
47 | bool system_power_state; | ||
48 | bool display_conf_change; | ||
49 | bool px_gfx_switch; | ||
50 | bool brightness_change; | ||
51 | bool dgpu_display_event; | ||
52 | }; | ||
53 | |||
54 | struct amdgpu_atif_functions { | ||
55 | bool system_params; | ||
56 | bool sbios_requests; | ||
57 | bool select_active_disp; | ||
58 | bool lid_state; | ||
59 | bool get_tv_standard; | ||
60 | bool set_tv_standard; | ||
61 | bool get_panel_expansion_mode; | ||
62 | bool set_panel_expansion_mode; | ||
63 | bool temperature_change; | ||
64 | bool graphics_device_types; | ||
65 | }; | ||
66 | |||
67 | struct amdgpu_atif { | ||
68 | acpi_handle handle; | ||
69 | |||
70 | struct amdgpu_atif_notifications notifications; | ||
71 | struct amdgpu_atif_functions functions; | ||
72 | struct amdgpu_atif_notification_cfg notification_cfg; | ||
73 | struct amdgpu_encoder *encoder_for_bl; | ||
74 | }; | ||
75 | |||
37 | /* Call the ATIF method | 76 | /* Call the ATIF method |
38 | */ | 77 | */ |
39 | /** | 78 | /** |
@@ -46,8 +85,9 @@ | |||
46 | * Executes the requested ATIF function (all asics). | 85 | * Executes the requested ATIF function (all asics). |
47 | * Returns a pointer to the acpi output buffer. | 86 | * Returns a pointer to the acpi output buffer. |
48 | */ | 87 | */ |
49 | static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, | 88 | static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, |
50 | struct acpi_buffer *params) | 89 | int function, |
90 | struct acpi_buffer *params) | ||
51 | { | 91 | { |
52 | acpi_status status; | 92 | acpi_status status; |
53 | union acpi_object atif_arg_elements[2]; | 93 | union acpi_object atif_arg_elements[2]; |
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, | |||
70 | atif_arg_elements[1].integer.value = 0; | 110 | atif_arg_elements[1].integer.value = 0; |
71 | } | 111 | } |
72 | 112 | ||
73 | status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); | 113 | status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, |
114 | &buffer); | ||
74 | 115 | ||
75 | /* Fail only if calling the method fails and ATIF is supported */ | 116 | /* Fail only if calling the method fails and ATIF is supported */ |
76 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | 117 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { |
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas | |||
141 | * (all asics). | 182 | * (all asics). |
142 | * returns 0 on success, error on failure. | 183 | * returns 0 on success, error on failure. |
143 | */ | 184 | */ |
144 | static int amdgpu_atif_verify_interface(acpi_handle handle, | 185 | static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif) |
145 | struct amdgpu_atif *atif) | ||
146 | { | 186 | { |
147 | union acpi_object *info; | 187 | union acpi_object *info; |
148 | struct atif_verify_interface output; | 188 | struct atif_verify_interface output; |
149 | size_t size; | 189 | size_t size; |
150 | int err = 0; | 190 | int err = 0; |
151 | 191 | ||
152 | info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); | 192 | info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); |
153 | if (!info) | 193 | if (!info) |
154 | return -EIO; | 194 | return -EIO; |
155 | 195 | ||
@@ -176,6 +216,35 @@ out: | |||
176 | return err; | 216 | return err; |
177 | } | 217 | } |
178 | 218 | ||
219 | static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle) | ||
220 | { | ||
221 | acpi_handle handle = NULL; | ||
222 | char acpi_method_name[255] = { 0 }; | ||
223 | struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name }; | ||
224 | acpi_status status; | ||
225 | |||
226 | /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only | ||
227 | * systems, ATIF is in the dGPU's namespace. | ||
228 | */ | ||
229 | status = acpi_get_handle(dhandle, "ATIF", &handle); | ||
230 | if (ACPI_SUCCESS(status)) | ||
231 | goto out; | ||
232 | |||
233 | if (amdgpu_has_atpx()) { | ||
234 | status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF", | ||
235 | &handle); | ||
236 | if (ACPI_SUCCESS(status)) | ||
237 | goto out; | ||
238 | } | ||
239 | |||
240 | DRM_DEBUG_DRIVER("No ATIF handle found\n"); | ||
241 | return NULL; | ||
242 | out: | ||
243 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | ||
244 | DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name); | ||
245 | return handle; | ||
246 | } | ||
247 | |||
179 | /** | 248 | /** |
180 | * amdgpu_atif_get_notification_params - determine notify configuration | 249 | * amdgpu_atif_get_notification_params - determine notify configuration |
181 | * | 250 | * |
@@ -188,15 +257,16 @@ out: | |||
188 | * where n is specified in the result if a notifier is used. | 257 | * where n is specified in the result if a notifier is used. |
189 | * Returns 0 on success, error on failure. | 258 | * Returns 0 on success, error on failure. |
190 | */ | 259 | */ |
191 | static int amdgpu_atif_get_notification_params(acpi_handle handle, | 260 | static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif) |
192 | struct amdgpu_atif_notification_cfg *n) | ||
193 | { | 261 | { |
194 | union acpi_object *info; | 262 | union acpi_object *info; |
263 | struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg; | ||
195 | struct atif_system_params params; | 264 | struct atif_system_params params; |
196 | size_t size; | 265 | size_t size; |
197 | int err = 0; | 266 | int err = 0; |
198 | 267 | ||
199 | info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); | 268 | info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, |
269 | NULL); | ||
200 | if (!info) { | 270 | if (!info) { |
201 | err = -EIO; | 271 | err = -EIO; |
202 | goto out; | 272 | goto out; |
@@ -250,14 +320,15 @@ out: | |||
250 | * (all asics). | 320 | * (all asics). |
251 | * Returns 0 on success, error on failure. | 321 | * Returns 0 on success, error on failure. |
252 | */ | 322 | */ |
253 | static int amdgpu_atif_get_sbios_requests(acpi_handle handle, | 323 | static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif, |
254 | struct atif_sbios_requests *req) | 324 | struct atif_sbios_requests *req) |
255 | { | 325 | { |
256 | union acpi_object *info; | 326 | union acpi_object *info; |
257 | size_t size; | 327 | size_t size; |
258 | int count = 0; | 328 | int count = 0; |
259 | 329 | ||
260 | info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); | 330 | info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, |
331 | NULL); | ||
261 | if (!info) | 332 | if (!info) |
262 | return -EIO; | 333 | return -EIO; |
263 | 334 | ||
@@ -290,11 +361,10 @@ out: | |||
290 | * Returns NOTIFY code | 361 | * Returns NOTIFY code |
291 | */ | 362 | */ |
292 | static int amdgpu_atif_handler(struct amdgpu_device *adev, | 363 | static int amdgpu_atif_handler(struct amdgpu_device *adev, |
293 | struct acpi_bus_event *event) | 364 | struct acpi_bus_event *event) |
294 | { | 365 | { |
295 | struct amdgpu_atif *atif = &adev->atif; | 366 | struct amdgpu_atif *atif = adev->atif; |
296 | struct atif_sbios_requests req; | 367 | struct atif_sbios_requests req; |
297 | acpi_handle handle; | ||
298 | int count; | 368 | int count; |
299 | 369 | ||
300 | DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", | 370 | DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", |
@@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, | |||
303 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) | 373 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) |
304 | return NOTIFY_DONE; | 374 | return NOTIFY_DONE; |
305 | 375 | ||
306 | if (!atif->notification_cfg.enabled || | 376 | if (!atif || |
377 | !atif->notification_cfg.enabled || | ||
307 | event->type != atif->notification_cfg.command_code) | 378 | event->type != atif->notification_cfg.command_code) |
308 | /* Not our event */ | 379 | /* Not our event */ |
309 | return NOTIFY_DONE; | 380 | return NOTIFY_DONE; |
310 | 381 | ||
311 | /* Check pending SBIOS requests */ | 382 | /* Check pending SBIOS requests */ |
312 | handle = ACPI_HANDLE(&adev->pdev->dev); | 383 | count = amdgpu_atif_get_sbios_requests(atif, &req); |
313 | count = amdgpu_atif_get_sbios_requests(handle, &req); | ||
314 | 384 | ||
315 | if (count <= 0) | 385 | if (count <= 0) |
316 | return NOTIFY_DONE; | 386 | return NOTIFY_DONE; |
@@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb, | |||
641 | */ | 711 | */ |
642 | int amdgpu_acpi_init(struct amdgpu_device *adev) | 712 | int amdgpu_acpi_init(struct amdgpu_device *adev) |
643 | { | 713 | { |
644 | acpi_handle handle; | 714 | acpi_handle handle, atif_handle; |
645 | struct amdgpu_atif *atif = &adev->atif; | 715 | struct amdgpu_atif *atif; |
646 | struct amdgpu_atcs *atcs = &adev->atcs; | 716 | struct amdgpu_atcs *atcs = &adev->atcs; |
647 | int ret; | 717 | int ret; |
648 | 718 | ||
@@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) | |||
658 | DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); | 728 | DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); |
659 | } | 729 | } |
660 | 730 | ||
731 | /* Probe for ATIF, and initialize it if found */ | ||
732 | atif_handle = amdgpu_atif_probe_handle(handle); | ||
733 | if (!atif_handle) | ||
734 | goto out; | ||
735 | |||
736 | atif = kzalloc(sizeof(*atif), GFP_KERNEL); | ||
737 | if (!atif) { | ||
738 | DRM_WARN("Not enough memory to initialize ATIF\n"); | ||
739 | goto out; | ||
740 | } | ||
741 | atif->handle = atif_handle; | ||
742 | |||
661 | /* Call the ATIF method */ | 743 | /* Call the ATIF method */ |
662 | ret = amdgpu_atif_verify_interface(handle, atif); | 744 | ret = amdgpu_atif_verify_interface(atif); |
663 | if (ret) { | 745 | if (ret) { |
664 | DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); | 746 | DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); |
747 | kfree(atif); | ||
665 | goto out; | 748 | goto out; |
666 | } | 749 | } |
750 | adev->atif = atif; | ||
667 | 751 | ||
668 | if (atif->notifications.brightness_change) { | 752 | if (atif->notifications.brightness_change) { |
669 | struct drm_encoder *tmp; | 753 | struct drm_encoder *tmp; |
@@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) | |||
693 | } | 777 | } |
694 | 778 | ||
695 | if (atif->functions.system_params) { | 779 | if (atif->functions.system_params) { |
696 | ret = amdgpu_atif_get_notification_params(handle, | 780 | ret = amdgpu_atif_get_notification_params(atif); |
697 | &atif->notification_cfg); | ||
698 | if (ret) { | 781 | if (ret) { |
699 | DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", | 782 | DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", |
700 | ret); | 783 | ret); |
@@ -720,4 +803,6 @@ out: | |||
720 | void amdgpu_acpi_fini(struct amdgpu_device *adev) | 803 | void amdgpu_acpi_fini(struct amdgpu_device *adev) |
721 | { | 804 | { |
722 | unregister_acpi_notifier(&adev->acpi_nb); | 805 | unregister_acpi_notifier(&adev->acpi_nb); |
806 | if (adev->atif) | ||
807 | kfree(adev->atif); | ||
723 | } | 808 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index daa06e7c5bb7..9ab89371d9e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) { | |||
90 | return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; | 90 | return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; |
91 | } | 91 | } |
92 | 92 | ||
93 | #if defined(CONFIG_ACPI) | ||
94 | void *amdgpu_atpx_get_dhandle(void) { | ||
95 | return amdgpu_atpx_priv.dhandle; | ||
96 | } | ||
97 | #endif | ||
98 | |||
93 | /** | 99 | /** |
94 | * amdgpu_atpx_call - call an ATPX method | 100 | * amdgpu_atpx_call - call an ATPX method |
95 | * | 101 | * |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index f70eeed9ed76..7aaa263ad8c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
231 | if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) | 231 | if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) |
232 | fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; | 232 | fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; |
233 | 233 | ||
234 | /* wrap the last IB with fence */ | ||
235 | if (job && job->uf_addr) { | ||
236 | amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, | ||
237 | fence_flags | AMDGPU_FENCE_FLAG_64BIT); | ||
238 | } | ||
239 | |||
234 | r = amdgpu_fence_emit(ring, f, fence_flags); | 240 | r = amdgpu_fence_emit(ring, f, fence_flags); |
235 | if (r) { | 241 | if (r) { |
236 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); | 242 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); |
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
243 | if (ring->funcs->insert_end) | 249 | if (ring->funcs->insert_end) |
244 | ring->funcs->insert_end(ring); | 250 | ring->funcs->insert_end(ring); |
245 | 251 | ||
246 | /* wrap the last IB with fence */ | ||
247 | if (job && job->uf_addr) { | ||
248 | amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, | ||
249 | fence_flags | AMDGPU_FENCE_FLAG_64BIT); | ||
250 | } | ||
251 | |||
252 | if (patch_offset != ~0 && ring->funcs->patch_cond_exec) | 252 | if (patch_offset != ~0 && ring->funcs->patch_cond_exec) |
253 | amdgpu_ring_patch_cond_exec(ring, patch_offset); | 253 | amdgpu_ring_patch_cond_exec(ring, patch_offset); |
254 | 254 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index b455da487782..fc818b4d849c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
1882 | if (!amdgpu_device_has_dc_support(adev)) { | 1882 | if (!amdgpu_device_has_dc_support(adev)) { |
1883 | mutex_lock(&adev->pm.mutex); | 1883 | mutex_lock(&adev->pm.mutex); |
1884 | amdgpu_dpm_get_active_displays(adev); | 1884 | amdgpu_dpm_get_active_displays(adev); |
1885 | adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs; | 1885 | adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; |
1886 | adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); | 1886 | adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); |
1887 | adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); | 1887 | adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); |
1888 | /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ | 1888 | /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 0999c843f623..a71b97519cc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { | |||
900 | .emit_frame_size = | 900 | .emit_frame_size = |
901 | 4 + /* vce_v3_0_emit_pipeline_sync */ | 901 | 4 + /* vce_v3_0_emit_pipeline_sync */ |
902 | 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ | 902 | 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ |
903 | .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ | 903 | .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ |
904 | .emit_ib = amdgpu_vce_ring_emit_ib, | 904 | .emit_ib = amdgpu_vce_ring_emit_ib, |
905 | .emit_fence = amdgpu_vce_ring_emit_fence, | 905 | .emit_fence = amdgpu_vce_ring_emit_fence, |
906 | .test_ring = amdgpu_vce_ring_test_ring, | 906 | .test_ring = amdgpu_vce_ring_test_ring, |
@@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { | |||
924 | 6 + /* vce_v3_0_emit_vm_flush */ | 924 | 6 + /* vce_v3_0_emit_vm_flush */ |
925 | 4 + /* vce_v3_0_emit_pipeline_sync */ | 925 | 4 + /* vce_v3_0_emit_pipeline_sync */ |
926 | 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ | 926 | 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ |
927 | .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ | 927 | .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ |
928 | .emit_ib = vce_v3_0_ring_emit_ib, | 928 | .emit_ib = vce_v3_0_ring_emit_ib, |
929 | .emit_vm_flush = vce_v3_0_emit_vm_flush, | 929 | .emit_vm_flush = vce_v3_0_emit_vm_flush, |
930 | .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, | 930 | .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3a8d6356afc2..770c6b24be0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) | |||
2175 | return color_space; | 2175 | return color_space; |
2176 | } | 2176 | } |
2177 | 2177 | ||
2178 | static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) | ||
2179 | { | ||
2180 | if (timing_out->display_color_depth <= COLOR_DEPTH_888) | ||
2181 | return; | ||
2182 | |||
2183 | timing_out->display_color_depth--; | ||
2184 | } | ||
2185 | |||
2186 | static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, | ||
2187 | const struct drm_display_info *info) | ||
2188 | { | ||
2189 | int normalized_clk; | ||
2190 | if (timing_out->display_color_depth <= COLOR_DEPTH_888) | ||
2191 | return; | ||
2192 | do { | ||
2193 | normalized_clk = timing_out->pix_clk_khz; | ||
2194 | /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ | ||
2195 | if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) | ||
2196 | normalized_clk /= 2; | ||
2197 | /* Adjusting pix clock following on HDMI spec based on colour depth */ | ||
2198 | switch (timing_out->display_color_depth) { | ||
2199 | case COLOR_DEPTH_101010: | ||
2200 | normalized_clk = (normalized_clk * 30) / 24; | ||
2201 | break; | ||
2202 | case COLOR_DEPTH_121212: | ||
2203 | normalized_clk = (normalized_clk * 36) / 24; | ||
2204 | break; | ||
2205 | case COLOR_DEPTH_161616: | ||
2206 | normalized_clk = (normalized_clk * 48) / 24; | ||
2207 | break; | ||
2208 | default: | ||
2209 | return; | ||
2210 | } | ||
2211 | if (normalized_clk <= info->max_tmds_clock) | ||
2212 | return; | ||
2213 | reduce_mode_colour_depth(timing_out); | ||
2214 | |||
2215 | } while (timing_out->display_color_depth > COLOR_DEPTH_888); | ||
2216 | |||
2217 | } | ||
2178 | /*****************************************************************************/ | 2218 | /*****************************************************************************/ |
2179 | 2219 | ||
2180 | static void | 2220 | static void |
@@ -2183,6 +2223,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, | |||
2183 | const struct drm_connector *connector) | 2223 | const struct drm_connector *connector) |
2184 | { | 2224 | { |
2185 | struct dc_crtc_timing *timing_out = &stream->timing; | 2225 | struct dc_crtc_timing *timing_out = &stream->timing; |
2226 | const struct drm_display_info *info = &connector->display_info; | ||
2186 | 2227 | ||
2187 | memset(timing_out, 0, sizeof(struct dc_crtc_timing)); | 2228 | memset(timing_out, 0, sizeof(struct dc_crtc_timing)); |
2188 | 2229 | ||
@@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, | |||
2191 | timing_out->v_border_top = 0; | 2232 | timing_out->v_border_top = 0; |
2192 | timing_out->v_border_bottom = 0; | 2233 | timing_out->v_border_bottom = 0; |
2193 | /* TODO: un-hardcode */ | 2234 | /* TODO: un-hardcode */ |
2194 | 2235 | if (drm_mode_is_420_only(info, mode_in) | |
2195 | if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) | 2236 | && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) |
2237 | timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; | ||
2238 | else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) | ||
2196 | && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) | 2239 | && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) |
2197 | timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; | 2240 | timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; |
2198 | else | 2241 | else |
@@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, | |||
2228 | 2271 | ||
2229 | stream->out_transfer_func->type = TF_TYPE_PREDEFINED; | 2272 | stream->out_transfer_func->type = TF_TYPE_PREDEFINED; |
2230 | stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; | 2273 | stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; |
2274 | if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) | ||
2275 | adjust_colour_depth_from_display_info(timing_out, info); | ||
2231 | } | 2276 | } |
2232 | 2277 | ||
2233 | static void fill_audio_info(struct audio_info *audio_info, | 2278 | static void fill_audio_info(struct audio_info *audio_info, |
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 092d800b703a..33b4de4ad66e 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h | |||
@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1 | |||
1433 | uint8_t acggfxclkspreadpercent; | 1433 | uint8_t acggfxclkspreadpercent; |
1434 | uint16_t acggfxclkspreadfreq; | 1434 | uint16_t acggfxclkspreadfreq; |
1435 | 1435 | ||
1436 | uint32_t boardreserved[10]; | 1436 | uint8_t Vr2_I2C_address; |
1437 | uint8_t padding_vr2[3]; | ||
1438 | |||
1439 | uint32_t boardreserved[9]; | ||
1437 | }; | 1440 | }; |
1438 | 1441 | ||
1439 | /* | 1442 | /* |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 5325661fedff..d27c1c9df286 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | |||
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI | |||
512 | return 0; | 512 | return 0; |
513 | } | 513 | } |
514 | 514 | ||
515 | static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr, | ||
516 | struct pp_atomfwctrl_bios_boot_up_values *boot_values, | ||
517 | struct atom_firmware_info_v3_2 *fw_info) | ||
518 | { | ||
519 | uint32_t frequency = 0; | ||
520 | |||
521 | boot_values->ulRevision = fw_info->firmware_revision; | ||
522 | boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; | ||
523 | boot_values->ulUClk = fw_info->bootup_mclk_in10khz; | ||
524 | boot_values->usVddc = fw_info->bootup_vddc_mv; | ||
525 | boot_values->usVddci = fw_info->bootup_vddci_mv; | ||
526 | boot_values->usMvddc = fw_info->bootup_mvddc_mv; | ||
527 | boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; | ||
528 | boot_values->ucCoolingID = fw_info->coolingsolution_id; | ||
529 | boot_values->ulSocClk = 0; | ||
530 | boot_values->ulDCEFClk = 0; | ||
531 | |||
532 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) | ||
533 | boot_values->ulSocClk = frequency; | ||
534 | |||
535 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) | ||
536 | boot_values->ulDCEFClk = frequency; | ||
537 | |||
538 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) | ||
539 | boot_values->ulEClk = frequency; | ||
540 | |||
541 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) | ||
542 | boot_values->ulVClk = frequency; | ||
543 | |||
544 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) | ||
545 | boot_values->ulDClk = frequency; | ||
546 | } | ||
547 | |||
548 | static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, | ||
549 | struct pp_atomfwctrl_bios_boot_up_values *boot_values, | ||
550 | struct atom_firmware_info_v3_1 *fw_info) | ||
551 | { | ||
552 | uint32_t frequency = 0; | ||
553 | |||
554 | boot_values->ulRevision = fw_info->firmware_revision; | ||
555 | boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; | ||
556 | boot_values->ulUClk = fw_info->bootup_mclk_in10khz; | ||
557 | boot_values->usVddc = fw_info->bootup_vddc_mv; | ||
558 | boot_values->usVddci = fw_info->bootup_vddci_mv; | ||
559 | boot_values->usMvddc = fw_info->bootup_mvddc_mv; | ||
560 | boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; | ||
561 | boot_values->ucCoolingID = fw_info->coolingsolution_id; | ||
562 | boot_values->ulSocClk = 0; | ||
563 | boot_values->ulDCEFClk = 0; | ||
564 | |||
565 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) | ||
566 | boot_values->ulSocClk = frequency; | ||
567 | |||
568 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) | ||
569 | boot_values->ulDCEFClk = frequency; | ||
570 | |||
571 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) | ||
572 | boot_values->ulEClk = frequency; | ||
573 | |||
574 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) | ||
575 | boot_values->ulVClk = frequency; | ||
576 | |||
577 | if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) | ||
578 | boot_values->ulDClk = frequency; | ||
579 | } | ||
580 | |||
515 | int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, | 581 | int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, |
516 | struct pp_atomfwctrl_bios_boot_up_values *boot_values) | 582 | struct pp_atomfwctrl_bios_boot_up_values *boot_values) |
517 | { | 583 | { |
518 | struct atom_firmware_info_v3_1 *info = NULL; | 584 | struct atom_firmware_info_v3_2 *fwinfo_3_2; |
585 | struct atom_firmware_info_v3_1 *fwinfo_3_1; | ||
586 | struct atom_common_table_header *info = NULL; | ||
519 | uint16_t ix; | 587 | uint16_t ix; |
520 | 588 | ||
521 | ix = GetIndexIntoMasterDataTable(firmwareinfo); | 589 | ix = GetIndexIntoMasterDataTable(firmwareinfo); |
522 | info = (struct atom_firmware_info_v3_1 *) | 590 | info = (struct atom_common_table_header *) |
523 | smu_atom_get_data_table(hwmgr->adev, | 591 | smu_atom_get_data_table(hwmgr->adev, |
524 | ix, NULL, NULL, NULL); | 592 | ix, NULL, NULL, NULL); |
525 | 593 | ||
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, | |||
528 | return -EINVAL; | 596 | return -EINVAL; |
529 | } | 597 | } |
530 | 598 | ||
531 | boot_values->ulRevision = info->firmware_revision; | 599 | if ((info->format_revision == 3) && (info->content_revision == 2)) { |
532 | boot_values->ulGfxClk = info->bootup_sclk_in10khz; | 600 | fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info; |
533 | boot_values->ulUClk = info->bootup_mclk_in10khz; | 601 | pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr, |
534 | boot_values->usVddc = info->bootup_vddc_mv; | 602 | boot_values, fwinfo_3_2); |
535 | boot_values->usVddci = info->bootup_vddci_mv; | 603 | } else if ((info->format_revision == 3) && (info->content_revision == 1)) { |
536 | boot_values->usMvddc = info->bootup_mvddc_mv; | 604 | fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info; |
537 | boot_values->usVddGfx = info->bootup_vddgfx_mv; | 605 | pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr, |
538 | boot_values->ucCoolingID = info->coolingsolution_id; | 606 | boot_values, fwinfo_3_1); |
539 | boot_values->ulSocClk = 0; | 607 | } else { |
540 | boot_values->ulDCEFClk = 0; | 608 | pr_info("Fw info table revision does not match!"); |
609 | return -EINVAL; | ||
610 | } | ||
541 | 611 | ||
542 | return 0; | 612 | return 0; |
543 | } | 613 | } |
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, | |||
629 | param->acggfxclkspreadpercent = info->acggfxclkspreadpercent; | 699 | param->acggfxclkspreadpercent = info->acggfxclkspreadpercent; |
630 | param->acggfxclkspreadfreq = info->acggfxclkspreadfreq; | 700 | param->acggfxclkspreadfreq = info->acggfxclkspreadfreq; |
631 | 701 | ||
702 | param->Vr2_I2C_address = info->Vr2_I2C_address; | ||
703 | |||
632 | return 0; | 704 | return 0; |
633 | } | 705 | } |
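The ppatomfwctrl.c change above stops assuming a v3_1 firmware-info table and instead dispatches on the format/content revision carried in the common table header, returning -EINVAL for revisions it does not know. A minimal sketch of that dispatch pattern, using hypothetical structure layouts rather than the real atomfirmware headers:

	#include <stdint.h>
	#include <errno.h>

	/* Hypothetical stand-ins for the versioned firmware-info tables. */
	struct common_header { uint8_t format_revision, content_revision; };
	struct fw_info_v3_1  { struct common_header hdr; uint32_t bootup_sclk; };
	struct fw_info_v3_2  { struct common_header hdr; uint32_t bootup_sclk; uint32_t extra; };

	struct boot_values { uint32_t gfx_clk; };

	/* Pick the copy routine that matches the revision reported by the header. */
	static int copy_bootup_values(const struct common_header *hdr, struct boot_values *out)
	{
		if (hdr->format_revision == 3 && hdr->content_revision == 2) {
			const struct fw_info_v3_2 *fw = (const struct fw_info_v3_2 *)hdr;
			out->gfx_clk = fw->bootup_sclk;
			return 0;
		}
		if (hdr->format_revision == 3 && hdr->content_revision == 1) {
			const struct fw_info_v3_1 *fw = (const struct fw_info_v3_1 *)hdr;
			out->gfx_clk = fw->bootup_sclk;
			return 0;
		}
		return -EINVAL;	/* unknown revision: refuse rather than misparse */
	}

Casting through the common header is safe in this scheme because every versioned table begins with the same header fields.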
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index fe10aa4db5e6..22e21668c93a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | |||
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values { | |||
136 | uint32_t ulUClk; | 136 | uint32_t ulUClk; |
137 | uint32_t ulSocClk; | 137 | uint32_t ulSocClk; |
138 | uint32_t ulDCEFClk; | 138 | uint32_t ulDCEFClk; |
139 | uint32_t ulEClk; | ||
140 | uint32_t ulVClk; | ||
141 | uint32_t ulDClk; | ||
139 | uint16_t usVddc; | 142 | uint16_t usVddc; |
140 | uint16_t usVddci; | 143 | uint16_t usVddci; |
141 | uint16_t usMvddc; | 144 | uint16_t usMvddc; |
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters | |||
207 | uint8_t acggfxclkspreadenabled; | 210 | uint8_t acggfxclkspreadenabled; |
208 | uint8_t acggfxclkspreadpercent; | 211 | uint8_t acggfxclkspreadpercent; |
209 | uint16_t acggfxclkspreadfreq; | 212 | uint16_t acggfxclkspreadfreq; |
213 | |||
214 | uint8_t Vr2_I2C_address; | ||
210 | }; | 215 | }; |
211 | 216 | ||
212 | int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, | 217 | int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 782e2098824d..c98e5de777cd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | |||
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr) | |||
81 | 81 | ||
82 | data->registry_data.disallowed_features = 0x0; | 82 | data->registry_data.disallowed_features = 0x0; |
83 | data->registry_data.od_state_in_dc_support = 0; | 83 | data->registry_data.od_state_in_dc_support = 0; |
84 | data->registry_data.thermal_support = 1; | ||
84 | data->registry_data.skip_baco_hardware = 0; | 85 | data->registry_data.skip_baco_hardware = 0; |
85 | 86 | ||
86 | data->registry_data.log_avfs_param = 0; | 87 | data->registry_data.log_avfs_param = 0; |
@@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) | |||
803 | data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; | 804 | data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; |
804 | data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; | 805 | data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; |
805 | data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; | 806 | data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; |
807 | data->vbios_boot_state.eclock = boot_up_values.ulEClk; | ||
808 | data->vbios_boot_state.dclock = boot_up_values.ulDClk; | ||
809 | data->vbios_boot_state.vclock = boot_up_values.ulVClk; | ||
806 | smum_send_msg_to_smc_with_parameter(hwmgr, | 810 | smum_send_msg_to_smc_with_parameter(hwmgr, |
807 | PPSMC_MSG_SetMinDeepSleepDcefclk, | 811 | PPSMC_MSG_SetMinDeepSleepDcefclk, |
808 | (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); | 812 | (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h index e81ded1ec198..49b38df8c7f2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | |||
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state { | |||
167 | uint32_t mem_clock; | 167 | uint32_t mem_clock; |
168 | uint32_t soc_clock; | 168 | uint32_t soc_clock; |
169 | uint32_t dcef_clock; | 169 | uint32_t dcef_clock; |
170 | uint32_t eclock; | ||
171 | uint32_t dclock; | ||
172 | uint32_t vclock; | ||
170 | }; | 173 | }; |
171 | 174 | ||
172 | #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 | 175 | #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index 888ddca902d8..29914700ee82 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | |||
@@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable | |||
230 | ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF; | 230 | ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF; |
231 | } | 231 | } |
232 | 232 | ||
233 | ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address; | ||
234 | |||
233 | return 0; | 235 | return 0; |
234 | } | 236 | } |
235 | 237 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index 2f8a3b983cce..b08526fd1619 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | |||
@@ -499,7 +499,10 @@ typedef struct { | |||
499 | uint8_t AcgGfxclkSpreadPercent; | 499 | uint8_t AcgGfxclkSpreadPercent; |
500 | uint16_t AcgGfxclkSpreadFreq; | 500 | uint16_t AcgGfxclkSpreadFreq; |
501 | 501 | ||
502 | uint32_t BoardReserved[10]; | 502 | uint8_t Vr2_I2C_address; |
503 | uint8_t padding_vr2[3]; | ||
504 | |||
505 | uint32_t BoardReserved[9]; | ||
503 | 506 | ||
504 | 507 | ||
505 | uint32_t MmHubPadding[7]; | 508 | uint32_t MmHubPadding[7]; |
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 250effa0e6b8..a6e8f4591e63 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <drm/bridge/mhl.h> | 14 | #include <drm/bridge/mhl.h> |
15 | #include <drm/drm_crtc.h> | 15 | #include <drm/drm_crtc.h> |
16 | #include <drm/drm_edid.h> | 16 | #include <drm/drm_edid.h> |
17 | #include <drm/drm_encoder.h> | ||
17 | 18 | ||
18 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
19 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
@@ -72,9 +73,7 @@ struct sii8620 { | |||
72 | struct regulator_bulk_data supplies[2]; | 73 | struct regulator_bulk_data supplies[2]; |
73 | struct mutex lock; /* context lock, protects fields below */ | 74 | struct mutex lock; /* context lock, protects fields below */ |
74 | int error; | 75 | int error; |
75 | int pixel_clock; | ||
76 | unsigned int use_packed_pixel:1; | 76 | unsigned int use_packed_pixel:1; |
77 | int video_code; | ||
78 | enum sii8620_mode mode; | 77 | enum sii8620_mode mode; |
79 | enum sii8620_sink_type sink_type; | 78 | enum sii8620_sink_type sink_type; |
80 | u8 cbus_status; | 79 | u8 cbus_status; |
@@ -82,7 +81,6 @@ struct sii8620 { | |||
82 | u8 xstat[MHL_XDS_SIZE]; | 81 | u8 xstat[MHL_XDS_SIZE]; |
83 | u8 devcap[MHL_DCAP_SIZE]; | 82 | u8 devcap[MHL_DCAP_SIZE]; |
84 | u8 xdevcap[MHL_XDC_SIZE]; | 83 | u8 xdevcap[MHL_XDC_SIZE]; |
85 | u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; | ||
86 | bool feature_complete; | 84 | bool feature_complete; |
87 | bool devcap_read; | 85 | bool devcap_read; |
88 | bool sink_detected; | 86 | bool sink_detected; |
@@ -1017,21 +1015,36 @@ static void sii8620_stop_video(struct sii8620 *ctx) | |||
1017 | 1015 | ||
1018 | static void sii8620_set_format(struct sii8620 *ctx) | 1016 | static void sii8620_set_format(struct sii8620 *ctx) |
1019 | { | 1017 | { |
1018 | u8 out_fmt; | ||
1019 | |||
1020 | if (sii8620_is_mhl3(ctx)) { | 1020 | if (sii8620_is_mhl3(ctx)) { |
1021 | sii8620_setbits(ctx, REG_M3_P0CTRL, | 1021 | sii8620_setbits(ctx, REG_M3_P0CTRL, |
1022 | BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, | 1022 | BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, |
1023 | ctx->use_packed_pixel ? ~0 : 0); | 1023 | ctx->use_packed_pixel ? ~0 : 0); |
1024 | } else { | 1024 | } else { |
1025 | if (ctx->use_packed_pixel) { | ||
1026 | sii8620_write_seq_static(ctx, | ||
1027 | REG_VID_MODE, BIT_VID_MODE_M1080P, | ||
1028 | REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1, | ||
1029 | REG_MHLTX_CTL6, 0x60 | ||
1030 | ); | ||
1031 | } else { | ||
1025 | sii8620_write_seq_static(ctx, | 1032 | sii8620_write_seq_static(ctx, |
1026 | REG_VID_MODE, 0, | 1033 | REG_VID_MODE, 0, |
1027 | REG_MHL_TOP_CTL, 1, | 1034 | REG_MHL_TOP_CTL, 1, |
1028 | REG_MHLTX_CTL6, 0xa0 | 1035 | REG_MHLTX_CTL6, 0xa0 |
1029 | ); | 1036 | ); |
1037 | } | ||
1030 | } | 1038 | } |
1031 | 1039 | ||
1040 | if (ctx->use_packed_pixel) | ||
1041 | out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL); | ||
1042 | else | ||
1043 | out_fmt = VAL_TPI_FORMAT(RGB, FULL); | ||
1044 | |||
1032 | sii8620_write_seq(ctx, | 1045 | sii8620_write_seq(ctx, |
1033 | REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), | 1046 | REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), |
1034 | REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), | 1047 | REG_TPI_OUTPUT, out_fmt, |
1035 | ); | 1048 | ); |
1036 | } | 1049 | } |
1037 | 1050 | ||
@@ -1082,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame, | |||
1082 | return frm_len; | 1095 | return frm_len; |
1083 | } | 1096 | } |
1084 | 1097 | ||
1085 | static void sii8620_set_infoframes(struct sii8620 *ctx) | 1098 | static void sii8620_set_infoframes(struct sii8620 *ctx, |
1099 | struct drm_display_mode *mode) | ||
1086 | { | 1100 | { |
1087 | struct mhl3_infoframe mhl_frm; | 1101 | struct mhl3_infoframe mhl_frm; |
1088 | union hdmi_infoframe frm; | 1102 | union hdmi_infoframe frm; |
1089 | u8 buf[31]; | 1103 | u8 buf[31]; |
1090 | int ret; | 1104 | int ret; |
1091 | 1105 | ||
1106 | ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, | ||
1107 | mode, | ||
1108 | true); | ||
1109 | if (ctx->use_packed_pixel) | ||
1110 | frm.avi.colorspace = HDMI_COLORSPACE_YUV422; | ||
1111 | |||
1112 | if (!ret) | ||
1113 | ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf)); | ||
1114 | if (ret > 0) | ||
1115 | sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3); | ||
1116 | |||
1092 | if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) { | 1117 | if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) { |
1093 | sii8620_write(ctx, REG_TPI_SC, | 1118 | sii8620_write(ctx, REG_TPI_SC, |
1094 | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); | 1119 | BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); |
1095 | sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3, | ||
1096 | ARRAY_SIZE(ctx->avif) - 3); | ||
1097 | sii8620_write(ctx, REG_PKT_FILTER_0, | 1120 | sii8620_write(ctx, REG_PKT_FILTER_0, |
1098 | BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | | 1121 | BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | |
1099 | BIT_PKT_FILTER_0_DROP_MPEG_PKT | | 1122 | BIT_PKT_FILTER_0_DROP_MPEG_PKT | |
@@ -1102,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx) | |||
1102 | return; | 1125 | return; |
1103 | } | 1126 | } |
1104 | 1127 | ||
1105 | ret = hdmi_avi_infoframe_init(&frm.avi); | ||
1106 | frm.avi.colorspace = HDMI_COLORSPACE_YUV422; | ||
1107 | frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; | ||
1108 | frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9; | ||
1109 | frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709; | ||
1110 | frm.avi.video_code = ctx->video_code; | ||
1111 | if (!ret) | ||
1112 | ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf)); | ||
1113 | if (ret > 0) | ||
1114 | sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3); | ||
1115 | sii8620_write(ctx, REG_PKT_FILTER_0, | 1128 | sii8620_write(ctx, REG_PKT_FILTER_0, |
1116 | BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | | 1129 | BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | |
1117 | BIT_PKT_FILTER_0_DROP_MPEG_PKT | | 1130 | BIT_PKT_FILTER_0_DROP_MPEG_PKT | |
@@ -1131,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx) | |||
1131 | 1144 | ||
1132 | static void sii8620_start_video(struct sii8620 *ctx) | 1145 | static void sii8620_start_video(struct sii8620 *ctx) |
1133 | { | 1146 | { |
1147 | struct drm_display_mode *mode = | ||
1148 | &ctx->bridge.encoder->crtc->state->adjusted_mode; | ||
1149 | |||
1134 | if (!sii8620_is_mhl3(ctx)) | 1150 | if (!sii8620_is_mhl3(ctx)) |
1135 | sii8620_stop_video(ctx); | 1151 | sii8620_stop_video(ctx); |
1136 | 1152 | ||
@@ -1149,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx) | |||
1149 | sii8620_set_format(ctx); | 1165 | sii8620_set_format(ctx); |
1150 | 1166 | ||
1151 | if (!sii8620_is_mhl3(ctx)) { | 1167 | if (!sii8620_is_mhl3(ctx)) { |
1152 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | 1168 | u8 link_mode = MHL_DST_LM_PATH_ENABLED; |
1153 | MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED); | 1169 | |
1170 | if (ctx->use_packed_pixel) | ||
1171 | link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL; | ||
1172 | else | ||
1173 | link_mode |= MHL_DST_LM_CLK_MODE_NORMAL; | ||
1174 | |||
1175 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode); | ||
1154 | sii8620_set_auto_zone(ctx); | 1176 | sii8620_set_auto_zone(ctx); |
1155 | } else { | 1177 | } else { |
1156 | static const struct { | 1178 | static const struct { |
@@ -1167,7 +1189,7 @@ static void sii8620_start_video(struct sii8620 *ctx) | |||
1167 | MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 }, | 1189 | MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 }, |
1168 | }; | 1190 | }; |
1169 | u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN; | 1191 | u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN; |
1170 | int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); | 1192 | int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3); |
1171 | int i; | 1193 | int i; |
1172 | 1194 | ||
1173 | for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i) | 1195 | for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i) |
@@ -1196,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx) | |||
1196 | clk_spec[i].link_rate); | 1218 | clk_spec[i].link_rate); |
1197 | } | 1219 | } |
1198 | 1220 | ||
1199 | sii8620_set_infoframes(ctx); | 1221 | sii8620_set_infoframes(ctx, mode); |
1200 | } | 1222 | } |
1201 | 1223 | ||
1202 | static void sii8620_disable_hpd(struct sii8620 *ctx) | 1224 | static void sii8620_disable_hpd(struct sii8620 *ctx) |
@@ -1661,14 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx) | |||
1661 | 1683 | ||
1662 | static void sii8620_status_changed_path(struct sii8620 *ctx) | 1684 | static void sii8620_status_changed_path(struct sii8620 *ctx) |
1663 | { | 1685 | { |
1664 | if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { | 1686 | u8 link_mode; |
1665 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | 1687 | |
1666 | MHL_DST_LM_CLK_MODE_NORMAL | 1688 | if (ctx->use_packed_pixel) |
1667 | | MHL_DST_LM_PATH_ENABLED); | 1689 | link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL; |
1668 | } else { | 1690 | else |
1669 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | 1691 | link_mode = MHL_DST_LM_CLK_MODE_NORMAL; |
1670 | MHL_DST_LM_CLK_MODE_NORMAL); | 1692 | |
1671 | } | 1693 | if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) |
1694 | link_mode |= MHL_DST_LM_PATH_ENABLED; | ||
1695 | |||
1696 | sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), | ||
1697 | link_mode); | ||
1672 | } | 1698 | } |
1673 | 1699 | ||
1674 | static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) | 1700 | static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) |
@@ -2242,8 +2268,6 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge, | |||
2242 | mutex_lock(&ctx->lock); | 2268 | mutex_lock(&ctx->lock); |
2243 | 2269 | ||
2244 | ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode); | 2270 | ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode); |
2245 | ctx->video_code = drm_match_cea_mode(adjusted_mode); | ||
2246 | ctx->pixel_clock = adjusted_mode->clock; | ||
2247 | 2271 | ||
2248 | mutex_unlock(&ctx->lock); | 2272 | mutex_unlock(&ctx->lock); |
2249 | 2273 | ||
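In the sil-sii8620 change, the cached pixel_clock and video_code fields are dropped; the link-clock requirement and the AVI infoframe are now derived from the encoder's adjusted mode when video starts. The clock term simply scales the pixel clock by bytes per pixel: 2 for packed YCbCr 4:2:2, 3 for 24-bit RGB. An illustrative helper, not part of the driver:

	#include <stdbool.h>

	/*
	 * Illustrative only: estimate the video payload the MHL link must carry,
	 * in kHz times bytes-per-pixel, mirroring the driver's clk computation.
	 * mode_clock_khz is the DRM mode clock (pixel clock in kHz).
	 */
	static int mhl_payload_rate(int mode_clock_khz, bool packed_pixel)
	{
		/* packed pixel = YCbCr 4:2:2, 2 bytes/pixel; otherwise RGB, 3 bytes/pixel */
		return mode_clock_khz * (packed_pixel ? 2 : 3);
	}

	/* e.g. 1080p60: 148500 kHz * 3 = 445500 unpacked, 297000 when packed */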
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 1f8031e30f53..cdb10f885a4f 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c | |||
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref) | |||
532 | 532 | ||
533 | drm_mode_object_unregister(blob->dev, &blob->base); | 533 | drm_mode_object_unregister(blob->dev, &blob->base); |
534 | 534 | ||
535 | kfree(blob); | 535 | kvfree(blob); |
536 | } | 536 | } |
537 | 537 | ||
538 | /** | 538 | /** |
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, | |||
559 | if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) | 559 | if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) |
560 | return ERR_PTR(-EINVAL); | 560 | return ERR_PTR(-EINVAL); |
561 | 561 | ||
562 | blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); | 562 | blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); |
563 | if (!blob) | 563 | if (!blob) |
564 | return ERR_PTR(-ENOMEM); | 564 | return ERR_PTR(-ENOMEM); |
565 | 565 | ||
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, | |||
576 | ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, | 576 | ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, |
577 | true, drm_property_free_blob); | 577 | true, drm_property_free_blob); |
578 | if (ret) { | 578 | if (ret) { |
579 | kfree(blob); | 579 | kvfree(blob); |
580 | return ERR_PTR(-EINVAL); | 580 | return ERR_PTR(-EINVAL); |
581 | } | 581 | } |
582 | 582 | ||
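The drm_property.c hunks switch the blob allocation to kvzalloc() because the blob length is user-supplied and may be too large for a contiguous kmalloc; every free path must then use kvfree(), which releases both kmalloc- and vmalloc-backed memory. A hedged kernel-style sketch of the pairing, with illustrative names:

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/mm.h>

	struct example_blob {
		size_t length;
		u8 data[];
	};

	/* Allocate a possibly large, user-sized object: kvzalloc may fall back to vmalloc. */
	static struct example_blob *example_blob_alloc(size_t length)
	{
		struct example_blob *blob;

		blob = kvzalloc(sizeof(*blob) + length, GFP_KERNEL);
		if (!blob)
			return NULL;
		blob->length = length;
		return blob;
	}

	/* Anything allocated with kvzalloc()/kvmalloc() must be released with kvfree(). */
	static void example_blob_free(struct example_blob *blob)
	{
		kvfree(blob);
	}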
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index e5013a999147..540b59fb4103 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c | |||
@@ -631,8 +631,11 @@ static struct platform_driver etnaviv_platform_driver = { | |||
631 | }, | 631 | }, |
632 | }; | 632 | }; |
633 | 633 | ||
634 | static struct platform_device *etnaviv_drm; | ||
635 | |||
634 | static int __init etnaviv_init(void) | 636 | static int __init etnaviv_init(void) |
635 | { | 637 | { |
638 | struct platform_device *pdev; | ||
636 | int ret; | 639 | int ret; |
637 | struct device_node *np; | 640 | struct device_node *np; |
638 | 641 | ||
@@ -644,7 +647,7 @@ static int __init etnaviv_init(void) | |||
644 | 647 | ||
645 | ret = platform_driver_register(&etnaviv_platform_driver); | 648 | ret = platform_driver_register(&etnaviv_platform_driver); |
646 | if (ret != 0) | 649 | if (ret != 0) |
647 | platform_driver_unregister(&etnaviv_gpu_driver); | 650 | goto unregister_gpu_driver; |
648 | 651 | ||
649 | /* | 652 | /* |
650 | * If the DT contains at least one available GPU device, instantiate | 653 | * If the DT contains at least one available GPU device, instantiate |
@@ -653,20 +656,33 @@ static int __init etnaviv_init(void) | |||
653 | for_each_compatible_node(np, NULL, "vivante,gc") { | 656 | for_each_compatible_node(np, NULL, "vivante,gc") { |
654 | if (!of_device_is_available(np)) | 657 | if (!of_device_is_available(np)) |
655 | continue; | 658 | continue; |
656 | 659 | pdev = platform_device_register_simple("etnaviv", -1, | |
657 | platform_device_register_simple("etnaviv", -1, NULL, 0); | 660 | NULL, 0); |
661 | if (IS_ERR(pdev)) { | ||
662 | ret = PTR_ERR(pdev); | ||
663 | of_node_put(np); | ||
664 | goto unregister_platform_driver; | ||
665 | } | ||
666 | etnaviv_drm = pdev; | ||
658 | of_node_put(np); | 667 | of_node_put(np); |
659 | break; | 668 | break; |
660 | } | 669 | } |
661 | 670 | ||
671 | return 0; | ||
672 | |||
673 | unregister_platform_driver: | ||
674 | platform_driver_unregister(&etnaviv_platform_driver); | ||
675 | unregister_gpu_driver: | ||
676 | platform_driver_unregister(&etnaviv_gpu_driver); | ||
662 | return ret; | 677 | return ret; |
663 | } | 678 | } |
664 | module_init(etnaviv_init); | 679 | module_init(etnaviv_init); |
665 | 680 | ||
666 | static void __exit etnaviv_exit(void) | 681 | static void __exit etnaviv_exit(void) |
667 | { | 682 | { |
668 | platform_driver_unregister(&etnaviv_gpu_driver); | 683 | platform_device_unregister(etnaviv_drm); |
669 | platform_driver_unregister(&etnaviv_platform_driver); | 684 | platform_driver_unregister(&etnaviv_platform_driver); |
685 | platform_driver_unregister(&etnaviv_gpu_driver); | ||
670 | } | 686 | } |
671 | module_exit(etnaviv_exit); | 687 | module_exit(etnaviv_exit); |
672 | 688 | ||
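The etnaviv_drv.c change records the platform device it creates and unwinds registrations in reverse order on failure, so etnaviv_exit() can mirror etnaviv_init() exactly. A generic sketch of that acquire-in-order, release-in-reverse pattern (all names and stubs below are illustrative):

	static int  subsys_a_init(void)  { return 0; }	/* stand-ins for driver registration */
	static void subsys_a_exit(void)  { }
	static int  subsys_b_init(void)  { return 0; }
	static void subsys_b_exit(void)  { }
	static int  device_create(void)  { return 0; }
	static void device_destroy(void) { }

	static int example_init(void)
	{
		int ret;

		ret = subsys_a_init();
		if (ret)
			return ret;

		ret = subsys_b_init();
		if (ret)
			goto err_unregister_a;

		ret = device_create();
		if (ret)
			goto err_unregister_b;

		return 0;

	err_unregister_b:
		subsys_b_exit();
	err_unregister_a:
		subsys_a_exit();
		return ret;
	}

	static void example_exit(void)
	{
		/* exact mirror image of example_init() */
		device_destroy();
		subsys_b_exit();
		subsys_a_exit();
	}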
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index dd430f0f8ff5..90f17ff7888e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h | |||
@@ -131,6 +131,9 @@ struct etnaviv_gpu { | |||
131 | struct work_struct sync_point_work; | 131 | struct work_struct sync_point_work; |
132 | int sync_point_event; | 132 | int sync_point_event; |
133 | 133 | ||
134 | /* hang detection */ | ||
135 | u32 hangcheck_dma_addr; | ||
136 | |||
134 | void __iomem *mmio; | 137 | void __iomem *mmio; |
135 | int irq; | 138 | int irq; |
136 | 139 | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index a74eb57af15b..50d6b88cb7aa 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include "etnaviv_gem.h" | 10 | #include "etnaviv_gem.h" |
11 | #include "etnaviv_gpu.h" | 11 | #include "etnaviv_gpu.h" |
12 | #include "etnaviv_sched.h" | 12 | #include "etnaviv_sched.h" |
13 | #include "state.xml.h" | ||
13 | 14 | ||
14 | static int etnaviv_job_hang_limit = 0; | 15 | static int etnaviv_job_hang_limit = 0; |
15 | module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); | 16 | module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); |
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) | |||
85 | { | 86 | { |
86 | struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); | 87 | struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); |
87 | struct etnaviv_gpu *gpu = submit->gpu; | 88 | struct etnaviv_gpu *gpu = submit->gpu; |
89 | u32 dma_addr; | ||
90 | int change; | ||
91 | |||
92 | /* | ||
93 | * If the GPU managed to complete this job's fence, the timeout is | ||
94 | * spurious. Bail out. | ||
95 | */ | ||
96 | if (fence_completed(gpu, submit->out_fence->seqno)) | ||
97 | return; | ||
98 | |||
99 | /* | ||
100 | * If the GPU is still making forward progress on the front-end (which | ||
101 | * should never loop) we shift out the timeout to give it a chance to | ||
102 | * finish the job. | ||
103 | */ | ||
104 | dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); | ||
105 | change = dma_addr - gpu->hangcheck_dma_addr; | ||
106 | if (change < 0 || change > 16) { | ||
107 | gpu->hangcheck_dma_addr = dma_addr; | ||
108 | schedule_delayed_work(&sched_job->work_tdr, | ||
109 | sched_job->sched->timeout); | ||
110 | return; | ||
111 | } | ||
88 | 112 | ||
89 | /* block scheduler */ | 113 | /* block scheduler */ |
90 | kthread_park(gpu->sched.thread); | 114 | kthread_park(gpu->sched.thread); |
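The etnaviv timeout handler above only declares a hang if the front-end DMA address has stopped moving; while it keeps advancing, the timeout is pushed out. A self-contained sketch of that forward-progress test (the 16-byte window mirrors the driver's heuristic; names are illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Compare the current front-end DMA address with the value sampled at the
	 * previous timeout.  Movement beyond a small window (or backwards, e.g. a
	 * buffer wrap) counts as progress, and the caller should re-arm the timeout
	 * instead of declaring a hang.
	 */
	static bool fe_made_progress(uint32_t *last_dma_addr, uint32_t cur_dma_addr)
	{
		int32_t change = (int32_t)(cur_dma_addr - *last_dma_addr);

		if (change < 0 || change > 16) {
			*last_dma_addr = cur_dma_addr;	/* remember the new position */
			return true;
		}
		return false;	/* stuck within a 16-byte window: treat as a hang */
	}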
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 82c95c34447f..e868773ea509 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c | |||
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, | |||
265 | unsigned long val; | 265 | unsigned long val; |
266 | 266 | ||
267 | val = readl(ctx->addr + DECON_WINCONx(win)); | 267 | val = readl(ctx->addr + DECON_WINCONx(win)); |
268 | val &= ~WINCONx_BPPMODE_MASK; | 268 | val &= WINCONx_ENWIN_F; |
269 | 269 | ||
270 | switch (fb->format->format) { | 270 | switch (fb->format->format) { |
271 | case DRM_FORMAT_XRGB1555: | 271 | case DRM_FORMAT_XRGB1555: |
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, | |||
356 | writel(val, ctx->addr + DECON_VIDOSDxB(win)); | 356 | writel(val, ctx->addr + DECON_VIDOSDxB(win)); |
357 | } | 357 | } |
358 | 358 | ||
359 | val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | | 359 | val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) | |
360 | VIDOSD_Wx_ALPHA_B_F(0x0); | 360 | VIDOSD_Wx_ALPHA_B_F(0xff); |
361 | writel(val, ctx->addr + DECON_VIDOSDxC(win)); | 361 | writel(val, ctx->addr + DECON_VIDOSDxC(win)); |
362 | 362 | ||
363 | val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | | 363 | val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index a81b4a5e24a7..ed3cc2989f93 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -420,7 +420,7 @@ err_mode_config_cleanup: | |||
420 | err_free_private: | 420 | err_free_private: |
421 | kfree(private); | 421 | kfree(private); |
422 | err_free_drm: | 422 | err_free_drm: |
423 | drm_dev_unref(drm); | 423 | drm_dev_put(drm); |
424 | 424 | ||
425 | return ret; | 425 | return ret; |
426 | } | 426 | } |
@@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev) | |||
444 | drm->dev_private = NULL; | 444 | drm->dev_private = NULL; |
445 | dev_set_drvdata(dev, NULL); | 445 | dev_set_drvdata(dev, NULL); |
446 | 446 | ||
447 | drm_dev_unref(drm); | 447 | drm_dev_put(drm); |
448 | } | 448 | } |
449 | 449 | ||
450 | static const struct component_master_ops exynos_drm_ops = { | 450 | static const struct component_master_ops exynos_drm_ops = { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 7fcc1a7ab1a0..27b7d34d776c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
@@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, | |||
138 | 138 | ||
139 | err: | 139 | err: |
140 | while (i--) | 140 | while (i--) |
141 | drm_gem_object_unreference_unlocked(&exynos_gem[i]->base); | 141 | drm_gem_object_put_unlocked(&exynos_gem[i]->base); |
142 | 142 | ||
143 | return ERR_PTR(ret); | 143 | return ERR_PTR(ret); |
144 | } | 144 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 6127ef25acd6..e8d0670bb5f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c | |||
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation) | |||
470 | static void fimc_set_window(struct fimc_context *ctx, | 470 | static void fimc_set_window(struct fimc_context *ctx, |
471 | struct exynos_drm_ipp_buffer *buf) | 471 | struct exynos_drm_ipp_buffer *buf) |
472 | { | 472 | { |
473 | unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; | ||
473 | u32 cfg, h1, h2, v1, v2; | 474 | u32 cfg, h1, h2, v1, v2; |
474 | 475 | ||
475 | /* cropped image */ | 476 | /* cropped image */ |
476 | h1 = buf->rect.x; | 477 | h1 = buf->rect.x; |
477 | h2 = buf->buf.width - buf->rect.w - buf->rect.x; | 478 | h2 = real_width - buf->rect.w - buf->rect.x; |
478 | v1 = buf->rect.y; | 479 | v1 = buf->rect.y; |
479 | v2 = buf->buf.height - buf->rect.h - buf->rect.y; | 480 | v2 = buf->buf.height - buf->rect.h - buf->rect.y; |
480 | 481 | ||
481 | DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", | 482 | DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", |
482 | buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, | 483 | buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, |
483 | buf->buf.width, buf->buf.height); | 484 | real_width, buf->buf.height); |
484 | DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); | 485 | DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); |
485 | 486 | ||
486 | /* | 487 | /* |
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx, | |||
503 | static void fimc_src_set_size(struct fimc_context *ctx, | 504 | static void fimc_src_set_size(struct fimc_context *ctx, |
504 | struct exynos_drm_ipp_buffer *buf) | 505 | struct exynos_drm_ipp_buffer *buf) |
505 | { | 506 | { |
507 | unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; | ||
506 | u32 cfg; | 508 | u32 cfg; |
507 | 509 | ||
508 | DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); | 510 | DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height); |
509 | 511 | ||
510 | /* original size */ | 512 | /* original size */ |
511 | cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) | | 513 | cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) | |
512 | EXYNOS_ORGISIZE_VERTICAL(buf->buf.height)); | 514 | EXYNOS_ORGISIZE_VERTICAL(buf->buf.height)); |
513 | 515 | ||
514 | fimc_write(ctx, cfg, EXYNOS_ORGISIZE); | 516 | fimc_write(ctx, cfg, EXYNOS_ORGISIZE); |
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx, | |||
529 | * for now, we support only ITU601 8 bit mode | 531 | * for now, we support only ITU601 8 bit mode |
530 | */ | 532 | */ |
531 | cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | | 533 | cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | |
532 | EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) | | 534 | EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) | |
533 | EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height)); | 535 | EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height)); |
534 | fimc_write(ctx, cfg, EXYNOS_CISRCFMT); | 536 | fimc_write(ctx, cfg, EXYNOS_CISRCFMT); |
535 | 537 | ||
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc) | |||
842 | static void fimc_dst_set_size(struct fimc_context *ctx, | 844 | static void fimc_dst_set_size(struct fimc_context *ctx, |
843 | struct exynos_drm_ipp_buffer *buf) | 845 | struct exynos_drm_ipp_buffer *buf) |
844 | { | 846 | { |
847 | unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; | ||
845 | u32 cfg, cfg_ext; | 848 | u32 cfg, cfg_ext; |
846 | 849 | ||
847 | DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); | 850 | DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height); |
848 | 851 | ||
849 | /* original size */ | 852 | /* original size */ |
850 | cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) | | 853 | cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) | |
851 | EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height)); | 854 | EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height)); |
852 | 855 | ||
853 | fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); | 856 | fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); |
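The fimc hunks (and the gsc/ipp hunks that follow) program the hardware with the buffer's real line width derived from the plane pitch, rather than the visible width, because the pitch may include padding. The arithmetic is simply the pitch in bytes divided by bytes per pixel; a tiny illustration:

	#include <stdint.h>

	/* real line width in pixels = plane pitch in bytes / bytes per pixel (cpp) */
	static uint32_t real_width(uint32_t pitch_bytes, uint32_t cpp)
	{
		return pitch_bytes / cpp;
	}

	/*
	 * e.g. XRGB8888 (cpp = 4) with a 4096-byte pitch gives 1024 pixels,
	 * even if only 1020 of them are visible.
	 */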
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 6e1494fa71b4..bdf5a7655228 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, | |||
143 | DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); | 143 | DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); |
144 | 144 | ||
145 | /* drop reference from allocate - handle holds it now. */ | 145 | /* drop reference from allocate - handle holds it now. */ |
146 | drm_gem_object_unreference_unlocked(obj); | 146 | drm_gem_object_put_unlocked(obj); |
147 | 147 | ||
148 | return 0; | 148 | return 0; |
149 | } | 149 | } |
@@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | |||
186 | 186 | ||
187 | exynos_gem = to_exynos_gem(obj); | 187 | exynos_gem = to_exynos_gem(obj); |
188 | 188 | ||
189 | drm_gem_object_unreference_unlocked(obj); | 189 | drm_gem_object_put_unlocked(obj); |
190 | 190 | ||
191 | return exynos_gem->size; | 191 | return exynos_gem->size; |
192 | } | 192 | } |
@@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, | |||
329 | return; | 329 | return; |
330 | } | 330 | } |
331 | 331 | ||
332 | drm_gem_object_unreference_unlocked(obj); | 332 | drm_gem_object_put_unlocked(obj); |
333 | 333 | ||
334 | /* | 334 | /* |
335 | * decrease obj->refcount one more time because we have already | 335 | * decrease obj->refcount one more time because we have already |
336 | * increased it at exynos_drm_gem_get_dma_addr(). | 336 | * increased it at exynos_drm_gem_get_dma_addr(). |
337 | */ | 337 | */ |
338 | drm_gem_object_unreference_unlocked(obj); | 338 | drm_gem_object_put_unlocked(obj); |
339 | } | 339 | } |
340 | 340 | ||
341 | static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, | 341 | static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, |
@@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, | |||
383 | args->flags = exynos_gem->flags; | 383 | args->flags = exynos_gem->flags; |
384 | args->size = exynos_gem->size; | 384 | args->size = exynos_gem->size; |
385 | 385 | ||
386 | drm_gem_object_unreference_unlocked(obj); | 386 | drm_gem_object_put_unlocked(obj); |
387 | 387 | ||
388 | return 0; | 388 | return 0; |
389 | } | 389 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 35ac66730563..7ba414b52faa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c | |||
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt) | |||
492 | GSC_IN_CHROMA_ORDER_CRCB); | 492 | GSC_IN_CHROMA_ORDER_CRCB); |
493 | break; | 493 | break; |
494 | case DRM_FORMAT_NV21: | 494 | case DRM_FORMAT_NV21: |
495 | cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P); | ||
496 | break; | ||
495 | case DRM_FORMAT_NV61: | 497 | case DRM_FORMAT_NV61: |
496 | cfg |= (GSC_IN_CHROMA_ORDER_CRCB | | 498 | cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P); |
497 | GSC_IN_YUV420_2P); | ||
498 | break; | 499 | break; |
499 | case DRM_FORMAT_YUV422: | 500 | case DRM_FORMAT_YUV422: |
500 | cfg |= GSC_IN_YUV422_3P; | 501 | cfg |= GSC_IN_YUV422_3P; |
501 | break; | 502 | break; |
502 | case DRM_FORMAT_YUV420: | 503 | case DRM_FORMAT_YUV420: |
504 | cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P); | ||
505 | break; | ||
503 | case DRM_FORMAT_YVU420: | 506 | case DRM_FORMAT_YVU420: |
504 | cfg |= GSC_IN_YUV420_3P; | 507 | cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P); |
505 | break; | 508 | break; |
506 | case DRM_FORMAT_NV12: | 509 | case DRM_FORMAT_NV12: |
510 | cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P); | ||
511 | break; | ||
507 | case DRM_FORMAT_NV16: | 512 | case DRM_FORMAT_NV16: |
508 | cfg |= (GSC_IN_CHROMA_ORDER_CBCR | | 513 | cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P); |
509 | GSC_IN_YUV420_2P); | ||
510 | break; | 514 | break; |
511 | } | 515 | } |
512 | 516 | ||
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation) | |||
523 | 527 | ||
524 | switch (degree) { | 528 | switch (degree) { |
525 | case DRM_MODE_ROTATE_0: | 529 | case DRM_MODE_ROTATE_0: |
526 | if (rotation & DRM_MODE_REFLECT_Y) | ||
527 | cfg |= GSC_IN_ROT_XFLIP; | ||
528 | if (rotation & DRM_MODE_REFLECT_X) | 530 | if (rotation & DRM_MODE_REFLECT_X) |
531 | cfg |= GSC_IN_ROT_XFLIP; | ||
532 | if (rotation & DRM_MODE_REFLECT_Y) | ||
529 | cfg |= GSC_IN_ROT_YFLIP; | 533 | cfg |= GSC_IN_ROT_YFLIP; |
530 | break; | 534 | break; |
531 | case DRM_MODE_ROTATE_90: | 535 | case DRM_MODE_ROTATE_90: |
532 | cfg |= GSC_IN_ROT_90; | 536 | cfg |= GSC_IN_ROT_90; |
533 | if (rotation & DRM_MODE_REFLECT_Y) | ||
534 | cfg |= GSC_IN_ROT_XFLIP; | ||
535 | if (rotation & DRM_MODE_REFLECT_X) | 537 | if (rotation & DRM_MODE_REFLECT_X) |
538 | cfg |= GSC_IN_ROT_XFLIP; | ||
539 | if (rotation & DRM_MODE_REFLECT_Y) | ||
536 | cfg |= GSC_IN_ROT_YFLIP; | 540 | cfg |= GSC_IN_ROT_YFLIP; |
537 | break; | 541 | break; |
538 | case DRM_MODE_ROTATE_180: | 542 | case DRM_MODE_ROTATE_180: |
539 | cfg |= GSC_IN_ROT_180; | 543 | cfg |= GSC_IN_ROT_180; |
540 | if (rotation & DRM_MODE_REFLECT_Y) | ||
541 | cfg &= ~GSC_IN_ROT_XFLIP; | ||
542 | if (rotation & DRM_MODE_REFLECT_X) | 544 | if (rotation & DRM_MODE_REFLECT_X) |
545 | cfg &= ~GSC_IN_ROT_XFLIP; | ||
546 | if (rotation & DRM_MODE_REFLECT_Y) | ||
543 | cfg &= ~GSC_IN_ROT_YFLIP; | 547 | cfg &= ~GSC_IN_ROT_YFLIP; |
544 | break; | 548 | break; |
545 | case DRM_MODE_ROTATE_270: | 549 | case DRM_MODE_ROTATE_270: |
546 | cfg |= GSC_IN_ROT_270; | 550 | cfg |= GSC_IN_ROT_270; |
547 | if (rotation & DRM_MODE_REFLECT_Y) | ||
548 | cfg &= ~GSC_IN_ROT_XFLIP; | ||
549 | if (rotation & DRM_MODE_REFLECT_X) | 551 | if (rotation & DRM_MODE_REFLECT_X) |
552 | cfg &= ~GSC_IN_ROT_XFLIP; | ||
553 | if (rotation & DRM_MODE_REFLECT_Y) | ||
550 | cfg &= ~GSC_IN_ROT_YFLIP; | 554 | cfg &= ~GSC_IN_ROT_YFLIP; |
551 | break; | 555 | break; |
552 | } | 556 | } |
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx, | |||
577 | cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | | 581 | cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | |
578 | GSC_SRCIMG_WIDTH_MASK); | 582 | GSC_SRCIMG_WIDTH_MASK); |
579 | 583 | ||
580 | cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) | | 584 | cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) | |
581 | GSC_SRCIMG_HEIGHT(buf->buf.height)); | 585 | GSC_SRCIMG_HEIGHT(buf->buf.height)); |
582 | 586 | ||
583 | gsc_write(cfg, GSC_SRCIMG_SIZE); | 587 | gsc_write(cfg, GSC_SRCIMG_SIZE); |
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt) | |||
672 | GSC_OUT_CHROMA_ORDER_CRCB); | 676 | GSC_OUT_CHROMA_ORDER_CRCB); |
673 | break; | 677 | break; |
674 | case DRM_FORMAT_NV21: | 678 | case DRM_FORMAT_NV21: |
675 | case DRM_FORMAT_NV61: | ||
676 | cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P); | 679 | cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P); |
677 | break; | 680 | break; |
681 | case DRM_FORMAT_NV61: | ||
682 | cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P); | ||
683 | break; | ||
678 | case DRM_FORMAT_YUV422: | 684 | case DRM_FORMAT_YUV422: |
685 | cfg |= GSC_OUT_YUV422_3P; | ||
686 | break; | ||
679 | case DRM_FORMAT_YUV420: | 687 | case DRM_FORMAT_YUV420: |
688 | cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P); | ||
689 | break; | ||
680 | case DRM_FORMAT_YVU420: | 690 | case DRM_FORMAT_YVU420: |
681 | cfg |= GSC_OUT_YUV420_3P; | 691 | cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P); |
682 | break; | 692 | break; |
683 | case DRM_FORMAT_NV12: | 693 | case DRM_FORMAT_NV12: |
694 | cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P); | ||
695 | break; | ||
684 | case DRM_FORMAT_NV16: | 696 | case DRM_FORMAT_NV16: |
685 | cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | | 697 | cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P); |
686 | GSC_OUT_YUV420_2P); | ||
687 | break; | 698 | break; |
688 | } | 699 | } |
689 | 700 | ||
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx, | |||
868 | /* original size */ | 879 | /* original size */ |
869 | cfg = gsc_read(GSC_DSTIMG_SIZE); | 880 | cfg = gsc_read(GSC_DSTIMG_SIZE); |
870 | cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK); | 881 | cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK); |
871 | cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) | | 882 | cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) | |
872 | GSC_DSTIMG_HEIGHT(buf->buf.height); | 883 | GSC_DSTIMG_HEIGHT(buf->buf.height); |
873 | gsc_write(cfg, GSC_DSTIMG_SIZE); | 884 | gsc_write(cfg, GSC_DSTIMG_SIZE); |
874 | 885 | ||
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = { | |||
1341 | }; | 1352 | }; |
1342 | 1353 | ||
1343 | static const struct drm_exynos_ipp_limit gsc_5433_limits[] = { | 1354 | static const struct drm_exynos_ipp_limit gsc_5433_limits[] = { |
1344 | { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) }, | 1355 | { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) }, |
1345 | { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, | 1356 | { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, |
1346 | { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, | 1357 | { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, |
1347 | { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, | 1358 | { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 26374e58c557..b435db8fc916 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c | |||
@@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf, | |||
345 | int ret = 0; | 345 | int ret = 0; |
346 | int i; | 346 | int i; |
347 | 347 | ||
348 | /* basic checks */ | ||
349 | if (buf->buf.width == 0 || buf->buf.height == 0) | ||
350 | return -EINVAL; | ||
351 | buf->format = drm_format_info(buf->buf.fourcc); | ||
352 | for (i = 0; i < buf->format->num_planes; i++) { | ||
353 | unsigned int width = (i == 0) ? buf->buf.width : | ||
354 | DIV_ROUND_UP(buf->buf.width, buf->format->hsub); | ||
355 | |||
356 | if (buf->buf.pitch[i] == 0) | ||
357 | buf->buf.pitch[i] = width * buf->format->cpp[i]; | ||
358 | if (buf->buf.pitch[i] < width * buf->format->cpp[i]) | ||
359 | return -EINVAL; | ||
360 | if (!buf->buf.gem_id[i]) | ||
361 | return -ENOENT; | ||
362 | } | ||
363 | |||
364 | /* pitch for additional planes must match */ | ||
365 | if (buf->format->num_planes > 2 && | ||
366 | buf->buf.pitch[1] != buf->buf.pitch[2]) | ||
367 | return -EINVAL; | ||
368 | |||
369 | /* get GEM buffers and check their size */ | 348 | /* get GEM buffers and check their size */ |
370 | for (i = 0; i < buf->format->num_planes; i++) { | 349 | for (i = 0; i < buf->format->num_planes; i++) { |
371 | unsigned int height = (i == 0) ? buf->buf.height : | 350 | unsigned int height = (i == 0) ? buf->buf.height : |
@@ -428,7 +407,7 @@ enum drm_ipp_size_id { | |||
428 | IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX | 407 | IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX |
429 | }; | 408 | }; |
430 | 409 | ||
431 | static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = { | 410 | static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = { |
432 | [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, | 411 | [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, |
433 | [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, | 412 | [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, |
434 | DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, | 413 | DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, |
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf, | |||
495 | enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA; | 474 | enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA; |
496 | struct drm_ipp_limit l; | 475 | struct drm_ipp_limit l; |
497 | struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; | 476 | struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; |
477 | int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; | ||
498 | 478 | ||
499 | if (!limits) | 479 | if (!limits) |
500 | return 0; | 480 | return 0; |
501 | 481 | ||
502 | __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); | 482 | __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); |
503 | if (!__size_limit_check(buf->buf.width, &l.h) || | 483 | if (!__size_limit_check(real_width, &l.h) || |
504 | !__size_limit_check(buf->buf.height, &l.v)) | 484 | !__size_limit_check(buf->buf.height, &l.v)) |
505 | return -EINVAL; | 485 | return -EINVAL; |
506 | 486 | ||
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits( | |||
560 | return 0; | 540 | return 0; |
561 | } | 541 | } |
562 | 542 | ||
543 | static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task, | ||
544 | struct exynos_drm_ipp_buffer *buf, | ||
545 | struct exynos_drm_ipp_buffer *src, | ||
546 | struct exynos_drm_ipp_buffer *dst, | ||
547 | bool rotate, bool swap) | ||
548 | { | ||
549 | const struct exynos_drm_ipp_formats *fmt; | ||
550 | int ret, i; | ||
551 | |||
552 | fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier, | ||
553 | buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE : | ||
554 | DRM_EXYNOS_IPP_FORMAT_DESTINATION); | ||
555 | if (!fmt) { | ||
556 | DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task, | ||
557 | buf == src ? "src" : "dst"); | ||
558 | return -EINVAL; | ||
559 | } | ||
560 | |||
561 | /* basic checks */ | ||
562 | if (buf->buf.width == 0 || buf->buf.height == 0) | ||
563 | return -EINVAL; | ||
564 | |||
565 | buf->format = drm_format_info(buf->buf.fourcc); | ||
566 | for (i = 0; i < buf->format->num_planes; i++) { | ||
567 | unsigned int width = (i == 0) ? buf->buf.width : | ||
568 | DIV_ROUND_UP(buf->buf.width, buf->format->hsub); | ||
569 | |||
570 | if (buf->buf.pitch[i] == 0) | ||
571 | buf->buf.pitch[i] = width * buf->format->cpp[i]; | ||
572 | if (buf->buf.pitch[i] < width * buf->format->cpp[i]) | ||
573 | return -EINVAL; | ||
574 | if (!buf->buf.gem_id[i]) | ||
575 | return -ENOENT; | ||
576 | } | ||
577 | |||
578 | /* pitch for additional planes must match */ | ||
579 | if (buf->format->num_planes > 2 && | ||
580 | buf->buf.pitch[1] != buf->buf.pitch[2]) | ||
581 | return -EINVAL; | ||
582 | |||
583 | /* check driver limits */ | ||
584 | ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits, | ||
585 | fmt->num_limits, | ||
586 | rotate, | ||
587 | buf == dst ? swap : false); | ||
588 | if (ret) | ||
589 | return ret; | ||
590 | ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, | ||
591 | fmt->limits, | ||
592 | fmt->num_limits, swap); | ||
593 | return ret; | ||
594 | } | ||
595 | |||
563 | static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) | 596 | static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) |
564 | { | 597 | { |
565 | struct exynos_drm_ipp *ipp = task->ipp; | 598 | struct exynos_drm_ipp *ipp = task->ipp; |
566 | const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt; | ||
567 | struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; | 599 | struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; |
568 | unsigned int rotation = task->transform.rotation; | 600 | unsigned int rotation = task->transform.rotation; |
569 | int ret = 0; | 601 | int ret = 0; |
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) | |||
607 | return -EINVAL; | 639 | return -EINVAL; |
608 | } | 640 | } |
609 | 641 | ||
610 | src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier, | 642 | ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap); |
611 | DRM_EXYNOS_IPP_FORMAT_SOURCE); | ||
612 | if (!src_fmt) { | ||
613 | DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task); | ||
614 | return -EINVAL; | ||
615 | } | ||
616 | ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits, | ||
617 | src_fmt->num_limits, | ||
618 | rotate, false); | ||
619 | if (ret) | ||
620 | return ret; | ||
621 | ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, | ||
622 | src_fmt->limits, | ||
623 | src_fmt->num_limits, swap); | ||
624 | if (ret) | 643 | if (ret) |
625 | return ret; | 644 | return ret; |
626 | 645 | ||
627 | dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier, | 646 | ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap); |
628 | DRM_EXYNOS_IPP_FORMAT_DESTINATION); | ||
629 | if (!dst_fmt) { | ||
630 | DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task); | ||
631 | return -EINVAL; | ||
632 | } | ||
633 | ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits, | ||
634 | dst_fmt->num_limits, | ||
635 | false, swap); | ||
636 | if (ret) | ||
637 | return ret; | ||
638 | ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, | ||
639 | dst_fmt->limits, | ||
640 | dst_fmt->num_limits, swap); | ||
641 | if (ret) | 647 | if (ret) |
642 | return ret; | 648 | return ret; |
643 | 649 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 38a2a7f1204b..7098c6d35266 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane) | |||
132 | if (plane->state) { | 132 | if (plane->state) { |
133 | exynos_state = to_exynos_plane_state(plane->state); | 133 | exynos_state = to_exynos_plane_state(plane->state); |
134 | if (exynos_state->base.fb) | 134 | if (exynos_state->base.fb) |
135 | drm_framebuffer_unreference(exynos_state->base.fb); | 135 | drm_framebuffer_put(exynos_state->base.fb); |
136 | kfree(exynos_state); | 136 | kfree(exynos_state); |
137 | plane->state = NULL; | 137 | plane->state = NULL; |
138 | } | 138 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 1a76dd3d52e1..a820a68429b9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c | |||
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot, | |||
168 | val &= ~ROT_CONTROL_FLIP_MASK; | 168 | val &= ~ROT_CONTROL_FLIP_MASK; |
169 | 169 | ||
170 | if (rotation & DRM_MODE_REFLECT_X) | 170 | if (rotation & DRM_MODE_REFLECT_X) |
171 | val |= ROT_CONTROL_FLIP_HORIZONTAL; | ||
172 | if (rotation & DRM_MODE_REFLECT_Y) | ||
173 | val |= ROT_CONTROL_FLIP_VERTICAL; | 171 | val |= ROT_CONTROL_FLIP_VERTICAL; |
172 | if (rotation & DRM_MODE_REFLECT_Y) | ||
173 | val |= ROT_CONTROL_FLIP_HORIZONTAL; | ||
174 | 174 | ||
175 | val &= ~ROT_CONTROL_ROT_MASK; | 175 | val &= ~ROT_CONTROL_ROT_MASK; |
176 | 176 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 91d4382343d0..0ddb6eec7b11 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset)) | 30 | #define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset)) |
31 | #define SCALER_MAX_CLK 4 | 31 | #define SCALER_MAX_CLK 4 |
32 | #define SCALER_AUTOSUSPEND_DELAY 2000 | 32 | #define SCALER_AUTOSUSPEND_DELAY 2000 |
33 | #define SCALER_RESET_WAIT_RETRIES 100 | ||
33 | 34 | ||
34 | struct scaler_data { | 35 | struct scaler_data { |
35 | const char *clk_name[SCALER_MAX_CLK]; | 36 | const char *clk_name[SCALER_MAX_CLK]; |
@@ -51,9 +52,9 @@ struct scaler_context { | |||
51 | static u32 scaler_get_format(u32 drm_fmt) | 52 | static u32 scaler_get_format(u32 drm_fmt) |
52 | { | 53 | { |
53 | switch (drm_fmt) { | 54 | switch (drm_fmt) { |
54 | case DRM_FORMAT_NV21: | ||
55 | return SCALER_YUV420_2P_UV; | ||
56 | case DRM_FORMAT_NV12: | 55 | case DRM_FORMAT_NV12: |
56 | return SCALER_YUV420_2P_UV; | ||
57 | case DRM_FORMAT_NV21: | ||
57 | return SCALER_YUV420_2P_VU; | 58 | return SCALER_YUV420_2P_VU; |
58 | case DRM_FORMAT_YUV420: | 59 | case DRM_FORMAT_YUV420: |
59 | return SCALER_YUV420_3P; | 60 | return SCALER_YUV420_3P; |
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt) | |||
63 | return SCALER_YUV422_1P_UYVY; | 64 | return SCALER_YUV422_1P_UYVY; |
64 | case DRM_FORMAT_YVYU: | 65 | case DRM_FORMAT_YVYU: |
65 | return SCALER_YUV422_1P_YVYU; | 66 | return SCALER_YUV422_1P_YVYU; |
66 | case DRM_FORMAT_NV61: | ||
67 | return SCALER_YUV422_2P_UV; | ||
68 | case DRM_FORMAT_NV16: | 67 | case DRM_FORMAT_NV16: |
68 | return SCALER_YUV422_2P_UV; | ||
69 | case DRM_FORMAT_NV61: | ||
69 | return SCALER_YUV422_2P_VU; | 70 | return SCALER_YUV422_2P_VU; |
70 | case DRM_FORMAT_YUV422: | 71 | case DRM_FORMAT_YUV422: |
71 | return SCALER_YUV422_3P; | 72 | return SCALER_YUV422_3P; |
72 | case DRM_FORMAT_NV42: | ||
73 | return SCALER_YUV444_2P_UV; | ||
74 | case DRM_FORMAT_NV24: | 73 | case DRM_FORMAT_NV24: |
74 | return SCALER_YUV444_2P_UV; | ||
75 | case DRM_FORMAT_NV42: | ||
75 | return SCALER_YUV444_2P_VU; | 76 | return SCALER_YUV444_2P_VU; |
76 | case DRM_FORMAT_YUV444: | 77 | case DRM_FORMAT_YUV444: |
77 | return SCALER_YUV444_3P; | 78 | return SCALER_YUV444_3P; |
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt) | |||
100 | return 0; | 101 | return 0; |
101 | } | 102 | } |
102 | 103 | ||
104 | static inline int scaler_reset(struct scaler_context *scaler) | ||
105 | { | ||
106 | int retry = SCALER_RESET_WAIT_RETRIES; | ||
107 | |||
108 | scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); | ||
109 | do { | ||
110 | cpu_relax(); | ||
111 | } while (retry > 1 && | ||
112 | scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); | ||
113 | do { | ||
114 | cpu_relax(); | ||
115 | scaler_write(1, SCALER_INT_EN); | ||
116 | } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); | ||
117 | |||
118 | return retry ? 0 : -EIO; | ||
119 | } | ||
120 | |||
103 | static inline void scaler_enable_int(struct scaler_context *scaler) | 121 | static inline void scaler_enable_int(struct scaler_context *scaler) |
104 | { | 122 | { |
105 | u32 val; | 123 | u32 val; |
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp, | |||
354 | u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc); | 372 | u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc); |
355 | struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect; | 373 | struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect; |
356 | 374 | ||
357 | scaler->task = task; | ||
358 | |||
359 | pm_runtime_get_sync(scaler->dev); | 375 | pm_runtime_get_sync(scaler->dev); |
376 | if (scaler_reset(scaler)) { | ||
377 | pm_runtime_put(scaler->dev); | ||
378 | return -EIO; | ||
379 | } | ||
380 | |||
381 | scaler->task = task; | ||
360 | 382 | ||
361 | scaler_set_src_fmt(scaler, src_fmt); | 383 | scaler_set_src_fmt(scaler, src_fmt); |
362 | scaler_set_src_base(scaler, &task->src); | 384 | scaler_set_src_base(scaler, &task->src); |
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler) | |||
394 | 416 | ||
395 | static inline u32 scaler_get_int_status(struct scaler_context *scaler) | 417 | static inline u32 scaler_get_int_status(struct scaler_context *scaler) |
396 | { | 418 | { |
397 | return scaler_read(SCALER_INT_STATUS); | 419 | u32 val = scaler_read(SCALER_INT_STATUS); |
420 | |||
421 | scaler_write(val, SCALER_INT_STATUS); | ||
422 | |||
423 | return val; | ||
398 | } | 424 | } |
399 | 425 | ||
400 | static inline int scaler_task_done(u32 val) | 426 | static inline int scaler_task_done(u32 val) |
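
The scaler hunk above does two related things: it adds a bounded soft-reset poll (scaler_reset()) and it makes scaler_get_int_status() write the value it just read back into SCALER_INT_STATUS, which on this block serves to acknowledge the events that are about to be handled. A minimal stand-alone sketch of that read-then-acknowledge pattern, assuming a write-1-to-clear status register; the helper and register names here are illustrative, not the driver's own:

    #include <linux/io.h>

    /* Latch the pending interrupt bits and acknowledge exactly what was
     * seen, so an already-handled event cannot fire again later. */
    static u32 read_and_ack_status(void __iomem *regs, unsigned int offset)
    {
            u32 status = readl(regs + offset);

            writel(status, regs + offset);  /* write-1-to-clear ack */
            return status;
    }
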
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h index 4704a993cbb7..16b39734115c 100644 --- a/drivers/gpu/drm/exynos/regs-gsc.h +++ b/drivers/gpu/drm/exynos/regs-gsc.h | |||
@@ -138,6 +138,7 @@ | |||
138 | #define GSC_OUT_YUV420_3P (3 << 4) | 138 | #define GSC_OUT_YUV420_3P (3 << 4) |
139 | #define GSC_OUT_YUV422_1P (4 << 4) | 139 | #define GSC_OUT_YUV422_1P (4 << 4) |
140 | #define GSC_OUT_YUV422_2P (5 << 4) | 140 | #define GSC_OUT_YUV422_2P (5 << 4) |
141 | #define GSC_OUT_YUV422_3P (6 << 4) | ||
141 | #define GSC_OUT_YUV444 (7 << 4) | 142 | #define GSC_OUT_YUV444 (7 << 4) |
142 | #define GSC_OUT_TILE_TYPE_MASK (1 << 2) | 143 | #define GSC_OUT_TILE_TYPE_MASK (1 << 2) |
143 | #define GSC_OUT_TILE_C_16x8 (0 << 2) | 144 | #define GSC_OUT_TILE_C_16x8 (0 << 2) |
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6d8180e8d1e2..4b072ade8c38 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
@@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) | |||
196 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | | 196 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | |
197 | TRANS_DDI_PORT_MASK); | 197 | TRANS_DDI_PORT_MASK); |
198 | vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= | 198 | vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= |
199 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | | 199 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | |
200 | (PORT_B << TRANS_DDI_PORT_SHIFT) | | 200 | (PORT_B << TRANS_DDI_PORT_SHIFT) | |
201 | TRANS_DDI_FUNC_ENABLE); | 201 | TRANS_DDI_FUNC_ENABLE); |
202 | if (IS_BROADWELL(dev_priv)) { | 202 | if (IS_BROADWELL(dev_priv)) { |
@@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) | |||
216 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | | 216 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | |
217 | TRANS_DDI_PORT_MASK); | 217 | TRANS_DDI_PORT_MASK); |
218 | vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= | 218 | vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= |
219 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | | 219 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | |
220 | (PORT_C << TRANS_DDI_PORT_SHIFT) | | 220 | (PORT_C << TRANS_DDI_PORT_SHIFT) | |
221 | TRANS_DDI_FUNC_ENABLE); | 221 | TRANS_DDI_FUNC_ENABLE); |
222 | if (IS_BROADWELL(dev_priv)) { | 222 | if (IS_BROADWELL(dev_priv)) { |
@@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) | |||
236 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | | 236 | ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | |
237 | TRANS_DDI_PORT_MASK); | 237 | TRANS_DDI_PORT_MASK); |
238 | vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= | 238 | vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= |
239 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | | 239 | (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | |
240 | (PORT_D << TRANS_DDI_PORT_SHIFT) | | 240 | (PORT_D << TRANS_DDI_PORT_SHIFT) | |
241 | TRANS_DDI_FUNC_ENABLE); | 241 | TRANS_DDI_FUNC_ENABLE); |
242 | if (IS_BROADWELL(dev_priv)) { | 242 | if (IS_BROADWELL(dev_priv)) { |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 23296547da95..4efec8fa6c1d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) | |||
1592 | vgpu_free_mm(mm); | 1592 | vgpu_free_mm(mm); |
1593 | return ERR_PTR(-ENOMEM); | 1593 | return ERR_PTR(-ENOMEM); |
1594 | } | 1594 | } |
1595 | mm->ggtt_mm.last_partial_off = -1UL; | ||
1595 | 1596 | ||
1596 | return mm; | 1597 | return mm; |
1597 | } | 1598 | } |
@@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) | |||
1616 | invalidate_ppgtt_mm(mm); | 1617 | invalidate_ppgtt_mm(mm); |
1617 | } else { | 1618 | } else { |
1618 | vfree(mm->ggtt_mm.virtual_ggtt); | 1619 | vfree(mm->ggtt_mm.virtual_ggtt); |
1620 | mm->ggtt_mm.last_partial_off = -1UL; | ||
1619 | } | 1621 | } |
1620 | 1622 | ||
1621 | vgpu_free_mm(mm); | 1623 | vgpu_free_mm(mm); |
@@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | |||
1868 | memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, | 1870 | memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, |
1869 | bytes); | 1871 | bytes); |
1870 | 1872 | ||
1873 | /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes | ||
1874 | * write, we assume the two 4 bytes writes are consecutive. | ||
1875 | * Otherwise, we abort and report error | ||
1876 | */ | ||
1877 | if (bytes < info->gtt_entry_size) { | ||
1878 | if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { | ||
1879 | /* the first partial part*/ | ||
1880 | ggtt_mm->ggtt_mm.last_partial_off = off; | ||
1881 | ggtt_mm->ggtt_mm.last_partial_data = e.val64; | ||
1882 | return 0; | ||
1883 | } else if ((g_gtt_index == | ||
1884 | (ggtt_mm->ggtt_mm.last_partial_off >> | ||
1885 | info->gtt_entry_size_shift)) && | ||
1886 | (off != ggtt_mm->ggtt_mm.last_partial_off)) { | ||
1887 | /* the second partial part */ | ||
1888 | |||
1889 | int last_off = ggtt_mm->ggtt_mm.last_partial_off & | ||
1890 | (info->gtt_entry_size - 1); | ||
1891 | |||
1892 | memcpy((void *)&e.val64 + last_off, | ||
1893 | (void *)&ggtt_mm->ggtt_mm.last_partial_data + | ||
1894 | last_off, bytes); | ||
1895 | |||
1896 | ggtt_mm->ggtt_mm.last_partial_off = -1UL; | ||
1897 | } else { | ||
1898 | int last_offset; | ||
1899 | |||
1900 | gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", | ||
1901 | ggtt_mm->ggtt_mm.last_partial_off, off, | ||
1902 | bytes, info->gtt_entry_size); | ||
1903 | |||
1904 | /* set host ggtt entry to scratch page and clear | ||
1905 | * virtual ggtt entry as not present for last | ||
1906 | * partially write offset | ||
1907 | */ | ||
1908 | last_offset = ggtt_mm->ggtt_mm.last_partial_off & | ||
1909 | (~(info->gtt_entry_size - 1)); | ||
1910 | |||
1911 | ggtt_get_host_entry(ggtt_mm, &m, last_offset); | ||
1912 | ggtt_invalidate_pte(vgpu, &m); | ||
1913 | ops->set_pfn(&m, gvt->gtt.scratch_mfn); | ||
1914 | ops->clear_present(&m); | ||
1915 | ggtt_set_host_entry(ggtt_mm, &m, last_offset); | ||
1916 | ggtt_invalidate(gvt->dev_priv); | ||
1917 | |||
1918 | ggtt_get_guest_entry(ggtt_mm, &e, last_offset); | ||
1919 | ops->clear_present(&e); | ||
1920 | ggtt_set_guest_entry(ggtt_mm, &e, last_offset); | ||
1921 | |||
1922 | ggtt_mm->ggtt_mm.last_partial_off = off; | ||
1923 | ggtt_mm->ggtt_mm.last_partial_data = e.val64; | ||
1924 | |||
1925 | return 0; | ||
1926 | } | ||
1927 | } | ||
1928 | |||
1871 | if (ops->test_present(&e)) { | 1929 | if (ops->test_present(&e)) { |
1872 | gfn = ops->get_pfn(&e); | 1930 | gfn = ops->get_pfn(&e); |
1873 | m = e; | 1931 | m = e; |
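
The comment added in the gtt.c hunk above carries the idea: a 64-bit GGTT entry may arrive from the guest as two consecutive 32-bit MMIO writes, so the first half is stashed (last_partial_off/last_partial_data) and merged once the second half for the same entry arrives. A self-contained sketch of just that coalescing step, with invented names; the hunk's error path, which also scrubs the previously touched entry, is left out here:

    #include <linux/string.h>
    #include <linux/types.h>

    /* One outstanding 4-byte half of an 8-byte entry. */
    struct partial_write {
            u64 data;       /* entry value holding the stashed half */
            long off;       /* byte offset of that half, -1 if none */
    };

    /*
     * Feed one 4-byte write at byte offset @off. Returns 1 when a full
     * 8-byte entry has been assembled into *entry, 0 while waiting for
     * the second half, -1 for an out-of-order sequence.
     */
    static int coalesce_half(struct partial_write *pw, long off, u32 half,
                             u64 *entry)
    {
            if (pw->off == -1) {
                    pw->data = 0;
                    memcpy((void *)&pw->data + (off & 7), &half, sizeof(half));
                    pw->off = off;
                    return 0;
            }
            if ((off >> 3) != (pw->off >> 3) || off == pw->off) {
                    pw->off = -1;   /* abnormal sequence: caller recovers */
                    return -1;
            }
            *entry = pw->data;
            memcpy((void *)entry + (off & 7), &half, sizeof(half));
            pw->off = -1;
            return 1;
    }
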
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 3792f2b7f4ff..97e62647418a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h | |||
@@ -150,6 +150,8 @@ struct intel_vgpu_mm { | |||
150 | } ppgtt_mm; | 150 | } ppgtt_mm; |
151 | struct { | 151 | struct { |
152 | void *virtual_ggtt; | 152 | void *virtual_ggtt; |
153 | unsigned long last_partial_off; | ||
154 | u64 last_partial_data; | ||
153 | } ggtt_mm; | 155 | } ggtt_mm; |
154 | }; | 156 | }; |
155 | }; | 157 | }; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d44ad7bc1e94..17c5097721e8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf) | |||
2002 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | 2002 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); |
2003 | struct i915_vma *vma; | 2003 | struct i915_vma *vma; |
2004 | pgoff_t page_offset; | 2004 | pgoff_t page_offset; |
2005 | unsigned int flags; | ||
2006 | int ret; | 2005 | int ret; |
2007 | 2006 | ||
2008 | /* We don't use vmf->pgoff since that has the fake offset */ | 2007 | /* We don't use vmf->pgoff since that has the fake offset */ |
@@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf) | |||
2038 | goto err_unlock; | 2037 | goto err_unlock; |
2039 | } | 2038 | } |
2040 | 2039 | ||
2041 | /* If the object is smaller than a couple of partial vma, it is | ||
2042 | * not worth only creating a single partial vma - we may as well | ||
2043 | * clear enough space for the full object. | ||
2044 | */ | ||
2045 | flags = PIN_MAPPABLE; | ||
2046 | if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) | ||
2047 | flags |= PIN_NONBLOCK | PIN_NONFAULT; | ||
2048 | 2040 | ||
2049 | /* Now pin it into the GTT as needed */ | 2041 | /* Now pin it into the GTT as needed */ |
2050 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); | 2042 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, |
2043 | PIN_MAPPABLE | | ||
2044 | PIN_NONBLOCK | | ||
2045 | PIN_NONFAULT); | ||
2051 | if (IS_ERR(vma)) { | 2046 | if (IS_ERR(vma)) { |
2052 | /* Use a partial view if it is bigger than available space */ | 2047 | /* Use a partial view if it is bigger than available space */ |
2053 | struct i915_ggtt_view view = | 2048 | struct i915_ggtt_view view = |
2054 | compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); | 2049 | compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); |
2050 | unsigned int flags; | ||
2055 | 2051 | ||
2056 | /* Userspace is now writing through an untracked VMA, abandon | 2052 | flags = PIN_MAPPABLE; |
2053 | if (view.type == I915_GGTT_VIEW_NORMAL) | ||
2054 | flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ | ||
2055 | |||
2056 | /* | ||
2057 | * Userspace is now writing through an untracked VMA, abandon | ||
2057 | * all hope that the hardware is able to track future writes. | 2058 | * all hope that the hardware is able to track future writes. |
2058 | */ | 2059 | */ |
2059 | obj->frontbuffer_ggtt_origin = ORIGIN_CPU; | 2060 | obj->frontbuffer_ggtt_origin = ORIGIN_CPU; |
2060 | 2061 | ||
2061 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); | 2062 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); |
2063 | if (IS_ERR(vma) && !view.type) { | ||
2064 | flags = PIN_MAPPABLE; | ||
2065 | view.type = I915_GGTT_VIEW_PARTIAL; | ||
2066 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); | ||
2067 | } | ||
2062 | } | 2068 | } |
2063 | if (IS_ERR(vma)) { | 2069 | if (IS_ERR(vma)) { |
2064 | ret = PTR_ERR(vma); | 2070 | ret = PTR_ERR(vma); |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 9324d476e0a7..0531c01c3604 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
@@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj, | |||
109 | obj->base.size >> PAGE_SHIFT)); | 109 | obj->base.size >> PAGE_SHIFT)); |
110 | vma->size = view->partial.size; | 110 | vma->size = view->partial.size; |
111 | vma->size <<= PAGE_SHIFT; | 111 | vma->size <<= PAGE_SHIFT; |
112 | GEM_BUG_ON(vma->size >= obj->base.size); | 112 | GEM_BUG_ON(vma->size > obj->base.size); |
113 | } else if (view->type == I915_GGTT_VIEW_ROTATED) { | 113 | } else if (view->type == I915_GGTT_VIEW_ROTATED) { |
114 | vma->size = intel_rotation_info_size(&view->rotated); | 114 | vma->size = intel_rotation_info_size(&view->rotated); |
115 | vma->size <<= PAGE_SHIFT; | 115 | vma->size <<= PAGE_SHIFT; |
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 2ebdc6d5a76e..d5583190f3e4 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, | |||
137 | 137 | ||
138 | if (cmd > (char *) urb->transfer_buffer) { | 138 | if (cmd > (char *) urb->transfer_buffer) { |
139 | /* Send partial buffer remaining before exiting */ | 139 | /* Send partial buffer remaining before exiting */ |
140 | int len = cmd - (char *) urb->transfer_buffer; | 140 | int len; |
141 | if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) | ||
142 | *cmd++ = 0xAF; | ||
143 | len = cmd - (char *) urb->transfer_buffer; | ||
141 | ret = udl_submit_urb(dev, urb, len); | 144 | ret = udl_submit_urb(dev, urb, len); |
142 | bytes_sent += len; | 145 | bytes_sent += len; |
143 | } else | 146 | } else |
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 0c87b1ac6b68..b992644c17e6 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c | |||
@@ -153,11 +153,11 @@ static void udl_compress_hline16( | |||
153 | raw_pixels_count_byte = cmd++; /* we'll know this later */ | 153 | raw_pixels_count_byte = cmd++; /* we'll know this later */ |
154 | raw_pixel_start = pixel; | 154 | raw_pixel_start = pixel; |
155 | 155 | ||
156 | cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, | 156 | cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, |
157 | min((int)(pixel_end - pixel) / bpp, | 157 | (unsigned long)(pixel_end - pixel) / bpp, |
158 | (int)(cmd_buffer_end - cmd) / 2))) * bpp; | 158 | (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; |
159 | 159 | ||
160 | prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); | 160 | prefetch_range((void *) pixel, cmd_pixel_end - pixel); |
161 | pixel_val16 = get_pixel_val16(pixel, bpp); | 161 | pixel_val16 = get_pixel_val16(pixel, bpp); |
162 | 162 | ||
163 | while (pixel < cmd_pixel_end) { | 163 | while (pixel < cmd_pixel_end) { |
@@ -193,6 +193,9 @@ static void udl_compress_hline16( | |||
193 | if (pixel > raw_pixel_start) { | 193 | if (pixel > raw_pixel_start) { |
194 | /* finalize last RAW span */ | 194 | /* finalize last RAW span */ |
195 | *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; | 195 | *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; |
196 | } else { | ||
197 | /* undo unused byte */ | ||
198 | cmd--; | ||
196 | } | 199 | } |
197 | 200 | ||
198 | *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; | 201 | *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; |
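
The min3() change above clamps the pixel run of one compression command to three independent limits: the protocol maximum, the pixels left on the scanline, and the bytes left in the command buffer (two output bytes per 16-bit pixel, with what looks like one byte held in reserve). A small illustration of that arithmetic; the helper name and parameters are made up:

    #include <linux/kernel.h>       /* min3(), as used in the hunk */

    static unsigned long pixels_for_one_cmd(unsigned long max_cmd_pixels,
                                            unsigned long line_bytes_left,
                                            unsigned long cmd_bytes_left,
                                            unsigned long bpp)
    {
            return min3(max_cmd_pixels,
                        line_bytes_left / bpp,
                        (cmd_bytes_left - 1) / 2);
    }
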
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f858cc72011d..3942ee61bd1c 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev) | |||
1952 | } | 1952 | } |
1953 | hdev->io_started = false; | 1953 | hdev->io_started = false; |
1954 | 1954 | ||
1955 | clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); | ||
1956 | |||
1955 | if (!hdev->driver) { | 1957 | if (!hdev->driver) { |
1956 | id = hid_match_device(hdev, hdrv); | 1958 | id = hid_match_device(hdev, hdrv); |
1957 | if (id == NULL) { | 1959 | if (id == NULL) { |
@@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data) | |||
2215 | struct hid_device *hdev = to_hid_device(dev); | 2217 | struct hid_device *hdev = to_hid_device(dev); |
2216 | 2218 | ||
2217 | if (hdev->driver == hdrv && | 2219 | if (hdev->driver == hdrv && |
2218 | !hdrv->match(hdev, hid_ignore_special_drivers)) | 2220 | !hdrv->match(hdev, hid_ignore_special_drivers) && |
2221 | !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) | ||
2219 | return device_reprobe(dev); | 2222 | return device_reprobe(dev); |
2220 | 2223 | ||
2221 | return 0; | 2224 | return 0; |
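
The two hid-core hunks above work as a pair: __hid_bus_reprobe_drivers() only reprobes a device whose HID_STAT_REPROBED bit it managed to set, and hid_device_probe() clears that bit again, so each device gets queued for reprobe at most once per probe cycle. A hedged sketch of that one-shot guard; do_reprobe() is a placeholder, while HID_STAT_REPROBED and hdev->status are the fields used in the hunk:

    #include <linux/bitops.h>
    #include <linux/hid.h>

    /* Queue @hdev for reprobe at most once; probe clears the bit and
     * thereby re-arms this guard for the next cycle. */
    static void reprobe_once(struct hid_device *hdev)
    {
            if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
                    do_reprobe(hdev);       /* placeholder action */
    }
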
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 8469b6964ff6..b48100236df8 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c | |||
@@ -1154,6 +1154,8 @@ copy_rest: | |||
1154 | goto out; | 1154 | goto out; |
1155 | if (list->tail > list->head) { | 1155 | if (list->tail > list->head) { |
1156 | len = list->tail - list->head; | 1156 | len = list->tail - list->head; |
1157 | if (len > count) | ||
1158 | len = count; | ||
1157 | 1159 | ||
1158 | if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { | 1160 | if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { |
1159 | ret = -EFAULT; | 1161 | ret = -EFAULT; |
@@ -1163,6 +1165,8 @@ copy_rest: | |||
1163 | list->head += len; | 1165 | list->head += len; |
1164 | } else { | 1166 | } else { |
1165 | len = HID_DEBUG_BUFSIZE - list->head; | 1167 | len = HID_DEBUG_BUFSIZE - list->head; |
1168 | if (len > count) | ||
1169 | len = count; | ||
1166 | 1170 | ||
1167 | if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { | 1171 | if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { |
1168 | ret = -EFAULT; | 1172 | ret = -EFAULT; |
@@ -1170,7 +1174,9 @@ copy_rest: | |||
1170 | } | 1174 | } |
1171 | list->head = 0; | 1175 | list->head = 0; |
1172 | ret += len; | 1176 | ret += len; |
1173 | goto copy_rest; | 1177 | count -= len; |
1178 | if (count > 0) | ||
1179 | goto copy_rest; | ||
1174 | } | 1180 | } |
1175 | 1181 | ||
1176 | } | 1182 | } |
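
The hid-debug hunks above bound each copy segment by the caller's count and only loop again while count is still positive, so a read() can no longer overrun the user buffer when the debug ring wraps. A minimal sketch of that clamped segment copy, assuming a plain linear source buffer; the names are illustrative:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Copy at most @count bytes out of the @avail bytes present in @src,
     * mirroring the clamping added in the fix above. */
    static ssize_t copy_segment(char __user *buf, size_t count,
                                const char *src, size_t avail)
    {
            size_t len = avail;

            if (len > count)
                    len = count;    /* never exceed the read() request */
            if (copy_to_user(buf, src, len))
                    return -EFAULT;
            return len;             /* caller subtracts this from count */
    }
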
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index c1652bb7bd15..eae0cb3ddec6 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) | |||
484 | return; | 484 | return; |
485 | } | 485 | } |
486 | 486 | ||
487 | if ((ret_size > size) || (ret_size <= 2)) { | 487 | if ((ret_size > size) || (ret_size < 2)) { |
488 | dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", | 488 | dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", |
489 | __func__, size, ret_size); | 489 | __func__, size, ret_size); |
490 | return; | 490 | return; |
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index e3ce233f8bdc..23872d08308c 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/hiddev.h> | 36 | #include <linux/hiddev.h> |
37 | #include <linux/compat.h> | 37 | #include <linux/compat.h> |
38 | #include <linux/vmalloc.h> | 38 | #include <linux/vmalloc.h> |
39 | #include <linux/nospec.h> | ||
39 | #include "usbhid.h" | 40 | #include "usbhid.h" |
40 | 41 | ||
41 | #ifdef CONFIG_USB_DYNAMIC_MINORS | 42 | #ifdef CONFIG_USB_DYNAMIC_MINORS |
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, | |||
469 | 470 | ||
470 | if (uref->field_index >= report->maxfield) | 471 | if (uref->field_index >= report->maxfield) |
471 | goto inval; | 472 | goto inval; |
473 | uref->field_index = array_index_nospec(uref->field_index, | ||
474 | report->maxfield); | ||
472 | 475 | ||
473 | field = report->field[uref->field_index]; | 476 | field = report->field[uref->field_index]; |
474 | if (uref->usage_index >= field->maxusage) | 477 | if (uref->usage_index >= field->maxusage) |
475 | goto inval; | 478 | goto inval; |
479 | uref->usage_index = array_index_nospec(uref->usage_index, | ||
480 | field->maxusage); | ||
476 | 481 | ||
477 | uref->usage_code = field->usage[uref->usage_index].hid; | 482 | uref->usage_code = field->usage[uref->usage_index].hid; |
478 | 483 | ||
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, | |||
499 | 504 | ||
500 | if (uref->field_index >= report->maxfield) | 505 | if (uref->field_index >= report->maxfield) |
501 | goto inval; | 506 | goto inval; |
507 | uref->field_index = array_index_nospec(uref->field_index, | ||
508 | report->maxfield); | ||
502 | 509 | ||
503 | field = report->field[uref->field_index]; | 510 | field = report->field[uref->field_index]; |
504 | 511 | ||
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
753 | 760 | ||
754 | if (finfo.field_index >= report->maxfield) | 761 | if (finfo.field_index >= report->maxfield) |
755 | break; | 762 | break; |
763 | finfo.field_index = array_index_nospec(finfo.field_index, | ||
764 | report->maxfield); | ||
756 | 765 | ||
757 | field = report->field[finfo.field_index]; | 766 | field = report->field[finfo.field_index]; |
758 | memset(&finfo, 0, sizeof(finfo)); | 767 | memset(&finfo, 0, sizeof(finfo)); |
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
797 | 806 | ||
798 | if (cinfo.index >= hid->maxcollection) | 807 | if (cinfo.index >= hid->maxcollection) |
799 | break; | 808 | break; |
809 | cinfo.index = array_index_nospec(cinfo.index, | ||
810 | hid->maxcollection); | ||
800 | 811 | ||
801 | cinfo.type = hid->collection[cinfo.index].type; | 812 | cinfo.type = hid->collection[cinfo.index].type; |
802 | cinfo.usage = hid->collection[cinfo.index].usage; | 813 | cinfo.usage = hid->collection[cinfo.index].usage; |
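
Every hunk in the hiddev change above has the same two-step shape: the existing architectural bounds check, followed by array_index_nospec() so the index cannot be used out of range under speculation before the branch resolves (the usual Spectre-v1 hardening). A stand-alone sketch of that pattern with illustrative names:

    #include <linux/errno.h>
    #include <linux/nospec.h>
    #include <linux/types.h>

    /* Return table[idx] only after both the real bounds check and the
     * speculation clamp have been applied. */
    static int lookup_clamped(const u32 *table, unsigned int nr,
                              unsigned int idx, u32 *out)
    {
            if (idx >= nr)
                    return -EINVAL;                 /* architectural check */
            idx = array_index_nospec(idx, nr);      /* Spectre-v1 clamp */
            *out = table[idx];
            return 0;
    }
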
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 0bb44d0088ed..ad7afa74d365 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom) | |||
3365 | if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) | 3365 | if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) |
3366 | features->device_type |= WACOM_DEVICETYPE_PAD; | 3366 | features->device_type |= WACOM_DEVICETYPE_PAD; |
3367 | 3367 | ||
3368 | features->x_max = 4096; | 3368 | if (features->type == INTUOSHT2) { |
3369 | features->y_max = 4096; | 3369 | features->x_max = features->x_max / 10; |
3370 | features->y_max = features->y_max / 10; | ||
3371 | } | ||
3372 | else { | ||
3373 | features->x_max = 4096; | ||
3374 | features->y_max = 4096; | ||
3375 | } | ||
3370 | } | 3376 | } |
3371 | else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { | 3377 | else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { |
3372 | features->device_type |= WACOM_DEVICETYPE_PAD; | 3378 | features->device_type |= WACOM_DEVICETYPE_PAD; |
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index 44cffad43701..c4d176f5ed79 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c | |||
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = { | |||
234 | .name = "cht_wc_ext_chrg_irq_chip", | 234 | .name = "cht_wc_ext_chrg_irq_chip", |
235 | }; | 235 | }; |
236 | 236 | ||
237 | static const char * const bq24190_suppliers[] = { "fusb302-typec-source" }; | 237 | static const char * const bq24190_suppliers[] = { |
238 | "tcpm-source-psy-i2c-fusb302" }; | ||
238 | 239 | ||
239 | static const struct property_entry bq24190_props[] = { | 240 | static const struct property_entry bq24190_props[] = { |
240 | PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers), | 241 | PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers), |
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index e866c481bfc3..fce52bdab2b7 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
@@ -127,7 +127,7 @@ enum stu300_error { | |||
127 | 127 | ||
128 | /* | 128 | /* |
129 | * The number of address send athemps tried before giving up. | 129 | * The number of address send athemps tried before giving up. |
130 | * If the first one failes it seems like 5 to 8 attempts are required. | 130 | * If the first one fails it seems like 5 to 8 attempts are required. |
131 | */ | 131 | */ |
132 | #define NUM_ADDR_RESEND_ATTEMPTS 12 | 132 | #define NUM_ADDR_RESEND_ATTEMPTS 12 |
133 | 133 | ||
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 5fccd1f1bca8..797def5319f1 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c | |||
@@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev) | |||
545 | { | 545 | { |
546 | u32 cnfg; | 546 | u32 cnfg; |
547 | 547 | ||
548 | /* | ||
549 | * NACK interrupt is generated before the I2C controller generates | ||
550 | * the STOP condition on the bus. So wait for 2 clock periods | ||
551 | * before disabling the controller so that the STOP condition has | ||
552 | * been delivered properly. | ||
553 | */ | ||
554 | udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); | ||
555 | |||
548 | cnfg = i2c_readl(i2c_dev, I2C_CNFG); | 556 | cnfg = i2c_readl(i2c_dev, I2C_CNFG); |
549 | if (cnfg & I2C_CNFG_PACKET_MODE_EN) | 557 | if (cnfg & I2C_CNFG_PACKET_MODE_EN) |
550 | i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG); | 558 | i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG); |
@@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, | |||
706 | if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) | 714 | if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) |
707 | return 0; | 715 | return 0; |
708 | 716 | ||
709 | /* | ||
710 | * NACK interrupt is generated before the I2C controller generates | ||
711 | * the STOP condition on the bus. So wait for 2 clock periods | ||
712 | * before resetting the controller so that the STOP condition has | ||
713 | * been delivered properly. | ||
714 | */ | ||
715 | if (i2c_dev->msg_err == I2C_ERR_NO_ACK) | ||
716 | udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); | ||
717 | |||
718 | tegra_i2c_init(i2c_dev); | 717 | tegra_i2c_init(i2c_dev); |
719 | if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { | 718 | if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { |
720 | if (msg->flags & I2C_M_IGNORE_NAK) | 719 | if (msg->flags & I2C_M_IGNORE_NAK) |
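
The comment that moves in the tegra hunk above carries the reasoning: the NACK interrupt is raised before the STOP condition has actually been put on the bus, so the driver now waits two bus-clock periods before disabling the controller rather than only before the reset on the no-ACK path. As a worked example of the expression used there (bus_clk_rate is in Hz, as in the driver): at the 100 kHz standard-mode rate the delay is DIV_ROUND_UP(2 * 1000000, 100000) = 20 us, at 400 kHz it is 5 us, and at 1 MHz it is 2 us.
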
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 31d16ada6e7d..301285c54603 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
@@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) | |||
198 | 198 | ||
199 | val = !val; | 199 | val = !val; |
200 | bri->set_scl(adap, val); | 200 | bri->set_scl(adap, val); |
201 | ndelay(RECOVERY_NDELAY); | 201 | |
202 | /* | ||
203 | * If we can set SDA, we will always create STOP here to ensure | ||
204 | * the additional pulses will do no harm. This is achieved by | ||
205 | * letting SDA follow SCL half a cycle later. | ||
206 | */ | ||
207 | ndelay(RECOVERY_NDELAY / 2); | ||
208 | if (bri->set_sda) | ||
209 | bri->set_sda(adap, val); | ||
210 | ndelay(RECOVERY_NDELAY / 2); | ||
202 | } | 211 | } |
203 | 212 | ||
204 | /* check if recovery actually succeeded */ | 213 | /* check if recovery actually succeeded */ |
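
The recovery change above splits each clock step in two: SCL is toggled first and, when the adapter can also drive SDA, SDA is made to follow half a period later, so every cycle in which SDA is released ends with SDA rising while SCL is high, which is a STOP condition. A hedged sketch of one such toggle step, reusing the file's RECOVERY_NDELAY and the standard i2c_bus_recovery_info callbacks; the helper itself is illustrative, not part of the core:

    #include <linux/delay.h>
    #include <linux/i2c.h>

    static void recovery_toggle(struct i2c_adapter *adap,
                                struct i2c_bus_recovery_info *bri, int val)
    {
            bri->set_scl(adap, val);
            ndelay(RECOVERY_NDELAY / 2);    /* let SCL settle first */
            if (bri->set_sda)
                    bri->set_sda(adap, val);        /* SDA trails SCL */
            ndelay(RECOVERY_NDELAY / 2);    /* finish the half period */
    }
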
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 3e90b6a1d9d2..cc06e8404e9b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
3488 | struct ib_flow_attr *flow_attr; | 3488 | struct ib_flow_attr *flow_attr; |
3489 | struct ib_qp *qp; | 3489 | struct ib_qp *qp; |
3490 | struct ib_uflow_resources *uflow_res; | 3490 | struct ib_uflow_resources *uflow_res; |
3491 | struct ib_uverbs_flow_spec_hdr *kern_spec; | ||
3491 | int err = 0; | 3492 | int err = 0; |
3492 | void *kern_spec; | ||
3493 | void *ib_spec; | 3493 | void *ib_spec; |
3494 | int i; | 3494 | int i; |
3495 | 3495 | ||
@@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
3538 | if (!kern_flow_attr) | 3538 | if (!kern_flow_attr) |
3539 | return -ENOMEM; | 3539 | return -ENOMEM; |
3540 | 3540 | ||
3541 | memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); | 3541 | *kern_flow_attr = cmd.flow_attr; |
3542 | err = ib_copy_from_udata(kern_flow_attr + 1, ucore, | 3542 | err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore, |
3543 | cmd.flow_attr.size); | 3543 | cmd.flow_attr.size); |
3544 | if (err) | 3544 | if (err) |
3545 | goto err_free_attr; | 3545 | goto err_free_attr; |
@@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
3559 | goto err_uobj; | 3559 | goto err_uobj; |
3560 | } | 3560 | } |
3561 | 3561 | ||
3562 | if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) { | ||
3563 | err = -EINVAL; | ||
3564 | goto err_put; | ||
3565 | } | ||
3566 | |||
3562 | flow_attr = kzalloc(struct_size(flow_attr, flows, | 3567 | flow_attr = kzalloc(struct_size(flow_attr, flows, |
3563 | cmd.flow_attr.num_of_specs), GFP_KERNEL); | 3568 | cmd.flow_attr.num_of_specs), GFP_KERNEL); |
3564 | if (!flow_attr) { | 3569 | if (!flow_attr) { |
@@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, | |||
3578 | flow_attr->flags = kern_flow_attr->flags; | 3583 | flow_attr->flags = kern_flow_attr->flags; |
3579 | flow_attr->size = sizeof(*flow_attr); | 3584 | flow_attr->size = sizeof(*flow_attr); |
3580 | 3585 | ||
3581 | kern_spec = kern_flow_attr + 1; | 3586 | kern_spec = kern_flow_attr->flow_specs; |
3582 | ib_spec = flow_attr + 1; | 3587 | ib_spec = flow_attr + 1; |
3583 | for (i = 0; i < flow_attr->num_of_specs && | 3588 | for (i = 0; i < flow_attr->num_of_specs && |
3584 | cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && | 3589 | cmd.flow_attr.size >= sizeof(*kern_spec) && |
3585 | cmd.flow_attr.size >= | 3590 | cmd.flow_attr.size >= kern_spec->size; |
3586 | ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { | 3591 | i++) { |
3587 | err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec, | 3592 | err = kern_spec_to_ib_spec( |
3588 | uflow_res); | 3593 | file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec, |
3594 | ib_spec, uflow_res); | ||
3589 | if (err) | 3595 | if (err) |
3590 | goto err_free; | 3596 | goto err_free; |
3591 | 3597 | ||
3592 | flow_attr->size += | 3598 | flow_attr->size += |
3593 | ((union ib_flow_spec *) ib_spec)->size; | 3599 | ((union ib_flow_spec *) ib_spec)->size; |
3594 | cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; | 3600 | cmd.flow_attr.size -= kern_spec->size; |
3595 | kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; | 3601 | kern_spec = ((void *)kern_spec) + kern_spec->size; |
3596 | ib_spec += ((union ib_flow_spec *) ib_spec)->size; | 3602 | ib_spec += ((union ib_flow_spec *) ib_spec)->size; |
3597 | } | 3603 | } |
3598 | if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { | 3604 | if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 1445918e3239..7b76e6f81aeb 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr) | |||
774 | { | 774 | { |
775 | struct c4iw_mr *mhp = to_c4iw_mr(ibmr); | 775 | struct c4iw_mr *mhp = to_c4iw_mr(ibmr); |
776 | 776 | ||
777 | if (unlikely(mhp->mpl_len == mhp->max_mpl_len)) | 777 | if (unlikely(mhp->mpl_len == mhp->attr.pbl_size)) |
778 | return -ENOMEM; | 778 | return -ENOMEM; |
779 | 779 | ||
780 | mhp->mpl[mhp->mpl_len++] = addr; | 780 | mhp->mpl[mhp->mpl_len++] = addr; |
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 1a1a47ac53c6..f15c93102081 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
271 | 271 | ||
272 | lockdep_assert_held(&qp->s_lock); | 272 | lockdep_assert_held(&qp->s_lock); |
273 | ps->s_txreq = get_txreq(ps->dev, qp); | 273 | ps->s_txreq = get_txreq(ps->dev, qp); |
274 | if (IS_ERR(ps->s_txreq)) | 274 | if (!ps->s_txreq) |
275 | goto bail_no_tx; | 275 | goto bail_no_tx; |
276 | 276 | ||
277 | if (priv->hdr_type == HFI1_PKT_TYPE_9B) { | 277 | if (priv->hdr_type == HFI1_PKT_TYPE_9B) { |
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index b7b671017e59..e254dcec6f64 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright(c) 2015, 2016 Intel Corporation. | 2 | * Copyright(c) 2015 - 2018 Intel Corporation. |
3 | * | 3 | * |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | 4 | * This file is provided under a dual BSD/GPLv2 license. When using or |
5 | * redistributing this file, you may do so under either license. | 5 | * redistributing this file, you may do so under either license. |
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
72 | int middle = 0; | 72 | int middle = 0; |
73 | 73 | ||
74 | ps->s_txreq = get_txreq(ps->dev, qp); | 74 | ps->s_txreq = get_txreq(ps->dev, qp); |
75 | if (IS_ERR(ps->s_txreq)) | 75 | if (!ps->s_txreq) |
76 | goto bail_no_tx; | 76 | goto bail_no_tx; |
77 | 77 | ||
78 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { | 78 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { |
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 1ab332f1866e..70d39fc450a1 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright(c) 2015, 2016 Intel Corporation. | 2 | * Copyright(c) 2015 - 2018 Intel Corporation. |
3 | * | 3 | * |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | 4 | * This file is provided under a dual BSD/GPLv2 license. When using or |
5 | * redistributing this file, you may do so under either license. | 5 | * redistributing this file, you may do so under either license. |
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
503 | u32 lid; | 503 | u32 lid; |
504 | 504 | ||
505 | ps->s_txreq = get_txreq(ps->dev, qp); | 505 | ps->s_txreq = get_txreq(ps->dev, qp); |
506 | if (IS_ERR(ps->s_txreq)) | 506 | if (!ps->s_txreq) |
507 | goto bail_no_tx; | 507 | goto bail_no_tx; |
508 | 508 | ||
509 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { | 509 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { |
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index 873e48ea923f..c4ab2d5b4502 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright(c) 2016 - 2017 Intel Corporation. | 2 | * Copyright(c) 2016 - 2018 Intel Corporation. |
3 | * | 3 | * |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | 4 | * This file is provided under a dual BSD/GPLv2 license. When using or |
5 | * redistributing this file, you may do so under either license. | 5 | * redistributing this file, you may do so under either license. |
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, | |||
94 | struct rvt_qp *qp) | 94 | struct rvt_qp *qp) |
95 | __must_hold(&qp->s_lock) | 95 | __must_hold(&qp->s_lock) |
96 | { | 96 | { |
97 | struct verbs_txreq *tx = ERR_PTR(-EBUSY); | 97 | struct verbs_txreq *tx = NULL; |
98 | 98 | ||
99 | write_seqlock(&dev->txwait_lock); | 99 | write_seqlock(&dev->txwait_lock); |
100 | if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { | 100 | if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { |
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 729244c3086c..1c19bbc764b2 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright(c) 2016 Intel Corporation. | 2 | * Copyright(c) 2016 - 2018 Intel Corporation. |
3 | * | 3 | * |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | 4 | * This file is provided under a dual BSD/GPLv2 license. When using or |
5 | * redistributing this file, you may do so under either license. | 5 | * redistributing this file, you may do so under either license. |
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, | |||
83 | if (unlikely(!tx)) { | 83 | if (unlikely(!tx)) { |
84 | /* call slow path to get the lock */ | 84 | /* call slow path to get the lock */ |
85 | tx = __get_txreq(dev, qp); | 85 | tx = __get_txreq(dev, qp); |
86 | if (IS_ERR(tx)) | 86 | if (!tx) |
87 | return tx; | 87 | return tx; |
88 | } | 88 | } |
89 | tx->qp = qp; | 89 | tx->qp = qp; |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e3e330f59c2c..b3ba9a222550 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -6113,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
6113 | dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), | 6113 | dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), |
6114 | MLX5_CAP_GEN(mdev, num_vhca_ports)); | 6114 | MLX5_CAP_GEN(mdev, num_vhca_ports)); |
6115 | 6115 | ||
6116 | if (MLX5_VPORT_MANAGER(mdev) && | 6116 | if (MLX5_ESWITCH_MANAGER(mdev) && |
6117 | mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { | 6117 | mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { |
6118 | dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); | 6118 | dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); |
6119 | 6119 | ||
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 0af7b7905550..f5de5adc9b1a 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c | |||
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
266 | 266 | ||
267 | desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + | 267 | desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + |
268 | srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); | 268 | srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); |
269 | if (desc_size == 0 || srq->msrq.max_gs > desc_size) | 269 | if (desc_size == 0 || srq->msrq.max_gs > desc_size) { |
270 | return ERR_PTR(-EINVAL); | 270 | err = -EINVAL; |
271 | goto err_srq; | ||
272 | } | ||
271 | desc_size = roundup_pow_of_two(desc_size); | 273 | desc_size = roundup_pow_of_two(desc_size); |
272 | desc_size = max_t(size_t, 32, desc_size); | 274 | desc_size = max_t(size_t, 32, desc_size); |
273 | if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) | 275 | if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) { |
274 | return ERR_PTR(-EINVAL); | 276 | err = -EINVAL; |
277 | goto err_srq; | ||
278 | } | ||
275 | srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / | 279 | srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / |
276 | sizeof(struct mlx5_wqe_data_seg); | 280 | sizeof(struct mlx5_wqe_data_seg); |
277 | srq->msrq.wqe_shift = ilog2(desc_size); | 281 | srq->msrq.wqe_shift = ilog2(desc_size); |
278 | buf_size = srq->msrq.max * desc_size; | 282 | buf_size = srq->msrq.max * desc_size; |
279 | if (buf_size < desc_size) | 283 | if (buf_size < desc_size) { |
280 | return ERR_PTR(-EINVAL); | 284 | err = -EINVAL; |
285 | goto err_srq; | ||
286 | } | ||
281 | in.type = init_attr->srq_type; | 287 | in.type = init_attr->srq_type; |
282 | 288 | ||
283 | if (pd->uobject) | 289 | if (pd->uobject) |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e055d228bfb9..689ffe538370 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -142,7 +142,6 @@ config DMAR_TABLE | |||
142 | config INTEL_IOMMU | 142 | config INTEL_IOMMU |
143 | bool "Support for Intel IOMMU using DMA Remapping Devices" | 143 | bool "Support for Intel IOMMU using DMA Remapping Devices" |
144 | depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) | 144 | depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) |
145 | select DMA_DIRECT_OPS | ||
146 | select IOMMU_API | 145 | select IOMMU_API |
147 | select IOMMU_IOVA | 146 | select IOMMU_IOVA |
148 | select NEED_DMA_MAP_STATE | 147 | select NEED_DMA_MAP_STATE |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 14e4b3722428..b344a883f116 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
32 | #include <linux/dmar.h> | 32 | #include <linux/dmar.h> |
33 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
34 | #include <linux/dma-direct.h> | ||
35 | #include <linux/mempool.h> | 34 | #include <linux/mempool.h> |
36 | #include <linux/memory.h> | 35 | #include <linux/memory.h> |
37 | #include <linux/cpu.h> | 36 | #include <linux/cpu.h> |
@@ -3713,30 +3712,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size, | |||
3713 | dma_addr_t *dma_handle, gfp_t flags, | 3712 | dma_addr_t *dma_handle, gfp_t flags, |
3714 | unsigned long attrs) | 3713 | unsigned long attrs) |
3715 | { | 3714 | { |
3716 | void *vaddr; | 3715 | struct page *page = NULL; |
3716 | int order; | ||
3717 | 3717 | ||
3718 | vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs); | 3718 | size = PAGE_ALIGN(size); |
3719 | if (iommu_no_mapping(dev) || !vaddr) | 3719 | order = get_order(size); |
3720 | return vaddr; | ||
3721 | 3720 | ||
3722 | *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr), | 3721 | if (!iommu_no_mapping(dev)) |
3723 | PAGE_ALIGN(size), DMA_BIDIRECTIONAL, | 3722 | flags &= ~(GFP_DMA | GFP_DMA32); |
3724 | dev->coherent_dma_mask); | 3723 | else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { |
3725 | if (!*dma_handle) | 3724 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) |
3726 | goto out_free_pages; | 3725 | flags |= GFP_DMA; |
3727 | return vaddr; | 3726 | else |
3727 | flags |= GFP_DMA32; | ||
3728 | } | ||
3729 | |||
3730 | if (gfpflags_allow_blocking(flags)) { | ||
3731 | unsigned int count = size >> PAGE_SHIFT; | ||
3732 | |||
3733 | page = dma_alloc_from_contiguous(dev, count, order, flags); | ||
3734 | if (page && iommu_no_mapping(dev) && | ||
3735 | page_to_phys(page) + size > dev->coherent_dma_mask) { | ||
3736 | dma_release_from_contiguous(dev, page, count); | ||
3737 | page = NULL; | ||
3738 | } | ||
3739 | } | ||
3740 | |||
3741 | if (!page) | ||
3742 | page = alloc_pages(flags, order); | ||
3743 | if (!page) | ||
3744 | return NULL; | ||
3745 | memset(page_address(page), 0, size); | ||
3746 | |||
3747 | *dma_handle = __intel_map_single(dev, page_to_phys(page), size, | ||
3748 | DMA_BIDIRECTIONAL, | ||
3749 | dev->coherent_dma_mask); | ||
3750 | if (*dma_handle) | ||
3751 | return page_address(page); | ||
3752 | if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) | ||
3753 | __free_pages(page, order); | ||
3728 | 3754 | ||
3729 | out_free_pages: | ||
3730 | dma_direct_free(dev, size, vaddr, *dma_handle, attrs); | ||
3731 | return NULL; | 3755 | return NULL; |
3732 | } | 3756 | } |
3733 | 3757 | ||
3734 | static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, | 3758 | static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, |
3735 | dma_addr_t dma_handle, unsigned long attrs) | 3759 | dma_addr_t dma_handle, unsigned long attrs) |
3736 | { | 3760 | { |
3737 | if (!iommu_no_mapping(dev)) | 3761 | int order; |
3738 | intel_unmap(dev, dma_handle, PAGE_ALIGN(size)); | 3762 | struct page *page = virt_to_page(vaddr); |
3739 | dma_direct_free(dev, size, vaddr, dma_handle, attrs); | 3763 | |
3764 | size = PAGE_ALIGN(size); | ||
3765 | order = get_order(size); | ||
3766 | |||
3767 | intel_unmap(dev, dma_handle, size); | ||
3768 | if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) | ||
3769 | __free_pages(page, order); | ||
3740 | } | 3770 | } |
3741 | 3771 | ||
3742 | static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, | 3772 | static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 29b0cd9ec951..994aed2f9dff 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev) | |||
5547 | else | 5547 | else |
5548 | pr_warn("md: personality for level %s is not loaded!\n", | 5548 | pr_warn("md: personality for level %s is not loaded!\n", |
5549 | mddev->clevel); | 5549 | mddev->clevel); |
5550 | return -EINVAL; | 5550 | err = -EINVAL; |
5551 | goto abort; | ||
5551 | } | 5552 | } |
5552 | spin_unlock(&pers_lock); | 5553 | spin_unlock(&pers_lock); |
5553 | if (mddev->level != pers->level) { | 5554 | if (mddev->level != pers->level) { |
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev) | |||
5560 | pers->start_reshape == NULL) { | 5561 | pers->start_reshape == NULL) { |
5561 | /* This personality cannot handle reshaping... */ | 5562 | /* This personality cannot handle reshaping... */ |
5562 | module_put(pers->owner); | 5563 | module_put(pers->owner); |
5563 | return -EINVAL; | 5564 | err = -EINVAL; |
5565 | goto abort; | ||
5564 | } | 5566 | } |
5565 | 5567 | ||
5566 | if (pers->sync_request) { | 5568 | if (pers->sync_request) { |
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev) | |||
5629 | mddev->private = NULL; | 5631 | mddev->private = NULL; |
5630 | module_put(pers->owner); | 5632 | module_put(pers->owner); |
5631 | bitmap_destroy(mddev); | 5633 | bitmap_destroy(mddev); |
5632 | return err; | 5634 | goto abort; |
5633 | } | 5635 | } |
5634 | if (mddev->queue) { | 5636 | if (mddev->queue) { |
5635 | bool nonrot = true; | 5637 | bool nonrot = true; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 478cf446827f..35bd3a62451b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev) | |||
3893 | disk->rdev->saved_raid_disk < 0) | 3893 | disk->rdev->saved_raid_disk < 0) |
3894 | conf->fullsync = 1; | 3894 | conf->fullsync = 1; |
3895 | } | 3895 | } |
3896 | |||
3897 | if (disk->replacement && | ||
3898 | !test_bit(In_sync, &disk->replacement->flags) && | ||
3899 | disk->replacement->saved_raid_disk < 0) { | ||
3900 | conf->fullsync = 1; | ||
3901 | } | ||
3902 | |||
3896 | disk->recovery_disabled = mddev->recovery_disabled - 1; | 3903 | disk->recovery_disabled = mddev->recovery_disabled - 1; |
3897 | } | 3904 | } |
3898 | 3905 | ||
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 40826bba06b6..fcfab6635f9c 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c | |||
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev) | |||
207 | bpf_prog_array_free(rcdev->raw->progs); | 207 | bpf_prog_array_free(rcdev->raw->progs); |
208 | } | 208 | } |
209 | 209 | ||
210 | int lirc_prog_attach(const union bpf_attr *attr) | 210 | int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) |
211 | { | 211 | { |
212 | struct bpf_prog *prog; | ||
213 | struct rc_dev *rcdev; | 212 | struct rc_dev *rcdev; |
214 | int ret; | 213 | int ret; |
215 | 214 | ||
216 | if (attr->attach_flags) | 215 | if (attr->attach_flags) |
217 | return -EINVAL; | 216 | return -EINVAL; |
218 | 217 | ||
219 | prog = bpf_prog_get_type(attr->attach_bpf_fd, | ||
220 | BPF_PROG_TYPE_LIRC_MODE2); | ||
221 | if (IS_ERR(prog)) | ||
222 | return PTR_ERR(prog); | ||
223 | |||
224 | rcdev = rc_dev_get_from_fd(attr->target_fd); | 218 | rcdev = rc_dev_get_from_fd(attr->target_fd); |
225 | if (IS_ERR(rcdev)) { | 219 | if (IS_ERR(rcdev)) |
226 | bpf_prog_put(prog); | ||
227 | return PTR_ERR(rcdev); | 220 | return PTR_ERR(rcdev); |
228 | } | ||
229 | 221 | ||
230 | ret = lirc_bpf_attach(rcdev, prog); | 222 | ret = lirc_bpf_attach(rcdev, prog); |
231 | if (ret) | ||
232 | bpf_prog_put(prog); | ||
233 | 223 | ||
234 | put_device(&rcdev->dev); | 224 | put_device(&rcdev->dev); |
235 | 225 | ||
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index e05c3245930a..fa840666bdd1 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c | |||
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file) | |||
507 | static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) | 507 | static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) |
508 | { | 508 | { |
509 | void __iomem *address = (void __iomem *)file->private_data; | 509 | void __iomem *address = (void __iomem *)file->private_data; |
510 | unsigned char *page; | ||
511 | int retval; | ||
512 | int len = 0; | 510 | int len = 0; |
513 | unsigned int value; | 511 | unsigned int value; |
514 | 512 | char lbuf[20]; | |
515 | if (*offset < 0) | ||
516 | return -EINVAL; | ||
517 | if (count == 0 || count > 1024) | ||
518 | return 0; | ||
519 | if (*offset != 0) | ||
520 | return 0; | ||
521 | |||
522 | page = (unsigned char *)__get_free_page(GFP_KERNEL); | ||
523 | if (!page) | ||
524 | return -ENOMEM; | ||
525 | 513 | ||
526 | value = readl(address); | 514 | value = readl(address); |
527 | len = sprintf(page, "%d\n", value); | 515 | len = snprintf(lbuf, sizeof(lbuf), "%d\n", value); |
528 | |||
529 | if (copy_to_user(buf, page, len)) { | ||
530 | retval = -EFAULT; | ||
531 | goto exit; | ||
532 | } | ||
533 | *offset += len; | ||
534 | retval = len; | ||
535 | 516 | ||
536 | exit: | 517 | return simple_read_from_buffer(buf, count, offset, lbuf, len); |
537 | free_page((unsigned long)page); | ||
538 | return retval; | ||
539 | } | 518 | } |
540 | 519 | ||
541 | static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) | 520 | static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) |
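
The ibmasmfs rewrite above drops the hand-rolled offset checks, page allocation and copy_to_user() handling in favour of a small stack buffer plus simple_read_from_buffer(), which deals with *offset, short reads and end-of-file itself. A usage sketch of the same shape, with an invented read handler and a sample value:

    #include <linux/fs.h>
    #include <linux/kernel.h>

    static ssize_t sample_value_read(struct file *file, char __user *buf,
                                     size_t count, loff_t *ppos)
    {
            char tmp[20];
            int len = scnprintf(tmp, sizeof(tmp), "%u\n", 42u /* sample */);

            /* Helper takes care of *ppos, partial reads and EOF. */
            return simple_read_from_buffer(buf, count, ppos, tmp, len);
    }
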
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index b0b8f18a85e3..6649f0d56d2f 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c | |||
@@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
310 | if (&cl->link == &dev->file_list) { | 310 | if (&cl->link == &dev->file_list) { |
311 | /* A message for not connected fixed address clients | 311 | /* A message for not connected fixed address clients |
312 | * should be silently discarded | 312 | * should be silently discarded |
313 | * On power down client may be force cleaned, | ||
314 | * silently discard such messages | ||
313 | */ | 315 | */ |
314 | if (hdr_is_fixed(mei_hdr)) { | 316 | if (hdr_is_fixed(mei_hdr) || |
317 | dev->dev_state == MEI_DEV_POWER_DOWN) { | ||
315 | mei_irq_discard_msg(dev, mei_hdr); | 318 | mei_irq_discard_msg(dev, mei_hdr); |
316 | ret = 0; | 319 | ret = 0; |
317 | goto reset_slots; | 320 | goto reset_slots; |
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index efd733472a35..56c6f79a5c5a 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c | |||
@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b, | |||
467 | unsigned int num_pages, bool is_2m_pages, unsigned int *target) | 467 | unsigned int num_pages, bool is_2m_pages, unsigned int *target) |
468 | { | 468 | { |
469 | unsigned long status; | 469 | unsigned long status; |
470 | unsigned long pfn = page_to_pfn(b->page); | 470 | unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); |
471 | 471 | ||
472 | STATS_INC(b->stats.lock[is_2m_pages]); | 472 | STATS_INC(b->stats.lock[is_2m_pages]); |
473 | 473 | ||
@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b, | |||
515 | unsigned int num_pages, bool is_2m_pages, unsigned int *target) | 515 | unsigned int num_pages, bool is_2m_pages, unsigned int *target) |
516 | { | 516 | { |
517 | unsigned long status; | 517 | unsigned long status; |
518 | unsigned long pfn = page_to_pfn(b->page); | 518 | unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); |
519 | 519 | ||
520 | STATS_INC(b->stats.unlock[is_2m_pages]); | 520 | STATS_INC(b->stats.unlock[is_2m_pages]); |
521 | 521 | ||
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index ef05e0039378..2a833686784b 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c | |||
@@ -27,8 +27,8 @@ struct mmc_gpio { | |||
27 | bool override_cd_active_level; | 27 | bool override_cd_active_level; |
28 | irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); | 28 | irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); |
29 | char *ro_label; | 29 | char *ro_label; |
30 | char cd_label[0]; | ||
31 | u32 cd_debounce_delay_ms; | 30 | u32 cd_debounce_delay_ms; |
31 | char cd_label[]; | ||
32 | }; | 32 | }; |
33 | 33 | ||
34 | static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) | 34 | static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) |
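
The slot-gpio change above moves cd_label to the end of struct mmc_gpio and turns the zero-length array into a proper flexible array member, since such a member has to be the last field and the recently added cd_debounce_delay_ms had landed after it. A generic sketch of that layout and of sizing the allocation; the structure and helper names here are made up:

    #include <linux/overflow.h>     /* struct_size() */
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct labelled_ctx {
            u32 debounce_ms;        /* fixed-size members come first */
            char label[];           /* flexible array member stays last */
    };

    static struct labelled_ctx *labelled_ctx_alloc(const char *name)
    {
            struct labelled_ctx *ctx;

            ctx = kzalloc(struct_size(ctx, label, strlen(name) + 1),
                          GFP_KERNEL);
            if (ctx)
                    strcpy(ctx->label, name);
            return ctx;
    }
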
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 623f4d27fa01..80dc2fd6576c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) | |||
1065 | * It's used when HS400 mode is enabled. | 1065 | * It's used when HS400 mode is enabled. |
1066 | */ | 1066 | */ |
1067 | if (data->flags & MMC_DATA_WRITE && | 1067 | if (data->flags & MMC_DATA_WRITE && |
1068 | !(host->timing != MMC_TIMING_MMC_HS400)) | 1068 | host->timing != MMC_TIMING_MMC_HS400) |
1069 | return; | 1069 | goto disable; |
1070 | 1070 | ||
1071 | if (data->flags & MMC_DATA_WRITE) | 1071 | if (data->flags & MMC_DATA_WRITE) |
1072 | enable = SDMMC_CARD_WR_THR_EN; | 1072 | enable = SDMMC_CARD_WR_THR_EN; |
@@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) | |||
1074 | enable = SDMMC_CARD_RD_THR_EN; | 1074 | enable = SDMMC_CARD_RD_THR_EN; |
1075 | 1075 | ||
1076 | if (host->timing != MMC_TIMING_MMC_HS200 && | 1076 | if (host->timing != MMC_TIMING_MMC_HS200 && |
1077 | host->timing != MMC_TIMING_UHS_SDR104) | 1077 | host->timing != MMC_TIMING_UHS_SDR104 && |
1078 | host->timing != MMC_TIMING_MMC_HS400) | ||
1078 | goto disable; | 1079 | goto disable; |
1079 | 1080 | ||
1080 | blksz_depth = blksz / (1 << host->data_shift); | 1081 | blksz_depth = blksz / (1 << host->data_shift); |
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index f7f9773d161f..d032bd63444d 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c | |||
@@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { | |||
139 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, | 139 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, |
140 | RST_RESERVED_BITS | val); | 140 | RST_RESERVED_BITS | val); |
141 | 141 | ||
142 | if (host->data && host->data->flags & MMC_DATA_READ) | 142 | clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); |
143 | clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); | ||
144 | 143 | ||
145 | renesas_sdhi_internal_dmac_enable_dma(host, true); | 144 | renesas_sdhi_internal_dmac_enable_dma(host, true); |
146 | } | 145 | } |
@@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, | |||
164 | goto force_pio; | 163 | goto force_pio; |
165 | 164 | ||
166 | /* This DMAC cannot handle if buffer is not 8-bytes alignment */ | 165 | /* This DMAC cannot handle if buffer is not 8-bytes alignment */ |
167 | if (!IS_ALIGNED(sg_dma_address(sg), 8)) { | 166 | if (!IS_ALIGNED(sg_dma_address(sg), 8)) |
168 | dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, | 167 | goto force_pio_with_unmap; |
169 | mmc_get_dma_dir(data)); | ||
170 | goto force_pio; | ||
171 | } | ||
172 | 168 | ||
173 | if (data->flags & MMC_DATA_READ) { | 169 | if (data->flags & MMC_DATA_READ) { |
174 | dtran_mode |= DTRAN_MODE_CH_NUM_CH1; | 170 | dtran_mode |= DTRAN_MODE_CH_NUM_CH1; |
175 | if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && | 171 | if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && |
176 | test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) | 172 | test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) |
177 | goto force_pio; | 173 | goto force_pio_with_unmap; |
178 | } else { | 174 | } else { |
179 | dtran_mode |= DTRAN_MODE_CH_NUM_CH0; | 175 | dtran_mode |= DTRAN_MODE_CH_NUM_CH0; |
180 | } | 176 | } |
@@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, | |||
189 | 185 | ||
190 | return; | 186 | return; |
191 | 187 | ||
188 | force_pio_with_unmap: | ||
189 | dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data)); | ||
190 | |||
192 | force_pio: | 191 | force_pio: |
193 | host->force_pio = true; | 192 | host->force_pio = true; |
194 | renesas_sdhi_internal_dmac_enable_dma(host, false); | 193 | renesas_sdhi_internal_dmac_enable_dma(host, false); |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d6aef70d34fa..4eb3d29ecde1 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | |||
312 | 312 | ||
313 | if (imx_data->socdata->flags & ESDHC_FLAG_HS400) | 313 | if (imx_data->socdata->flags & ESDHC_FLAG_HS400) |
314 | val |= SDHCI_SUPPORT_HS400; | 314 | val |= SDHCI_SUPPORT_HS400; |
315 | |||
316 | /* | ||
317 | * Do not advertise faster UHS modes if there are no | ||
318 | * pinctrl states for 100MHz/200MHz. | ||
319 | */ | ||
320 | if (IS_ERR_OR_NULL(imx_data->pins_100mhz) || | ||
321 | IS_ERR_OR_NULL(imx_data->pins_200mhz)) | ||
322 | val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50 | ||
323 | | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400); | ||
315 | } | 324 | } |
316 | } | 325 | } |
317 | 326 | ||
@@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | |||
1158 | ESDHC_PINCTRL_STATE_100MHZ); | 1167 | ESDHC_PINCTRL_STATE_100MHZ); |
1159 | imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, | 1168 | imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, |
1160 | ESDHC_PINCTRL_STATE_200MHZ); | 1169 | ESDHC_PINCTRL_STATE_200MHZ); |
1161 | if (IS_ERR(imx_data->pins_100mhz) || | ||
1162 | IS_ERR(imx_data->pins_200mhz)) { | ||
1163 | dev_warn(mmc_dev(host->mmc), | ||
1164 | "could not get ultra high speed state, work on normal mode\n"); | ||
1165 | /* | ||
1166 | * fall back to not supporting uhs by specifying no | ||
1167 | * 1.8v quirk | ||
1168 | */ | ||
1169 | host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; | ||
1170 | } | ||
1171 | } else { | ||
1172 | host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; | ||
1173 | } | 1170 | } |
1174 | 1171 | ||
1175 | /* call to generic mmc_of_parse to support additional capabilities */ | 1172 | /* call to generic mmc_of_parse to support additional capabilities */ |
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index e7472590f2ed..8e7f3e35ee3d 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
@@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev) | |||
1446 | sunxi_mmc_init_host(host); | 1446 | sunxi_mmc_init_host(host); |
1447 | sunxi_mmc_set_bus_width(host, mmc->ios.bus_width); | 1447 | sunxi_mmc_set_bus_width(host, mmc->ios.bus_width); |
1448 | sunxi_mmc_set_clk(host, &mmc->ios); | 1448 | sunxi_mmc_set_clk(host, &mmc->ios); |
1449 | enable_irq(host->irq); | ||
1449 | 1450 | ||
1450 | return 0; | 1451 | return 0; |
1451 | } | 1452 | } |
@@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev) | |||
1455 | struct mmc_host *mmc = dev_get_drvdata(dev); | 1456 | struct mmc_host *mmc = dev_get_drvdata(dev); |
1456 | struct sunxi_mmc_host *host = mmc_priv(mmc); | 1457 | struct sunxi_mmc_host *host = mmc_priv(mmc); |
1457 | 1458 | ||
1459 | /* | ||
1460 | * When clocks are off, it's possible receiving | ||
1461 | * fake interrupts, which will stall the system. | ||
1462 | * Disabling the irq will prevent this. | ||
1463 | */ | ||
1464 | disable_irq(host->irq); | ||
1458 | sunxi_mmc_reset_host(host); | 1465 | sunxi_mmc_reset_host(host); |
1459 | sunxi_mmc_disable(host); | 1466 | sunxi_mmc_disable(host); |
1460 | 1467 | ||
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index c3f7aaa5d18f..d7e10b36a0b9 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c | |||
@@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to, | |||
926 | if (ret) | 926 | if (ret) |
927 | return ret; | 927 | return ret; |
928 | 928 | ||
929 | if (f_pdata->use_direct_mode) | 929 | if (f_pdata->use_direct_mode) { |
930 | memcpy_toio(cqspi->ahb_base + to, buf, len); | 930 | memcpy_toio(cqspi->ahb_base + to, buf, len); |
931 | else | 931 | ret = cqspi_wait_idle(cqspi); |
932 | } else { | ||
932 | ret = cqspi_indirect_write_execute(nor, to, buf, len); | 933 | ret = cqspi_indirect_write_execute(nor, to, buf, len); |
934 | } | ||
933 | if (ret) | 935 | if (ret) |
934 | return ret; | 936 | return ret; |
935 | 937 | ||
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 567ee54504bc..5e5022fa1d04 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev) | |||
1897 | struct pci_dev *pdev = to_pci_dev(dev); | 1897 | struct pci_dev *pdev = to_pci_dev(dev); |
1898 | struct alx_priv *alx = pci_get_drvdata(pdev); | 1898 | struct alx_priv *alx = pci_get_drvdata(pdev); |
1899 | struct alx_hw *hw = &alx->hw; | 1899 | struct alx_hw *hw = &alx->hw; |
1900 | int err; | ||
1900 | 1901 | ||
1901 | alx_reset_phy(hw); | 1902 | alx_reset_phy(hw); |
1902 | 1903 | ||
1903 | if (!netif_running(alx->dev)) | 1904 | if (!netif_running(alx->dev)) |
1904 | return 0; | 1905 | return 0; |
1905 | netif_device_attach(alx->dev); | 1906 | netif_device_attach(alx->dev); |
1906 | return __alx_open(alx, true); | 1907 | |
1908 | rtnl_lock(); | ||
1909 | err = __alx_open(alx, true); | ||
1910 | rtnl_unlock(); | ||
1911 | |||
1912 | return err; | ||
1907 | } | 1913 | } |
1908 | 1914 | ||
1909 | static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); | 1915 | static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d847e1b9c37b..be1506169076 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -1533,6 +1533,7 @@ struct bnx2x { | |||
1533 | struct link_vars link_vars; | 1533 | struct link_vars link_vars; |
1534 | u32 link_cnt; | 1534 | u32 link_cnt; |
1535 | struct bnx2x_link_report_data last_reported_link; | 1535 | struct bnx2x_link_report_data last_reported_link; |
1536 | bool force_link_down; | ||
1536 | 1537 | ||
1537 | struct mdio_if_info mdio; | 1538 | struct mdio_if_info mdio; |
1538 | 1539 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8cd73ff5debc..af7b5a4d8ba0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp) | |||
1261 | { | 1261 | { |
1262 | struct bnx2x_link_report_data cur_data; | 1262 | struct bnx2x_link_report_data cur_data; |
1263 | 1263 | ||
1264 | if (bp->force_link_down) { | ||
1265 | bp->link_vars.link_up = 0; | ||
1266 | return; | ||
1267 | } | ||
1268 | |||
1264 | /* reread mf_cfg */ | 1269 | /* reread mf_cfg */ |
1265 | if (IS_PF(bp) && !CHIP_IS_E1(bp)) | 1270 | if (IS_PF(bp) && !CHIP_IS_E1(bp)) |
1266 | bnx2x_read_mf_cfg(bp); | 1271 | bnx2x_read_mf_cfg(bp); |
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
2817 | bp->pending_max = 0; | 2822 | bp->pending_max = 0; |
2818 | } | 2823 | } |
2819 | 2824 | ||
2825 | bp->force_link_down = false; | ||
2820 | if (bp->port.pmf) { | 2826 | if (bp->port.pmf) { |
2821 | rc = bnx2x_initial_phy_init(bp, load_mode); | 2827 | rc = bnx2x_initial_phy_init(bp, load_mode); |
2822 | if (rc) | 2828 | if (rc) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5b1ed240bf18..57348f2b49a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) | |||
10279 | bp->sp_rtnl_state = 0; | 10279 | bp->sp_rtnl_state = 0; |
10280 | smp_mb(); | 10280 | smp_mb(); |
10281 | 10281 | ||
10282 | /* Immediately indicate link as down */ | ||
10283 | bp->link_vars.link_up = 0; | ||
10284 | bp->force_link_down = true; | ||
10285 | netif_carrier_off(bp->dev); | ||
10286 | BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); | ||
10287 | |||
10282 | bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); | 10288 | bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); |
10283 | /* When ret value shows failure of allocation failure, | 10289 | /* When ret value shows failure of allocation failure, |
10284 | * the nic is rebooted again. If open still fails, a error | 10290 | * the nic is rebooted again. If open still fails, a error |
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 30273a7717e2..4fd829b5e65d 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, | |||
660 | id_tbl->max = size; | 660 | id_tbl->max = size; |
661 | id_tbl->next = next; | 661 | id_tbl->next = next; |
662 | spin_lock_init(&id_tbl->lock); | 662 | spin_lock_init(&id_tbl->lock); |
663 | id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL); | 663 | id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); |
664 | if (!id_tbl->table) | 664 | if (!id_tbl->table) |
665 | return -ENOMEM; | 665 | return -ENOMEM; |
666 | 666 | ||
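Note on the cnic hunk above: the table backing the ID bitmap is now sized as `BITS_TO_LONGS(size)` elements of `sizeof(long)` rather than a hand-rolled `DIV_ROUND_UP(size, 32) * 4`, so the allocation matches the unsigned-long granularity the bitmap helpers operate on even where long is 64 bits. A hedged user-space sketch of the same sizing rule, with the macros re-derived locally rather than taken from kernel headers:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Round a bit count up to whole unsigned longs, mirroring BITS_TO_LONGS. */
#define BITS_PER_ULONG (sizeof(unsigned long) * CHAR_BIT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_TO_ULONGS(nr) DIV_ROUND_UP(nr, BITS_PER_ULONG)

int main(void)
{
	size_t nbits = 1000;
	/* calloc() zeroes the bitmap, much like kcalloc(..., GFP_KERNEL). */
	unsigned long *bitmap = calloc(BITS_TO_ULONGS(nbits), sizeof(unsigned long));

	if (!bitmap)
		return 1;
	printf("%zu bits -> %zu longs (%zu bytes)\n", nbits,
	       (size_t)BITS_TO_ULONGS(nbits),
	       BITS_TO_ULONGS(nbits) * sizeof(unsigned long));
	free(bitmap);
	return 0;
}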
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 3e93df5d4e3b..96cc03a6d942 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
@@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev) | |||
3726 | int err; | 3726 | int err; |
3727 | u32 reg; | 3727 | u32 reg; |
3728 | 3728 | ||
3729 | bp->queues[0].bp = bp; | ||
3730 | |||
3729 | dev->netdev_ops = &at91ether_netdev_ops; | 3731 | dev->netdev_ops = &at91ether_netdev_ops; |
3730 | dev->ethtool_ops = &macb_ethtool_ops; | 3732 | dev->ethtool_ops = &macb_ethtool_ops; |
3731 | 3733 | ||
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 5f4e1ffa7b95..ab02057ac730 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | |||
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); | |||
125 | /* Default alignment for start of data in an Rx FD */ | 125 | /* Default alignment for start of data in an Rx FD */ |
126 | #define DPAA_FD_DATA_ALIGNMENT 16 | 126 | #define DPAA_FD_DATA_ALIGNMENT 16 |
127 | 127 | ||
128 | /* The DPAA requires 256 bytes reserved and mapped for the SGT */ | ||
129 | #define DPAA_SGT_SIZE 256 | ||
130 | |||
128 | /* Values for the L3R field of the FM Parse Results | 131 | /* Values for the L3R field of the FM Parse Results |
129 | */ | 132 | */ |
130 | /* L3 Type field: First IP Present IPv4 */ | 133 | /* L3 Type field: First IP Present IPv4 */ |
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, | |||
1617 | 1620 | ||
1618 | if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { | 1621 | if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { |
1619 | nr_frags = skb_shinfo(skb)->nr_frags; | 1622 | nr_frags = skb_shinfo(skb)->nr_frags; |
1620 | dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + | 1623 | dma_unmap_single(dev, addr, |
1621 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | 1624 | qm_fd_get_offset(fd) + DPAA_SGT_SIZE, |
1622 | dma_dir); | 1625 | dma_dir); |
1623 | 1626 | ||
1624 | /* The sgt buffer has been allocated with netdev_alloc_frag(), | 1627 | /* The sgt buffer has been allocated with netdev_alloc_frag(), |
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, | |||
1903 | void *sgt_buf; | 1906 | void *sgt_buf; |
1904 | 1907 | ||
1905 | /* get a page frag to store the SGTable */ | 1908 | /* get a page frag to store the SGTable */ |
1906 | sz = SKB_DATA_ALIGN(priv->tx_headroom + | 1909 | sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE); |
1907 | sizeof(struct qm_sg_entry) * (1 + nr_frags)); | ||
1908 | sgt_buf = netdev_alloc_frag(sz); | 1910 | sgt_buf = netdev_alloc_frag(sz); |
1909 | if (unlikely(!sgt_buf)) { | 1911 | if (unlikely(!sgt_buf)) { |
1910 | netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", | 1912 | netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", |
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, | |||
1972 | skbh = (struct sk_buff **)buffer_start; | 1974 | skbh = (struct sk_buff **)buffer_start; |
1973 | *skbh = skb; | 1975 | *skbh = skb; |
1974 | 1976 | ||
1975 | addr = dma_map_single(dev, buffer_start, priv->tx_headroom + | 1977 | addr = dma_map_single(dev, buffer_start, |
1976 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | 1978 | priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); |
1977 | dma_dir); | ||
1978 | if (unlikely(dma_mapping_error(dev, addr))) { | 1979 | if (unlikely(dma_mapping_error(dev, addr))) { |
1979 | dev_err(dev, "DMA mapping failed"); | 1980 | dev_err(dev, "DMA mapping failed"); |
1980 | err = -EINVAL; | 1981 | err = -EINVAL; |
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ce6e24c74978..ecbf6187e13a 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c | |||
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs { | |||
324 | #define HWP_HXS_PHE_REPORT 0x00000800 | 324 | #define HWP_HXS_PHE_REPORT 0x00000800 |
325 | #define HWP_HXS_PCAC_PSTAT 0x00000100 | 325 | #define HWP_HXS_PCAC_PSTAT 0x00000100 |
326 | #define HWP_HXS_PCAC_PSTOP 0x00000001 | 326 | #define HWP_HXS_PCAC_PSTOP 0x00000001 |
327 | #define HWP_HXS_TCP_OFFSET 0xA | ||
328 | #define HWP_HXS_UDP_OFFSET 0xB | ||
329 | #define HWP_HXS_SH_PAD_REM 0x80000000 | ||
330 | |||
327 | struct fman_port_hwp_regs { | 331 | struct fman_port_hwp_regs { |
328 | struct { | 332 | struct { |
329 | u32 ssa; /* Soft Sequence Attachment */ | 333 | u32 ssa; /* Soft Sequence Attachment */ |
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port) | |||
728 | iowrite32be(0xffffffff, ®s->pmda[i].lcv); | 732 | iowrite32be(0xffffffff, ®s->pmda[i].lcv); |
729 | } | 733 | } |
730 | 734 | ||
735 | /* Short packet padding removal from checksum calculation */ | ||
736 | iowrite32be(HWP_HXS_SH_PAD_REM, ®s->pmda[HWP_HXS_TCP_OFFSET].ssa); | ||
737 | iowrite32be(HWP_HXS_SH_PAD_REM, ®s->pmda[HWP_HXS_UDP_OFFSET].ssa); | ||
738 | |||
731 | start_port_hwp(port); | 739 | start_port_hwp(port); |
732 | } | 740 | } |
733 | 741 | ||
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index e2e5cdc7119c..4c0f7eda1166 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c | |||
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq) | |||
439 | { | 439 | { |
440 | struct hinic_rq *rq = rxq->rq; | 440 | struct hinic_rq *rq = rxq->rq; |
441 | 441 | ||
442 | irq_set_affinity_hint(rq->irq, NULL); | ||
442 | free_irq(rq->irq, rxq); | 443 | free_irq(rq->irq, rxq); |
443 | rx_del_napi(rxq); | 444 | rx_del_napi(rxq); |
444 | } | 445 | } |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index ed6dbcfd4e96..b151ae316546 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, | |||
2199 | return true; | 2199 | return true; |
2200 | } | 2200 | } |
2201 | 2201 | ||
2202 | #define I40E_XDP_PASS 0 | 2202 | #define I40E_XDP_PASS 0 |
2203 | #define I40E_XDP_CONSUMED 1 | 2203 | #define I40E_XDP_CONSUMED BIT(0) |
2204 | #define I40E_XDP_TX 2 | 2204 | #define I40E_XDP_TX BIT(1) |
2205 | #define I40E_XDP_REDIR BIT(2) | ||
2205 | 2206 | ||
2206 | static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, | 2207 | static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, |
2207 | struct i40e_ring *xdp_ring); | 2208 | struct i40e_ring *xdp_ring); |
@@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, | |||
2248 | break; | 2249 | break; |
2249 | case XDP_REDIRECT: | 2250 | case XDP_REDIRECT: |
2250 | err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); | 2251 | err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); |
2251 | result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED; | 2252 | result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED; |
2252 | break; | 2253 | break; |
2253 | default: | 2254 | default: |
2254 | bpf_warn_invalid_xdp_action(act); | 2255 | bpf_warn_invalid_xdp_action(act); |
@@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
2311 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 2312 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
2312 | struct sk_buff *skb = rx_ring->skb; | 2313 | struct sk_buff *skb = rx_ring->skb; |
2313 | u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); | 2314 | u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); |
2314 | bool failure = false, xdp_xmit = false; | 2315 | unsigned int xdp_xmit = 0; |
2316 | bool failure = false; | ||
2315 | struct xdp_buff xdp; | 2317 | struct xdp_buff xdp; |
2316 | 2318 | ||
2317 | xdp.rxq = &rx_ring->xdp_rxq; | 2319 | xdp.rxq = &rx_ring->xdp_rxq; |
@@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
2372 | } | 2374 | } |
2373 | 2375 | ||
2374 | if (IS_ERR(skb)) { | 2376 | if (IS_ERR(skb)) { |
2375 | if (PTR_ERR(skb) == -I40E_XDP_TX) { | 2377 | unsigned int xdp_res = -PTR_ERR(skb); |
2376 | xdp_xmit = true; | 2378 | |
2379 | if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { | ||
2380 | xdp_xmit |= xdp_res; | ||
2377 | i40e_rx_buffer_flip(rx_ring, rx_buffer, size); | 2381 | i40e_rx_buffer_flip(rx_ring, rx_buffer, size); |
2378 | } else { | 2382 | } else { |
2379 | rx_buffer->pagecnt_bias++; | 2383 | rx_buffer->pagecnt_bias++; |
@@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
2427 | total_rx_packets++; | 2431 | total_rx_packets++; |
2428 | } | 2432 | } |
2429 | 2433 | ||
2430 | if (xdp_xmit) { | 2434 | if (xdp_xmit & I40E_XDP_REDIR) |
2435 | xdp_do_flush_map(); | ||
2436 | |||
2437 | if (xdp_xmit & I40E_XDP_TX) { | ||
2431 | struct i40e_ring *xdp_ring = | 2438 | struct i40e_ring *xdp_ring = |
2432 | rx_ring->vsi->xdp_rings[rx_ring->queue_index]; | 2439 | rx_ring->vsi->xdp_rings[rx_ring->queue_index]; |
2433 | 2440 | ||
2434 | i40e_xdp_ring_update_tail(xdp_ring); | 2441 | i40e_xdp_ring_update_tail(xdp_ring); |
2435 | xdp_do_flush_map(); | ||
2436 | } | 2442 | } |
2437 | 2443 | ||
2438 | rx_ring->skb = skb; | 2444 | rx_ring->skb = skb; |
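Note on the i40e hunk above (the ixgbe hunk below makes the same change): the XDP verdicts become independent bit flags so a single xdp_xmit mask can record both "queued for Tx" and "redirected" outcomes across one polling batch, and each flush path runs only if its flag was set. A small stand-alone sketch of this accumulate-then-flush pattern; the flag names echo the driver's, the rest is illustrative.

#include <stdio.h>

/* Per-packet verdicts encoded as independent bits. */
#define XDP_RES_CONSUMED (1u << 0)
#define XDP_RES_TX       (1u << 1)
#define XDP_RES_REDIR    (1u << 2)

static unsigned int handle_packet(int i)
{
	/* Pretend every third packet is redirected, the rest queue for Tx. */
	return (i % 3 == 0) ? XDP_RES_REDIR : XDP_RES_TX;
}

int main(void)
{
	unsigned int xdp_xmit = 0;

	for (int i = 0; i < 8; i++)
		xdp_xmit |= handle_packet(i);	/* accumulate verdicts for the batch */

	/* Flush once per mechanism that was actually used in this batch. */
	if (xdp_xmit & XDP_RES_REDIR)
		printf("flush redirect maps\n");
	if (xdp_xmit & XDP_RES_TX)
		printf("bump Tx ring tail\n");
	return 0;
}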
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e87dbbc9024..62e57b05a0ae 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, | |||
2186 | return skb; | 2186 | return skb; |
2187 | } | 2187 | } |
2188 | 2188 | ||
2189 | #define IXGBE_XDP_PASS 0 | 2189 | #define IXGBE_XDP_PASS 0 |
2190 | #define IXGBE_XDP_CONSUMED 1 | 2190 | #define IXGBE_XDP_CONSUMED BIT(0) |
2191 | #define IXGBE_XDP_TX 2 | 2191 | #define IXGBE_XDP_TX BIT(1) |
2192 | #define IXGBE_XDP_REDIR BIT(2) | ||
2192 | 2193 | ||
2193 | static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, | 2194 | static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, |
2194 | struct xdp_frame *xdpf); | 2195 | struct xdp_frame *xdpf); |
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, | |||
2225 | case XDP_REDIRECT: | 2226 | case XDP_REDIRECT: |
2226 | err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); | 2227 | err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); |
2227 | if (!err) | 2228 | if (!err) |
2228 | result = IXGBE_XDP_TX; | 2229 | result = IXGBE_XDP_REDIR; |
2229 | else | 2230 | else |
2230 | result = IXGBE_XDP_CONSUMED; | 2231 | result = IXGBE_XDP_CONSUMED; |
2231 | break; | 2232 | break; |
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
2285 | unsigned int mss = 0; | 2286 | unsigned int mss = 0; |
2286 | #endif /* IXGBE_FCOE */ | 2287 | #endif /* IXGBE_FCOE */ |
2287 | u16 cleaned_count = ixgbe_desc_unused(rx_ring); | 2288 | u16 cleaned_count = ixgbe_desc_unused(rx_ring); |
2288 | bool xdp_xmit = false; | 2289 | unsigned int xdp_xmit = 0; |
2289 | struct xdp_buff xdp; | 2290 | struct xdp_buff xdp; |
2290 | 2291 | ||
2291 | xdp.rxq = &rx_ring->xdp_rxq; | 2292 | xdp.rxq = &rx_ring->xdp_rxq; |
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
2328 | } | 2329 | } |
2329 | 2330 | ||
2330 | if (IS_ERR(skb)) { | 2331 | if (IS_ERR(skb)) { |
2331 | if (PTR_ERR(skb) == -IXGBE_XDP_TX) { | 2332 | unsigned int xdp_res = -PTR_ERR(skb); |
2332 | xdp_xmit = true; | 2333 | |
2334 | if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { | ||
2335 | xdp_xmit |= xdp_res; | ||
2333 | ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); | 2336 | ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); |
2334 | } else { | 2337 | } else { |
2335 | rx_buffer->pagecnt_bias++; | 2338 | rx_buffer->pagecnt_bias++; |
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
2401 | total_rx_packets++; | 2404 | total_rx_packets++; |
2402 | } | 2405 | } |
2403 | 2406 | ||
2404 | if (xdp_xmit) { | 2407 | if (xdp_xmit & IXGBE_XDP_REDIR) |
2408 | xdp_do_flush_map(); | ||
2409 | |||
2410 | if (xdp_xmit & IXGBE_XDP_TX) { | ||
2405 | struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; | 2411 | struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; |
2406 | 2412 | ||
2407 | /* Force memory writes to complete before letting h/w | 2413 | /* Force memory writes to complete before letting h/w |
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
2409 | */ | 2415 | */ |
2410 | wmb(); | 2416 | wmb(); |
2411 | writel(ring->next_to_use, ring->tail); | 2417 | writel(ring->next_to_use, ring->tail); |
2412 | |||
2413 | xdp_do_flush_map(); | ||
2414 | } | 2418 | } |
2415 | 2419 | ||
2416 | u64_stats_update_begin(&rx_ring->syncp); | 2420 | u64_stats_update_begin(&rx_ring->syncp); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 487388aed98f..384c1fa49081 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work) | |||
807 | unsigned long flags; | 807 | unsigned long flags; |
808 | bool poll_cmd = ent->polling; | 808 | bool poll_cmd = ent->polling; |
809 | int alloc_ret; | 809 | int alloc_ret; |
810 | int cmd_mode; | ||
810 | 811 | ||
811 | sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; | 812 | sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; |
812 | down(sem); | 813 | down(sem); |
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work) | |||
853 | set_signature(ent, !cmd->checksum_disabled); | 854 | set_signature(ent, !cmd->checksum_disabled); |
854 | dump_command(dev, ent, 1); | 855 | dump_command(dev, ent, 1); |
855 | ent->ts1 = ktime_get_ns(); | 856 | ent->ts1 = ktime_get_ns(); |
857 | cmd_mode = cmd->mode; | ||
856 | 858 | ||
857 | if (ent->callback) | 859 | if (ent->callback) |
858 | schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); | 860 | schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); |
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work) | |||
877 | iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); | 879 | iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); |
878 | mmiowb(); | 880 | mmiowb(); |
879 | /* if not in polling don't use ent after this point */ | 881 | /* if not in polling don't use ent after this point */ |
880 | if (cmd->mode == CMD_MODE_POLLING || poll_cmd) { | 882 | if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { |
881 | poll_timeout(ent); | 883 | poll_timeout(ent); |
882 | /* make sure we read the descriptor after ownership is SW */ | 884 | /* make sure we read the descriptor after ownership is SW */ |
883 | rmb(); | 885 | rmb(); |
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, | |||
1276 | { | 1278 | { |
1277 | struct mlx5_core_dev *dev = filp->private_data; | 1279 | struct mlx5_core_dev *dev = filp->private_data; |
1278 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; | 1280 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; |
1279 | char outlen_str[8]; | 1281 | char outlen_str[8] = {0}; |
1280 | int outlen; | 1282 | int outlen; |
1281 | void *ptr; | 1283 | void *ptr; |
1282 | int err; | 1284 | int err; |
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, | |||
1291 | if (copy_from_user(outlen_str, buf, count)) | 1293 | if (copy_from_user(outlen_str, buf, count)) |
1292 | return -EFAULT; | 1294 | return -EFAULT; |
1293 | 1295 | ||
1294 | outlen_str[7] = 0; | ||
1295 | |||
1296 | err = sscanf(outlen_str, "%d", &outlen); | 1296 | err = sscanf(outlen_str, "%d", &outlen); |
1297 | if (err < 0) | 1297 | if (err < 0) |
1298 | return err; | 1298 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 56c1b6f5593e..dae4156a710d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) | |||
2846 | mlx5e_activate_channels(&priv->channels); | 2846 | mlx5e_activate_channels(&priv->channels); |
2847 | netif_tx_start_all_queues(priv->netdev); | 2847 | netif_tx_start_all_queues(priv->netdev); |
2848 | 2848 | ||
2849 | if (MLX5_VPORT_MANAGER(priv->mdev)) | 2849 | if (MLX5_ESWITCH_MANAGER(priv->mdev)) |
2850 | mlx5e_add_sqs_fwd_rules(priv); | 2850 | mlx5e_add_sqs_fwd_rules(priv); |
2851 | 2851 | ||
2852 | mlx5e_wait_channels_min_rx_wqes(&priv->channels); | 2852 | mlx5e_wait_channels_min_rx_wqes(&priv->channels); |
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) | |||
2857 | { | 2857 | { |
2858 | mlx5e_redirect_rqts_to_drop(priv); | 2858 | mlx5e_redirect_rqts_to_drop(priv); |
2859 | 2859 | ||
2860 | if (MLX5_VPORT_MANAGER(priv->mdev)) | 2860 | if (MLX5_ESWITCH_MANAGER(priv->mdev)) |
2861 | mlx5e_remove_sqs_fwd_rules(priv); | 2861 | mlx5e_remove_sqs_fwd_rules(priv); |
2862 | 2862 | ||
2863 | /* FIXME: This is a W/A only for tx timeout watch dog false alarm when | 2863 | /* FIXME: This is a W/A only for tx timeout watch dog false alarm when |
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) | |||
4597 | mlx5e_set_netdev_dev_addr(netdev); | 4597 | mlx5e_set_netdev_dev_addr(netdev); |
4598 | 4598 | ||
4599 | #if IS_ENABLED(CONFIG_MLX5_ESWITCH) | 4599 | #if IS_ENABLED(CONFIG_MLX5_ESWITCH) |
4600 | if (MLX5_VPORT_MANAGER(mdev)) | 4600 | if (MLX5_ESWITCH_MANAGER(mdev)) |
4601 | netdev->switchdev_ops = &mlx5e_switchdev_ops; | 4601 | netdev->switchdev_ops = &mlx5e_switchdev_ops; |
4602 | #endif | 4602 | #endif |
4603 | 4603 | ||
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) | |||
4753 | 4753 | ||
4754 | mlx5e_enable_async_events(priv); | 4754 | mlx5e_enable_async_events(priv); |
4755 | 4755 | ||
4756 | if (MLX5_VPORT_MANAGER(priv->mdev)) | 4756 | if (MLX5_ESWITCH_MANAGER(priv->mdev)) |
4757 | mlx5e_register_vport_reps(priv); | 4757 | mlx5e_register_vport_reps(priv); |
4758 | 4758 | ||
4759 | if (netdev->reg_state != NETREG_REGISTERED) | 4759 | if (netdev->reg_state != NETREG_REGISTERED) |
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) | |||
4788 | 4788 | ||
4789 | queue_work(priv->wq, &priv->set_rx_mode_work); | 4789 | queue_work(priv->wq, &priv->set_rx_mode_work); |
4790 | 4790 | ||
4791 | if (MLX5_VPORT_MANAGER(priv->mdev)) | 4791 | if (MLX5_ESWITCH_MANAGER(priv->mdev)) |
4792 | mlx5e_unregister_vport_reps(priv); | 4792 | mlx5e_unregister_vport_reps(priv); |
4793 | 4793 | ||
4794 | mlx5e_disable_async_events(priv); | 4794 | mlx5e_disable_async_events(priv); |
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) | |||
4972 | return NULL; | 4972 | return NULL; |
4973 | 4973 | ||
4974 | #ifdef CONFIG_MLX5_ESWITCH | 4974 | #ifdef CONFIG_MLX5_ESWITCH |
4975 | if (MLX5_VPORT_MANAGER(mdev)) { | 4975 | if (MLX5_ESWITCH_MANAGER(mdev)) { |
4976 | rpriv = mlx5e_alloc_nic_rep_priv(mdev); | 4976 | rpriv = mlx5e_alloc_nic_rep_priv(mdev); |
4977 | if (!rpriv) { | 4977 | if (!rpriv) { |
4978 | mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); | 4978 | mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 57987f6546e8..2b8040a3cdbd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) | |||
823 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | 823 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
824 | struct mlx5_eswitch_rep *rep; | 824 | struct mlx5_eswitch_rep *rep; |
825 | 825 | ||
826 | if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) | 826 | if (!MLX5_ESWITCH_MANAGER(priv->mdev)) |
827 | return false; | 827 | return false; |
828 | 828 | ||
829 | rep = rpriv->rep; | 829 | rep = rpriv->rep; |
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) | |||
837 | static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) | 837 | static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) |
838 | { | 838 | { |
839 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | 839 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
840 | struct mlx5_eswitch_rep *rep = rpriv->rep; | 840 | struct mlx5_eswitch_rep *rep; |
841 | 841 | ||
842 | if (!MLX5_ESWITCH_MANAGER(priv->mdev)) | ||
843 | return false; | ||
844 | |||
845 | rep = rpriv->rep; | ||
842 | if (rep && rep->vport != FDB_UPLINK_VPORT) | 846 | if (rep && rep->vport != FDB_UPLINK_VPORT) |
843 | return true; | 847 | return true; |
844 | 848 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f63dfbcd29fe..b79d74860a30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) | |||
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | /* Public E-Switch API */ | 1596 | /* Public E-Switch API */ |
1597 | #define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) | 1597 | #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) |
1598 | |||
1598 | 1599 | ||
1599 | int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) | 1600 | int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) |
1600 | { | 1601 | { |
1601 | int err; | 1602 | int err; |
1602 | int i, enabled_events; | 1603 | int i, enabled_events; |
1603 | 1604 | ||
1604 | if (!ESW_ALLOWED(esw)) | 1605 | if (!ESW_ALLOWED(esw) || |
1605 | return 0; | ||
1606 | |||
1607 | if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || | ||
1608 | !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { | 1606 | !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { |
1609 | esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); | 1607 | esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); |
1610 | return -EOPNOTSUPP; | 1608 | return -EOPNOTSUPP; |
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
1806 | u64 node_guid; | 1804 | u64 node_guid; |
1807 | int err = 0; | 1805 | int err = 0; |
1808 | 1806 | ||
1809 | if (!ESW_ALLOWED(esw)) | 1807 | if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) |
1810 | return -EPERM; | 1808 | return -EPERM; |
1811 | if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) | 1809 | if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) |
1812 | return -EINVAL; | 1810 | return -EINVAL; |
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, | |||
1883 | { | 1881 | { |
1884 | struct mlx5_vport *evport; | 1882 | struct mlx5_vport *evport; |
1885 | 1883 | ||
1886 | if (!ESW_ALLOWED(esw)) | 1884 | if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) |
1887 | return -EPERM; | 1885 | return -EPERM; |
1888 | if (!LEGAL_VPORT(esw, vport)) | 1886 | if (!LEGAL_VPORT(esw, vport)) |
1889 | return -EINVAL; | 1887 | return -EINVAL; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index cecd201f0b73..91f1209886ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) | |||
1079 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | 1079 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) |
1080 | return -EOPNOTSUPP; | 1080 | return -EOPNOTSUPP; |
1081 | 1081 | ||
1082 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 1082 | if(!MLX5_ESWITCH_MANAGER(dev)) |
1083 | return -EOPNOTSUPP; | 1083 | return -EPERM; |
1084 | 1084 | ||
1085 | if (dev->priv.eswitch->mode == SRIOV_NONE) | 1085 | if (dev->priv.eswitch->mode == SRIOV_NONE) |
1086 | return -EOPNOTSUPP; | 1086 | return -EOPNOTSUPP; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 49a75d31185e..f1a86cea86a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/mlx5/driver.h> | 34 | #include <linux/mlx5/driver.h> |
35 | #include <linux/mlx5/eswitch.h> | ||
35 | 36 | ||
36 | #include "mlx5_core.h" | 37 | #include "mlx5_core.h" |
37 | #include "fs_core.h" | 38 | #include "fs_core.h" |
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) | |||
2652 | goto err; | 2653 | goto err; |
2653 | } | 2654 | } |
2654 | 2655 | ||
2655 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { | 2656 | if (MLX5_ESWITCH_MANAGER(dev)) { |
2656 | if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { | 2657 | if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { |
2657 | err = init_fdb_root_ns(steering); | 2658 | err = init_fdb_root_ns(steering); |
2658 | if (err) | 2659 | if (err) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index afd9f4fa22f4..41ad24f0de2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include <linux/mlx5/driver.h> | 33 | #include <linux/mlx5/driver.h> |
34 | #include <linux/mlx5/cmd.h> | 34 | #include <linux/mlx5/cmd.h> |
35 | #include <linux/mlx5/eswitch.h> | ||
35 | #include <linux/module.h> | 36 | #include <linux/module.h> |
36 | #include "mlx5_core.h" | 37 | #include "mlx5_core.h" |
37 | #include "../../mlxfw/mlxfw.h" | 38 | #include "../../mlxfw/mlxfw.h" |
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) | |||
159 | } | 160 | } |
160 | 161 | ||
161 | if (MLX5_CAP_GEN(dev, vport_group_manager) && | 162 | if (MLX5_CAP_GEN(dev, vport_group_manager) && |
162 | MLX5_CAP_GEN(dev, eswitch_flow_table)) { | 163 | MLX5_ESWITCH_MANAGER(dev)) { |
163 | err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); | 164 | err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); |
164 | if (err) | 165 | if (err) |
165 | return err; | 166 | return err; |
166 | } | 167 | } |
167 | 168 | ||
168 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { | 169 | if (MLX5_ESWITCH_MANAGER(dev)) { |
169 | err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); | 170 | err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); |
170 | if (err) | 171 | if (err) |
171 | return err; | 172 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 7cb67122e8b5..98359559c77e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/etherdevice.h> | 33 | #include <linux/etherdevice.h> |
34 | #include <linux/mlx5/driver.h> | 34 | #include <linux/mlx5/driver.h> |
35 | #include <linux/mlx5/mlx5_ifc.h> | 35 | #include <linux/mlx5/mlx5_ifc.h> |
36 | #include <linux/mlx5/eswitch.h> | ||
36 | #include "mlx5_core.h" | 37 | #include "mlx5_core.h" |
37 | #include "lib/mpfs.h" | 38 | #include "lib/mpfs.h" |
38 | 39 | ||
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) | |||
98 | int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); | 99 | int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); |
99 | struct mlx5_mpfs *mpfs; | 100 | struct mlx5_mpfs *mpfs; |
100 | 101 | ||
101 | if (!MLX5_VPORT_MANAGER(dev)) | 102 | if (!MLX5_ESWITCH_MANAGER(dev)) |
102 | return 0; | 103 | return 0; |
103 | 104 | ||
104 | mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); | 105 | mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); |
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) | |||
122 | { | 123 | { |
123 | struct mlx5_mpfs *mpfs = dev->priv.mpfs; | 124 | struct mlx5_mpfs *mpfs = dev->priv.mpfs; |
124 | 125 | ||
125 | if (!MLX5_VPORT_MANAGER(dev)) | 126 | if (!MLX5_ESWITCH_MANAGER(dev)) |
126 | return; | 127 | return; |
127 | 128 | ||
128 | WARN_ON(!hlist_empty(mpfs->hash)); | 129 | WARN_ON(!hlist_empty(mpfs->hash)); |
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) | |||
137 | u32 index; | 138 | u32 index; |
138 | int err; | 139 | int err; |
139 | 140 | ||
140 | if (!MLX5_VPORT_MANAGER(dev)) | 141 | if (!MLX5_ESWITCH_MANAGER(dev)) |
141 | return 0; | 142 | return 0; |
142 | 143 | ||
143 | mutex_lock(&mpfs->lock); | 144 | mutex_lock(&mpfs->lock); |
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) | |||
179 | int err = 0; | 180 | int err = 0; |
180 | u32 index; | 181 | u32 index; |
181 | 182 | ||
182 | if (!MLX5_VPORT_MANAGER(dev)) | 183 | if (!MLX5_ESWITCH_MANAGER(dev)) |
183 | return 0; | 184 | return 0; |
184 | 185 | ||
185 | mutex_lock(&mpfs->lock); | 186 | mutex_lock(&mpfs->lock); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index fa9d0760dd36..31a9cbd85689 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c | |||
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); | |||
701 | static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, | 701 | static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, |
702 | int inlen) | 702 | int inlen) |
703 | { | 703 | { |
704 | u32 out[MLX5_ST_SZ_DW(qtct_reg)]; | 704 | u32 out[MLX5_ST_SZ_DW(qetc_reg)]; |
705 | 705 | ||
706 | if (!MLX5_CAP_GEN(mdev, ets)) | 706 | if (!MLX5_CAP_GEN(mdev, ets)) |
707 | return -EOPNOTSUPP; | 707 | return -EOPNOTSUPP; |
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, | |||
713 | static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, | 713 | static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, |
714 | int outlen) | 714 | int outlen) |
715 | { | 715 | { |
716 | u32 in[MLX5_ST_SZ_DW(qtct_reg)]; | 716 | u32 in[MLX5_ST_SZ_DW(qetc_reg)]; |
717 | 717 | ||
718 | if (!MLX5_CAP_GEN(mdev, ets)) | 718 | if (!MLX5_CAP_GEN(mdev, ets)) |
719 | return -EOPNOTSUPP; | 719 | return -EOPNOTSUPP; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 2a8b529ce6dd..a0674962f02c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c | |||
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) | |||
88 | return -EBUSY; | 88 | return -EBUSY; |
89 | } | 89 | } |
90 | 90 | ||
91 | if (!MLX5_ESWITCH_MANAGER(dev)) | ||
92 | goto enable_vfs_hca; | ||
93 | |||
91 | err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); | 94 | err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); |
92 | if (err) { | 95 | if (err) { |
93 | mlx5_core_warn(dev, | 96 | mlx5_core_warn(dev, |
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) | |||
95 | return err; | 98 | return err; |
96 | } | 99 | } |
97 | 100 | ||
101 | enable_vfs_hca: | ||
98 | for (vf = 0; vf < num_vfs; vf++) { | 102 | for (vf = 0; vf < num_vfs; vf++) { |
99 | err = mlx5_core_enable_hca(dev, vf + 1); | 103 | err = mlx5_core_enable_hca(dev, vf + 1); |
100 | if (err) { | 104 | if (err) { |
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) | |||
140 | } | 144 | } |
141 | 145 | ||
142 | out: | 146 | out: |
143 | mlx5_eswitch_disable_sriov(dev->priv.eswitch); | 147 | if (MLX5_ESWITCH_MANAGER(dev)) |
148 | mlx5_eswitch_disable_sriov(dev->priv.eswitch); | ||
144 | 149 | ||
145 | if (mlx5_wait_for_vf_pages(dev)) | 150 | if (mlx5_wait_for_vf_pages(dev)) |
146 | mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); | 151 | mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 719cecb182c6..7eecd5b07bb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c | |||
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | |||
549 | return -EINVAL; | 549 | return -EINVAL; |
550 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) | 550 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) |
551 | return -EACCES; | 551 | return -EACCES; |
552 | if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) | ||
553 | return -EOPNOTSUPP; | ||
554 | 552 | ||
555 | in = kvzalloc(inlen, GFP_KERNEL); | 553 | in = kvzalloc(inlen, GFP_KERNEL); |
556 | if (!in) | 554 | if (!in) |
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index fcdfb8e7fdea..40216d56dddc 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c | |||
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, | |||
81 | 81 | ||
82 | ret = nfp_net_bpf_offload(nn, prog, running, extack); | 82 | ret = nfp_net_bpf_offload(nn, prog, running, extack); |
83 | /* Stop offload if replace not possible */ | 83 | /* Stop offload if replace not possible */ |
84 | if (ret && prog) | 84 | if (ret) |
85 | nfp_bpf_xdp_offload(app, nn, NULL, extack); | 85 | return ret; |
86 | 86 | ||
87 | nn->dp.bpf_offload_xdp = prog && !ret; | 87 | nn->dp.bpf_offload_xdp = !!prog; |
88 | return ret; | 88 | return ret; |
89 | } | 89 | } |
90 | 90 | ||
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, | |||
202 | if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) | 202 | if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
203 | return -EOPNOTSUPP; | 203 | return -EOPNOTSUPP; |
204 | 204 | ||
205 | if (tcf_block_shared(f->block)) | ||
206 | return -EOPNOTSUPP; | ||
207 | |||
205 | switch (f->command) { | 208 | switch (f->command) { |
206 | case TC_BLOCK_BIND: | 209 | case TC_BLOCK_BIND: |
207 | return tcf_block_cb_register(f->block, | 210 | return tcf_block_cb_register(f->block, |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 91935405f586..84f7a5dbea9d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c | |||
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, | |||
123 | NFP_FLOWER_MASK_MPLS_Q; | 123 | NFP_FLOWER_MASK_MPLS_Q; |
124 | 124 | ||
125 | frame->mpls_lse = cpu_to_be32(t_mpls); | 125 | frame->mpls_lse = cpu_to_be32(t_mpls); |
126 | } else if (dissector_uses_key(flow->dissector, | ||
127 | FLOW_DISSECTOR_KEY_BASIC)) { | ||
128 | /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q | ||
129 | * bit, which indicates an mpls ether type but without any | ||
130 | * mpls fields. | ||
131 | */ | ||
132 | struct flow_dissector_key_basic *key_basic; | ||
133 | |||
134 | key_basic = skb_flow_dissector_target(flow->dissector, | ||
135 | FLOW_DISSECTOR_KEY_BASIC, | ||
136 | flow->key); | ||
137 | if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || | ||
138 | key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) | ||
139 | frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); | ||
126 | } | 140 | } |
127 | } | 141 | } |
128 | 142 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c42e64f32333..525057bee0ed 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c | |||
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, | |||
264 | case cpu_to_be16(ETH_P_ARP): | 264 | case cpu_to_be16(ETH_P_ARP): |
265 | return -EOPNOTSUPP; | 265 | return -EOPNOTSUPP; |
266 | 266 | ||
267 | case cpu_to_be16(ETH_P_MPLS_UC): | ||
268 | case cpu_to_be16(ETH_P_MPLS_MC): | ||
269 | if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { | ||
270 | key_layer |= NFP_FLOWER_LAYER_MAC; | ||
271 | key_size += sizeof(struct nfp_flower_mac_mpls); | ||
272 | } | ||
273 | break; | ||
274 | |||
267 | /* Will be included in layer 2. */ | 275 | /* Will be included in layer 2. */ |
268 | case cpu_to_be16(ETH_P_8021Q): | 276 | case cpu_to_be16(ETH_P_8021Q): |
269 | break; | 277 | break; |
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, | |||
623 | if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) | 631 | if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
624 | return -EOPNOTSUPP; | 632 | return -EOPNOTSUPP; |
625 | 633 | ||
634 | if (tcf_block_shared(f->block)) | ||
635 | return -EOPNOTSUPP; | ||
636 | |||
626 | switch (f->command) { | 637 | switch (f->command) { |
627 | case TC_BLOCK_BIND: | 638 | case TC_BLOCK_BIND: |
628 | return tcf_block_cb_register(f->block, | 639 | return tcf_block_cb_register(f->block, |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 46b76d5a726c..152283d7e59c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c | |||
@@ -240,7 +240,6 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) | |||
240 | return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); | 240 | return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); |
241 | 241 | ||
242 | pf->limit_vfs = ~0; | 242 | pf->limit_vfs = ~0; |
243 | pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */ | ||
244 | /* Allow any setting for backwards compatibility if symbol not found */ | 243 | /* Allow any setting for backwards compatibility if symbol not found */ |
245 | if (err == -ENOENT) | 244 | if (err == -ENOENT) |
246 | return 0; | 245 | return 0; |
@@ -668,7 +667,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, | |||
668 | 667 | ||
669 | err = nfp_net_pci_probe(pf); | 668 | err = nfp_net_pci_probe(pf); |
670 | if (err) | 669 | if (err) |
671 | goto err_sriov_unlimit; | 670 | goto err_fw_unload; |
672 | 671 | ||
673 | err = nfp_hwmon_register(pf); | 672 | err = nfp_hwmon_register(pf); |
674 | if (err) { | 673 | if (err) { |
@@ -680,8 +679,6 @@ static int nfp_pci_probe(struct pci_dev *pdev, | |||
680 | 679 | ||
681 | err_net_remove: | 680 | err_net_remove: |
682 | nfp_net_pci_remove(pf); | 681 | nfp_net_pci_remove(pf); |
683 | err_sriov_unlimit: | ||
684 | pci_sriov_set_totalvfs(pf->pdev, 0); | ||
685 | err_fw_unload: | 682 | err_fw_unload: |
686 | kfree(pf->rtbl); | 683 | kfree(pf->rtbl); |
687 | nfp_mip_close(pf->mip); | 684 | nfp_mip_close(pf->mip); |
@@ -715,7 +712,6 @@ static void nfp_pci_remove(struct pci_dev *pdev) | |||
715 | nfp_hwmon_unregister(pf); | 712 | nfp_hwmon_unregister(pf); |
716 | 713 | ||
717 | nfp_pcie_sriov_disable(pdev); | 714 | nfp_pcie_sriov_disable(pdev); |
718 | pci_sriov_set_totalvfs(pf->pdev, 0); | ||
719 | 715 | ||
720 | nfp_net_pci_remove(pf); | 716 | nfp_net_pci_remove(pf); |
721 | 717 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index cd34097b79f1..37a6d7822a38 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c | |||
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp) | |||
232 | err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), | 232 | err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), |
233 | nfp_resource_address(state->res), | 233 | nfp_resource_address(state->res), |
234 | fwinf, sizeof(*fwinf)); | 234 | fwinf, sizeof(*fwinf)); |
235 | if (err < sizeof(*fwinf)) | 235 | if (err < (int)sizeof(*fwinf)) |
236 | goto err_release; | 236 | goto err_release; |
237 | 237 | ||
238 | if (!nffw_res_flg_init_get(fwinf)) | 238 | if (!nffw_res_flg_init_get(fwinf)) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f0b01385d5cb..e0680ce91328 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
@@ -709,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, | |||
709 | p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; | 709 | p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; |
710 | 710 | ||
711 | memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, | 711 | memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, |
712 | ARRAY_SIZE(p_local->local_chassis_id)); | 712 | sizeof(p_local->local_chassis_id)); |
713 | memcpy(params->lldp_local.local_port_id, p_local->local_port_id, | 713 | memcpy(params->lldp_local.local_port_id, p_local->local_port_id, |
714 | ARRAY_SIZE(p_local->local_port_id)); | 714 | sizeof(p_local->local_port_id)); |
715 | } | 715 | } |
716 | 716 | ||
717 | static void | 717 | static void |
@@ -723,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, | |||
723 | p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; | 723 | p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; |
724 | 724 | ||
725 | memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, | 725 | memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, |
726 | ARRAY_SIZE(p_remote->peer_chassis_id)); | 726 | sizeof(p_remote->peer_chassis_id)); |
727 | memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, | 727 | memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, |
728 | ARRAY_SIZE(p_remote->peer_port_id)); | 728 | sizeof(p_remote->peer_port_id)); |
729 | } | 729 | } |
730 | 730 | ||
731 | static int | 731 | static int |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 329781cda77f..e5249b4741d0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) | |||
1804 | DP_INFO(p_hwfn, "Failed to update driver state\n"); | 1804 | DP_INFO(p_hwfn, "Failed to update driver state\n"); |
1805 | 1805 | ||
1806 | rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, | 1806 | rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, |
1807 | QED_OV_ESWITCH_VEB); | 1807 | QED_OV_ESWITCH_NONE); |
1808 | if (rc) | 1808 | if (rc) |
1809 | DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); | 1809 | DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); |
1810 | } | 1810 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 5c10fd7210c3..0cbc74d6ca8b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, | |||
789 | /* We want a minimum of one slowpath and one fastpath vector per hwfn */ | 789 | /* We want a minimum of one slowpath and one fastpath vector per hwfn */ |
790 | cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; | 790 | cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; |
791 | 791 | ||
792 | if (is_kdump_kernel()) { | ||
793 | DP_INFO(cdev, | ||
794 | "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", | ||
795 | cdev->int_params.in.min_msix_cnt); | ||
796 | cdev->int_params.in.num_vectors = | ||
797 | cdev->int_params.in.min_msix_cnt; | ||
798 | } | ||
799 | |||
792 | rc = qed_set_int_mode(cdev, false); | 800 | rc = qed_set_int_mode(cdev, false); |
793 | if (rc) { | 801 | if (rc) { |
794 | DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); | 802 | DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index f01bf52bc381..fd59cf45f4be 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c | |||
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, | |||
4513 | static int qed_sriov_enable(struct qed_dev *cdev, int num) | 4513 | static int qed_sriov_enable(struct qed_dev *cdev, int num) |
4514 | { | 4514 | { |
4515 | struct qed_iov_vf_init_params params; | 4515 | struct qed_iov_vf_init_params params; |
4516 | struct qed_hwfn *hwfn; | ||
4517 | struct qed_ptt *ptt; | ||
4516 | int i, j, rc; | 4518 | int i, j, rc; |
4517 | 4519 | ||
4518 | if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { | 4520 | if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { |
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) | |||
4525 | 4527 | ||
4526 | /* Initialize HW for VF access */ | 4528 | /* Initialize HW for VF access */ |
4527 | for_each_hwfn(cdev, j) { | 4529 | for_each_hwfn(cdev, j) { |
4528 | struct qed_hwfn *hwfn = &cdev->hwfns[j]; | 4530 | hwfn = &cdev->hwfns[j]; |
4529 | struct qed_ptt *ptt = qed_ptt_acquire(hwfn); | 4531 | ptt = qed_ptt_acquire(hwfn); |
4530 | 4532 | ||
4531 | /* Make sure not to use more than 16 queues per VF */ | 4533 | /* Make sure not to use more than 16 queues per VF */ |
4532 | params.num_queues = min_t(int, | 4534 | params.num_queues = min_t(int, |
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) | |||
4562 | goto err; | 4564 | goto err; |
4563 | } | 4565 | } |
4564 | 4566 | ||
4567 | hwfn = QED_LEADING_HWFN(cdev); | ||
4568 | ptt = qed_ptt_acquire(hwfn); | ||
4569 | if (!ptt) { | ||
4570 | DP_ERR(hwfn, "Failed to acquire ptt\n"); | ||
4571 | rc = -EBUSY; | ||
4572 | goto err; | ||
4573 | } | ||
4574 | |||
4575 | rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); | ||
4576 | if (rc) | ||
4577 | DP_INFO(cdev, "Failed to update eswitch mode\n"); | ||
4578 | qed_ptt_release(hwfn, ptt); | ||
4579 | |||
4565 | return num; | 4580 | return num; |
4566 | 4581 | ||
4567 | err: | 4582 | err: |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 02adb513f475..013ff567283c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c | |||
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) | |||
337 | { | 337 | { |
338 | struct qede_ptp *ptp = edev->ptp; | 338 | struct qede_ptp *ptp = edev->ptp; |
339 | 339 | ||
340 | if (!ptp) | 340 | if (!ptp) { |
341 | return -EIO; | 341 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
342 | SOF_TIMESTAMPING_RX_SOFTWARE | | ||
343 | SOF_TIMESTAMPING_SOFTWARE; | ||
344 | info->phc_index = -1; | ||
345 | |||
346 | return 0; | ||
347 | } | ||
342 | 348 | ||
343 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | | 349 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
344 | SOF_TIMESTAMPING_RX_SOFTWARE | | 350 | SOF_TIMESTAMPING_RX_SOFTWARE | |
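Note on the qede_ptp.c hunk: get_ts_info() no longer fails with -EIO when no PTP clock is attached; it reports software-only timestamping so ethtool -T and SO_TIMESTAMPING users keep working. A sketch of the generic fallback, with a hypothetical driver-private check (the ethtool fields and SOF_* flags are the real kernel ones):

    /* Sketch: advertise software timestamping when no PHC exists. */
    if (!priv->ptp_clock) {
            info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                    SOF_TIMESTAMPING_RX_SOFTWARE |
                                    SOF_TIMESTAMPING_SOFTWARE;
            info->phc_index = -1;   /* no PTP hardware clock */
            return 0;
    }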
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 8edf20967c82..e045a5d6b938 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx) | |||
2794 | if (!state) | 2794 | if (!state) |
2795 | return -ENOMEM; | 2795 | return -ENOMEM; |
2796 | efx->filter_state = state; | 2796 | efx->filter_state = state; |
2797 | init_rwsem(&state->lock); | ||
2797 | 2798 | ||
2798 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; | 2799 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; |
2799 | table->id = EFX_FARCH_FILTER_TABLE_RX_IP; | 2800 | table->id = EFX_FARCH_FILTER_TABLE_RX_IP; |
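Note on the one-line sfc fix above: it initializes the filter table's rw_semaphore as soon as the state is allocated, so no reader or writer can ever take an uninitialized lock. A sketch of the general rule, with hypothetical names apart from init_rwsem():

    state = kzalloc(sizeof(*state), GFP_KERNEL);
    if (!state)
            return -ENOMEM;
    init_rwsem(&state->lock);       /* initialize before the pointer is published */
    efx->filter_state = state;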
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d37f17ca62fe..65bc3556bd8f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | |||
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) | |||
407 | } | 407 | } |
408 | } | 408 | } |
409 | 409 | ||
410 | static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) | ||
411 | { | ||
412 | u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); | ||
413 | |||
414 | value &= ~DMA_RBSZ_MASK; | ||
415 | value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK; | ||
416 | |||
417 | writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); | ||
418 | } | ||
419 | |||
410 | const struct stmmac_dma_ops dwmac4_dma_ops = { | 420 | const struct stmmac_dma_ops dwmac4_dma_ops = { |
411 | .reset = dwmac4_dma_reset, | 421 | .reset = dwmac4_dma_reset, |
412 | .init = dwmac4_dma_init, | 422 | .init = dwmac4_dma_init, |
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { | |||
431 | .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, | 441 | .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, |
432 | .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, | 442 | .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, |
433 | .enable_tso = dwmac4_enable_tso, | 443 | .enable_tso = dwmac4_enable_tso, |
444 | .set_bfsize = dwmac4_set_bfsize, | ||
434 | }; | 445 | }; |
435 | 446 | ||
436 | const struct stmmac_dma_ops dwmac410_dma_ops = { | 447 | const struct stmmac_dma_ops dwmac410_dma_ops = { |
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { | |||
457 | .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, | 468 | .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, |
458 | .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, | 469 | .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, |
459 | .enable_tso = dwmac4_enable_tso, | 470 | .enable_tso = dwmac4_enable_tso, |
471 | .set_bfsize = dwmac4_set_bfsize, | ||
460 | }; | 472 | }; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index c63c1fe3f26b..22a4a6dbb1a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | |||
@@ -120,6 +120,8 @@ | |||
120 | 120 | ||
121 | /* DMA Rx Channel X Control register defines */ | 121 | /* DMA Rx Channel X Control register defines */ |
122 | #define DMA_CONTROL_SR BIT(0) | 122 | #define DMA_CONTROL_SR BIT(0) |
123 | #define DMA_RBSZ_MASK GENMASK(14, 1) | ||
124 | #define DMA_RBSZ_SHIFT 1 | ||
123 | 125 | ||
124 | /* Interrupt status per channel */ | 126 | /* Interrupt status per channel */ |
125 | #define DMA_CHAN_STATUS_REB GENMASK(21, 19) | 127 | #define DMA_CHAN_STATUS_REB GENMASK(21, 19) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e44e7b26ce82..fe8b536b13f8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h | |||
@@ -183,6 +183,7 @@ struct stmmac_dma_ops { | |||
183 | void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); | 183 | void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); |
184 | void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); | 184 | void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); |
185 | void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); | 185 | void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); |
186 | void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); | ||
186 | }; | 187 | }; |
187 | 188 | ||
188 | #define stmmac_reset(__priv, __args...) \ | 189 | #define stmmac_reset(__priv, __args...) \ |
@@ -235,6 +236,8 @@ struct stmmac_dma_ops { | |||
235 | stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) | 236 | stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) |
236 | #define stmmac_enable_tso(__priv, __args...) \ | 237 | #define stmmac_enable_tso(__priv, __args...) \ |
237 | stmmac_do_void_callback(__priv, dma, enable_tso, __args) | 238 | stmmac_do_void_callback(__priv, dma, enable_tso, __args) |
239 | #define stmmac_set_dma_bfsize(__priv, __args...) \ | ||
240 | stmmac_do_void_callback(__priv, dma, set_bfsize, __args) | ||
238 | 241 | ||
239 | struct mac_device_info; | 242 | struct mac_device_info; |
240 | struct net_device; | 243 | struct net_device; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index cba46b62a1cd..60f59abab009 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1804,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | |||
1804 | 1804 | ||
1805 | stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, | 1805 | stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, |
1806 | rxfifosz, qmode); | 1806 | rxfifosz, qmode); |
1807 | stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, | ||
1808 | chan); | ||
1807 | } | 1809 | } |
1808 | 1810 | ||
1809 | for (chan = 0; chan < tx_channels_count; chan++) { | 1811 | for (chan = 0; chan < tx_channels_count; chan++) { |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 750eaa53bf0c..ada33c2d9ac2 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, | |||
476 | out_unlock: | 476 | out_unlock: |
477 | rcu_read_unlock(); | 477 | rcu_read_unlock(); |
478 | out: | 478 | out: |
479 | NAPI_GRO_CB(skb)->flush |= flush; | 479 | skb_gro_flush_final(skb, pp, flush); |
480 | 480 | ||
481 | return pp; | 481 | return pp; |
482 | } | 482 | } |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 1a924b867b07..4b6e308199d2 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net, | |||
210 | void netvsc_channel_cb(void *context); | 210 | void netvsc_channel_cb(void *context); |
211 | int netvsc_poll(struct napi_struct *napi, int budget); | 211 | int netvsc_poll(struct napi_struct *napi, int budget); |
212 | 212 | ||
213 | void rndis_set_subchannel(struct work_struct *w); | 213 | int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); |
214 | int rndis_filter_open(struct netvsc_device *nvdev); | 214 | int rndis_filter_open(struct netvsc_device *nvdev); |
215 | int rndis_filter_close(struct netvsc_device *nvdev); | 215 | int rndis_filter_close(struct netvsc_device *nvdev); |
216 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | 216 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5d5bd513847f..8e9d0ee1572b 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) | |||
65 | VM_PKT_DATA_INBAND, 0); | 65 | VM_PKT_DATA_INBAND, 0); |
66 | } | 66 | } |
67 | 67 | ||
68 | /* Worker to setup sub channels on initial setup | ||
69 | * Initial hotplug event occurs in softirq context | ||
70 | * and can't wait for channels. | ||
71 | */ | ||
72 | static void netvsc_subchan_work(struct work_struct *w) | ||
73 | { | ||
74 | struct netvsc_device *nvdev = | ||
75 | container_of(w, struct netvsc_device, subchan_work); | ||
76 | struct rndis_device *rdev; | ||
77 | int i, ret; | ||
78 | |||
79 | /* Avoid deadlock with device removal already under RTNL */ | ||
80 | if (!rtnl_trylock()) { | ||
81 | schedule_work(w); | ||
82 | return; | ||
83 | } | ||
84 | |||
85 | rdev = nvdev->extension; | ||
86 | if (rdev) { | ||
87 | ret = rndis_set_subchannel(rdev->ndev, nvdev); | ||
88 | if (ret == 0) { | ||
89 | netif_device_attach(rdev->ndev); | ||
90 | } else { | ||
91 | /* fallback to only primary channel */ | ||
92 | for (i = 1; i < nvdev->num_chn; i++) | ||
93 | netif_napi_del(&nvdev->chan_table[i].napi); | ||
94 | |||
95 | nvdev->max_chn = 1; | ||
96 | nvdev->num_chn = 1; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | rtnl_unlock(); | ||
101 | } | ||
102 | |||
68 | static struct netvsc_device *alloc_net_device(void) | 103 | static struct netvsc_device *alloc_net_device(void) |
69 | { | 104 | { |
70 | struct netvsc_device *net_device; | 105 | struct netvsc_device *net_device; |
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void) | |||
81 | 116 | ||
82 | init_completion(&net_device->channel_init_wait); | 117 | init_completion(&net_device->channel_init_wait); |
83 | init_waitqueue_head(&net_device->subchan_open); | 118 | init_waitqueue_head(&net_device->subchan_open); |
84 | INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); | 119 | INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); |
85 | 120 | ||
86 | return net_device; | 121 | return net_device; |
87 | } | 122 | } |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fe2256bf1d13..dd1d6e115145 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev, | |||
905 | if (IS_ERR(nvdev)) | 905 | if (IS_ERR(nvdev)) |
906 | return PTR_ERR(nvdev); | 906 | return PTR_ERR(nvdev); |
907 | 907 | ||
908 | /* Note: enable and attach happen when sub-channels setup */ | 908 | if (nvdev->num_chn > 1) { |
909 | ret = rndis_set_subchannel(ndev, nvdev); | ||
910 | |||
911 | /* if unavailable, just proceed with one queue */ | ||
912 | if (ret) { | ||
913 | nvdev->max_chn = 1; | ||
914 | nvdev->num_chn = 1; | ||
915 | } | ||
916 | } | ||
917 | |||
918 | /* In any case device is now ready */ | ||
919 | netif_device_attach(ndev); | ||
909 | 920 | ||
921 | /* Note: enable and attach happen when sub-channels setup */ | ||
910 | netif_carrier_off(ndev); | 922 | netif_carrier_off(ndev); |
911 | 923 | ||
912 | if (netif_running(ndev)) { | 924 | if (netif_running(ndev)) { |
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev, | |||
2089 | 2101 | ||
2090 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); | 2102 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); |
2091 | 2103 | ||
2104 | if (nvdev->num_chn > 1) | ||
2105 | schedule_work(&nvdev->subchan_work); | ||
2106 | |||
2092 | /* hw_features computed in rndis_netdev_set_hwcaps() */ | 2107 | /* hw_features computed in rndis_netdev_set_hwcaps() */ |
2093 | net->features = net->hw_features | | 2108 | net->features = net->hw_features | |
2094 | NETIF_F_HIGHDMA | NETIF_F_SG | | 2109 | NETIF_F_HIGHDMA | NETIF_F_SG | |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 5428bb261102..9b4e3c3787e5 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) | |||
1062 | * This breaks overlap of processing the host message for the | 1062 | * This breaks overlap of processing the host message for the |
1063 | * new primary channel with the initialization of sub-channels. | 1063 | * new primary channel with the initialization of sub-channels. |
1064 | */ | 1064 | */ |
1065 | void rndis_set_subchannel(struct work_struct *w) | 1065 | int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) |
1066 | { | 1066 | { |
1067 | struct netvsc_device *nvdev | ||
1068 | = container_of(w, struct netvsc_device, subchan_work); | ||
1069 | struct nvsp_message *init_packet = &nvdev->channel_init_pkt; | 1067 | struct nvsp_message *init_packet = &nvdev->channel_init_pkt; |
1070 | struct net_device_context *ndev_ctx; | 1068 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
1071 | struct rndis_device *rdev; | 1069 | struct hv_device *hv_dev = ndev_ctx->device_ctx; |
1072 | struct net_device *ndev; | 1070 | struct rndis_device *rdev = nvdev->extension; |
1073 | struct hv_device *hv_dev; | ||
1074 | int i, ret; | 1071 | int i, ret; |
1075 | 1072 | ||
1076 | if (!rtnl_trylock()) { | 1073 | ASSERT_RTNL(); |
1077 | schedule_work(w); | ||
1078 | return; | ||
1079 | } | ||
1080 | |||
1081 | rdev = nvdev->extension; | ||
1082 | if (!rdev) | ||
1083 | goto unlock; /* device was removed */ | ||
1084 | |||
1085 | ndev = rdev->ndev; | ||
1086 | ndev_ctx = netdev_priv(ndev); | ||
1087 | hv_dev = ndev_ctx->device_ctx; | ||
1088 | 1074 | ||
1089 | memset(init_packet, 0, sizeof(struct nvsp_message)); | 1075 | memset(init_packet, 0, sizeof(struct nvsp_message)); |
1090 | init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; | 1076 | init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; |
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w) | |||
1100 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 1086 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
1101 | if (ret) { | 1087 | if (ret) { |
1102 | netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); | 1088 | netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); |
1103 | goto failed; | 1089 | return ret; |
1104 | } | 1090 | } |
1105 | 1091 | ||
1106 | wait_for_completion(&nvdev->channel_init_wait); | 1092 | wait_for_completion(&nvdev->channel_init_wait); |
1107 | if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { | 1093 | if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { |
1108 | netdev_err(ndev, "sub channel request failed\n"); | 1094 | netdev_err(ndev, "sub channel request failed\n"); |
1109 | goto failed; | 1095 | return -EIO; |
1110 | } | 1096 | } |
1111 | 1097 | ||
1112 | nvdev->num_chn = 1 + | 1098 | nvdev->num_chn = 1 + |
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w) | |||
1125 | for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) | 1111 | for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) |
1126 | ndev_ctx->tx_table[i] = i % nvdev->num_chn; | 1112 | ndev_ctx->tx_table[i] = i % nvdev->num_chn; |
1127 | 1113 | ||
1128 | netif_device_attach(ndev); | 1114 | return 0; |
1129 | rtnl_unlock(); | ||
1130 | return; | ||
1131 | |||
1132 | failed: | ||
1133 | /* fallback to only primary channel */ | ||
1134 | for (i = 1; i < nvdev->num_chn; i++) | ||
1135 | netif_napi_del(&nvdev->chan_table[i].napi); | ||
1136 | |||
1137 | nvdev->max_chn = 1; | ||
1138 | nvdev->num_chn = 1; | ||
1139 | |||
1140 | netif_device_attach(ndev); | ||
1141 | unlock: | ||
1142 | rtnl_unlock(); | ||
1143 | } | 1115 | } |
1144 | 1116 | ||
1145 | static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, | 1117 | static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, |
@@ -1360,21 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | |||
1360 | netif_napi_add(net, &net_device->chan_table[i].napi, | 1332 | netif_napi_add(net, &net_device->chan_table[i].napi, |
1361 | netvsc_poll, NAPI_POLL_WEIGHT); | 1333 | netvsc_poll, NAPI_POLL_WEIGHT); |
1362 | 1334 | ||
1363 | if (net_device->num_chn > 1) | 1335 | return net_device; |
1364 | schedule_work(&net_device->subchan_work); | ||
1365 | 1336 | ||
1366 | out: | 1337 | out: |
1367 | /* if unavailable, just proceed with one queue */ | 1338 | /* setting up multiple channels failed */ |
1368 | if (ret) { | 1339 | net_device->max_chn = 1; |
1369 | net_device->max_chn = 1; | 1340 | net_device->num_chn = 1; |
1370 | net_device->num_chn = 1; | ||
1371 | } | ||
1372 | |||
1373 | /* No sub channels, device is ready */ | ||
1374 | if (net_device->num_chn == 1) | ||
1375 | netif_device_attach(net); | ||
1376 | |||
1377 | return net_device; | ||
1378 | 1341 | ||
1379 | err_dev_remv: | 1342 | err_dev_remv: |
1380 | rndis_filter_device_remove(dev, net_device); | 1343 | rndis_filter_device_remove(dev, net_device); |
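Note on the netvsc/rndis hunks above: the old self-locking work item is split into a plain rndis_set_subchannel() that runs under RTNL, plus a netvsc_subchan_work() wrapper used only for the hot-add path. The wrapper's locking idiom is the interesting part: a work item that needs RTNL but may race with device removal (which already holds RTNL and flushes the work) must not block on rtnl_lock(). A minimal sketch with hypothetical names:

    static void foo_deferred_setup(struct work_struct *w)
    {
            struct foo_dev *dev = container_of(w, struct foo_dev, setup_work);

            /* Avoid deadlock with a removal path that holds RTNL and waits
             * for this work: retry later instead of blocking. */
            if (!rtnl_trylock()) {
                    schedule_work(w);
                    return;
            }

            foo_finish_setup(dev);  /* hypothetical body, runs under RTNL */
            rtnl_unlock();
    }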
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 23c1d6600241..4a949569ec4c 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) | |||
75 | { | 75 | { |
76 | struct ipvl_dev *ipvlan; | 76 | struct ipvl_dev *ipvlan; |
77 | struct net_device *mdev = port->dev; | 77 | struct net_device *mdev = port->dev; |
78 | int err = 0; | 78 | unsigned int flags; |
79 | int err; | ||
79 | 80 | ||
80 | ASSERT_RTNL(); | 81 | ASSERT_RTNL(); |
81 | if (port->mode != nval) { | 82 | if (port->mode != nval) { |
83 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | ||
84 | flags = ipvlan->dev->flags; | ||
85 | if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) { | ||
86 | err = dev_change_flags(ipvlan->dev, | ||
87 | flags | IFF_NOARP); | ||
88 | } else { | ||
89 | err = dev_change_flags(ipvlan->dev, | ||
90 | flags & ~IFF_NOARP); | ||
91 | } | ||
92 | if (unlikely(err)) | ||
93 | goto fail; | ||
94 | } | ||
82 | if (nval == IPVLAN_MODE_L3S) { | 95 | if (nval == IPVLAN_MODE_L3S) { |
83 | /* New mode is L3S */ | 96 | /* New mode is L3S */ |
84 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); | 97 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); |
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) | |||
86 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; | 99 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; |
87 | mdev->priv_flags |= IFF_L3MDEV_MASTER; | 100 | mdev->priv_flags |= IFF_L3MDEV_MASTER; |
88 | } else | 101 | } else |
89 | return err; | 102 | goto fail; |
90 | } else if (port->mode == IPVLAN_MODE_L3S) { | 103 | } else if (port->mode == IPVLAN_MODE_L3S) { |
91 | /* Old mode was L3S */ | 104 | /* Old mode was L3S */ |
92 | mdev->priv_flags &= ~IFF_L3MDEV_MASTER; | 105 | mdev->priv_flags &= ~IFF_L3MDEV_MASTER; |
93 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); | 106 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); |
94 | mdev->l3mdev_ops = NULL; | 107 | mdev->l3mdev_ops = NULL; |
95 | } | 108 | } |
96 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | ||
97 | if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) | ||
98 | ipvlan->dev->flags |= IFF_NOARP; | ||
99 | else | ||
100 | ipvlan->dev->flags &= ~IFF_NOARP; | ||
101 | } | ||
102 | port->mode = nval; | 109 | port->mode = nval; |
103 | } | 110 | } |
111 | return 0; | ||
112 | |||
113 | fail: | ||
114 | /* Undo the flags changes that have been done so far. */ | ||
115 | list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) { | ||
116 | flags = ipvlan->dev->flags; | ||
117 | if (port->mode == IPVLAN_MODE_L3 || | ||
118 | port->mode == IPVLAN_MODE_L3S) | ||
119 | dev_change_flags(ipvlan->dev, flags | IFF_NOARP); | ||
120 | else | ||
121 | dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP); | ||
122 | } | ||
123 | |||
104 | return err; | 124 | return err; |
105 | } | 125 | } |
106 | 126 | ||
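Note on the ipvlan hunk above: IFF_NOARP is now toggled through dev_change_flags() (so netdev notifiers fire), and on failure only the devices already touched are unwound. list_for_each_entry_continue_reverse() is the canonical way to roll back a partially applied list walk; a sketch with hypothetical apply()/revert() helpers, only the list iterators being real:

    list_for_each_entry(item, &head, node) {
            err = apply(item);
            if (err)
                    goto rollback;
    }
    return 0;

    rollback:
    /* Walk backwards from the failing entry, undoing what was applied. */
    list_for_each_entry_continue_reverse(item, &head, node)
            revert(item);
    return err;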
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 081d99aa3985..49ac678eb2dc 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c | |||
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev) | |||
222 | if (err < 0) | 222 | if (err < 0) |
223 | return err; | 223 | return err; |
224 | 224 | ||
225 | err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); | 225 | err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); |
226 | } | 226 | } |
227 | 227 | ||
228 | return err; | 228 | return err; |
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 8dff87ec6d99..2e4130746c40 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #define DEFAULT_RX_CSUM_ENABLE (true) | 64 | #define DEFAULT_RX_CSUM_ENABLE (true) |
65 | #define DEFAULT_TSO_CSUM_ENABLE (true) | 65 | #define DEFAULT_TSO_CSUM_ENABLE (true) |
66 | #define DEFAULT_VLAN_FILTER_ENABLE (true) | 66 | #define DEFAULT_VLAN_FILTER_ENABLE (true) |
67 | #define DEFAULT_VLAN_RX_OFFLOAD (true) | ||
67 | #define TX_OVERHEAD (8) | 68 | #define TX_OVERHEAD (8) |
68 | #define RXW_PADDING 2 | 69 | #define RXW_PADDING 2 |
69 | 70 | ||
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) | |||
2298 | if ((ll_mtu % dev->maxpacket) == 0) | 2299 | if ((ll_mtu % dev->maxpacket) == 0) |
2299 | return -EDOM; | 2300 | return -EDOM; |
2300 | 2301 | ||
2301 | ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); | 2302 | ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); |
2302 | 2303 | ||
2303 | netdev->mtu = new_mtu; | 2304 | netdev->mtu = new_mtu; |
2304 | 2305 | ||
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev, | |||
2364 | } | 2365 | } |
2365 | 2366 | ||
2366 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | 2367 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
2368 | pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_; | ||
2369 | else | ||
2370 | pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_; | ||
2371 | |||
2372 | if (features & NETIF_F_HW_VLAN_CTAG_FILTER) | ||
2367 | pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; | 2373 | pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; |
2368 | else | 2374 | else |
2369 | pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; | 2375 | pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; |
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev) | |||
2587 | buf |= FCT_TX_CTL_EN_; | 2593 | buf |= FCT_TX_CTL_EN_; |
2588 | ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); | 2594 | ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); |
2589 | 2595 | ||
2590 | ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); | 2596 | ret = lan78xx_set_rx_max_frame_length(dev, |
2597 | dev->net->mtu + VLAN_ETH_HLEN); | ||
2591 | 2598 | ||
2592 | ret = lan78xx_read_reg(dev, MAC_RX, &buf); | 2599 | ret = lan78xx_read_reg(dev, MAC_RX, &buf); |
2593 | buf |= MAC_RX_RXEN_; | 2600 | buf |= MAC_RX_RXEN_; |
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) | |||
2975 | if (DEFAULT_TSO_CSUM_ENABLE) | 2982 | if (DEFAULT_TSO_CSUM_ENABLE) |
2976 | dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; | 2983 | dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; |
2977 | 2984 | ||
2985 | if (DEFAULT_VLAN_RX_OFFLOAD) | ||
2986 | dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX; | ||
2987 | |||
2988 | if (DEFAULT_VLAN_FILTER_ENABLE) | ||
2989 | dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER; | ||
2990 | |||
2978 | dev->net->hw_features = dev->net->features; | 2991 | dev->net->hw_features = dev->net->features; |
2979 | 2992 | ||
2980 | ret = lan78xx_setup_irq_domain(dev); | 2993 | ret = lan78xx_setup_irq_domain(dev); |
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, | |||
3039 | struct sk_buff *skb, | 3052 | struct sk_buff *skb, |
3040 | u32 rx_cmd_a, u32 rx_cmd_b) | 3053 | u32 rx_cmd_a, u32 rx_cmd_b) |
3041 | { | 3054 | { |
3055 | /* HW Checksum offload appears to be flawed if used when not stripping | ||
3056 | * VLAN headers. Drop back to S/W checksums under these conditions. | ||
3057 | */ | ||
3042 | if (!(dev->net->features & NETIF_F_RXCSUM) || | 3058 | if (!(dev->net->features & NETIF_F_RXCSUM) || |
3043 | unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { | 3059 | unlikely(rx_cmd_a & RX_CMD_A_ICSM_) || |
3060 | ((rx_cmd_a & RX_CMD_A_FVTG_) && | ||
3061 | !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) { | ||
3044 | skb->ip_summed = CHECKSUM_NONE; | 3062 | skb->ip_summed = CHECKSUM_NONE; |
3045 | } else { | 3063 | } else { |
3046 | skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); | 3064 | skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); |
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, | |||
3048 | } | 3066 | } |
3049 | } | 3067 | } |
3050 | 3068 | ||
3069 | static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, | ||
3070 | struct sk_buff *skb, | ||
3071 | u32 rx_cmd_a, u32 rx_cmd_b) | ||
3072 | { | ||
3073 | if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) && | ||
3074 | (rx_cmd_a & RX_CMD_A_FVTG_)) | ||
3075 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | ||
3076 | (rx_cmd_b & 0xffff)); | ||
3077 | } | ||
3078 | |||
3051 | static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) | 3079 | static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) |
3052 | { | 3080 | { |
3053 | int status; | 3081 | int status; |
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) | |||
3112 | if (skb->len == size) { | 3140 | if (skb->len == size) { |
3113 | lan78xx_rx_csum_offload(dev, skb, | 3141 | lan78xx_rx_csum_offload(dev, skb, |
3114 | rx_cmd_a, rx_cmd_b); | 3142 | rx_cmd_a, rx_cmd_b); |
3143 | lan78xx_rx_vlan_offload(dev, skb, | ||
3144 | rx_cmd_a, rx_cmd_b); | ||
3115 | 3145 | ||
3116 | skb_trim(skb, skb->len - 4); /* remove fcs */ | 3146 | skb_trim(skb, skb->len - 4); /* remove fcs */ |
3117 | skb->truesize = size + sizeof(struct sk_buff); | 3147 | skb->truesize = size + sizeof(struct sk_buff); |
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) | |||
3130 | skb_set_tail_pointer(skb2, size); | 3160 | skb_set_tail_pointer(skb2, size); |
3131 | 3161 | ||
3132 | lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); | 3162 | lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); |
3163 | lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b); | ||
3133 | 3164 | ||
3134 | skb_trim(skb2, skb2->len - 4); /* remove fcs */ | 3165 | skb_trim(skb2, skb2->len - 4); /* remove fcs */ |
3135 | skb2->truesize = size + sizeof(struct sk_buff); | 3166 | skb2->truesize = size + sizeof(struct sk_buff); |
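Note on the lan78xx hunks above: hardware VLAN tag stripping is enabled on receive (RFE_CTL_VLAN_STRIP_ follows NETIF_F_HW_VLAN_CTAG_RX), the maximum frame length now accounts for the tag via VLAN_ETH_HLEN, and checksum offload is bypassed for tagged frames when stripping is off, per the comment in the hunk. Handing a stripped tag to the stack uses the standard helper; a sketch assuming, as the hunk does, that the VID sits in the low 16 bits of rx_cmd_b:

    if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
        (rx_cmd_a & RX_CMD_A_FVTG_))
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_cmd_b & 0xffff);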
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 86f7196f9d91..2a58607a6aea 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev) | |||
3962 | #ifdef CONFIG_PM_SLEEP | 3962 | #ifdef CONFIG_PM_SLEEP |
3963 | unregister_pm_notifier(&tp->pm_notifier); | 3963 | unregister_pm_notifier(&tp->pm_notifier); |
3964 | #endif | 3964 | #endif |
3965 | napi_disable(&tp->napi); | 3965 | if (!test_bit(RTL8152_UNPLUG, &tp->flags)) |
3966 | napi_disable(&tp->napi); | ||
3966 | clear_bit(WORK_ENABLE, &tp->flags); | 3967 | clear_bit(WORK_ENABLE, &tp->flags); |
3967 | usb_kill_urb(tp->intr_urb); | 3968 | usb_kill_urb(tp->intr_urb); |
3968 | cancel_delayed_work_sync(&tp->schedule); | 3969 | cancel_delayed_work_sync(&tp->schedule); |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b6c9a2af3732..53085c63277b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644); | |||
53 | /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ | 53 | /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ |
54 | #define VIRTIO_XDP_HEADROOM 256 | 54 | #define VIRTIO_XDP_HEADROOM 256 |
55 | 55 | ||
56 | /* Separating two types of XDP xmit */ | ||
57 | #define VIRTIO_XDP_TX BIT(0) | ||
58 | #define VIRTIO_XDP_REDIR BIT(1) | ||
59 | |||
56 | /* RX packet size EWMA. The average packet size is used to determine the packet | 60 | /* RX packet size EWMA. The average packet size is used to determine the packet |
57 | * buffer size when refilling RX rings. As the entire RX ring may be refilled | 61 | * buffer size when refilling RX rings. As the entire RX ring may be refilled |
58 | * at once, the weight is chosen so that the EWMA will be insensitive to short- | 62 | * at once, the weight is chosen so that the EWMA will be insensitive to short- |
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
582 | struct receive_queue *rq, | 586 | struct receive_queue *rq, |
583 | void *buf, void *ctx, | 587 | void *buf, void *ctx, |
584 | unsigned int len, | 588 | unsigned int len, |
585 | bool *xdp_xmit) | 589 | unsigned int *xdp_xmit) |
586 | { | 590 | { |
587 | struct sk_buff *skb; | 591 | struct sk_buff *skb; |
588 | struct bpf_prog *xdp_prog; | 592 | struct bpf_prog *xdp_prog; |
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
654 | trace_xdp_exception(vi->dev, xdp_prog, act); | 658 | trace_xdp_exception(vi->dev, xdp_prog, act); |
655 | goto err_xdp; | 659 | goto err_xdp; |
656 | } | 660 | } |
657 | *xdp_xmit = true; | 661 | *xdp_xmit |= VIRTIO_XDP_TX; |
658 | rcu_read_unlock(); | 662 | rcu_read_unlock(); |
659 | goto xdp_xmit; | 663 | goto xdp_xmit; |
660 | case XDP_REDIRECT: | 664 | case XDP_REDIRECT: |
661 | err = xdp_do_redirect(dev, &xdp, xdp_prog); | 665 | err = xdp_do_redirect(dev, &xdp, xdp_prog); |
662 | if (err) | 666 | if (err) |
663 | goto err_xdp; | 667 | goto err_xdp; |
664 | *xdp_xmit = true; | 668 | *xdp_xmit |= VIRTIO_XDP_REDIR; |
665 | rcu_read_unlock(); | 669 | rcu_read_unlock(); |
666 | goto xdp_xmit; | 670 | goto xdp_xmit; |
667 | default: | 671 | default: |
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
723 | void *buf, | 727 | void *buf, |
724 | void *ctx, | 728 | void *ctx, |
725 | unsigned int len, | 729 | unsigned int len, |
726 | bool *xdp_xmit) | 730 | unsigned int *xdp_xmit) |
727 | { | 731 | { |
728 | struct virtio_net_hdr_mrg_rxbuf *hdr = buf; | 732 | struct virtio_net_hdr_mrg_rxbuf *hdr = buf; |
729 | u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); | 733 | u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); |
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
818 | put_page(xdp_page); | 822 | put_page(xdp_page); |
819 | goto err_xdp; | 823 | goto err_xdp; |
820 | } | 824 | } |
821 | *xdp_xmit = true; | 825 | *xdp_xmit |= VIRTIO_XDP_TX; |
822 | if (unlikely(xdp_page != page)) | 826 | if (unlikely(xdp_page != page)) |
823 | put_page(page); | 827 | put_page(page); |
824 | rcu_read_unlock(); | 828 | rcu_read_unlock(); |
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
830 | put_page(xdp_page); | 834 | put_page(xdp_page); |
831 | goto err_xdp; | 835 | goto err_xdp; |
832 | } | 836 | } |
833 | *xdp_xmit = true; | 837 | *xdp_xmit |= VIRTIO_XDP_REDIR; |
834 | if (unlikely(xdp_page != page)) | 838 | if (unlikely(xdp_page != page)) |
835 | put_page(page); | 839 | put_page(page); |
836 | rcu_read_unlock(); | 840 | rcu_read_unlock(); |
@@ -939,7 +943,8 @@ xdp_xmit: | |||
939 | } | 943 | } |
940 | 944 | ||
941 | static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, | 945 | static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, |
942 | void *buf, unsigned int len, void **ctx, bool *xdp_xmit) | 946 | void *buf, unsigned int len, void **ctx, |
947 | unsigned int *xdp_xmit) | ||
943 | { | 948 | { |
944 | struct net_device *dev = vi->dev; | 949 | struct net_device *dev = vi->dev; |
945 | struct sk_buff *skb; | 950 | struct sk_buff *skb; |
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work) | |||
1232 | } | 1237 | } |
1233 | } | 1238 | } |
1234 | 1239 | ||
1235 | static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) | 1240 | static int virtnet_receive(struct receive_queue *rq, int budget, |
1241 | unsigned int *xdp_xmit) | ||
1236 | { | 1242 | { |
1237 | struct virtnet_info *vi = rq->vq->vdev->priv; | 1243 | struct virtnet_info *vi = rq->vq->vdev->priv; |
1238 | unsigned int len, received = 0, bytes = 0; | 1244 | unsigned int len, received = 0, bytes = 0; |
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) | |||
1321 | struct virtnet_info *vi = rq->vq->vdev->priv; | 1327 | struct virtnet_info *vi = rq->vq->vdev->priv; |
1322 | struct send_queue *sq; | 1328 | struct send_queue *sq; |
1323 | unsigned int received, qp; | 1329 | unsigned int received, qp; |
1324 | bool xdp_xmit = false; | 1330 | unsigned int xdp_xmit = 0; |
1325 | 1331 | ||
1326 | virtnet_poll_cleantx(rq); | 1332 | virtnet_poll_cleantx(rq); |
1327 | 1333 | ||
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget) | |||
1331 | if (received < budget) | 1337 | if (received < budget) |
1332 | virtqueue_napi_complete(napi, rq->vq, received); | 1338 | virtqueue_napi_complete(napi, rq->vq, received); |
1333 | 1339 | ||
1334 | if (xdp_xmit) { | 1340 | if (xdp_xmit & VIRTIO_XDP_REDIR) |
1341 | xdp_do_flush_map(); | ||
1342 | |||
1343 | if (xdp_xmit & VIRTIO_XDP_TX) { | ||
1335 | qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + | 1344 | qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + |
1336 | smp_processor_id(); | 1345 | smp_processor_id(); |
1337 | sq = &vi->sq[qp]; | 1346 | sq = &vi->sq[qp]; |
1338 | virtqueue_kick(sq->vq); | 1347 | virtqueue_kick(sq->vq); |
1339 | xdp_do_flush_map(); | ||
1340 | } | 1348 | } |
1341 | 1349 | ||
1342 | return received; | 1350 | return received; |
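Note on the virtio_net hunks above: the single xdp_xmit bool becomes a two-bit mask so the poll loop can tell "queued XDP_TX frames" apart from "redirected frames", flush the redirect maps exactly once, and kick the TX virtqueue only when it was actually used. A sketch of the completion step (send_queue and the VIRTIO_XDP_* bits are taken from the hunk; the wrapper function itself is hypothetical):

    static void foo_xdp_complete(struct send_queue *sq, unsigned int xdp_xmit)
    {
            if (xdp_xmit & VIRTIO_XDP_REDIR)
                    xdp_do_flush_map();     /* flush maps filled by xdp_do_redirect() */
            if (xdp_xmit & VIRTIO_XDP_TX)
                    virtqueue_kick(sq->vq); /* kick only if XDP_TX frames were queued */
    }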
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index aee0e60471f1..f6bb1d54d4bd 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, | |||
623 | flush = 0; | 623 | flush = 0; |
624 | 624 | ||
625 | out: | 625 | out: |
626 | skb_gro_remcsum_cleanup(skb, &grc); | 626 | skb_gro_flush_final_remcsum(skb, pp, flush, &grc); |
627 | skb->remcsum_offload = 0; | ||
628 | NAPI_GRO_CB(skb)->flush |= flush; | ||
629 | 627 | ||
630 | return pp; | 628 | return pp; |
631 | } | 629 | } |
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 2e96b34bc936..fb667bf469c7 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c | |||
@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, | |||
278 | return -EIO; | 278 | return -EIO; |
279 | if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0) | 279 | if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0) |
280 | return -EIO; | 280 | return -EIO; |
281 | return 0; | ||
281 | } | 282 | } |
282 | 283 | ||
283 | if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { | 284 | if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { |
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index b5b0cdc21d01..514d1dfc5630 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c | |||
@@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) | |||
936 | return cell; | 936 | return cell; |
937 | } | 937 | } |
938 | 938 | ||
939 | /* NULL cell_id only allowed for device tree; invalid otherwise */ | ||
940 | if (!cell_id) | ||
941 | return ERR_PTR(-EINVAL); | ||
942 | |||
939 | return nvmem_cell_get_from_list(cell_id); | 943 | return nvmem_cell_get_from_list(cell_id); |
940 | } | 944 | } |
941 | EXPORT_SYMBOL_GPL(nvmem_cell_get); | 945 | EXPORT_SYMBOL_GPL(nvmem_cell_get); |
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 16f52c626b4b..91b0194240a5 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig | |||
@@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST | |||
58 | depends on PCI && PCI_MSI_IRQ_DOMAIN | 58 | depends on PCI && PCI_MSI_IRQ_DOMAIN |
59 | select PCIE_DW_HOST | 59 | select PCIE_DW_HOST |
60 | select PCIE_DW_PLAT | 60 | select PCIE_DW_PLAT |
61 | default y | ||
62 | help | 61 | help |
63 | Enables support for the PCIe controller in the Designware IP to | 62 | Enables support for the PCIe controller in the Designware IP to |
64 | work in host mode. There are two instances of PCIe controller in | 63 | work in host mode. There are two instances of PCIe controller in |
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index a1ebe9ed441f..20bb2564a6b3 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c | |||
@@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) | |||
355 | irq = of_irq_get(intc, 0); | 355 | irq = of_irq_get(intc, 0); |
356 | if (irq <= 0) { | 356 | if (irq <= 0) { |
357 | dev_err(p->dev, "failed to get parent IRQ\n"); | 357 | dev_err(p->dev, "failed to get parent IRQ\n"); |
358 | of_node_put(intc); | ||
358 | return irq ?: -EINVAL; | 359 | return irq ?: -EINVAL; |
359 | } | 360 | } |
360 | 361 | ||
361 | p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, | 362 | p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, |
362 | &faraday_pci_irqdomain_ops, p); | 363 | &faraday_pci_irqdomain_ops, p); |
364 | of_node_put(intc); | ||
363 | if (!p->irqdomain) { | 365 | if (!p->irqdomain) { |
364 | dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); | 366 | dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); |
365 | return -EINVAL; | 367 | return -EINVAL; |
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index 874d75c9ee4a..c8febb009454 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c | |||
@@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie) | |||
680 | if (err) | 680 | if (err) |
681 | return err; | 681 | return err; |
682 | 682 | ||
683 | return phy_power_on(pcie->phy); | 683 | err = phy_power_on(pcie->phy); |
684 | if (err) | ||
685 | phy_exit(pcie->phy); | ||
686 | |||
687 | return err; | ||
684 | } | 688 | } |
685 | 689 | ||
686 | static int rcar_msi_alloc(struct rcar_msi *chip) | 690 | static int rcar_msi_alloc(struct rcar_msi *chip) |
@@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
1165 | if (rcar_pcie_hw_init(pcie)) { | 1169 | if (rcar_pcie_hw_init(pcie)) { |
1166 | dev_info(dev, "PCIe link down\n"); | 1170 | dev_info(dev, "PCIe link down\n"); |
1167 | err = -ENODEV; | 1171 | err = -ENODEV; |
1168 | goto err_clk_disable; | 1172 | goto err_phy_shutdown; |
1169 | } | 1173 | } |
1170 | 1174 | ||
1171 | data = rcar_pci_read_reg(pcie, MACSR); | 1175 | data = rcar_pci_read_reg(pcie, MACSR); |
@@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
1177 | dev_err(dev, | 1181 | dev_err(dev, |
1178 | "failed to enable MSI support: %d\n", | 1182 | "failed to enable MSI support: %d\n", |
1179 | err); | 1183 | err); |
1180 | goto err_clk_disable; | 1184 | goto err_phy_shutdown; |
1181 | } | 1185 | } |
1182 | } | 1186 | } |
1183 | 1187 | ||
@@ -1191,6 +1195,12 @@ err_msi_teardown: | |||
1191 | if (IS_ENABLED(CONFIG_PCI_MSI)) | 1195 | if (IS_ENABLED(CONFIG_PCI_MSI)) |
1192 | rcar_pcie_teardown_msi(pcie); | 1196 | rcar_pcie_teardown_msi(pcie); |
1193 | 1197 | ||
1198 | err_phy_shutdown: | ||
1199 | if (pcie->phy) { | ||
1200 | phy_power_off(pcie->phy); | ||
1201 | phy_exit(pcie->phy); | ||
1202 | } | ||
1203 | |||
1194 | err_clk_disable: | 1204 | err_clk_disable: |
1195 | clk_disable_unprepare(pcie->bus_clk); | 1205 | clk_disable_unprepare(pcie->bus_clk); |
1196 | 1206 | ||
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 6a4bbb5b3de0..fb32840ce8e6 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c | |||
@@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) | |||
559 | PCI_NUM_INTX, | 559 | PCI_NUM_INTX, |
560 | &legacy_domain_ops, | 560 | &legacy_domain_ops, |
561 | pcie); | 561 | pcie); |
562 | 562 | of_node_put(legacy_intc_node); | |
563 | if (!pcie->legacy_irq_domain) { | 563 | if (!pcie->legacy_irq_domain) { |
564 | dev_err(dev, "failed to create IRQ domain\n"); | 564 | dev_err(dev, "failed to create IRQ domain\n"); |
565 | return -ENOMEM; | 565 | return -ENOMEM; |
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index b110a3a814e3..7b1389d8e2a5 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c | |||
@@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) | |||
509 | port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, | 509 | port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, |
510 | &intx_domain_ops, | 510 | &intx_domain_ops, |
511 | port); | 511 | port); |
512 | of_node_put(pcie_intc_node); | ||
512 | if (!port->leg_domain) { | 513 | if (!port->leg_domain) { |
513 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | 514 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); |
514 | return -ENODEV; | 515 | return -ENODEV; |
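Note on the three PCI controller fixes above (ftpci100, xilinx-nwl, xilinx): each balances the reference held on the interrupt-controller device node, dropping it on the error path and once irq_domain_add_linear() has taken its own reference. The general shape, with a hypothetical child-node name and ops table:

    intc = of_get_child_by_name(dev->of_node, "interrupt-controller");
    if (!intc)
            return -ENODEV;

    domain = irq_domain_add_linear(intc, PCI_NUM_INTX, &ops, priv);
    of_node_put(intc);              /* drop our ref whether or not the domain was created */
    if (!domain)
            return -ENOMEM;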
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 523a8cab3bfb..bf53fad636a5 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c | |||
@@ -145,10 +145,10 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space); | |||
145 | */ | 145 | */ |
146 | void pci_epf_unregister_driver(struct pci_epf_driver *driver) | 146 | void pci_epf_unregister_driver(struct pci_epf_driver *driver) |
147 | { | 147 | { |
148 | struct config_group *group; | 148 | struct config_group *group, *tmp; |
149 | 149 | ||
150 | mutex_lock(&pci_epf_mutex); | 150 | mutex_lock(&pci_epf_mutex); |
151 | list_for_each_entry(group, &driver->epf_group, group_entry) | 151 | list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) |
152 | pci_ep_cfs_remove_epf_group(group); | 152 | pci_ep_cfs_remove_epf_group(group); |
153 | list_del(&driver->epf_group); | 153 | list_del(&driver->epf_group); |
154 | mutex_unlock(&pci_epf_mutex); | 154 | mutex_unlock(&pci_epf_mutex); |
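Note on the pci-epf-core hunk above: the _safe iterator is needed because pci_ep_cfs_remove_epf_group() removes (and may free) the entry being visited; iterating with plain list_for_each_entry() over a list whose nodes are deleted in the loop body is a use-after-free. A sketch with hypothetical names apart from the list helpers:

    struct foo_entry *entry, *tmp;

    list_for_each_entry_safe(entry, tmp, &head, node) {
            list_del(&entry->node); /* safe: 'tmp' already points at the next node */
            kfree(entry);
    }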
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index d0d73dbbd5ca..0f04ae648cf1 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -575,6 +575,22 @@ void pci_iov_release(struct pci_dev *dev) | |||
575 | } | 575 | } |
576 | 576 | ||
577 | /** | 577 | /** |
578 | * pci_iov_remove - clean up SR-IOV state after PF driver is detached | ||
579 | * @dev: the PCI device | ||
580 | */ | ||
581 | void pci_iov_remove(struct pci_dev *dev) | ||
582 | { | ||
583 | struct pci_sriov *iov = dev->sriov; | ||
584 | |||
585 | if (!dev->is_physfn) | ||
586 | return; | ||
587 | |||
588 | iov->driver_max_VFs = iov->total_VFs; | ||
589 | if (iov->num_VFs) | ||
590 | pci_warn(dev, "driver left SR-IOV enabled after remove\n"); | ||
591 | } | ||
592 | |||
593 | /** | ||
578 | * pci_iov_update_resource - update a VF BAR | 594 | * pci_iov_update_resource - update a VF BAR |
579 | * @dev: the PCI device | 595 | * @dev: the PCI device |
580 | * @resno: the resource number | 596 | * @resno: the resource number |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 65113b6eed14..89ee6a2b6eb8 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) | |||
629 | { | 629 | { |
630 | struct acpi_device *adev = ACPI_COMPANION(&dev->dev); | 630 | struct acpi_device *adev = ACPI_COMPANION(&dev->dev); |
631 | 631 | ||
632 | /* | ||
633 | * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over | ||
634 | * system-wide suspend/resume confuses the platform firmware, so avoid | ||
635 | * doing that, unless the bridge has a driver that should take care of | ||
636 | * the PM handling. According to Section 16.1.6 of ACPI 6.2, endpoint | ||
637 | * devices are expected to be in D3 before invoking the S3 entry path | ||
638 | * from the firmware, so they should not be affected by this issue. | ||
639 | */ | ||
640 | if (pci_is_bridge(dev) && !dev->driver && | ||
641 | acpi_target_system_state() != ACPI_STATE_S0) | ||
642 | return true; | ||
643 | |||
632 | if (!adev || !acpi_device_power_manageable(adev)) | 644 | if (!adev || !acpi_device_power_manageable(adev)) |
633 | return false; | 645 | return false; |
634 | 646 | ||
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index c125d53033c6..6792292b5fc7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev) | |||
445 | } | 445 | } |
446 | pcibios_free_irq(pci_dev); | 446 | pcibios_free_irq(pci_dev); |
447 | pci_dev->driver = NULL; | 447 | pci_dev->driver = NULL; |
448 | pci_iov_remove(pci_dev); | ||
448 | } | 449 | } |
449 | 450 | ||
450 | /* Undo the runtime PM settings in local_pci_probe() */ | 451 | /* Undo the runtime PM settings in local_pci_probe() */ |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index c358e7a07f3f..882f1f9596df 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -311,6 +311,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev) | |||
311 | #ifdef CONFIG_PCI_IOV | 311 | #ifdef CONFIG_PCI_IOV |
312 | int pci_iov_init(struct pci_dev *dev); | 312 | int pci_iov_init(struct pci_dev *dev); |
313 | void pci_iov_release(struct pci_dev *dev); | 313 | void pci_iov_release(struct pci_dev *dev); |
314 | void pci_iov_remove(struct pci_dev *dev); | ||
314 | void pci_iov_update_resource(struct pci_dev *dev, int resno); | 315 | void pci_iov_update_resource(struct pci_dev *dev, int resno); |
315 | resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); | 316 | resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); |
316 | void pci_restore_iov_state(struct pci_dev *dev); | 317 | void pci_restore_iov_state(struct pci_dev *dev); |
@@ -325,6 +326,9 @@ static inline void pci_iov_release(struct pci_dev *dev) | |||
325 | 326 | ||
326 | { | 327 | { |
327 | } | 328 | } |
329 | static inline void pci_iov_remove(struct pci_dev *dev) | ||
330 | { | ||
331 | } | ||
328 | static inline void pci_restore_iov_state(struct pci_dev *dev) | 332 | static inline void pci_restore_iov_state(struct pci_dev *dev) |
329 | { | 333 | { |
330 | } | 334 | } |
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 6d4012dd6922..bac1eeb3d312 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
265 | return err; | 265 | return err; |
266 | 266 | ||
267 | /* full-function RTCs won't have such missing fields */ | 267 | /* full-function RTCs won't have such missing fields */ |
268 | if (rtc_valid_tm(&alarm->time) == 0) | 268 | if (rtc_valid_tm(&alarm->time) == 0) { |
269 | rtc_add_offset(rtc, &alarm->time); | ||
269 | return 0; | 270 | return 0; |
271 | } | ||
270 | 272 | ||
271 | /* get the "after" timestamp, to detect wrapped fields */ | 273 | /* get the "after" timestamp, to detect wrapped fields */ |
272 | err = rtc_read_time(rtc, &now); | 274 | err = rtc_read_time(rtc, &now); |
@@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
409 | if (err) | 411 | if (err) |
410 | return err; | 412 | return err; |
411 | 413 | ||
412 | rtc_subtract_offset(rtc, &alarm->time); | ||
413 | scheduled = rtc_tm_to_time64(&alarm->time); | 414 | scheduled = rtc_tm_to_time64(&alarm->time); |
414 | 415 | ||
415 | /* Make sure we're not setting alarms in the past */ | 416 | /* Make sure we're not setting alarms in the past */ |
@@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
426 | * over right here, before we set the alarm. | 427 | * over right here, before we set the alarm. |
427 | */ | 428 | */ |
428 | 429 | ||
430 | rtc_subtract_offset(rtc, &alarm->time); | ||
431 | |||
429 | if (!rtc->ops) | 432 | if (!rtc->ops) |
430 | err = -ENODEV; | 433 | err = -ENODEV; |
431 | else if (!rtc->ops->set_alarm) | 434 | else if (!rtc->ops->set_alarm) |
@@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
467 | 470 | ||
468 | mutex_unlock(&rtc->ops_lock); | 471 | mutex_unlock(&rtc->ops_lock); |
469 | 472 | ||
470 | rtc_add_offset(rtc, &alarm->time); | ||
471 | return err; | 473 | return err; |
472 | } | 474 | } |
473 | EXPORT_SYMBOL_GPL(rtc_set_alarm); | 475 | EXPORT_SYMBOL_GPL(rtc_set_alarm); |
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index 097a4d4e2aba..1925aaf09093 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c | |||
@@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, | |||
367 | } | 367 | } |
368 | 368 | ||
369 | retval = rtc_register_device(mrst_rtc.rtc); | 369 | retval = rtc_register_device(mrst_rtc.rtc); |
370 | if (retval) { | 370 | if (retval) |
371 | retval = PTR_ERR(mrst_rtc.rtc); | ||
372 | goto cleanup0; | 371 | goto cleanup0; |
373 | } | ||
374 | 372 | ||
375 | dev_dbg(dev, "initialised\n"); | 373 | dev_dbg(dev, "initialised\n"); |
376 | return 0; | 374 | return 0; |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index d3a38c421503..a9f60d0ee02e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -41,6 +41,15 @@ | |||
41 | 41 | ||
42 | #define DASD_DIAG_MOD "dasd_diag_mod" | 42 | #define DASD_DIAG_MOD "dasd_diag_mod" |
43 | 43 | ||
44 | static unsigned int queue_depth = 32; | ||
45 | static unsigned int nr_hw_queues = 4; | ||
46 | |||
47 | module_param(queue_depth, uint, 0444); | ||
48 | MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices"); | ||
49 | |||
50 | module_param(nr_hw_queues, uint, 0444); | ||
51 | MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices"); | ||
52 | |||
44 | /* | 53 | /* |
45 | * SECTION: exported variables of dasd.c | 54 | * SECTION: exported variables of dasd.c |
46 | */ | 55 | */ |
@@ -3115,8 +3124,8 @@ static int dasd_alloc_queue(struct dasd_block *block) | |||
3115 | 3124 | ||
3116 | block->tag_set.ops = &dasd_mq_ops; | 3125 | block->tag_set.ops = &dasd_mq_ops; |
3117 | block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); | 3126 | block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); |
3118 | block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; | 3127 | block->tag_set.nr_hw_queues = nr_hw_queues; |
3119 | block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; | 3128 | block->tag_set.queue_depth = queue_depth; |
3120 | block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | 3129 | block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
3121 | 3130 | ||
3122 | rc = blk_mq_alloc_tag_set(&block->tag_set); | 3131 | rc = blk_mq_alloc_tag_set(&block->tag_set); |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 976b6bd4fb05..de6b96036aa4 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -228,14 +228,6 @@ struct dasd_ccw_req { | |||
228 | #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ | 228 | #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ |
229 | #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ | 229 | #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ |
230 | 230 | ||
231 | /* | ||
232 | * There is no reliable way to determine the number of available CPUs on | ||
233 | * LPAR but there is no big performance difference between 1 and the | ||
234 | * maximum CPU number. | ||
235 | * 64 is a good trade off performance wise. | ||
236 | */ | ||
237 | #define DASD_NR_HW_QUEUES 64 | ||
238 | #define DASD_MAX_LCU_DEV 256 | ||
239 | #define DASD_REQ_PER_DEV 4 | 231 | #define DASD_REQ_PER_DEV 4 |
240 | 232 | ||
241 | /* Signature for error recovery functions. */ | 233 | /* Signature for error recovery functions. */ |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 2a5fec55bf60..a246a618f9a4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -829,6 +829,17 @@ struct qeth_trap_id { | |||
829 | /*some helper functions*/ | 829 | /*some helper functions*/ |
830 | #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") | 830 | #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") |
831 | 831 | ||
832 | static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, | ||
833 | unsigned int elements) | ||
834 | { | ||
835 | unsigned int i; | ||
836 | |||
837 | for (i = 0; i < elements; i++) | ||
838 | memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); | ||
839 | buf->element[14].sflags = 0; | ||
840 | buf->element[15].sflags = 0; | ||
841 | } | ||
842 | |||
832 | /** | 843 | /** |
833 | * qeth_get_elements_for_range() - find number of SBALEs to cover range. | 844 | * qeth_get_elements_for_range() - find number of SBALEs to cover range. |
834 | * @start: Start of the address range. | 845 | * @start: Start of the address range. |
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, | |||
1029 | __u16, __u16, | 1040 | __u16, __u16, |
1030 | enum qeth_prot_versions); | 1041 | enum qeth_prot_versions); |
1031 | int qeth_set_features(struct net_device *, netdev_features_t); | 1042 | int qeth_set_features(struct net_device *, netdev_features_t); |
1032 | void qeth_recover_features(struct net_device *dev); | 1043 | void qeth_enable_hw_features(struct net_device *dev); |
1033 | netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); | 1044 | netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); |
1034 | netdev_features_t qeth_features_check(struct sk_buff *skb, | 1045 | netdev_features_t qeth_features_check(struct sk_buff *skb, |
1035 | struct net_device *dev, | 1046 | struct net_device *dev, |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 8e1474f1ffac..d01ac29fd986 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, | |||
73 | struct qeth_qdio_out_buffer *buf, | 73 | struct qeth_qdio_out_buffer *buf, |
74 | enum iucv_tx_notify notification); | 74 | enum iucv_tx_notify notification); |
75 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); | 75 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); |
76 | static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | ||
77 | struct qeth_qdio_out_buffer *buf, | ||
78 | enum qeth_qdio_buffer_states newbufstate); | ||
79 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); | 76 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); |
80 | 77 | ||
81 | struct workqueue_struct *qeth_wq; | 78 | struct workqueue_struct *qeth_wq; |
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, | |||
489 | struct qaob *aob; | 486 | struct qaob *aob; |
490 | struct qeth_qdio_out_buffer *buffer; | 487 | struct qeth_qdio_out_buffer *buffer; |
491 | enum iucv_tx_notify notification; | 488 | enum iucv_tx_notify notification; |
489 | unsigned int i; | ||
492 | 490 | ||
493 | aob = (struct qaob *) phys_to_virt(phys_aob_addr); | 491 | aob = (struct qaob *) phys_to_virt(phys_aob_addr); |
494 | QETH_CARD_TEXT(card, 5, "haob"); | 492 | QETH_CARD_TEXT(card, 5, "haob"); |
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, | |||
513 | qeth_notify_skbs(buffer->q, buffer, notification); | 511 | qeth_notify_skbs(buffer->q, buffer, notification); |
514 | 512 | ||
515 | buffer->aob = NULL; | 513 | buffer->aob = NULL; |
516 | qeth_clear_output_buffer(buffer->q, buffer, | 514 | /* Free dangling allocations. The attached skbs are handled by |
517 | QETH_QDIO_BUF_HANDLED_DELAYED); | 515 | * qeth_cleanup_handled_pending(). |
516 | */ | ||
517 | for (i = 0; | ||
518 | i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); | ||
519 | i++) { | ||
520 | if (aob->sba[i] && buffer->is_header[i]) | ||
521 | kmem_cache_free(qeth_core_header_cache, | ||
522 | (void *) aob->sba[i]); | ||
523 | } | ||
524 | atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); | ||
518 | 525 | ||
519 | /* from here on: do not touch buffer anymore */ | ||
520 | qdio_release_aob(aob); | 526 | qdio_release_aob(aob); |
521 | } | 527 | } |
522 | 528 | ||
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, | |||
3759 | QETH_CARD_TEXT(queue->card, 5, "aob"); | 3765 | QETH_CARD_TEXT(queue->card, 5, "aob"); |
3760 | QETH_CARD_TEXT_(queue->card, 5, "%lx", | 3766 | QETH_CARD_TEXT_(queue->card, 5, "%lx", |
3761 | virt_to_phys(buffer->aob)); | 3767 | virt_to_phys(buffer->aob)); |
3768 | |||
3769 | /* prepare the queue slot for re-use: */ | ||
3770 | qeth_scrub_qdio_buffer(buffer->buffer, | ||
3771 | QETH_MAX_BUFFER_ELEMENTS(card)); | ||
3762 | if (qeth_init_qdio_out_buf(queue, bidx)) { | 3772 | if (qeth_init_qdio_out_buf(queue, bidx)) { |
3763 | QETH_CARD_TEXT(card, 2, "outofbuf"); | 3773 | QETH_CARD_TEXT(card, 2, "outofbuf"); |
3764 | qeth_schedule_recovery(card); | 3774 | qeth_schedule_recovery(card); |
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card) | |||
4834 | goto out; | 4844 | goto out; |
4835 | } | 4845 | } |
4836 | 4846 | ||
4837 | ccw_device_get_id(CARD_RDEV(card), &id); | 4847 | ccw_device_get_id(CARD_DDEV(card), &id); |
4838 | request->resp_buf_len = sizeof(*response); | 4848 | request->resp_buf_len = sizeof(*response); |
4839 | request->resp_version = DIAG26C_VERSION2; | 4849 | request->resp_version = DIAG26C_VERSION2; |
4840 | request->op_code = DIAG26C_GET_MAC; | 4850 | request->op_code = DIAG26C_GET_MAC; |
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) | |||
6459 | #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ | 6469 | #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ |
6460 | NETIF_F_IPV6_CSUM) | 6470 | NETIF_F_IPV6_CSUM) |
6461 | /** | 6471 | /** |
6462 | * qeth_recover_features() - Restore device features after recovery | 6472 | * qeth_enable_hw_features() - (Re-)Enable HW functions for device features |
6463 | * @dev: the recovering net_device | 6473 | * @dev: a net_device |
6464 | * | ||
6465 | * Caller must hold rtnl lock. | ||
6466 | */ | 6474 | */ |
6467 | void qeth_recover_features(struct net_device *dev) | 6475 | void qeth_enable_hw_features(struct net_device *dev) |
6468 | { | 6476 | { |
6469 | netdev_features_t features = dev->features; | ||
6470 | struct qeth_card *card = dev->ml_priv; | 6477 | struct qeth_card *card = dev->ml_priv; |
6478 | netdev_features_t features; | ||
6471 | 6479 | ||
6480 | rtnl_lock(); | ||
6481 | features = dev->features; | ||
6472 | /* force-off any feature that needs an IPA sequence. | 6482 | /* force-off any feature that needs an IPA sequence. |
6473 | * netdev_update_features() will restart them. | 6483 | * netdev_update_features() will restart them. |
6474 | */ | 6484 | */ |
6475 | dev->features &= ~QETH_HW_FEATURES; | 6485 | dev->features &= ~QETH_HW_FEATURES; |
6476 | netdev_update_features(dev); | 6486 | netdev_update_features(dev); |
6477 | 6487 | if (features != dev->features) | |
6478 | if (features == dev->features) | 6488 | dev_warn(&card->gdev->dev, |
6479 | return; | 6489 | "Device recovery failed to restore all offload features\n"); |
6480 | dev_warn(&card->gdev->dev, | 6490 | rtnl_unlock(); |
6481 | "Device recovery failed to restore all offload features\n"); | ||
6482 | } | 6491 | } |
6483 | EXPORT_SYMBOL_GPL(qeth_recover_features); | 6492 | EXPORT_SYMBOL_GPL(qeth_enable_hw_features); |
6484 | 6493 | ||
6485 | int qeth_set_features(struct net_device *dev, netdev_features_t features) | 6494 | int qeth_set_features(struct net_device *dev, netdev_features_t features) |
6486 | { | 6495 | { |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a7cb37da6a21..2487f0aeb165 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) | |||
140 | 140 | ||
141 | static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) | 141 | static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) |
142 | { | 142 | { |
143 | enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? | 143 | enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? |
144 | IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; | 144 | IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; |
145 | int rc; | 145 | int rc; |
146 | 146 | ||
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) | |||
157 | 157 | ||
158 | static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) | 158 | static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) |
159 | { | 159 | { |
160 | enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? | 160 | enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? |
161 | IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; | 161 | IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; |
162 | int rc; | 162 | int rc; |
163 | 163 | ||
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
501 | return -ERESTARTSYS; | 501 | return -ERESTARTSYS; |
502 | } | 502 | } |
503 | 503 | ||
504 | /* avoid racing against concurrent state change: */ | ||
505 | if (!mutex_trylock(&card->conf_mutex)) | ||
506 | return -EAGAIN; | ||
507 | |||
504 | if (!qeth_card_hw_is_reachable(card)) { | 508 | if (!qeth_card_hw_is_reachable(card)) { |
505 | ether_addr_copy(dev->dev_addr, addr->sa_data); | 509 | ether_addr_copy(dev->dev_addr, addr->sa_data); |
506 | return 0; | 510 | goto out_unlock; |
507 | } | 511 | } |
508 | 512 | ||
509 | /* don't register the same address twice */ | 513 | /* don't register the same address twice */ |
510 | if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && | 514 | if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && |
511 | (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) | 515 | (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) |
512 | return 0; | 516 | goto out_unlock; |
513 | 517 | ||
514 | /* add the new address, switch over, drop the old */ | 518 | /* add the new address, switch over, drop the old */ |
515 | rc = qeth_l2_send_setmac(card, addr->sa_data); | 519 | rc = qeth_l2_send_setmac(card, addr->sa_data); |
516 | if (rc) | 520 | if (rc) |
517 | return rc; | 521 | goto out_unlock; |
518 | ether_addr_copy(old_addr, dev->dev_addr); | 522 | ether_addr_copy(old_addr, dev->dev_addr); |
519 | ether_addr_copy(dev->dev_addr, addr->sa_data); | 523 | ether_addr_copy(dev->dev_addr, addr->sa_data); |
520 | 524 | ||
521 | if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) | 525 | if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) |
522 | qeth_l2_remove_mac(card, old_addr); | 526 | qeth_l2_remove_mac(card, old_addr); |
523 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | 527 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; |
524 | return 0; | 528 | |
529 | out_unlock: | ||
530 | mutex_unlock(&card->conf_mutex); | ||
531 | return rc; | ||
525 | } | 532 | } |
526 | 533 | ||
527 | static void qeth_promisc_to_bridge(struct qeth_card *card) | 534 | static void qeth_promisc_to_bridge(struct qeth_card *card) |
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1112 | netif_carrier_off(card->dev); | 1119 | netif_carrier_off(card->dev); |
1113 | 1120 | ||
1114 | qeth_set_allowed_threads(card, 0xffffffff, 0); | 1121 | qeth_set_allowed_threads(card, 0xffffffff, 0); |
1122 | |||
1123 | qeth_enable_hw_features(card->dev); | ||
1115 | if (recover_flag == CARD_STATE_RECOVER) { | 1124 | if (recover_flag == CARD_STATE_RECOVER) { |
1116 | if (recovery_mode && | 1125 | if (recovery_mode && |
1117 | card->info.type != QETH_CARD_TYPE_OSN) { | 1126 | card->info.type != QETH_CARD_TYPE_OSN) { |
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1123 | } | 1132 | } |
1124 | /* this also sets saved unicast addresses */ | 1133 | /* this also sets saved unicast addresses */ |
1125 | qeth_l2_set_rx_mode(card->dev); | 1134 | qeth_l2_set_rx_mode(card->dev); |
1126 | rtnl_lock(); | ||
1127 | qeth_recover_features(card->dev); | ||
1128 | rtnl_unlock(); | ||
1129 | } | 1135 | } |
1130 | /* let user_space know that device is online */ | 1136 | /* let user_space know that device is online */ |
1131 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 1137 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
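Note on the qeth_l2_main.c hunks above: the MAC helpers switch from the *_64bits ether-address tests to the plain ones, since the 64-bit variants read a full 8 bytes and are only safe when two accessible padding bytes follow the 6-byte address; qeth_l2_set_mac_address() additionally refuses to race an ongoing state change by taking conf_mutex with mutex_trylock() and returning -EAGAIN when it is contended. A hedged sketch of that guard in a generic ndo_set_mac_address-style handler (placeholder names; the real handler also distinguishes the "hardware unreachable" and "address already registered" cases):

#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>

/* Sketch only: bail out instead of blocking on a concurrent reconfiguration. */
static int example_set_mac(struct net_device *dev, struct mutex *conf_mutex,
			   const struct sockaddr *addr)
{
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!mutex_trylock(conf_mutex))
		return -EAGAIN;		/* state change in flight, try again later */

	/* ... program the new address on the card ... */
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	mutex_unlock(conf_mutex);
	return 0;
}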
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e7fa479adf47..5905dc63e256 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
2662 | netif_carrier_on(card->dev); | 2662 | netif_carrier_on(card->dev); |
2663 | else | 2663 | else |
2664 | netif_carrier_off(card->dev); | 2664 | netif_carrier_off(card->dev); |
2665 | |||
2666 | qeth_enable_hw_features(card->dev); | ||
2665 | if (recover_flag == CARD_STATE_RECOVER) { | 2667 | if (recover_flag == CARD_STATE_RECOVER) { |
2666 | rtnl_lock(); | 2668 | rtnl_lock(); |
2667 | if (recovery_mode) | 2669 | if (recovery_mode) |
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
2669 | else | 2671 | else |
2670 | dev_open(card->dev); | 2672 | dev_open(card->dev); |
2671 | qeth_l3_set_rx_mode(card->dev); | 2673 | qeth_l3_set_rx_mode(card->dev); |
2672 | qeth_recover_features(card->dev); | ||
2673 | rtnl_unlock(); | 2674 | rtnl_unlock(); |
2674 | } | 2675 | } |
2675 | qeth_trace_features(card); | 2676 | qeth_trace_features(card); |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a9831bd37a73..a57f3a7d4748 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) | |||
1974 | u32 lun_count, nexus; | 1974 | u32 lun_count, nexus; |
1975 | u32 i, bus, target; | 1975 | u32 i, bus, target; |
1976 | u8 expose_flag, attribs; | 1976 | u8 expose_flag, attribs; |
1977 | u8 devtype; | ||
1978 | 1977 | ||
1979 | lun_count = aac_get_safw_phys_lun_count(dev); | 1978 | lun_count = aac_get_safw_phys_lun_count(dev); |
1980 | 1979 | ||
@@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) | |||
1992 | continue; | 1991 | continue; |
1993 | 1992 | ||
1994 | if (expose_flag != 0) { | 1993 | if (expose_flag != 0) { |
1995 | devtype = AAC_DEVTYPE_RAID_MEMBER; | 1994 | dev->hba_map[bus][target].devtype = |
1996 | goto update_devtype; | 1995 | AAC_DEVTYPE_RAID_MEMBER; |
1996 | continue; | ||
1997 | } | 1997 | } |
1998 | 1998 | ||
1999 | if (nexus != 0 && (attribs & 8)) { | 1999 | if (nexus != 0 && (attribs & 8)) { |
2000 | devtype = AAC_DEVTYPE_NATIVE_RAW; | 2000 | dev->hba_map[bus][target].devtype = |
2001 | AAC_DEVTYPE_NATIVE_RAW; | ||
2001 | dev->hba_map[bus][target].rmw_nexus = | 2002 | dev->hba_map[bus][target].rmw_nexus = |
2002 | nexus; | 2003 | nexus; |
2003 | } else | 2004 | } else |
2004 | devtype = AAC_DEVTYPE_ARC_RAW; | 2005 | dev->hba_map[bus][target].devtype = |
2006 | AAC_DEVTYPE_ARC_RAW; | ||
2005 | 2007 | ||
2006 | dev->hba_map[bus][target].scan_counter = dev->scan_counter; | 2008 | dev->hba_map[bus][target].scan_counter = dev->scan_counter; |
2007 | 2009 | ||
2008 | aac_set_safw_target_qd(dev, bus, target); | 2010 | aac_set_safw_target_qd(dev, bus, target); |
2009 | |||
2010 | update_devtype: | ||
2011 | dev->hba_map[bus][target].devtype = devtype; | ||
2012 | } | 2011 | } |
2013 | } | 2012 | } |
2014 | 2013 | ||
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 53ae52dbff84..cd2fdac000c9 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ | |||
51 | #include <linux/atomic.h> | 51 | #include <linux/atomic.h> |
52 | #include <linux/ratelimit.h> | 52 | #include <linux/ratelimit.h> |
53 | #include <linux/uio.h> | 53 | #include <linux/uio.h> |
54 | #include <linux/cred.h> /* for sg_check_file_access() */ | ||
54 | 55 | ||
55 | #include "scsi.h" | 56 | #include "scsi.h" |
56 | #include <scsi/scsi_dbg.h> | 57 | #include <scsi/scsi_dbg.h> |
@@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref); | |||
209 | sdev_prefix_printk(prefix, (sdp)->device, \ | 210 | sdev_prefix_printk(prefix, (sdp)->device, \ |
210 | (sdp)->disk->disk_name, fmt, ##a) | 211 | (sdp)->disk->disk_name, fmt, ##a) |
211 | 212 | ||
213 | /* | ||
214 | * The SCSI interfaces that use read() and write() as an asynchronous variant of | ||
215 | * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways | ||
216 | * to trigger read() and write() calls from various contexts with elevated | ||
217 | * privileges. This can lead to kernel memory corruption (e.g. if these | ||
218 | * interfaces are called through splice()) and privilege escalation inside | ||
219 | * userspace (e.g. if a process with access to such a device passes a file | ||
220 | * descriptor to a SUID binary as stdin/stdout/stderr). | ||
221 | * | ||
222 | * This function provides protection for the legacy API by restricting the | ||
223 | * calling context. | ||
224 | */ | ||
225 | static int sg_check_file_access(struct file *filp, const char *caller) | ||
226 | { | ||
227 | if (filp->f_cred != current_real_cred()) { | ||
228 | pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", | ||
229 | caller, task_tgid_vnr(current), current->comm); | ||
230 | return -EPERM; | ||
231 | } | ||
232 | if (uaccess_kernel()) { | ||
233 | pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n", | ||
234 | caller, task_tgid_vnr(current), current->comm); | ||
235 | return -EACCES; | ||
236 | } | ||
237 | return 0; | ||
238 | } | ||
239 | |||
212 | static int sg_allow_access(struct file *filp, unsigned char *cmd) | 240 | static int sg_allow_access(struct file *filp, unsigned char *cmd) |
213 | { | 241 | { |
214 | struct sg_fd *sfp = filp->private_data; | 242 | struct sg_fd *sfp = filp->private_data; |
@@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
393 | struct sg_header *old_hdr = NULL; | 421 | struct sg_header *old_hdr = NULL; |
394 | int retval = 0; | 422 | int retval = 0; |
395 | 423 | ||
424 | /* | ||
425 | * This could cause a response to be stranded. Close the associated | ||
426 | * file descriptor to free up any resources being held. | ||
427 | */ | ||
428 | retval = sg_check_file_access(filp, __func__); | ||
429 | if (retval) | ||
430 | return retval; | ||
431 | |||
396 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 432 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
397 | return -ENXIO; | 433 | return -ENXIO; |
398 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, | 434 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
@@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
580 | struct sg_header old_hdr; | 616 | struct sg_header old_hdr; |
581 | sg_io_hdr_t *hp; | 617 | sg_io_hdr_t *hp; |
582 | unsigned char cmnd[SG_MAX_CDB_SIZE]; | 618 | unsigned char cmnd[SG_MAX_CDB_SIZE]; |
619 | int retval; | ||
583 | 620 | ||
584 | if (unlikely(uaccess_kernel())) | 621 | retval = sg_check_file_access(filp, __func__); |
585 | return -EINVAL; | 622 | if (retval) |
623 | return retval; | ||
586 | 624 | ||
587 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 625 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
588 | return -ENXIO; | 626 | return -ENXIO; |
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c index 45c05527a57a..faf4b4158cfa 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ap.c +++ b/drivers/staging/rtl8723bs/core/rtw_ap.c | |||
@@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) | |||
1051 | return _FAIL; | 1051 | return _FAIL; |
1052 | 1052 | ||
1053 | 1053 | ||
1054 | if (len > MAX_IE_SZ) | 1054 | if (len < 0 || len > MAX_IE_SZ) |
1055 | return _FAIL; | 1055 | return _FAIL; |
1056 | 1056 | ||
1057 | pbss_network->IELength = len; | 1057 | pbss_network->IELength = len; |
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c index 7947edb239a1..88ba5b2fea6a 100644 --- a/drivers/staging/rtlwifi/rtl8822be/hw.c +++ b/drivers/staging/rtlwifi/rtl8822be/hw.c | |||
@@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw) | |||
803 | return; | 803 | return; |
804 | 804 | ||
805 | pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp); | 805 | pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp); |
806 | pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7)); | 806 | pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3); |
807 | 807 | ||
808 | pci_read_config_byte(rtlpci->pdev, 0x719, &tmp); | 808 | pci_read_config_byte(rtlpci->pdev, 0x719, &tmp); |
809 | pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4)); | 809 | pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4)); |
diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h index 012fb618840b..a45f0eb69d3f 100644 --- a/drivers/staging/rtlwifi/wifi.h +++ b/drivers/staging/rtlwifi/wifi.h | |||
@@ -88,6 +88,7 @@ | |||
88 | #define RTL_USB_MAX_RX_COUNT 100 | 88 | #define RTL_USB_MAX_RX_COUNT 100 |
89 | #define QBSS_LOAD_SIZE 5 | 89 | #define QBSS_LOAD_SIZE 5 |
90 | #define MAX_WMMELE_LENGTH 64 | 90 | #define MAX_WMMELE_LENGTH 64 |
91 | #define ASPM_L1_LATENCY 7 | ||
91 | 92 | ||
92 | #define TOTAL_CAM_ENTRY 32 | 93 | #define TOTAL_CAM_ENTRY 32 |
93 | 94 | ||
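Note on the rtl8822be change above: because '<<' binds tighter than '|', the value written to PCI config offset 0x70f is now tmp | (ASPM_L1_LATENCY << 3) = tmp | 0x38, i.e. bits 3..5, instead of the previous single BIT(7); the patch itself does not spell out the hardware meaning of those bits. A one-line worked example:

#include <linux/types.h>

/* ASPM_L1_LATENCY is 7, so the OR-ed value is 7 << 3 == 0x38 (bits 3..5). */
static inline u8 example_aspm_backdoor_value(u8 tmp)
{
	return tmp | (ASPM_L1_LATENCY << 3);
}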
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 01ac306131c1..10db5656fd5d 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) | |||
3727 | * Check for overflow of 8byte PRI READ_KEYS payload and | 3727 | * Check for overflow of 8byte PRI READ_KEYS payload and |
3728 | * next reservation key list descriptor. | 3728 | * next reservation key list descriptor. |
3729 | */ | 3729 | */ |
3730 | if ((add_len + 8) > (cmd->data_length - 8)) | 3730 | if (off + 8 <= cmd->data_length) { |
3731 | break; | 3731 | put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); |
3732 | 3732 | off += 8; | |
3733 | put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); | 3733 | } |
3734 | off += 8; | 3734 | /* |
3735 | * SPC5r17: 6.16.2 READ KEYS service action | ||
3736 | * The ADDITIONAL LENGTH field indicates the number of bytes in | ||
3737 | * the Reservation key list. The contents of the ADDITIONAL | ||
3738 | * LENGTH field are not altered based on the allocation length | ||
3739 | */ | ||
3735 | add_len += 8; | 3740 | add_len += 8; |
3736 | } | 3741 | } |
3737 | spin_unlock(&dev->t10_pr.registration_lock); | 3742 | spin_unlock(&dev->t10_pr.registration_lock); |
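Note on the target_core_pr.c hunk above: PRI READ KEYS now copies a reservation key only while it still fits in the initiator's allocation length (off + 8 <= cmd->data_length) but keeps counting every registration in ADDITIONAL LENGTH, matching the quoted SPC5r17 6.16.2 wording that the field is not altered by the allocation length. A minimal sketch of that truncation rule (placeholder function, not the target-core code):

#include <asm/unaligned.h>
#include <linux/types.h>

/* Sketch only: copy a descriptor only while it fits, but always account for
 * it in the returned ADDITIONAL LENGTH so the initiator can size a retry.
 */
static u32 example_fill_read_keys(unsigned char *buf, u32 data_length,
				  const u64 *keys, unsigned int nr_keys)
{
	u32 off = 8;		/* payload starts after the 8-byte header */
	u32 add_len = 0;
	unsigned int i;

	for (i = 0; i < nr_keys; i++) {
		if (off + 8 <= data_length) {
			put_unaligned_be64(keys[i], &buf[off]);
			off += 8;
		}
		add_len += 8;	/* counted even when the key did not fit */
	}
	return add_len;
}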
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index 6281266b8ec0..a923ebdeb73c 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c | |||
@@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr, | |||
213 | goto err_free_acl; | 213 | goto err_free_acl; |
214 | } | 214 | } |
215 | ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); | 215 | ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); |
216 | if (!ret) { | ||
217 | /* Notify userspace about the change */ | ||
218 | kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE); | ||
219 | } | ||
216 | mutex_unlock(&tb->lock); | 220 | mutex_unlock(&tb->lock); |
217 | 221 | ||
218 | err_free_acl: | 222 | err_free_acl: |
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index e8f4ac9400ea..5d421d7e8904 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
@@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev, | |||
215 | struct device_attribute *attr, char *buf) | 215 | struct device_attribute *attr, char *buf) |
216 | { | 216 | { |
217 | struct uio_device *idev = dev_get_drvdata(dev); | 217 | struct uio_device *idev = dev_get_drvdata(dev); |
218 | return sprintf(buf, "%s\n", idev->info->name); | 218 | int ret; |
219 | |||
220 | mutex_lock(&idev->info_lock); | ||
221 | if (!idev->info) { | ||
222 | ret = -EINVAL; | ||
223 | dev_err(dev, "the device has been unregistered\n"); | ||
224 | goto out; | ||
225 | } | ||
226 | |||
227 | ret = sprintf(buf, "%s\n", idev->info->name); | ||
228 | |||
229 | out: | ||
230 | mutex_unlock(&idev->info_lock); | ||
231 | return ret; | ||
219 | } | 232 | } |
220 | static DEVICE_ATTR_RO(name); | 233 | static DEVICE_ATTR_RO(name); |
221 | 234 | ||
@@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev, | |||
223 | struct device_attribute *attr, char *buf) | 236 | struct device_attribute *attr, char *buf) |
224 | { | 237 | { |
225 | struct uio_device *idev = dev_get_drvdata(dev); | 238 | struct uio_device *idev = dev_get_drvdata(dev); |
226 | return sprintf(buf, "%s\n", idev->info->version); | 239 | int ret; |
240 | |||
241 | mutex_lock(&idev->info_lock); | ||
242 | if (!idev->info) { | ||
243 | ret = -EINVAL; | ||
244 | dev_err(dev, "the device has been unregistered\n"); | ||
245 | goto out; | ||
246 | } | ||
247 | |||
248 | ret = sprintf(buf, "%s\n", idev->info->version); | ||
249 | |||
250 | out: | ||
251 | mutex_unlock(&idev->info_lock); | ||
252 | return ret; | ||
227 | } | 253 | } |
228 | static DEVICE_ATTR_RO(version); | 254 | static DEVICE_ATTR_RO(version); |
229 | 255 | ||
@@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify); | |||
415 | static irqreturn_t uio_interrupt(int irq, void *dev_id) | 441 | static irqreturn_t uio_interrupt(int irq, void *dev_id) |
416 | { | 442 | { |
417 | struct uio_device *idev = (struct uio_device *)dev_id; | 443 | struct uio_device *idev = (struct uio_device *)dev_id; |
418 | irqreturn_t ret = idev->info->handler(irq, idev->info); | 444 | irqreturn_t ret; |
419 | 445 | ||
446 | mutex_lock(&idev->info_lock); | ||
447 | |||
448 | ret = idev->info->handler(irq, idev->info); | ||
420 | if (ret == IRQ_HANDLED) | 449 | if (ret == IRQ_HANDLED) |
421 | uio_event_notify(idev->info); | 450 | uio_event_notify(idev->info); |
422 | 451 | ||
452 | mutex_unlock(&idev->info_lock); | ||
423 | return ret; | 453 | return ret; |
424 | } | 454 | } |
425 | 455 | ||
@@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep) | |||
433 | struct uio_device *idev; | 463 | struct uio_device *idev; |
434 | struct uio_listener *listener; | 464 | struct uio_listener *listener; |
435 | int ret = 0; | 465 | int ret = 0; |
436 | unsigned long flags; | ||
437 | 466 | ||
438 | mutex_lock(&minor_lock); | 467 | mutex_lock(&minor_lock); |
439 | idev = idr_find(&uio_idr, iminor(inode)); | 468 | idev = idr_find(&uio_idr, iminor(inode)); |
@@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep) | |||
460 | listener->event_count = atomic_read(&idev->event); | 489 | listener->event_count = atomic_read(&idev->event); |
461 | filep->private_data = listener; | 490 | filep->private_data = listener; |
462 | 491 | ||
463 | spin_lock_irqsave(&idev->info_lock, flags); | 492 | mutex_lock(&idev->info_lock); |
493 | if (!idev->info) { | ||
494 | mutex_unlock(&idev->info_lock); | ||
495 | ret = -EINVAL; | ||
496 | goto err_alloc_listener; | ||
497 | } | ||
498 | |||
464 | if (idev->info && idev->info->open) | 499 | if (idev->info && idev->info->open) |
465 | ret = idev->info->open(idev->info, inode); | 500 | ret = idev->info->open(idev->info, inode); |
466 | spin_unlock_irqrestore(&idev->info_lock, flags); | 501 | mutex_unlock(&idev->info_lock); |
467 | if (ret) | 502 | if (ret) |
468 | goto err_infoopen; | 503 | goto err_infoopen; |
469 | 504 | ||
@@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep) | |||
495 | int ret = 0; | 530 | int ret = 0; |
496 | struct uio_listener *listener = filep->private_data; | 531 | struct uio_listener *listener = filep->private_data; |
497 | struct uio_device *idev = listener->dev; | 532 | struct uio_device *idev = listener->dev; |
498 | unsigned long flags; | ||
499 | 533 | ||
500 | spin_lock_irqsave(&idev->info_lock, flags); | 534 | mutex_lock(&idev->info_lock); |
501 | if (idev->info && idev->info->release) | 535 | if (idev->info && idev->info->release) |
502 | ret = idev->info->release(idev->info, inode); | 536 | ret = idev->info->release(idev->info, inode); |
503 | spin_unlock_irqrestore(&idev->info_lock, flags); | 537 | mutex_unlock(&idev->info_lock); |
504 | 538 | ||
505 | module_put(idev->owner); | 539 | module_put(idev->owner); |
506 | kfree(listener); | 540 | kfree(listener); |
@@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) | |||
513 | struct uio_listener *listener = filep->private_data; | 547 | struct uio_listener *listener = filep->private_data; |
514 | struct uio_device *idev = listener->dev; | 548 | struct uio_device *idev = listener->dev; |
515 | __poll_t ret = 0; | 549 | __poll_t ret = 0; |
516 | unsigned long flags; | ||
517 | 550 | ||
518 | spin_lock_irqsave(&idev->info_lock, flags); | 551 | mutex_lock(&idev->info_lock); |
519 | if (!idev->info || !idev->info->irq) | 552 | if (!idev->info || !idev->info->irq) |
520 | ret = -EIO; | 553 | ret = -EIO; |
521 | spin_unlock_irqrestore(&idev->info_lock, flags); | 554 | mutex_unlock(&idev->info_lock); |
522 | 555 | ||
523 | if (ret) | 556 | if (ret) |
524 | return ret; | 557 | return ret; |
@@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf, | |||
537 | DECLARE_WAITQUEUE(wait, current); | 570 | DECLARE_WAITQUEUE(wait, current); |
538 | ssize_t retval = 0; | 571 | ssize_t retval = 0; |
539 | s32 event_count; | 572 | s32 event_count; |
540 | unsigned long flags; | ||
541 | 573 | ||
542 | spin_lock_irqsave(&idev->info_lock, flags); | 574 | mutex_lock(&idev->info_lock); |
543 | if (!idev->info || !idev->info->irq) | 575 | if (!idev->info || !idev->info->irq) |
544 | retval = -EIO; | 576 | retval = -EIO; |
545 | spin_unlock_irqrestore(&idev->info_lock, flags); | 577 | mutex_unlock(&idev->info_lock); |
546 | 578 | ||
547 | if (retval) | 579 | if (retval) |
548 | return retval; | 580 | return retval; |
@@ -592,9 +624,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, | |||
592 | struct uio_device *idev = listener->dev; | 624 | struct uio_device *idev = listener->dev; |
593 | ssize_t retval; | 625 | ssize_t retval; |
594 | s32 irq_on; | 626 | s32 irq_on; |
595 | unsigned long flags; | ||
596 | 627 | ||
597 | spin_lock_irqsave(&idev->info_lock, flags); | 628 | mutex_lock(&idev->info_lock); |
629 | if (!idev->info) { | ||
630 | retval = -EINVAL; | ||
631 | goto out; | ||
632 | } | ||
633 | |||
598 | if (!idev->info || !idev->info->irq) { | 634 | if (!idev->info || !idev->info->irq) { |
599 | retval = -EIO; | 635 | retval = -EIO; |
600 | goto out; | 636 | goto out; |
@@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, | |||
618 | retval = idev->info->irqcontrol(idev->info, irq_on); | 654 | retval = idev->info->irqcontrol(idev->info, irq_on); |
619 | 655 | ||
620 | out: | 656 | out: |
621 | spin_unlock_irqrestore(&idev->info_lock, flags); | 657 | mutex_unlock(&idev->info_lock); |
622 | return retval ? retval : sizeof(s32); | 658 | return retval ? retval : sizeof(s32); |
623 | } | 659 | } |
624 | 660 | ||
@@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) | |||
640 | struct page *page; | 676 | struct page *page; |
641 | unsigned long offset; | 677 | unsigned long offset; |
642 | void *addr; | 678 | void *addr; |
679 | int ret = 0; | ||
680 | int mi; | ||
643 | 681 | ||
644 | int mi = uio_find_mem_index(vmf->vma); | 682 | mutex_lock(&idev->info_lock); |
645 | if (mi < 0) | 683 | if (!idev->info) { |
646 | return VM_FAULT_SIGBUS; | 684 | ret = VM_FAULT_SIGBUS; |
685 | goto out; | ||
686 | } | ||
687 | |||
688 | mi = uio_find_mem_index(vmf->vma); | ||
689 | if (mi < 0) { | ||
690 | ret = VM_FAULT_SIGBUS; | ||
691 | goto out; | ||
692 | } | ||
647 | 693 | ||
648 | /* | 694 | /* |
649 | * We need to subtract mi because userspace uses offset = N*PAGE_SIZE | 695 | * We need to subtract mi because userspace uses offset = N*PAGE_SIZE |
@@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) | |||
658 | page = vmalloc_to_page(addr); | 704 | page = vmalloc_to_page(addr); |
659 | get_page(page); | 705 | get_page(page); |
660 | vmf->page = page; | 706 | vmf->page = page; |
661 | return 0; | 707 | |
708 | out: | ||
709 | mutex_unlock(&idev->info_lock); | ||
710 | |||
711 | return ret; | ||
662 | } | 712 | } |
663 | 713 | ||
664 | static const struct vm_operations_struct uio_logical_vm_ops = { | 714 | static const struct vm_operations_struct uio_logical_vm_ops = { |
@@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma) | |||
683 | struct uio_device *idev = vma->vm_private_data; | 733 | struct uio_device *idev = vma->vm_private_data; |
684 | int mi = uio_find_mem_index(vma); | 734 | int mi = uio_find_mem_index(vma); |
685 | struct uio_mem *mem; | 735 | struct uio_mem *mem; |
736 | |||
686 | if (mi < 0) | 737 | if (mi < 0) |
687 | return -EINVAL; | 738 | return -EINVAL; |
688 | mem = idev->info->mem + mi; | 739 | mem = idev->info->mem + mi; |
@@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) | |||
724 | 775 | ||
725 | vma->vm_private_data = idev; | 776 | vma->vm_private_data = idev; |
726 | 777 | ||
778 | mutex_lock(&idev->info_lock); | ||
779 | if (!idev->info) { | ||
780 | ret = -EINVAL; | ||
781 | goto out; | ||
782 | } | ||
783 | |||
727 | mi = uio_find_mem_index(vma); | 784 | mi = uio_find_mem_index(vma); |
728 | if (mi < 0) | 785 | if (mi < 0) { |
729 | return -EINVAL; | 786 | ret = -EINVAL; |
787 | goto out; | ||
788 | } | ||
730 | 789 | ||
731 | requested_pages = vma_pages(vma); | 790 | requested_pages = vma_pages(vma); |
732 | actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) | 791 | actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) |
733 | + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; | 792 | + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; |
734 | if (requested_pages > actual_pages) | 793 | if (requested_pages > actual_pages) { |
735 | return -EINVAL; | 794 | ret = -EINVAL; |
795 | goto out; | ||
796 | } | ||
736 | 797 | ||
737 | if (idev->info->mmap) { | 798 | if (idev->info->mmap) { |
738 | ret = idev->info->mmap(idev->info, vma); | 799 | ret = idev->info->mmap(idev->info, vma); |
739 | return ret; | 800 | goto out; |
740 | } | 801 | } |
741 | 802 | ||
742 | switch (idev->info->mem[mi].memtype) { | 803 | switch (idev->info->mem[mi].memtype) { |
743 | case UIO_MEM_PHYS: | 804 | case UIO_MEM_PHYS: |
744 | return uio_mmap_physical(vma); | 805 | ret = uio_mmap_physical(vma); |
806 | break; | ||
745 | case UIO_MEM_LOGICAL: | 807 | case UIO_MEM_LOGICAL: |
746 | case UIO_MEM_VIRTUAL: | 808 | case UIO_MEM_VIRTUAL: |
747 | return uio_mmap_logical(vma); | 809 | ret = uio_mmap_logical(vma); |
810 | break; | ||
748 | default: | 811 | default: |
749 | return -EINVAL; | 812 | ret = -EINVAL; |
750 | } | 813 | } |
814 | |||
815 | out: | ||
816 | mutex_unlock(&idev->info_lock); | ||
817 | return 0; | ||
751 | } | 818 | } |
752 | 819 | ||
753 | static const struct file_operations uio_fops = { | 820 | static const struct file_operations uio_fops = { |
@@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner, | |||
865 | 932 | ||
866 | idev->owner = owner; | 933 | idev->owner = owner; |
867 | idev->info = info; | 934 | idev->info = info; |
868 | spin_lock_init(&idev->info_lock); | 935 | mutex_init(&idev->info_lock); |
869 | init_waitqueue_head(&idev->wait); | 936 | init_waitqueue_head(&idev->wait); |
870 | atomic_set(&idev->event, 0); | 937 | atomic_set(&idev->event, 0); |
871 | 938 | ||
@@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner, | |||
902 | * FDs at the time of unregister and therefore may not be | 969 | * FDs at the time of unregister and therefore may not be |
903 | * freed until they are released. | 970 | * freed until they are released. |
904 | */ | 971 | */ |
905 | ret = request_irq(info->irq, uio_interrupt, | 972 | ret = request_threaded_irq(info->irq, NULL, uio_interrupt, |
906 | info->irq_flags, info->name, idev); | 973 | info->irq_flags, info->name, idev); |
974 | |||
907 | if (ret) | 975 | if (ret) |
908 | goto err_request_irq; | 976 | goto err_request_irq; |
909 | } | 977 | } |
@@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device); | |||
928 | void uio_unregister_device(struct uio_info *info) | 996 | void uio_unregister_device(struct uio_info *info) |
929 | { | 997 | { |
930 | struct uio_device *idev; | 998 | struct uio_device *idev; |
931 | unsigned long flags; | ||
932 | 999 | ||
933 | if (!info || !info->uio_dev) | 1000 | if (!info || !info->uio_dev) |
934 | return; | 1001 | return; |
@@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info) | |||
937 | 1004 | ||
938 | uio_free_minor(idev); | 1005 | uio_free_minor(idev); |
939 | 1006 | ||
1007 | mutex_lock(&idev->info_lock); | ||
940 | uio_dev_del_attributes(idev); | 1008 | uio_dev_del_attributes(idev); |
941 | 1009 | ||
942 | if (info->irq && info->irq != UIO_IRQ_CUSTOM) | 1010 | if (info->irq && info->irq != UIO_IRQ_CUSTOM) |
943 | free_irq(info->irq, idev); | 1011 | free_irq(info->irq, idev); |
944 | 1012 | ||
945 | spin_lock_irqsave(&idev->info_lock, flags); | ||
946 | idev->info = NULL; | 1013 | idev->info = NULL; |
947 | spin_unlock_irqrestore(&idev->info_lock, flags); | 1014 | mutex_unlock(&idev->info_lock); |
948 | 1015 | ||
949 | device_unregister(&idev->dev); | 1016 | device_unregister(&idev->dev); |
950 | 1017 | ||
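Note on the uio.c changes above: info_lock becomes a mutex so that idev->info can be re-checked and driver callbacks (open, release, mmap, irqcontrol) can be invoked under it, and every file operation now copes with idev->info being NULL after uio_unregister_device(). Because a mutex cannot be taken from hard-irq context, the interrupt is requested with request_threaded_irq() and a NULL primary handler, moving uio_interrupt() into a kernel thread. A hedged sketch of that pattern (placeholder names; depending on the interrupt type IRQF_ONESHOT is generally required when no primary handler is given, whereas the patch passes the caller's irq_flags through unchanged):

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct example_dev {
	struct mutex lock;
	void *info;			/* NULL once the device is gone */
};

/* Sketch only: with a NULL primary handler this runs in a kernel thread,
 * where taking a mutex is legal.
 */
static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	struct example_dev *edev = dev_id;
	irqreturn_t ret = IRQ_NONE;

	mutex_lock(&edev->lock);	/* not allowed in hard-irq context */
	if (edev->info)
		ret = IRQ_HANDLED;	/* ... dispatch to the real handler ... */
	mutex_unlock(&edev->lock);

	return ret;
}

static int example_request(unsigned int irq, struct example_dev *edev)
{
	return request_threaded_irq(irq, NULL, example_irq_thread,
				    IRQF_ONESHOT, "example", edev);
}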
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index c55def2f1320..097057d2eacf 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
378 | /* Corsair K70 RGB */ | 378 | /* Corsair K70 RGB */ |
379 | { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, | 379 | { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, |
380 | 380 | ||
381 | /* Corsair Strafe */ | ||
382 | { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | | ||
383 | USB_QUIRK_DELAY_CTRL_MSG }, | ||
384 | |||
381 | /* Corsair Strafe RGB */ | 385 | /* Corsair Strafe RGB */ |
382 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | | 386 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | |
383 | USB_QUIRK_DELAY_CTRL_MSG }, | 387 | USB_QUIRK_DELAY_CTRL_MSG }, |
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig index f0cdf89b8503..83ba8a2eb6af 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig +++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig | |||
@@ -2,6 +2,7 @@ | |||
2 | config USB_ASPEED_VHUB | 2 | config USB_ASPEED_VHUB |
3 | tristate "Aspeed vHub UDC driver" | 3 | tristate "Aspeed vHub UDC driver" |
4 | depends on ARCH_ASPEED || COMPILE_TEST | 4 | depends on ARCH_ASPEED || COMPILE_TEST |
5 | depends on USB_LIBCOMPOSITE | ||
5 | help | 6 | help |
6 | USB peripheral controller for the Aspeed AST2500 family | 7 | USB peripheral controller for the Aspeed AST2500 family |
7 | SoCs supporting the "vHub" functionality and USB2.0 | 8 | SoCs supporting the "vHub" functionality and USB2.0 |
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 73d5a0e2a225..86cff5c28eff 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c | |||
@@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci) | |||
508 | return 0; | 508 | return 0; |
509 | } | 509 | } |
510 | 510 | ||
511 | static void xhci_do_dbc_stop(struct xhci_hcd *xhci) | 511 | static int xhci_do_dbc_stop(struct xhci_hcd *xhci) |
512 | { | 512 | { |
513 | struct xhci_dbc *dbc = xhci->dbc; | 513 | struct xhci_dbc *dbc = xhci->dbc; |
514 | 514 | ||
515 | if (dbc->state == DS_DISABLED) | 515 | if (dbc->state == DS_DISABLED) |
516 | return; | 516 | return -1; |
517 | 517 | ||
518 | writel(0, &dbc->regs->control); | 518 | writel(0, &dbc->regs->control); |
519 | xhci_dbc_mem_cleanup(xhci); | 519 | xhci_dbc_mem_cleanup(xhci); |
520 | dbc->state = DS_DISABLED; | 520 | dbc->state = DS_DISABLED; |
521 | |||
522 | return 0; | ||
521 | } | 523 | } |
522 | 524 | ||
523 | static int xhci_dbc_start(struct xhci_hcd *xhci) | 525 | static int xhci_dbc_start(struct xhci_hcd *xhci) |
@@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci) | |||
544 | 546 | ||
545 | static void xhci_dbc_stop(struct xhci_hcd *xhci) | 547 | static void xhci_dbc_stop(struct xhci_hcd *xhci) |
546 | { | 548 | { |
549 | int ret; | ||
547 | unsigned long flags; | 550 | unsigned long flags; |
548 | struct xhci_dbc *dbc = xhci->dbc; | 551 | struct xhci_dbc *dbc = xhci->dbc; |
549 | struct dbc_port *port = &dbc->port; | 552 | struct dbc_port *port = &dbc->port; |
@@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci) | |||
556 | xhci_dbc_tty_unregister_device(xhci); | 559 | xhci_dbc_tty_unregister_device(xhci); |
557 | 560 | ||
558 | spin_lock_irqsave(&dbc->lock, flags); | 561 | spin_lock_irqsave(&dbc->lock, flags); |
559 | xhci_do_dbc_stop(xhci); | 562 | ret = xhci_do_dbc_stop(xhci); |
560 | spin_unlock_irqrestore(&dbc->lock, flags); | 563 | spin_unlock_irqrestore(&dbc->lock, flags); |
561 | 564 | ||
562 | pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); | 565 | if (!ret) |
566 | pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); | ||
563 | } | 567 | } |
564 | 568 | ||
565 | static void | 569 | static void |
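Note on the xhci-dbgcap.c change above: xhci_do_dbc_stop() now reports whether it actually stopped the DbC, and xhci_dbc_stop() only drops the runtime-PM reference when a stop really happened, so an already-disabled controller no longer causes an unbalanced pm_runtime_put_sync(). A minimal sketch of the balance being protected (assuming, as is usual, that the start path took the matching reference with pm_runtime_get_sync(); placeholder function):

#include <linux/pm_runtime.h>

static void example_stop(struct device *dev, bool *running)
{
	if (!*running)
		return;			/* never started: nothing to put */

	*running = false;
	pm_runtime_put_sync(dev);	/* pairs with the get in the start path */
}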
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 8a62eee9eee1..ef350c33dc4a 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring( | |||
595 | if (!ep->stream_info) | 595 | if (!ep->stream_info) |
596 | return NULL; | 596 | return NULL; |
597 | 597 | ||
598 | if (stream_id > ep->stream_info->num_streams) | 598 | if (stream_id >= ep->stream_info->num_streams) |
599 | return NULL; | 599 | return NULL; |
600 | return ep->stream_info->stream_rings[stream_id]; | 600 | return ep->stream_info->stream_rings[stream_id]; |
601 | } | 601 | } |
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 8abb6cbbd98a..3be40eaa1ac9 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c | |||
@@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, | |||
396 | loff_t *ppos) | 396 | loff_t *ppos) |
397 | { | 397 | { |
398 | struct usb_yurex *dev; | 398 | struct usb_yurex *dev; |
399 | int retval = 0; | 399 | int len = 0; |
400 | int bytes_read = 0; | ||
401 | char in_buffer[20]; | 400 | char in_buffer[20]; |
402 | unsigned long flags; | 401 | unsigned long flags; |
403 | 402 | ||
@@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, | |||
405 | 404 | ||
406 | mutex_lock(&dev->io_mutex); | 405 | mutex_lock(&dev->io_mutex); |
407 | if (!dev->interface) { /* already disconnected */ | 406 | if (!dev->interface) { /* already disconnected */ |
408 | retval = -ENODEV; | 407 | mutex_unlock(&dev->io_mutex); |
409 | goto exit; | 408 | return -ENODEV; |
410 | } | 409 | } |
411 | 410 | ||
412 | spin_lock_irqsave(&dev->lock, flags); | 411 | spin_lock_irqsave(&dev->lock, flags); |
413 | bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu); | 412 | len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); |
414 | spin_unlock_irqrestore(&dev->lock, flags); | 413 | spin_unlock_irqrestore(&dev->lock, flags); |
415 | |||
416 | if (*ppos < bytes_read) { | ||
417 | if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos)) | ||
418 | retval = -EFAULT; | ||
419 | else { | ||
420 | retval = bytes_read - *ppos; | ||
421 | *ppos += bytes_read; | ||
422 | } | ||
423 | } | ||
424 | |||
425 | exit: | ||
426 | mutex_unlock(&dev->io_mutex); | 414 | mutex_unlock(&dev->io_mutex); |
427 | return retval; | 415 | |
416 | return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); | ||
428 | } | 417 | } |
429 | 418 | ||
430 | static ssize_t yurex_write(struct file *file, const char __user *user_buffer, | 419 | static ssize_t yurex_write(struct file *file, const char __user *user_buffer, |
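Note on the yurex.c change above: yurex_read() is rewritten around simple_read_from_buffer(), which clamps the request against the available data, performs the copy_to_user(), advances *ppos and returns the number of bytes copied or -EFAULT, removing the hand-rolled *ppos bookkeeping. A minimal sketch of a read handler built on the same helper (placeholder code):

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t example_read(struct file *file, char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char kbuf[20];
	int len = scnprintf(kbuf, sizeof(kbuf), "%lld\n", 1234567LL);

	return simple_read_from_buffer(buffer, count, ppos, kbuf, len);
}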
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index bdd7a5ad3bf1..3bb1fff02bed 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c | |||
@@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev, | |||
128 | r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, | 128 | r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, |
129 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, | 129 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, |
130 | value, index, buf, bufsize, DEFAULT_TIMEOUT); | 130 | value, index, buf, bufsize, DEFAULT_TIMEOUT); |
131 | if (r < bufsize) { | 131 | if (r < (int)bufsize) { |
132 | if (r >= 0) { | 132 | if (r >= 0) { |
133 | dev_err(&dev->dev, | 133 | dev_err(&dev->dev, |
134 | "short control message received (%d < %u)\n", | 134 | "short control message received (%d < %u)\n", |
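Note on the ch341.c change above: usb_control_msg() returns an int that may be a negative errno, and bufsize is unsigned, so without the (int) cast the comparison would promote a negative return value to a huge unsigned number and skip the short-transfer/error branch. A small sketch of the check (placeholder function):

#include <linux/errno.h>

/* Sketch only: keep the comparison signed so a negative errno is detected. */
static int example_check_ctrl_in(int r, unsigned int bufsize)
{
	if (r < (int)bufsize)
		return r < 0 ? r : -EIO;	/* error, or short control transfer */
	return 0;
}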
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index ee0cc1d90b51..626a29d9aa58 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -149,6 +149,7 @@ static const struct usb_device_id id_table[] = { | |||
149 | { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ | 149 | { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ |
150 | { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ | 150 | { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ |
151 | { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ | 151 | { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ |
152 | { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ | ||
152 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ | 153 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ |
153 | { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ | 154 | { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ |
154 | { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ | 155 | { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ |
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 5169624d8b11..38d43c4b7ce5 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c | |||
@@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial, | |||
369 | 3, /* get pins */ | 369 | 3, /* get pins */ |
370 | USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, | 370 | USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, |
371 | 0, 0, data, 1, 2000); | 371 | 0, 0, data, 1, 2000); |
372 | if (rc >= 0) | 372 | if (rc == 1) |
373 | *value = *data; | 373 | *value = *data; |
374 | else if (rc >= 0) | ||
375 | rc = -EIO; | ||
374 | 376 | ||
375 | kfree(data); | 377 | kfree(data); |
376 | return rc; | 378 | return rc; |
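Note on the keyspan_pda.c change above: usb_control_msg() returns the number of bytes actually transferred (or a negative errno), so the single status byte from the vendor "get pins" request is only trusted when exactly one byte arrived; any other non-negative result is turned into -EIO instead of consuming an uninitialized buffer. A hedged sketch of the same pattern (placeholder request and recipient values):

#include <linux/slab.h>
#include <linux/usb.h>

static int example_read_status_byte(struct usb_device *udev, u8 request,
				    u8 *value)
{
	u8 *data = kmalloc(1, GFP_KERNEL);	/* DMA-able buffer, not the stack */
	int rc;

	if (!data)
		return -ENOMEM;

	rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			     USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN,
			     0, 0, data, 1, 2000);
	if (rc == 1)
		*value = *data;
	else if (rc >= 0)
		rc = -EIO;			/* short read: data is not valid */

	kfree(data);
	return rc < 0 ? rc : 0;
}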
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index fdceb46d9fc6..b580b4c7fa48 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb) | |||
468 | } | 468 | } |
469 | 469 | ||
470 | dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); | 470 | dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); |
471 | if (urb->actual_length < 1) | ||
472 | goto out; | ||
473 | |||
471 | dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__, | 474 | dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__, |
472 | mos7840_port->MsrLsr, mos7840_port->port_num); | 475 | mos7840_port->MsrLsr, mos7840_port->port_num); |
473 | data = urb->transfer_buffer; | 476 | data = urb->transfer_buffer; |
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index c732fd703961..74e0cdabfcc8 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c | |||
@@ -726,6 +726,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) | |||
726 | 726 | ||
727 | tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma); | 727 | tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma); |
728 | 728 | ||
729 | port->supply_voltage = mv; | ||
730 | port->current_limit = max_ma; | ||
731 | |||
729 | if (port->tcpc->set_current_limit) | 732 | if (port->tcpc->set_current_limit) |
730 | ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); | 733 | ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); |
731 | 734 | ||
@@ -2693,8 +2696,6 @@ static void tcpm_reset_port(struct tcpm_port *port) | |||
2693 | tcpm_set_attached_state(port, false); | 2696 | tcpm_set_attached_state(port, false); |
2694 | port->try_src_count = 0; | 2697 | port->try_src_count = 0; |
2695 | port->try_snk_count = 0; | 2698 | port->try_snk_count = 0; |
2696 | port->supply_voltage = 0; | ||
2697 | port->current_limit = 0; | ||
2698 | port->usb_type = POWER_SUPPLY_USB_TYPE_C; | 2699 | port->usb_type = POWER_SUPPLY_USB_TYPE_C; |
2699 | 2700 | ||
2700 | power_supply_changed(port->psy); | 2701 | power_supply_changed(port->psy); |
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 24ee2605b9f0..42dc1d3d71cf 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig | |||
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX | |||
28 | def_bool y if !S390 | 28 | def_bool y if !S390 |
29 | 29 | ||
30 | config VFIO_PCI_IGD | 30 | config VFIO_PCI_IGD |
31 | depends on VFIO_PCI | 31 | bool "VFIO PCI extensions for Intel graphics (GVT-d)" |
32 | def_bool y if X86 | 32 | depends on VFIO_PCI && X86 |
33 | default y | ||
34 | help | ||
35 | Support for Intel IGD specific extensions to enable direct | ||
36 | assignment to virtual machines. This includes exposing an IGD | ||
37 | specific firmware table and read-only copies of the host bridge | ||
38 | and LPC bridge config space. | ||
39 | |||
40 | To enable Intel IGD assignment through vfio-pci, say Y. | ||
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 2c75b33db4ac..3e5b17710a4f 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, | |||
343 | struct page *page[1]; | 343 | struct page *page[1]; |
344 | struct vm_area_struct *vma; | 344 | struct vm_area_struct *vma; |
345 | struct vm_area_struct *vmas[1]; | 345 | struct vm_area_struct *vmas[1]; |
346 | unsigned int flags = 0; | ||
346 | int ret; | 347 | int ret; |
347 | 348 | ||
349 | if (prot & IOMMU_WRITE) | ||
350 | flags |= FOLL_WRITE; | ||
351 | |||
352 | down_read(&mm->mmap_sem); | ||
348 | if (mm == current->mm) { | 353 | if (mm == current->mm) { |
349 | ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), | 354 | ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas); |
350 | page, vmas); | ||
351 | } else { | 355 | } else { |
352 | unsigned int flags = 0; | ||
353 | |||
354 | if (prot & IOMMU_WRITE) | ||
355 | flags |= FOLL_WRITE; | ||
356 | |||
357 | down_read(&mm->mmap_sem); | ||
358 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, | 356 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, |
359 | vmas, NULL); | 357 | vmas, NULL); |
360 | /* | 358 | /* |
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, | |||
368 | ret = -EOPNOTSUPP; | 366 | ret = -EOPNOTSUPP; |
369 | put_page(page[0]); | 367 | put_page(page[0]); |
370 | } | 368 | } |
371 | up_read(&mm->mmap_sem); | ||
372 | } | 369 | } |
370 | up_read(&mm->mmap_sem); | ||
373 | 371 | ||
374 | if (ret == 1) { | 372 | if (ret == 1) { |
375 | *pfn = page_to_pfn(page[0]); | 373 | *pfn = page_to_pfn(page[0]); |
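Note on the vfio_iommu_type1.c change above: the FOLL_* flags are now built once for both paths, and mm->mmap_sem is held around the get_user_pages_longterm() call as well, not just around get_user_pages_remote(), since both variants expect the semaphore to be held. A minimal sketch of the resulting pinning pattern (placeholder function):

#include <linux/mm.h>
#include <linux/sched.h>

static int example_pin_one_page(struct mm_struct *mm, unsigned long vaddr,
				bool writable, struct page **page)
{
	unsigned int flags = writable ? FOLL_WRITE : 0;
	long ret;

	down_read(&mm->mmap_sem);
	if (mm == current->mm)
		ret = get_user_pages_longterm(vaddr, 1, flags, page, NULL);
	else
		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
					    NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret == 1 ? 0 : (ret < 0 ? (int)ret : -EFAULT);
}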
diff --git a/fs/autofs/Makefile b/fs/autofs/Makefile index 43fedde15c26..1f85d35ec8b7 100644 --- a/fs/autofs/Makefile +++ b/fs/autofs/Makefile | |||
@@ -2,6 +2,6 @@ | |||
2 | # Makefile for the linux autofs-filesystem routines. | 2 | # Makefile for the linux autofs-filesystem routines. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_AUTOFS_FS) += autofs.o | 5 | obj-$(CONFIG_AUTOFS_FS) += autofs4.o |
6 | 6 | ||
7 | autofs-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o | 7 | autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o |
diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c index ea4ca1445ab7..86eafda4a652 100644 --- a/fs/autofs/dev-ioctl.c +++ b/fs/autofs/dev-ioctl.c | |||
@@ -135,6 +135,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param) | |||
135 | cmd); | 135 | cmd); |
136 | goto out; | 136 | goto out; |
137 | } | 137 | } |
138 | } else { | ||
139 | unsigned int inr = _IOC_NR(cmd); | ||
140 | |||
141 | if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD || | ||
142 | inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD || | ||
143 | inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) { | ||
144 | err = -EINVAL; | ||
145 | goto out; | ||
146 | } | ||
138 | } | 147 | } |
139 | 148 | ||
140 | err = 0; | 149 | err = 0; |
@@ -271,7 +280,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp, | |||
271 | dev_t devid; | 280 | dev_t devid; |
272 | int err, fd; | 281 | int err, fd; |
273 | 282 | ||
274 | /* param->path has already been checked */ | 283 | /* param->path has been checked in validate_dev_ioctl() */ |
284 | |||
275 | if (!param->openmount.devid) | 285 | if (!param->openmount.devid) |
276 | return -EINVAL; | 286 | return -EINVAL; |
277 | 287 | ||
@@ -433,10 +443,7 @@ static int autofs_dev_ioctl_requester(struct file *fp, | |||
433 | dev_t devid; | 443 | dev_t devid; |
434 | int err = -ENOENT; | 444 | int err = -ENOENT; |
435 | 445 | ||
436 | if (param->size <= AUTOFS_DEV_IOCTL_SIZE) { | 446 | /* param->path has been checked in validate_dev_ioctl() */ |
437 | err = -EINVAL; | ||
438 | goto out; | ||
439 | } | ||
440 | 447 | ||
441 | devid = sbi->sb->s_dev; | 448 | devid = sbi->sb->s_dev; |
442 | 449 | ||
@@ -521,10 +528,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp, | |||
521 | unsigned int devid, magic; | 528 | unsigned int devid, magic; |
522 | int err = -ENOENT; | 529 | int err = -ENOENT; |
523 | 530 | ||
524 | if (param->size <= AUTOFS_DEV_IOCTL_SIZE) { | 531 | /* param->path has been checked in validate_dev_ioctl() */ |
525 | err = -EINVAL; | ||
526 | goto out; | ||
527 | } | ||
528 | 532 | ||
529 | name = param->path; | 533 | name = param->path; |
530 | type = param->ismountpoint.in.type; | 534 | type = param->ismountpoint.in.type; |
diff --git a/fs/autofs/init.c b/fs/autofs/init.c index cc9447e1903f..79ae07d9592f 100644 --- a/fs/autofs/init.c +++ b/fs/autofs/init.c | |||
@@ -23,7 +23,7 @@ static struct file_system_type autofs_fs_type = { | |||
23 | .kill_sb = autofs_kill_sb, | 23 | .kill_sb = autofs_kill_sb, |
24 | }; | 24 | }; |
25 | MODULE_ALIAS_FS("autofs"); | 25 | MODULE_ALIAS_FS("autofs"); |
26 | MODULE_ALIAS("autofs4"); | 26 | MODULE_ALIAS("autofs"); |
27 | 27 | ||
28 | static int __init init_autofs_fs(void) | 28 | static int __init init_autofs_fs(void) |
29 | { | 29 | { |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 0ac456b52bdd..816cc921cf36 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1259,9 +1259,8 @@ static int load_elf_library(struct file *file) | |||
1259 | goto out_free_ph; | 1259 | goto out_free_ph; |
1260 | } | 1260 | } |
1261 | 1261 | ||
1262 | len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + | 1262 | len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr); |
1263 | ELF_MIN_ALIGN - 1); | 1263 | bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr); |
1264 | bss = eppnt->p_memsz + eppnt->p_vaddr; | ||
1265 | if (bss > len) { | 1264 | if (bss > len) { |
1266 | error = vm_brk(len, bss - len); | 1265 | error = vm_brk(len, bss - len); |
1267 | if (error) | 1266 | if (error) |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index bd78da59a4fd..c923c7854027 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -423,7 +423,7 @@ struct smb_version_operations { | |||
423 | void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int, | 423 | void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int, |
424 | bool *); | 424 | bool *); |
425 | /* create lease context buffer for CREATE request */ | 425 | /* create lease context buffer for CREATE request */ |
426 | char * (*create_lease_buf)(u8 *, u8); | 426 | char * (*create_lease_buf)(u8 *lease_key, u8 oplock); |
427 | /* parse lease context buffer and return oplock/epoch info */ | 427 | /* parse lease context buffer and return oplock/epoch info */ |
428 | __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey); | 428 | __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey); |
429 | ssize_t (*copychunk_range)(const unsigned int, | 429 | ssize_t (*copychunk_range)(const unsigned int, |
@@ -1416,6 +1416,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server, | |||
1416 | /* one of these for every pending CIFS request to the server */ | 1416 | /* one of these for every pending CIFS request to the server */ |
1417 | struct mid_q_entry { | 1417 | struct mid_q_entry { |
1418 | struct list_head qhead; /* mids waiting on reply from this server */ | 1418 | struct list_head qhead; /* mids waiting on reply from this server */ |
1419 | struct kref refcount; | ||
1419 | struct TCP_Server_Info *server; /* server corresponding to this mid */ | 1420 | struct TCP_Server_Info *server; /* server corresponding to this mid */ |
1420 | __u64 mid; /* multiplex id */ | 1421 | __u64 mid; /* multiplex id */ |
1421 | __u32 pid; /* process id */ | 1422 | __u32 pid; /* process id */ |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 03018be17283..1890f534c88b 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -82,6 +82,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, | |||
82 | struct TCP_Server_Info *server); | 82 | struct TCP_Server_Info *server); |
83 | extern void DeleteMidQEntry(struct mid_q_entry *midEntry); | 83 | extern void DeleteMidQEntry(struct mid_q_entry *midEntry); |
84 | extern void cifs_delete_mid(struct mid_q_entry *mid); | 84 | extern void cifs_delete_mid(struct mid_q_entry *mid); |
85 | extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry); | ||
85 | extern void cifs_wake_up_task(struct mid_q_entry *mid); | 86 | extern void cifs_wake_up_task(struct mid_q_entry *mid); |
86 | extern int cifs_handle_standard(struct TCP_Server_Info *server, | 87 | extern int cifs_handle_standard(struct TCP_Server_Info *server, |
87 | struct mid_q_entry *mid); | 88 | struct mid_q_entry *mid); |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index d352da325de3..93408eab92e7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -157,8 +157,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) | |||
157 | * greater than cifs socket timeout which is 7 seconds | 157 | * greater than cifs socket timeout which is 7 seconds |
158 | */ | 158 | */ |
159 | while (server->tcpStatus == CifsNeedReconnect) { | 159 | while (server->tcpStatus == CifsNeedReconnect) { |
160 | wait_event_interruptible_timeout(server->response_q, | 160 | rc = wait_event_interruptible_timeout(server->response_q, |
161 | (server->tcpStatus != CifsNeedReconnect), 10 * HZ); | 161 | (server->tcpStatus != CifsNeedReconnect), |
162 | 10 * HZ); | ||
163 | if (rc < 0) { | ||
164 | cifs_dbg(FYI, "%s: aborting reconnect due to a received" | ||
165 | " signal by the process\n", __func__); | ||
166 | return -ERESTARTSYS; | ||
167 | } | ||
162 | 168 | ||
163 | /* are we still trying to reconnect? */ | 169 | /* are we still trying to reconnect? */ |
164 | if (server->tcpStatus != CifsNeedReconnect) | 170 | if (server->tcpStatus != CifsNeedReconnect) |
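
cifs_reconnect_tcon() (and smb2_reconnect() further down) now looks at the return value of wait_event_interruptible_timeout() instead of discarding it: a negative value means the sleep was cut short by a signal, zero means the timeout expired with the condition still false, and a positive value means the condition became true. On a signal the caller now bails out with -ERESTARTSYS rather than re-entering the wait. A self-contained sketch of how the callers classify that return value; the numeric ERESTARTSYS constant is the kernel-internal one and is reproduced here only for illustration:

#include <stdio.h>

#define ERESTARTSYS 512         /* kernel-internal "restart after signal" errno */

/* Decide what a reconnect loop should do with the wait's return value. */
static const char *classify_wait_rc(long rc)
{
        if (rc < 0)
                return "interrupted by a signal: abort, return -ERESTARTSYS";
        if (rc == 0)
                return "timed out: re-check tcpStatus and keep waiting";
        return "condition became true before the timeout";
}

int main(void)
{
        printf("%s\n", classify_wait_rc(-ERESTARTSYS));
        printf("%s\n", classify_wait_rc(0));
        printf("%s\n", classify_wait_rc(250));
        return 0;
}
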
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a57da1b88bdf..5df2c0698cda 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -924,6 +924,7 @@ next_pdu: | |||
924 | server->pdu_size = next_offset; | 924 | server->pdu_size = next_offset; |
925 | } | 925 | } |
926 | 926 | ||
927 | mid_entry = NULL; | ||
927 | if (server->ops->is_transform_hdr && | 928 | if (server->ops->is_transform_hdr && |
928 | server->ops->receive_transform && | 929 | server->ops->receive_transform && |
929 | server->ops->is_transform_hdr(buf)) { | 930 | server->ops->is_transform_hdr(buf)) { |
@@ -938,8 +939,11 @@ next_pdu: | |||
938 | length = mid_entry->receive(server, mid_entry); | 939 | length = mid_entry->receive(server, mid_entry); |
939 | } | 940 | } |
940 | 941 | ||
941 | if (length < 0) | 942 | if (length < 0) { |
943 | if (mid_entry) | ||
944 | cifs_mid_q_entry_release(mid_entry); | ||
942 | continue; | 945 | continue; |
946 | } | ||
943 | 947 | ||
944 | if (server->large_buf) | 948 | if (server->large_buf) |
945 | buf = server->bigbuf; | 949 | buf = server->bigbuf; |
@@ -956,6 +960,8 @@ next_pdu: | |||
956 | 960 | ||
957 | if (!mid_entry->multiRsp || mid_entry->multiEnd) | 961 | if (!mid_entry->multiRsp || mid_entry->multiEnd) |
958 | mid_entry->callback(mid_entry); | 962 | mid_entry->callback(mid_entry); |
963 | |||
964 | cifs_mid_q_entry_release(mid_entry); | ||
959 | } else if (server->ops->is_oplock_break && | 965 | } else if (server->ops->is_oplock_break && |
960 | server->ops->is_oplock_break(buf, server)) { | 966 | server->ops->is_oplock_break(buf, server)) { |
961 | cifs_dbg(FYI, "Received oplock break\n"); | 967 | cifs_dbg(FYI, "Received oplock break\n"); |
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index aff8ce8ba34d..646dcd149de1 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer) | |||
107 | if (compare_mid(mid->mid, buf) && | 107 | if (compare_mid(mid->mid, buf) && |
108 | mid->mid_state == MID_REQUEST_SUBMITTED && | 108 | mid->mid_state == MID_REQUEST_SUBMITTED && |
109 | le16_to_cpu(mid->command) == buf->Command) { | 109 | le16_to_cpu(mid->command) == buf->Command) { |
110 | kref_get(&mid->refcount); | ||
110 | spin_unlock(&GlobalMid_Lock); | 111 | spin_unlock(&GlobalMid_Lock); |
111 | return mid; | 112 | return mid; |
112 | } | 113 | } |
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 788412675723..4ed10dd086e6 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c | |||
@@ -41,7 +41,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, | |||
41 | int rc; | 41 | int rc; |
42 | __le16 *smb2_path; | 42 | __le16 *smb2_path; |
43 | struct smb2_file_all_info *smb2_data = NULL; | 43 | struct smb2_file_all_info *smb2_data = NULL; |
44 | __u8 smb2_oplock[17]; | 44 | __u8 smb2_oplock; |
45 | struct cifs_fid *fid = oparms->fid; | 45 | struct cifs_fid *fid = oparms->fid; |
46 | struct network_resiliency_req nr_ioctl_req; | 46 | struct network_resiliency_req nr_ioctl_req; |
47 | 47 | ||
@@ -59,12 +59,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, | |||
59 | } | 59 | } |
60 | 60 | ||
61 | oparms->desired_access |= FILE_READ_ATTRIBUTES; | 61 | oparms->desired_access |= FILE_READ_ATTRIBUTES; |
62 | *smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH; | 62 | smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH; |
63 | 63 | ||
64 | if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) | 64 | rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, |
65 | memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE); | ||
66 | |||
67 | rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL, | ||
68 | NULL); | 65 | NULL); |
69 | if (rc) | 66 | if (rc) |
70 | goto out; | 67 | goto out; |
@@ -101,7 +98,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, | |||
101 | move_smb2_info_to_cifs(buf, smb2_data); | 98 | move_smb2_info_to_cifs(buf, smb2_data); |
102 | } | 99 | } |
103 | 100 | ||
104 | *oplock = *smb2_oplock; | 101 | *oplock = smb2_oplock; |
105 | out: | 102 | out: |
106 | kfree(smb2_data); | 103 | kfree(smb2_data); |
107 | kfree(smb2_path); | 104 | kfree(smb2_path); |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 0356b5559c71..ea92a38b2f08 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -203,6 +203,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf) | |||
203 | if ((mid->mid == wire_mid) && | 203 | if ((mid->mid == wire_mid) && |
204 | (mid->mid_state == MID_REQUEST_SUBMITTED) && | 204 | (mid->mid_state == MID_REQUEST_SUBMITTED) && |
205 | (mid->command == shdr->Command)) { | 205 | (mid->command == shdr->Command)) { |
206 | kref_get(&mid->refcount); | ||
206 | spin_unlock(&GlobalMid_Lock); | 207 | spin_unlock(&GlobalMid_Lock); |
207 | return mid; | 208 | return mid; |
208 | } | 209 | } |
@@ -855,6 +856,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, | |||
855 | 856 | ||
856 | rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea, | 857 | rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea, |
857 | len); | 858 | len); |
859 | kfree(ea); | ||
860 | |||
858 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); | 861 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); |
859 | 862 | ||
860 | return rc; | 863 | return rc; |
@@ -2219,8 +2222,7 @@ smb2_create_lease_buf(u8 *lease_key, u8 oplock) | |||
2219 | if (!buf) | 2222 | if (!buf) |
2220 | return NULL; | 2223 | return NULL; |
2221 | 2224 | ||
2222 | buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key)); | 2225 | memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); |
2223 | buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8))); | ||
2224 | buf->lcontext.LeaseState = map_oplock_to_lease(oplock); | 2226 | buf->lcontext.LeaseState = map_oplock_to_lease(oplock); |
2225 | 2227 | ||
2226 | buf->ccontext.DataOffset = cpu_to_le16(offsetof | 2228 | buf->ccontext.DataOffset = cpu_to_le16(offsetof |
@@ -2246,8 +2248,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock) | |||
2246 | if (!buf) | 2248 | if (!buf) |
2247 | return NULL; | 2249 | return NULL; |
2248 | 2250 | ||
2249 | buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key)); | 2251 | memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); |
2250 | buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8))); | ||
2251 | buf->lcontext.LeaseState = map_oplock_to_lease(oplock); | 2252 | buf->lcontext.LeaseState = map_oplock_to_lease(oplock); |
2252 | 2253 | ||
2253 | buf->ccontext.DataOffset = cpu_to_le16(offsetof | 2254 | buf->ccontext.DataOffset = cpu_to_le16(offsetof |
@@ -2284,8 +2285,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) | |||
2284 | if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS) | 2285 | if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS) |
2285 | return SMB2_OPLOCK_LEVEL_NOCHANGE; | 2286 | return SMB2_OPLOCK_LEVEL_NOCHANGE; |
2286 | if (lease_key) | 2287 | if (lease_key) |
2287 | memcpy(lease_key, &lc->lcontext.LeaseKeyLow, | 2288 | memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); |
2288 | SMB2_LEASE_KEY_SIZE); | ||
2289 | return le32_to_cpu(lc->lcontext.LeaseState); | 2289 | return le32_to_cpu(lc->lcontext.LeaseState); |
2290 | } | 2290 | } |
2291 | 2291 | ||
@@ -2521,7 +2521,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq, | |||
2521 | if (!tr_hdr) | 2521 | if (!tr_hdr) |
2522 | goto err_free_iov; | 2522 | goto err_free_iov; |
2523 | 2523 | ||
2524 | orig_len = smb2_rqst_len(old_rq, false); | 2524 | orig_len = smb_rqst_len(server, old_rq); |
2525 | 2525 | ||
2526 | /* fill the 2nd iov with a transform header */ | 2526 | /* fill the 2nd iov with a transform header */ |
2527 | fill_transform_hdr(tr_hdr, orig_len, old_rq); | 2527 | fill_transform_hdr(tr_hdr, orig_len, old_rq); |
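
Besides taking a reference in smb2_find_mid() and switching the lease key to a raw 16-byte copy (see the smb2pdu.h note below), this file gains a kfree(ea) in smb2_set_ea(): the temporary EA buffer is consumed by the send call and was previously never freed. A trivial user-space sketch of the allocate/send/free pattern; set_ea() and the wrapper are made-up stand-ins, not the cifs functions:

#include <stdlib.h>
#include <string.h>

static int set_ea(const void *buf, size_t len)
{
        (void)buf; (void)len;
        return 0;                       /* pretend the request was sent */
}

static int set_ea_wrapper(const char *name, const void *value, size_t len)
{
        size_t buflen = strlen(name) + 1 + len;
        char *ea = calloc(1, buflen);
        int rc;

        if (!ea)
                return -1;
        memcpy(ea, name, strlen(name));
        memcpy(ea + strlen(name) + 1, value, len);

        rc = set_ea(ea, buflen);
        free(ea);       /* was missing before the fix, so the buffer leaked */
        return rc;
}

int main(void)
{
        return set_ea_wrapper("user.comment", "hi", 2);
}
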
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 810b85787c91..3c92678cb45b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -155,7 +155,7 @@ out: | |||
155 | static int | 155 | static int |
156 | smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | 156 | smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) |
157 | { | 157 | { |
158 | int rc = 0; | 158 | int rc; |
159 | struct nls_table *nls_codepage; | 159 | struct nls_table *nls_codepage; |
160 | struct cifs_ses *ses; | 160 | struct cifs_ses *ses; |
161 | struct TCP_Server_Info *server; | 161 | struct TCP_Server_Info *server; |
@@ -166,10 +166,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | |||
166 | * for those three - in the calling routine. | 166 | * for those three - in the calling routine. |
167 | */ | 167 | */ |
168 | if (tcon == NULL) | 168 | if (tcon == NULL) |
169 | return rc; | 169 | return 0; |
170 | 170 | ||
171 | if (smb2_command == SMB2_TREE_CONNECT) | 171 | if (smb2_command == SMB2_TREE_CONNECT) |
172 | return rc; | 172 | return 0; |
173 | 173 | ||
174 | if (tcon->tidStatus == CifsExiting) { | 174 | if (tcon->tidStatus == CifsExiting) { |
175 | /* | 175 | /* |
@@ -212,8 +212,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | |||
212 | return -EAGAIN; | 212 | return -EAGAIN; |
213 | } | 213 | } |
214 | 214 | ||
215 | wait_event_interruptible_timeout(server->response_q, | 215 | rc = wait_event_interruptible_timeout(server->response_q, |
216 | (server->tcpStatus != CifsNeedReconnect), 10 * HZ); | 216 | (server->tcpStatus != CifsNeedReconnect), |
217 | 10 * HZ); | ||
218 | if (rc < 0) { | ||
219 | cifs_dbg(FYI, "%s: aborting reconnect due to a received" | ||
220 | " signal by the process\n", __func__); | ||
221 | return -ERESTARTSYS; | ||
222 | } | ||
217 | 223 | ||
218 | /* are we still trying to reconnect? */ | 224 | /* are we still trying to reconnect? */ |
219 | if (server->tcpStatus != CifsNeedReconnect) | 225 | if (server->tcpStatus != CifsNeedReconnect) |
@@ -231,7 +237,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | |||
231 | } | 237 | } |
232 | 238 | ||
233 | if (!tcon->ses->need_reconnect && !tcon->need_reconnect) | 239 | if (!tcon->ses->need_reconnect && !tcon->need_reconnect) |
234 | return rc; | 240 | return 0; |
235 | 241 | ||
236 | nls_codepage = load_nls_default(); | 242 | nls_codepage = load_nls_default(); |
237 | 243 | ||
@@ -340,7 +346,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, | |||
340 | return rc; | 346 | return rc; |
341 | 347 | ||
342 | /* BB eventually switch this to SMB2 specific small buf size */ | 348 | /* BB eventually switch this to SMB2 specific small buf size */ |
343 | *request_buf = cifs_small_buf_get(); | 349 | if (smb2_command == SMB2_SET_INFO) |
350 | *request_buf = cifs_buf_get(); | ||
351 | else | ||
352 | *request_buf = cifs_small_buf_get(); | ||
344 | if (*request_buf == NULL) { | 353 | if (*request_buf == NULL) { |
345 | /* BB should we add a retry in here if not a writepage? */ | 354 | /* BB should we add a retry in here if not a writepage? */ |
346 | return -ENOMEM; | 355 | return -ENOMEM; |
@@ -1707,12 +1716,12 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, | |||
1707 | 1716 | ||
1708 | static int | 1717 | static int |
1709 | add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, | 1718 | add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, |
1710 | unsigned int *num_iovec, __u8 *oplock) | 1719 | unsigned int *num_iovec, u8 *lease_key, __u8 *oplock) |
1711 | { | 1720 | { |
1712 | struct smb2_create_req *req = iov[0].iov_base; | 1721 | struct smb2_create_req *req = iov[0].iov_base; |
1713 | unsigned int num = *num_iovec; | 1722 | unsigned int num = *num_iovec; |
1714 | 1723 | ||
1715 | iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock); | 1724 | iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock); |
1716 | if (iov[num].iov_base == NULL) | 1725 | if (iov[num].iov_base == NULL) |
1717 | return -ENOMEM; | 1726 | return -ENOMEM; |
1718 | iov[num].iov_len = server->vals->create_lease_size; | 1727 | iov[num].iov_len = server->vals->create_lease_size; |
@@ -2172,7 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
2172 | *oplock == SMB2_OPLOCK_LEVEL_NONE) | 2181 | *oplock == SMB2_OPLOCK_LEVEL_NONE) |
2173 | req->RequestedOplockLevel = *oplock; | 2182 | req->RequestedOplockLevel = *oplock; |
2174 | else { | 2183 | else { |
2175 | rc = add_lease_context(server, iov, &n_iov, oplock); | 2184 | rc = add_lease_context(server, iov, &n_iov, |
2185 | oparms->fid->lease_key, oplock); | ||
2176 | if (rc) { | 2186 | if (rc) { |
2177 | cifs_small_buf_release(req); | 2187 | cifs_small_buf_release(req); |
2178 | kfree(copy_path); | 2188 | kfree(copy_path); |
@@ -3720,7 +3730,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
3720 | 3730 | ||
3721 | rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, | 3731 | rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, |
3722 | &rsp_iov); | 3732 | &rsp_iov); |
3723 | cifs_small_buf_release(req); | 3733 | cifs_buf_release(req); |
3724 | rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; | 3734 | rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; |
3725 | 3735 | ||
3726 | if (rc != 0) { | 3736 | if (rc != 0) { |
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 824dddeee3f2..a671adcc44a6 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h | |||
@@ -678,16 +678,14 @@ struct create_context { | |||
678 | #define SMB2_LEASE_KEY_SIZE 16 | 678 | #define SMB2_LEASE_KEY_SIZE 16 |
679 | 679 | ||
680 | struct lease_context { | 680 | struct lease_context { |
681 | __le64 LeaseKeyLow; | 681 | u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; |
682 | __le64 LeaseKeyHigh; | ||
683 | __le32 LeaseState; | 682 | __le32 LeaseState; |
684 | __le32 LeaseFlags; | 683 | __le32 LeaseFlags; |
685 | __le64 LeaseDuration; | 684 | __le64 LeaseDuration; |
686 | } __packed; | 685 | } __packed; |
687 | 686 | ||
688 | struct lease_context_v2 { | 687 | struct lease_context_v2 { |
689 | __le64 LeaseKeyLow; | 688 | u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; |
690 | __le64 LeaseKeyHigh; | ||
691 | __le32 LeaseState; | 689 | __le32 LeaseState; |
692 | __le32 LeaseFlags; | 690 | __le32 LeaseFlags; |
693 | __le64 LeaseDuration; | 691 | __le64 LeaseDuration; |
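
The lease key used to be stored as two __le64 halves and written with cpu_to_le64() on a u64 loaded straight from the key buffer; on a big-endian host that byte-swaps each half of what is really an opaque 16-byte blob. Storing it as u8 LeaseKey[SMB2_LEASE_KEY_SIZE] and using memcpy() keeps the wire bytes identical on every architecture. A user-space demonstration of the difference; glibc's bswap_64() plays the role cpu_to_le64() would play on a big-endian host:

#include <byteswap.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        uint8_t key[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                            0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
        uint8_t wire_memcpy[16], wire_swapped[16];
        uint64_t lo, hi;

        memcpy(wire_memcpy, key, 16);           /* what the patch does         */

        memcpy(&lo, key, 8);                    /* old scheme, big-endian case */
        memcpy(&hi, key + 8, 8);
        lo = bswap_64(lo);
        hi = bswap_64(hi);
        memcpy(wire_swapped, &lo, 8);
        memcpy(wire_swapped + 8, &hi, 8);

        printf("identical: %s\n",
               memcmp(wire_memcpy, wire_swapped, 16) == 0 ? "yes" : "no");
        return 0;
}

The program prints "identical: no" on any host, showing that a per-half byte swap cannot reproduce a plain copy of an opaque key.
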
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 3ae208ac2a77..6e6a4f2ec890 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h | |||
@@ -113,8 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile, | |||
113 | extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); | 113 | extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); |
114 | extern void smb2_reconnect_server(struct work_struct *work); | 114 | extern void smb2_reconnect_server(struct work_struct *work); |
115 | extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server); | 115 | extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server); |
116 | extern unsigned long | 116 | extern unsigned long smb_rqst_len(struct TCP_Server_Info *server, |
117 | smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker); | 117 | struct smb_rqst *rqst); |
118 | 118 | ||
119 | /* | 119 | /* |
120 | * SMB2 Worker functions - most of protocol specific implementation details | 120 | * SMB2 Worker functions - most of protocol specific implementation details |
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 51b9437c3c7b..719d55e63d88 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c | |||
@@ -173,6 +173,8 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
173 | struct kvec *iov = rqst->rq_iov; | 173 | struct kvec *iov = rqst->rq_iov; |
174 | struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base; | 174 | struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base; |
175 | struct cifs_ses *ses; | 175 | struct cifs_ses *ses; |
176 | struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash; | ||
177 | struct smb_rqst drqst; | ||
176 | 178 | ||
177 | ses = smb2_find_smb_ses(server, shdr->SessionId); | 179 | ses = smb2_find_smb_ses(server, shdr->SessionId); |
178 | if (!ses) { | 180 | if (!ses) { |
@@ -190,21 +192,39 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
190 | } | 192 | } |
191 | 193 | ||
192 | rc = crypto_shash_setkey(server->secmech.hmacsha256, | 194 | rc = crypto_shash_setkey(server->secmech.hmacsha256, |
193 | ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE); | 195 | ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE); |
194 | if (rc) { | 196 | if (rc) { |
195 | cifs_dbg(VFS, "%s: Could not update with response\n", __func__); | 197 | cifs_dbg(VFS, "%s: Could not update with response\n", __func__); |
196 | return rc; | 198 | return rc; |
197 | } | 199 | } |
198 | 200 | ||
199 | rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash); | 201 | rc = crypto_shash_init(shash); |
200 | if (rc) { | 202 | if (rc) { |
201 | cifs_dbg(VFS, "%s: Could not init sha256", __func__); | 203 | cifs_dbg(VFS, "%s: Could not init sha256", __func__); |
202 | return rc; | 204 | return rc; |
203 | } | 205 | } |
204 | 206 | ||
205 | rc = __cifs_calc_signature(rqst, server, sigptr, | 207 | /* |
206 | &server->secmech.sdeschmacsha256->shash); | 208 | * For SMB2+, __cifs_calc_signature() expects to sign only the actual |
209 | * data, that is, iov[0] should not contain a rfc1002 length. | ||
210 | * | ||
211 | * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to | ||
212 | * __cifs_calc_signature(). | ||
213 | */ | ||
214 | drqst = *rqst; | ||
215 | if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { | ||
216 | rc = crypto_shash_update(shash, iov[0].iov_base, | ||
217 | iov[0].iov_len); | ||
218 | if (rc) { | ||
219 | cifs_dbg(VFS, "%s: Could not update with payload\n", | ||
220 | __func__); | ||
221 | return rc; | ||
222 | } | ||
223 | drqst.rq_iov++; | ||
224 | drqst.rq_nvec--; | ||
225 | } | ||
207 | 226 | ||
227 | rc = __cifs_calc_signature(&drqst, server, sigptr, shash); | ||
208 | if (!rc) | 228 | if (!rc) |
209 | memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); | 229 | memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); |
210 | 230 | ||
@@ -408,12 +428,14 @@ generate_smb311signingkey(struct cifs_ses *ses) | |||
408 | int | 428 | int |
409 | smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | 429 | smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) |
410 | { | 430 | { |
411 | int rc = 0; | 431 | int rc; |
412 | unsigned char smb3_signature[SMB2_CMACAES_SIZE]; | 432 | unsigned char smb3_signature[SMB2_CMACAES_SIZE]; |
413 | unsigned char *sigptr = smb3_signature; | 433 | unsigned char *sigptr = smb3_signature; |
414 | struct kvec *iov = rqst->rq_iov; | 434 | struct kvec *iov = rqst->rq_iov; |
415 | struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base; | 435 | struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base; |
416 | struct cifs_ses *ses; | 436 | struct cifs_ses *ses; |
437 | struct shash_desc *shash = &server->secmech.sdesccmacaes->shash; | ||
438 | struct smb_rqst drqst; | ||
417 | 439 | ||
418 | ses = smb2_find_smb_ses(server, shdr->SessionId); | 440 | ses = smb2_find_smb_ses(server, shdr->SessionId); |
419 | if (!ses) { | 441 | if (!ses) { |
@@ -425,8 +447,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
425 | memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE); | 447 | memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE); |
426 | 448 | ||
427 | rc = crypto_shash_setkey(server->secmech.cmacaes, | 449 | rc = crypto_shash_setkey(server->secmech.cmacaes, |
428 | ses->smb3signingkey, SMB2_CMACAES_SIZE); | 450 | ses->smb3signingkey, SMB2_CMACAES_SIZE); |
429 | |||
430 | if (rc) { | 451 | if (rc) { |
431 | cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__); | 452 | cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__); |
432 | return rc; | 453 | return rc; |
@@ -437,15 +458,33 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
437 | * so unlike smb2 case we do not have to check here if secmech are | 458 | * so unlike smb2 case we do not have to check here if secmech are |
438 | * initialized | 459 | * initialized |
439 | */ | 460 | */ |
440 | rc = crypto_shash_init(&server->secmech.sdesccmacaes->shash); | 461 | rc = crypto_shash_init(shash); |
441 | if (rc) { | 462 | if (rc) { |
442 | cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__); | 463 | cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__); |
443 | return rc; | 464 | return rc; |
444 | } | 465 | } |
445 | 466 | ||
446 | rc = __cifs_calc_signature(rqst, server, sigptr, | 467 | /* |
447 | &server->secmech.sdesccmacaes->shash); | 468 | * For SMB2+, __cifs_calc_signature() expects to sign only the actual |
469 | * data, that is, iov[0] should not contain a rfc1002 length. | ||
470 | * | ||
471 | * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to | ||
472 | * __cifs_calc_signature(). | ||
473 | */ | ||
474 | drqst = *rqst; | ||
475 | if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { | ||
476 | rc = crypto_shash_update(shash, iov[0].iov_base, | ||
477 | iov[0].iov_len); | ||
478 | if (rc) { | ||
479 | cifs_dbg(VFS, "%s: Could not update with payload\n", | ||
480 | __func__); | ||
481 | return rc; | ||
482 | } | ||
483 | drqst.rq_iov++; | ||
484 | drqst.rq_nvec--; | ||
485 | } | ||
448 | 486 | ||
487 | rc = __cifs_calc_signature(&drqst, server, sigptr, shash); | ||
449 | if (!rc) | 488 | if (!rc) |
450 | memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); | 489 | memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); |
451 | 490 | ||
@@ -548,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr, | |||
548 | 587 | ||
549 | temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); | 588 | temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); |
550 | memset(temp, 0, sizeof(struct mid_q_entry)); | 589 | memset(temp, 0, sizeof(struct mid_q_entry)); |
590 | kref_init(&temp->refcount); | ||
551 | temp->mid = le64_to_cpu(shdr->MessageId); | 591 | temp->mid = le64_to_cpu(shdr->MessageId); |
552 | temp->pid = current->pid; | 592 | temp->pid = current->pid; |
553 | temp->command = shdr->Command; /* Always LE */ | 593 | temp->command = shdr->Command; /* Always LE */ |
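
Both signing routines now hash the 4-byte rfc1002 length in iov[0] into the shash themselves and then pass a copy of the request with rq_iov advanced by one (and rq_nvec decremented) down to __cifs_calc_signature(), which expects iov[0] to be the SMB2 header proper. A toy user-space sketch of that iov-advance pattern; a running byte sum stands in for the HMAC-SHA256/CMAC-AES state:

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

static void sum_update(unsigned int *state, const void *data, size_t len)
{
        const unsigned char *p = data;

        while (len--)
                *state += *p++;
}

/* Shared helper: like __cifs_calc_signature(), it expects iov[0] to be the
 * protocol header itself, with no transport length prefix in front. */
static void calc_signature(unsigned int *state, const struct iovec *iov, int nvec)
{
        for (int i = 0; i < nvec; i++)
                sum_update(state, iov[i].iov_base, iov[i].iov_len);
}

int main(void)
{
        unsigned char marker[4] = { 0, 0, 0, 42 };      /* rfc1002-style length */
        unsigned char header[8] = "SMB2hdr";            /* stand-in for the PDU */
        struct iovec iov[2] = {
                { marker, sizeof(marker) },
                { header, sizeof(header) },
        };
        unsigned int state = 0;

        /* Fold the 4-byte prefix into the running state first ... */
        if (iov[0].iov_len == 4)
                sum_update(&state, iov[0].iov_base, iov[0].iov_len);

        /* ... then hand the helper a view that starts at iov[1]. */
        calc_signature(&state, iov + 1, 1);

        printf("toy signature state: %u\n", state);
        return 0;
}
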
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index 6fd94d9ffac2..c55ea4e6201b 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c | |||
@@ -2083,8 +2083,9 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg) | |||
2083 | * rqst: the data to write | 2083 | * rqst: the data to write |
2084 | * return value: 0 if successfully write, otherwise error code | 2084 | * return value: 0 if successfully write, otherwise error code |
2085 | */ | 2085 | */ |
2086 | int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) | 2086 | int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) |
2087 | { | 2087 | { |
2088 | struct smbd_connection *info = server->smbd_conn; | ||
2088 | struct kvec vec; | 2089 | struct kvec vec; |
2089 | int nvecs; | 2090 | int nvecs; |
2090 | int size; | 2091 | int size; |
@@ -2118,7 +2119,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) | |||
2118 | * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and | 2119 | * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and |
2119 | * ends at page boundary | 2120 | * ends at page boundary |
2120 | */ | 2121 | */ |
2121 | buflen = smb2_rqst_len(rqst, true); | 2122 | buflen = smb_rqst_len(server, rqst); |
2122 | 2123 | ||
2123 | if (buflen + sizeof(struct smbd_data_transfer) > | 2124 | if (buflen + sizeof(struct smbd_data_transfer) > |
2124 | info->max_fragmented_send_size) { | 2125 | info->max_fragmented_send_size) { |
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h index 1e419c21dc60..a11096254f29 100644 --- a/fs/cifs/smbdirect.h +++ b/fs/cifs/smbdirect.h | |||
@@ -292,7 +292,7 @@ void smbd_destroy(struct smbd_connection *info); | |||
292 | 292 | ||
293 | /* Interface for carrying upper layer I/O through send/recv */ | 293 | /* Interface for carrying upper layer I/O through send/recv */ |
294 | int smbd_recv(struct smbd_connection *info, struct msghdr *msg); | 294 | int smbd_recv(struct smbd_connection *info, struct msghdr *msg); |
295 | int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst); | 295 | int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst); |
296 | 296 | ||
297 | enum mr_state { | 297 | enum mr_state { |
298 | MR_READY, | 298 | MR_READY, |
@@ -332,7 +332,7 @@ static inline void *smbd_get_connection( | |||
332 | static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; } | 332 | static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; } |
333 | static inline void smbd_destroy(struct smbd_connection *info) {} | 333 | static inline void smbd_destroy(struct smbd_connection *info) {} |
334 | static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; } | 334 | static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; } |
335 | static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; } | 335 | static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; } |
336 | #endif | 336 | #endif |
337 | 337 | ||
338 | #endif | 338 | #endif |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index fb57dfbfb749..a341ec839c83 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) | |||
61 | 61 | ||
62 | temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); | 62 | temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); |
63 | memset(temp, 0, sizeof(struct mid_q_entry)); | 63 | memset(temp, 0, sizeof(struct mid_q_entry)); |
64 | kref_init(&temp->refcount); | ||
64 | temp->mid = get_mid(smb_buffer); | 65 | temp->mid = get_mid(smb_buffer); |
65 | temp->pid = current->pid; | 66 | temp->pid = current->pid; |
66 | temp->command = cpu_to_le16(smb_buffer->Command); | 67 | temp->command = cpu_to_le16(smb_buffer->Command); |
@@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) | |||
82 | return temp; | 83 | return temp; |
83 | } | 84 | } |
84 | 85 | ||
86 | static void _cifs_mid_q_entry_release(struct kref *refcount) | ||
87 | { | ||
88 | struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry, | ||
89 | refcount); | ||
90 | |||
91 | mempool_free(mid, cifs_mid_poolp); | ||
92 | } | ||
93 | |||
94 | void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) | ||
95 | { | ||
96 | spin_lock(&GlobalMid_Lock); | ||
97 | kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); | ||
98 | spin_unlock(&GlobalMid_Lock); | ||
99 | } | ||
100 | |||
85 | void | 101 | void |
86 | DeleteMidQEntry(struct mid_q_entry *midEntry) | 102 | DeleteMidQEntry(struct mid_q_entry *midEntry) |
87 | { | 103 | { |
@@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) | |||
110 | } | 126 | } |
111 | } | 127 | } |
112 | #endif | 128 | #endif |
113 | mempool_free(midEntry, cifs_mid_poolp); | 129 | cifs_mid_q_entry_release(midEntry); |
114 | } | 130 | } |
115 | 131 | ||
116 | void | 132 | void |
@@ -202,14 +218,15 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, | |||
202 | } | 218 | } |
203 | 219 | ||
204 | unsigned long | 220 | unsigned long |
205 | smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker) | 221 | smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst) |
206 | { | 222 | { |
207 | unsigned int i; | 223 | unsigned int i; |
208 | struct kvec *iov; | 224 | struct kvec *iov; |
209 | int nvec; | 225 | int nvec; |
210 | unsigned long buflen = 0; | 226 | unsigned long buflen = 0; |
211 | 227 | ||
212 | if (skip_rfc1002_marker && rqst->rq_iov[0].iov_len == 4) { | 228 | if (server->vals->header_preamble_size == 0 && |
229 | rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) { | ||
213 | iov = &rqst->rq_iov[1]; | 230 | iov = &rqst->rq_iov[1]; |
214 | nvec = rqst->rq_nvec - 1; | 231 | nvec = rqst->rq_nvec - 1; |
215 | } else { | 232 | } else { |
@@ -260,7 +277,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, | |||
260 | __be32 rfc1002_marker; | 277 | __be32 rfc1002_marker; |
261 | 278 | ||
262 | if (cifs_rdma_enabled(server) && server->smbd_conn) { | 279 | if (cifs_rdma_enabled(server) && server->smbd_conn) { |
263 | rc = smbd_send(server->smbd_conn, rqst); | 280 | rc = smbd_send(server, rqst); |
264 | goto smbd_done; | 281 | goto smbd_done; |
265 | } | 282 | } |
266 | if (ssocket == NULL) | 283 | if (ssocket == NULL) |
@@ -271,7 +288,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, | |||
271 | (char *)&val, sizeof(val)); | 288 | (char *)&val, sizeof(val)); |
272 | 289 | ||
273 | for (j = 0; j < num_rqst; j++) | 290 | for (j = 0; j < num_rqst; j++) |
274 | send_length += smb2_rqst_len(&rqst[j], true); | 291 | send_length += smb_rqst_len(server, &rqst[j]); |
275 | rfc1002_marker = cpu_to_be32(send_length); | 292 | rfc1002_marker = cpu_to_be32(send_length); |
276 | 293 | ||
277 | /* Generate a rfc1002 marker for SMB2+ */ | 294 | /* Generate a rfc1002 marker for SMB2+ */ |
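
smb2_rqst_len() becomes smb_rqst_len(): instead of a skip_rfc1002_marker flag it consults server->vals->header_preamble_size, skipping a 4-byte iov[0] only for dialects that carry no preamble, and __smb_send_rqst() then turns the summed length into a 4-byte big-endian marker with cpu_to_be32(). A small sketch of that length/prefix computation; htonl() stands in for cpu_to_be32(), the real function's page-tail accounting is omitted, and the buffer sizes are arbitrary:

#include <arpa/inet.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

static size_t rqst_len(const struct iovec *iov, int nvec)
{
        size_t len = 0;

        /* skip a 4-byte marker the caller may already have in iov[0] */
        if (nvec >= 2 && iov[0].iov_len == 4) {
                iov++;
                nvec--;
        }
        for (int i = 0; i < nvec; i++)
                len += iov[i].iov_len;
        return len;
}

int main(void)
{
        unsigned char header[64] = { 0 }, body[100] = { 0 };
        struct iovec iov[2] = { { header, sizeof(header) },
                                { body, sizeof(body) } };
        uint32_t marker = htonl((uint32_t)rqst_len(iov, 2));
        unsigned char prefix[4];

        memcpy(prefix, &marker, sizeof(prefix));
        printf("prefix bytes: %02x %02x %02x %02x\n",
               prefix[0], prefix[1], prefix[2], prefix[3]);
        return 0;
}
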
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index b00481c475cb..e68cefe08261 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
@@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb, | |||
184 | unsigned int bit, bit_max; | 184 | unsigned int bit, bit_max; |
185 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 185 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
186 | ext4_fsblk_t start, tmp; | 186 | ext4_fsblk_t start, tmp; |
187 | int flex_bg = 0; | ||
188 | 187 | ||
189 | J_ASSERT_BH(bh, buffer_locked(bh)); | 188 | J_ASSERT_BH(bh, buffer_locked(bh)); |
190 | 189 | ||
@@ -207,22 +206,19 @@ static int ext4_init_block_bitmap(struct super_block *sb, | |||
207 | 206 | ||
208 | start = ext4_group_first_block_no(sb, block_group); | 207 | start = ext4_group_first_block_no(sb, block_group); |
209 | 208 | ||
210 | if (ext4_has_feature_flex_bg(sb)) | ||
211 | flex_bg = 1; | ||
212 | |||
213 | /* Set bits for block and inode bitmaps, and inode table */ | 209 | /* Set bits for block and inode bitmaps, and inode table */ |
214 | tmp = ext4_block_bitmap(sb, gdp); | 210 | tmp = ext4_block_bitmap(sb, gdp); |
215 | if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) | 211 | if (ext4_block_in_group(sb, tmp, block_group)) |
216 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); | 212 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); |
217 | 213 | ||
218 | tmp = ext4_inode_bitmap(sb, gdp); | 214 | tmp = ext4_inode_bitmap(sb, gdp); |
219 | if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) | 215 | if (ext4_block_in_group(sb, tmp, block_group)) |
220 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); | 216 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); |
221 | 217 | ||
222 | tmp = ext4_inode_table(sb, gdp); | 218 | tmp = ext4_inode_table(sb, gdp); |
223 | for (; tmp < ext4_inode_table(sb, gdp) + | 219 | for (; tmp < ext4_inode_table(sb, gdp) + |
224 | sbi->s_itb_per_group; tmp++) { | 220 | sbi->s_itb_per_group; tmp++) { |
225 | if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) | 221 | if (ext4_block_in_group(sb, tmp, block_group)) |
226 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); | 222 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); |
227 | } | 223 | } |
228 | 224 | ||
@@ -442,7 +438,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) | |||
442 | goto verify; | 438 | goto verify; |
443 | } | 439 | } |
444 | ext4_lock_group(sb, block_group); | 440 | ext4_lock_group(sb, block_group); |
445 | if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | 441 | if (ext4_has_group_desc_csum(sb) && |
442 | (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { | ||
443 | if (block_group == 0) { | ||
444 | ext4_unlock_group(sb, block_group); | ||
445 | unlock_buffer(bh); | ||
446 | ext4_error(sb, "Block bitmap for bg 0 marked " | ||
447 | "uninitialized"); | ||
448 | err = -EFSCORRUPTED; | ||
449 | goto out; | ||
450 | } | ||
446 | err = ext4_init_block_bitmap(sb, bh, block_group, desc); | 451 | err = ext4_init_block_bitmap(sb, bh, block_group, desc); |
447 | set_bitmap_uptodate(bh); | 452 | set_bitmap_uptodate(bh); |
448 | set_buffer_uptodate(bh); | 453 | set_buffer_uptodate(bh); |
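
Two related hardening changes here: the BLOCK_UNINIT path is only trusted when the filesystem actually has group-descriptor checksums, and block group 0 flagged as uninitialized is reported as corruption instead of being "initialized" (group 0 always contains the superblock and descriptors, so a valid image never carries that flag there). The same pattern appears in the ialloc.c and mballoc.c hunks below. A compact sketch of the decision; the flag value is illustrative, and EUCLEAN is the user-space errno the kernel maps EFSCORRUPTED to:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define BG_BLOCK_UNINIT 0x0002          /* illustrative; stands in for EXT4_BG_BLOCK_UNINIT */

static int check_group_flags(uint32_t group, uint16_t bg_flags,
                             bool has_group_desc_csum)
{
        if (!has_group_desc_csum)
                return 0;               /* flag is meaningless, ignore it   */
        if (!(bg_flags & BG_BLOCK_UNINIT))
                return 0;               /* nothing special about this group */
        if (group == 0)
                return -EUCLEAN;        /* bg 0 always has in-use blocks    */
        return 1;                       /* caller may lazily init bitmap    */
}

int main(void)
{
        printf("%d\n", check_group_flags(0, BG_BLOCK_UNINIT, true));
        printf("%d\n", check_group_flags(7, BG_BLOCK_UNINIT, true));
        return 0;
}
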
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0b127853c584..7c7123f265c2 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -1114,6 +1114,7 @@ struct ext4_inode_info { | |||
1114 | #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ | 1114 | #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ |
1115 | #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ | 1115 | #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ |
1116 | #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ | 1116 | #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ |
1117 | #define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */ | ||
1117 | #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ | 1118 | #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ |
1118 | #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ | 1119 | #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ |
1119 | #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ | 1120 | #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ |
@@ -1507,11 +1508,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode) | |||
1507 | static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | 1508 | static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) |
1508 | { | 1509 | { |
1509 | return ino == EXT4_ROOT_INO || | 1510 | return ino == EXT4_ROOT_INO || |
1510 | ino == EXT4_USR_QUOTA_INO || | ||
1511 | ino == EXT4_GRP_QUOTA_INO || | ||
1512 | ino == EXT4_BOOT_LOADER_INO || | ||
1513 | ino == EXT4_JOURNAL_INO || | ||
1514 | ino == EXT4_RESIZE_INO || | ||
1515 | (ino >= EXT4_FIRST_INO(sb) && | 1511 | (ino >= EXT4_FIRST_INO(sb) && |
1516 | ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); | 1512 | ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); |
1517 | } | 1513 | } |
@@ -3018,9 +3014,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode, | |||
3018 | struct iomap; | 3014 | struct iomap; |
3019 | extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap); | 3015 | extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap); |
3020 | 3016 | ||
3021 | extern int ext4_try_to_evict_inline_data(handle_t *handle, | ||
3022 | struct inode *inode, | ||
3023 | int needed); | ||
3024 | extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline); | 3017 | extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline); |
3025 | 3018 | ||
3026 | extern int ext4_convert_inline_data(struct inode *inode); | 3019 | extern int ext4_convert_inline_data(struct inode *inode); |
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 98fb0c119c68..adf6668b596f 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h | |||
@@ -91,6 +91,7 @@ struct ext4_extent_header { | |||
91 | }; | 91 | }; |
92 | 92 | ||
93 | #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) | 93 | #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) |
94 | #define EXT4_MAX_EXTENT_DEPTH 5 | ||
94 | 95 | ||
95 | #define EXT4_EXTENT_TAIL_OFFSET(hdr) \ | 96 | #define EXT4_EXTENT_TAIL_OFFSET(hdr) \ |
96 | (sizeof(struct ext4_extent_header) + \ | 97 | (sizeof(struct ext4_extent_header) + \ |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0057fe3f248d..8ce6fd5b10dd 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, | |||
869 | 869 | ||
870 | eh = ext_inode_hdr(inode); | 870 | eh = ext_inode_hdr(inode); |
871 | depth = ext_depth(inode); | 871 | depth = ext_depth(inode); |
872 | if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) { | ||
873 | EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d", | ||
874 | depth); | ||
875 | ret = -EFSCORRUPTED; | ||
876 | goto err; | ||
877 | } | ||
872 | 878 | ||
873 | if (path) { | 879 | if (path) { |
874 | ext4_ext_drop_refs(path); | 880 | ext4_ext_drop_refs(path); |
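
ext4_find_extent() now rejects an on-disk extent tree whose recorded depth falls outside [0, EXT4_MAX_EXTENT_DEPTH] before walking it, since the depth read from the inode is untrusted input that later bounds the descent and sizes the path array. A minimal sketch of the check; EUCLEAN stands in for the kernel's EFSCORRUPTED:

#include <errno.h>
#include <stdio.h>

#define MAX_EXTENT_DEPTH 5      /* mirrors EXT4_MAX_EXTENT_DEPTH */

static int check_extent_depth(int depth)
{
        if (depth < 0 || depth > MAX_EXTENT_DEPTH)
                return -EUCLEAN;        /* corrupt on-disk value */
        return 0;
}

int main(void)
{
        printf("%d %d\n", check_extent_depth(3), check_extent_depth(40));
        return 0;
}
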
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index f525f909b559..fb83750c1a14 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -150,7 +150,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
150 | } | 150 | } |
151 | 151 | ||
152 | ext4_lock_group(sb, block_group); | 152 | ext4_lock_group(sb, block_group); |
153 | if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { | 153 | if (ext4_has_group_desc_csum(sb) && |
154 | (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { | ||
155 | if (block_group == 0) { | ||
156 | ext4_unlock_group(sb, block_group); | ||
157 | unlock_buffer(bh); | ||
158 | ext4_error(sb, "Inode bitmap for bg 0 marked " | ||
159 | "uninitialized"); | ||
160 | err = -EFSCORRUPTED; | ||
161 | goto out; | ||
162 | } | ||
154 | memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); | 163 | memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); |
155 | ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), | 164 | ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), |
156 | sb->s_blocksize * 8, bh->b_data); | 165 | sb->s_blocksize * 8, bh->b_data); |
@@ -994,7 +1003,8 @@ got: | |||
994 | 1003 | ||
995 | /* recheck and clear flag under lock if we still need to */ | 1004 | /* recheck and clear flag under lock if we still need to */ |
996 | ext4_lock_group(sb, group); | 1005 | ext4_lock_group(sb, group); |
997 | if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | 1006 | if (ext4_has_group_desc_csum(sb) && |
1007 | (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { | ||
998 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); | 1008 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); |
999 | ext4_free_group_clusters_set(sb, gdp, | 1009 | ext4_free_group_clusters_set(sb, gdp, |
1000 | ext4_free_clusters_after_init(sb, group, gdp)); | 1010 | ext4_free_clusters_after_init(sb, group, gdp)); |
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 285ed1588730..e55a8bc870bd 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c | |||
@@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, | |||
437 | 437 | ||
438 | memset((void *)ext4_raw_inode(&is.iloc)->i_block, | 438 | memset((void *)ext4_raw_inode(&is.iloc)->i_block, |
439 | 0, EXT4_MIN_INLINE_DATA_SIZE); | 439 | 0, EXT4_MIN_INLINE_DATA_SIZE); |
440 | memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE); | ||
440 | 441 | ||
441 | if (ext4_has_feature_extents(inode->i_sb)) { | 442 | if (ext4_has_feature_extents(inode->i_sb)) { |
442 | if (S_ISDIR(inode->i_mode) || | 443 | if (S_ISDIR(inode->i_mode) || |
@@ -886,11 +887,11 @@ retry_journal: | |||
886 | flags |= AOP_FLAG_NOFS; | 887 | flags |= AOP_FLAG_NOFS; |
887 | 888 | ||
888 | if (ret == -ENOSPC) { | 889 | if (ret == -ENOSPC) { |
890 | ext4_journal_stop(handle); | ||
889 | ret = ext4_da_convert_inline_data_to_extent(mapping, | 891 | ret = ext4_da_convert_inline_data_to_extent(mapping, |
890 | inode, | 892 | inode, |
891 | flags, | 893 | flags, |
892 | fsdata); | 894 | fsdata); |
893 | ext4_journal_stop(handle); | ||
894 | if (ret == -ENOSPC && | 895 | if (ret == -ENOSPC && |
895 | ext4_should_retry_alloc(inode->i_sb, &retries)) | 896 | ext4_should_retry_alloc(inode->i_sb, &retries)) |
896 | goto retry_journal; | 897 | goto retry_journal; |
@@ -1890,42 +1891,6 @@ out: | |||
1890 | return (error < 0 ? error : 0); | 1891 | return (error < 0 ? error : 0); |
1891 | } | 1892 | } |
1892 | 1893 | ||
1893 | /* | ||
1894 | * Called during xattr set, and if we can sparse space 'needed', | ||
1895 | * just create the extent tree evict the data to the outer block. | ||
1896 | * | ||
1897 | * We use jbd2 instead of page cache to move data to the 1st block | ||
1898 | * so that the whole transaction can be committed as a whole and | ||
1899 | * the data isn't lost because of the delayed page cache write. | ||
1900 | */ | ||
1901 | int ext4_try_to_evict_inline_data(handle_t *handle, | ||
1902 | struct inode *inode, | ||
1903 | int needed) | ||
1904 | { | ||
1905 | int error; | ||
1906 | struct ext4_xattr_entry *entry; | ||
1907 | struct ext4_inode *raw_inode; | ||
1908 | struct ext4_iloc iloc; | ||
1909 | |||
1910 | error = ext4_get_inode_loc(inode, &iloc); | ||
1911 | if (error) | ||
1912 | return error; | ||
1913 | |||
1914 | raw_inode = ext4_raw_inode(&iloc); | ||
1915 | entry = (struct ext4_xattr_entry *)((void *)raw_inode + | ||
1916 | EXT4_I(inode)->i_inline_off); | ||
1917 | if (EXT4_XATTR_LEN(entry->e_name_len) + | ||
1918 | EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) { | ||
1919 | error = -ENOSPC; | ||
1920 | goto out; | ||
1921 | } | ||
1922 | |||
1923 | error = ext4_convert_inline_data_nolock(handle, inode, &iloc); | ||
1924 | out: | ||
1925 | brelse(iloc.bh); | ||
1926 | return error; | ||
1927 | } | ||
1928 | |||
1929 | int ext4_inline_data_truncate(struct inode *inode, int *has_inline) | 1894 | int ext4_inline_data_truncate(struct inode *inode, int *has_inline) |
1930 | { | 1895 | { |
1931 | handle_t *handle; | 1896 | handle_t *handle; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2ea07efbe016..7d6c10017bdf 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func, | |||
402 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, | 402 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, |
403 | map->m_len)) { | 403 | map->m_len)) { |
404 | ext4_error_inode(inode, func, line, map->m_pblk, | 404 | ext4_error_inode(inode, func, line, map->m_pblk, |
405 | "lblock %lu mapped to illegal pblock " | 405 | "lblock %lu mapped to illegal pblock %llu " |
406 | "(length %d)", (unsigned long) map->m_lblk, | 406 | "(length %d)", (unsigned long) map->m_lblk, |
407 | map->m_len); | 407 | map->m_pblk, map->m_len); |
408 | return -EFSCORRUPTED; | 408 | return -EFSCORRUPTED; |
409 | } | 409 | } |
410 | return 0; | 410 | return 0; |
@@ -4506,7 +4506,8 @@ static int __ext4_get_inode_loc(struct inode *inode, | |||
4506 | int inodes_per_block, inode_offset; | 4506 | int inodes_per_block, inode_offset; |
4507 | 4507 | ||
4508 | iloc->bh = NULL; | 4508 | iloc->bh = NULL; |
4509 | if (!ext4_valid_inum(sb, inode->i_ino)) | 4509 | if (inode->i_ino < EXT4_ROOT_INO || |
4510 | inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) | ||
4510 | return -EFSCORRUPTED; | 4511 | return -EFSCORRUPTED; |
4511 | 4512 | ||
4512 | iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); | 4513 | iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 6eae2b91aafa..f7ab34088162 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -2423,7 +2423,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, | |||
2423 | * initialize bb_free to be able to skip | 2423 | * initialize bb_free to be able to skip |
2424 | * empty groups without initialization | 2424 | * empty groups without initialization |
2425 | */ | 2425 | */ |
2426 | if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | 2426 | if (ext4_has_group_desc_csum(sb) && |
2427 | (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { | ||
2427 | meta_group_info[i]->bb_free = | 2428 | meta_group_info[i]->bb_free = |
2428 | ext4_free_clusters_after_init(sb, group, desc); | 2429 | ext4_free_clusters_after_init(sb, group, desc); |
2429 | } else { | 2430 | } else { |
@@ -2989,7 +2990,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
2989 | #endif | 2990 | #endif |
2990 | ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, | 2991 | ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, |
2991 | ac->ac_b_ex.fe_len); | 2992 | ac->ac_b_ex.fe_len); |
2992 | if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | 2993 | if (ext4_has_group_desc_csum(sb) && |
2994 | (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { | ||
2993 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); | 2995 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); |
2994 | ext4_free_group_clusters_set(sb, gdp, | 2996 | ext4_free_group_clusters_set(sb, gdp, |
2995 | ext4_free_clusters_after_init(sb, | 2997 | ext4_free_clusters_after_init(sb, |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 0c4c2201b3aa..ba2396a7bd04 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -405,6 +405,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) | |||
405 | 405 | ||
406 | static void ext4_handle_error(struct super_block *sb) | 406 | static void ext4_handle_error(struct super_block *sb) |
407 | { | 407 | { |
408 | if (test_opt(sb, WARN_ON_ERROR)) | ||
409 | WARN_ON_ONCE(1); | ||
410 | |||
408 | if (sb_rdonly(sb)) | 411 | if (sb_rdonly(sb)) |
409 | return; | 412 | return; |
410 | 413 | ||
@@ -740,6 +743,9 @@ __acquires(bitlock) | |||
740 | va_end(args); | 743 | va_end(args); |
741 | } | 744 | } |
742 | 745 | ||
746 | if (test_opt(sb, WARN_ON_ERROR)) | ||
747 | WARN_ON_ONCE(1); | ||
748 | |||
743 | if (test_opt(sb, ERRORS_CONT)) { | 749 | if (test_opt(sb, ERRORS_CONT)) { |
744 | ext4_commit_super(sb, 0); | 750 | ext4_commit_super(sb, 0); |
745 | return; | 751 | return; |
@@ -1371,7 +1377,8 @@ enum { | |||
1371 | Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, | 1377 | Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, |
1372 | Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, | 1378 | Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, |
1373 | Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax, | 1379 | Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax, |
1374 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, | 1380 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error, |
1381 | Opt_nowarn_on_error, Opt_mblk_io_submit, | ||
1375 | Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize, | 1382 | Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize, |
1376 | Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, | 1383 | Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, |
1377 | Opt_inode_readahead_blks, Opt_journal_ioprio, | 1384 | Opt_inode_readahead_blks, Opt_journal_ioprio, |
@@ -1438,6 +1445,8 @@ static const match_table_t tokens = { | |||
1438 | {Opt_dax, "dax"}, | 1445 | {Opt_dax, "dax"}, |
1439 | {Opt_stripe, "stripe=%u"}, | 1446 | {Opt_stripe, "stripe=%u"}, |
1440 | {Opt_delalloc, "delalloc"}, | 1447 | {Opt_delalloc, "delalloc"}, |
1448 | {Opt_warn_on_error, "warn_on_error"}, | ||
1449 | {Opt_nowarn_on_error, "nowarn_on_error"}, | ||
1441 | {Opt_lazytime, "lazytime"}, | 1450 | {Opt_lazytime, "lazytime"}, |
1442 | {Opt_nolazytime, "nolazytime"}, | 1451 | {Opt_nolazytime, "nolazytime"}, |
1443 | {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"}, | 1452 | {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"}, |
@@ -1602,6 +1611,8 @@ static const struct mount_opts { | |||
1602 | MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, | 1611 | MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, |
1603 | {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, | 1612 | {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, |
1604 | MOPT_EXT4_ONLY | MOPT_CLEAR}, | 1613 | MOPT_EXT4_ONLY | MOPT_CLEAR}, |
1614 | {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET}, | ||
1615 | {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR}, | ||
1605 | {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, | 1616 | {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, |
1606 | MOPT_EXT4_ONLY | MOPT_CLEAR}, | 1617 | MOPT_EXT4_ONLY | MOPT_CLEAR}, |
1607 | {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, | 1618 | {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, |
@@ -2331,6 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb, | |||
2331 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 2342 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
2332 | ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); | 2343 | ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); |
2333 | ext4_fsblk_t last_block; | 2344 | ext4_fsblk_t last_block; |
2345 | ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; | ||
2334 | ext4_fsblk_t block_bitmap; | 2346 | ext4_fsblk_t block_bitmap; |
2335 | ext4_fsblk_t inode_bitmap; | 2347 | ext4_fsblk_t inode_bitmap; |
2336 | ext4_fsblk_t inode_table; | 2348 | ext4_fsblk_t inode_table; |
@@ -2363,6 +2375,14 @@ static int ext4_check_descriptors(struct super_block *sb, | |||
2363 | if (!sb_rdonly(sb)) | 2375 | if (!sb_rdonly(sb)) |
2364 | return 0; | 2376 | return 0; |
2365 | } | 2377 | } |
2378 | if (block_bitmap >= sb_block + 1 && | ||
2379 | block_bitmap <= last_bg_block) { | ||
2380 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
2381 | "Block bitmap for group %u overlaps " | ||
2382 | "block group descriptors", i); | ||
2383 | if (!sb_rdonly(sb)) | ||
2384 | return 0; | ||
2385 | } | ||
2366 | if (block_bitmap < first_block || block_bitmap > last_block) { | 2386 | if (block_bitmap < first_block || block_bitmap > last_block) { |
2367 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | 2387 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
2368 | "Block bitmap for group %u not in group " | 2388 | "Block bitmap for group %u not in group " |
@@ -2377,6 +2397,14 @@ static int ext4_check_descriptors(struct super_block *sb, | |||
2377 | if (!sb_rdonly(sb)) | 2397 | if (!sb_rdonly(sb)) |
2378 | return 0; | 2398 | return 0; |
2379 | } | 2399 | } |
2400 | if (inode_bitmap >= sb_block + 1 && | ||
2401 | inode_bitmap <= last_bg_block) { | ||
2402 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
2403 | "Inode bitmap for group %u overlaps " | ||
2404 | "block group descriptors", i); | ||
2405 | if (!sb_rdonly(sb)) | ||
2406 | return 0; | ||
2407 | } | ||
2380 | if (inode_bitmap < first_block || inode_bitmap > last_block) { | 2408 | if (inode_bitmap < first_block || inode_bitmap > last_block) { |
2381 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | 2409 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
2382 | "Inode bitmap for group %u not in group " | 2410 | "Inode bitmap for group %u not in group " |
@@ -2391,6 +2419,14 @@ static int ext4_check_descriptors(struct super_block *sb, | |||
2391 | if (!sb_rdonly(sb)) | 2419 | if (!sb_rdonly(sb)) |
2392 | return 0; | 2420 | return 0; |
2393 | } | 2421 | } |
2422 | if (inode_table >= sb_block + 1 && | ||
2423 | inode_table <= last_bg_block) { | ||
2424 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
2425 | "Inode table for group %u overlaps " | ||
2426 | "block group descriptors", i); | ||
2427 | if (!sb_rdonly(sb)) | ||
2428 | return 0; | ||
2429 | } | ||
2394 | if (inode_table < first_block || | 2430 | if (inode_table < first_block || |
2395 | inode_table + sbi->s_itb_per_group - 1 > last_block) { | 2431 | inode_table + sbi->s_itb_per_group - 1 > last_block) { |
2396 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | 2432 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
@@ -3097,13 +3133,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) | |||
3097 | ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; | 3133 | ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; |
3098 | struct ext4_group_desc *gdp = NULL; | 3134 | struct ext4_group_desc *gdp = NULL; |
3099 | 3135 | ||
3136 | if (!ext4_has_group_desc_csum(sb)) | ||
3137 | return ngroups; | ||
3138 | |||
3100 | for (group = 0; group < ngroups; group++) { | 3139 | for (group = 0; group < ngroups; group++) { |
3101 | gdp = ext4_get_group_desc(sb, group, NULL); | 3140 | gdp = ext4_get_group_desc(sb, group, NULL); |
3102 | if (!gdp) | 3141 | if (!gdp) |
3103 | continue; | 3142 | continue; |
3104 | 3143 | ||
3105 | if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) | 3144 | if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) |
3145 | continue; | ||
3146 | if (group != 0) | ||
3106 | break; | 3147 | break; |
3148 | ext4_error(sb, "Inode table for bg 0 marked as " | ||
3149 | "needing zeroing"); | ||
3150 | if (sb_rdonly(sb)) | ||
3151 | return ngroups; | ||
3107 | } | 3152 | } |
3108 | 3153 | ||
3109 | return group; | 3154 | return group; |
@@ -3742,6 +3787,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3742 | le32_to_cpu(es->s_log_block_size)); | 3787 | le32_to_cpu(es->s_log_block_size)); |
3743 | goto failed_mount; | 3788 | goto failed_mount; |
3744 | } | 3789 | } |
3790 | if (le32_to_cpu(es->s_log_cluster_size) > | ||
3791 | (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { | ||
3792 | ext4_msg(sb, KERN_ERR, | ||
3793 | "Invalid log cluster size: %u", | ||
3794 | le32_to_cpu(es->s_log_cluster_size)); | ||
3795 | goto failed_mount; | ||
3796 | } | ||
3745 | 3797 | ||
3746 | if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { | 3798 | if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { |
3747 | ext4_msg(sb, KERN_ERR, | 3799 | ext4_msg(sb, KERN_ERR, |
@@ -3806,6 +3858,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3806 | } else { | 3858 | } else { |
3807 | sbi->s_inode_size = le16_to_cpu(es->s_inode_size); | 3859 | sbi->s_inode_size = le16_to_cpu(es->s_inode_size); |
3808 | sbi->s_first_ino = le32_to_cpu(es->s_first_ino); | 3860 | sbi->s_first_ino = le32_to_cpu(es->s_first_ino); |
3861 | if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { | ||
3862 | ext4_msg(sb, KERN_ERR, "invalid first ino: %u", | ||
3863 | sbi->s_first_ino); | ||
3864 | goto failed_mount; | ||
3865 | } | ||
3809 | if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || | 3866 | if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || |
3810 | (!is_power_of_2(sbi->s_inode_size)) || | 3867 | (!is_power_of_2(sbi->s_inode_size)) || |
3811 | (sbi->s_inode_size > blocksize)) { | 3868 | (sbi->s_inode_size > blocksize)) { |
@@ -3882,13 +3939,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3882 | "block size (%d)", clustersize, blocksize); | 3939 | "block size (%d)", clustersize, blocksize); |
3883 | goto failed_mount; | 3940 | goto failed_mount; |
3884 | } | 3941 | } |
3885 | if (le32_to_cpu(es->s_log_cluster_size) > | ||
3886 | (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { | ||
3887 | ext4_msg(sb, KERN_ERR, | ||
3888 | "Invalid log cluster size: %u", | ||
3889 | le32_to_cpu(es->s_log_cluster_size)); | ||
3890 | goto failed_mount; | ||
3891 | } | ||
3892 | sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - | 3942 | sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - |
3893 | le32_to_cpu(es->s_log_block_size); | 3943 | le32_to_cpu(es->s_log_block_size); |
3894 | sbi->s_clusters_per_group = | 3944 | sbi->s_clusters_per_group = |
@@ -3909,10 +3959,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3909 | } | 3959 | } |
3910 | } else { | 3960 | } else { |
3911 | if (clustersize != blocksize) { | 3961 | if (clustersize != blocksize) { |
3912 | ext4_warning(sb, "fragment/cluster size (%d) != " | 3962 | ext4_msg(sb, KERN_ERR, |
3913 | "block size (%d)", clustersize, | 3963 | "fragment/cluster size (%d) != " |
3914 | blocksize); | 3964 | "block size (%d)", clustersize, blocksize); |
3915 | clustersize = blocksize; | 3965 | goto failed_mount; |
3916 | } | 3966 | } |
3917 | if (sbi->s_blocks_per_group > blocksize * 8) { | 3967 | if (sbi->s_blocks_per_group > blocksize * 8) { |
3918 | ext4_msg(sb, KERN_ERR, | 3968 | ext4_msg(sb, KERN_ERR, |
@@ -3966,6 +4016,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3966 | ext4_blocks_count(es)); | 4016 | ext4_blocks_count(es)); |
3967 | goto failed_mount; | 4017 | goto failed_mount; |
3968 | } | 4018 | } |
4019 | if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && | ||
4020 | (sbi->s_cluster_ratio == 1)) { | ||
4021 | ext4_msg(sb, KERN_WARNING, "bad geometry: first data " | ||
4022 | "block is 0 with a 1k block and cluster size"); | ||
4023 | goto failed_mount; | ||
4024 | } | ||
4025 | |||
3969 | blocks_count = (ext4_blocks_count(es) - | 4026 | blocks_count = (ext4_blocks_count(es) - |
3970 | le32_to_cpu(es->s_first_data_block) + | 4027 | le32_to_cpu(es->s_first_data_block) + |
3971 | EXT4_BLOCKS_PER_GROUP(sb) - 1); | 4028 | EXT4_BLOCKS_PER_GROUP(sb) - 1); |
@@ -4001,6 +4058,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
4001 | ret = -ENOMEM; | 4058 | ret = -ENOMEM; |
4002 | goto failed_mount; | 4059 | goto failed_mount; |
4003 | } | 4060 | } |
4061 | if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != | ||
4062 | le32_to_cpu(es->s_inodes_count)) { | ||
4063 | ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", | ||
4064 | le32_to_cpu(es->s_inodes_count), | ||
4065 | ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); | ||
4066 | ret = -EINVAL; | ||
4067 | goto failed_mount; | ||
4068 | } | ||
4004 | 4069 | ||
4005 | bgl_lock_init(sbi->s_blockgroup_lock); | 4070 | bgl_lock_init(sbi->s_blockgroup_lock); |
4006 | 4071 | ||
@@ -4736,6 +4801,14 @@ static int ext4_commit_super(struct super_block *sb, int sync) | |||
4736 | 4801 | ||
4737 | if (!sbh || block_device_ejected(sb)) | 4802 | if (!sbh || block_device_ejected(sb)) |
4738 | return error; | 4803 | return error; |
4804 | |||
4805 | /* | ||
4806 | * The superblock bh should be mapped, but it might not be if the | ||
4807 | * device was hot-removed. Not much we can do but fail the I/O. | ||
4808 | */ | ||
4809 | if (!buffer_mapped(sbh)) | ||
4810 | return error; | ||
4811 | |||
4739 | /* | 4812 | /* |
4740 | * If the file system is mounted read-only, don't update the | 4813 | * If the file system is mounted read-only, don't update the |
4741 | * superblock write time. This avoids updating the superblock | 4814 | * superblock write time. This avoids updating the superblock |
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index fc4ced59c565..723df14f4084 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c | |||
@@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh, | |||
230 | { | 230 | { |
231 | int error = -EFSCORRUPTED; | 231 | int error = -EFSCORRUPTED; |
232 | 232 | ||
233 | if (buffer_verified(bh)) | ||
234 | return 0; | ||
235 | |||
236 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || | 233 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || |
237 | BHDR(bh)->h_blocks != cpu_to_le32(1)) | 234 | BHDR(bh)->h_blocks != cpu_to_le32(1)) |
238 | goto errout; | 235 | goto errout; |
236 | if (buffer_verified(bh)) | ||
237 | return 0; | ||
238 | |||
239 | error = -EFSBADCRC; | 239 | error = -EFSBADCRC; |
240 | if (!ext4_xattr_block_csum_verify(inode, bh)) | 240 | if (!ext4_xattr_block_csum_verify(inode, bh)) |
241 | goto errout; | 241 | goto errout; |
@@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, | |||
1560 | handle_t *handle, struct inode *inode, | 1560 | handle_t *handle, struct inode *inode, |
1561 | bool is_block) | 1561 | bool is_block) |
1562 | { | 1562 | { |
1563 | struct ext4_xattr_entry *last; | 1563 | struct ext4_xattr_entry *last, *next; |
1564 | struct ext4_xattr_entry *here = s->here; | 1564 | struct ext4_xattr_entry *here = s->here; |
1565 | size_t min_offs = s->end - s->base, name_len = strlen(i->name); | 1565 | size_t min_offs = s->end - s->base, name_len = strlen(i->name); |
1566 | int in_inode = i->in_inode; | 1566 | int in_inode = i->in_inode; |
@@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, | |||
1595 | 1595 | ||
1596 | /* Compute min_offs and last. */ | 1596 | /* Compute min_offs and last. */ |
1597 | last = s->first; | 1597 | last = s->first; |
1598 | for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { | 1598 | for (; !IS_LAST_ENTRY(last); last = next) { |
1599 | next = EXT4_XATTR_NEXT(last); | ||
1600 | if ((void *)next >= s->end) { | ||
1601 | EXT4_ERROR_INODE(inode, "corrupted xattr entries"); | ||
1602 | ret = -EFSCORRUPTED; | ||
1603 | goto out; | ||
1604 | } | ||
1599 | if (!last->e_value_inum && last->e_value_size) { | 1605 | if (!last->e_value_inum && last->e_value_size) { |
1600 | size_t offs = le16_to_cpu(last->e_value_offs); | 1606 | size_t offs = le16_to_cpu(last->e_value_offs); |
1601 | if (offs < min_offs) | 1607 | if (offs < min_offs) |
@@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, | |||
2206 | if (EXT4_I(inode)->i_extra_isize == 0) | 2212 | if (EXT4_I(inode)->i_extra_isize == 0) |
2207 | return -ENOSPC; | 2213 | return -ENOSPC; |
2208 | error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); | 2214 | error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); |
2209 | if (error) { | 2215 | if (error) |
2210 | if (error == -ENOSPC && | 2216 | return error; |
2211 | ext4_has_inline_data(inode)) { | ||
2212 | error = ext4_try_to_evict_inline_data(handle, inode, | ||
2213 | EXT4_XATTR_LEN(strlen(i->name) + | ||
2214 | EXT4_XATTR_SIZE(i->value_len))); | ||
2215 | if (error) | ||
2216 | return error; | ||
2217 | error = ext4_xattr_ibody_find(inode, i, is); | ||
2218 | if (error) | ||
2219 | return error; | ||
2220 | error = ext4_xattr_set_entry(i, s, handle, inode, | ||
2221 | false /* is_block */); | ||
2222 | } | ||
2223 | if (error) | ||
2224 | return error; | ||
2225 | } | ||
2226 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); | 2217 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); |
2227 | if (!IS_LAST_ENTRY(s->first)) { | 2218 | if (!IS_LAST_ENTRY(s->first)) { |
2228 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); | 2219 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); |
@@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode, | |||
2651 | last = IFIRST(header); | 2642 | last = IFIRST(header); |
2652 | /* Find the entry best suited to be pushed into EA block */ | 2643 | /* Find the entry best suited to be pushed into EA block */ |
2653 | for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { | 2644 | for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { |
2645 | /* never move system.data out of the inode */ | ||
2646 | if ((last->e_name_len == 4) && | ||
2647 | (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) && | ||
2648 | !memcmp(last->e_name, "data", 4)) | ||
2649 | continue; | ||
2654 | total_size = EXT4_XATTR_LEN(last->e_name_len); | 2650 | total_size = EXT4_XATTR_LEN(last->e_name_len); |
2655 | if (!last->e_value_inum) | 2651 | if (!last->e_value_inum) |
2656 | total_size += EXT4_XATTR_SIZE( | 2652 | total_size += EXT4_XATTR_SIZE( |
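The xattr.c change above validates each next-entry pointer against the end of the region before dereferencing it, so a corrupted on-disk entry can no longer walk the cursor past the buffer. A hedged sketch of the same bounded-walk pattern over a packed array of variable-length records (the record layout here is invented for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical variable-length record: 1-byte length, then payload. */
    struct rec {
            uint8_t len;
            uint8_t data[];
    };

    #define REC_NEXT(r) \
            ((const struct rec *)((const uint8_t *)(r) + 1 + (r)->len))

    /* Walk records in buf[0..size), refusing to step past the end --
     * the same per-entry guard ext4_xattr_set_entry() now applies. */
    static int walk(const uint8_t *buf, size_t size)
    {
            const struct rec *r = (const struct rec *)buf;
            const uint8_t *end = buf + size;

            while ((const uint8_t *)r < end && r->len != 0) {
                    const struct rec *next = REC_NEXT(r);

                    if ((const uint8_t *)next > end) {
                            fprintf(stderr, "corrupted record chain\n");
                            return -1;      /* analogous to -EFSCORRUPTED */
                    }
                    printf("record of %u bytes\n", (unsigned)r->len);
                    r = next;
            }
            return 0;
    }

    int main(void)
    {
            uint8_t buf[16] = { 3, 'a', 'b', 'c', 4, 'd', 'e', 'f', 'g', 0 };

            return walk(buf, sizeof(buf)) ? 1 : 0;
    }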
diff --git a/fs/inode.c b/fs/inode.c index 2c300e981796..8c86c809ca17 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -1999,8 +1999,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir, | |||
1999 | inode->i_uid = current_fsuid(); | 1999 | inode->i_uid = current_fsuid(); |
2000 | if (dir && dir->i_mode & S_ISGID) { | 2000 | if (dir && dir->i_mode & S_ISGID) { |
2001 | inode->i_gid = dir->i_gid; | 2001 | inode->i_gid = dir->i_gid; |
2002 | |||
2003 | /* Directories are special, and always inherit S_ISGID */ | ||
2002 | if (S_ISDIR(mode)) | 2004 | if (S_ISDIR(mode)) |
2003 | mode |= S_ISGID; | 2005 | mode |= S_ISGID; |
2006 | else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) && | ||
2007 | !in_group_p(inode->i_gid) && | ||
2008 | !capable_wrt_inode_uidgid(dir, CAP_FSETID)) | ||
2009 | mode &= ~S_ISGID; | ||
2004 | } else | 2010 | } else |
2005 | inode->i_gid = current_fsgid(); | 2011 | inode->i_gid = current_fsgid(); |
2006 | inode->i_mode = mode; | 2012 | inode->i_mode = mode; |
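The fs/inode.c hunk keeps S_ISGID on new directories under a setgid parent but strips it from other new files when the creator is neither a member of the owning group nor privileged. A small hedged sketch of that mode-adjustment decision in isolation; the group-membership and capability checks are mocked as plain booleans standing in for in_group_p() and capable_wrt_inode_uidgid(..., CAP_FSETID):

    #include <sys/stat.h>
    #include <stdio.h>

    /* Decide the final mode for a new inode created inside a setgid
     * directory: directories always inherit S_ISGID, everything else
     * keeps it only if the creator is in the group or is privileged. */
    static mode_t adjust_mode(mode_t mode, int dir_is_setgid,
                              int in_group, int privileged)
    {
            if (!dir_is_setgid)
                    return mode;

            if (S_ISDIR(mode))
                    return mode | S_ISGID;

            if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
                !in_group && !privileged)
                    mode &= ~S_ISGID;

            return mode;
    }

    int main(void)
    {
            mode_t m = adjust_mode(S_IFREG | S_ISGID | S_IXGRP | 0755, 1, 0, 0);

            printf("setgid stripped: %s\n", (m & S_ISGID) ? "no" : "yes");
            return 0;
    }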
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 51dd68e67b0f..c0b66a7a795b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -1361,6 +1361,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1361 | if (jh->b_transaction == transaction && | 1361 | if (jh->b_transaction == transaction && |
1362 | jh->b_jlist != BJ_Metadata) { | 1362 | jh->b_jlist != BJ_Metadata) { |
1363 | jbd_lock_bh_state(bh); | 1363 | jbd_lock_bh_state(bh); |
1364 | if (jh->b_transaction == transaction && | ||
1365 | jh->b_jlist != BJ_Metadata) | ||
1366 | pr_err("JBD2: assertion failure: h_type=%u " | ||
1367 | "h_line_no=%u block_no=%llu jlist=%u\n", | ||
1368 | handle->h_type, handle->h_line_no, | ||
1369 | (unsigned long long) bh->b_blocknr, | ||
1370 | jh->b_jlist); | ||
1364 | J_ASSERT_JH(jh, jh->b_transaction != transaction || | 1371 | J_ASSERT_JH(jh, jh->b_transaction != transaction || |
1365 | jh->b_jlist == BJ_Metadata); | 1372 | jh->b_jlist == BJ_Metadata); |
1366 | jbd_unlock_bh_state(bh); | 1373 | jbd_unlock_bh_state(bh); |
@@ -1380,11 +1387,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1380 | * of the transaction. This needs to be done | 1387 | * of the transaction. This needs to be done |
1381 | * once a transaction -bzzz | 1388 | * once a transaction -bzzz |
1382 | */ | 1389 | */ |
1383 | jh->b_modified = 1; | ||
1384 | if (handle->h_buffer_credits <= 0) { | 1390 | if (handle->h_buffer_credits <= 0) { |
1385 | ret = -ENOSPC; | 1391 | ret = -ENOSPC; |
1386 | goto out_unlock_bh; | 1392 | goto out_unlock_bh; |
1387 | } | 1393 | } |
1394 | jh->b_modified = 1; | ||
1388 | handle->h_buffer_credits--; | 1395 | handle->h_buffer_credits--; |
1389 | } | 1396 | } |
1390 | 1397 | ||
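The jbd2 hunk moves the jh->b_modified assignment below the buffer-credits check, so a handle that runs out of credits fails before any per-buffer state has been mutated. A generic hedged sketch of that "check resources first, then commit state" ordering (names here are illustrative only):

    #include <stdio.h>

    struct handle { int credits; };
    struct buf    { int modified; };

    /* Reserve a credit for 'b' under 'h'. State is only touched after
     * the resource check succeeds, mirroring the reordering in
     * jbd2_journal_dirty_metadata(). */
    static int reserve_and_mark(struct handle *h, struct buf *b)
    {
            if (h->credits <= 0)
                    return -1;      /* analogous to -ENOSPC, b untouched */

            b->modified = 1;        /* only after the check passes */
            h->credits--;
            return 0;
    }

    int main(void)
    {
            struct handle h = { .credits = 0 };
            struct buf b = { 0 };

            if (reserve_and_mark(&h, &b))
                    printf("failed cleanly, modified=%d\n", b.modified);
            return 0;
    }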
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e9679016271f..dfd73a4616ce 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -831,7 +831,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) | |||
831 | SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); | 831 | SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); |
832 | SEQ_PUT_DEC(" kB\nSwapPss: ", | 832 | SEQ_PUT_DEC(" kB\nSwapPss: ", |
833 | mss->swap_pss >> PSS_SHIFT); | 833 | mss->swap_pss >> PSS_SHIFT); |
834 | SEQ_PUT_DEC(" kB\nLocked: ", mss->pss >> PSS_SHIFT); | 834 | SEQ_PUT_DEC(" kB\nLocked: ", |
835 | mss->pss_locked >> PSS_SHIFT); | ||
835 | seq_puts(m, " kB\n"); | 836 | seq_puts(m, " kB\n"); |
836 | } | 837 | } |
837 | if (!rollup_mode) { | 838 | if (!rollup_mode) { |
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 7e288d97adcb..9fed1c05f1f4 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c | |||
@@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key) | |||
76 | } | 76 | } |
77 | 77 | ||
78 | /* %k */ | 78 | /* %k */ |
79 | static void sprintf_le_key(char *buf, struct reiserfs_key *key) | 79 | static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key) |
80 | { | 80 | { |
81 | if (key) | 81 | if (key) |
82 | sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id), | 82 | return scnprintf(buf, size, "[%d %d %s %s]", |
83 | le32_to_cpu(key->k_objectid), le_offset(key), | 83 | le32_to_cpu(key->k_dir_id), |
84 | le_type(key)); | 84 | le32_to_cpu(key->k_objectid), le_offset(key), |
85 | le_type(key)); | ||
85 | else | 86 | else |
86 | sprintf(buf, "[NULL]"); | 87 | return scnprintf(buf, size, "[NULL]"); |
87 | } | 88 | } |
88 | 89 | ||
89 | /* %K */ | 90 | /* %K */ |
90 | static void sprintf_cpu_key(char *buf, struct cpu_key *key) | 91 | static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key) |
91 | { | 92 | { |
92 | if (key) | 93 | if (key) |
93 | sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id, | 94 | return scnprintf(buf, size, "[%d %d %s %s]", |
94 | key->on_disk_key.k_objectid, reiserfs_cpu_offset(key), | 95 | key->on_disk_key.k_dir_id, |
95 | cpu_type(key)); | 96 | key->on_disk_key.k_objectid, |
97 | reiserfs_cpu_offset(key), cpu_type(key)); | ||
96 | else | 98 | else |
97 | sprintf(buf, "[NULL]"); | 99 | return scnprintf(buf, size, "[NULL]"); |
98 | } | 100 | } |
99 | 101 | ||
100 | static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh) | 102 | static int scnprintf_de_head(char *buf, size_t size, |
103 | struct reiserfs_de_head *deh) | ||
101 | { | 104 | { |
102 | if (deh) | 105 | if (deh) |
103 | sprintf(buf, | 106 | return scnprintf(buf, size, |
104 | "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", | 107 | "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", |
105 | deh_offset(deh), deh_dir_id(deh), deh_objectid(deh), | 108 | deh_offset(deh), deh_dir_id(deh), |
106 | deh_location(deh), deh_state(deh)); | 109 | deh_objectid(deh), deh_location(deh), |
110 | deh_state(deh)); | ||
107 | else | 111 | else |
108 | sprintf(buf, "[NULL]"); | 112 | return scnprintf(buf, size, "[NULL]"); |
109 | 113 | ||
110 | } | 114 | } |
111 | 115 | ||
112 | static void sprintf_item_head(char *buf, struct item_head *ih) | 116 | static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih) |
113 | { | 117 | { |
114 | if (ih) { | 118 | if (ih) { |
115 | strcpy(buf, | 119 | char *p = buf; |
116 | (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*"); | 120 | char * const end = buf + size; |
117 | sprintf_le_key(buf + strlen(buf), &(ih->ih_key)); | 121 | |
118 | sprintf(buf + strlen(buf), ", item_len %d, item_location %d, " | 122 | p += scnprintf(p, end - p, "%s", |
119 | "free_space(entry_count) %d", | 123 | (ih_version(ih) == KEY_FORMAT_3_6) ? |
120 | ih_item_len(ih), ih_location(ih), ih_free_space(ih)); | 124 | "*3.6* " : "*3.5*"); |
125 | |||
126 | p += scnprintf_le_key(p, end - p, &ih->ih_key); | ||
127 | |||
128 | p += scnprintf(p, end - p, | ||
129 | ", item_len %d, item_location %d, free_space(entry_count) %d", | ||
130 | ih_item_len(ih), ih_location(ih), | ||
131 | ih_free_space(ih)); | ||
132 | return p - buf; | ||
121 | } else | 133 | } else |
122 | sprintf(buf, "[NULL]"); | 134 | return scnprintf(buf, size, "[NULL]"); |
123 | } | 135 | } |
124 | 136 | ||
125 | static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de) | 137 | static int scnprintf_direntry(char *buf, size_t size, |
138 | struct reiserfs_dir_entry *de) | ||
126 | { | 139 | { |
127 | char name[20]; | 140 | char name[20]; |
128 | 141 | ||
129 | memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen); | 142 | memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen); |
130 | name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0; | 143 | name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0; |
131 | sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid); | 144 | return scnprintf(buf, size, "\"%s\"==>[%d %d]", |
145 | name, de->de_dir_id, de->de_objectid); | ||
132 | } | 146 | } |
133 | 147 | ||
134 | static void sprintf_block_head(char *buf, struct buffer_head *bh) | 148 | static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh) |
135 | { | 149 | { |
136 | sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ", | 150 | return scnprintf(buf, size, |
137 | B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh)); | 151 | "level=%d, nr_items=%d, free_space=%d rdkey ", |
152 | B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh)); | ||
138 | } | 153 | } |
139 | 154 | ||
140 | static void sprintf_buffer_head(char *buf, struct buffer_head *bh) | 155 | static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh) |
141 | { | 156 | { |
142 | sprintf(buf, | 157 | return scnprintf(buf, size, |
143 | "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", | 158 | "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", |
144 | bh->b_bdev, bh->b_size, | 159 | bh->b_bdev, bh->b_size, |
145 | (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)), | 160 | (unsigned long long)bh->b_blocknr, |
146 | bh->b_state, bh->b_page, | 161 | atomic_read(&(bh->b_count)), |
147 | buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE", | 162 | bh->b_state, bh->b_page, |
148 | buffer_dirty(bh) ? "DIRTY" : "CLEAN", | 163 | buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE", |
149 | buffer_locked(bh) ? "LOCKED" : "UNLOCKED"); | 164 | buffer_dirty(bh) ? "DIRTY" : "CLEAN", |
165 | buffer_locked(bh) ? "LOCKED" : "UNLOCKED"); | ||
150 | } | 166 | } |
151 | 167 | ||
152 | static void sprintf_disk_child(char *buf, struct disk_child *dc) | 168 | static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc) |
153 | { | 169 | { |
154 | sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc), | 170 | return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]", |
155 | dc_size(dc)); | 171 | dc_block_number(dc), dc_size(dc)); |
156 | } | 172 | } |
157 | 173 | ||
158 | static char *is_there_reiserfs_struct(char *fmt, int *what) | 174 | static char *is_there_reiserfs_struct(char *fmt, int *what) |
@@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args) | |||
189 | char *fmt1 = fmt_buf; | 205 | char *fmt1 = fmt_buf; |
190 | char *k; | 206 | char *k; |
191 | char *p = error_buf; | 207 | char *p = error_buf; |
208 | char * const end = &error_buf[sizeof(error_buf)]; | ||
192 | int what; | 209 | int what; |
193 | 210 | ||
194 | spin_lock(&error_lock); | 211 | spin_lock(&error_lock); |
195 | 212 | ||
196 | strcpy(fmt1, fmt); | 213 | if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) { |
214 | strscpy(error_buf, "format string too long", end - error_buf); | ||
215 | goto out_unlock; | ||
216 | } | ||
197 | 217 | ||
198 | while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) { | 218 | while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) { |
199 | *k = 0; | 219 | *k = 0; |
200 | 220 | ||
201 | p += vsprintf(p, fmt1, args); | 221 | p += vscnprintf(p, end - p, fmt1, args); |
202 | 222 | ||
203 | switch (what) { | 223 | switch (what) { |
204 | case 'k': | 224 | case 'k': |
205 | sprintf_le_key(p, va_arg(args, struct reiserfs_key *)); | 225 | p += scnprintf_le_key(p, end - p, |
226 | va_arg(args, struct reiserfs_key *)); | ||
206 | break; | 227 | break; |
207 | case 'K': | 228 | case 'K': |
208 | sprintf_cpu_key(p, va_arg(args, struct cpu_key *)); | 229 | p += scnprintf_cpu_key(p, end - p, |
230 | va_arg(args, struct cpu_key *)); | ||
209 | break; | 231 | break; |
210 | case 'h': | 232 | case 'h': |
211 | sprintf_item_head(p, va_arg(args, struct item_head *)); | 233 | p += scnprintf_item_head(p, end - p, |
234 | va_arg(args, struct item_head *)); | ||
212 | break; | 235 | break; |
213 | case 't': | 236 | case 't': |
214 | sprintf_direntry(p, | 237 | p += scnprintf_direntry(p, end - p, |
215 | va_arg(args, | 238 | va_arg(args, struct reiserfs_dir_entry *)); |
216 | struct reiserfs_dir_entry *)); | ||
217 | break; | 239 | break; |
218 | case 'y': | 240 | case 'y': |
219 | sprintf_disk_child(p, | 241 | p += scnprintf_disk_child(p, end - p, |
220 | va_arg(args, struct disk_child *)); | 242 | va_arg(args, struct disk_child *)); |
221 | break; | 243 | break; |
222 | case 'z': | 244 | case 'z': |
223 | sprintf_block_head(p, | 245 | p += scnprintf_block_head(p, end - p, |
224 | va_arg(args, struct buffer_head *)); | 246 | va_arg(args, struct buffer_head *)); |
225 | break; | 247 | break; |
226 | case 'b': | 248 | case 'b': |
227 | sprintf_buffer_head(p, | 249 | p += scnprintf_buffer_head(p, end - p, |
228 | va_arg(args, struct buffer_head *)); | 250 | va_arg(args, struct buffer_head *)); |
229 | break; | 251 | break; |
230 | case 'a': | 252 | case 'a': |
231 | sprintf_de_head(p, | 253 | p += scnprintf_de_head(p, end - p, |
232 | va_arg(args, | 254 | va_arg(args, struct reiserfs_de_head *)); |
233 | struct reiserfs_de_head *)); | ||
234 | break; | 255 | break; |
235 | } | 256 | } |
236 | 257 | ||
237 | p += strlen(p); | ||
238 | fmt1 = k + 2; | 258 | fmt1 = k + 2; |
239 | } | 259 | } |
240 | vsprintf(p, fmt1, args); | 260 | p += vscnprintf(p, end - p, fmt1, args); |
261 | out_unlock: | ||
241 | spin_unlock(&error_lock); | 262 | spin_unlock(&error_lock); |
242 | 263 | ||
243 | } | 264 | } |
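The reiserfs prints.c rewrite replaces unbounded sprintf() calls with scnprintf()/vscnprintf() against a running end pointer, so each formatter reports how much it actually wrote and can never overrun error_buf. A hedged user-space sketch of the same pattern; user space has no scnprintf(), so the vsnprintf() return value is clamped by hand the way scnprintf() does internally:

    #include <stdarg.h>
    #include <stdio.h>

    /* Append formatted text to buf[0..size) starting at *pos, never
     * overrunning the buffer, and advance *pos by the amount written. */
    static void append(char *buf, size_t size, size_t *pos,
                       const char *fmt, ...)
    {
            va_list args;
            int n;

            if (*pos >= size)
                    return;
            va_start(args, fmt);
            n = vsnprintf(buf + *pos, size - *pos, fmt, args);
            va_end(args);
            if (n < 0)
                    return;
            if ((size_t)n > size - *pos - 1)
                    n = size - *pos - 1;    /* output was truncated */
            *pos += n;
    }

    int main(void)
    {
            char buf[32];
            size_t pos = 0;

            append(buf, sizeof(buf), &pos, "[%d %d]", 42, 7);
            append(buf, sizeof(buf), &pos, " level=%d", 3);
            printf("%s (len=%zu)\n", buf, pos);
            return 0;
    }

Tracking the running length instead of calling strlen() after every step is also what lets the new prepare_error_buf() drop its p += strlen(p) line.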
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 123bf7d516fc..594d192b2331 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, | |||
222 | unsigned long reason) | 222 | unsigned long reason) |
223 | { | 223 | { |
224 | struct mm_struct *mm = ctx->mm; | 224 | struct mm_struct *mm = ctx->mm; |
225 | pte_t *pte; | 225 | pte_t *ptep, pte; |
226 | bool ret = true; | 226 | bool ret = true; |
227 | 227 | ||
228 | VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); | 228 | VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); |
229 | 229 | ||
230 | pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); | 230 | ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); |
231 | if (!pte) | 231 | |
232 | if (!ptep) | ||
232 | goto out; | 233 | goto out; |
233 | 234 | ||
234 | ret = false; | 235 | ret = false; |
236 | pte = huge_ptep_get(ptep); | ||
235 | 237 | ||
236 | /* | 238 | /* |
237 | * Lockless access: we're in a wait_event so it's ok if it | 239 | * Lockless access: we're in a wait_event so it's ok if it |
238 | * changes under us. | 240 | * changes under us. |
239 | */ | 241 | */ |
240 | if (huge_pte_none(*pte)) | 242 | if (huge_pte_none(pte)) |
241 | ret = true; | 243 | ret = true; |
242 | if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP)) | 244 | if (!huge_pte_write(pte) && (reason & VM_UFFD_WP)) |
243 | ret = true; | 245 | ret = true; |
244 | out: | 246 | out: |
245 | return ret; | 247 | return ret; |
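The userfaultfd change copies the huge PTE once with huge_ptep_get() and tests the local copy, instead of dereferencing the live entry twice while it may change under a lockless reader. A hedged sketch of reading a shared word once and deriving every decision from the snapshot, using C11 atomics in place of the kernel's pte accessors:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Shared state that another thread may update at any time. */
    static _Atomic unsigned long shared_flags;

    #define FLAG_PRESENT  0x1UL
    #define FLAG_WRITABLE 0x2UL

    /* Take one snapshot and derive every decision from it, so the two
     * tests cannot observe two different values -- the same reason the
     * userfaultfd code switched to huge_ptep_get(). */
    static bool must_wait(bool want_write)
    {
            unsigned long snap = atomic_load(&shared_flags);

            if (!(snap & FLAG_PRESENT))
                    return true;
            if (want_write && !(snap & FLAG_WRITABLE))
                    return true;
            return false;
    }

    int main(void)
    {
            atomic_store(&shared_flags, FLAG_PRESENT);
            printf("wait for write fault: %d\n", must_wait(true));
            return 0;
    }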
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index faddde44de8c..3063125197ad 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, | |||
265 | * For now w.r.t page table cache, mark the range_size as PAGE_SIZE | 265 | * For now w.r.t page table cache, mark the range_size as PAGE_SIZE |
266 | */ | 266 | */ |
267 | 267 | ||
268 | #ifndef pte_free_tlb | ||
268 | #define pte_free_tlb(tlb, ptep, address) \ | 269 | #define pte_free_tlb(tlb, ptep, address) \ |
269 | do { \ | 270 | do { \ |
270 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ | 271 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ |
271 | __pte_free_tlb(tlb, ptep, address); \ | 272 | __pte_free_tlb(tlb, ptep, address); \ |
272 | } while (0) | 273 | } while (0) |
274 | #endif | ||
273 | 275 | ||
276 | #ifndef pmd_free_tlb | ||
274 | #define pmd_free_tlb(tlb, pmdp, address) \ | 277 | #define pmd_free_tlb(tlb, pmdp, address) \ |
275 | do { \ | 278 | do { \ |
276 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ | 279 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ |
277 | __pmd_free_tlb(tlb, pmdp, address); \ | 280 | __pmd_free_tlb(tlb, pmdp, address); \ |
278 | } while (0) | 281 | } while (0) |
282 | #endif | ||
279 | 283 | ||
280 | #ifndef __ARCH_HAS_4LEVEL_HACK | 284 | #ifndef __ARCH_HAS_4LEVEL_HACK |
285 | #ifndef pud_free_tlb | ||
281 | #define pud_free_tlb(tlb, pudp, address) \ | 286 | #define pud_free_tlb(tlb, pudp, address) \ |
282 | do { \ | 287 | do { \ |
283 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ | 288 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ |
284 | __pud_free_tlb(tlb, pudp, address); \ | 289 | __pud_free_tlb(tlb, pudp, address); \ |
285 | } while (0) | 290 | } while (0) |
286 | #endif | 291 | #endif |
292 | #endif | ||
287 | 293 | ||
288 | #ifndef __ARCH_HAS_5LEVEL_HACK | 294 | #ifndef __ARCH_HAS_5LEVEL_HACK |
295 | #ifndef p4d_free_tlb | ||
289 | #define p4d_free_tlb(tlb, pudp, address) \ | 296 | #define p4d_free_tlb(tlb, pudp, address) \ |
290 | do { \ | 297 | do { \ |
291 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ | 298 | __tlb_adjust_range(tlb, address, PAGE_SIZE); \ |
292 | __p4d_free_tlb(tlb, pudp, address); \ | 299 | __p4d_free_tlb(tlb, pudp, address); \ |
293 | } while (0) | 300 | } while (0) |
294 | #endif | 301 | #endif |
302 | #endif | ||
295 | 303 | ||
296 | #define tlb_migrate_finish(mm) do {} while (0) | 304 | #define tlb_migrate_finish(mm) do {} while (0) |
297 | 305 | ||
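The asm-generic/tlb.h change wraps each *_free_tlb() macro in #ifndef so an architecture header included earlier can provide its own definition and the generic one becomes a fallback. A hedged, self-contained sketch of that override pattern with a made-up macro name:

    #include <stdio.h>

    /* An "arch" header would normally define this first; uncomment the
     * next line to simulate an architecture override. */
    /* #define free_entry(x) printf("arch free of %d\n", (x)) */

    /* Generic fallback, used only when no override exists -- the same
     * #ifndef guard now protecting pte_free_tlb() and friends. */
    #ifndef free_entry
    #define free_entry(x)                                   \
            do {                                            \
                    printf("generic free of %d\n", (x));    \
            } while (0)
    #endif

    int main(void)
    {
            free_entry(42);
            return 0;
    }

The do { } while (0) wrapper keeps the multi-statement macro safe inside an unbraced if/else, matching how the kernel macros are written.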
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h index 9564597cbfac..0aa1d9c3e0b9 100644 --- a/include/dt-bindings/clock/imx6ul-clock.h +++ b/include/dt-bindings/clock/imx6ul-clock.h | |||
@@ -235,27 +235,25 @@ | |||
235 | #define IMX6UL_CLK_CSI_PODF 222 | 235 | #define IMX6UL_CLK_CSI_PODF 222 |
236 | #define IMX6UL_CLK_PLL3_120M 223 | 236 | #define IMX6UL_CLK_PLL3_120M 223 |
237 | #define IMX6UL_CLK_KPP 224 | 237 | #define IMX6UL_CLK_KPP 224 |
238 | #define IMX6UL_CLK_CKO1_SEL 225 | 238 | #define IMX6ULL_CLK_ESAI_PRED 225 |
239 | #define IMX6UL_CLK_CKO1_PODF 226 | 239 | #define IMX6ULL_CLK_ESAI_PODF 226 |
240 | #define IMX6UL_CLK_CKO1 227 | 240 | #define IMX6ULL_CLK_ESAI_EXTAL 227 |
241 | #define IMX6UL_CLK_CKO2_SEL 228 | 241 | #define IMX6ULL_CLK_ESAI_MEM 228 |
242 | #define IMX6UL_CLK_CKO2_PODF 229 | 242 | #define IMX6ULL_CLK_ESAI_IPG 229 |
243 | #define IMX6UL_CLK_CKO2 230 | 243 | #define IMX6ULL_CLK_DCP_CLK 230 |
244 | #define IMX6UL_CLK_CKO 231 | 244 | #define IMX6ULL_CLK_EPDC_PRE_SEL 231 |
245 | 245 | #define IMX6ULL_CLK_EPDC_SEL 232 | |
246 | /* For i.MX6ULL */ | 246 | #define IMX6ULL_CLK_EPDC_PODF 233 |
247 | #define IMX6ULL_CLK_ESAI_PRED 232 | 247 | #define IMX6ULL_CLK_EPDC_ACLK 234 |
248 | #define IMX6ULL_CLK_ESAI_PODF 233 | 248 | #define IMX6ULL_CLK_EPDC_PIX 235 |
249 | #define IMX6ULL_CLK_ESAI_EXTAL 234 | 249 | #define IMX6ULL_CLK_ESAI_SEL 236 |
250 | #define IMX6ULL_CLK_ESAI_MEM 235 | 250 | #define IMX6UL_CLK_CKO1_SEL 237 |
251 | #define IMX6ULL_CLK_ESAI_IPG 236 | 251 | #define IMX6UL_CLK_CKO1_PODF 238 |
252 | #define IMX6ULL_CLK_DCP_CLK 237 | 252 | #define IMX6UL_CLK_CKO1 239 |
253 | #define IMX6ULL_CLK_EPDC_PRE_SEL 238 | 253 | #define IMX6UL_CLK_CKO2_SEL 240 |
254 | #define IMX6ULL_CLK_EPDC_SEL 239 | 254 | #define IMX6UL_CLK_CKO2_PODF 241 |
255 | #define IMX6ULL_CLK_EPDC_PODF 240 | 255 | #define IMX6UL_CLK_CKO2 242 |
256 | #define IMX6ULL_CLK_EPDC_ACLK 241 | 256 | #define IMX6UL_CLK_CKO 243 |
257 | #define IMX6ULL_CLK_EPDC_PIX 242 | ||
258 | #define IMX6ULL_CLK_ESAI_SEL 243 | ||
259 | #define IMX6UL_CLK_END 244 | 257 | #define IMX6UL_CLK_END 244 |
260 | 258 | ||
261 | #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */ | 259 | #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */ |
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 975fb4cf1bb7..79795c5fa7c3 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h | |||
@@ -188,12 +188,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, | |||
188 | \ | 188 | \ |
189 | __ret; \ | 189 | __ret; \ |
190 | }) | 190 | }) |
191 | int cgroup_bpf_prog_attach(const union bpf_attr *attr, | ||
192 | enum bpf_prog_type ptype, struct bpf_prog *prog); | ||
193 | int cgroup_bpf_prog_detach(const union bpf_attr *attr, | ||
194 | enum bpf_prog_type ptype); | ||
195 | int cgroup_bpf_prog_query(const union bpf_attr *attr, | ||
196 | union bpf_attr __user *uattr); | ||
191 | #else | 197 | #else |
192 | 198 | ||
199 | struct bpf_prog; | ||
193 | struct cgroup_bpf {}; | 200 | struct cgroup_bpf {}; |
194 | static inline void cgroup_bpf_put(struct cgroup *cgrp) {} | 201 | static inline void cgroup_bpf_put(struct cgroup *cgrp) {} |
195 | static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } | 202 | static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } |
196 | 203 | ||
204 | static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, | ||
205 | enum bpf_prog_type ptype, | ||
206 | struct bpf_prog *prog) | ||
207 | { | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, | ||
212 | enum bpf_prog_type ptype) | ||
213 | { | ||
214 | return -EINVAL; | ||
215 | } | ||
216 | |||
217 | static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, | ||
218 | union bpf_attr __user *uattr) | ||
219 | { | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | |||
197 | #define cgroup_bpf_enabled (0) | 223 | #define cgroup_bpf_enabled (0) |
198 | #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) | 224 | #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) |
199 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) | 225 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) |
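The bpf-cgroup.h hunk adds real prototypes under the config option and matching static inline stubs returning -EINVAL when it is disabled, so callers compile unchanged either way. A hedged sketch of that config-gated stub pattern with an invented feature switch and function name:

    #include <errno.h>
    #include <stdio.h>

    /* Define this to simulate the feature being compiled in; when it is,
     * a real foo_attach() would have to be linked in from elsewhere. */
    /* #define CONFIG_FOO 1 */

    #ifdef CONFIG_FOO
    int foo_attach(int fd);         /* real implementation elsewhere */
    #else
    /* Stub keeps callers compiling and returns a clear error, just as the
     * !CONFIG_CGROUP_BPF variants of cgroup_bpf_prog_attach() do. */
    static inline int foo_attach(int fd)
    {
            (void)fd;
            return -EINVAL;
    }
    #endif

    int main(void)
    {
            printf("foo_attach: %d\n", foo_attach(3));
            return 0;
    }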
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 7df32a3200f7..8827e797ff97 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -696,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) | |||
696 | struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); | 696 | struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); |
697 | struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); | 697 | struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); |
698 | int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); | 698 | int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); |
699 | int sockmap_get_from_fd(const union bpf_attr *attr, int type, | ||
700 | struct bpf_prog *prog); | ||
699 | #else | 701 | #else |
700 | static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) | 702 | static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) |
701 | { | 703 | { |
@@ -714,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map, | |||
714 | { | 716 | { |
715 | return -EOPNOTSUPP; | 717 | return -EOPNOTSUPP; |
716 | } | 718 | } |
719 | |||
720 | static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, | ||
721 | struct bpf_prog *prog) | ||
722 | { | ||
723 | return -EINVAL; | ||
724 | } | ||
717 | #endif | 725 | #endif |
718 | 726 | ||
719 | #if defined(CONFIG_XDP_SOCKETS) | 727 | #if defined(CONFIG_XDP_SOCKETS) |
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h index 5f8a4283092d..9d9ff755ec29 100644 --- a/include/linux/bpf_lirc.h +++ b/include/linux/bpf_lirc.h | |||
@@ -5,11 +5,12 @@ | |||
5 | #include <uapi/linux/bpf.h> | 5 | #include <uapi/linux/bpf.h> |
6 | 6 | ||
7 | #ifdef CONFIG_BPF_LIRC_MODE2 | 7 | #ifdef CONFIG_BPF_LIRC_MODE2 |
8 | int lirc_prog_attach(const union bpf_attr *attr); | 8 | int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); |
9 | int lirc_prog_detach(const union bpf_attr *attr); | 9 | int lirc_prog_detach(const union bpf_attr *attr); |
10 | int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); | 10 | int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); |
11 | #else | 11 | #else |
12 | static inline int lirc_prog_attach(const union bpf_attr *attr) | 12 | static inline int lirc_prog_attach(const union bpf_attr *attr, |
13 | struct bpf_prog *prog) | ||
13 | { | 14 | { |
14 | return -EINVAL; | 15 | return -EINVAL; |
15 | } | 16 | } |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index fd282c7d3e5e..573f5a7d42d4 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -66,25 +66,40 @@ | |||
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * Feature detection for gnu_inline (gnu89 extern inline semantics). Either | ||
70 | * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics, | ||
71 | * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not | ||
72 | * defined so the gnu89 semantics are the default. | ||
73 | */ | ||
74 | #ifdef __GNUC_STDC_INLINE__ | ||
75 | # define __gnu_inline __attribute__((gnu_inline)) | ||
76 | #else | ||
77 | # define __gnu_inline | ||
78 | #endif | ||
79 | |||
80 | /* | ||
69 | * Force always-inline if the user requests it so via the .config, | 81 | * Force always-inline if the user requests it so via the .config, |
70 | * or if gcc is too old. | 82 | * or if gcc is too old. |
71 | * GCC does not warn about unused static inline functions for | 83 | * GCC does not warn about unused static inline functions for |
72 | * -Wunused-function. This turns out to avoid the need for complex #ifdef | 84 | * -Wunused-function. This turns out to avoid the need for complex #ifdef |
73 | * directives. Suppress the warning in clang as well by using "unused" | 85 | * directives. Suppress the warning in clang as well by using "unused" |
74 | * function attribute, which is redundant but not harmful for gcc. | 86 | * function attribute, which is redundant but not harmful for gcc. |
87 | * Prefer gnu_inline, so that extern inline functions do not emit an | ||
88 | * externally visible function. This makes extern inline behave as per gnu89 | ||
89 | * semantics rather than c99. This prevents multiple symbol definition errors | ||
90 | * of extern inline functions at link time. | ||
91 | * A lot of inline functions can cause havoc with function tracing. | ||
75 | */ | 92 | */ |
76 | #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ | 93 | #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ |
77 | !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) | 94 | !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) |
78 | #define inline inline __attribute__((always_inline,unused)) notrace | 95 | #define inline \ |
79 | #define __inline__ __inline__ __attribute__((always_inline,unused)) notrace | 96 | inline __attribute__((always_inline, unused)) notrace __gnu_inline |
80 | #define __inline __inline __attribute__((always_inline,unused)) notrace | ||
81 | #else | 97 | #else |
82 | /* A lot of inline functions can cause havoc with function tracing */ | 98 | #define inline inline __attribute__((unused)) notrace __gnu_inline |
83 | #define inline inline __attribute__((unused)) notrace | ||
84 | #define __inline__ __inline__ __attribute__((unused)) notrace | ||
85 | #define __inline __inline __attribute__((unused)) notrace | ||
86 | #endif | 99 | #endif |
87 | 100 | ||
101 | #define __inline__ inline | ||
102 | #define __inline inline | ||
88 | #define __always_inline inline __attribute__((always_inline)) | 103 | #define __always_inline inline __attribute__((always_inline)) |
89 | #define noinline __attribute__((noinline)) | 104 | #define noinline __attribute__((noinline)) |
90 | 105 | ||
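The compiler-gcc.h change detects __GNUC_STDC_INLINE__ and tags the kernel's inline with __attribute__((gnu_inline)), so extern inline keeps gnu89 semantics (no externally visible symbol emitted from every translation unit) even when the compiler defaults to C99 inline. A small hedged sketch of just the detection step, with my_gnu_inline standing in for the kernel's __gnu_inline:

    #include <stdio.h>

    /* __GNUC_STDC_INLINE__ is defined when the compiler uses C99 inline
     * semantics, in which case the gnu_inline attribute is needed to opt
     * back into gnu89 behaviour; otherwise gnu89 is already the default. */
    #ifdef __GNUC_STDC_INLINE__
    # define my_gnu_inline __attribute__((gnu_inline))
    # define INLINE_DIALECT "C99 (gnu_inline attribute needed)"
    #else
    # define my_gnu_inline
    # define INLINE_DIALECT "gnu89 (attribute not needed)"
    #endif

    int main(void)
    {
            printf("inline semantics: %s\n", INLINE_DIALECT);
            return 0;
    }

Collapsing __inline__ and __inline to plain inline, as the hunk does, then makes the attribute apply uniformly to all three spellings.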
diff --git a/include/linux/filter.h b/include/linux/filter.h index 20f2659dd829..300baad62c88 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -470,9 +470,7 @@ struct sock_fprog_kern { | |||
470 | }; | 470 | }; |
471 | 471 | ||
472 | struct bpf_binary_header { | 472 | struct bpf_binary_header { |
473 | u16 pages; | 473 | u32 pages; |
474 | u16 locked:1; | ||
475 | |||
476 | /* Some arches need word alignment for their instructions */ | 474 | /* Some arches need word alignment for their instructions */ |
477 | u8 image[] __aligned(4); | 475 | u8 image[] __aligned(4); |
478 | }; | 476 | }; |
@@ -481,7 +479,7 @@ struct bpf_prog { | |||
481 | u16 pages; /* Number of allocated pages */ | 479 | u16 pages; /* Number of allocated pages */ |
482 | u16 jited:1, /* Is our filter JIT'ed? */ | 480 | u16 jited:1, /* Is our filter JIT'ed? */ |
483 | jit_requested:1,/* archs need to JIT the prog */ | 481 | jit_requested:1,/* archs need to JIT the prog */ |
484 | locked:1, /* Program image locked? */ | 482 | undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ |
485 | gpl_compatible:1, /* Is filter GPL compatible? */ | 483 | gpl_compatible:1, /* Is filter GPL compatible? */ |
486 | cb_access:1, /* Is control block accessed? */ | 484 | cb_access:1, /* Is control block accessed? */ |
487 | dst_needed:1, /* Do we need dst entry? */ | 485 | dst_needed:1, /* Do we need dst entry? */ |
@@ -677,46 +675,24 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) | |||
677 | 675 | ||
678 | static inline void bpf_prog_lock_ro(struct bpf_prog *fp) | 676 | static inline void bpf_prog_lock_ro(struct bpf_prog *fp) |
679 | { | 677 | { |
680 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | 678 | fp->undo_set_mem = 1; |
681 | fp->locked = 1; | 679 | set_memory_ro((unsigned long)fp, fp->pages); |
682 | if (set_memory_ro((unsigned long)fp, fp->pages)) | ||
683 | fp->locked = 0; | ||
684 | #endif | ||
685 | } | 680 | } |
686 | 681 | ||
687 | static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) | 682 | static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) |
688 | { | 683 | { |
689 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | 684 | if (fp->undo_set_mem) |
690 | if (fp->locked) { | 685 | set_memory_rw((unsigned long)fp, fp->pages); |
691 | WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages)); | ||
692 | /* In case set_memory_rw() fails, we want to be the first | ||
693 | * to crash here instead of some random place later on. | ||
694 | */ | ||
695 | fp->locked = 0; | ||
696 | } | ||
697 | #endif | ||
698 | } | 686 | } |
699 | 687 | ||
700 | static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) | 688 | static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) |
701 | { | 689 | { |
702 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | 690 | set_memory_ro((unsigned long)hdr, hdr->pages); |
703 | hdr->locked = 1; | ||
704 | if (set_memory_ro((unsigned long)hdr, hdr->pages)) | ||
705 | hdr->locked = 0; | ||
706 | #endif | ||
707 | } | 691 | } |
708 | 692 | ||
709 | static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) | 693 | static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) |
710 | { | 694 | { |
711 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | 695 | set_memory_rw((unsigned long)hdr, hdr->pages); |
712 | if (hdr->locked) { | ||
713 | WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages)); | ||
714 | /* In case set_memory_rw() fails, we want to be the first | ||
715 | * to crash here instead of some random place later on. | ||
716 | */ | ||
717 | hdr->locked = 0; | ||
718 | } | ||
719 | #endif | ||
720 | } | 696 | } |
721 | 697 | ||
722 | static inline struct bpf_binary_header * | 698 | static inline struct bpf_binary_header * |
@@ -728,22 +704,6 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp) | |||
728 | return (void *)addr; | 704 | return (void *)addr; |
729 | } | 705 | } |
730 | 706 | ||
731 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | ||
732 | static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp) | ||
733 | { | ||
734 | if (!fp->locked) | ||
735 | return -ENOLCK; | ||
736 | if (fp->jited) { | ||
737 | const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); | ||
738 | |||
739 | if (!hdr->locked) | ||
740 | return -ENOLCK; | ||
741 | } | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | #endif | ||
746 | |||
747 | int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); | 707 | int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); |
748 | static inline int sk_filter(struct sock *sk, struct sk_buff *skb) | 708 | static inline int sk_filter(struct sock *sk, struct sk_buff *skb) |
749 | { | 709 | { |
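The filter.h change drops the per-image locked bookkeeping and simply records (via undo_set_mem) that set_memory_ro() was attempted, undoing the protection unconditionally on free. A loose, hedged user-space analogue that write-protects an anonymous mapping with mprotect() and restores it before unmapping; this is only an illustration of flipping page permissions around an image, not the kernel mechanism itself:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return 1;

            strcpy(buf, "immutable-ish data");

            /* Lock the image read-only, remembering only that we tried,
             * analogous to bpf_prog_lock_ro() setting undo_set_mem. */
            int tried_ro = (mprotect(buf, page, PROT_READ) == 0);

            printf("%s (read-only=%d)\n", buf, tried_ro);

            /* Undo before freeing, as bpf_prog_unlock_ro() now does. */
            mprotect(buf, page, PROT_READ | PROT_WRITE);
            munmap(buf, page);
            return 0;
    }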
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 8154f4920fcb..ebb77674be90 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type; | |||
223 | */ | 223 | */ |
224 | int register_ftrace_function(struct ftrace_ops *ops); | 224 | int register_ftrace_function(struct ftrace_ops *ops); |
225 | int unregister_ftrace_function(struct ftrace_ops *ops); | 225 | int unregister_ftrace_function(struct ftrace_ops *ops); |
226 | void clear_ftrace_function(void); | ||
227 | 226 | ||
228 | extern void ftrace_stub(unsigned long a0, unsigned long a1, | 227 | extern void ftrace_stub(unsigned long a0, unsigned long a1, |
229 | struct ftrace_ops *op, struct pt_regs *regs); | 228 | struct ftrace_ops *op, struct pt_regs *regs); |
@@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void) | |||
239 | { | 238 | { |
240 | return 0; | 239 | return 0; |
241 | } | 240 | } |
242 | static inline void clear_ftrace_function(void) { } | ||
243 | static inline void ftrace_kill(void) { } | 241 | static inline void ftrace_kill(void) { } |
244 | static inline void ftrace_free_init_mem(void) { } | 242 | static inline void ftrace_free_init_mem(void) { } |
245 | static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } | 243 | static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } |
diff --git a/include/linux/hid.h b/include/linux/hid.h index 41a3d5775394..773bcb1d4044 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -511,6 +511,7 @@ struct hid_output_fifo { | |||
511 | #define HID_STAT_ADDED BIT(0) | 511 | #define HID_STAT_ADDED BIT(0) |
512 | #define HID_STAT_PARSED BIT(1) | 512 | #define HID_STAT_PARSED BIT(1) |
513 | #define HID_STAT_DUP_DETECTED BIT(2) | 513 | #define HID_STAT_DUP_DETECTED BIT(2) |
514 | #define HID_STAT_REPROBED BIT(3) | ||
514 | 515 | ||
515 | struct hid_input { | 516 | struct hid_input { |
516 | struct list_head list; | 517 | struct list_head list; |
@@ -579,7 +580,7 @@ struct hid_device { /* device report descriptor */ | |||
579 | bool battery_avoid_query; | 580 | bool battery_avoid_query; |
580 | #endif | 581 | #endif |
581 | 582 | ||
582 | unsigned int status; /* see STAT flags above */ | 583 | unsigned long status; /* see STAT flags above */ |
583 | unsigned claimed; /* Claimed by hidinput, hiddev? */ | 584 | unsigned claimed; /* Claimed by hidinput, hiddev? */ |
584 | unsigned quirks; /* Various quirks the device can pull on us */ | 585 | unsigned quirks; /* Various quirks the device can pull on us */ |
585 | bool io_started; /* If IO has started */ | 586 | bool io_started; /* If IO has started */ |
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 2803264c512f..c1961761311d 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k); | |||
62 | int kthread_park(struct task_struct *k); | 62 | int kthread_park(struct task_struct *k); |
63 | void kthread_unpark(struct task_struct *k); | 63 | void kthread_unpark(struct task_struct *k); |
64 | void kthread_parkme(void); | 64 | void kthread_parkme(void); |
65 | void kthread_park_complete(struct task_struct *k); | ||
66 | 65 | ||
67 | int kthreadd(void *unused); | 66 | int kthreadd(void *unused); |
68 | extern struct task_struct *kthreadd_task; | 67 | extern struct task_struct *kthreadd_task; |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 8b8946dd63b9..32f247cb5e9e 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -210,6 +210,7 @@ enum { | |||
210 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ | 210 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ |
211 | /* (doesn't imply presence) */ | 211 | /* (doesn't imply presence) */ |
212 | ATA_FLAG_SATA = (1 << 1), | 212 | ATA_FLAG_SATA = (1 << 1), |
213 | ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */ | ||
213 | ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ | 214 | ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ |
214 | ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ | 215 | ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ |
215 | ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ | 216 | ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ |
@@ -1495,6 +1496,29 @@ static inline bool ata_tag_valid(unsigned int tag) | |||
1495 | return tag < ATA_MAX_QUEUE || ata_tag_internal(tag); | 1496 | return tag < ATA_MAX_QUEUE || ata_tag_internal(tag); |
1496 | } | 1497 | } |
1497 | 1498 | ||
1499 | #define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \ | ||
1500 | for ((tag) = 0; (tag) < (max_tag) && \ | ||
1501 | ({ qc = fn((ap), (tag)); 1; }); (tag)++) \ | ||
1502 | |||
1503 | /* | ||
1504 | * Internal use only, iterate commands ignoring error handling and | ||
1505 | * status of 'qc'. | ||
1506 | */ | ||
1507 | #define ata_qc_for_each_raw(ap, qc, tag) \ | ||
1508 | __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag) | ||
1509 | |||
1510 | /* | ||
1511 | * Iterate all potential commands that can be queued | ||
1512 | */ | ||
1513 | #define ata_qc_for_each(ap, qc, tag) \ | ||
1514 | __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag) | ||
1515 | |||
1516 | /* | ||
1517 | * Like ata_qc_for_each, but with the internal tag included | ||
1518 | */ | ||
1519 | #define ata_qc_for_each_with_internal(ap, qc, tag) \ | ||
1520 | __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag) | ||
1521 | |||
1498 | /* | 1522 | /* |
1499 | * device helpers | 1523 | * device helpers |
1500 | */ | 1524 | */ |
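The libata.h hunk adds __ata_qc_for_each(), a bounded iterator macro that fetches each queued command inside the loop condition. A hedged standalone sketch of the same macro shape over a plain array; the lookup function is invented, and like the kernel macro it relies on the GCC/clang statement-expression extension:

    #include <stdio.h>

    struct cmd { int tag; int active; };

    #define MAX_CMDS 4

    static struct cmd cmds[MAX_CMDS] = {
            { 0, 1 }, { 1, 0 }, { 2, 1 }, { 3, 0 },
    };

    /* Stand-in for ata_qc_from_tag(): return the slot for a tag. */
    static struct cmd *cmd_from_tag(int tag)
    {
            return &cmds[tag];
    }

    /* Same shape as __ata_qc_for_each(): the lookup runs inside the loop
     * condition, so 'c' is refreshed on every iteration. */
    #define for_each_cmd(c, tag)                                    \
            for ((tag) = 0; (tag) < MAX_CMDS &&                     \
                 ({ (c) = cmd_from_tag(tag); 1; }); (tag)++)

    int main(void)
    {
            struct cmd *c;
            int tag;

            for_each_cmd(c, tag)
                    if (c->active)
                            printf("tag %d active\n", c->tag);
            return 0;
    }

Parameterising the bound (ATA_MAX_QUEUE vs ATA_MAX_QUEUE + 1 in the kernel) is what lets one macro body serve both the raw and the with-internal-tag variants.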
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index d3c9db492b30..fab5121ffb8f 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h | |||
@@ -8,6 +8,8 @@ | |||
8 | 8 | ||
9 | #include <linux/mlx5/driver.h> | 9 | #include <linux/mlx5/driver.h> |
10 | 10 | ||
11 | #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) | ||
12 | |||
11 | enum { | 13 | enum { |
12 | SRIOV_NONE, | 14 | SRIOV_NONE, |
13 | SRIOV_LEGACY, | 15 | SRIOV_LEGACY, |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 27134c4fcb76..ac281f5ec9b8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
922 | u8 vnic_env_queue_counters[0x1]; | 922 | u8 vnic_env_queue_counters[0x1]; |
923 | u8 ets[0x1]; | 923 | u8 ets[0x1]; |
924 | u8 nic_flow_table[0x1]; | 924 | u8 nic_flow_table[0x1]; |
925 | u8 eswitch_flow_table[0x1]; | 925 | u8 eswitch_manager[0x1]; |
926 | u8 device_memory[0x1]; | 926 | u8 device_memory[0x1]; |
927 | u8 mcam_reg[0x1]; | 927 | u8 mcam_reg[0x1]; |
928 | u8 pcam_reg[0x1]; | 928 | u8 pcam_reg[0x1]; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ec9850c7936..3d0cc0b5cec2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, | |||
2789 | if (PTR_ERR(pp) != -EINPROGRESS) | 2789 | if (PTR_ERR(pp) != -EINPROGRESS) |
2790 | NAPI_GRO_CB(skb)->flush |= flush; | 2790 | NAPI_GRO_CB(skb)->flush |= flush; |
2791 | } | 2791 | } |
2792 | static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, | ||
2793 | struct sk_buff **pp, | ||
2794 | int flush, | ||
2795 | struct gro_remcsum *grc) | ||
2796 | { | ||
2797 | if (PTR_ERR(pp) != -EINPROGRESS) { | ||
2798 | NAPI_GRO_CB(skb)->flush |= flush; | ||
2799 | skb_gro_remcsum_cleanup(skb, grc); | ||
2800 | skb->remcsum_offload = 0; | ||
2801 | } | ||
2802 | } | ||
2792 | #else | 2803 | #else |
2793 | static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) | 2804 | static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) |
2794 | { | 2805 | { |
2795 | NAPI_GRO_CB(skb)->flush |= flush; | 2806 | NAPI_GRO_CB(skb)->flush |= flush; |
2796 | } | 2807 | } |
2808 | static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, | ||
2809 | struct sk_buff **pp, | ||
2810 | int flush, | ||
2811 | struct gro_remcsum *grc) | ||
2812 | { | ||
2813 | NAPI_GRO_CB(skb)->flush |= flush; | ||
2814 | skb_gro_remcsum_cleanup(skb, grc); | ||
2815 | skb->remcsum_offload = 0; | ||
2816 | } | ||
2797 | #endif | 2817 | #endif |
2798 | 2818 | ||
2799 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, | 2819 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 9256118bd40c..43731fe51c97 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -118,7 +118,7 @@ struct task_group; | |||
118 | * the comment with set_special_state(). | 118 | * the comment with set_special_state(). |
119 | */ | 119 | */ |
120 | #define is_special_task_state(state) \ | 120 | #define is_special_task_state(state) \ |
121 | ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD)) | 121 | ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) |
122 | 122 | ||
123 | #define __set_current_state(state_value) \ | 123 | #define __set_current_state(state_value) \ |
124 | do { \ | 124 | do { \ |
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 6c5f2074e14f..6f8b68cd460f 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h | |||
@@ -75,7 +75,7 @@ struct uio_device { | |||
75 | struct fasync_struct *async_queue; | 75 | struct fasync_struct *async_queue; |
76 | wait_queue_head_t wait; | 76 | wait_queue_head_t wait; |
77 | struct uio_info *info; | 77 | struct uio_info *info; |
78 | spinlock_t info_lock; | 78 | struct mutex info_lock; |
79 | struct kobject *map_dir; | 79 | struct kobject *map_dir; |
80 | struct kobject *portio_dir; | 80 | struct kobject *portio_dir; |
81 | }; | 81 | }; |
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 47e35cce3b64..a71264d75d7f 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -128,6 +128,7 @@ struct net { | |||
128 | #endif | 128 | #endif |
129 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) | 129 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) |
130 | struct netns_nf_frag nf_frag; | 130 | struct netns_nf_frag nf_frag; |
131 | struct ctl_table_header *nf_frag_frags_hdr; | ||
131 | #endif | 132 | #endif |
132 | struct sock *nfnl; | 133 | struct sock *nfnl; |
133 | struct sock *nfnl_stash; | 134 | struct sock *nfnl_stash; |
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index c978a31b0f84..762ac9931b62 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h | |||
@@ -109,7 +109,6 @@ struct netns_ipv6 { | |||
109 | 109 | ||
110 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) | 110 | #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) |
111 | struct netns_nf_frag { | 111 | struct netns_nf_frag { |
112 | struct netns_sysctl_ipv6 sysctl; | ||
113 | struct netns_frags frags; | 112 | struct netns_frags frags; |
114 | }; | 113 | }; |
115 | #endif | 114 | #endif |
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a3c1a2c47cd4..20b059574e60 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
@@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, | |||
111 | { | 111 | { |
112 | } | 112 | } |
113 | 113 | ||
114 | static inline bool tcf_block_shared(struct tcf_block *block) | ||
115 | { | ||
116 | return false; | ||
117 | } | ||
118 | |||
114 | static inline struct Qdisc *tcf_block_q(struct tcf_block *block) | 119 | static inline struct Qdisc *tcf_block_q(struct tcf_block *block) |
115 | { | 120 | { |
116 | return NULL; | 121 | return NULL; |
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 59b19b6a40d7..b7db3261c62d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -1857,7 +1857,8 @@ union bpf_attr { | |||
1857 | * is resolved), the nexthop address is returned in ipv4_dst | 1857 | * is resolved), the nexthop address is returned in ipv4_dst |
1858 | * or ipv6_dst based on family, smac is set to mac address of | 1858 | * or ipv6_dst based on family, smac is set to mac address of |
1859 | * egress device, dmac is set to nexthop mac address, rt_metric | 1859 | * egress device, dmac is set to nexthop mac address, rt_metric |
1860 | * is set to metric from route (IPv4/IPv6 only). | 1860 | * is set to metric from route (IPv4/IPv6 only), and ifindex |
1861 | * is set to the device index of the nexthop from the FIB lookup. | ||
1861 | * | 1862 | * |
1862 | * *plen* argument is the size of the passed in struct. | 1863 | * *plen* argument is the size of the passed in struct. |
1863 | * *flags* argument can be a combination of one or more of the | 1864 | * *flags* argument can be a combination of one or more of the |
@@ -1873,9 +1874,10 @@ union bpf_attr { | |||
1873 | * *ctx* is either **struct xdp_md** for XDP programs or | 1874 | * *ctx* is either **struct xdp_md** for XDP programs or |
1874 | * **struct sk_buff** tc cls_act programs. | 1875 | * **struct sk_buff** tc cls_act programs. |
1875 | * Return | 1876 | * Return |
1876 | * Egress device index on success, 0 if packet needs to continue | 1877 | * * < 0 if any input argument is invalid |
1877 | * up the stack for further processing or a negative error in case | 1878 | * * 0 on success (packet is forwarded, nexthop neighbor exists) |
1878 | * of failure. | 1879 | * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the |
1880 | * * packet is not forwarded or needs assist from full stack | ||
1879 | * | 1881 | * |
1880 | * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) | 1882 | * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) |
1881 | * Description | 1883 | * Description |
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args { | |||
2612 | #define BPF_FIB_LOOKUP_DIRECT BIT(0) | 2614 | #define BPF_FIB_LOOKUP_DIRECT BIT(0) |
2613 | #define BPF_FIB_LOOKUP_OUTPUT BIT(1) | 2615 | #define BPF_FIB_LOOKUP_OUTPUT BIT(1) |
2614 | 2616 | ||
2617 | enum { | ||
2618 | BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ | ||
2619 | BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ | ||
2620 | BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ | ||
2621 | BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ | ||
2622 | BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ | ||
2623 | BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ | ||
2624 | BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ | ||
2625 | BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ | ||
2626 | BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ | ||
2627 | }; | ||
2628 | |||
2615 | struct bpf_fib_lookup { | 2629 | struct bpf_fib_lookup { |
2616 | /* input: network family for lookup (AF_INET, AF_INET6) | 2630 | /* input: network family for lookup (AF_INET, AF_INET6) |
2617 | * output: network family of egress nexthop | 2631 | * output: network family of egress nexthop |
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup { | |||
2625 | 2639 | ||
2626 | /* total length of packet from network header - used for MTU check */ | 2640 | /* total length of packet from network header - used for MTU check */ |
2627 | __u16 tot_len; | 2641 | __u16 tot_len; |
2628 | __u32 ifindex; /* L3 device index for lookup */ | 2642 | |
2643 | /* input: L3 device index for lookup | ||
2644 | * output: device index from FIB lookup | ||
2645 | */ | ||
2646 | __u32 ifindex; | ||
2629 | 2647 | ||
2630 | union { | 2648 | union { |
2631 | /* inputs to lookup */ | 2649 | /* inputs to lookup */ |
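The uapi change above makes bpf_fib_lookup() return a BPF_FIB_LKUP_RET_* code instead of an egress ifindex, with the resolved device now reported through the ifindex field. A hedged plain-C sketch of how a caller might branch on those codes; the enum values are copied from the header above, while the decision function is illustrative only, not a real XDP program:

    #include <stdio.h>

    /* Return codes as added to include/uapi/linux/bpf.h. */
    enum {
            BPF_FIB_LKUP_RET_SUCCESS,
            BPF_FIB_LKUP_RET_BLACKHOLE,
            BPF_FIB_LKUP_RET_UNREACHABLE,
            BPF_FIB_LKUP_RET_PROHIBIT,
            BPF_FIB_LKUP_RET_NOT_FWDED,
            BPF_FIB_LKUP_RET_FWD_DISABLED,
            BPF_FIB_LKUP_RET_UNSUPP_LWT,
            BPF_FIB_LKUP_RET_NO_NEIGH,
            BPF_FIB_LKUP_RET_FRAG_NEEDED,
    };

    enum verdict { FORWARD, DROP, PASS_TO_STACK, ERROR };

    /* Map a lookup result onto a forwarding decision: negative means a
     * bad argument, success means forward, the "can be dropped" codes
     * are dropped, everything else goes up the stack for full handling. */
    static enum verdict decide(int rc)
    {
            if (rc < 0)
                    return ERROR;
            switch (rc) {
            case BPF_FIB_LKUP_RET_SUCCESS:
                    return FORWARD;
            case BPF_FIB_LKUP_RET_BLACKHOLE:
            case BPF_FIB_LKUP_RET_UNREACHABLE:
            case BPF_FIB_LKUP_RET_PROHIBIT:
                    return DROP;
            default:
                    return PASS_TO_STACK;
            }
    }

    int main(void)
    {
            printf("no neighbor -> %d (pass to stack)\n",
                   decide(BPF_FIB_LKUP_RET_NO_NEIGH));
            return 0;
    }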
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h index d620fa43756c..9a402fdb60e9 100644 --- a/include/uapi/linux/rseq.h +++ b/include/uapi/linux/rseq.h | |||
@@ -10,13 +10,8 @@ | |||
10 | * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | 10 | * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifdef __KERNEL__ | 13 | #include <linux/types.h> |
14 | # include <linux/types.h> | 14 | #include <asm/byteorder.h> |
15 | #else | ||
16 | # include <stdint.h> | ||
17 | #endif | ||
18 | |||
19 | #include <linux/types_32_64.h> | ||
20 | 15 | ||
21 | enum rseq_cpu_id_state { | 16 | enum rseq_cpu_id_state { |
22 | RSEQ_CPU_ID_UNINITIALIZED = -1, | 17 | RSEQ_CPU_ID_UNINITIALIZED = -1, |
@@ -52,10 +47,10 @@ struct rseq_cs { | |||
52 | __u32 version; | 47 | __u32 version; |
53 | /* enum rseq_cs_flags */ | 48 | /* enum rseq_cs_flags */ |
54 | __u32 flags; | 49 | __u32 flags; |
55 | LINUX_FIELD_u32_u64(start_ip); | 50 | __u64 start_ip; |
56 | /* Offset from start_ip. */ | 51 | /* Offset from start_ip. */ |
57 | LINUX_FIELD_u32_u64(post_commit_offset); | 52 | __u64 post_commit_offset; |
58 | LINUX_FIELD_u32_u64(abort_ip); | 53 | __u64 abort_ip; |
59 | } __attribute__((aligned(4 * sizeof(__u64)))); | 54 | } __attribute__((aligned(4 * sizeof(__u64)))); |
60 | 55 | ||
61 | /* | 56 | /* |
@@ -67,28 +62,30 @@ struct rseq_cs { | |||
67 | struct rseq { | 62 | struct rseq { |
68 | /* | 63 | /* |
69 | * Restartable sequences cpu_id_start field. Updated by the | 64 | * Restartable sequences cpu_id_start field. Updated by the |
70 | * kernel, and read by user-space with single-copy atomicity | 65 | * kernel. Read by user-space with single-copy atomicity |
71 | * semantics. Aligned on 32-bit. Always contains a value in the | 66 | * semantics. This field should only be read by the thread which |
72 | * range of possible CPUs, although the value may not be the | 67 | * registered this data structure. Aligned on 32-bit. Always |
73 | * actual current CPU (e.g. if rseq is not initialized). This | 68 | * contains a value in the range of possible CPUs, although the |
74 | * CPU number value should always be compared against the value | 69 | * value may not be the actual current CPU (e.g. if rseq is not |
75 | * of the cpu_id field before performing a rseq commit or | 70 | * initialized). This CPU number value should always be compared |
76 | * returning a value read from a data structure indexed using | 71 | * against the value of the cpu_id field before performing a rseq |
77 | * the cpu_id_start value. | 72 | * commit or returning a value read from a data structure indexed |
73 | * using the cpu_id_start value. | ||
78 | */ | 74 | */ |
79 | __u32 cpu_id_start; | 75 | __u32 cpu_id_start; |
80 | /* | 76 | /* |
81 | * Restartable sequences cpu_id field. Updated by the kernel, | 77 | * Restartable sequences cpu_id field. Updated by the kernel. |
82 | * and read by user-space with single-copy atomicity semantics. | 78 | * Read by user-space with single-copy atomicity semantics. This |
83 | * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and | 79 | * field should only be read by the thread which registered this |
84 | * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the | 80 | * data structure. Aligned on 32-bit. Values |
85 | * former means "rseq uninitialized", and the latter means "rseq | 81 | * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED |
86 | * initialization failed". This value is meant to be read within | 82 | * have a special semantic: the former means "rseq uninitialized", |
87 | * rseq critical sections and compared with the cpu_id_start | 83 | * and the latter means "rseq initialization failed". This value is |
88 | * value previously read, before performing the commit instruction, | 84 | * meant to be read within rseq critical sections and compared |
89 | * or read and compared with the cpu_id_start value before returning | 85 | * with the cpu_id_start value previously read, before performing |
90 | * a value loaded from a data structure indexed using the | 86 | * the commit instruction, or read and compared with the |
91 | * cpu_id_start value. | 87 | * cpu_id_start value before returning a value loaded from a data |
88 | * structure indexed using the cpu_id_start value. | ||
92 | */ | 89 | */ |
93 | __u32 cpu_id; | 90 | __u32 cpu_id; |
94 | /* | 91 | /* |
@@ -105,27 +102,44 @@ struct rseq { | |||
105 | * targeted by the rseq_cs. Also needs to be set to NULL by user-space | 102 | * targeted by the rseq_cs. Also needs to be set to NULL by user-space |
106 | * before reclaiming memory that contains the targeted struct rseq_cs. | 103 | * before reclaiming memory that contains the targeted struct rseq_cs. |
107 | * | 104 | * |
108 | * Read and set by the kernel with single-copy atomicity semantics. | 105 | * Read and set by the kernel. Set by user-space with single-copy |
109 | * Set by user-space with single-copy atomicity semantics. Aligned | 106 | * atomicity semantics. This field should only be updated by the |
110 | * on 64-bit. | 107 | * thread which registered this data structure. Aligned on 64-bit. |
111 | */ | 108 | */ |
112 | LINUX_FIELD_u32_u64(rseq_cs); | 109 | union { |
110 | __u64 ptr64; | ||
111 | #ifdef __LP64__ | ||
112 | __u64 ptr; | ||
113 | #else | ||
114 | struct { | ||
115 | #if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN) | ||
116 | __u32 padding; /* Initialized to zero. */ | ||
117 | __u32 ptr32; | ||
118 | #else /* LITTLE */ | ||
119 | __u32 ptr32; | ||
120 | __u32 padding; /* Initialized to zero. */ | ||
121 | #endif /* ENDIAN */ | ||
122 | } ptr; | ||
123 | #endif | ||
124 | } rseq_cs; | ||
125 | |||
113 | /* | 126 | /* |
114 | * - RSEQ_DISABLE flag: | 127 | * Restartable sequences flags field. |
128 | * | ||
129 | * This field should only be updated by the thread which | ||
130 | * registered this data structure. Read by the kernel. | ||
131 | * Mainly used for single-stepping through rseq critical sections | ||
132 | * with debuggers. | ||
115 | * | 133 | * |
116 | * Fallback fast-track flag for single-stepping. | ||
117 | * Set by user-space if lack of progress is detected. | ||
118 | * Cleared by user-space after rseq finish. | ||
119 | * Read by the kernel. | ||
120 | * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT | 134 | * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT |
121 | * Inhibit instruction sequence block restart and event | 135 | * Inhibit instruction sequence block restart on preemption |
122 | * counter increment on preemption for this thread. | 136 | * for this thread. |
123 | * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL | 137 | * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL |
124 | * Inhibit instruction sequence block restart and event | 138 | * Inhibit instruction sequence block restart on signal |
125 | * counter increment on signal delivery for this thread. | 139 | * delivery for this thread. |
126 | * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE | 140 | * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE |
127 | * Inhibit instruction sequence block restart and event | 141 | * Inhibit instruction sequence block restart on migration for |
128 | * counter increment on migration for this thread. | 142 | * this thread. |
129 | */ | 143 | */ |
130 | __u32 flags; | 144 | __u32 flags; |
131 | } __attribute__((aligned(4 * sizeof(__u64)))); | 145 | } __attribute__((aligned(4 * sizeof(__u64)))); |
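With LINUX_FIELD_u32_u64() gone, rseq_cs is now an explicit union whose layout depends on __LP64__ and byte order. Below is a hedged sketch of the user-space store this layout implies; the helper name is illustrative, and the 64-bit path relies on a plain aligned store for single-copy atomicity.

#include <stdint.h>
#include <linux/rseq.h>

static inline void rseq_set_cs(struct rseq *rs, const struct rseq_cs *cs)
{
#ifdef __LP64__
	rs->rseq_cs.ptr = (uint64_t)(uintptr_t)cs;
#else
	/* 32-bit: fill the endian-dependent halves declared in rseq.h,
	 * keeping the padding word zeroed as the UAPI comment requires.
	 */
	rs->rseq_cs.ptr.padding = 0;
	rs->rseq_cs.ptr.ptr32   = (uint32_t)(uintptr_t)cs;
#endif
}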
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h deleted file mode 100644 index 0a87ace34a57..000000000000 --- a/include/uapi/linux/types_32_64.h +++ /dev/null | |||
@@ -1,50 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ | ||
2 | #ifndef _UAPI_LINUX_TYPES_32_64_H | ||
3 | #define _UAPI_LINUX_TYPES_32_64_H | ||
4 | |||
5 | /* | ||
6 | * linux/types_32_64.h | ||
7 | * | ||
8 | * Integer type declaration for pointers across 32-bit and 64-bit systems. | ||
9 | * | ||
10 | * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | ||
11 | */ | ||
12 | |||
13 | #ifdef __KERNEL__ | ||
14 | # include <linux/types.h> | ||
15 | #else | ||
16 | # include <stdint.h> | ||
17 | #endif | ||
18 | |||
19 | #include <asm/byteorder.h> | ||
20 | |||
21 | #ifdef __BYTE_ORDER | ||
22 | # if (__BYTE_ORDER == __BIG_ENDIAN) | ||
23 | # define LINUX_BYTE_ORDER_BIG_ENDIAN | ||
24 | # else | ||
25 | # define LINUX_BYTE_ORDER_LITTLE_ENDIAN | ||
26 | # endif | ||
27 | #else | ||
28 | # ifdef __BIG_ENDIAN | ||
29 | # define LINUX_BYTE_ORDER_BIG_ENDIAN | ||
30 | # else | ||
31 | # define LINUX_BYTE_ORDER_LITTLE_ENDIAN | ||
32 | # endif | ||
33 | #endif | ||
34 | |||
35 | #ifdef __LP64__ | ||
36 | # define LINUX_FIELD_u32_u64(field) __u64 field | ||
37 | # define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v | ||
38 | #else | ||
39 | # ifdef LINUX_BYTE_ORDER_BIG_ENDIAN | ||
40 | # define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field | ||
41 | # define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \ | ||
42 | field ## _padding = 0, field = (intptr_t)v | ||
43 | # else | ||
44 | # define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding | ||
45 | # define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \ | ||
46 | field = (intptr_t)v, field ## _padding = 0 | ||
47 | # endif | ||
48 | #endif | ||
49 | |||
50 | #endif /* _UAPI_LINUX_TYPES_32_64_H */ | ||
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index f7c00bd6f8e4..3d83ee7df381 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c | |||
@@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, | |||
428 | return ret; | 428 | return ret; |
429 | } | 429 | } |
430 | 430 | ||
431 | int cgroup_bpf_prog_attach(const union bpf_attr *attr, | ||
432 | enum bpf_prog_type ptype, struct bpf_prog *prog) | ||
433 | { | ||
434 | struct cgroup *cgrp; | ||
435 | int ret; | ||
436 | |||
437 | cgrp = cgroup_get_from_fd(attr->target_fd); | ||
438 | if (IS_ERR(cgrp)) | ||
439 | return PTR_ERR(cgrp); | ||
440 | |||
441 | ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, | ||
442 | attr->attach_flags); | ||
443 | cgroup_put(cgrp); | ||
444 | return ret; | ||
445 | } | ||
446 | |||
447 | int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) | ||
448 | { | ||
449 | struct bpf_prog *prog; | ||
450 | struct cgroup *cgrp; | ||
451 | int ret; | ||
452 | |||
453 | cgrp = cgroup_get_from_fd(attr->target_fd); | ||
454 | if (IS_ERR(cgrp)) | ||
455 | return PTR_ERR(cgrp); | ||
456 | |||
457 | prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); | ||
458 | if (IS_ERR(prog)) | ||
459 | prog = NULL; | ||
460 | |||
461 | ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); | ||
462 | if (prog) | ||
463 | bpf_prog_put(prog); | ||
464 | |||
465 | cgroup_put(cgrp); | ||
466 | return ret; | ||
467 | } | ||
468 | |||
469 | int cgroup_bpf_prog_query(const union bpf_attr *attr, | ||
470 | union bpf_attr __user *uattr) | ||
471 | { | ||
472 | struct cgroup *cgrp; | ||
473 | int ret; | ||
474 | |||
475 | cgrp = cgroup_get_from_fd(attr->query.target_fd); | ||
476 | if (IS_ERR(cgrp)) | ||
477 | return PTR_ERR(cgrp); | ||
478 | |||
479 | ret = cgroup_bpf_query(cgrp, attr, uattr); | ||
480 | |||
481 | cgroup_put(cgrp); | ||
482 | return ret; | ||
483 | } | ||
484 | |||
431 | /** | 485 | /** |
432 | * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering | 486 | * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering |
433 | * @sk: The socket sending or receiving traffic | 487 | * @sk: The socket sending or receiving traffic |
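The new cgroup_bpf_prog_attach()/cgroup_bpf_prog_detach()/cgroup_bpf_prog_query() wrappers take over the cgroup-fd handling that previously lived in kernel/bpf/syscall.c. For orientation, here is a hedged user-space sketch of a BPF_PROG_ATTACH call that ends up in cgroup_bpf_prog_attach(); the cgroup path is illustrative and the libbpf wrapper is assumed.

#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>

int attach_to_cgroup(int prog_fd)
{
	int cg_fd, err;

	cg_fd = open("/sys/fs/cgroup/unified/mygroup", O_RDONLY);	/* illustrative path */
	if (cg_fd < 0)
		return -1;

	/* target_fd is the cgroup fd; the kernel resolves it via cgroup_get_from_fd() */
	err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS, 0);
	close(cg_fd);
	return err;
}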
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index a9e6c04d0f4a..1e5625d46414 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c | |||
@@ -598,8 +598,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, | |||
598 | bpf_fill_ill_insns(hdr, size); | 598 | bpf_fill_ill_insns(hdr, size); |
599 | 599 | ||
600 | hdr->pages = size / PAGE_SIZE; | 600 | hdr->pages = size / PAGE_SIZE; |
601 | hdr->locked = 0; | ||
602 | |||
603 | hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), | 601 | hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), |
604 | PAGE_SIZE - sizeof(*hdr)); | 602 | PAGE_SIZE - sizeof(*hdr)); |
605 | start = (get_random_int() % hole) & ~(alignment - 1); | 603 | start = (get_random_int() % hole) & ~(alignment - 1); |
@@ -1450,22 +1448,6 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) | |||
1450 | return 0; | 1448 | return 0; |
1451 | } | 1449 | } |
1452 | 1450 | ||
1453 | static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp) | ||
1454 | { | ||
1455 | #ifdef CONFIG_ARCH_HAS_SET_MEMORY | ||
1456 | int i, err; | ||
1457 | |||
1458 | for (i = 0; i < fp->aux->func_cnt; i++) { | ||
1459 | err = bpf_prog_check_pages_ro_single(fp->aux->func[i]); | ||
1460 | if (err) | ||
1461 | return err; | ||
1462 | } | ||
1463 | |||
1464 | return bpf_prog_check_pages_ro_single(fp); | ||
1465 | #endif | ||
1466 | return 0; | ||
1467 | } | ||
1468 | |||
1469 | static void bpf_prog_select_func(struct bpf_prog *fp) | 1451 | static void bpf_prog_select_func(struct bpf_prog *fp) |
1470 | { | 1452 | { |
1471 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | 1453 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON |
@@ -1524,17 +1506,7 @@ finalize: | |||
1524 | * all eBPF JITs might immediately support all features. | 1506 | * all eBPF JITs might immediately support all features. |
1525 | */ | 1507 | */ |
1526 | *err = bpf_check_tail_call(fp); | 1508 | *err = bpf_check_tail_call(fp); |
1527 | if (*err) | 1509 | |
1528 | return fp; | ||
1529 | |||
1530 | /* Checkpoint: at this point onwards any cBPF -> eBPF or | ||
1531 | * native eBPF program is read-only. If we failed to change | ||
1532 | * the page attributes (e.g. allocation failure from | ||
1533 | * splitting large pages), then reject the whole program | ||
1534 | * in order to guarantee not ending up with any W+X pages | ||
1535 | * from BPF side in kernel. | ||
1536 | */ | ||
1537 | *err = bpf_prog_check_pages_ro_locked(fp); | ||
1538 | return fp; | 1510 | return fp; |
1539 | } | 1511 | } |
1540 | EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); | 1512 | EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 52a91d816c0e..cf7b6a6dbd1f 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
@@ -72,6 +72,7 @@ struct bpf_htab { | |||
72 | u32 n_buckets; | 72 | u32 n_buckets; |
73 | u32 elem_size; | 73 | u32 elem_size; |
74 | struct bpf_sock_progs progs; | 74 | struct bpf_sock_progs progs; |
75 | struct rcu_head rcu; | ||
75 | }; | 76 | }; |
76 | 77 | ||
77 | struct htab_elem { | 78 | struct htab_elem { |
@@ -89,8 +90,8 @@ enum smap_psock_state { | |||
89 | struct smap_psock_map_entry { | 90 | struct smap_psock_map_entry { |
90 | struct list_head list; | 91 | struct list_head list; |
91 | struct sock **entry; | 92 | struct sock **entry; |
92 | struct htab_elem *hash_link; | 93 | struct htab_elem __rcu *hash_link; |
93 | struct bpf_htab *htab; | 94 | struct bpf_htab __rcu *htab; |
94 | }; | 95 | }; |
95 | 96 | ||
96 | struct smap_psock { | 97 | struct smap_psock { |
@@ -120,6 +121,7 @@ struct smap_psock { | |||
120 | struct bpf_prog *bpf_parse; | 121 | struct bpf_prog *bpf_parse; |
121 | struct bpf_prog *bpf_verdict; | 122 | struct bpf_prog *bpf_verdict; |
122 | struct list_head maps; | 123 | struct list_head maps; |
124 | spinlock_t maps_lock; | ||
123 | 125 | ||
124 | /* Back reference used when sock callback trigger sockmap operations */ | 126 | /* Back reference used when sock callback trigger sockmap operations */ |
125 | struct sock *sock; | 127 | struct sock *sock; |
@@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
140 | static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); | 142 | static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); |
141 | static int bpf_tcp_sendpage(struct sock *sk, struct page *page, | 143 | static int bpf_tcp_sendpage(struct sock *sk, struct page *page, |
142 | int offset, size_t size, int flags); | 144 | int offset, size_t size, int flags); |
145 | static void bpf_tcp_close(struct sock *sk, long timeout); | ||
143 | 146 | ||
144 | static inline struct smap_psock *smap_psock_sk(const struct sock *sk) | 147 | static inline struct smap_psock *smap_psock_sk(const struct sock *sk) |
145 | { | 148 | { |
@@ -161,7 +164,42 @@ out: | |||
161 | return !empty; | 164 | return !empty; |
162 | } | 165 | } |
163 | 166 | ||
164 | static struct proto tcp_bpf_proto; | 167 | enum { |
168 | SOCKMAP_IPV4, | ||
169 | SOCKMAP_IPV6, | ||
170 | SOCKMAP_NUM_PROTS, | ||
171 | }; | ||
172 | |||
173 | enum { | ||
174 | SOCKMAP_BASE, | ||
175 | SOCKMAP_TX, | ||
176 | SOCKMAP_NUM_CONFIGS, | ||
177 | }; | ||
178 | |||
179 | static struct proto *saved_tcpv6_prot __read_mostly; | ||
180 | static DEFINE_SPINLOCK(tcpv6_prot_lock); | ||
181 | static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS]; | ||
182 | static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], | ||
183 | struct proto *base) | ||
184 | { | ||
185 | prot[SOCKMAP_BASE] = *base; | ||
186 | prot[SOCKMAP_BASE].close = bpf_tcp_close; | ||
187 | prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; | ||
188 | prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; | ||
189 | |||
190 | prot[SOCKMAP_TX] = prot[SOCKMAP_BASE]; | ||
191 | prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg; | ||
192 | prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage; | ||
193 | } | ||
194 | |||
195 | static void update_sk_prot(struct sock *sk, struct smap_psock *psock) | ||
196 | { | ||
197 | int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4; | ||
198 | int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE; | ||
199 | |||
200 | sk->sk_prot = &bpf_tcp_prots[family][conf]; | ||
201 | } | ||
202 | |||
165 | static int bpf_tcp_init(struct sock *sk) | 203 | static int bpf_tcp_init(struct sock *sk) |
166 | { | 204 | { |
167 | struct smap_psock *psock; | 205 | struct smap_psock *psock; |
@@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk) | |||
181 | psock->save_close = sk->sk_prot->close; | 219 | psock->save_close = sk->sk_prot->close; |
182 | psock->sk_proto = sk->sk_prot; | 220 | psock->sk_proto = sk->sk_prot; |
183 | 221 | ||
184 | if (psock->bpf_tx_msg) { | 222 | /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */ |
185 | tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg; | 223 | if (sk->sk_family == AF_INET6 && |
186 | tcp_bpf_proto.sendpage = bpf_tcp_sendpage; | 224 | unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { |
187 | tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg; | 225 | spin_lock_bh(&tcpv6_prot_lock); |
188 | tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read; | 226 | if (likely(sk->sk_prot != saved_tcpv6_prot)) { |
227 | build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot); | ||
228 | smp_store_release(&saved_tcpv6_prot, sk->sk_prot); | ||
229 | } | ||
230 | spin_unlock_bh(&tcpv6_prot_lock); | ||
189 | } | 231 | } |
190 | 232 | update_sk_prot(sk, psock); | |
191 | sk->sk_prot = &tcp_bpf_proto; | ||
192 | rcu_read_unlock(); | 233 | rcu_read_unlock(); |
193 | return 0; | 234 | return 0; |
194 | } | 235 | } |
@@ -219,16 +260,54 @@ out: | |||
219 | rcu_read_unlock(); | 260 | rcu_read_unlock(); |
220 | } | 261 | } |
221 | 262 | ||
263 | static struct htab_elem *lookup_elem_raw(struct hlist_head *head, | ||
264 | u32 hash, void *key, u32 key_size) | ||
265 | { | ||
266 | struct htab_elem *l; | ||
267 | |||
268 | hlist_for_each_entry_rcu(l, head, hash_node) { | ||
269 | if (l->hash == hash && !memcmp(&l->key, key, key_size)) | ||
270 | return l; | ||
271 | } | ||
272 | |||
273 | return NULL; | ||
274 | } | ||
275 | |||
276 | static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) | ||
277 | { | ||
278 | return &htab->buckets[hash & (htab->n_buckets - 1)]; | ||
279 | } | ||
280 | |||
281 | static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) | ||
282 | { | ||
283 | return &__select_bucket(htab, hash)->head; | ||
284 | } | ||
285 | |||
222 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) | 286 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) |
223 | { | 287 | { |
224 | atomic_dec(&htab->count); | 288 | atomic_dec(&htab->count); |
225 | kfree_rcu(l, rcu); | 289 | kfree_rcu(l, rcu); |
226 | } | 290 | } |
227 | 291 | ||
292 | static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, | ||
293 | struct smap_psock *psock) | ||
294 | { | ||
295 | struct smap_psock_map_entry *e; | ||
296 | |||
297 | spin_lock_bh(&psock->maps_lock); | ||
298 | e = list_first_entry_or_null(&psock->maps, | ||
299 | struct smap_psock_map_entry, | ||
300 | list); | ||
301 | if (e) | ||
302 | list_del(&e->list); | ||
303 | spin_unlock_bh(&psock->maps_lock); | ||
304 | return e; | ||
305 | } | ||
306 | |||
228 | static void bpf_tcp_close(struct sock *sk, long timeout) | 307 | static void bpf_tcp_close(struct sock *sk, long timeout) |
229 | { | 308 | { |
230 | void (*close_fun)(struct sock *sk, long timeout); | 309 | void (*close_fun)(struct sock *sk, long timeout); |
231 | struct smap_psock_map_entry *e, *tmp; | 310 | struct smap_psock_map_entry *e; |
232 | struct sk_msg_buff *md, *mtmp; | 311 | struct sk_msg_buff *md, *mtmp; |
233 | struct smap_psock *psock; | 312 | struct smap_psock *psock; |
234 | struct sock *osk; | 313 | struct sock *osk; |
@@ -247,7 +326,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
247 | */ | 326 | */ |
248 | close_fun = psock->save_close; | 327 | close_fun = psock->save_close; |
249 | 328 | ||
250 | write_lock_bh(&sk->sk_callback_lock); | ||
251 | if (psock->cork) { | 329 | if (psock->cork) { |
252 | free_start_sg(psock->sock, psock->cork); | 330 | free_start_sg(psock->sock, psock->cork); |
253 | kfree(psock->cork); | 331 | kfree(psock->cork); |
@@ -260,20 +338,38 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
260 | kfree(md); | 338 | kfree(md); |
261 | } | 339 | } |
262 | 340 | ||
263 | list_for_each_entry_safe(e, tmp, &psock->maps, list) { | 341 | e = psock_map_pop(sk, psock); |
342 | while (e) { | ||
264 | if (e->entry) { | 343 | if (e->entry) { |
265 | osk = cmpxchg(e->entry, sk, NULL); | 344 | osk = cmpxchg(e->entry, sk, NULL); |
266 | if (osk == sk) { | 345 | if (osk == sk) { |
267 | list_del(&e->list); | ||
268 | smap_release_sock(psock, sk); | 346 | smap_release_sock(psock, sk); |
269 | } | 347 | } |
270 | } else { | 348 | } else { |
271 | hlist_del_rcu(&e->hash_link->hash_node); | 349 | struct htab_elem *link = rcu_dereference(e->hash_link); |
272 | smap_release_sock(psock, e->hash_link->sk); | 350 | struct bpf_htab *htab = rcu_dereference(e->htab); |
273 | free_htab_elem(e->htab, e->hash_link); | 351 | struct hlist_head *head; |
352 | struct htab_elem *l; | ||
353 | struct bucket *b; | ||
354 | |||
355 | b = __select_bucket(htab, link->hash); | ||
356 | head = &b->head; | ||
357 | raw_spin_lock_bh(&b->lock); | ||
358 | l = lookup_elem_raw(head, | ||
359 | link->hash, link->key, | ||
360 | htab->map.key_size); | ||
361 | /* If another thread deleted this object skip deletion. | ||
362 | * The refcnt on psock may or may not be zero. | ||
363 | */ | ||
364 | if (l) { | ||
365 | hlist_del_rcu(&link->hash_node); | ||
366 | smap_release_sock(psock, link->sk); | ||
367 | free_htab_elem(htab, link); | ||
368 | } | ||
369 | raw_spin_unlock_bh(&b->lock); | ||
274 | } | 370 | } |
371 | e = psock_map_pop(sk, psock); | ||
275 | } | 372 | } |
276 | write_unlock_bh(&sk->sk_callback_lock); | ||
277 | rcu_read_unlock(); | 373 | rcu_read_unlock(); |
278 | close_fun(sk, timeout); | 374 | close_fun(sk, timeout); |
279 | } | 375 | } |
@@ -1111,8 +1207,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock, | |||
1111 | 1207 | ||
1112 | static int bpf_tcp_ulp_register(void) | 1208 | static int bpf_tcp_ulp_register(void) |
1113 | { | 1209 | { |
1114 | tcp_bpf_proto = tcp_prot; | 1210 | build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot); |
1115 | tcp_bpf_proto.close = bpf_tcp_close; | ||
1116 | /* Once BPF TX ULP is registered it is never unregistered. It | 1211 | /* Once BPF TX ULP is registered it is never unregistered. It |
1117 | * will be in the ULP list for the lifetime of the system. Doing | 1212 | * will be in the ULP list for the lifetime of the system. Doing |
1118 | * duplicate registers is not a problem. | 1213 | * duplicate registers is not a problem. |
@@ -1357,7 +1452,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock) | |||
1357 | { | 1452 | { |
1358 | if (refcount_dec_and_test(&psock->refcnt)) { | 1453 | if (refcount_dec_and_test(&psock->refcnt)) { |
1359 | tcp_cleanup_ulp(sock); | 1454 | tcp_cleanup_ulp(sock); |
1455 | write_lock_bh(&sock->sk_callback_lock); | ||
1360 | smap_stop_sock(psock, sock); | 1456 | smap_stop_sock(psock, sock); |
1457 | write_unlock_bh(&sock->sk_callback_lock); | ||
1361 | clear_bit(SMAP_TX_RUNNING, &psock->state); | 1458 | clear_bit(SMAP_TX_RUNNING, &psock->state); |
1362 | rcu_assign_sk_user_data(sock, NULL); | 1459 | rcu_assign_sk_user_data(sock, NULL); |
1363 | call_rcu_sched(&psock->rcu, smap_destroy_psock); | 1460 | call_rcu_sched(&psock->rcu, smap_destroy_psock); |
@@ -1508,6 +1605,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node) | |||
1508 | INIT_LIST_HEAD(&psock->maps); | 1605 | INIT_LIST_HEAD(&psock->maps); |
1509 | INIT_LIST_HEAD(&psock->ingress); | 1606 | INIT_LIST_HEAD(&psock->ingress); |
1510 | refcount_set(&psock->refcnt, 1); | 1607 | refcount_set(&psock->refcnt, 1); |
1608 | spin_lock_init(&psock->maps_lock); | ||
1511 | 1609 | ||
1512 | rcu_assign_sk_user_data(sock, psock); | 1610 | rcu_assign_sk_user_data(sock, psock); |
1513 | sock_hold(sock); | 1611 | sock_hold(sock); |
@@ -1564,18 +1662,32 @@ free_stab: | |||
1564 | return ERR_PTR(err); | 1662 | return ERR_PTR(err); |
1565 | } | 1663 | } |
1566 | 1664 | ||
1567 | static void smap_list_remove(struct smap_psock *psock, | 1665 | static void smap_list_map_remove(struct smap_psock *psock, |
1568 | struct sock **entry, | 1666 | struct sock **entry) |
1569 | struct htab_elem *hash_link) | ||
1570 | { | 1667 | { |
1571 | struct smap_psock_map_entry *e, *tmp; | 1668 | struct smap_psock_map_entry *e, *tmp; |
1572 | 1669 | ||
1670 | spin_lock_bh(&psock->maps_lock); | ||
1573 | list_for_each_entry_safe(e, tmp, &psock->maps, list) { | 1671 | list_for_each_entry_safe(e, tmp, &psock->maps, list) { |
1574 | if (e->entry == entry || e->hash_link == hash_link) { | 1672 | if (e->entry == entry) |
1575 | list_del(&e->list); | 1673 | list_del(&e->list); |
1576 | break; | ||
1577 | } | ||
1578 | } | 1674 | } |
1675 | spin_unlock_bh(&psock->maps_lock); | ||
1676 | } | ||
1677 | |||
1678 | static void smap_list_hash_remove(struct smap_psock *psock, | ||
1679 | struct htab_elem *hash_link) | ||
1680 | { | ||
1681 | struct smap_psock_map_entry *e, *tmp; | ||
1682 | |||
1683 | spin_lock_bh(&psock->maps_lock); | ||
1684 | list_for_each_entry_safe(e, tmp, &psock->maps, list) { | ||
1685 | struct htab_elem *c = rcu_dereference(e->hash_link); | ||
1686 | |||
1687 | if (c == hash_link) | ||
1688 | list_del(&e->list); | ||
1689 | } | ||
1690 | spin_unlock_bh(&psock->maps_lock); | ||
1579 | } | 1691 | } |
1580 | 1692 | ||
1581 | static void sock_map_free(struct bpf_map *map) | 1693 | static void sock_map_free(struct bpf_map *map) |
@@ -1601,7 +1713,6 @@ static void sock_map_free(struct bpf_map *map) | |||
1601 | if (!sock) | 1713 | if (!sock) |
1602 | continue; | 1714 | continue; |
1603 | 1715 | ||
1604 | write_lock_bh(&sock->sk_callback_lock); | ||
1605 | psock = smap_psock_sk(sock); | 1716 | psock = smap_psock_sk(sock); |
1606 | /* This check handles a racing sock event that can get the | 1717 | /* This check handles a racing sock event that can get the |
1607 | * sk_callback_lock before this case but after xchg happens | 1718 | * sk_callback_lock before this case but after xchg happens |
@@ -1609,10 +1720,9 @@ static void sock_map_free(struct bpf_map *map) | |||
1609 | * to be null and queued for garbage collection. | 1720 | * to be null and queued for garbage collection. |
1610 | */ | 1721 | */ |
1611 | if (likely(psock)) { | 1722 | if (likely(psock)) { |
1612 | smap_list_remove(psock, &stab->sock_map[i], NULL); | 1723 | smap_list_map_remove(psock, &stab->sock_map[i]); |
1613 | smap_release_sock(psock, sock); | 1724 | smap_release_sock(psock, sock); |
1614 | } | 1725 | } |
1615 | write_unlock_bh(&sock->sk_callback_lock); | ||
1616 | } | 1726 | } |
1617 | rcu_read_unlock(); | 1727 | rcu_read_unlock(); |
1618 | 1728 | ||
@@ -1661,17 +1771,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key) | |||
1661 | if (!sock) | 1771 | if (!sock) |
1662 | return -EINVAL; | 1772 | return -EINVAL; |
1663 | 1773 | ||
1664 | write_lock_bh(&sock->sk_callback_lock); | ||
1665 | psock = smap_psock_sk(sock); | 1774 | psock = smap_psock_sk(sock); |
1666 | if (!psock) | 1775 | if (!psock) |
1667 | goto out; | 1776 | goto out; |
1668 | 1777 | ||
1669 | if (psock->bpf_parse) | 1778 | if (psock->bpf_parse) |
1670 | smap_stop_sock(psock, sock); | 1779 | smap_stop_sock(psock, sock); |
1671 | smap_list_remove(psock, &stab->sock_map[k], NULL); | 1780 | smap_list_map_remove(psock, &stab->sock_map[k]); |
1672 | smap_release_sock(psock, sock); | 1781 | smap_release_sock(psock, sock); |
1673 | out: | 1782 | out: |
1674 | write_unlock_bh(&sock->sk_callback_lock); | ||
1675 | return 0; | 1783 | return 0; |
1676 | } | 1784 | } |
1677 | 1785 | ||
@@ -1752,7 +1860,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
1752 | } | 1860 | } |
1753 | } | 1861 | } |
1754 | 1862 | ||
1755 | write_lock_bh(&sock->sk_callback_lock); | ||
1756 | psock = smap_psock_sk(sock); | 1863 | psock = smap_psock_sk(sock); |
1757 | 1864 | ||
1758 | /* 2. Do not allow inheriting programs if psock exists and has | 1865 | /* 2. Do not allow inheriting programs if psock exists and has |
@@ -1809,7 +1916,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
1809 | if (err) | 1916 | if (err) |
1810 | goto out_free; | 1917 | goto out_free; |
1811 | smap_init_progs(psock, verdict, parse); | 1918 | smap_init_progs(psock, verdict, parse); |
1919 | write_lock_bh(&sock->sk_callback_lock); | ||
1812 | smap_start_sock(psock, sock); | 1920 | smap_start_sock(psock, sock); |
1921 | write_unlock_bh(&sock->sk_callback_lock); | ||
1813 | } | 1922 | } |
1814 | 1923 | ||
1815 | /* 4. Place psock in sockmap for use and stop any programs on | 1924 | /* 4. Place psock in sockmap for use and stop any programs on |
@@ -1819,9 +1928,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
1819 | */ | 1928 | */ |
1820 | if (map_link) { | 1929 | if (map_link) { |
1821 | e->entry = map_link; | 1930 | e->entry = map_link; |
1931 | spin_lock_bh(&psock->maps_lock); | ||
1822 | list_add_tail(&e->list, &psock->maps); | 1932 | list_add_tail(&e->list, &psock->maps); |
1933 | spin_unlock_bh(&psock->maps_lock); | ||
1823 | } | 1934 | } |
1824 | write_unlock_bh(&sock->sk_callback_lock); | ||
1825 | return err; | 1935 | return err; |
1826 | out_free: | 1936 | out_free: |
1827 | smap_release_sock(psock, sock); | 1937 | smap_release_sock(psock, sock); |
@@ -1832,7 +1942,6 @@ out_progs: | |||
1832 | } | 1942 | } |
1833 | if (tx_msg) | 1943 | if (tx_msg) |
1834 | bpf_prog_put(tx_msg); | 1944 | bpf_prog_put(tx_msg); |
1835 | write_unlock_bh(&sock->sk_callback_lock); | ||
1836 | kfree(e); | 1945 | kfree(e); |
1837 | return err; | 1946 | return err; |
1838 | } | 1947 | } |
@@ -1869,10 +1978,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
1869 | if (osock) { | 1978 | if (osock) { |
1870 | struct smap_psock *opsock = smap_psock_sk(osock); | 1979 | struct smap_psock *opsock = smap_psock_sk(osock); |
1871 | 1980 | ||
1872 | write_lock_bh(&osock->sk_callback_lock); | 1981 | smap_list_map_remove(opsock, &stab->sock_map[i]); |
1873 | smap_list_remove(opsock, &stab->sock_map[i], NULL); | ||
1874 | smap_release_sock(opsock, osock); | 1982 | smap_release_sock(opsock, osock); |
1875 | write_unlock_bh(&osock->sk_callback_lock); | ||
1876 | } | 1983 | } |
1877 | out: | 1984 | out: |
1878 | return err; | 1985 | return err; |
@@ -1915,6 +2022,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type) | |||
1915 | return 0; | 2022 | return 0; |
1916 | } | 2023 | } |
1917 | 2024 | ||
2025 | int sockmap_get_from_fd(const union bpf_attr *attr, int type, | ||
2026 | struct bpf_prog *prog) | ||
2027 | { | ||
2028 | int ufd = attr->target_fd; | ||
2029 | struct bpf_map *map; | ||
2030 | struct fd f; | ||
2031 | int err; | ||
2032 | |||
2033 | f = fdget(ufd); | ||
2034 | map = __bpf_map_get(f); | ||
2035 | if (IS_ERR(map)) | ||
2036 | return PTR_ERR(map); | ||
2037 | |||
2038 | err = sock_map_prog(map, prog, attr->attach_type); | ||
2039 | fdput(f); | ||
2040 | return err; | ||
2041 | } | ||
2042 | |||
1918 | static void *sock_map_lookup(struct bpf_map *map, void *key) | 2043 | static void *sock_map_lookup(struct bpf_map *map, void *key) |
1919 | { | 2044 | { |
1920 | return NULL; | 2045 | return NULL; |
@@ -2043,14 +2168,13 @@ free_htab: | |||
2043 | return ERR_PTR(err); | 2168 | return ERR_PTR(err); |
2044 | } | 2169 | } |
2045 | 2170 | ||
2046 | static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) | 2171 | static void __bpf_htab_free(struct rcu_head *rcu) |
2047 | { | 2172 | { |
2048 | return &htab->buckets[hash & (htab->n_buckets - 1)]; | 2173 | struct bpf_htab *htab; |
2049 | } | ||
2050 | 2174 | ||
2051 | static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) | 2175 | htab = container_of(rcu, struct bpf_htab, rcu); |
2052 | { | 2176 | bpf_map_area_free(htab->buckets); |
2053 | return &__select_bucket(htab, hash)->head; | 2177 | kfree(htab); |
2054 | } | 2178 | } |
2055 | 2179 | ||
2056 | static void sock_hash_free(struct bpf_map *map) | 2180 | static void sock_hash_free(struct bpf_map *map) |
@@ -2069,16 +2193,18 @@ static void sock_hash_free(struct bpf_map *map) | |||
2069 | */ | 2193 | */ |
2070 | rcu_read_lock(); | 2194 | rcu_read_lock(); |
2071 | for (i = 0; i < htab->n_buckets; i++) { | 2195 | for (i = 0; i < htab->n_buckets; i++) { |
2072 | struct hlist_head *head = select_bucket(htab, i); | 2196 | struct bucket *b = __select_bucket(htab, i); |
2197 | struct hlist_head *head; | ||
2073 | struct hlist_node *n; | 2198 | struct hlist_node *n; |
2074 | struct htab_elem *l; | 2199 | struct htab_elem *l; |
2075 | 2200 | ||
2201 | raw_spin_lock_bh(&b->lock); | ||
2202 | head = &b->head; | ||
2076 | hlist_for_each_entry_safe(l, n, head, hash_node) { | 2203 | hlist_for_each_entry_safe(l, n, head, hash_node) { |
2077 | struct sock *sock = l->sk; | 2204 | struct sock *sock = l->sk; |
2078 | struct smap_psock *psock; | 2205 | struct smap_psock *psock; |
2079 | 2206 | ||
2080 | hlist_del_rcu(&l->hash_node); | 2207 | hlist_del_rcu(&l->hash_node); |
2081 | write_lock_bh(&sock->sk_callback_lock); | ||
2082 | psock = smap_psock_sk(sock); | 2208 | psock = smap_psock_sk(sock); |
2083 | /* This check handles a racing sock event that can get | 2209 | /* This check handles a racing sock event that can get |
2084 | * the sk_callback_lock before this case but after xchg | 2210 | * the sk_callback_lock before this case but after xchg |
@@ -2086,16 +2212,15 @@ static void sock_hash_free(struct bpf_map *map) | |||
2086 | * (psock) to be null and queued for garbage collection. | 2212 | * (psock) to be null and queued for garbage collection. |
2087 | */ | 2213 | */ |
2088 | if (likely(psock)) { | 2214 | if (likely(psock)) { |
2089 | smap_list_remove(psock, NULL, l); | 2215 | smap_list_hash_remove(psock, l); |
2090 | smap_release_sock(psock, sock); | 2216 | smap_release_sock(psock, sock); |
2091 | } | 2217 | } |
2092 | write_unlock_bh(&sock->sk_callback_lock); | 2218 | free_htab_elem(htab, l); |
2093 | kfree(l); | ||
2094 | } | 2219 | } |
2220 | raw_spin_unlock_bh(&b->lock); | ||
2095 | } | 2221 | } |
2096 | rcu_read_unlock(); | 2222 | rcu_read_unlock(); |
2097 | bpf_map_area_free(htab->buckets); | 2223 | call_rcu(&htab->rcu, __bpf_htab_free); |
2098 | kfree(htab); | ||
2099 | } | 2224 | } |
2100 | 2225 | ||
2101 | static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, | 2226 | static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, |
@@ -2122,19 +2247,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, | |||
2122 | return l_new; | 2247 | return l_new; |
2123 | } | 2248 | } |
2124 | 2249 | ||
2125 | static struct htab_elem *lookup_elem_raw(struct hlist_head *head, | ||
2126 | u32 hash, void *key, u32 key_size) | ||
2127 | { | ||
2128 | struct htab_elem *l; | ||
2129 | |||
2130 | hlist_for_each_entry_rcu(l, head, hash_node) { | ||
2131 | if (l->hash == hash && !memcmp(&l->key, key, key_size)) | ||
2132 | return l; | ||
2133 | } | ||
2134 | |||
2135 | return NULL; | ||
2136 | } | ||
2137 | |||
2138 | static inline u32 htab_map_hash(const void *key, u32 key_len) | 2250 | static inline u32 htab_map_hash(const void *key, u32 key_len) |
2139 | { | 2251 | { |
2140 | return jhash(key, key_len, 0); | 2252 | return jhash(key, key_len, 0); |
@@ -2254,9 +2366,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
2254 | goto bucket_err; | 2366 | goto bucket_err; |
2255 | } | 2367 | } |
2256 | 2368 | ||
2257 | e->hash_link = l_new; | 2369 | rcu_assign_pointer(e->hash_link, l_new); |
2258 | e->htab = container_of(map, struct bpf_htab, map); | 2370 | rcu_assign_pointer(e->htab, |
2371 | container_of(map, struct bpf_htab, map)); | ||
2372 | spin_lock_bh(&psock->maps_lock); | ||
2259 | list_add_tail(&e->list, &psock->maps); | 2373 | list_add_tail(&e->list, &psock->maps); |
2374 | spin_unlock_bh(&psock->maps_lock); | ||
2260 | 2375 | ||
2261 | /* add new element to the head of the list, so that | 2376 | /* add new element to the head of the list, so that |
2262 | * concurrent search will find it before old elem | 2377 | * concurrent search will find it before old elem |
@@ -2266,7 +2381,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
2266 | psock = smap_psock_sk(l_old->sk); | 2381 | psock = smap_psock_sk(l_old->sk); |
2267 | 2382 | ||
2268 | hlist_del_rcu(&l_old->hash_node); | 2383 | hlist_del_rcu(&l_old->hash_node); |
2269 | smap_list_remove(psock, NULL, l_old); | 2384 | smap_list_hash_remove(psock, l_old); |
2270 | smap_release_sock(psock, l_old->sk); | 2385 | smap_release_sock(psock, l_old->sk); |
2271 | free_htab_elem(htab, l_old); | 2386 | free_htab_elem(htab, l_old); |
2272 | } | 2387 | } |
@@ -2326,7 +2441,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) | |||
2326 | struct smap_psock *psock; | 2441 | struct smap_psock *psock; |
2327 | 2442 | ||
2328 | hlist_del_rcu(&l->hash_node); | 2443 | hlist_del_rcu(&l->hash_node); |
2329 | write_lock_bh(&sock->sk_callback_lock); | ||
2330 | psock = smap_psock_sk(sock); | 2444 | psock = smap_psock_sk(sock); |
2331 | /* This check handles a racing sock event that can get the | 2445 | /* This check handles a racing sock event that can get the |
2332 | * sk_callback_lock before this case but after xchg happens | 2446 | * sk_callback_lock before this case but after xchg happens |
@@ -2334,10 +2448,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) | |||
2334 | * to be null and queued for garbage collection. | 2448 | * to be null and queued for garbage collection. |
2335 | */ | 2449 | */ |
2336 | if (likely(psock)) { | 2450 | if (likely(psock)) { |
2337 | smap_list_remove(psock, NULL, l); | 2451 | smap_list_hash_remove(psock, l); |
2338 | smap_release_sock(psock, sock); | 2452 | smap_release_sock(psock, sock); |
2339 | } | 2453 | } |
2340 | write_unlock_bh(&sock->sk_callback_lock); | ||
2341 | free_htab_elem(htab, l); | 2454 | free_htab_elem(htab, l); |
2342 | ret = 0; | 2455 | ret = 0; |
2343 | } | 2456 | } |
@@ -2383,6 +2496,7 @@ const struct bpf_map_ops sock_hash_ops = { | |||
2383 | .map_get_next_key = sock_hash_get_next_key, | 2496 | .map_get_next_key = sock_hash_get_next_key, |
2384 | .map_update_elem = sock_hash_update_elem, | 2497 | .map_update_elem = sock_hash_update_elem, |
2385 | .map_delete_elem = sock_hash_delete_elem, | 2498 | .map_delete_elem = sock_hash_delete_elem, |
2499 | .map_release_uref = sock_map_release, | ||
2386 | }; | 2500 | }; |
2387 | 2501 | ||
2388 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, | 2502 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, |
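A recurring change in this file is the switch from walking psock->maps under sk_callback_lock to psock_map_pop(): the new maps_lock spinlock is held only long enough to detach one entry, which is then processed outside the lock. Below is a self-contained, hedged illustration of that pop-and-process pattern; the types and names are generic stand-ins rather than kernel code, and the list and lock are assumed to be initialized elsewhere.

#include <pthread.h>
#include <stdlib.h>

struct entry { struct entry *next; int payload; };
struct work_list { pthread_spinlock_t lock; struct entry *head; };

static struct entry *pop_entry(struct work_list *wl)
{
	struct entry *e;

	pthread_spin_lock(&wl->lock);
	e = wl->head;
	if (e)
		wl->head = e->next;	/* detach exactly one entry under the lock */
	pthread_spin_unlock(&wl->lock);
	return e;
}

static void drain(struct work_list *wl)
{
	struct entry *e;

	while ((e = pop_entry(wl)) != NULL) {
		/* process the entry without holding the lock, as
		 * bpf_tcp_close() now does for each map entry */
		free(e);
	}
}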
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 35dc466641f2..d10ecd78105f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -1483,8 +1483,6 @@ out_free_tp: | |||
1483 | return err; | 1483 | return err; |
1484 | } | 1484 | } |
1485 | 1485 | ||
1486 | #ifdef CONFIG_CGROUP_BPF | ||
1487 | |||
1488 | static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, | 1486 | static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, |
1489 | enum bpf_attach_type attach_type) | 1487 | enum bpf_attach_type attach_type) |
1490 | { | 1488 | { |
@@ -1499,40 +1497,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, | |||
1499 | 1497 | ||
1500 | #define BPF_PROG_ATTACH_LAST_FIELD attach_flags | 1498 | #define BPF_PROG_ATTACH_LAST_FIELD attach_flags |
1501 | 1499 | ||
1502 | static int sockmap_get_from_fd(const union bpf_attr *attr, | ||
1503 | int type, bool attach) | ||
1504 | { | ||
1505 | struct bpf_prog *prog = NULL; | ||
1506 | int ufd = attr->target_fd; | ||
1507 | struct bpf_map *map; | ||
1508 | struct fd f; | ||
1509 | int err; | ||
1510 | |||
1511 | f = fdget(ufd); | ||
1512 | map = __bpf_map_get(f); | ||
1513 | if (IS_ERR(map)) | ||
1514 | return PTR_ERR(map); | ||
1515 | |||
1516 | if (attach) { | ||
1517 | prog = bpf_prog_get_type(attr->attach_bpf_fd, type); | ||
1518 | if (IS_ERR(prog)) { | ||
1519 | fdput(f); | ||
1520 | return PTR_ERR(prog); | ||
1521 | } | ||
1522 | } | ||
1523 | |||
1524 | err = sock_map_prog(map, prog, attr->attach_type); | ||
1525 | if (err) { | ||
1526 | fdput(f); | ||
1527 | if (prog) | ||
1528 | bpf_prog_put(prog); | ||
1529 | return err; | ||
1530 | } | ||
1531 | |||
1532 | fdput(f); | ||
1533 | return 0; | ||
1534 | } | ||
1535 | |||
1536 | #define BPF_F_ATTACH_MASK \ | 1500 | #define BPF_F_ATTACH_MASK \ |
1537 | (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) | 1501 | (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) |
1538 | 1502 | ||
@@ -1540,7 +1504,6 @@ static int bpf_prog_attach(const union bpf_attr *attr) | |||
1540 | { | 1504 | { |
1541 | enum bpf_prog_type ptype; | 1505 | enum bpf_prog_type ptype; |
1542 | struct bpf_prog *prog; | 1506 | struct bpf_prog *prog; |
1543 | struct cgroup *cgrp; | ||
1544 | int ret; | 1507 | int ret; |
1545 | 1508 | ||
1546 | if (!capable(CAP_NET_ADMIN)) | 1509 | if (!capable(CAP_NET_ADMIN)) |
@@ -1577,12 +1540,15 @@ static int bpf_prog_attach(const union bpf_attr *attr) | |||
1577 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; | 1540 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; |
1578 | break; | 1541 | break; |
1579 | case BPF_SK_MSG_VERDICT: | 1542 | case BPF_SK_MSG_VERDICT: |
1580 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true); | 1543 | ptype = BPF_PROG_TYPE_SK_MSG; |
1544 | break; | ||
1581 | case BPF_SK_SKB_STREAM_PARSER: | 1545 | case BPF_SK_SKB_STREAM_PARSER: |
1582 | case BPF_SK_SKB_STREAM_VERDICT: | 1546 | case BPF_SK_SKB_STREAM_VERDICT: |
1583 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true); | 1547 | ptype = BPF_PROG_TYPE_SK_SKB; |
1548 | break; | ||
1584 | case BPF_LIRC_MODE2: | 1549 | case BPF_LIRC_MODE2: |
1585 | return lirc_prog_attach(attr); | 1550 | ptype = BPF_PROG_TYPE_LIRC_MODE2; |
1551 | break; | ||
1586 | default: | 1552 | default: |
1587 | return -EINVAL; | 1553 | return -EINVAL; |
1588 | } | 1554 | } |
@@ -1596,18 +1562,20 @@ static int bpf_prog_attach(const union bpf_attr *attr) | |||
1596 | return -EINVAL; | 1562 | return -EINVAL; |
1597 | } | 1563 | } |
1598 | 1564 | ||
1599 | cgrp = cgroup_get_from_fd(attr->target_fd); | 1565 | switch (ptype) { |
1600 | if (IS_ERR(cgrp)) { | 1566 | case BPF_PROG_TYPE_SK_SKB: |
1601 | bpf_prog_put(prog); | 1567 | case BPF_PROG_TYPE_SK_MSG: |
1602 | return PTR_ERR(cgrp); | 1568 | ret = sockmap_get_from_fd(attr, ptype, prog); |
1569 | break; | ||
1570 | case BPF_PROG_TYPE_LIRC_MODE2: | ||
1571 | ret = lirc_prog_attach(attr, prog); | ||
1572 | break; | ||
1573 | default: | ||
1574 | ret = cgroup_bpf_prog_attach(attr, ptype, prog); | ||
1603 | } | 1575 | } |
1604 | 1576 | ||
1605 | ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, | ||
1606 | attr->attach_flags); | ||
1607 | if (ret) | 1577 | if (ret) |
1608 | bpf_prog_put(prog); | 1578 | bpf_prog_put(prog); |
1609 | cgroup_put(cgrp); | ||
1610 | |||
1611 | return ret; | 1579 | return ret; |
1612 | } | 1580 | } |
1613 | 1581 | ||
@@ -1616,9 +1584,6 @@ static int bpf_prog_attach(const union bpf_attr *attr) | |||
1616 | static int bpf_prog_detach(const union bpf_attr *attr) | 1584 | static int bpf_prog_detach(const union bpf_attr *attr) |
1617 | { | 1585 | { |
1618 | enum bpf_prog_type ptype; | 1586 | enum bpf_prog_type ptype; |
1619 | struct bpf_prog *prog; | ||
1620 | struct cgroup *cgrp; | ||
1621 | int ret; | ||
1622 | 1587 | ||
1623 | if (!capable(CAP_NET_ADMIN)) | 1588 | if (!capable(CAP_NET_ADMIN)) |
1624 | return -EPERM; | 1589 | return -EPERM; |
@@ -1651,29 +1616,17 @@ static int bpf_prog_detach(const union bpf_attr *attr) | |||
1651 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; | 1616 | ptype = BPF_PROG_TYPE_CGROUP_DEVICE; |
1652 | break; | 1617 | break; |
1653 | case BPF_SK_MSG_VERDICT: | 1618 | case BPF_SK_MSG_VERDICT: |
1654 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false); | 1619 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL); |
1655 | case BPF_SK_SKB_STREAM_PARSER: | 1620 | case BPF_SK_SKB_STREAM_PARSER: |
1656 | case BPF_SK_SKB_STREAM_VERDICT: | 1621 | case BPF_SK_SKB_STREAM_VERDICT: |
1657 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false); | 1622 | return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL); |
1658 | case BPF_LIRC_MODE2: | 1623 | case BPF_LIRC_MODE2: |
1659 | return lirc_prog_detach(attr); | 1624 | return lirc_prog_detach(attr); |
1660 | default: | 1625 | default: |
1661 | return -EINVAL; | 1626 | return -EINVAL; |
1662 | } | 1627 | } |
1663 | 1628 | ||
1664 | cgrp = cgroup_get_from_fd(attr->target_fd); | 1629 | return cgroup_bpf_prog_detach(attr, ptype); |
1665 | if (IS_ERR(cgrp)) | ||
1666 | return PTR_ERR(cgrp); | ||
1667 | |||
1668 | prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); | ||
1669 | if (IS_ERR(prog)) | ||
1670 | prog = NULL; | ||
1671 | |||
1672 | ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); | ||
1673 | if (prog) | ||
1674 | bpf_prog_put(prog); | ||
1675 | cgroup_put(cgrp); | ||
1676 | return ret; | ||
1677 | } | 1630 | } |
1678 | 1631 | ||
1679 | #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt | 1632 | #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt |
@@ -1681,9 +1634,6 @@ static int bpf_prog_detach(const union bpf_attr *attr) | |||
1681 | static int bpf_prog_query(const union bpf_attr *attr, | 1634 | static int bpf_prog_query(const union bpf_attr *attr, |
1682 | union bpf_attr __user *uattr) | 1635 | union bpf_attr __user *uattr) |
1683 | { | 1636 | { |
1684 | struct cgroup *cgrp; | ||
1685 | int ret; | ||
1686 | |||
1687 | if (!capable(CAP_NET_ADMIN)) | 1637 | if (!capable(CAP_NET_ADMIN)) |
1688 | return -EPERM; | 1638 | return -EPERM; |
1689 | if (CHECK_ATTR(BPF_PROG_QUERY)) | 1639 | if (CHECK_ATTR(BPF_PROG_QUERY)) |
@@ -1711,14 +1661,9 @@ static int bpf_prog_query(const union bpf_attr *attr, | |||
1711 | default: | 1661 | default: |
1712 | return -EINVAL; | 1662 | return -EINVAL; |
1713 | } | 1663 | } |
1714 | cgrp = cgroup_get_from_fd(attr->query.target_fd); | 1664 | |
1715 | if (IS_ERR(cgrp)) | 1665 | return cgroup_bpf_prog_query(attr, uattr); |
1716 | return PTR_ERR(cgrp); | ||
1717 | ret = cgroup_bpf_query(cgrp, attr, uattr); | ||
1718 | cgroup_put(cgrp); | ||
1719 | return ret; | ||
1720 | } | 1666 | } |
1721 | #endif /* CONFIG_CGROUP_BPF */ | ||
1722 | 1667 | ||
1723 | #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration | 1668 | #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration |
1724 | 1669 | ||
@@ -2365,7 +2310,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz | |||
2365 | case BPF_OBJ_GET: | 2310 | case BPF_OBJ_GET: |
2366 | err = bpf_obj_get(&attr); | 2311 | err = bpf_obj_get(&attr); |
2367 | break; | 2312 | break; |
2368 | #ifdef CONFIG_CGROUP_BPF | ||
2369 | case BPF_PROG_ATTACH: | 2313 | case BPF_PROG_ATTACH: |
2370 | err = bpf_prog_attach(&attr); | 2314 | err = bpf_prog_attach(&attr); |
2371 | break; | 2315 | break; |
@@ -2375,7 +2319,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz | |||
2375 | case BPF_PROG_QUERY: | 2319 | case BPF_PROG_QUERY: |
2376 | err = bpf_prog_query(&attr, uattr); | 2320 | err = bpf_prog_query(&attr, uattr); |
2377 | break; | 2321 | break; |
2378 | #endif | ||
2379 | case BPF_PROG_TEST_RUN: | 2322 | case BPF_PROG_TEST_RUN: |
2380 | err = bpf_prog_test_run(&attr, uattr); | 2323 | err = bpf_prog_test_run(&attr, uattr); |
2381 | break; | 2324 | break; |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 481951bf091d..750cb8082694 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -177,9 +177,20 @@ void *kthread_probe_data(struct task_struct *task) | |||
177 | static void __kthread_parkme(struct kthread *self) | 177 | static void __kthread_parkme(struct kthread *self) |
178 | { | 178 | { |
179 | for (;;) { | 179 | for (;;) { |
180 | set_current_state(TASK_PARKED); | 180 | /* |
181 | * TASK_PARKED is a special state; we must serialize against | ||
182 | * possible pending wakeups to avoid store-store collisions on | ||
183 | * task->state. | ||
184 | * | ||
185 | * Such a collision might possibly result in the task state | ||
186 | * changing from TASK_PARKED and us failing the | ||
187 | * wait_task_inactive() in kthread_park(). | ||
188 | */ | ||
189 | set_special_state(TASK_PARKED); | ||
181 | if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) | 190 | if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) |
182 | break; | 191 | break; |
192 | |||
193 | complete_all(&self->parked); | ||
183 | schedule(); | 194 | schedule(); |
184 | } | 195 | } |
185 | __set_current_state(TASK_RUNNING); | 196 | __set_current_state(TASK_RUNNING); |
@@ -191,11 +202,6 @@ void kthread_parkme(void) | |||
191 | } | 202 | } |
192 | EXPORT_SYMBOL_GPL(kthread_parkme); | 203 | EXPORT_SYMBOL_GPL(kthread_parkme); |
193 | 204 | ||
194 | void kthread_park_complete(struct task_struct *k) | ||
195 | { | ||
196 | complete_all(&to_kthread(k)->parked); | ||
197 | } | ||
198 | |||
199 | static int kthread(void *_create) | 205 | static int kthread(void *_create) |
200 | { | 206 | { |
201 | /* Copy data: it's on kthread's stack */ | 207 | /* Copy data: it's on kthread's stack */ |
@@ -461,6 +467,9 @@ void kthread_unpark(struct task_struct *k) | |||
461 | 467 | ||
462 | reinit_completion(&kthread->parked); | 468 | reinit_completion(&kthread->parked); |
463 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 469 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
470 | /* | ||
471 | * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup. | ||
472 | */ | ||
464 | wake_up_state(k, TASK_PARKED); | 473 | wake_up_state(k, TASK_PARKED); |
465 | } | 474 | } |
466 | EXPORT_SYMBOL_GPL(kthread_unpark); | 475 | EXPORT_SYMBOL_GPL(kthread_unpark); |
@@ -487,7 +496,16 @@ int kthread_park(struct task_struct *k) | |||
487 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 496 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
488 | if (k != current) { | 497 | if (k != current) { |
489 | wake_up_process(k); | 498 | wake_up_process(k); |
499 | /* | ||
500 | * Wait for __kthread_parkme() to complete(), this means we | ||
501 | * _will_ have TASK_PARKED and are about to call schedule(). | ||
502 | */ | ||
490 | wait_for_completion(&kthread->parked); | 503 | wait_for_completion(&kthread->parked); |
504 | /* | ||
505 | * Now wait for that schedule() to complete and the task to | ||
506 | * get scheduled out. | ||
507 | */ | ||
508 | WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED)); | ||
491 | } | 509 | } |
492 | 510 | ||
493 | return 0; | 511 | return 0; |
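The parking handshake above only completes if the kthread itself reaches the parkme path. For context, here is a hedged sketch of the caller-side loop such a kthread typically runs; it is the standard in-kernel pattern, not part of this patch.

#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps in TASK_PARKED until unparked */

		/* ... one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}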
diff --git a/kernel/rseq.c b/kernel/rseq.c index 22b6acf1ad63..c6242d8594dc 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c | |||
@@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t) | |||
85 | { | 85 | { |
86 | u32 cpu_id = raw_smp_processor_id(); | 86 | u32 cpu_id = raw_smp_processor_id(); |
87 | 87 | ||
88 | if (__put_user(cpu_id, &t->rseq->cpu_id_start)) | 88 | if (put_user(cpu_id, &t->rseq->cpu_id_start)) |
89 | return -EFAULT; | 89 | return -EFAULT; |
90 | if (__put_user(cpu_id, &t->rseq->cpu_id)) | 90 | if (put_user(cpu_id, &t->rseq->cpu_id)) |
91 | return -EFAULT; | 91 | return -EFAULT; |
92 | trace_rseq_update(t); | 92 | trace_rseq_update(t); |
93 | return 0; | 93 | return 0; |
@@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t) | |||
100 | /* | 100 | /* |
101 | * Reset cpu_id_start to its initial state (0). | 101 | * Reset cpu_id_start to its initial state (0). |
102 | */ | 102 | */ |
103 | if (__put_user(cpu_id_start, &t->rseq->cpu_id_start)) | 103 | if (put_user(cpu_id_start, &t->rseq->cpu_id_start)) |
104 | return -EFAULT; | 104 | return -EFAULT; |
105 | /* | 105 | /* |
106 | * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming | 106 | * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming |
107 | * in after unregistration can figure out that rseq needs to be | 107 | * in after unregistration can figure out that rseq needs to be |
108 | * registered again. | 108 | * registered again. |
109 | */ | 109 | */ |
110 | if (__put_user(cpu_id, &t->rseq->cpu_id)) | 110 | if (put_user(cpu_id, &t->rseq->cpu_id)) |
111 | return -EFAULT; | 111 | return -EFAULT; |
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
@@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t) | |||
115 | static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) | 115 | static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) |
116 | { | 116 | { |
117 | struct rseq_cs __user *urseq_cs; | 117 | struct rseq_cs __user *urseq_cs; |
118 | unsigned long ptr; | 118 | u64 ptr; |
119 | u32 __user *usig; | 119 | u32 __user *usig; |
120 | u32 sig; | 120 | u32 sig; |
121 | int ret; | 121 | int ret; |
122 | 122 | ||
123 | ret = __get_user(ptr, &t->rseq->rseq_cs); | 123 | if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr))) |
124 | if (ret) | 124 | return -EFAULT; |
125 | return ret; | ||
126 | if (!ptr) { | 125 | if (!ptr) { |
127 | memset(rseq_cs, 0, sizeof(*rseq_cs)); | 126 | memset(rseq_cs, 0, sizeof(*rseq_cs)); |
128 | return 0; | 127 | return 0; |
129 | } | 128 | } |
130 | urseq_cs = (struct rseq_cs __user *)ptr; | 129 | if (ptr >= TASK_SIZE) |
130 | return -EINVAL; | ||
131 | urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr; | ||
131 | if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs))) | 132 | if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs))) |
132 | return -EFAULT; | 133 | return -EFAULT; |
133 | if (rseq_cs->version > 0) | ||
134 | return -EINVAL; | ||
135 | 134 | ||
135 | if (rseq_cs->start_ip >= TASK_SIZE || | ||
136 | rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE || | ||
137 | rseq_cs->abort_ip >= TASK_SIZE || | ||
138 | rseq_cs->version > 0) | ||
139 | return -EINVAL; | ||
140 | /* Check for overflow. */ | ||
141 | if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip) | ||
142 | return -EINVAL; | ||
136 | /* Ensure that abort_ip is not in the critical section. */ | 143 | /* Ensure that abort_ip is not in the critical section. */ |
137 | if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset) | 144 | if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset) |
138 | return -EINVAL; | 145 | return -EINVAL; |
139 | 146 | ||
140 | usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32)); | 147 | usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32)); |
141 | ret = get_user(sig, usig); | 148 | ret = get_user(sig, usig); |
142 | if (ret) | 149 | if (ret) |
143 | return ret; | 150 | return ret; |
@@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) | |||
146 | printk_ratelimited(KERN_WARNING | 153 | printk_ratelimited(KERN_WARNING |
147 | "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n", | 154 | "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n", |
148 | sig, current->rseq_sig, current->pid, usig); | 155 | sig, current->rseq_sig, current->pid, usig); |
149 | return -EPERM; | 156 | return -EINVAL; |
150 | } | 157 | } |
151 | return 0; | 158 | return 0; |
152 | } | 159 | } |
@@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags) | |||
157 | int ret; | 164 | int ret; |
158 | 165 | ||
159 | /* Get thread flags. */ | 166 | /* Get thread flags. */ |
160 | ret = __get_user(flags, &t->rseq->flags); | 167 | ret = get_user(flags, &t->rseq->flags); |
161 | if (ret) | 168 | if (ret) |
162 | return ret; | 169 | return ret; |
163 | 170 | ||
@@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t) | |||
195 | * of code outside of the rseq assembly block. This performs | 202 | * of code outside of the rseq assembly block. This performs |
196 | * a lazy clear of the rseq_cs field. | 203 | * a lazy clear of the rseq_cs field. |
197 | * | 204 | * |
198 | * Set rseq_cs to NULL with single-copy atomicity. | 205 | * Set rseq_cs to NULL. |
199 | */ | 206 | */ |
200 | return __put_user(0UL, &t->rseq->rseq_cs); | 207 | if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64))) |
208 | return -EFAULT; | ||
209 | return 0; | ||
201 | } | 210 | } |
202 | 211 | ||
203 | /* | 212 | /* |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 78d8facba456..fe365c9a08e9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -7,7 +7,6 @@ | |||
7 | */ | 7 | */ |
8 | #include "sched.h" | 8 | #include "sched.h" |
9 | 9 | ||
10 | #include <linux/kthread.h> | ||
11 | #include <linux/nospec.h> | 10 | #include <linux/nospec.h> |
12 | 11 | ||
13 | #include <linux/kcov.h> | 12 | #include <linux/kcov.h> |
@@ -2724,28 +2723,20 @@ static struct rq *finish_task_switch(struct task_struct *prev) | |||
2724 | membarrier_mm_sync_core_before_usermode(mm); | 2723 | membarrier_mm_sync_core_before_usermode(mm); |
2725 | mmdrop(mm); | 2724 | mmdrop(mm); |
2726 | } | 2725 | } |
2727 | if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) { | 2726 | if (unlikely(prev_state == TASK_DEAD)) { |
2728 | switch (prev_state) { | 2727 | if (prev->sched_class->task_dead) |
2729 | case TASK_DEAD: | 2728 | prev->sched_class->task_dead(prev); |
2730 | if (prev->sched_class->task_dead) | ||
2731 | prev->sched_class->task_dead(prev); | ||
2732 | 2729 | ||
2733 | /* | 2730 | /* |
2734 | * Remove function-return probe instances associated with this | 2731 | * Remove function-return probe instances associated with this |
2735 | * task and put them back on the free list. | 2732 | * task and put them back on the free list. |
2736 | */ | 2733 | */ |
2737 | kprobe_flush_task(prev); | 2734 | kprobe_flush_task(prev); |
2738 | |||
2739 | /* Task is done with its stack. */ | ||
2740 | put_task_stack(prev); | ||
2741 | 2735 | ||
2742 | put_task_struct(prev); | 2736 | /* Task is done with its stack. */ |
2743 | break; | 2737 | put_task_stack(prev); |
2744 | 2738 | ||
2745 | case TASK_PARKED: | 2739 | put_task_struct(prev); |
2746 | kthread_park_complete(prev); | ||
2747 | break; | ||
2748 | } | ||
2749 | } | 2740 | } |
2750 | 2741 | ||
2751 | tick_nohz_task_switch(); | 2742 | tick_nohz_task_switch(); |
@@ -3113,7 +3104,9 @@ static void sched_tick_remote(struct work_struct *work) | |||
3113 | struct tick_work *twork = container_of(dwork, struct tick_work, work); | 3104 | struct tick_work *twork = container_of(dwork, struct tick_work, work); |
3114 | int cpu = twork->cpu; | 3105 | int cpu = twork->cpu; |
3115 | struct rq *rq = cpu_rq(cpu); | 3106 | struct rq *rq = cpu_rq(cpu); |
3107 | struct task_struct *curr; | ||
3116 | struct rq_flags rf; | 3108 | struct rq_flags rf; |
3109 | u64 delta; | ||
3117 | 3110 | ||
3118 | /* | 3111 | /* |
3119 | * Handle the tick only if it appears the remote CPU is running in full | 3112 | * Handle the tick only if it appears the remote CPU is running in full |
@@ -3122,24 +3115,28 @@ static void sched_tick_remote(struct work_struct *work) | |||
3122 | * statistics and checks timeslices in a time-independent way, regardless | 3115 | * statistics and checks timeslices in a time-independent way, regardless |
3123 | * of when exactly it is running. | 3116 | * of when exactly it is running. |
3124 | */ | 3117 | */ |
3125 | if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) { | 3118 | if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu)) |
3126 | struct task_struct *curr; | 3119 | goto out_requeue; |
3127 | u64 delta; | ||
3128 | 3120 | ||
3129 | rq_lock_irq(rq, &rf); | 3121 | rq_lock_irq(rq, &rf); |
3130 | update_rq_clock(rq); | 3122 | curr = rq->curr; |
3131 | curr = rq->curr; | 3123 | if (is_idle_task(curr)) |
3132 | delta = rq_clock_task(rq) - curr->se.exec_start; | 3124 | goto out_unlock; |
3133 | 3125 | ||
3134 | /* | 3126 | update_rq_clock(rq); |
3135 | * Make sure the next tick runs within a reasonable | 3127 | delta = rq_clock_task(rq) - curr->se.exec_start; |
3136 | * amount of time. | 3128 | |
3137 | */ | 3129 | /* |
3138 | WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); | 3130 | * Make sure the next tick runs within a reasonable |
3139 | curr->sched_class->task_tick(rq, curr, 0); | 3131 | * amount of time. |
3140 | rq_unlock_irq(rq, &rf); | 3132 | */ |
3141 | } | 3133 | WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); |
3134 | curr->sched_class->task_tick(rq, curr, 0); | ||
3135 | |||
3136 | out_unlock: | ||
3137 | rq_unlock_irq(rq, &rf); | ||
3142 | 3138 | ||
3139 | out_requeue: | ||
3143 | /* | 3140 | /* |
3144 | * Run the remote tick once per second (1Hz). This arbitrary | 3141 | * Run the remote tick once per second (1Hz). This arbitrary |
3145 | * frequency is large enough to avoid overload but short enough | 3142 | * frequency is large enough to avoid overload but short enough |
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 3cde46483f0a..c907fde01eaa 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c | |||
@@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu) | |||
192 | { | 192 | { |
193 | struct rq *rq = cpu_rq(sg_cpu->cpu); | 193 | struct rq *rq = cpu_rq(sg_cpu->cpu); |
194 | 194 | ||
195 | if (rq->rt.rt_nr_running) | 195 | if (rt_rq_is_runnable(&rq->rt)) |
196 | return sg_cpu->max; | 196 | return sg_cpu->max; |
197 | 197 | ||
198 | /* | 198 | /* |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1866e64792a7..2f0a0be4d344 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -3982,18 +3982,10 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) | |||
3982 | if (!sched_feat(UTIL_EST)) | 3982 | if (!sched_feat(UTIL_EST)) |
3983 | return; | 3983 | return; |
3984 | 3984 | ||
3985 | /* | 3985 | /* Update root cfs_rq's estimated utilization */ |
3986 | * Update root cfs_rq's estimated utilization | 3986 | ue.enqueued = cfs_rq->avg.util_est.enqueued; |
3987 | * | 3987 | ue.enqueued -= min_t(unsigned int, ue.enqueued, |
3988 | * If *p is the last task then the root cfs_rq's estimated utilization | 3988 | (_task_util_est(p) | UTIL_AVG_UNCHANGED)); |
3989 | * of a CPU is 0 by definition. | ||
3990 | */ | ||
3991 | ue.enqueued = 0; | ||
3992 | if (cfs_rq->nr_running) { | ||
3993 | ue.enqueued = cfs_rq->avg.util_est.enqueued; | ||
3994 | ue.enqueued -= min_t(unsigned int, ue.enqueued, | ||
3995 | (_task_util_est(p) | UTIL_AVG_UNCHANGED)); | ||
3996 | } | ||
3997 | WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); | 3989 | WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); |
3998 | 3990 | ||
3999 | /* | 3991 | /* |
@@ -4590,6 +4582,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) | |||
4590 | now = sched_clock_cpu(smp_processor_id()); | 4582 | now = sched_clock_cpu(smp_processor_id()); |
4591 | cfs_b->runtime = cfs_b->quota; | 4583 | cfs_b->runtime = cfs_b->quota; |
4592 | cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); | 4584 | cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); |
4585 | cfs_b->expires_seq++; | ||
4593 | } | 4586 | } |
4594 | 4587 | ||
4595 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) | 4588 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) |
@@ -4612,6 +4605,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) | |||
4612 | struct task_group *tg = cfs_rq->tg; | 4605 | struct task_group *tg = cfs_rq->tg; |
4613 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); | 4606 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); |
4614 | u64 amount = 0, min_amount, expires; | 4607 | u64 amount = 0, min_amount, expires; |
4608 | int expires_seq; | ||
4615 | 4609 | ||
4616 | /* note: this is a positive sum as runtime_remaining <= 0 */ | 4610 | /* note: this is a positive sum as runtime_remaining <= 0 */ |
4617 | min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; | 4611 | min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; |
@@ -4628,6 +4622,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) | |||
4628 | cfs_b->idle = 0; | 4622 | cfs_b->idle = 0; |
4629 | } | 4623 | } |
4630 | } | 4624 | } |
4625 | expires_seq = cfs_b->expires_seq; | ||
4631 | expires = cfs_b->runtime_expires; | 4626 | expires = cfs_b->runtime_expires; |
4632 | raw_spin_unlock(&cfs_b->lock); | 4627 | raw_spin_unlock(&cfs_b->lock); |
4633 | 4628 | ||
@@ -4637,8 +4632,10 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) | |||
4637 | * spread between our sched_clock and the one on which runtime was | 4632 | * spread between our sched_clock and the one on which runtime was |
4638 | * issued. | 4633 | * issued. |
4639 | */ | 4634 | */ |
4640 | if ((s64)(expires - cfs_rq->runtime_expires) > 0) | 4635 | if (cfs_rq->expires_seq != expires_seq) { |
4636 | cfs_rq->expires_seq = expires_seq; | ||
4641 | cfs_rq->runtime_expires = expires; | 4637 | cfs_rq->runtime_expires = expires; |
4638 | } | ||
4642 | 4639 | ||
4643 | return cfs_rq->runtime_remaining > 0; | 4640 | return cfs_rq->runtime_remaining > 0; |
4644 | } | 4641 | } |
@@ -4664,12 +4661,9 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) | |||
4664 | * has not truly expired. | 4661 | * has not truly expired. |
4665 | * | 4662 | * |
4666 | * Fortunately we can check determine whether this the case by checking | 4663 | * Fortunately we can check determine whether this the case by checking |
4667 | * whether the global deadline has advanced. It is valid to compare | 4664 | * whether the global deadline(cfs_b->expires_seq) has advanced. |
4668 | * cfs_b->runtime_expires without any locks since we only care about | ||
4669 | * exact equality, so a partial write will still work. | ||
4670 | */ | 4665 | */ |
4671 | 4666 | if (cfs_rq->expires_seq == cfs_b->expires_seq) { | |
4672 | if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { | ||
4673 | /* extend local deadline, drift is bounded above by 2 ticks */ | 4667 | /* extend local deadline, drift is bounded above by 2 ticks */ |
4674 | cfs_rq->runtime_expires += TICK_NSEC; | 4668 | cfs_rq->runtime_expires += TICK_NSEC; |
4675 | } else { | 4669 | } else { |
@@ -5202,13 +5196,18 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) | |||
5202 | 5196 | ||
5203 | void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | 5197 | void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
5204 | { | 5198 | { |
5199 | u64 overrun; | ||
5200 | |||
5205 | lockdep_assert_held(&cfs_b->lock); | 5201 | lockdep_assert_held(&cfs_b->lock); |
5206 | 5202 | ||
5207 | if (!cfs_b->period_active) { | 5203 | if (cfs_b->period_active) |
5208 | cfs_b->period_active = 1; | 5204 | return; |
5209 | hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); | 5205 | |
5210 | hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); | 5206 | cfs_b->period_active = 1; |
5211 | } | 5207 | overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); |
5208 | cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period); | ||
5209 | cfs_b->expires_seq++; | ||
5210 | hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); | ||
5212 | } | 5211 | } |
5213 | 5212 | ||
5214 | static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | 5213 | static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
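Taken together, the fair.c hunks above replace a lockless comparison of two u64 deadlines with a small generation counter: cfs_b->expires_seq is bumped whenever the bandwidth pool starts a new period, and each cfs_rq remembers the generation it last synced with. A rough sketch of the pattern with field names matching the hunks (locking, slack timers and throttling are omitted, and the helper names here are invented):

/* pool side: once per period, under cfs_b->lock */
static void refill_period(struct cfs_bandwidth *cfs_b, u64 now)
{
	cfs_b->runtime = cfs_b->quota;
	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
	cfs_b->expires_seq++;			/* new deadline generation */
}

/* per-runqueue side: decide whether the cached deadline is stale */
static void sync_expires(struct cfs_rq *cfs_rq, struct cfs_bandwidth *cfs_b)
{
	if (cfs_rq->expires_seq != cfs_b->expires_seq) {
		cfs_rq->expires_seq = cfs_b->expires_seq;	/* adopt the new period */
		cfs_rq->runtime_expires = cfs_b->runtime_expires;
	}
	/*
	 * Equal generations mean any apparent expiry is only clock drift
	 * between CPUs, which expire_cfs_rq_runtime() absorbs by extending
	 * the local deadline by a tick.
	 */
}

Comparing a sequence number sidesteps the old assumption that an unlocked u64 comparison only needed exact equality to stay safe.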
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 47556b0c9a95..572567078b60 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) | |||
508 | 508 | ||
509 | rt_se = rt_rq->tg->rt_se[cpu]; | 509 | rt_se = rt_rq->tg->rt_se[cpu]; |
510 | 510 | ||
511 | if (!rt_se) | 511 | if (!rt_se) { |
512 | dequeue_top_rt_rq(rt_rq); | 512 | dequeue_top_rt_rq(rt_rq); |
513 | /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ | ||
514 | cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); | ||
515 | } | ||
513 | else if (on_rt_rq(rt_se)) | 516 | else if (on_rt_rq(rt_se)) |
514 | dequeue_rt_entity(rt_se, 0); | 517 | dequeue_rt_entity(rt_se, 0); |
515 | } | 518 | } |
@@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq) | |||
1001 | sub_nr_running(rq, rt_rq->rt_nr_running); | 1004 | sub_nr_running(rq, rt_rq->rt_nr_running); |
1002 | rt_rq->rt_queued = 0; | 1005 | rt_rq->rt_queued = 0; |
1003 | 1006 | ||
1004 | /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ | ||
1005 | cpufreq_update_util(rq, 0); | ||
1006 | } | 1007 | } |
1007 | 1008 | ||
1008 | static void | 1009 | static void |
@@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq) | |||
1014 | 1015 | ||
1015 | if (rt_rq->rt_queued) | 1016 | if (rt_rq->rt_queued) |
1016 | return; | 1017 | return; |
1017 | if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running) | 1018 | |
1019 | if (rt_rq_throttled(rt_rq)) | ||
1018 | return; | 1020 | return; |
1019 | 1021 | ||
1020 | add_nr_running(rq, rt_rq->rt_nr_running); | 1022 | if (rt_rq->rt_nr_running) { |
1021 | rt_rq->rt_queued = 1; | 1023 | add_nr_running(rq, rt_rq->rt_nr_running); |
1024 | rt_rq->rt_queued = 1; | ||
1025 | } | ||
1022 | 1026 | ||
1023 | /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ | 1027 | /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ |
1024 | cpufreq_update_util(rq, 0); | 1028 | cpufreq_update_util(rq, 0); |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6601baf2361c..c7742dcc136c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -334,9 +334,10 @@ struct cfs_bandwidth { | |||
334 | u64 runtime; | 334 | u64 runtime; |
335 | s64 hierarchical_quota; | 335 | s64 hierarchical_quota; |
336 | u64 runtime_expires; | 336 | u64 runtime_expires; |
337 | int expires_seq; | ||
337 | 338 | ||
338 | int idle; | 339 | short idle; |
339 | int period_active; | 340 | short period_active; |
340 | struct hrtimer period_timer; | 341 | struct hrtimer period_timer; |
341 | struct hrtimer slack_timer; | 342 | struct hrtimer slack_timer; |
342 | struct list_head throttled_cfs_rq; | 343 | struct list_head throttled_cfs_rq; |
@@ -551,6 +552,7 @@ struct cfs_rq { | |||
551 | 552 | ||
552 | #ifdef CONFIG_CFS_BANDWIDTH | 553 | #ifdef CONFIG_CFS_BANDWIDTH |
553 | int runtime_enabled; | 554 | int runtime_enabled; |
555 | int expires_seq; | ||
554 | u64 runtime_expires; | 556 | u64 runtime_expires; |
555 | s64 runtime_remaining; | 557 | s64 runtime_remaining; |
556 | 558 | ||
@@ -609,6 +611,11 @@ struct rt_rq { | |||
609 | #endif | 611 | #endif |
610 | }; | 612 | }; |
611 | 613 | ||
614 | static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) | ||
615 | { | ||
616 | return rt_rq->rt_queued && rt_rq->rt_nr_running; | ||
617 | } | ||
618 | |||
612 | /* Deadline class' related fields in a runqueue */ | 619 | /* Deadline class' related fields in a runqueue */ |
613 | struct dl_rq { | 620 | struct dl_rq { |
614 | /* runqueue is an rbtree, ordered by deadline */ | 621 | /* runqueue is an rbtree, ordered by deadline */ |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index b7005dd21ec1..14de3727b18e 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -277,8 +277,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev, | |||
277 | */ | 277 | */ |
278 | return !curdev || | 278 | return !curdev || |
279 | newdev->rating > curdev->rating || | 279 | newdev->rating > curdev->rating || |
280 | (!cpumask_equal(curdev->cpumask, newdev->cpumask) && | 280 | !cpumask_equal(curdev->cpumask, newdev->cpumask); |
281 | !tick_check_percpu(curdev, newdev, smp_processor_id())); | ||
282 | } | 281 | } |
283 | 282 | ||
284 | /* | 283 | /* |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index efed9c1cfb7e..caf9cbf35816 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -192,17 +192,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, | |||
192 | op->saved_func(ip, parent_ip, op, regs); | 192 | op->saved_func(ip, parent_ip, op, regs); |
193 | } | 193 | } |
194 | 194 | ||
195 | /** | ||
196 | * clear_ftrace_function - reset the ftrace function | ||
197 | * | ||
198 | * This NULLs the ftrace function and in essence stops | ||
199 | * tracing. There may be lag | ||
200 | */ | ||
201 | void clear_ftrace_function(void) | ||
202 | { | ||
203 | ftrace_trace_function = ftrace_stub; | ||
204 | } | ||
205 | |||
206 | static void ftrace_sync(struct work_struct *work) | 195 | static void ftrace_sync(struct work_struct *work) |
207 | { | 196 | { |
208 | /* | 197 | /* |
@@ -6689,7 +6678,7 @@ void ftrace_kill(void) | |||
6689 | { | 6678 | { |
6690 | ftrace_disabled = 1; | 6679 | ftrace_disabled = 1; |
6691 | ftrace_enabled = 0; | 6680 | ftrace_enabled = 0; |
6692 | clear_ftrace_function(); | 6681 | ftrace_trace_function = ftrace_stub; |
6693 | } | 6682 | } |
6694 | 6683 | ||
6695 | /** | 6684 | /** |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a0079b4c7a49..87cf25171fb8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -2953,6 +2953,7 @@ out_nobuffer: | |||
2953 | } | 2953 | } |
2954 | EXPORT_SYMBOL_GPL(trace_vbprintk); | 2954 | EXPORT_SYMBOL_GPL(trace_vbprintk); |
2955 | 2955 | ||
2956 | __printf(3, 0) | ||
2956 | static int | 2957 | static int |
2957 | __trace_array_vprintk(struct ring_buffer *buffer, | 2958 | __trace_array_vprintk(struct ring_buffer *buffer, |
2958 | unsigned long ip, const char *fmt, va_list args) | 2959 | unsigned long ip, const char *fmt, va_list args) |
@@ -3007,12 +3008,14 @@ out_nobuffer: | |||
3007 | return len; | 3008 | return len; |
3008 | } | 3009 | } |
3009 | 3010 | ||
3011 | __printf(3, 0) | ||
3010 | int trace_array_vprintk(struct trace_array *tr, | 3012 | int trace_array_vprintk(struct trace_array *tr, |
3011 | unsigned long ip, const char *fmt, va_list args) | 3013 | unsigned long ip, const char *fmt, va_list args) |
3012 | { | 3014 | { |
3013 | return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); | 3015 | return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); |
3014 | } | 3016 | } |
3015 | 3017 | ||
3018 | __printf(3, 0) | ||
3016 | int trace_array_printk(struct trace_array *tr, | 3019 | int trace_array_printk(struct trace_array *tr, |
3017 | unsigned long ip, const char *fmt, ...) | 3020 | unsigned long ip, const char *fmt, ...) |
3018 | { | 3021 | { |
@@ -3028,6 +3031,7 @@ int trace_array_printk(struct trace_array *tr, | |||
3028 | return ret; | 3031 | return ret; |
3029 | } | 3032 | } |
3030 | 3033 | ||
3034 | __printf(3, 4) | ||
3031 | int trace_array_printk_buf(struct ring_buffer *buffer, | 3035 | int trace_array_printk_buf(struct ring_buffer *buffer, |
3032 | unsigned long ip, const char *fmt, ...) | 3036 | unsigned long ip, const char *fmt, ...) |
3033 | { | 3037 | { |
@@ -3043,6 +3047,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer, | |||
3043 | return ret; | 3047 | return ret; |
3044 | } | 3048 | } |
3045 | 3049 | ||
3050 | __printf(2, 0) | ||
3046 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 3051 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) |
3047 | { | 3052 | { |
3048 | return trace_array_vprintk(&global_trace, ip, fmt, args); | 3053 | return trace_array_vprintk(&global_trace, ip, fmt, args); |
@@ -3360,8 +3365,8 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, | |||
3360 | 3365 | ||
3361 | print_event_info(buf, m); | 3366 | print_event_info(buf, m); |
3362 | 3367 | ||
3363 | seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); | 3368 | seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); |
3364 | seq_printf(m, "# | | | %s | |\n", tgid ? " | " : ""); | 3369 | seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); |
3365 | } | 3370 | } |
3366 | 3371 | ||
3367 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, | 3372 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, |
@@ -3381,9 +3386,9 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file | |||
3381 | tgid ? tgid_space : space); | 3386 | tgid ? tgid_space : space); |
3382 | seq_printf(m, "# %s||| / delay\n", | 3387 | seq_printf(m, "# %s||| / delay\n", |
3383 | tgid ? tgid_space : space); | 3388 | tgid ? tgid_space : space); |
3384 | seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", | 3389 | seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", |
3385 | tgid ? " TGID " : space); | 3390 | tgid ? " TGID " : space); |
3386 | seq_printf(m, "# | | | %s|||| | |\n", | 3391 | seq_printf(m, "# | | %s | |||| | |\n", |
3387 | tgid ? " | " : space); | 3392 | tgid ? " | " : space); |
3388 | } | 3393 | } |
3389 | 3394 | ||
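The __printf() markers added above are the kernel's wrapper around the compiler's printf format attribute: the first number is the position of the format string, the second the position of the first checked variadic argument, and 0 means the function consumes a va_list, so only the format string itself can be validated. A standalone illustration, not kernel code; the argument positions are shifted because the demo functions take one fewer leading parameter than trace_array_printk() and friends:

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

/* va_list consumer: second index 0, like the __printf(3, 0) cases above */
__printf(2, 0)
int demo_vprintk(int level, const char *fmt, va_list args)
{
	(void)level;
	return vprintf(fmt, args);
}

/* variadic front end: format in slot 2, checked arguments start at 3 */
__printf(2, 3)
int demo_printk(int level, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = demo_vprintk(level, fmt, args);
	va_end(args);
	return ret;
}

With the attribute in place a call such as demo_printk(1, "%s", 42) can be flagged at compile time, which is the point of annotating the trace printf helpers.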
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 630c5a24b2b2..f8f86231ad90 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -583,9 +583,7 @@ static __always_inline void trace_clear_recursion(int bit) | |||
583 | static inline struct ring_buffer_iter * | 583 | static inline struct ring_buffer_iter * |
584 | trace_buffer_iter(struct trace_iterator *iter, int cpu) | 584 | trace_buffer_iter(struct trace_iterator *iter, int cpu) |
585 | { | 585 | { |
586 | if (iter->buffer_iter && iter->buffer_iter[cpu]) | 586 | return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL; |
587 | return iter->buffer_iter[cpu]; | ||
588 | return NULL; | ||
589 | } | 587 | } |
590 | 588 | ||
591 | int tracer_init(struct tracer *t, struct trace_array *tr); | 589 | int tracer_init(struct tracer *t, struct trace_array *tr); |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 0dceb77d1d42..893a206bcba4 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -1701,6 +1701,7 @@ static void create_filter_finish(struct filter_parse_error *pe) | |||
1701 | * @filter_str: filter string | 1701 | * @filter_str: filter string |
1702 | * @set_str: remember @filter_str and enable detailed error in filter | 1702 | * @set_str: remember @filter_str and enable detailed error in filter |
1703 | * @filterp: out param for created filter (always updated on return) | 1703 | * @filterp: out param for created filter (always updated on return) |
1704 | * Must be a pointer that references a NULL pointer. | ||
1704 | * | 1705 | * |
1705 | * Creates a filter for @call with @filter_str. If @set_str is %true, | 1706 | * Creates a filter for @call with @filter_str. If @set_str is %true, |
1706 | * @filter_str is copied and recorded in the new filter. | 1707 | * @filter_str is copied and recorded in the new filter. |
@@ -1718,6 +1719,10 @@ static int create_filter(struct trace_event_call *call, | |||
1718 | struct filter_parse_error *pe = NULL; | 1719 | struct filter_parse_error *pe = NULL; |
1719 | int err; | 1720 | int err; |
1720 | 1721 | ||
1722 | /* filterp must point to NULL */ | ||
1723 | if (WARN_ON(*filterp)) | ||
1724 | *filterp = NULL; | ||
1725 | |||
1721 | err = create_filter_start(filter_string, set_str, &pe, filterp); | 1726 | err = create_filter_start(filter_string, set_str, &pe, filterp); |
1722 | if (err) | 1727 | if (err) |
1723 | return err; | 1728 | return err; |
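The new WARN_ON documents a calling convention rather than changing the parser: @filterp must reference a NULL pointer on entry because create_filter_start() allocates into it, and a stale pointer would otherwise be leaked or reused. A hedged sketch of a conforming caller (create_filter() is static to this file, so real call sites live in trace_events_filter.c; every name here other than create_filter() and struct event_filter is made up):

/* sketch of the calling convention only, not a real call site */
static int demo_set_filter(struct trace_event_call *call, char *filter_str)
{
	struct event_filter *filter = NULL;	/* must reference NULL on entry */
	int err;

	err = create_filter(call, filter_str, true, &filter);
	if (err)
		return err;

	/* filter now points at the parsed filter; the caller owns it */
	return demo_attach_filter(call, filter);
}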
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 046c716a6536..aae18af94c94 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c | |||
@@ -393,7 +393,7 @@ static void hist_err_event(char *str, char *system, char *event, char *var) | |||
393 | else if (system) | 393 | else if (system) |
394 | snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event); | 394 | snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event); |
395 | else | 395 | else |
396 | strncpy(err, var, MAX_FILTER_STR_VAL); | 396 | strscpy(err, var, MAX_FILTER_STR_VAL); |
397 | 397 | ||
398 | hist_err(str, err); | 398 | hist_err(str, err); |
399 | } | 399 | } |
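The strncpy()-to-strscpy() switch above is the usual string-hardening change: strncpy() leaves the destination without a terminating NUL when the source fills the whole buffer (and zero-pads shorter strings), while strscpy() always terminates for a non-zero size and reports truncation. A short sketch of the call site, assuming var can be arbitrarily long:

char err[MAX_FILTER_STR_VAL];
ssize_t ret;

/* old call: with a source of MAX_FILTER_STR_VAL or more characters,
 * err is left without a terminating NUL for the later hist_err() print */
strncpy(err, var, MAX_FILTER_STR_VAL);

/* new call: err is always terminated; a negative return (-E2BIG)
 * signals that var had to be truncated to fit */
ret = strscpy(err, var, MAX_FILTER_STR_VAL);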
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 23c0b0cb5fb9..169b3c44ee97 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
831 | struct ftrace_graph_ret *graph_ret; | 831 | struct ftrace_graph_ret *graph_ret; |
832 | struct ftrace_graph_ent *call; | 832 | struct ftrace_graph_ent *call; |
833 | unsigned long long duration; | 833 | unsigned long long duration; |
834 | int cpu = iter->cpu; | ||
834 | int i; | 835 | int i; |
835 | 836 | ||
836 | graph_ret = &ret_entry->ret; | 837 | graph_ret = &ret_entry->ret; |
@@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
839 | 840 | ||
840 | if (data) { | 841 | if (data) { |
841 | struct fgraph_cpu_data *cpu_data; | 842 | struct fgraph_cpu_data *cpu_data; |
842 | int cpu = iter->cpu; | ||
843 | 843 | ||
844 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | 844 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
845 | 845 | ||
@@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
869 | 869 | ||
870 | trace_seq_printf(s, "%ps();\n", (void *)call->func); | 870 | trace_seq_printf(s, "%ps();\n", (void *)call->func); |
871 | 871 | ||
872 | print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET, | ||
873 | cpu, iter->ent->pid, flags); | ||
874 | |||
872 | return trace_handle_return(s); | 875 | return trace_handle_return(s); |
873 | } | 876 | } |
874 | 877 | ||
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index daa81571b22a..21f718472942 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -1480,8 +1480,10 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, | |||
1480 | } | 1480 | } |
1481 | 1481 | ||
1482 | ret = __register_trace_kprobe(tk); | 1482 | ret = __register_trace_kprobe(tk); |
1483 | if (ret < 0) | 1483 | if (ret < 0) { |
1484 | kfree(tk->tp.call.print_fmt); | ||
1484 | goto error; | 1485 | goto error; |
1486 | } | ||
1485 | 1487 | ||
1486 | return &tk->tp.call; | 1488 | return &tk->tp.call; |
1487 | error: | 1489 | error: |
@@ -1501,6 +1503,8 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call) | |||
1501 | } | 1503 | } |
1502 | 1504 | ||
1503 | __unregister_trace_kprobe(tk); | 1505 | __unregister_trace_kprobe(tk); |
1506 | |||
1507 | kfree(tk->tp.call.print_fmt); | ||
1504 | free_trace_kprobe(tk); | 1508 | free_trace_kprobe(tk); |
1505 | } | 1509 | } |
1506 | #endif /* CONFIG_PERF_EVENTS */ | 1510 | #endif /* CONFIG_PERF_EVENTS */ |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 90db994ac900..1c8e30fda46a 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -594,8 +594,7 @@ int trace_print_context(struct trace_iterator *iter) | |||
594 | 594 | ||
595 | trace_find_cmdline(entry->pid, comm); | 595 | trace_find_cmdline(entry->pid, comm); |
596 | 596 | ||
597 | trace_seq_printf(s, "%16s-%-5d [%03d] ", | 597 | trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); |
598 | comm, entry->pid, iter->cpu); | ||
599 | 598 | ||
600 | if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { | 599 | if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { |
601 | unsigned int tgid = trace_find_tgid(entry->pid); | 600 | unsigned int tgid = trace_find_tgid(entry->pid); |
@@ -606,6 +605,8 @@ int trace_print_context(struct trace_iterator *iter) | |||
606 | trace_seq_printf(s, "(%5d) ", tgid); | 605 | trace_seq_printf(s, "(%5d) ", tgid); |
607 | } | 606 | } |
608 | 607 | ||
608 | trace_seq_printf(s, "[%03d] ", iter->cpu); | ||
609 | |||
609 | if (tr->trace_flags & TRACE_ITER_IRQ_INFO) | 610 | if (tr->trace_flags & TRACE_ITER_IRQ_INFO) |
610 | trace_print_lat_fmt(s, entry); | 611 | trace_print_lat_fmt(s, entry); |
611 | 612 | ||
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 60aedc879361..08d3d59dca17 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = { | |||
5282 | { /* Mainly checking JIT here. */ | 5282 | { /* Mainly checking JIT here. */ |
5283 | "BPF_MAXINSNS: Ctx heavy transformations", | 5283 | "BPF_MAXINSNS: Ctx heavy transformations", |
5284 | { }, | 5284 | { }, |
5285 | #if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) | ||
5286 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
5287 | #else | ||
5285 | CLASSIC, | 5288 | CLASSIC, |
5289 | #endif | ||
5286 | { }, | 5290 | { }, |
5287 | { | 5291 | { |
5288 | { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, | 5292 | { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, |
5289 | { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } | 5293 | { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } |
5290 | }, | 5294 | }, |
5291 | .fill_helper = bpf_fill_maxinsns6, | 5295 | .fill_helper = bpf_fill_maxinsns6, |
5296 | .expected_errcode = -ENOTSUPP, | ||
5292 | }, | 5297 | }, |
5293 | { /* Mainly checking JIT here. */ | 5298 | { /* Mainly checking JIT here. */ |
5294 | "BPF_MAXINSNS: Call heavy transformations", | 5299 | "BPF_MAXINSNS: Call heavy transformations", |
5295 | { }, | 5300 | { }, |
5301 | #if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) | ||
5302 | CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, | ||
5303 | #else | ||
5296 | CLASSIC | FLAG_NO_DATA, | 5304 | CLASSIC | FLAG_NO_DATA, |
5305 | #endif | ||
5297 | { }, | 5306 | { }, |
5298 | { { 1, 0 }, { 10, 0 } }, | 5307 | { { 1, 0 }, { 10, 0 } }, |
5299 | .fill_helper = bpf_fill_maxinsns7, | 5308 | .fill_helper = bpf_fill_maxinsns7, |
5309 | .expected_errcode = -ENOTSUPP, | ||
5300 | }, | 5310 | }, |
5301 | { /* Mainly checking JIT here. */ | 5311 | { /* Mainly checking JIT here. */ |
5302 | "BPF_MAXINSNS: Jump heavy test", | 5312 | "BPF_MAXINSNS: Jump heavy test", |
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = { | |||
5347 | { | 5357 | { |
5348 | "BPF_MAXINSNS: exec all MSH", | 5358 | "BPF_MAXINSNS: exec all MSH", |
5349 | { }, | 5359 | { }, |
5360 | #if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) | ||
5361 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
5362 | #else | ||
5350 | CLASSIC, | 5363 | CLASSIC, |
5364 | #endif | ||
5351 | { 0xfa, 0xfb, 0xfc, 0xfd, }, | 5365 | { 0xfa, 0xfb, 0xfc, 0xfd, }, |
5352 | { { 4, 0xababab83 } }, | 5366 | { { 4, 0xababab83 } }, |
5353 | .fill_helper = bpf_fill_maxinsns13, | 5367 | .fill_helper = bpf_fill_maxinsns13, |
5368 | .expected_errcode = -ENOTSUPP, | ||
5354 | }, | 5369 | }, |
5355 | { | 5370 | { |
5356 | "BPF_MAXINSNS: ld_abs+get_processor_id", | 5371 | "BPF_MAXINSNS: ld_abs+get_processor_id", |
5357 | { }, | 5372 | { }, |
5373 | #if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) | ||
5374 | CLASSIC | FLAG_EXPECTED_FAIL, | ||
5375 | #else | ||
5358 | CLASSIC, | 5376 | CLASSIC, |
5377 | #endif | ||
5359 | { }, | 5378 | { }, |
5360 | { { 1, 0xbee } }, | 5379 | { { 1, 0xbee } }, |
5361 | .fill_helper = bpf_fill_ld_abs_get_processor_id, | 5380 | .fill_helper = bpf_fill_ld_abs_get_processor_id, |
5381 | .expected_errcode = -ENOTSUPP, | ||
5362 | }, | 5382 | }, |
5363 | /* | 5383 | /* |
5364 | * LD_IND / LD_ABS on fragmented SKBs | 5384 | * LD_IND / LD_ABS on fragmented SKBs |
diff --git a/mm/debug.c b/mm/debug.c index 56e2d9125ea5..38c926520c97 100644 --- a/mm/debug.c +++ b/mm/debug.c | |||
@@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = { | |||
43 | 43 | ||
44 | void __dump_page(struct page *page, const char *reason) | 44 | void __dump_page(struct page *page, const char *reason) |
45 | { | 45 | { |
46 | bool page_poisoned = PagePoisoned(page); | ||
47 | int mapcount; | ||
48 | |||
49 | /* | ||
50 | * If struct page is poisoned don't access Page*() functions as that | ||
51 | * leads to recursive loop. Page*() check for poisoned pages, and calls | ||
52 | * dump_page() when detected. | ||
53 | */ | ||
54 | if (page_poisoned) { | ||
55 | pr_emerg("page:%px is uninitialized and poisoned", page); | ||
56 | goto hex_only; | ||
57 | } | ||
58 | |||
46 | /* | 59 | /* |
47 | * Avoid VM_BUG_ON() in page_mapcount(). | 60 | * Avoid VM_BUG_ON() in page_mapcount(). |
48 | * page->_mapcount space in struct page is used by sl[aou]b pages to | 61 | * page->_mapcount space in struct page is used by sl[aou]b pages to |
49 | * encode own info. | 62 | * encode own info. |
50 | */ | 63 | */ |
51 | int mapcount = PageSlab(page) ? 0 : page_mapcount(page); | 64 | mapcount = PageSlab(page) ? 0 : page_mapcount(page); |
52 | 65 | ||
53 | pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", | 66 | pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", |
54 | page, page_ref_count(page), mapcount, | 67 | page, page_ref_count(page), mapcount, |
@@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason) | |||
60 | 73 | ||
61 | pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); | 74 | pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); |
62 | 75 | ||
76 | hex_only: | ||
63 | print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, | 77 | print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, |
64 | sizeof(unsigned long), page, | 78 | sizeof(unsigned long), page, |
65 | sizeof(struct page), false); | 79 | sizeof(struct page), false); |
@@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason) | |||
68 | pr_alert("page dumped because: %s\n", reason); | 82 | pr_alert("page dumped because: %s\n", reason); |
69 | 83 | ||
70 | #ifdef CONFIG_MEMCG | 84 | #ifdef CONFIG_MEMCG |
71 | if (page->mem_cgroup) | 85 | if (!page_poisoned && page->mem_cgroup) |
72 | pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); | 86 | pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); |
73 | #endif | 87 | #endif |
74 | } | 88 | } |
diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c | |||
@@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) | |||
1238 | int locked = 0; | 1238 | int locked = 0; |
1239 | long ret = 0; | 1239 | long ret = 0; |
1240 | 1240 | ||
1241 | VM_BUG_ON(start & ~PAGE_MASK); | ||
1242 | VM_BUG_ON(len != PAGE_ALIGN(len)); | ||
1243 | end = start + len; | 1241 | end = start + len; |
1244 | 1242 | ||
1245 | for (nstart = start; nstart < end; nstart = nend) { | 1243 | for (nstart = start; nstart < end; nstart = nend) { |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3612fbb32e9d..039ddbc574e9 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void) | |||
2163 | */ | 2163 | */ |
2164 | if (hstate_is_gigantic(h)) | 2164 | if (hstate_is_gigantic(h)) |
2165 | adjust_managed_page_count(page, 1 << h->order); | 2165 | adjust_managed_page_count(page, 1 << h->order); |
2166 | cond_resched(); | ||
2166 | } | 2167 | } |
2167 | } | 2168 | } |
2168 | 2169 | ||
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index f185455b3406..c3bd5209da38 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -619,12 +619,13 @@ void kasan_kfree_large(void *ptr, unsigned long ip) | |||
619 | int kasan_module_alloc(void *addr, size_t size) | 619 | int kasan_module_alloc(void *addr, size_t size) |
620 | { | 620 | { |
621 | void *ret; | 621 | void *ret; |
622 | size_t scaled_size; | ||
622 | size_t shadow_size; | 623 | size_t shadow_size; |
623 | unsigned long shadow_start; | 624 | unsigned long shadow_start; |
624 | 625 | ||
625 | shadow_start = (unsigned long)kasan_mem_to_shadow(addr); | 626 | shadow_start = (unsigned long)kasan_mem_to_shadow(addr); |
626 | shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, | 627 | scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT; |
627 | PAGE_SIZE); | 628 | shadow_size = round_up(scaled_size, PAGE_SIZE); |
628 | 629 | ||
629 | if (WARN_ON(!PAGE_ALIGNED(shadow_start))) | 630 | if (WARN_ON(!PAGE_ALIGNED(shadow_start))) |
630 | return -EINVAL; | 631 | return -EINVAL; |
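The kasan_module_alloc() change reorders the scaling and the rounding: shifting the size down by the shadow scale first can drop a sub-granule tail before it is accounted for, leaving the shadow mapping one page short. A worked example, assuming the usual 1/8 shadow scale (KASAN_SHADOW_SCALE_SHIFT == 3, KASAN_SHADOW_MASK == 7) and 4 KiB pages; round_up() is written out in its power-of-two form for the sketch:

#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))	/* y must be a power of two */

size_t size = 8 * 4096 + 1;			/* module region: 8 pages plus one byte */

/* old: the extra byte is lost in the shift, so only 8 pages' worth
 * of the region ends up covered by shadow */
size_t old_shadow = round_up(size >> 3, 4096);	/* 32769 >> 3 = 4096 -> 4096 bytes */

/* new: round to whole shadow bytes first, then to whole pages */
size_t scaled     = (size + 7) >> 3;		/* 4097 shadow bytes */
size_t new_shadow = round_up(scaled, 4096);	/* -> 8192 bytes: the tail is covered */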
diff --git a/mm/memblock.c b/mm/memblock.c index 03d48d8835ba..11e46f83e1ad 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -227,7 +227,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, | |||
227 | * so we use WARN_ONCE() here to see the stack trace if | 227 | * so we use WARN_ONCE() here to see the stack trace if |
228 | * fail happens. | 228 | * fail happens. |
229 | */ | 229 | */ |
230 | WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n"); | 230 | WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), |
231 | "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); | ||
231 | } | 232 | } |
232 | 233 | ||
233 | return __memblock_find_range_top_down(start, end, size, align, nid, | 234 | return __memblock_find_range_top_down(start, end, size, align, nid, |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -186,8 +186,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) | |||
186 | return next; | 186 | return next; |
187 | } | 187 | } |
188 | 188 | ||
189 | static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf); | 189 | static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, |
190 | 190 | struct list_head *uf); | |
191 | SYSCALL_DEFINE1(brk, unsigned long, brk) | 191 | SYSCALL_DEFINE1(brk, unsigned long, brk) |
192 | { | 192 | { |
193 | unsigned long retval; | 193 | unsigned long retval; |
@@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
245 | goto out; | 245 | goto out; |
246 | 246 | ||
247 | /* Ok, looks good - let it rip. */ | 247 | /* Ok, looks good - let it rip. */ |
248 | if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0) | 248 | if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) |
249 | goto out; | 249 | goto out; |
250 | 250 | ||
251 | set_brk: | 251 | set_brk: |
@@ -2929,21 +2929,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm) | |||
2929 | * anonymous maps. eventually we may be able to do some | 2929 | * anonymous maps. eventually we may be able to do some |
2930 | * brk-specific accounting here. | 2930 | * brk-specific accounting here. |
2931 | */ | 2931 | */ |
2932 | static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf) | 2932 | static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) |
2933 | { | 2933 | { |
2934 | struct mm_struct *mm = current->mm; | 2934 | struct mm_struct *mm = current->mm; |
2935 | struct vm_area_struct *vma, *prev; | 2935 | struct vm_area_struct *vma, *prev; |
2936 | unsigned long len; | ||
2937 | struct rb_node **rb_link, *rb_parent; | 2936 | struct rb_node **rb_link, *rb_parent; |
2938 | pgoff_t pgoff = addr >> PAGE_SHIFT; | 2937 | pgoff_t pgoff = addr >> PAGE_SHIFT; |
2939 | int error; | 2938 | int error; |
2940 | 2939 | ||
2941 | len = PAGE_ALIGN(request); | ||
2942 | if (len < request) | ||
2943 | return -ENOMEM; | ||
2944 | if (!len) | ||
2945 | return 0; | ||
2946 | |||
2947 | /* Until we need other flags, refuse anything except VM_EXEC. */ | 2940 | /* Until we need other flags, refuse anything except VM_EXEC. */ |
2948 | if ((flags & (~VM_EXEC)) != 0) | 2941 | if ((flags & (~VM_EXEC)) != 0) |
2949 | return -EINVAL; | 2942 | return -EINVAL; |
@@ -3015,18 +3008,20 @@ out: | |||
3015 | return 0; | 3008 | return 0; |
3016 | } | 3009 | } |
3017 | 3010 | ||
3018 | static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf) | 3011 | int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) |
3019 | { | ||
3020 | return do_brk_flags(addr, len, 0, uf); | ||
3021 | } | ||
3022 | |||
3023 | int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags) | ||
3024 | { | 3012 | { |
3025 | struct mm_struct *mm = current->mm; | 3013 | struct mm_struct *mm = current->mm; |
3014 | unsigned long len; | ||
3026 | int ret; | 3015 | int ret; |
3027 | bool populate; | 3016 | bool populate; |
3028 | LIST_HEAD(uf); | 3017 | LIST_HEAD(uf); |
3029 | 3018 | ||
3019 | len = PAGE_ALIGN(request); | ||
3020 | if (len < request) | ||
3021 | return -ENOMEM; | ||
3022 | if (!len) | ||
3023 | return 0; | ||
3024 | |||
3030 | if (down_write_killable(&mm->mmap_sem)) | 3025 | if (down_write_killable(&mm->mmap_sem)) |
3031 | return -EINTR; | 3026 | return -EINTR; |
3032 | 3027 | ||
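Moving the length handling out of do_brk_flags() and into vm_brk_flags() keeps the page-align-and-overflow check at the entry point that brk() and external callers share. The check works because PAGE_ALIGN() adds PAGE_SIZE - 1 before masking, so a length close to ULONG_MAX wraps and the aligned result comes out smaller than the request. A small sketch with 4 KiB pages; the macros are spelled out in their usual power-of-two form and the function name is invented:

#include <errno.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* e.g. request == ~0UL - 100: the addition wraps, PAGE_ALIGN() yields 0,
 * and 0 < request exposes the overflow before any mapping is attempted */
long demo_brk_len(unsigned long request)
{
	unsigned long len = PAGE_ALIGN(request);

	if (len < request)
		return -ENOMEM;
	return len;		/* 0 simply means there is nothing to map */
}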
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1521100f1e63..5d800d61ddb7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -6847,6 +6847,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) | |||
6847 | /* Initialise every node */ | 6847 | /* Initialise every node */ |
6848 | mminit_verify_pageflags_layout(); | 6848 | mminit_verify_pageflags_layout(); |
6849 | setup_nr_node_ids(); | 6849 | setup_nr_node_ids(); |
6850 | zero_resv_unavail(); | ||
6850 | for_each_online_node(nid) { | 6851 | for_each_online_node(nid) { |
6851 | pg_data_t *pgdat = NODE_DATA(nid); | 6852 | pg_data_t *pgdat = NODE_DATA(nid); |
6852 | free_area_init_node(nid, NULL, | 6853 | free_area_init_node(nid, NULL, |
@@ -6857,7 +6858,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) | |||
6857 | node_set_state(nid, N_MEMORY); | 6858 | node_set_state(nid, N_MEMORY); |
6858 | check_for_memory(pgdat, nid); | 6859 | check_for_memory(pgdat, nid); |
6859 | } | 6860 | } |
6860 | zero_resv_unavail(); | ||
6861 | } | 6861 | } |
6862 | 6862 | ||
6863 | static int __init cmdline_parse_core(char *p, unsigned long *core, | 6863 | static int __init cmdline_parse_core(char *p, unsigned long *core, |
@@ -7033,9 +7033,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve) | |||
7033 | 7033 | ||
7034 | void __init free_area_init(unsigned long *zones_size) | 7034 | void __init free_area_init(unsigned long *zones_size) |
7035 | { | 7035 | { |
7036 | zero_resv_unavail(); | ||
7036 | free_area_init_node(0, zones_size, | 7037 | free_area_init_node(0, zones_size, |
7037 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); | 7038 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); |
7038 | zero_resv_unavail(); | ||
7039 | } | 7039 | } |
7040 | 7040 | ||
7041 | static int page_alloc_cpu_dead(unsigned int cpu) | 7041 | static int page_alloc_cpu_dead(unsigned int cpu) |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <linux/backing-dev.h> | 64 | #include <linux/backing-dev.h> |
65 | #include <linux/page_idle.h> | 65 | #include <linux/page_idle.h> |
66 | #include <linux/memremap.h> | 66 | #include <linux/memremap.h> |
67 | #include <linux/userfaultfd_k.h> | ||
67 | 68 | ||
68 | #include <asm/tlbflush.h> | 69 | #include <asm/tlbflush.h> |
69 | 70 | ||
@@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1481 | set_pte_at(mm, address, pvmw.pte, pteval); | 1482 | set_pte_at(mm, address, pvmw.pte, pteval); |
1482 | } | 1483 | } |
1483 | 1484 | ||
1484 | } else if (pte_unused(pteval)) { | 1485 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { |
1485 | /* | 1486 | /* |
1486 | * The guest indicated that the page content is of no | 1487 | * The guest indicated that the page content is of no |
1487 | * interest anymore. Simply discard the pte, vmscan | 1488 | * interest anymore. Simply discard the pte, vmscan |
1488 | * will take care of the rest. | 1489 | * will take care of the rest. |
1490 | * A future reference will then fault in a new zero | ||
1491 | * page. When userfaultfd is active, we must not drop | ||
1492 | * this page though, as its main user (postcopy | ||
1493 | * migration) will not expect userfaults on already | ||
1494 | * copied pages. | ||
1489 | */ | 1495 | */ |
1490 | dec_mm_counter(mm, mm_counter(page)); | 1496 | dec_mm_counter(mm, mm_counter(page)); |
1491 | /* We have to invalidate as we cleared the pte */ | 1497 | /* We have to invalidate as we cleared the pte */ |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 73a65789271b..8ccee3d01822 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, | |||
693 | out_unlock: | 693 | out_unlock: |
694 | rcu_read_unlock(); | 694 | rcu_read_unlock(); |
695 | out: | 695 | out: |
696 | NAPI_GRO_CB(skb)->flush |= flush; | 696 | skb_gro_flush_final(skb, pp, flush); |
697 | 697 | ||
698 | return pp; | 698 | return pp; |
699 | } | 699 | } |
diff --git a/net/9p/client.c b/net/9p/client.c index 18c5271910dc..5c1343195292 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -225,7 +225,8 @@ static int parse_opts(char *opts, struct p9_client *clnt) | |||
225 | } | 225 | } |
226 | 226 | ||
227 | free_and_return: | 227 | free_and_return: |
228 | v9fs_put_trans(clnt->trans_mod); | 228 | if (ret) |
229 | v9fs_put_trans(clnt->trans_mod); | ||
229 | kfree(tmp_options); | 230 | kfree(tmp_options); |
230 | return ret; | 231 | return ret; |
231 | } | 232 | } |
diff --git a/net/Makefile b/net/Makefile index 13ec0d5415c7..bdaf53925acd 100644 --- a/net/Makefile +++ b/net/Makefile | |||
@@ -20,11 +20,7 @@ obj-$(CONFIG_TLS) += tls/ | |||
20 | obj-$(CONFIG_XFRM) += xfrm/ | 20 | obj-$(CONFIG_XFRM) += xfrm/ |
21 | obj-$(CONFIG_UNIX) += unix/ | 21 | obj-$(CONFIG_UNIX) += unix/ |
22 | obj-$(CONFIG_NET) += ipv6/ | 22 | obj-$(CONFIG_NET) += ipv6/ |
23 | ifneq ($(CC_CAN_LINK),y) | ||
24 | $(warning CC cannot link executables. Skipping bpfilter.) | ||
25 | else | ||
26 | obj-$(CONFIG_BPFILTER) += bpfilter/ | 23 | obj-$(CONFIG_BPFILTER) += bpfilter/ |
27 | endif | ||
28 | obj-$(CONFIG_PACKET) += packet/ | 24 | obj-$(CONFIG_PACKET) += packet/ |
29 | obj-$(CONFIG_NET_KEY) += key/ | 25 | obj-$(CONFIG_NET_KEY) += key/ |
30 | obj-$(CONFIG_BRIDGE) += bridge/ | 26 | obj-$(CONFIG_BRIDGE) += bridge/ |
diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig index a948b072c28f..76deb6615883 100644 --- a/net/bpfilter/Kconfig +++ b/net/bpfilter/Kconfig | |||
@@ -1,6 +1,5 @@ | |||
1 | menuconfig BPFILTER | 1 | menuconfig BPFILTER |
2 | bool "BPF based packet filtering framework (BPFILTER)" | 2 | bool "BPF based packet filtering framework (BPFILTER)" |
3 | default n | ||
4 | depends on NET && BPF && INET | 3 | depends on NET && BPF && INET |
5 | help | 4 | help |
6 | This builds experimental bpfilter framework that is aiming to | 5 | This builds experimental bpfilter framework that is aiming to |
@@ -9,6 +8,7 @@ menuconfig BPFILTER | |||
9 | if BPFILTER | 8 | if BPFILTER |
10 | config BPFILTER_UMH | 9 | config BPFILTER_UMH |
11 | tristate "bpfilter kernel module with user mode helper" | 10 | tristate "bpfilter kernel module with user mode helper" |
11 | depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC)) | ||
12 | default m | 12 | default m |
13 | help | 13 | help |
14 | This builds bpfilter kernel module with embedded user mode helper | 14 | This builds bpfilter kernel module with embedded user mode helper |
diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile index 051dc18b8ccb..39c6980b5d99 100644 --- a/net/bpfilter/Makefile +++ b/net/bpfilter/Makefile | |||
@@ -15,20 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y) | |||
15 | HOSTLDFLAGS += -static | 15 | HOSTLDFLAGS += -static |
16 | endif | 16 | endif |
17 | 17 | ||
18 | # a bit of elf magic to convert bpfilter_umh binary into a binary blob | 18 | $(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh |
19 | # inside bpfilter_umh.o elf file referenced by | ||
20 | # _binary_net_bpfilter_bpfilter_umh_start symbol | ||
21 | # which bpfilter_kern.c passes further into umh blob loader at run-time | ||
22 | quiet_cmd_copy_umh = GEN $@ | ||
23 | cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \ | ||
24 | $(OBJCOPY) -I binary \ | ||
25 | `LC_ALL=C $(OBJDUMP) -f net/bpfilter/bpfilter_umh \ | ||
26 | |awk -F' |,' '/file format/{print "-O",$$NF} \ | ||
27 | /^architecture:/{print "-B",$$2}'` \ | ||
28 | --rename-section .data=.init.rodata $< $@ | ||
29 | |||
30 | $(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh | ||
31 | $(call cmd,copy_umh) | ||
32 | 19 | ||
33 | obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o | 20 | obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o |
34 | bpfilter-objs += bpfilter_kern.o bpfilter_umh.o | 21 | bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o |
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 09522573f611..f0fc182d3db7 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c | |||
@@ -10,11 +10,8 @@ | |||
10 | #include <linux/file.h> | 10 | #include <linux/file.h> |
11 | #include "msgfmt.h" | 11 | #include "msgfmt.h" |
12 | 12 | ||
13 | #define UMH_start _binary_net_bpfilter_bpfilter_umh_start | 13 | extern char bpfilter_umh_start; |
14 | #define UMH_end _binary_net_bpfilter_bpfilter_umh_end | 14 | extern char bpfilter_umh_end; |
15 | |||
16 | extern char UMH_start; | ||
17 | extern char UMH_end; | ||
18 | 15 | ||
19 | static struct umh_info info; | 16 | static struct umh_info info; |
20 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ | 17 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ |
@@ -93,7 +90,9 @@ static int __init load_umh(void) | |||
93 | int err; | 90 | int err; |
94 | 91 | ||
95 | /* fork usermode process */ | 92 | /* fork usermode process */ |
96 | err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info); | 93 | err = fork_usermode_blob(&bpfilter_umh_start, |
94 | &bpfilter_umh_end - &bpfilter_umh_start, | ||
95 | &info); | ||
97 | if (err) | 96 | if (err) |
98 | return err; | 97 | return err; |
99 | pr_info("Loaded bpfilter_umh pid %d\n", info.pid); | 98 | pr_info("Loaded bpfilter_umh pid %d\n", info.pid); |
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S new file mode 100644 index 000000000000..40311d10d2f2 --- /dev/null +++ b/net/bpfilter/bpfilter_umh_blob.S | |||
@@ -0,0 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | .section .init.rodata, "a" | ||
3 | .global bpfilter_umh_start | ||
4 | bpfilter_umh_start: | ||
5 | .incbin "net/bpfilter/bpfilter_umh" | ||
6 | .global bpfilter_umh_end | ||
7 | bpfilter_umh_end: | ||
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index a04e1e88bf3a..50537ff961a7 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c | |||
@@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
285 | if (ifr->ifr_qlen < 0) | 285 | if (ifr->ifr_qlen < 0) |
286 | return -EINVAL; | 286 | return -EINVAL; |
287 | if (dev->tx_queue_len ^ ifr->ifr_qlen) { | 287 | if (dev->tx_queue_len ^ ifr->ifr_qlen) { |
288 | unsigned int orig_len = dev->tx_queue_len; | 288 | err = dev_change_tx_queue_len(dev, ifr->ifr_qlen); |
289 | 289 | if (err) | |
290 | dev->tx_queue_len = ifr->ifr_qlen; | ||
291 | err = call_netdevice_notifiers( | ||
292 | NETDEV_CHANGE_TX_QUEUE_LEN, dev); | ||
293 | err = notifier_to_errno(err); | ||
294 | if (err) { | ||
295 | dev->tx_queue_len = orig_len; | ||
296 | return err; | 290 | return err; |
297 | } | ||
298 | } | 291 | } |
299 | return 0; | 292 | return 0; |
300 | 293 | ||
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 126ffc5bc630..f64aa13811ea 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, | |||
416 | if (rule->mark && r->mark != rule->mark) | 416 | if (rule->mark && r->mark != rule->mark) |
417 | continue; | 417 | continue; |
418 | 418 | ||
419 | if (rule->suppress_ifgroup != -1 && | ||
420 | r->suppress_ifgroup != rule->suppress_ifgroup) | ||
421 | continue; | ||
422 | |||
423 | if (rule->suppress_prefixlen != -1 && | ||
424 | r->suppress_prefixlen != rule->suppress_prefixlen) | ||
425 | continue; | ||
426 | |||
419 | if (rule->mark_mask && r->mark_mask != rule->mark_mask) | 427 | if (rule->mark_mask && r->mark_mask != rule->mark_mask) |
420 | continue; | 428 | continue; |
421 | 429 | ||
@@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, | |||
436 | if (rule->ip_proto && r->ip_proto != rule->ip_proto) | 444 | if (rule->ip_proto && r->ip_proto != rule->ip_proto) |
437 | continue; | 445 | continue; |
438 | 446 | ||
447 | if (rule->proto && r->proto != rule->proto) | ||
448 | continue; | ||
449 | |||
439 | if (fib_rule_port_range_set(&rule->sport_range) && | 450 | if (fib_rule_port_range_set(&rule->sport_range) && |
440 | !fib_rule_port_range_compare(&r->sport_range, | 451 | !fib_rule_port_range_compare(&r->sport_range, |
441 | &rule->sport_range)) | 452 | &rule->sport_range)) |
@@ -645,6 +656,73 @@ errout: | |||
645 | return err; | 656 | return err; |
646 | } | 657 | } |
647 | 658 | ||
659 | static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, | ||
660 | struct nlattr **tb, struct fib_rule *rule) | ||
661 | { | ||
662 | struct fib_rule *r; | ||
663 | |||
664 | list_for_each_entry(r, &ops->rules_list, list) { | ||
665 | if (r->action != rule->action) | ||
666 | continue; | ||
667 | |||
668 | if (r->table != rule->table) | ||
669 | continue; | ||
670 | |||
671 | if (r->pref != rule->pref) | ||
672 | continue; | ||
673 | |||
674 | if (memcmp(r->iifname, rule->iifname, IFNAMSIZ)) | ||
675 | continue; | ||
676 | |||
677 | if (memcmp(r->oifname, rule->oifname, IFNAMSIZ)) | ||
678 | continue; | ||
679 | |||
680 | if (r->mark != rule->mark) | ||
681 | continue; | ||
682 | |||
683 | if (r->suppress_ifgroup != rule->suppress_ifgroup) | ||
684 | continue; | ||
685 | |||
686 | if (r->suppress_prefixlen != rule->suppress_prefixlen) | ||
687 | continue; | ||
688 | |||
689 | if (r->mark_mask != rule->mark_mask) | ||
690 | continue; | ||
691 | |||
692 | if (r->tun_id != rule->tun_id) | ||
693 | continue; | ||
694 | |||
695 | if (r->fr_net != rule->fr_net) | ||
696 | continue; | ||
697 | |||
698 | if (r->l3mdev != rule->l3mdev) | ||
699 | continue; | ||
700 | |||
701 | if (!uid_eq(r->uid_range.start, rule->uid_range.start) || | ||
702 | !uid_eq(r->uid_range.end, rule->uid_range.end)) | ||
703 | continue; | ||
704 | |||
705 | if (r->ip_proto != rule->ip_proto) | ||
706 | continue; | ||
707 | |||
708 | if (r->proto != rule->proto) | ||
709 | continue; | ||
710 | |||
711 | if (!fib_rule_port_range_compare(&r->sport_range, | ||
712 | &rule->sport_range)) | ||
713 | continue; | ||
714 | |||
715 | if (!fib_rule_port_range_compare(&r->dport_range, | ||
716 | &rule->dport_range)) | ||
717 | continue; | ||
718 | |||
719 | if (!ops->compare(r, frh, tb)) | ||
720 | continue; | ||
721 | return 1; | ||
722 | } | ||
723 | return 0; | ||
724 | } | ||
725 | |||
648 | int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, | 726 | int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, |
649 | struct netlink_ext_ack *extack) | 727 | struct netlink_ext_ack *extack) |
650 | { | 728 | { |
@@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
679 | goto errout; | 757 | goto errout; |
680 | 758 | ||
681 | if ((nlh->nlmsg_flags & NLM_F_EXCL) && | 759 | if ((nlh->nlmsg_flags & NLM_F_EXCL) && |
682 | rule_find(ops, frh, tb, rule, user_priority)) { | 760 | rule_exists(ops, frh, tb, rule)) { |
683 | err = -EEXIST; | 761 | err = -EEXIST; |
684 | goto errout_free; | 762 | goto errout_free; |
685 | } | 763 | } |
diff --git a/net/core/filter.c b/net/core/filter.c index e7f12e9f598c..0ca6907d7efe 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -4073,8 +4073,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, | |||
4073 | memcpy(params->smac, dev->dev_addr, ETH_ALEN); | 4073 | memcpy(params->smac, dev->dev_addr, ETH_ALEN); |
4074 | params->h_vlan_TCI = 0; | 4074 | params->h_vlan_TCI = 0; |
4075 | params->h_vlan_proto = 0; | 4075 | params->h_vlan_proto = 0; |
4076 | params->ifindex = dev->ifindex; | ||
4076 | 4077 | ||
4077 | return dev->ifindex; | 4078 | return 0; |
4078 | } | 4079 | } |
4079 | #endif | 4080 | #endif |
4080 | 4081 | ||
@@ -4098,7 +4099,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4098 | /* verify forwarding is enabled on this interface */ | 4099 | /* verify forwarding is enabled on this interface */ |
4099 | in_dev = __in_dev_get_rcu(dev); | 4100 | in_dev = __in_dev_get_rcu(dev); |
4100 | if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) | 4101 | if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) |
4101 | return 0; | 4102 | return BPF_FIB_LKUP_RET_FWD_DISABLED; |
4102 | 4103 | ||
4103 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { | 4104 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { |
4104 | fl4.flowi4_iif = 1; | 4105 | fl4.flowi4_iif = 1; |
@@ -4123,7 +4124,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4123 | 4124 | ||
4124 | tb = fib_get_table(net, tbid); | 4125 | tb = fib_get_table(net, tbid); |
4125 | if (unlikely(!tb)) | 4126 | if (unlikely(!tb)) |
4126 | return 0; | 4127 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4127 | 4128 | ||
4128 | err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); | 4129 | err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); |
4129 | } else { | 4130 | } else { |
@@ -4135,8 +4136,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4135 | err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); | 4136 | err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); |
4136 | } | 4137 | } |
4137 | 4138 | ||
4138 | if (err || res.type != RTN_UNICAST) | 4139 | if (err) { |
4139 | return 0; | 4140 | /* map fib lookup errors to RTN_ type */ |
4141 | if (err == -EINVAL) | ||
4142 | return BPF_FIB_LKUP_RET_BLACKHOLE; | ||
4143 | if (err == -EHOSTUNREACH) | ||
4144 | return BPF_FIB_LKUP_RET_UNREACHABLE; | ||
4145 | if (err == -EACCES) | ||
4146 | return BPF_FIB_LKUP_RET_PROHIBIT; | ||
4147 | |||
4148 | return BPF_FIB_LKUP_RET_NOT_FWDED; | ||
4149 | } | ||
4150 | |||
4151 | if (res.type != RTN_UNICAST) | ||
4152 | return BPF_FIB_LKUP_RET_NOT_FWDED; | ||
4140 | 4153 | ||
4141 | if (res.fi->fib_nhs > 1) | 4154 | if (res.fi->fib_nhs > 1) |
4142 | fib_select_path(net, &res, &fl4, NULL); | 4155 | fib_select_path(net, &res, &fl4, NULL); |
@@ -4144,19 +4157,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4144 | if (check_mtu) { | 4157 | if (check_mtu) { |
4145 | mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); | 4158 | mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); |
4146 | if (params->tot_len > mtu) | 4159 | if (params->tot_len > mtu) |
4147 | return 0; | 4160 | return BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4148 | } | 4161 | } |
4149 | 4162 | ||
4150 | nh = &res.fi->fib_nh[res.nh_sel]; | 4163 | nh = &res.fi->fib_nh[res.nh_sel]; |
4151 | 4164 | ||
4152 | /* do not handle lwt encaps right now */ | 4165 | /* do not handle lwt encaps right now */ |
4153 | if (nh->nh_lwtstate) | 4166 | if (nh->nh_lwtstate) |
4154 | return 0; | 4167 | return BPF_FIB_LKUP_RET_UNSUPP_LWT; |
4155 | 4168 | ||
4156 | dev = nh->nh_dev; | 4169 | dev = nh->nh_dev; |
4157 | if (unlikely(!dev)) | ||
4158 | return 0; | ||
4159 | |||
4160 | if (nh->nh_gw) | 4170 | if (nh->nh_gw) |
4161 | params->ipv4_dst = nh->nh_gw; | 4171 | params->ipv4_dst = nh->nh_gw; |
4162 | 4172 | ||
@@ -4166,10 +4176,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4166 | * rcu_read_lock_bh is not needed here | 4176 | * rcu_read_lock_bh is not needed here |
4167 | */ | 4177 | */ |
4168 | neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst); | 4178 | neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst); |
4169 | if (neigh) | 4179 | if (!neigh) |
4170 | return bpf_fib_set_fwd_params(params, neigh, dev); | 4180 | return BPF_FIB_LKUP_RET_NO_NEIGH; |
4171 | 4181 | ||
4172 | return 0; | 4182 | return bpf_fib_set_fwd_params(params, neigh, dev); |
4173 | } | 4183 | } |
4174 | #endif | 4184 | #endif |
4175 | 4185 | ||
@@ -4190,7 +4200,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4190 | 4200 | ||
4191 | /* link local addresses are never forwarded */ | 4201 | /* link local addresses are never forwarded */ |
4192 | if (rt6_need_strict(dst) || rt6_need_strict(src)) | 4202 | if (rt6_need_strict(dst) || rt6_need_strict(src)) |
4193 | return 0; | 4203 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4194 | 4204 | ||
4195 | dev = dev_get_by_index_rcu(net, params->ifindex); | 4205 | dev = dev_get_by_index_rcu(net, params->ifindex); |
4196 | if (unlikely(!dev)) | 4206 | if (unlikely(!dev)) |
@@ -4198,7 +4208,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4198 | 4208 | ||
4199 | idev = __in6_dev_get_safely(dev); | 4209 | idev = __in6_dev_get_safely(dev); |
4200 | if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) | 4210 | if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) |
4201 | return 0; | 4211 | return BPF_FIB_LKUP_RET_FWD_DISABLED; |
4202 | 4212 | ||
4203 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { | 4213 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { |
4204 | fl6.flowi6_iif = 1; | 4214 | fl6.flowi6_iif = 1; |
@@ -4225,7 +4235,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4225 | 4235 | ||
4226 | tb = ipv6_stub->fib6_get_table(net, tbid); | 4236 | tb = ipv6_stub->fib6_get_table(net, tbid); |
4227 | if (unlikely(!tb)) | 4237 | if (unlikely(!tb)) |
4228 | return 0; | 4238 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4229 | 4239 | ||
4230 | f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict); | 4240 | f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict); |
4231 | } else { | 4241 | } else { |
@@ -4238,11 +4248,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4238 | } | 4248 | } |
4239 | 4249 | ||
4240 | if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry)) | 4250 | if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry)) |
4241 | return 0; | 4251 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4252 | |||
4253 | if (unlikely(f6i->fib6_flags & RTF_REJECT)) { | ||
4254 | switch (f6i->fib6_type) { | ||
4255 | case RTN_BLACKHOLE: | ||
4256 | return BPF_FIB_LKUP_RET_BLACKHOLE; | ||
4257 | case RTN_UNREACHABLE: | ||
4258 | return BPF_FIB_LKUP_RET_UNREACHABLE; | ||
4259 | case RTN_PROHIBIT: | ||
4260 | return BPF_FIB_LKUP_RET_PROHIBIT; | ||
4261 | default: | ||
4262 | return BPF_FIB_LKUP_RET_NOT_FWDED; | ||
4263 | } | ||
4264 | } | ||
4242 | 4265 | ||
4243 | if (unlikely(f6i->fib6_flags & RTF_REJECT || | 4266 | if (f6i->fib6_type != RTN_UNICAST) |
4244 | f6i->fib6_type != RTN_UNICAST)) | 4267 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4245 | return 0; | ||
4246 | 4268 | ||
4247 | if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0) | 4269 | if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0) |
4248 | f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6, | 4270 | f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6, |
@@ -4252,11 +4274,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4252 | if (check_mtu) { | 4274 | if (check_mtu) { |
4253 | mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src); | 4275 | mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src); |
4254 | if (params->tot_len > mtu) | 4276 | if (params->tot_len > mtu) |
4255 | return 0; | 4277 | return BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4256 | } | 4278 | } |
4257 | 4279 | ||
4258 | if (f6i->fib6_nh.nh_lwtstate) | 4280 | if (f6i->fib6_nh.nh_lwtstate) |
4259 | return 0; | 4281 | return BPF_FIB_LKUP_RET_UNSUPP_LWT; |
4260 | 4282 | ||
4261 | if (f6i->fib6_flags & RTF_GATEWAY) | 4283 | if (f6i->fib6_flags & RTF_GATEWAY) |
4262 | *dst = f6i->fib6_nh.nh_gw; | 4284 | *dst = f6i->fib6_nh.nh_gw; |
@@ -4270,10 +4292,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |||
4270 | */ | 4292 | */ |
4271 | neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128, | 4293 | neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128, |
4272 | ndisc_hashfn, dst, dev); | 4294 | ndisc_hashfn, dst, dev); |
4273 | if (neigh) | 4295 | if (!neigh) |
4274 | return bpf_fib_set_fwd_params(params, neigh, dev); | 4296 | return BPF_FIB_LKUP_RET_NO_NEIGH; |
4275 | 4297 | ||
4276 | return 0; | 4298 | return bpf_fib_set_fwd_params(params, neigh, dev); |
4277 | } | 4299 | } |
4278 | #endif | 4300 | #endif |
4279 | 4301 | ||
@@ -4315,7 +4337,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, | |||
4315 | struct bpf_fib_lookup *, params, int, plen, u32, flags) | 4337 | struct bpf_fib_lookup *, params, int, plen, u32, flags) |
4316 | { | 4338 | { |
4317 | struct net *net = dev_net(skb->dev); | 4339 | struct net *net = dev_net(skb->dev); |
4318 | int index = -EAFNOSUPPORT; | 4340 | int rc = -EAFNOSUPPORT; |
4319 | 4341 | ||
4320 | if (plen < sizeof(*params)) | 4342 | if (plen < sizeof(*params)) |
4321 | return -EINVAL; | 4343 | return -EINVAL; |
@@ -4326,25 +4348,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, | |||
4326 | switch (params->family) { | 4348 | switch (params->family) { |
4327 | #if IS_ENABLED(CONFIG_INET) | 4349 | #if IS_ENABLED(CONFIG_INET) |
4328 | case AF_INET: | 4350 | case AF_INET: |
4329 | index = bpf_ipv4_fib_lookup(net, params, flags, false); | 4351 | rc = bpf_ipv4_fib_lookup(net, params, flags, false); |
4330 | break; | 4352 | break; |
4331 | #endif | 4353 | #endif |
4332 | #if IS_ENABLED(CONFIG_IPV6) | 4354 | #if IS_ENABLED(CONFIG_IPV6) |
4333 | case AF_INET6: | 4355 | case AF_INET6: |
4334 | index = bpf_ipv6_fib_lookup(net, params, flags, false); | 4356 | rc = bpf_ipv6_fib_lookup(net, params, flags, false); |
4335 | break; | 4357 | break; |
4336 | #endif | 4358 | #endif |
4337 | } | 4359 | } |
4338 | 4360 | ||
4339 | if (index > 0) { | 4361 | if (!rc) { |
4340 | struct net_device *dev; | 4362 | struct net_device *dev; |
4341 | 4363 | ||
4342 | dev = dev_get_by_index_rcu(net, index); | 4364 | dev = dev_get_by_index_rcu(net, params->ifindex); |
4343 | if (!is_skb_forwardable(dev, skb)) | 4365 | if (!is_skb_forwardable(dev, skb)) |
4344 | index = 0; | 4366 | rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4345 | } | 4367 | } |
4346 | 4368 | ||
4347 | return index; | 4369 | return rc; |
4348 | } | 4370 | } |
4349 | 4371 | ||
4350 | static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { | 4372 | static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { |
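Note on the filter.c change above: bpf_fib_lookup() no longer returns an egress ifindex; it now returns 0 on success, with the egress device reported in params->ifindex, or a positive BPF_FIB_LKUP_RET_* reason code. A minimal caller sketch under that contract follows; the map definition and "bpf_helpers.h" header follow the samples/bpf conventions seen later in this diff, and the program body is illustrative rather than the full xdp_fwd sample.

#include <linux/bpf.h>
#include "bpf_helpers.h"        /* SEC(), helper declarations (samples/bpf) */

struct bpf_map_def SEC("maps") tx_port = {
        .type = BPF_MAP_TYPE_DEVMAP,
        .key_size = sizeof(int),
        .value_size = sizeof(int),
        .max_entries = 64,
};

SEC("xdp_fib_sketch")
int xdp_fib_sketch(struct xdp_md *ctx)
{
        struct bpf_fib_lookup params = {};
        int rc;

        /* populate family, addresses, tot_len, ... as in xdp_fwd_kern.c */
        params.ifindex = ctx->ingress_ifindex;

        rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
        if (rc != 0)            /* a BPF_FIB_LKUP_RET_* reason code */
                return XDP_PASS;

        /* success: the egress device is reported in params.ifindex */
        return bpf_redirect_map(&tx_port, params.ifindex, 0);
}

char _license[] SEC("license") = "GPL";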
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c642304f178c..eba8dae22c25 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -5276,8 +5276,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
5276 | if (npages >= 1 << order) { | 5276 | if (npages >= 1 << order) { |
5277 | page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | | 5277 | page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | |
5278 | __GFP_COMP | | 5278 | __GFP_COMP | |
5279 | __GFP_NOWARN | | 5279 | __GFP_NOWARN, |
5280 | __GFP_NORETRY, | ||
5281 | order); | 5280 | order); |
5282 | if (page) | 5281 | if (page) |
5283 | goto fill_page; | 5282 | goto fill_page; |
diff --git a/net/core/sock.c b/net/core/sock.c index bcc41829a16d..9e8f65585b81 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot) | |||
3243 | 3243 | ||
3244 | rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, | 3244 | rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, |
3245 | rsk_prot->obj_size, 0, | 3245 | rsk_prot->obj_size, 0, |
3246 | prot->slab_flags, NULL); | 3246 | SLAB_ACCOUNT | prot->slab_flags, |
3247 | NULL); | ||
3247 | 3248 | ||
3248 | if (!rsk_prot->slab) { | 3249 | if (!rsk_prot->slab) { |
3249 | pr_crit("%s: Can't create request sock SLAB cache!\n", | 3250 | pr_crit("%s: Can't create request sock SLAB cache!\n", |
@@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
3258 | if (alloc_slab) { | 3259 | if (alloc_slab) { |
3259 | prot->slab = kmem_cache_create_usercopy(prot->name, | 3260 | prot->slab = kmem_cache_create_usercopy(prot->name, |
3260 | prot->obj_size, 0, | 3261 | prot->obj_size, 0, |
3261 | SLAB_HWCACHE_ALIGN | prot->slab_flags, | 3262 | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | |
3263 | prot->slab_flags, | ||
3262 | prot->useroffset, prot->usersize, | 3264 | prot->useroffset, prot->usersize, |
3263 | NULL); | 3265 | NULL); |
3264 | 3266 | ||
@@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
3281 | kmem_cache_create(prot->twsk_prot->twsk_slab_name, | 3283 | kmem_cache_create(prot->twsk_prot->twsk_slab_name, |
3282 | prot->twsk_prot->twsk_obj_size, | 3284 | prot->twsk_prot->twsk_obj_size, |
3283 | 0, | 3285 | 0, |
3286 | SLAB_ACCOUNT | | ||
3284 | prot->slab_flags, | 3287 | prot->slab_flags, |
3285 | NULL); | 3288 | NULL); |
3286 | if (prot->twsk_prot->twsk_slab == NULL) | 3289 | if (prot->twsk_prot->twsk_slab == NULL) |
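The sock.c hunks above add SLAB_ACCOUNT when creating the request-sock, protocol and timewait-sock caches, so objects allocated from them are charged to the allocating task's memory cgroup. A minimal sketch of that pattern with hypothetical names (my_proto_sock, my_proto_cachep); only the flag usage is taken from the diff.

#include <linux/errno.h>
#include <linux/slab.h>

struct my_proto_sock {          /* hypothetical per-socket object */
        unsigned long state;
};

static struct kmem_cache *my_proto_cachep;

static int __init my_proto_cache_init(void)
{
        /* SLAB_ACCOUNT makes allocations from this cache count against
         * the memory cgroup of the allocating task (kmemcg accounting).
         */
        my_proto_cachep = kmem_cache_create("my_proto_sock",
                                            sizeof(struct my_proto_sock), 0,
                                            SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
                                            NULL);
        return my_proto_cachep ? 0 : -ENOMEM;
}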
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 1540db65241a..c9ec1603666b 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -448,9 +448,7 @@ next_proto: | |||
448 | out_unlock: | 448 | out_unlock: |
449 | rcu_read_unlock(); | 449 | rcu_read_unlock(); |
450 | out: | 450 | out: |
451 | NAPI_GRO_CB(skb)->flush |= flush; | 451 | skb_gro_flush_final_remcsum(skb, pp, flush, &grc); |
452 | skb_gro_remcsum_cleanup(skb, &grc); | ||
453 | skb->remcsum_offload = 0; | ||
454 | 452 | ||
455 | return pp; | 453 | return pp; |
456 | } | 454 | } |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 1859c473b21a..6a7d980105f6 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, | |||
223 | out_unlock: | 223 | out_unlock: |
224 | rcu_read_unlock(); | 224 | rcu_read_unlock(); |
225 | out: | 225 | out: |
226 | NAPI_GRO_CB(skb)->flush |= flush; | 226 | skb_gro_flush_final(skb, pp, flush); |
227 | 227 | ||
228 | return pp; | 228 | return pp; |
229 | } | 229 | } |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d06247ba08b2..af0a857d8352 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -265,8 +265,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, | |||
265 | ipv4.sysctl_tcp_fastopen); | 265 | ipv4.sysctl_tcp_fastopen); |
266 | struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; | 266 | struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; |
267 | struct tcp_fastopen_context *ctxt; | 267 | struct tcp_fastopen_context *ctxt; |
268 | int ret; | ||
269 | u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ | 268 | u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ |
269 | __le32 key[4]; | ||
270 | int ret, i; | ||
270 | 271 | ||
271 | tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); | 272 | tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); |
272 | if (!tbl.data) | 273 | if (!tbl.data) |
@@ -275,11 +276,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, | |||
275 | rcu_read_lock(); | 276 | rcu_read_lock(); |
276 | ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx); | 277 | ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx); |
277 | if (ctxt) | 278 | if (ctxt) |
278 | memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); | 279 | memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); |
279 | else | 280 | else |
280 | memset(user_key, 0, sizeof(user_key)); | 281 | memset(key, 0, sizeof(key)); |
281 | rcu_read_unlock(); | 282 | rcu_read_unlock(); |
282 | 283 | ||
284 | for (i = 0; i < ARRAY_SIZE(key); i++) | ||
285 | user_key[i] = le32_to_cpu(key[i]); | ||
286 | |||
283 | snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", | 287 | snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", |
284 | user_key[0], user_key[1], user_key[2], user_key[3]); | 288 | user_key[0], user_key[1], user_key[2], user_key[3]); |
285 | ret = proc_dostring(&tbl, write, buffer, lenp, ppos); | 289 | ret = proc_dostring(&tbl, write, buffer, lenp, ppos); |
@@ -290,13 +294,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, | |||
290 | ret = -EINVAL; | 294 | ret = -EINVAL; |
291 | goto bad_key; | 295 | goto bad_key; |
292 | } | 296 | } |
293 | tcp_fastopen_reset_cipher(net, NULL, user_key, | 297 | |
298 | for (i = 0; i < ARRAY_SIZE(user_key); i++) | ||
299 | key[i] = cpu_to_le32(user_key[i]); | ||
300 | |||
301 | tcp_fastopen_reset_cipher(net, NULL, key, | ||
294 | TCP_FASTOPEN_KEY_LENGTH); | 302 | TCP_FASTOPEN_KEY_LENGTH); |
295 | } | 303 | } |
296 | 304 | ||
297 | bad_key: | 305 | bad_key: |
298 | pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", | 306 | pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", |
299 | user_key[0], user_key[1], user_key[2], user_key[3], | 307 | user_key[0], user_key[1], user_key[2], user_key[3], |
300 | (char *)tbl.data, ret); | 308 | (char *)tbl.data, ret); |
301 | kfree(tbl.data); | 309 | kfree(tbl.data); |
302 | return ret; | 310 | return ret; |
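The sysctl change above keeps the TCP fastopen key in a fixed little-endian layout and converts to and from host-order u32 words only for the "%08x-%08x-%08x-%08x" text interface, so the resulting cookie no longer depends on host endianness. A standalone sketch of the same round trip; the MY_FO_* names are hypothetical and the 4 x 32-bit size mirrors TCP_FASTOPEN_KEY_LENGTH.

#include <linux/types.h>
#include <asm/byteorder.h>

#define MY_FO_KEY_WORDS 4       /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */

/* Store: host-order words parsed from the hex string are kept in a
 * fixed little-endian layout.
 */
static void my_fo_key_store(__le32 *stored, const u32 *user_key)
{
        int i;

        for (i = 0; i < MY_FO_KEY_WORDS; i++)
                stored[i] = cpu_to_le32(user_key[i]);
}

/* Display: convert back so the same hex string is printed on both
 * little- and big-endian hosts.
 */
static void my_fo_key_show(u32 *user_key, const __le32 *stored)
{
        int i;

        for (i = 0; i < MY_FO_KEY_WORDS; i++)
                user_key[i] = le32_to_cpu(stored[i]);
}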
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 355d3dffd021..8e5522c6833a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -265,7 +265,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) | |||
265 | * it is probably a retransmit. | 265 | * it is probably a retransmit. |
266 | */ | 266 | */ |
267 | if (tp->ecn_flags & TCP_ECN_SEEN) | 267 | if (tp->ecn_flags & TCP_ECN_SEEN) |
268 | tcp_enter_quickack_mode(sk, 1); | 268 | tcp_enter_quickack_mode(sk, 2); |
269 | break; | 269 | break; |
270 | case INET_ECN_CE: | 270 | case INET_ECN_CE: |
271 | if (tcp_ca_needs_ecn(sk)) | 271 | if (tcp_ca_needs_ecn(sk)) |
@@ -273,7 +273,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) | |||
273 | 273 | ||
274 | if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { | 274 | if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { |
275 | /* Better not delay acks, sender can have a very low cwnd */ | 275 | /* Better not delay acks, sender can have a very low cwnd */ |
276 | tcp_enter_quickack_mode(sk, 1); | 276 | tcp_enter_quickack_mode(sk, 2); |
277 | tp->ecn_flags |= TCP_ECN_DEMAND_CWR; | 277 | tp->ecn_flags |= TCP_ECN_DEMAND_CWR; |
278 | } | 278 | } |
279 | tp->ecn_flags |= TCP_ECN_SEEN; | 279 | tp->ecn_flags |= TCP_ECN_SEEN; |
@@ -3181,6 +3181,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, | |||
3181 | 3181 | ||
3182 | if (tcp_is_reno(tp)) { | 3182 | if (tcp_is_reno(tp)) { |
3183 | tcp_remove_reno_sacks(sk, pkts_acked); | 3183 | tcp_remove_reno_sacks(sk, pkts_acked); |
3184 | |||
3185 | /* If any of the cumulatively ACKed segments was | ||
3186 | * retransmitted, the non-SACK case cannot confirm that | ||
3187 | * the progress came from the original transmission: | ||
3188 | * without TCPCB_SACKED_ACKED bits we cannot tell, even | ||
3189 | * if some of the packets were never retransmitted. | ||
3190 | */ | ||
3191 | if (flag & FLAG_RETRANS_DATA_ACKED) | ||
3192 | flag &= ~FLAG_ORIG_SACK_ACKED; | ||
3184 | } else { | 3193 | } else { |
3185 | int delta; | 3194 | int delta; |
3186 | 3195 | ||
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 92dc9e5a7ff3..69c54540d5b4 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
@@ -394,7 +394,7 @@ unflush: | |||
394 | out_unlock: | 394 | out_unlock: |
395 | rcu_read_unlock(); | 395 | rcu_read_unlock(); |
396 | out: | 396 | out: |
397 | NAPI_GRO_CB(skb)->flush |= flush; | 397 | skb_gro_flush_final(skb, pp, flush); |
398 | return pp; | 398 | return pp; |
399 | } | 399 | } |
400 | EXPORT_SYMBOL(udp_gro_receive); | 400 | EXPORT_SYMBOL(udp_gro_receive); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c134286d6a41..91580c62bb86 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -4528,6 +4528,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, | |||
4528 | unsigned long expires, u32 flags) | 4528 | unsigned long expires, u32 flags) |
4529 | { | 4529 | { |
4530 | struct fib6_info *f6i; | 4530 | struct fib6_info *f6i; |
4531 | u32 prio; | ||
4531 | 4532 | ||
4532 | f6i = addrconf_get_prefix_route(&ifp->addr, | 4533 | f6i = addrconf_get_prefix_route(&ifp->addr, |
4533 | ifp->prefix_len, | 4534 | ifp->prefix_len, |
@@ -4536,13 +4537,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, | |||
4536 | if (!f6i) | 4537 | if (!f6i) |
4537 | return -ENOENT; | 4538 | return -ENOENT; |
4538 | 4539 | ||
4539 | if (f6i->fib6_metric != ifp->rt_priority) { | 4540 | prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF; |
4541 | if (f6i->fib6_metric != prio) { | ||
4542 | /* delete old one */ | ||
4543 | ip6_del_rt(dev_net(ifp->idev->dev), f6i); | ||
4544 | |||
4540 | /* add new one */ | 4545 | /* add new one */ |
4541 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, | 4546 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, |
4542 | ifp->rt_priority, ifp->idev->dev, | 4547 | ifp->rt_priority, ifp->idev->dev, |
4543 | expires, flags, GFP_KERNEL); | 4548 | expires, flags, GFP_KERNEL); |
4544 | /* delete old one */ | ||
4545 | ip6_del_rt(dev_net(ifp->idev->dev), f6i); | ||
4546 | } else { | 4549 | } else { |
4547 | if (!expires) | 4550 | if (!expires) |
4548 | fib6_clean_expires(f6i); | 4551 | fib6_clean_expires(f6i); |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 5e0332014c17..a452d99c9f52 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net) | |||
107 | if (hdr == NULL) | 107 | if (hdr == NULL) |
108 | goto err_reg; | 108 | goto err_reg; |
109 | 109 | ||
110 | net->nf_frag.sysctl.frags_hdr = hdr; | 110 | net->nf_frag_frags_hdr = hdr; |
111 | return 0; | 111 | return 0; |
112 | 112 | ||
113 | err_reg: | 113 | err_reg: |
@@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) | |||
121 | { | 121 | { |
122 | struct ctl_table *table; | 122 | struct ctl_table *table; |
123 | 123 | ||
124 | table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg; | 124 | table = net->nf_frag_frags_hdr->ctl_table_arg; |
125 | unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr); | 125 | unregister_net_sysctl_table(net->nf_frag_frags_hdr); |
126 | if (!net_eq(net, &init_net)) | 126 | if (!net_eq(net, &init_net)) |
127 | kfree(table); | 127 | kfree(table); |
128 | } | 128 | } |
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 33fb35cbfac1..558fe8cc6d43 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c | |||
@@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void) | |||
373 | return -ENOMEM; | 373 | return -ENOMEM; |
374 | 374 | ||
375 | for_each_possible_cpu(cpu) { | 375 | for_each_possible_cpu(cpu) { |
376 | tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL); | 376 | tfm = crypto_alloc_shash(algo->name, 0, 0); |
377 | if (IS_ERR(tfm)) | 377 | if (IS_ERR(tfm)) |
378 | return PTR_ERR(tfm); | 378 | return PTR_ERR(tfm); |
379 | p_tfm = per_cpu_ptr(algo->tfms, cpu); | 379 | p_tfm = per_cpu_ptr(algo->tfms, cpu); |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 44b5dfe8727d..fa1f1e63a264 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, | |||
4845 | skb_reset_network_header(skb); | 4845 | skb_reset_network_header(skb); |
4846 | skb_reset_mac_header(skb); | 4846 | skb_reset_mac_header(skb); |
4847 | 4847 | ||
4848 | local_bh_disable(); | ||
4848 | __ieee80211_subif_start_xmit(skb, skb->dev, flags); | 4849 | __ieee80211_subif_start_xmit(skb, skb->dev, flags); |
4850 | local_bh_enable(); | ||
4849 | 4851 | ||
4850 | return 0; | 4852 | return 0; |
4851 | } | 4853 | } |
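The mac80211 hunk wraps the control-port transmit in local_bh_disable()/local_bh_enable(). The likely rationale (not spelled out in the hunk) is that __ieee80211_subif_start_xmit() is normally reached from the transmit path with bottom halves already disabled, so a process-context caller has to recreate that environment. A generic sketch of the pairing, with a hypothetical callee:

#include <linux/bottom_half.h>
#include <linux/skbuff.h>

void my_driver_start_xmit(struct sk_buff *skb);  /* hypothetical xmit path */

static void my_xmit_from_process_context(struct sk_buff *skb)
{
        /* The transmit path is normally entered from softirq context with
         * bottom halves disabled; reproduce that environment when calling
         * it from process context.
         */
        local_bh_disable();
        my_driver_start_xmit(skb);
        local_bh_enable();
}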
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index d8383609fe28..510039862aa9 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c | |||
@@ -47,6 +47,8 @@ struct nf_conncount_tuple { | |||
47 | struct hlist_node node; | 47 | struct hlist_node node; |
48 | struct nf_conntrack_tuple tuple; | 48 | struct nf_conntrack_tuple tuple; |
49 | struct nf_conntrack_zone zone; | 49 | struct nf_conntrack_zone zone; |
50 | int cpu; | ||
51 | u32 jiffies32; | ||
50 | }; | 52 | }; |
51 | 53 | ||
52 | struct nf_conncount_rb { | 54 | struct nf_conncount_rb { |
@@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head, | |||
91 | return false; | 93 | return false; |
92 | conn->tuple = *tuple; | 94 | conn->tuple = *tuple; |
93 | conn->zone = *zone; | 95 | conn->zone = *zone; |
96 | conn->cpu = raw_smp_processor_id(); | ||
97 | conn->jiffies32 = (u32)jiffies; | ||
94 | hlist_add_head(&conn->node, head); | 98 | hlist_add_head(&conn->node, head); |
95 | return true; | 99 | return true; |
96 | } | 100 | } |
97 | EXPORT_SYMBOL_GPL(nf_conncount_add); | 101 | EXPORT_SYMBOL_GPL(nf_conncount_add); |
98 | 102 | ||
103 | static const struct nf_conntrack_tuple_hash * | ||
104 | find_or_evict(struct net *net, struct nf_conncount_tuple *conn) | ||
105 | { | ||
106 | const struct nf_conntrack_tuple_hash *found; | ||
107 | unsigned long a, b; | ||
108 | int cpu = raw_smp_processor_id(); | ||
109 | __s32 age; | ||
110 | |||
111 | found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); | ||
112 | if (found) | ||
113 | return found; | ||
114 | b = conn->jiffies32; | ||
115 | a = (u32)jiffies; | ||
116 | |||
117 | /* conn might have been added just before by another cpu and | ||
118 | * might still be unconfirmed. In this case, nf_conntrack_find() | ||
119 | * returns no result. Thus only evict if this cpu added the | ||
120 | * stale entry or if the entry is older than two jiffies. | ||
121 | */ | ||
122 | age = a - b; | ||
123 | if (conn->cpu == cpu || age >= 2) { | ||
124 | hlist_del(&conn->node); | ||
125 | kmem_cache_free(conncount_conn_cachep, conn); | ||
126 | return ERR_PTR(-ENOENT); | ||
127 | } | ||
128 | |||
129 | return ERR_PTR(-EAGAIN); | ||
130 | } | ||
131 | |||
99 | unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, | 132 | unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, |
100 | const struct nf_conntrack_tuple *tuple, | 133 | const struct nf_conntrack_tuple *tuple, |
101 | const struct nf_conntrack_zone *zone, | 134 | const struct nf_conntrack_zone *zone, |
@@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, | |||
103 | { | 136 | { |
104 | const struct nf_conntrack_tuple_hash *found; | 137 | const struct nf_conntrack_tuple_hash *found; |
105 | struct nf_conncount_tuple *conn; | 138 | struct nf_conncount_tuple *conn; |
106 | struct hlist_node *n; | ||
107 | struct nf_conn *found_ct; | 139 | struct nf_conn *found_ct; |
140 | struct hlist_node *n; | ||
108 | unsigned int length = 0; | 141 | unsigned int length = 0; |
109 | 142 | ||
110 | *addit = tuple ? true : false; | 143 | *addit = tuple ? true : false; |
111 | 144 | ||
112 | /* check the saved connections */ | 145 | /* check the saved connections */ |
113 | hlist_for_each_entry_safe(conn, n, head, node) { | 146 | hlist_for_each_entry_safe(conn, n, head, node) { |
114 | found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); | 147 | found = find_or_evict(net, conn); |
115 | if (found == NULL) { | 148 | if (IS_ERR(found)) { |
116 | hlist_del(&conn->node); | 149 | /* Not found, but might be about to be confirmed */ |
117 | kmem_cache_free(conncount_conn_cachep, conn); | 150 | if (PTR_ERR(found) == -EAGAIN) { |
151 | length++; | ||
152 | if (!tuple) | ||
153 | continue; | ||
154 | |||
155 | if (nf_ct_tuple_equal(&conn->tuple, tuple) && | ||
156 | nf_ct_zone_id(&conn->zone, conn->zone.dir) == | ||
157 | nf_ct_zone_id(zone, zone->dir)) | ||
158 | *addit = false; | ||
159 | } | ||
118 | continue; | 160 | continue; |
119 | } | 161 | } |
120 | 162 | ||
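find_or_evict(), added above, only frees a list entry whose conntrack lookup failed if the current CPU created it or if it is at least two jiffies old; otherwise it returns -EAGAIN so an entry another CPU has added but not yet confirmed is kept. The age test relies on unsigned 32-bit subtraction read as signed, which stays correct across jiffies wraparound. A minimal sketch of just that check, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/types.h>

struct my_entry {
        int cpu;                /* CPU that created the entry */
        u32 jiffies32;          /* low 32 bits of jiffies at creation */
};

static bool my_entry_may_evict(const struct my_entry *e)
{
        u32 now = (u32)jiffies;
        __s32 age = now - e->jiffies32; /* wrap-safe signed difference */

        /* Evict only if we created it ourselves or it is clearly stale. */
        return e->cpu == raw_smp_processor_id() || age >= 2;
}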
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 551a1eddf0fa..a75b11c39312 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) | |||
465 | 465 | ||
466 | nf_ct_expect_iterate_destroy(expect_iter_me, NULL); | 466 | nf_ct_expect_iterate_destroy(expect_iter_me, NULL); |
467 | nf_ct_iterate_destroy(unhelp, me); | 467 | nf_ct_iterate_destroy(unhelp, me); |
468 | |||
469 | /* Someone may still be using a helper looked up before the | ||
470 | * unhelp above finished, so wait for those users to complete. | ||
471 | */ | ||
472 | synchronize_rcu(); | ||
468 | } | 473 | } |
469 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); | 474 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); |
470 | 475 | ||
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 426457047578..a61d6df6e5f6 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, | |||
424 | if (write) { | 424 | if (write) { |
425 | struct ctl_table tmp = *table; | 425 | struct ctl_table tmp = *table; |
426 | 426 | ||
427 | /* proc_dostring() can append to existing strings, so we need to | ||
428 | * initialize it as an empty string. | ||
429 | */ | ||
430 | buf[0] = '\0'; | ||
427 | tmp.data = buf; | 431 | tmp.data = buf; |
428 | r = proc_dostring(&tmp, write, buffer, lenp, ppos); | 432 | r = proc_dostring(&tmp, write, buffer, lenp, ppos); |
429 | if (r) | 433 | if (r) |
@@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, | |||
442 | rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); | 446 | rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); |
443 | mutex_unlock(&nf_log_mutex); | 447 | mutex_unlock(&nf_log_mutex); |
444 | } else { | 448 | } else { |
449 | struct ctl_table tmp = *table; | ||
450 | |||
451 | tmp.data = buf; | ||
445 | mutex_lock(&nf_log_mutex); | 452 | mutex_lock(&nf_log_mutex); |
446 | logger = nft_log_dereference(net->nf.nf_loggers[tindex]); | 453 | logger = nft_log_dereference(net->nf.nf_loggers[tindex]); |
447 | if (!logger) | 454 | if (!logger) |
448 | table->data = "NONE"; | 455 | strlcpy(buf, "NONE", sizeof(buf)); |
449 | else | 456 | else |
450 | table->data = logger->name; | 457 | strlcpy(buf, logger->name, sizeof(buf)); |
451 | r = proc_dostring(table, write, buffer, lenp, ppos); | ||
452 | mutex_unlock(&nf_log_mutex); | 458 | mutex_unlock(&nf_log_mutex); |
459 | r = proc_dostring(&tmp, write, buffer, lenp, ppos); | ||
453 | } | 460 | } |
454 | 461 | ||
455 | return r; | 462 | return r; |
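In the nf_log sysctl handler both branches now point a temporary ctl_table at a stack buffer, and the write path clears it first because, as the added comment notes, proc_dostring() appends to the existing contents of ->data. A stripped-down sketch of that pattern with a hypothetical handler; the proc_dostring() signature is the one used in this kernel series.

#include <linux/string.h>
#include <linux/sysctl.h>

static int my_proc_dostring(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char buf[64];                   /* hypothetical bounded name buffer */
        struct ctl_table tmp = *table;  /* never write through *table */

        buf[0] = '\0';                  /* proc_dostring() appends to ->data */
        tmp.data = buf;
        tmp.maxlen = sizeof(buf);

        /* on a write, the caller would go on to parse buf afterwards */
        return proc_dostring(&tmp, write, buffer, lenp, ppos);
}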
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 4ccd2988f9db..ea4ba551abb2 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl, | |||
1243 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { | 1243 | static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { |
1244 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, | 1244 | [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, |
1245 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, | 1245 | [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, |
1246 | [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 }, | ||
1247 | [NFQA_CFG_MASK] = { .type = NLA_U32 }, | ||
1248 | [NFQA_CFG_FLAGS] = { .type = NLA_U32 }, | ||
1246 | }; | 1249 | }; |
1247 | 1250 | ||
1248 | static const struct nf_queue_handler nfqh = { | 1251 | static const struct nf_queue_handler nfqh = { |
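The added nfqa_cfg_policy entries declare NFQA_CFG_QUEUE_MAXLEN, NFQA_CFG_MASK and NFQA_CFG_FLAGS as NLA_U32, so nla_parse rejects undersized attributes before the handlers call nla_get_u32() on them. A generic sketch of declaring and enforcing such a policy; the MY_ATTR_* names and the surrounding function are illustrative.

#include <net/netlink.h>

enum {
        MY_ATTR_UNSPEC,
        MY_ATTR_QUEUE_MAXLEN,   /* hypothetical u32 attribute */
        __MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
        /* NLA_U32 makes nla_parse_nested() reject payloads < 4 bytes,
         * so nla_get_u32() below cannot read past the attribute.
         */
        [MY_ATTR_QUEUE_MAXLEN] = { .type = NLA_U32 },
};

static int my_parse(const struct nlattr *nest, u32 *maxlen)
{
        struct nlattr *tb[MY_ATTR_MAX + 1];
        int err;

        err = nla_parse_nested(tb, MY_ATTR_MAX, nest, my_policy, NULL);
        if (err)
                return err;
        if (tb[MY_ATTR_QUEUE_MAXLEN])
                *maxlen = nla_get_u32(tb[MY_ATTR_QUEUE_MAXLEN]);
        return 0;
}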
diff --git a/net/rds/connection.c b/net/rds/connection.c index abef75da89a7..cfb05953b0e5 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len, | |||
659 | 659 | ||
660 | int rds_conn_init(void) | 660 | int rds_conn_init(void) |
661 | { | 661 | { |
662 | int ret; | ||
663 | |||
664 | ret = rds_loop_net_init(); /* register pernet callback */ | ||
665 | if (ret) | ||
666 | return ret; | ||
667 | |||
662 | rds_conn_slab = kmem_cache_create("rds_connection", | 668 | rds_conn_slab = kmem_cache_create("rds_connection", |
663 | sizeof(struct rds_connection), | 669 | sizeof(struct rds_connection), |
664 | 0, 0, NULL); | 670 | 0, 0, NULL); |
665 | if (!rds_conn_slab) | 671 | if (!rds_conn_slab) { |
672 | rds_loop_net_exit(); | ||
666 | return -ENOMEM; | 673 | return -ENOMEM; |
674 | } | ||
667 | 675 | ||
668 | rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); | 676 | rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); |
669 | rds_info_register_func(RDS_INFO_SEND_MESSAGES, | 677 | rds_info_register_func(RDS_INFO_SEND_MESSAGES, |
@@ -676,6 +684,7 @@ int rds_conn_init(void) | |||
676 | 684 | ||
677 | void rds_conn_exit(void) | 685 | void rds_conn_exit(void) |
678 | { | 686 | { |
687 | rds_loop_net_exit(); /* unregister pernet callback */ | ||
679 | rds_loop_exit(); | 688 | rds_loop_exit(); |
680 | 689 | ||
681 | WARN_ON(!hlist_empty(rds_conn_hash)); | 690 | WARN_ON(!hlist_empty(rds_conn_hash)); |
diff --git a/net/rds/loop.c b/net/rds/loop.c index dac6218a460e..feea1f96ee2a 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/in.h> | 35 | #include <linux/in.h> |
36 | #include <net/net_namespace.h> | ||
37 | #include <net/netns/generic.h> | ||
36 | 38 | ||
37 | #include "rds_single_path.h" | 39 | #include "rds_single_path.h" |
38 | #include "rds.h" | 40 | #include "rds.h" |
@@ -40,6 +42,17 @@ | |||
40 | 42 | ||
41 | static DEFINE_SPINLOCK(loop_conns_lock); | 43 | static DEFINE_SPINLOCK(loop_conns_lock); |
42 | static LIST_HEAD(loop_conns); | 44 | static LIST_HEAD(loop_conns); |
45 | static atomic_t rds_loop_unloading = ATOMIC_INIT(0); | ||
46 | |||
47 | static void rds_loop_set_unloading(void) | ||
48 | { | ||
49 | atomic_set(&rds_loop_unloading, 1); | ||
50 | } | ||
51 | |||
52 | static bool rds_loop_is_unloading(struct rds_connection *conn) | ||
53 | { | ||
54 | return atomic_read(&rds_loop_unloading) != 0; | ||
55 | } | ||
43 | 56 | ||
44 | /* | 57 | /* |
45 | * This 'loopback' transport is a special case for flows that originate | 58 | * This 'loopback' transport is a special case for flows that originate |
@@ -165,6 +178,8 @@ void rds_loop_exit(void) | |||
165 | struct rds_loop_connection *lc, *_lc; | 178 | struct rds_loop_connection *lc, *_lc; |
166 | LIST_HEAD(tmp_list); | 179 | LIST_HEAD(tmp_list); |
167 | 180 | ||
181 | rds_loop_set_unloading(); | ||
182 | synchronize_rcu(); | ||
168 | /* avoid calling conn_destroy with irqs off */ | 183 | /* avoid calling conn_destroy with irqs off */ |
169 | spin_lock_irq(&loop_conns_lock); | 184 | spin_lock_irq(&loop_conns_lock); |
170 | list_splice(&loop_conns, &tmp_list); | 185 | list_splice(&loop_conns, &tmp_list); |
@@ -177,6 +192,46 @@ void rds_loop_exit(void) | |||
177 | } | 192 | } |
178 | } | 193 | } |
179 | 194 | ||
195 | static void rds_loop_kill_conns(struct net *net) | ||
196 | { | ||
197 | struct rds_loop_connection *lc, *_lc; | ||
198 | LIST_HEAD(tmp_list); | ||
199 | |||
200 | spin_lock_irq(&loop_conns_lock); | ||
201 | list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node) { | ||
202 | struct net *c_net = read_pnet(&lc->conn->c_net); | ||
203 | |||
204 | if (net != c_net) | ||
205 | continue; | ||
206 | list_move_tail(&lc->loop_node, &tmp_list); | ||
207 | } | ||
208 | spin_unlock_irq(&loop_conns_lock); | ||
209 | |||
210 | list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) { | ||
211 | WARN_ON(lc->conn->c_passive); | ||
212 | rds_conn_destroy(lc->conn); | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static void __net_exit rds_loop_exit_net(struct net *net) | ||
217 | { | ||
218 | rds_loop_kill_conns(net); | ||
219 | } | ||
220 | |||
221 | static struct pernet_operations rds_loop_net_ops = { | ||
222 | .exit = rds_loop_exit_net, | ||
223 | }; | ||
224 | |||
225 | int rds_loop_net_init(void) | ||
226 | { | ||
227 | return register_pernet_device(&rds_loop_net_ops); | ||
228 | } | ||
229 | |||
230 | void rds_loop_net_exit(void) | ||
231 | { | ||
232 | unregister_pernet_device(&rds_loop_net_ops); | ||
233 | } | ||
234 | |||
180 | /* | 235 | /* |
181 | * This is missing .xmit_* because loop doesn't go through generic | 236 | * This is missing .xmit_* because loop doesn't go through generic |
182 | * rds_send_xmit() and doesn't call rds_recv_incoming(). .listen_stop and | 237 | * rds_send_xmit() and doesn't call rds_recv_incoming(). .listen_stop and |
@@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = { | |||
194 | .inc_free = rds_loop_inc_free, | 249 | .inc_free = rds_loop_inc_free, |
195 | .t_name = "loopback", | 250 | .t_name = "loopback", |
196 | .t_type = RDS_TRANS_LOOP, | 251 | .t_type = RDS_TRANS_LOOP, |
252 | .t_unloading = rds_loop_is_unloading, | ||
197 | }; | 253 | }; |
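The RDS changes register pernet_operations so loopback connections that belong to a network namespace are destroyed from the namespace exit path rather than lingering after the namespace is gone. The minimal shape of that pattern is sketched below with hypothetical names; only register_pernet_device()/unregister_pernet_device() and the .exit hook are taken from the diff.

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static void __net_exit my_exit_net(struct net *net)
{
        /* tear down per-namespace state here (connections, sysctls, ...) */
}

static struct pernet_operations my_net_ops = {
        .exit = my_exit_net,
};

static int __init my_init(void)
{
        /* registered once at load time; .exit then runs for every
         * namespace that goes away while the module is loaded.
         */
        return register_pernet_device(&my_net_ops);
}

static void __exit my_exit(void)
{
        unregister_pernet_device(&my_net_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");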
diff --git a/net/rds/loop.h b/net/rds/loop.h index 469fa4b2da4f..bbc8cdd030df 100644 --- a/net/rds/loop.h +++ b/net/rds/loop.h | |||
@@ -5,6 +5,8 @@ | |||
5 | /* loop.c */ | 5 | /* loop.c */ |
6 | extern struct rds_transport rds_loop_transport; | 6 | extern struct rds_transport rds_loop_transport; |
7 | 7 | ||
8 | int rds_loop_net_init(void); | ||
9 | void rds_loop_net_exit(void); | ||
8 | void rds_loop_exit(void); | 10 | void rds_loop_exit(void); |
9 | 11 | ||
10 | #endif | 12 | #endif |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 973b4471b532..3c1405df936c 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | static void smc_tcp_listen_work(struct work_struct *); | 47 | static void smc_tcp_listen_work(struct work_struct *); |
48 | static void smc_connect_work(struct work_struct *); | ||
48 | 49 | ||
49 | static void smc_set_keepalive(struct sock *sk, int val) | 50 | static void smc_set_keepalive(struct sock *sk, int val) |
50 | { | 51 | { |
@@ -122,6 +123,12 @@ static int smc_release(struct socket *sock) | |||
122 | goto out; | 123 | goto out; |
123 | 124 | ||
124 | smc = smc_sk(sk); | 125 | smc = smc_sk(sk); |
126 | |||
127 | /* cleanup for a dangling non-blocking connect */ | ||
128 | flush_work(&smc->connect_work); | ||
129 | kfree(smc->connect_info); | ||
130 | smc->connect_info = NULL; | ||
131 | |||
125 | if (sk->sk_state == SMC_LISTEN) | 132 | if (sk->sk_state == SMC_LISTEN) |
126 | /* smc_close_non_accepted() is called and acquires | 133 | /* smc_close_non_accepted() is called and acquires |
127 | * sock lock for child sockets again | 134 | * sock lock for child sockets again |
@@ -186,6 +193,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, | |||
186 | sk->sk_protocol = protocol; | 193 | sk->sk_protocol = protocol; |
187 | smc = smc_sk(sk); | 194 | smc = smc_sk(sk); |
188 | INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); | 195 | INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); |
196 | INIT_WORK(&smc->connect_work, smc_connect_work); | ||
189 | INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); | 197 | INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); |
190 | INIT_LIST_HEAD(&smc->accept_q); | 198 | INIT_LIST_HEAD(&smc->accept_q); |
191 | spin_lock_init(&smc->accept_q_lock); | 199 | spin_lock_init(&smc->accept_q_lock); |
@@ -576,6 +584,35 @@ static int __smc_connect(struct smc_sock *smc) | |||
576 | return 0; | 584 | return 0; |
577 | } | 585 | } |
578 | 586 | ||
587 | static void smc_connect_work(struct work_struct *work) | ||
588 | { | ||
589 | struct smc_sock *smc = container_of(work, struct smc_sock, | ||
590 | connect_work); | ||
591 | int rc; | ||
592 | |||
593 | lock_sock(&smc->sk); | ||
594 | rc = kernel_connect(smc->clcsock, &smc->connect_info->addr, | ||
595 | smc->connect_info->alen, smc->connect_info->flags); | ||
596 | if (smc->clcsock->sk->sk_err) { | ||
597 | smc->sk.sk_err = smc->clcsock->sk->sk_err; | ||
598 | goto out; | ||
599 | } | ||
600 | if (rc < 0) { | ||
601 | smc->sk.sk_err = -rc; | ||
602 | goto out; | ||
603 | } | ||
604 | |||
605 | rc = __smc_connect(smc); | ||
606 | if (rc < 0) | ||
607 | smc->sk.sk_err = -rc; | ||
608 | |||
609 | out: | ||
610 | smc->sk.sk_state_change(&smc->sk); | ||
611 | kfree(smc->connect_info); | ||
612 | smc->connect_info = NULL; | ||
613 | release_sock(&smc->sk); | ||
614 | } | ||
615 | |||
579 | static int smc_connect(struct socket *sock, struct sockaddr *addr, | 616 | static int smc_connect(struct socket *sock, struct sockaddr *addr, |
580 | int alen, int flags) | 617 | int alen, int flags) |
581 | { | 618 | { |
@@ -605,15 +642,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, | |||
605 | 642 | ||
606 | smc_copy_sock_settings_to_clc(smc); | 643 | smc_copy_sock_settings_to_clc(smc); |
607 | tcp_sk(smc->clcsock->sk)->syn_smc = 1; | 644 | tcp_sk(smc->clcsock->sk)->syn_smc = 1; |
608 | rc = kernel_connect(smc->clcsock, addr, alen, flags); | 645 | if (flags & O_NONBLOCK) { |
609 | if (rc) | 646 | if (smc->connect_info) { |
610 | goto out; | 647 | rc = -EALREADY; |
648 | goto out; | ||
649 | } | ||
650 | smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL); | ||
651 | if (!smc->connect_info) { | ||
652 | rc = -ENOMEM; | ||
653 | goto out; | ||
654 | } | ||
655 | smc->connect_info->alen = alen; | ||
656 | smc->connect_info->flags = flags ^ O_NONBLOCK; | ||
657 | memcpy(&smc->connect_info->addr, addr, alen); | ||
658 | schedule_work(&smc->connect_work); | ||
659 | rc = -EINPROGRESS; | ||
660 | } else { | ||
661 | rc = kernel_connect(smc->clcsock, addr, alen, flags); | ||
662 | if (rc) | ||
663 | goto out; | ||
611 | 664 | ||
612 | rc = __smc_connect(smc); | 665 | rc = __smc_connect(smc); |
613 | if (rc < 0) | 666 | if (rc < 0) |
614 | goto out; | 667 | goto out; |
615 | else | 668 | else |
616 | rc = 0; /* success cases including fallback */ | 669 | rc = 0; /* success cases including fallback */ |
670 | } | ||
617 | 671 | ||
618 | out: | 672 | out: |
619 | release_sock(sk); | 673 | release_sock(sk); |
@@ -1279,40 +1333,20 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
1279 | struct sock *sk = sock->sk; | 1333 | struct sock *sk = sock->sk; |
1280 | __poll_t mask = 0; | 1334 | __poll_t mask = 0; |
1281 | struct smc_sock *smc; | 1335 | struct smc_sock *smc; |
1282 | int rc; | ||
1283 | 1336 | ||
1284 | if (!sk) | 1337 | if (!sk) |
1285 | return EPOLLNVAL; | 1338 | return EPOLLNVAL; |
1286 | 1339 | ||
1287 | smc = smc_sk(sock->sk); | 1340 | smc = smc_sk(sock->sk); |
1288 | sock_hold(sk); | ||
1289 | lock_sock(sk); | ||
1290 | if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { | 1341 | if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { |
1291 | /* delegate to CLC child sock */ | 1342 | /* delegate to CLC child sock */ |
1292 | release_sock(sk); | ||
1293 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); | 1343 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); |
1294 | lock_sock(sk); | ||
1295 | sk->sk_err = smc->clcsock->sk->sk_err; | 1344 | sk->sk_err = smc->clcsock->sk->sk_err; |
1296 | if (sk->sk_err) { | 1345 | if (sk->sk_err) |
1297 | mask |= EPOLLERR; | 1346 | mask |= EPOLLERR; |
1298 | } else { | ||
1299 | /* if non-blocking connect finished ... */ | ||
1300 | if (sk->sk_state == SMC_INIT && | ||
1301 | mask & EPOLLOUT && | ||
1302 | smc->clcsock->sk->sk_state != TCP_CLOSE) { | ||
1303 | rc = __smc_connect(smc); | ||
1304 | if (rc < 0) | ||
1305 | mask |= EPOLLERR; | ||
1306 | /* success cases including fallback */ | ||
1307 | mask |= EPOLLOUT | EPOLLWRNORM; | ||
1308 | } | ||
1309 | } | ||
1310 | } else { | 1347 | } else { |
1311 | if (sk->sk_state != SMC_CLOSED) { | 1348 | if (sk->sk_state != SMC_CLOSED) |
1312 | release_sock(sk); | ||
1313 | sock_poll_wait(file, sk_sleep(sk), wait); | 1349 | sock_poll_wait(file, sk_sleep(sk), wait); |
1314 | lock_sock(sk); | ||
1315 | } | ||
1316 | if (sk->sk_err) | 1350 | if (sk->sk_err) |
1317 | mask |= EPOLLERR; | 1351 | mask |= EPOLLERR; |
1318 | if ((sk->sk_shutdown == SHUTDOWN_MASK) || | 1352 | if ((sk->sk_shutdown == SHUTDOWN_MASK) || |
@@ -1338,10 +1372,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
1338 | } | 1372 | } |
1339 | if (smc->conn.urg_state == SMC_URG_VALID) | 1373 | if (smc->conn.urg_state == SMC_URG_VALID) |
1340 | mask |= EPOLLPRI; | 1374 | mask |= EPOLLPRI; |
1341 | |||
1342 | } | 1375 | } |
1343 | release_sock(sk); | ||
1344 | sock_put(sk); | ||
1345 | 1376 | ||
1346 | return mask; | 1377 | return mask; |
1347 | } | 1378 | } |
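With connect_work in place, connect() on a non-blocking SMC socket returns -EINPROGRESS and the CLC handshake plus __smc_connect() run from the work item, which wakes pollers through sk_state_change() when it completes; the old path that ran __smc_connect() from poll() is removed. From user space this is the standard non-blocking connect sequence, sketched below with the generic sockets API (nothing SMC-specific is assumed):

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* Initiate a non-blocking connect, poll for writability, then read
 * SO_ERROR for the final result. Returns 0 or a negative errno.
 */
static int connect_nonblock(int fd, const struct sockaddr *addr,
                            socklen_t alen, int timeout_ms)
{
        struct pollfd pfd = { .fd = fd, .events = POLLOUT };
        int err = 0;
        socklen_t elen = sizeof(err);

        if (connect(fd, addr, alen) == 0)
                return 0;                       /* connected immediately */
        if (errno != EINPROGRESS)
                return -errno;

        if (poll(&pfd, 1, timeout_ms) <= 0)
                return -ETIMEDOUT;              /* timeout or poll error */

        /* connection attempt finished; fetch the outcome */
        if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen) < 0)
                return -errno;
        return err ? -err : 0;
}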
diff --git a/net/smc/smc.h b/net/smc/smc.h index 51ae1f10d81a..d7ca26570482 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h | |||
@@ -187,11 +187,19 @@ struct smc_connection { | |||
187 | struct work_struct close_work; /* peer sent some closing */ | 187 | struct work_struct close_work; /* peer sent some closing */ |
188 | }; | 188 | }; |
189 | 189 | ||
190 | struct smc_connect_info { | ||
191 | int flags; | ||
192 | int alen; | ||
193 | struct sockaddr addr; | ||
194 | }; | ||
195 | |||
190 | struct smc_sock { /* smc sock container */ | 196 | struct smc_sock { /* smc sock container */ |
191 | struct sock sk; | 197 | struct sock sk; |
192 | struct socket *clcsock; /* internal tcp socket */ | 198 | struct socket *clcsock; /* internal tcp socket */ |
193 | struct smc_connection conn; /* smc connection */ | 199 | struct smc_connection conn; /* smc connection */ |
194 | struct smc_sock *listen_smc; /* listen parent */ | 200 | struct smc_sock *listen_smc; /* listen parent */ |
201 | struct smc_connect_info *connect_info; /* connect address & flags */ | ||
202 | struct work_struct connect_work; /* handle non-blocking connect*/ | ||
195 | struct work_struct tcp_listen_work;/* handle tcp socket accepts */ | 203 | struct work_struct tcp_listen_work;/* handle tcp socket accepts */ |
196 | struct work_struct smc_listen_work;/* prepare new accept socket */ | 204 | struct work_struct smc_listen_work;/* prepare new accept socket */ |
197 | struct list_head accept_q; /* sockets to be accepted */ | 205 | struct list_head accept_q; /* sockets to be accepted */ |
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 373836615c57..625acb27efcc 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c | |||
@@ -35,7 +35,6 @@ struct _strp_msg { | |||
35 | */ | 35 | */ |
36 | struct strp_msg strp; | 36 | struct strp_msg strp; |
37 | int accum_len; | 37 | int accum_len; |
38 | int early_eaten; | ||
39 | }; | 38 | }; |
40 | 39 | ||
41 | static inline struct _strp_msg *_strp_msg(struct sk_buff *skb) | 40 | static inline struct _strp_msg *_strp_msg(struct sk_buff *skb) |
@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
115 | head = strp->skb_head; | 114 | head = strp->skb_head; |
116 | if (head) { | 115 | if (head) { |
117 | /* Message already in progress */ | 116 | /* Message already in progress */ |
118 | |||
119 | stm = _strp_msg(head); | ||
120 | if (unlikely(stm->early_eaten)) { | ||
121 | /* Already some number of bytes on the receive sock | ||
122 | * data saved in skb_head, just indicate they | ||
123 | * are consumed. | ||
124 | */ | ||
125 | eaten = orig_len <= stm->early_eaten ? | ||
126 | orig_len : stm->early_eaten; | ||
127 | stm->early_eaten -= eaten; | ||
128 | |||
129 | return eaten; | ||
130 | } | ||
131 | |||
132 | if (unlikely(orig_offset)) { | 117 | if (unlikely(orig_offset)) { |
133 | /* Getting data with a non-zero offset when a message is | 118 | /* Getting data with a non-zero offset when a message is |
134 | * in progress is not expected. If it does happen, we | 119 | * in progress is not expected. If it does happen, we |
@@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
297 | } | 282 | } |
298 | 283 | ||
299 | stm->accum_len += cand_len; | 284 | stm->accum_len += cand_len; |
285 | eaten += cand_len; | ||
300 | strp->need_bytes = stm->strp.full_len - | 286 | strp->need_bytes = stm->strp.full_len - |
301 | stm->accum_len; | 287 | stm->accum_len; |
302 | stm->early_eaten = cand_len; | ||
303 | STRP_STATS_ADD(strp->stats.bytes, cand_len); | 288 | STRP_STATS_ADD(strp->stats.bytes, cand_len); |
304 | desc->count = 0; /* Stop reading socket */ | 289 | desc->count = 0; /* Stop reading socket */ |
305 | break; | 290 | break; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c7bbe5f0aae8..4eece06be1e7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -6231,7 +6231,7 @@ do { \ | |||
6231 | nl80211_check_s32); | 6231 | nl80211_check_s32); |
6232 | /* | 6232 | /* |
6233 | * Check HT operation mode based on | 6233 | * Check HT operation mode based on |
6234 | * IEEE 802.11 2012 8.4.2.59 HT Operation element. | 6234 | * IEEE 802.11-2016 9.4.2.57 HT Operation element. |
6235 | */ | 6235 | */ |
6236 | if (tb[NL80211_MESHCONF_HT_OPMODE]) { | 6236 | if (tb[NL80211_MESHCONF_HT_OPMODE]) { |
6237 | ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); | 6237 | ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); |
@@ -6241,22 +6241,9 @@ do { \ | |||
6241 | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | 6241 | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) |
6242 | return -EINVAL; | 6242 | return -EINVAL; |
6243 | 6243 | ||
6244 | if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) && | 6244 | /* NON_HT_STA bit is reserved, but some programs set it */ |
6245 | (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | 6245 | ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; |
6246 | return -EINVAL; | ||
6247 | 6246 | ||
6248 | switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) { | ||
6249 | case IEEE80211_HT_OP_MODE_PROTECTION_NONE: | ||
6250 | case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: | ||
6251 | if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) | ||
6252 | return -EINVAL; | ||
6253 | break; | ||
6254 | case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: | ||
6255 | case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: | ||
6256 | if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | ||
6257 | return -EINVAL; | ||
6258 | break; | ||
6259 | } | ||
6260 | cfg->ht_opmode = ht_opmode; | 6247 | cfg->ht_opmode = ht_opmode; |
6261 | mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); | 6248 | mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); |
6262 | } | 6249 | } |
@@ -10962,9 +10949,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
10962 | rem) { | 10949 | rem) { |
10963 | u8 *mask_pat; | 10950 | u8 *mask_pat; |
10964 | 10951 | ||
10965 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, | 10952 | err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, |
10966 | nl80211_packet_pattern_policy, | 10953 | nl80211_packet_pattern_policy, |
10967 | info->extack); | 10954 | info->extack); |
10955 | if (err) | ||
10956 | goto error; | ||
10957 | |||
10968 | err = -EINVAL; | 10958 | err = -EINVAL; |
10969 | if (!pat_tb[NL80211_PKTPAT_MASK] || | 10959 | if (!pat_tb[NL80211_PKTPAT_MASK] || |
10970 | !pat_tb[NL80211_PKTPAT_PATTERN]) | 10960 | !pat_tb[NL80211_PKTPAT_PATTERN]) |
@@ -11213,8 +11203,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, | |||
11213 | rem) { | 11203 | rem) { |
11214 | u8 *mask_pat; | 11204 | u8 *mask_pat; |
11215 | 11205 | ||
11216 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, | 11206 | err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, |
11217 | nl80211_packet_pattern_policy, NULL); | 11207 | nl80211_packet_pattern_policy, NULL); |
11208 | if (err) | ||
11209 | return err; | ||
11210 | |||
11218 | if (!pat_tb[NL80211_PKTPAT_MASK] || | 11211 | if (!pat_tb[NL80211_PKTPAT_MASK] || |
11219 | !pat_tb[NL80211_PKTPAT_PATTERN]) | 11212 | !pat_tb[NL80211_PKTPAT_PATTERN]) |
11220 | return -EINVAL; | 11213 | return -EINVAL; |
diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c index 6673cdb9f55c..a7e94e7ff87d 100644 --- a/samples/bpf/xdp_fwd_kern.c +++ b/samples/bpf/xdp_fwd_kern.c | |||
@@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) | |||
48 | struct ethhdr *eth = data; | 48 | struct ethhdr *eth = data; |
49 | struct ipv6hdr *ip6h; | 49 | struct ipv6hdr *ip6h; |
50 | struct iphdr *iph; | 50 | struct iphdr *iph; |
51 | int out_index; | ||
52 | u16 h_proto; | 51 | u16 h_proto; |
53 | u64 nh_off; | 52 | u64 nh_off; |
53 | int rc; | ||
54 | 54 | ||
55 | nh_off = sizeof(*eth); | 55 | nh_off = sizeof(*eth); |
56 | if (data + nh_off > data_end) | 56 | if (data + nh_off > data_end) |
@@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) | |||
101 | 101 | ||
102 | fib_params.ifindex = ctx->ingress_ifindex; | 102 | fib_params.ifindex = ctx->ingress_ifindex; |
103 | 103 | ||
104 | out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); | 104 | rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); |
105 | 105 | ||
106 | /* verify egress index has xdp support | 106 | /* verify egress index has xdp support |
107 | * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with | 107 | * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with |
@@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) | |||
109 | * NOTE: without verification that egress index supports XDP | 109 | * NOTE: without verification that egress index supports XDP |
110 | * forwarding packets are dropped. | 110 | * forwarding packets are dropped. |
111 | */ | 111 | */ |
112 | if (out_index > 0) { | 112 | if (rc == 0) { |
113 | if (h_proto == htons(ETH_P_IP)) | 113 | if (h_proto == htons(ETH_P_IP)) |
114 | ip_decrease_ttl(iph); | 114 | ip_decrease_ttl(iph); |
115 | else if (h_proto == htons(ETH_P_IPV6)) | 115 | else if (h_proto == htons(ETH_P_IPV6)) |
@@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) | |||
117 | 117 | ||
118 | memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN); | 118 | memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN); |
119 | memcpy(eth->h_source, fib_params.smac, ETH_ALEN); | 119 | memcpy(eth->h_source, fib_params.smac, ETH_ALEN); |
120 | return bpf_redirect_map(&tx_port, out_index, 0); | 120 | return bpf_redirect_map(&tx_port, fib_params.ifindex, 0); |
121 | } | 121 | } |
122 | 122 | ||
123 | return XDP_PASS; | 123 | return XDP_PASS; |
diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index 2960e26c6ea4..2535c3677c7b 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c | |||
@@ -178,6 +178,8 @@ static const char *vbe_name(u32 index) | |||
178 | return "(invalid)"; | 178 | return "(invalid)"; |
179 | } | 179 | } |
180 | 180 | ||
181 | static struct page *__mbochs_get_page(struct mdev_state *mdev_state, | ||
182 | pgoff_t pgoff); | ||
181 | static struct page *mbochs_get_page(struct mdev_state *mdev_state, | 183 | static struct page *mbochs_get_page(struct mdev_state *mdev_state, |
182 | pgoff_t pgoff); | 184 | pgoff_t pgoff); |
183 | 185 | ||
@@ -394,7 +396,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count, | |||
394 | MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) { | 396 | MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) { |
395 | pos -= MBOCHS_MMIO_BAR_OFFSET; | 397 | pos -= MBOCHS_MMIO_BAR_OFFSET; |
396 | poff = pos & ~PAGE_MASK; | 398 | poff = pos & ~PAGE_MASK; |
397 | pg = mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); | 399 | pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); |
398 | map = kmap(pg); | 400 | map = kmap(pg); |
399 | if (is_write) | 401 | if (is_write) |
400 | memcpy(map + poff, buf, count); | 402 | memcpy(map + poff, buf, count); |
@@ -657,7 +659,7 @@ static void mbochs_put_pages(struct mdev_state *mdev_state) | |||
657 | dev_dbg(dev, "%s: %d pages released\n", __func__, count); | 659 | dev_dbg(dev, "%s: %d pages released\n", __func__, count); |
658 | } | 660 | } |
659 | 661 | ||
660 | static int mbochs_region_vm_fault(struct vm_fault *vmf) | 662 | static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf) |
661 | { | 663 | { |
662 | struct vm_area_struct *vma = vmf->vma; | 664 | struct vm_area_struct *vma = vmf->vma; |
663 | struct mdev_state *mdev_state = vma->vm_private_data; | 665 | struct mdev_state *mdev_state = vma->vm_private_data; |
@@ -695,7 +697,7 @@ static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
695 | return 0; | 697 | return 0; |
696 | } | 698 | } |
697 | 699 | ||
698 | static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf) | 700 | static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf) |
699 | { | 701 | { |
700 | struct vm_area_struct *vma = vmf->vma; | 702 | struct vm_area_struct *vma = vmf->vma; |
701 | struct mbochs_dmabuf *dmabuf = vma->vm_private_data; | 703 | struct mbochs_dmabuf *dmabuf = vma->vm_private_data; |
@@ -803,29 +805,26 @@ static void mbochs_release_dmabuf(struct dma_buf *buf) | |||
803 | mutex_unlock(&mdev_state->ops_lock); | 805 | mutex_unlock(&mdev_state->ops_lock); |
804 | } | 806 | } |
805 | 807 | ||
806 | static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf, | 808 | static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num) |
807 | unsigned long page_num) | ||
808 | { | 809 | { |
809 | struct mbochs_dmabuf *dmabuf = buf->priv; | 810 | struct mbochs_dmabuf *dmabuf = buf->priv; |
810 | struct page *page = dmabuf->pages[page_num]; | 811 | struct page *page = dmabuf->pages[page_num]; |
811 | 812 | ||
812 | return kmap_atomic(page); | 813 | return kmap(page); |
813 | } | 814 | } |
814 | 815 | ||
815 | static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num) | 816 | static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num, |
817 | void *vaddr) | ||
816 | { | 818 | { |
817 | struct mbochs_dmabuf *dmabuf = buf->priv; | 819 | kunmap(vaddr); |
818 | struct page *page = dmabuf->pages[page_num]; | ||
819 | |||
820 | return kmap(page); | ||
821 | } | 820 | } |
822 | 821 | ||
823 | static struct dma_buf_ops mbochs_dmabuf_ops = { | 822 | static struct dma_buf_ops mbochs_dmabuf_ops = { |
824 | .map_dma_buf = mbochs_map_dmabuf, | 823 | .map_dma_buf = mbochs_map_dmabuf, |
825 | .unmap_dma_buf = mbochs_unmap_dmabuf, | 824 | .unmap_dma_buf = mbochs_unmap_dmabuf, |
826 | .release = mbochs_release_dmabuf, | 825 | .release = mbochs_release_dmabuf, |
827 | .map_atomic = mbochs_kmap_atomic_dmabuf, | ||
828 | .map = mbochs_kmap_dmabuf, | 826 | .map = mbochs_kmap_dmabuf, |
827 | .unmap = mbochs_kunmap_dmabuf, | ||
829 | .mmap = mbochs_mmap_dmabuf, | 828 | .mmap = mbochs_mmap_dmabuf, |
830 | }; | 829 | }; |
831 | 830 | ||
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c8156d61678c..86321f06461e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include | |||
@@ -214,7 +214,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj | |||
214 | # Prefix -I with $(srctree) if it is not an absolute path. | 214 | # Prefix -I with $(srctree) if it is not an absolute path. |
215 | # skip if -I has no parameter | 215 | # skip if -I has no parameter |
216 | addtree = $(if $(patsubst -I%,%,$(1)), \ | 216 | addtree = $(if $(patsubst -I%,%,$(1)), \ |
217 | $(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1))) | 217 | $(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)),$(1)) |
218 | 218 | ||
219 | # Find all -I options and call addtree | 219 | # Find all -I options and call addtree |
220 | flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o))) | 220 | flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o))) |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index e7889f486ca1..514ed63ff571 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -590,7 +590,4 @@ endif | |||
590 | # We never want them to be removed automatically. | 590 | # We never want them to be removed automatically. |
591 | .SECONDARY: $(targets) | 591 | .SECONDARY: $(targets) |
592 | 592 | ||
593 | # Declare the contents of the .PHONY variable as phony. We keep that | ||
594 | # information in a variable se we can use it in if_changed and friends. | ||
595 | |||
596 | .PHONY: $(PHONY) | 593 | .PHONY: $(PHONY) |
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean index 808d09f27ad4..17ef94c635cd 100644 --- a/scripts/Makefile.clean +++ b/scripts/Makefile.clean | |||
@@ -88,7 +88,4 @@ PHONY += $(subdir-ymn) | |||
88 | $(subdir-ymn): | 88 | $(subdir-ymn): |
89 | $(Q)$(MAKE) $(clean)=$@ | 89 | $(Q)$(MAKE) $(clean)=$@ |
90 | 90 | ||
91 | # Declare the contents of the .PHONY variable as phony. We keep that | ||
92 | # information in a variable se we can use it in if_changed and friends. | ||
93 | |||
94 | .PHONY: $(PHONY) | 91 | .PHONY: $(PHONY) |
diff --git a/scripts/Makefile.modbuiltin b/scripts/Makefile.modbuiltin index a763b4775d06..40867a41615b 100644 --- a/scripts/Makefile.modbuiltin +++ b/scripts/Makefile.modbuiltin | |||
@@ -54,8 +54,4 @@ PHONY += $(subdir-ym) | |||
54 | $(subdir-ym): | 54 | $(subdir-ym): |
55 | $(Q)$(MAKE) $(modbuiltin)=$@ | 55 | $(Q)$(MAKE) $(modbuiltin)=$@ |
56 | 56 | ||
57 | |||
58 | # Declare the contents of the .PHONY variable as phony. We keep that | ||
59 | # information in a variable se we can use it in if_changed and friends. | ||
60 | |||
61 | .PHONY: $(PHONY) | 57 | .PHONY: $(PHONY) |
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst index 51ca0244fc8a..ff5ca9817a85 100644 --- a/scripts/Makefile.modinst +++ b/scripts/Makefile.modinst | |||
@@ -35,8 +35,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) | |||
35 | $(modules): | 35 | $(modules): |
36 | $(call cmd,modules_install,$(MODLIB)/$(modinst_dir)) | 36 | $(call cmd,modules_install,$(MODLIB)/$(modinst_dir)) |
37 | 37 | ||
38 | |||
39 | # Declare the contents of the .PHONY variable as phony. We keep that | ||
40 | # information in a variable so we can use it in if_changed and friends. | ||
41 | |||
42 | .PHONY: $(PHONY) | 38 | .PHONY: $(PHONY) |
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index df4174405feb..dd92dbbbaa68 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost | |||
@@ -149,8 +149,4 @@ ifneq ($(cmd_files),) | |||
149 | include $(cmd_files) | 149 | include $(cmd_files) |
150 | endif | 150 | endif |
151 | 151 | ||
152 | |||
153 | # Declare the contents of the .PHONY variable as phony. We keep that | ||
154 | # information in a variable se we can use it in if_changed and friends. | ||
155 | |||
156 | .PHONY: $(PHONY) | 152 | .PHONY: $(PHONY) |
diff --git a/scripts/Makefile.modsign b/scripts/Makefile.modsign index 171483bc0538..da56aa78d245 100644 --- a/scripts/Makefile.modsign +++ b/scripts/Makefile.modsign | |||
@@ -27,7 +27,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) | |||
27 | $(modules): | 27 | $(modules): |
28 | $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir)) | 28 | $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir)) |
29 | 29 | ||
30 | # Declare the contents of the .PHONY variable as phony. We keep that | ||
31 | # information in a variable se we can use it in if_changed and friends. | ||
32 | |||
33 | .PHONY: $(PHONY) | 30 | .PHONY: $(PHONY) |
diff --git a/scripts/cc-can-link.sh b/scripts/cc-can-link.sh index 208eb2825dab..6efcead31989 100755 --- a/scripts/cc-can-link.sh +++ b/scripts/cc-can-link.sh | |||
@@ -1,7 +1,7 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y" | 4 | cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 |
5 | #include <stdio.h> | 5 | #include <stdio.h> |
6 | int main(void) | 6 | int main(void) |
7 | { | 7 | { |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index a9c05506e325..447857ffaf6b 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -5813,14 +5813,14 @@ sub process { | |||
5813 | defined $stat && | 5813 | defined $stat && |
5814 | $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s && | 5814 | $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s && |
5815 | $1 !~ /^_*volatile_*$/) { | 5815 | $1 !~ /^_*volatile_*$/) { |
5816 | my $specifier; | ||
5817 | my $extension; | ||
5818 | my $bad_specifier = ""; | ||
5819 | my $stat_real; | 5816 | my $stat_real; |
5820 | 5817 | ||
5821 | my $lc = $stat =~ tr@\n@@; | 5818 | my $lc = $stat =~ tr@\n@@; |
5822 | $lc = $lc + $linenr; | 5819 | $lc = $lc + $linenr; |
5823 | for (my $count = $linenr; $count <= $lc; $count++) { | 5820 | for (my $count = $linenr; $count <= $lc; $count++) { |
5821 | my $specifier; | ||
5822 | my $extension; | ||
5823 | my $bad_specifier = ""; | ||
5824 | my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0)); | 5824 | my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0)); |
5825 | $fmt =~ s/%%//g; | 5825 | $fmt =~ s/%%//g; |
5826 | 5826 | ||
diff --git a/scripts/extract-vmlinux b/scripts/extract-vmlinux index 5061abcc2540..e6239f39abad 100755 --- a/scripts/extract-vmlinux +++ b/scripts/extract-vmlinux | |||
@@ -57,6 +57,8 @@ try_decompress '\3757zXZ\000' abcde unxz | |||
57 | try_decompress 'BZh' xy bunzip2 | 57 | try_decompress 'BZh' xy bunzip2 |
58 | try_decompress '\135\0\0\0' xxx unlzma | 58 | try_decompress '\135\0\0\0' xxx unlzma |
59 | try_decompress '\211\114\132' xy 'lzop -d' | 59 | try_decompress '\211\114\132' xy 'lzop -d' |
60 | try_decompress '\002!L\030' xxx 'lz4 -d' | ||
61 | try_decompress '(\265/\375' xxx unzstd | ||
60 | 62 | ||
61 | # Bail out: | 63 | # Bail out: |
62 | echo "$me: Cannot find vmlinux." >&2 | 64 | echo "$me: Cannot find vmlinux." >&2 |
diff --git a/scripts/tags.sh b/scripts/tags.sh index 66f08bb1cce9..412a70cce558 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh | |||
@@ -245,7 +245,7 @@ exuberant() | |||
245 | { | 245 | { |
246 | setup_regex exuberant asm c | 246 | setup_regex exuberant asm c |
247 | all_target_sources | xargs $1 -a \ | 247 | all_target_sources | xargs $1 -a \ |
248 | -I __initdata,__exitdata,__initconst, \ | 248 | -I __initdata,__exitdata,__initconst,__ro_after_init \ |
249 | -I __initdata_memblock \ | 249 | -I __initdata_memblock \ |
250 | -I __refdata,__attribute,__maybe_unused,__always_unused \ | 250 | -I __refdata,__attribute,__maybe_unused,__always_unused \ |
251 | -I __acquires,__releases,__deprecated \ | 251 | -I __acquires,__releases,__deprecated \ |
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 4ff5320378e2..321e95c409c1 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c | |||
@@ -1048,7 +1048,8 @@ static const struct snd_pci_quirk ca0132_quirks[] = { | |||
1048 | SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), | 1048 | SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), |
1049 | SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), | 1049 | SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), |
1050 | SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), | 1050 | SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), |
1051 | SND_PCI_QUIRK(0x1458, 0xA036, "Recon3Di", QUIRK_R3DI), | 1051 | SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), |
1052 | SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), | ||
1052 | {} | 1053 | {} |
1053 | }; | 1054 | }; |
1054 | 1055 | ||
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 98e1c411c56a..8a49415aebac 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/pm_runtime.h> | ||
36 | #include <sound/core.h> | 37 | #include <sound/core.h> |
37 | #include <sound/jack.h> | 38 | #include <sound/jack.h> |
38 | #include <sound/asoundef.h> | 39 | #include <sound/asoundef.h> |
@@ -764,8 +765,10 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid, | |||
764 | 765 | ||
765 | if (pin_idx < 0) | 766 | if (pin_idx < 0) |
766 | return; | 767 | return; |
768 | mutex_lock(&spec->pcm_lock); | ||
767 | if (hdmi_present_sense(get_pin(spec, pin_idx), 1)) | 769 | if (hdmi_present_sense(get_pin(spec, pin_idx), 1)) |
768 | snd_hda_jack_report_sync(codec); | 770 | snd_hda_jack_report_sync(codec); |
771 | mutex_unlock(&spec->pcm_lock); | ||
769 | } | 772 | } |
770 | 773 | ||
771 | static void jack_callback(struct hda_codec *codec, | 774 | static void jack_callback(struct hda_codec *codec, |
@@ -1628,21 +1631,23 @@ static void sync_eld_via_acomp(struct hda_codec *codec, | |||
1628 | static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) | 1631 | static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) |
1629 | { | 1632 | { |
1630 | struct hda_codec *codec = per_pin->codec; | 1633 | struct hda_codec *codec = per_pin->codec; |
1631 | struct hdmi_spec *spec = codec->spec; | ||
1632 | int ret; | 1634 | int ret; |
1633 | 1635 | ||
1634 | /* no temporary power up/down needed for component notifier */ | 1636 | /* no temporary power up/down needed for component notifier */ |
1635 | if (!codec_has_acomp(codec)) | 1637 | if (!codec_has_acomp(codec)) { |
1636 | snd_hda_power_up_pm(codec); | 1638 | ret = snd_hda_power_up_pm(codec); |
1639 | if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) { | ||
1640 | snd_hda_power_down_pm(codec); | ||
1641 | return false; | ||
1642 | } | ||
1643 | } | ||
1637 | 1644 | ||
1638 | mutex_lock(&spec->pcm_lock); | ||
1639 | if (codec_has_acomp(codec)) { | 1645 | if (codec_has_acomp(codec)) { |
1640 | sync_eld_via_acomp(codec, per_pin); | 1646 | sync_eld_via_acomp(codec, per_pin); |
1641 | ret = false; /* don't call snd_hda_jack_report_sync() */ | 1647 | ret = false; /* don't call snd_hda_jack_report_sync() */ |
1642 | } else { | 1648 | } else { |
1643 | ret = hdmi_present_sense_via_verbs(per_pin, repoll); | 1649 | ret = hdmi_present_sense_via_verbs(per_pin, repoll); |
1644 | } | 1650 | } |
1645 | mutex_unlock(&spec->pcm_lock); | ||
1646 | 1651 | ||
1647 | if (!codec_has_acomp(codec)) | 1652 | if (!codec_has_acomp(codec)) |
1648 | snd_hda_power_down_pm(codec); | 1653 | snd_hda_power_down_pm(codec); |
@@ -1654,12 +1659,16 @@ static void hdmi_repoll_eld(struct work_struct *work) | |||
1654 | { | 1659 | { |
1655 | struct hdmi_spec_per_pin *per_pin = | 1660 | struct hdmi_spec_per_pin *per_pin = |
1656 | container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work); | 1661 | container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work); |
1662 | struct hda_codec *codec = per_pin->codec; | ||
1663 | struct hdmi_spec *spec = codec->spec; | ||
1657 | 1664 | ||
1658 | if (per_pin->repoll_count++ > 6) | 1665 | if (per_pin->repoll_count++ > 6) |
1659 | per_pin->repoll_count = 0; | 1666 | per_pin->repoll_count = 0; |
1660 | 1667 | ||
1668 | mutex_lock(&spec->pcm_lock); | ||
1661 | if (hdmi_present_sense(per_pin, per_pin->repoll_count)) | 1669 | if (hdmi_present_sense(per_pin, per_pin->repoll_count)) |
1662 | snd_hda_jack_report_sync(per_pin->codec); | 1670 | snd_hda_jack_report_sync(per_pin->codec); |
1671 | mutex_unlock(&spec->pcm_lock); | ||
1663 | } | 1672 | } |
1664 | 1673 | ||
1665 | static void intel_haswell_fixup_connect_list(struct hda_codec *codec, | 1674 | static void intel_haswell_fixup_connect_list(struct hda_codec *codec, |
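Note on the patch_hdmi.c hunks above: hdmi_present_sense() no longer takes spec->pcm_lock itself; each caller now holds the lock across the call, and the power-up path bails out if the codec is runtime-suspended. A toy userspace model of the "lock in the caller, not the callee" shape (function names mirror the driver, but the code is purely illustrative, not the driver's logic):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pcm_lock = PTHREAD_MUTEX_INITIALIZER;

/* present_sense() no longer locks internally; callers must hold pcm_lock. */
static int present_sense(int pin, int repoll)
{
	(void)repoll;
	return pin > 0;          /* pretend any valid pin reports a plugged jack */
}

static void check_presence_and_report(int pin)
{
	pthread_mutex_lock(&pcm_lock);
	if (present_sense(pin, 1))
		printf("pin %d: reporting jack state\n", pin);
	pthread_mutex_unlock(&pcm_lock);
}

int main(void)
{
	check_presence_and_report(3);
	return 0;
}

Holding the lock in the caller lets the presence check and the follow-up report happen in one critical section, which is the point of the move.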
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5ad6c7e5f92e..7496be4491b1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -6612,7 +6612,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6612 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6612 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
6613 | SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6613 | SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
6614 | SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6614 | SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
6615 | SND_PCI_QUIRK(0x17aa, 0x3136, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | ||
6616 | SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6615 | SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
6617 | SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), | 6616 | SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), |
6618 | SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), | 6617 | SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), |
@@ -6796,6 +6795,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
6796 | {0x1a, 0x02a11040}, | 6795 | {0x1a, 0x02a11040}, |
6797 | {0x1b, 0x01014020}, | 6796 | {0x1b, 0x01014020}, |
6798 | {0x21, 0x0221101f}), | 6797 | {0x21, 0x0221101f}), |
6798 | SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, | ||
6799 | {0x14, 0x90170110}, | ||
6800 | {0x19, 0x02a11020}, | ||
6801 | {0x1a, 0x02a11030}, | ||
6802 | {0x21, 0x0221101f}), | ||
6799 | SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | 6803 | SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
6800 | {0x12, 0x90a60140}, | 6804 | {0x12, 0x90a60140}, |
6801 | {0x14, 0x90170110}, | 6805 | {0x14, 0x90170110}, |
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 05f42a46d6ed..959aa53ab678 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c | |||
@@ -694,15 +694,19 @@ static int do_load(int argc, char **argv) | |||
694 | return -1; | 694 | return -1; |
695 | } | 695 | } |
696 | 696 | ||
697 | if (do_pin_fd(prog_fd, argv[1])) { | 697 | if (do_pin_fd(prog_fd, argv[1])) |
698 | p_err("failed to pin program"); | 698 | goto err_close_obj; |
699 | return -1; | ||
700 | } | ||
701 | 699 | ||
702 | if (json_output) | 700 | if (json_output) |
703 | jsonw_null(json_wtr); | 701 | jsonw_null(json_wtr); |
704 | 702 | ||
703 | bpf_object__close(obj); | ||
704 | |||
705 | return 0; | 705 | return 0; |
706 | |||
707 | err_close_obj: | ||
708 | bpf_object__close(obj); | ||
709 | return -1; | ||
706 | } | 710 | } |
707 | 711 | ||
708 | static int do_help(int argc, char **argv) | 712 | static int do_help(int argc, char **argv) |
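The bpftool change above replaces the ad-hoc early return with a single err_close_obj label so the opened object is released on both the success and the failure path. A minimal sketch of that goto-cleanup idiom; open_object(), pin_object() and close_object() are stand-ins invented here for the real bpf_object helpers:

#include <stdio.h>
#include <stdlib.h>

static void *open_object(const char *path)            { (void)path; return malloc(1); }
static void  close_object(void *obj)                   { free(obj); }
static int   pin_object(void *obj, const char *path)   { (void)obj; return path[0] == '\0'; }

static int load_and_pin(const char *path)
{
	void *obj = open_object(path);

	if (!obj)
		return -1;

	if (pin_object(obj, path))
		goto err_close_obj;         /* any later failure funnels through one label */

	close_object(obj);                  /* success path also releases the object */
	return 0;

err_close_obj:
	close_object(obj);
	return -1;
}

int main(void)
{
	printf("load_and_pin: %d\n", load_and_pin("/sys/fs/bpf/prog"));
	return 0;
}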
diff --git a/tools/build/Build.include b/tools/build/Build.include index a4bbb984941d..950c1504ca37 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include | |||
@@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), | |||
63 | $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \ | 63 | $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \ |
64 | rm -f $(depfile); \ | 64 | rm -f $(depfile); \ |
65 | mv -f $(dot-target).tmp $(dot-target).cmd, \ | 65 | mv -f $(dot-target).tmp $(dot-target).cmd, \ |
66 | printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ | 66 | printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ |
67 | printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \ | 67 | printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \ |
68 | cat $(depfile) >> $(dot-target).cmd; \ | 68 | cat $(depfile) >> $(dot-target).cmd; \ |
69 | printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd) | 69 | printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd) |
70 | 70 | ||
@@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX | |||
98 | ### | 98 | ### |
99 | ## HOSTCC C flags | 99 | ## HOSTCC C flags |
100 | 100 | ||
101 | host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj)) | 101 | host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj)) |
diff --git a/tools/build/Makefile b/tools/build/Makefile index 5eb4b5ad79cb..5edf65e684ab 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile | |||
@@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE | |||
43 | $(Q)$(MAKE) $(build)=fixdep | 43 | $(Q)$(MAKE) $(build)=fixdep |
44 | 44 | ||
45 | $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o | 45 | $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o |
46 | $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $< | 46 | $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $< |
47 | 47 | ||
48 | FORCE: | 48 | FORCE: |
49 | 49 | ||
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 4e60e105583e..0d1acb704f64 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c | |||
@@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf) | |||
302 | continue; | 302 | continue; |
303 | sym->pfunc = sym->cfunc = sym; | 303 | sym->pfunc = sym->cfunc = sym; |
304 | coldstr = strstr(sym->name, ".cold."); | 304 | coldstr = strstr(sym->name, ".cold."); |
305 | if (coldstr) { | 305 | if (!coldstr) |
306 | coldstr[0] = '\0'; | 306 | continue; |
307 | pfunc = find_symbol_by_name(elf, sym->name); | 307 | |
308 | coldstr[0] = '.'; | 308 | coldstr[0] = '\0'; |
309 | 309 | pfunc = find_symbol_by_name(elf, sym->name); | |
310 | if (!pfunc) { | 310 | coldstr[0] = '.'; |
311 | WARN("%s(): can't find parent function", | 311 | |
312 | sym->name); | 312 | if (!pfunc) { |
313 | goto err; | 313 | WARN("%s(): can't find parent function", |
314 | } | 314 | sym->name); |
315 | 315 | goto err; | |
316 | sym->pfunc = pfunc; | 316 | } |
317 | pfunc->cfunc = sym; | 317 | |
318 | sym->pfunc = pfunc; | ||
319 | pfunc->cfunc = sym; | ||
320 | |||
321 | /* | ||
322 | * Unfortunately, -fnoreorder-functions puts the child | ||
323 | * inside the parent. Remove the overlap so we can | ||
324 | * have sane assumptions. | ||
325 | * | ||
326 | * Note that pfunc->len now no longer matches | ||
327 | * pfunc->sym.st_size. | ||
328 | */ | ||
329 | if (sym->sec == pfunc->sec && | ||
330 | sym->offset >= pfunc->offset && | ||
331 | sym->offset + sym->len == pfunc->offset + pfunc->len) { | ||
332 | pfunc->len -= sym->len; | ||
318 | } | 333 | } |
319 | } | 334 | } |
320 | } | 335 | } |
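The objtool hunk above flattens the .cold.-suffix handling and, when GCC places a cold child symbol at the tail of its parent, trims the parent's length so the two ranges no longer overlap. A toy model of that range adjustment (offsets and lengths are made up, and the same-section check is folded into the comment):

#include <stdio.h>

struct sym { unsigned long offset, len; };

int main(void)
{
	struct sym pfunc = { .offset = 0x1000, .len = 0x300 };   /* foo()        */
	struct sym cfunc = { .offset = 0x1200, .len = 0x100 };   /* foo.cold.0() */

	/* Same test as the hunk: if the child (in the same section) ends exactly
	 * where the parent ends, shrinking the parent removes the overlap. */
	if (cfunc.offset >= pfunc.offset &&
	    cfunc.offset + cfunc.len == pfunc.offset + pfunc.len)
		pfunc.len -= cfunc.len;

	printf("parent now covers [%#lx, %#lx)\n",
	       pfunc.offset, pfunc.offset + pfunc.len);
	return 0;
}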
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index b5ac356ba323..f5a3b402589e 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config | |||
@@ -207,8 +207,7 @@ ifdef PYTHON_CONFIG | |||
207 | PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) | 207 | PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) |
208 | PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) | 208 | PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) |
209 | PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil | 209 | PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil |
210 | PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) | 210 | PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null) |
211 | PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS)) | ||
212 | FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) | 211 | FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) |
213 | endif | 212 | endif |
214 | 213 | ||
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c index 4b2caf6d48e7..fead6b3b4206 100644 --- a/tools/perf/arch/x86/util/perf_regs.c +++ b/tools/perf/arch/x86/util/perf_regs.c | |||
@@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op) | |||
226 | else if (rm[2].rm_so != rm[2].rm_eo) | 226 | else if (rm[2].rm_so != rm[2].rm_eo) |
227 | prefix[0] = '+'; | 227 | prefix[0] = '+'; |
228 | else | 228 | else |
229 | strncpy(prefix, "+0", 2); | 229 | scnprintf(prefix, sizeof(prefix), "+0"); |
230 | } | 230 | } |
231 | 231 | ||
232 | /* Rename register */ | 232 | /* Rename register */ |
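The perf_regs.c hunk above swaps strncpy() for a bounded, NUL-terminating formatter: strncpy(prefix, "+0", 2) copies exactly two bytes and never writes the terminator. A small demonstration; perf's scnprintf() differs from snprintf() only in its return value, so snprintf() stands in here:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char prefix[20];

	memset(prefix, 'X', sizeof(prefix));
	strncpy(prefix, "+0", 2);               /* copies '+','0' only: no NUL written */
	prefix[sizeof(prefix) - 1] = '\0';      /* added only so the broken result prints safely */
	printf("after strncpy : \"%s\"\n", prefix);

	memset(prefix, 'X', sizeof(prefix));
	snprintf(prefix, sizeof(prefix), "+0"); /* always NUL-terminates within the buffer */
	printf("after snprintf: \"%s\"\n", prefix);
	return 0;
}

The first line prints "+0" followed by the leftover 'X' padding; the second prints just "+0", which is the behaviour the patch wants.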
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 22547a490e1f..05be023c3f0e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -1742,7 +1742,7 @@ static void print_interval(char *prefix, struct timespec *ts) | |||
1742 | } | 1742 | } |
1743 | } | 1743 | } |
1744 | 1744 | ||
1745 | if ((num_print_interval == 0 && metric_only) || interval_clear) | 1745 | if ((num_print_interval == 0 || interval_clear) && metric_only) |
1746 | print_metric_headers(" ", true); | 1746 | print_metric_headers(" ", true); |
1747 | if (++num_print_interval == 25) | 1747 | if (++num_print_interval == 25) |
1748 | num_print_interval = 0; | 1748 | num_print_interval = 0; |
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c index 0c6d1002b524..ac1bcdc17dae 100644 --- a/tools/perf/jvmti/jvmti_agent.c +++ b/tools/perf/jvmti/jvmti_agent.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <sys/mman.h> | 35 | #include <sys/mman.h> |
36 | #include <syscall.h> /* for gettid() */ | 36 | #include <syscall.h> /* for gettid() */ |
37 | #include <err.h> | 37 | #include <err.h> |
38 | #include <linux/kernel.h> | ||
38 | 39 | ||
39 | #include "jvmti_agent.h" | 40 | #include "jvmti_agent.h" |
40 | #include "../util/jitdump.h" | 41 | #include "../util/jitdump.h" |
@@ -249,7 +250,7 @@ void *jvmti_open(void) | |||
249 | /* | 250 | /* |
250 | * jitdump file name | 251 | * jitdump file name |
251 | */ | 252 | */ |
252 | snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); | 253 | scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); |
253 | 254 | ||
254 | fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666); | 255 | fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666); |
255 | if (fd == -1) | 256 | if (fd == -1) |
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build index 17783913d330..215ba30b8534 100644 --- a/tools/perf/pmu-events/Build +++ b/tools/perf/pmu-events/Build | |||
@@ -1,7 +1,7 @@ | |||
1 | hostprogs := jevents | 1 | hostprogs := jevents |
2 | 2 | ||
3 | jevents-y += json.o jsmn.o jevents.o | 3 | jevents-y += json.o jsmn.o jevents.o |
4 | CHOSTFLAGS_jevents.o = -I$(srctree)/tools/include | 4 | HOSTCFLAGS_jevents.o = -I$(srctree)/tools/include |
5 | pmu-events-y += pmu-events.o | 5 | pmu-events-y += pmu-events.o |
6 | JDIR = pmu-events/arch/$(SRCARCH) | 6 | JDIR = pmu-events/arch/$(SRCARCH) |
7 | JSON = $(shell [ -d $(JDIR) ] && \ | 7 | JSON = $(shell [ -d $(JDIR) ] && \ |
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py index 38dfb720fb6f..54ace2f6bc36 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | |||
@@ -31,10 +31,8 @@ def flag_str(event_name, field_name, value): | |||
31 | string = "" | 31 | string = "" |
32 | 32 | ||
33 | if flag_fields[event_name][field_name]: | 33 | if flag_fields[event_name][field_name]: |
34 | print_delim = 0 | 34 | print_delim = 0 |
35 | keys = flag_fields[event_name][field_name]['values'].keys() | 35 | for idx in sorted(flag_fields[event_name][field_name]['values']): |
36 | keys.sort() | ||
37 | for idx in keys: | ||
38 | if not value and not idx: | 36 | if not value and not idx: |
39 | string += flag_fields[event_name][field_name]['values'][idx] | 37 | string += flag_fields[event_name][field_name]['values'][idx] |
40 | break | 38 | break |
@@ -51,14 +49,12 @@ def symbol_str(event_name, field_name, value): | |||
51 | string = "" | 49 | string = "" |
52 | 50 | ||
53 | if symbolic_fields[event_name][field_name]: | 51 | if symbolic_fields[event_name][field_name]: |
54 | keys = symbolic_fields[event_name][field_name]['values'].keys() | 52 | for idx in sorted(symbolic_fields[event_name][field_name]['values']): |
55 | keys.sort() | ||
56 | for idx in keys: | ||
57 | if not value and not idx: | 53 | if not value and not idx: |
58 | string = symbolic_fields[event_name][field_name]['values'][idx] | 54 | string = symbolic_fields[event_name][field_name]['values'][idx] |
59 | break | 55 | break |
60 | if (value == idx): | 56 | if (value == idx): |
61 | string = symbolic_fields[event_name][field_name]['values'][idx] | 57 | string = symbolic_fields[event_name][field_name]['values'][idx] |
62 | break | 58 | break |
63 | 59 | ||
64 | return string | 60 | return string |
@@ -74,19 +70,17 @@ def trace_flag_str(value): | |||
74 | string = "" | 70 | string = "" |
75 | print_delim = 0 | 71 | print_delim = 0 |
76 | 72 | ||
77 | keys = trace_flags.keys() | 73 | for idx in trace_flags: |
78 | 74 | if not value and not idx: | |
79 | for idx in keys: | 75 | string += "NONE" |
80 | if not value and not idx: | 76 | break |
81 | string += "NONE" | 77 | |
82 | break | 78 | if idx and (value & idx) == idx: |
83 | 79 | if print_delim: | |
84 | if idx and (value & idx) == idx: | 80 | string += " | "; |
85 | if print_delim: | 81 | string += trace_flags[idx] |
86 | string += " | "; | 82 | print_delim = 1 |
87 | string += trace_flags[idx] | 83 | value &= ~idx |
88 | print_delim = 1 | ||
89 | value &= ~idx | ||
90 | 84 | ||
91 | return string | 85 | return string |
92 | 86 | ||
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py index 81a56cd2b3c1..21a7a1298094 100755 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | |||
@@ -8,6 +8,7 @@ | |||
8 | # PerfEvent is the base class for all perf event sample, PebsEvent | 8 | # PerfEvent is the base class for all perf event sample, PebsEvent |
9 | # is a HW base Intel x86 PEBS event, and user could add more SW/HW | 9 | # is a HW base Intel x86 PEBS event, and user could add more SW/HW |
10 | # event classes based on requirements. | 10 | # event classes based on requirements. |
11 | from __future__ import print_function | ||
11 | 12 | ||
12 | import struct | 13 | import struct |
13 | 14 | ||
@@ -44,7 +45,8 @@ class PerfEvent(object): | |||
44 | PerfEvent.event_num += 1 | 45 | PerfEvent.event_num += 1 |
45 | 46 | ||
46 | def show(self): | 47 | def show(self): |
47 | print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso) | 48 | print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % |
49 | (self.name, self.symbol, self.comm, self.dso)) | ||
48 | 50 | ||
49 | # | 51 | # |
50 | # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer | 52 | # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer |
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py index fdd92f699055..cac7b2542ee8 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | |||
@@ -11,7 +11,7 @@ | |||
11 | try: | 11 | try: |
12 | import wx | 12 | import wx |
13 | except ImportError: | 13 | except ImportError: |
14 | raise ImportError, "You need to install the wxpython lib for this script" | 14 | raise ImportError("You need to install the wxpython lib for this script") |
15 | 15 | ||
16 | 16 | ||
17 | class RootFrame(wx.Frame): | 17 | class RootFrame(wx.Frame): |
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py index f6c84966e4f8..7384dcb628c4 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | |||
@@ -5,6 +5,7 @@ | |||
5 | # This software may be distributed under the terms of the GNU General | 5 | # This software may be distributed under the terms of the GNU General |
6 | # Public License ("GPL") version 2 as published by the Free Software | 6 | # Public License ("GPL") version 2 as published by the Free Software |
7 | # Foundation. | 7 | # Foundation. |
8 | from __future__ import print_function | ||
8 | 9 | ||
9 | import errno, os | 10 | import errno, os |
10 | 11 | ||
@@ -33,7 +34,7 @@ def nsecs_str(nsecs): | |||
33 | return str | 34 | return str |
34 | 35 | ||
35 | def add_stats(dict, key, value): | 36 | def add_stats(dict, key, value): |
36 | if not dict.has_key(key): | 37 | if key not in dict: |
37 | dict[key] = (value, value, value, 1) | 38 | dict[key] = (value, value, value, 1) |
38 | else: | 39 | else: |
39 | min, max, avg, count = dict[key] | 40 | min, max, avg, count = dict[key] |
@@ -72,10 +73,10 @@ try: | |||
72 | except: | 73 | except: |
73 | if not audit_package_warned: | 74 | if not audit_package_warned: |
74 | audit_package_warned = True | 75 | audit_package_warned = True |
75 | print "Install the audit-libs-python package to get syscall names.\n" \ | 76 | print("Install the audit-libs-python package to get syscall names.\n" |
76 | "For example:\n # apt-get install python-audit (Ubuntu)" \ | 77 | "For example:\n # apt-get install python-audit (Ubuntu)" |
77 | "\n # yum install audit-libs-python (Fedora)" \ | 78 | "\n # yum install audit-libs-python (Fedora)" |
78 | "\n etc.\n" | 79 | "\n etc.\n") |
79 | 80 | ||
80 | def syscall_name(id): | 81 | def syscall_name(id): |
81 | try: | 82 | try: |
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py index de66cb3b72c9..3473e7f66081 100644 --- a/tools/perf/scripts/python/sched-migration.py +++ b/tools/perf/scripts/python/sched-migration.py | |||
@@ -9,13 +9,17 @@ | |||
9 | # This software is distributed under the terms of the GNU General | 9 | # This software is distributed under the terms of the GNU General |
10 | # Public License ("GPL") version 2 as published by the Free Software | 10 | # Public License ("GPL") version 2 as published by the Free Software |
11 | # Foundation. | 11 | # Foundation. |
12 | 12 | from __future__ import print_function | |
13 | 13 | ||
14 | import os | 14 | import os |
15 | import sys | 15 | import sys |
16 | 16 | ||
17 | from collections import defaultdict | 17 | from collections import defaultdict |
18 | from UserList import UserList | 18 | try: |
19 | from UserList import UserList | ||
20 | except ImportError: | ||
21 | # Python 3: UserList moved to the collections package | ||
22 | from collections import UserList | ||
19 | 23 | ||
20 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 24 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
21 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | 25 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
@@ -300,7 +304,7 @@ class TimeSliceList(UserList): | |||
300 | if i == -1: | 304 | if i == -1: |
301 | return | 305 | return |
302 | 306 | ||
303 | for i in xrange(i, len(self.data)): | 307 | for i in range(i, len(self.data)): |
304 | timeslice = self.data[i] | 308 | timeslice = self.data[i] |
305 | if timeslice.start > end: | 309 | if timeslice.start > end: |
306 | return | 310 | return |
@@ -336,8 +340,8 @@ class SchedEventProxy: | |||
336 | on_cpu_task = self.current_tsk[headers.cpu] | 340 | on_cpu_task = self.current_tsk[headers.cpu] |
337 | 341 | ||
338 | if on_cpu_task != -1 and on_cpu_task != prev_pid: | 342 | if on_cpu_task != -1 and on_cpu_task != prev_pid: |
339 | print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ | 343 | print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ |
340 | (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) | 344 | headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) |
341 | 345 | ||
342 | threads[prev_pid] = prev_comm | 346 | threads[prev_pid] = prev_comm |
343 | threads[next_pid] = next_comm | 347 | threads[next_pid] = next_comm |
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 2bde505e2e7e..dd850a26d579 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c | |||
@@ -422,7 +422,7 @@ static const char *shell_test__description(char *description, size_t size, | |||
422 | 422 | ||
423 | #define for_each_shell_test(dir, base, ent) \ | 423 | #define for_each_shell_test(dir, base, ent) \ |
424 | while ((ent = readdir(dir)) != NULL) \ | 424 | while ((ent = readdir(dir)) != NULL) \ |
425 | if (!is_directory(base, ent)) | 425 | if (!is_directory(base, ent) && ent->d_name[0] != '.') |
426 | 426 | ||
427 | static const char *shell_tests__dir(char *path, size_t size) | 427 | static const char *shell_tests__dir(char *path, size_t size) |
428 | { | 428 | { |
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 263057039693..94e513e62b34 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh | |||
@@ -14,35 +14,40 @@ libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1 | |||
14 | nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254 | 14 | nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254 |
15 | 15 | ||
16 | trace_libc_inet_pton_backtrace() { | 16 | trace_libc_inet_pton_backtrace() { |
17 | idx=0 | 17 | |
18 | expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" | 18 | expected=`mktemp -u /tmp/expected.XXX` |
19 | expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" | 19 | |
20 | echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected | ||
21 | echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected | ||
20 | case "$(uname -m)" in | 22 | case "$(uname -m)" in |
21 | s390x) | 23 | s390x) |
22 | eventattr='call-graph=dwarf,max-stack=4' | 24 | eventattr='call-graph=dwarf,max-stack=4' |
23 | expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" | 25 | echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected |
24 | expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" | 26 | echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected |
25 | expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" | 27 | echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected |
26 | ;; | 28 | ;; |
27 | *) | 29 | *) |
28 | eventattr='max-stack=3' | 30 | eventattr='max-stack=3' |
29 | expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" | 31 | echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected |
30 | expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" | 32 | echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected |
31 | ;; | 33 | ;; |
32 | esac | 34 | esac |
33 | 35 | ||
34 | file=`mktemp -u /tmp/perf.data.XXX` | 36 | perf_data=`mktemp -u /tmp/perf.data.XXX` |
37 | perf_script=`mktemp -u /tmp/perf.script.XXX` | ||
38 | perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1 | ||
39 | perf script -i $perf_data > $perf_script | ||
35 | 40 | ||
36 | perf record -e probe_libc:inet_pton/$eventattr/ -o $file ping -6 -c 1 ::1 > /dev/null 2>&1 | 41 | exec 3<$perf_script |
37 | perf script -i $file | while read line ; do | 42 | exec 4<$expected |
43 | while read line <&3 && read -r pattern <&4; do | ||
44 | [ -z "$pattern" ] && break | ||
38 | echo $line | 45 | echo $line |
39 | echo "$line" | egrep -q "${expected[$idx]}" | 46 | echo "$line" | egrep -q "$pattern" |
40 | if [ $? -ne 0 ] ; then | 47 | if [ $? -ne 0 ] ; then |
41 | printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line" | 48 | printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line" |
42 | exit 1 | 49 | exit 1 |
43 | fi | 50 | fi |
44 | let idx+=1 | ||
45 | [ -z "${expected[$idx]}" ] && break | ||
46 | done | 51 | done |
47 | 52 | ||
48 | # If any statements are executed from this point onwards, | 53 | # If any statements are executed from this point onwards, |
@@ -58,6 +63,6 @@ skip_if_no_perf_probe && \ | |||
58 | perf probe -q $libc inet_pton && \ | 63 | perf probe -q $libc inet_pton && \ |
59 | trace_libc_inet_pton_backtrace | 64 | trace_libc_inet_pton_backtrace |
60 | err=$? | 65 | err=$? |
61 | rm -f ${file} | 66 | rm -f ${perf_data} ${perf_script} ${expected} |
62 | perf probe -q -d probe_libc:inet_pton | 67 | perf probe -q -d probe_libc:inet_pton |
63 | exit $err | 68 | exit $err |
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh index 55ad9793d544..4ce276efe6b4 100755 --- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh +++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh | |||
@@ -17,7 +17,7 @@ skip_if_no_perf_probe || exit 2 | |||
17 | file=$(mktemp /tmp/temporary_file.XXXXX) | 17 | file=$(mktemp /tmp/temporary_file.XXXXX) |
18 | 18 | ||
19 | trace_open_vfs_getname() { | 19 | trace_open_vfs_getname() { |
20 | evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/') | 20 | evts=$(echo $(perf list syscalls:sys_enter_open* 2>&1 | egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/') |
21 | perf trace -e $evts touch $file 2>&1 | \ | 21 | perf trace -e $evts touch $file 2>&1 | \ |
22 | egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" | 22 | egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" |
23 | } | 23 | } |
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c index 976e658e38dc..5e94857dfca2 100644 --- a/tools/perf/util/llvm-utils.c +++ b/tools/perf/util/llvm-utils.c | |||
@@ -266,16 +266,16 @@ static const char *kinc_fetch_script = | |||
266 | "#!/usr/bin/env sh\n" | 266 | "#!/usr/bin/env sh\n" |
267 | "if ! test -d \"$KBUILD_DIR\"\n" | 267 | "if ! test -d \"$KBUILD_DIR\"\n" |
268 | "then\n" | 268 | "then\n" |
269 | " exit -1\n" | 269 | " exit 1\n" |
270 | "fi\n" | 270 | "fi\n" |
271 | "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n" | 271 | "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n" |
272 | "then\n" | 272 | "then\n" |
273 | " exit -1\n" | 273 | " exit 1\n" |
274 | "fi\n" | 274 | "fi\n" |
275 | "TMPDIR=`mktemp -d`\n" | 275 | "TMPDIR=`mktemp -d`\n" |
276 | "if test -z \"$TMPDIR\"\n" | 276 | "if test -z \"$TMPDIR\"\n" |
277 | "then\n" | 277 | "then\n" |
278 | " exit -1\n" | 278 | " exit 1\n" |
279 | "fi\n" | 279 | "fi\n" |
280 | "cat << EOF > $TMPDIR/Makefile\n" | 280 | "cat << EOF > $TMPDIR/Makefile\n" |
281 | "obj-y := dummy.o\n" | 281 | "obj-y := dummy.o\n" |
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 46e9e19ab1ac..bc32e57d17be 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c | |||
@@ -908,14 +908,11 @@ static void python_process_tracepoint(struct perf_sample *sample, | |||
908 | if (_PyTuple_Resize(&t, n) == -1) | 908 | if (_PyTuple_Resize(&t, n) == -1) |
909 | Py_FatalError("error resizing Python tuple"); | 909 | Py_FatalError("error resizing Python tuple"); |
910 | 910 | ||
911 | if (!dict) { | 911 | if (!dict) |
912 | call_object(handler, t, handler_name); | 912 | call_object(handler, t, handler_name); |
913 | } else { | 913 | else |
914 | call_object(handler, t, default_handler_name); | 914 | call_object(handler, t, default_handler_name); |
915 | Py_DECREF(dict); | ||
916 | } | ||
917 | 915 | ||
918 | Py_XDECREF(all_entries_dict); | ||
919 | Py_DECREF(t); | 916 | Py_DECREF(t); |
920 | } | 917 | } |
921 | 918 | ||
@@ -1235,7 +1232,6 @@ static void python_process_general_event(struct perf_sample *sample, | |||
1235 | 1232 | ||
1236 | call_object(handler, t, handler_name); | 1233 | call_object(handler, t, handler_name); |
1237 | 1234 | ||
1238 | Py_DECREF(dict); | ||
1239 | Py_DECREF(t); | 1235 | Py_DECREF(t); |
1240 | } | 1236 | } |
1241 | 1237 | ||
@@ -1627,6 +1623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) | |||
1627 | fprintf(ofp, "# See the perf-script-python Documentation for the list " | 1623 | fprintf(ofp, "# See the perf-script-python Documentation for the list " |
1628 | "of available functions.\n\n"); | 1624 | "of available functions.\n\n"); |
1629 | 1625 | ||
1626 | fprintf(ofp, "from __future__ import print_function\n\n"); | ||
1630 | fprintf(ofp, "import os\n"); | 1627 | fprintf(ofp, "import os\n"); |
1631 | fprintf(ofp, "import sys\n\n"); | 1628 | fprintf(ofp, "import sys\n\n"); |
1632 | 1629 | ||
@@ -1636,10 +1633,10 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) | |||
1636 | fprintf(ofp, "from Core import *\n\n\n"); | 1633 | fprintf(ofp, "from Core import *\n\n\n"); |
1637 | 1634 | ||
1638 | fprintf(ofp, "def trace_begin():\n"); | 1635 | fprintf(ofp, "def trace_begin():\n"); |
1639 | fprintf(ofp, "\tprint \"in trace_begin\"\n\n"); | 1636 | fprintf(ofp, "\tprint(\"in trace_begin\")\n\n"); |
1640 | 1637 | ||
1641 | fprintf(ofp, "def trace_end():\n"); | 1638 | fprintf(ofp, "def trace_end():\n"); |
1642 | fprintf(ofp, "\tprint \"in trace_end\"\n\n"); | 1639 | fprintf(ofp, "\tprint(\"in trace_end\")\n\n"); |
1643 | 1640 | ||
1644 | while ((event = trace_find_next_event(pevent, event))) { | 1641 | while ((event = trace_find_next_event(pevent, event))) { |
1645 | fprintf(ofp, "def %s__%s(", event->system, event->name); | 1642 | fprintf(ofp, "def %s__%s(", event->system, event->name); |
@@ -1675,7 +1672,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) | |||
1675 | "common_secs, common_nsecs,\n\t\t\t" | 1672 | "common_secs, common_nsecs,\n\t\t\t" |
1676 | "common_pid, common_comm)\n\n"); | 1673 | "common_pid, common_comm)\n\n"); |
1677 | 1674 | ||
1678 | fprintf(ofp, "\t\tprint \""); | 1675 | fprintf(ofp, "\t\tprint(\""); |
1679 | 1676 | ||
1680 | not_first = 0; | 1677 | not_first = 0; |
1681 | count = 0; | 1678 | count = 0; |
@@ -1736,31 +1733,31 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) | |||
1736 | fprintf(ofp, "%s", f->name); | 1733 | fprintf(ofp, "%s", f->name); |
1737 | } | 1734 | } |
1738 | 1735 | ||
1739 | fprintf(ofp, ")\n\n"); | 1736 | fprintf(ofp, "))\n\n"); |
1740 | 1737 | ||
1741 | fprintf(ofp, "\t\tprint 'Sample: {'+" | 1738 | fprintf(ofp, "\t\tprint('Sample: {'+" |
1742 | "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n"); | 1739 | "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n"); |
1743 | 1740 | ||
1744 | fprintf(ofp, "\t\tfor node in common_callchain:"); | 1741 | fprintf(ofp, "\t\tfor node in common_callchain:"); |
1745 | fprintf(ofp, "\n\t\t\tif 'sym' in node:"); | 1742 | fprintf(ofp, "\n\t\t\tif 'sym' in node:"); |
1746 | fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])"); | 1743 | fprintf(ofp, "\n\t\t\t\tprint(\"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name']))"); |
1747 | fprintf(ofp, "\n\t\t\telse:"); | 1744 | fprintf(ofp, "\n\t\t\telse:"); |
1748 | fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n"); | 1745 | fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n"); |
1749 | fprintf(ofp, "\t\tprint \"\\n\"\n\n"); | 1746 | fprintf(ofp, "\t\tprint()\n\n"); |
1750 | 1747 | ||
1751 | } | 1748 | } |
1752 | 1749 | ||
1753 | fprintf(ofp, "def trace_unhandled(event_name, context, " | 1750 | fprintf(ofp, "def trace_unhandled(event_name, context, " |
1754 | "event_fields_dict, perf_sample_dict):\n"); | 1751 | "event_fields_dict, perf_sample_dict):\n"); |
1755 | 1752 | ||
1756 | fprintf(ofp, "\t\tprint get_dict_as_string(event_fields_dict)\n"); | 1753 | fprintf(ofp, "\t\tprint(get_dict_as_string(event_fields_dict))\n"); |
1757 | fprintf(ofp, "\t\tprint 'Sample: {'+" | 1754 | fprintf(ofp, "\t\tprint('Sample: {'+" |
1758 | "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n"); | 1755 | "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n"); |
1759 | 1756 | ||
1760 | fprintf(ofp, "def print_header(" | 1757 | fprintf(ofp, "def print_header(" |
1761 | "event_name, cpu, secs, nsecs, pid, comm):\n" | 1758 | "event_name, cpu, secs, nsecs, pid, comm):\n" |
1762 | "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" | 1759 | "\tprint(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" |
1763 | "(event_name, cpu, secs, nsecs, pid, comm),\n\n"); | 1760 | "(event_name, cpu, secs, nsecs, pid, comm), end=\"\")\n\n"); |
1764 | 1761 | ||
1765 | fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n" | 1762 | fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n" |
1766 | "\treturn delimiter.join" | 1763 | "\treturn delimiter.join" |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index a8fb63edcf89..e2926f72a821 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
@@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
1991 | pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES; | 1991 | pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES; |
1992 | pcap->header.length = sizeof(*pcap); | 1992 | pcap->header.length = sizeof(*pcap); |
1993 | pcap->highest_capability = 1; | 1993 | pcap->highest_capability = 1; |
1994 | pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH | | 1994 | pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH; |
1995 | ACPI_NFIT_CAPABILITY_MEM_FLUSH; | ||
1996 | offset += pcap->header.length; | 1995 | offset += pcap->header.length; |
1997 | 1996 | ||
1998 | if (t->setup_hotplug) { | 1997 | if (t->setup_hotplug) { |
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 7eb613ffef55..b4994a94968b 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config | |||
@@ -6,6 +6,7 @@ CONFIG_TEST_BPF=m | |||
6 | CONFIG_CGROUP_BPF=y | 6 | CONFIG_CGROUP_BPF=y |
7 | CONFIG_NETDEVSIM=m | 7 | CONFIG_NETDEVSIM=m |
8 | CONFIG_NET_CLS_ACT=y | 8 | CONFIG_NET_CLS_ACT=y |
9 | CONFIG_NET_SCHED=y | ||
9 | CONFIG_NET_SCH_INGRESS=y | 10 | CONFIG_NET_SCH_INGRESS=y |
10 | CONFIG_NET_IPIP=y | 11 | CONFIG_NET_IPIP=y |
11 | CONFIG_IPV6=y | 12 | CONFIG_IPV6=y |
diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh index 35669ccd4d23..9df0d2ac45f8 100755 --- a/tools/testing/selftests/bpf/test_kmod.sh +++ b/tools/testing/selftests/bpf/test_kmod.sh | |||
@@ -1,6 +1,15 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | # Kselftest framework requirement - SKIP code is 4. | ||
5 | ksft_skip=4 | ||
6 | |||
7 | msg="skip all tests:" | ||
8 | if [ "$(id -u)" != "0" ]; then | ||
9 | echo $msg please run this as root >&2 | ||
10 | exit $ksft_skip | ||
11 | fi | ||
12 | |||
4 | SRC_TREE=../../../../ | 13 | SRC_TREE=../../../../ |
5 | 14 | ||
6 | test_run() | 15 | test_run() |
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index ce2e15e4f976..677686198df3 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh | |||
@@ -1,6 +1,15 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | # Kselftest framework requirement - SKIP code is 4. | ||
5 | ksft_skip=4 | ||
6 | |||
7 | msg="skip all tests:" | ||
8 | if [ $UID != 0 ]; then | ||
9 | echo $msg please run this as root >&2 | ||
10 | exit $ksft_skip | ||
11 | fi | ||
12 | |||
4 | GREEN='\033[0;92m' | 13 | GREEN='\033[0;92m' |
5 | RED='\033[0;31m' | 14 | RED='\033[0;31m' |
6 | NC='\033[0m' # No Color | 15 | NC='\033[0m' # No Color |
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh index 1c77994b5e71..270fa8f49573 100755 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh +++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh | |||
@@ -21,6 +21,15 @@ | |||
21 | # An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this | 21 | # An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this |
22 | # datagram can be read on NS6 when binding to fb00::6. | 22 | # datagram can be read on NS6 when binding to fb00::6. |
23 | 23 | ||
24 | # Kselftest framework requirement - SKIP code is 4. | ||
25 | ksft_skip=4 | ||
26 | |||
27 | msg="skip all tests:" | ||
28 | if [ $UID != 0 ]; then | ||
29 | echo $msg please run this as root >&2 | ||
30 | exit $ksft_skip | ||
31 | fi | ||
32 | |||
24 | TMP_FILE="/tmp/selftest_lwt_seg6local.txt" | 33 | TMP_FILE="/tmp/selftest_lwt_seg6local.txt" |
25 | 34 | ||
26 | cleanup() | 35 | cleanup() |
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 05c8cb71724a..9e78df207919 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c | |||
@@ -1413,18 +1413,12 @@ out: | |||
1413 | 1413 | ||
1414 | int main(int argc, char **argv) | 1414 | int main(int argc, char **argv) |
1415 | { | 1415 | { |
1416 | struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY}; | ||
1417 | int iov_count = 1, length = 1024, rate = 1; | 1416 | int iov_count = 1, length = 1024, rate = 1; |
1418 | struct sockmap_options options = {0}; | 1417 | struct sockmap_options options = {0}; |
1419 | int opt, longindex, err, cg_fd = 0; | 1418 | int opt, longindex, err, cg_fd = 0; |
1420 | char *bpf_file = BPF_SOCKMAP_FILENAME; | 1419 | char *bpf_file = BPF_SOCKMAP_FILENAME; |
1421 | int test = PING_PONG; | 1420 | int test = PING_PONG; |
1422 | 1421 | ||
1423 | if (setrlimit(RLIMIT_MEMLOCK, &r)) { | ||
1424 | perror("setrlimit(RLIMIT_MEMLOCK)"); | ||
1425 | return 1; | ||
1426 | } | ||
1427 | |||
1428 | if (argc < 2) | 1422 | if (argc < 2) |
1429 | return test_suite(); | 1423 | return test_suite(); |
1430 | 1424 | ||
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 78245d60d8bc..78245d60d8bc 100644..100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh | |||
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index a4684112676c..86ce22417e0d 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h | |||
@@ -133,17 +133,27 @@ static inline uint32_t rseq_current_cpu(void) | |||
133 | return cpu; | 133 | return cpu; |
134 | } | 134 | } |
135 | 135 | ||
136 | static inline void rseq_clear_rseq_cs(void) | ||
137 | { | ||
138 | #ifdef __LP64__ | ||
139 | __rseq_abi.rseq_cs.ptr = 0; | ||
140 | #else | ||
141 | __rseq_abi.rseq_cs.ptr.ptr32 = 0; | ||
142 | #endif | ||
143 | } | ||
144 | |||
136 | /* | 145 | /* |
137 | * rseq_prepare_unload() should be invoked by each thread using rseq_finish*() | 146 | * rseq_prepare_unload() should be invoked by each thread executing a rseq |
138 | * at least once between their last rseq_finish*() and library unload of the | 147 | * critical section at least once between their last critical section and |
139 | * library defining the rseq critical section (struct rseq_cs). This also | 148 | * library unload of the library defining the rseq critical section |
140 | * applies to use of rseq in code generated by JIT: rseq_prepare_unload() | 149 | * (struct rseq_cs). This also applies to use of rseq in code generated by |
141 | * should be invoked at least once by each thread using rseq_finish*() before | 150 | * JIT: rseq_prepare_unload() should be invoked at least once by each |
142 | * reclaim of the memory holding the struct rseq_cs. | 151 | * thread executing a rseq critical section before reclaim of the memory |
152 | * holding the struct rseq_cs. | ||
143 | */ | 153 | */ |
144 | static inline void rseq_prepare_unload(void) | 154 | static inline void rseq_prepare_unload(void) |
145 | { | 155 | { |
146 | __rseq_abi.rseq_cs = 0; | 156 | rseq_clear_rseq_cs(); |
147 | } | 157 | } |
148 | 158 | ||
149 | #endif /* RSEQ_H_ */ | 159 | #endif /* RSEQ_H_ */ |
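The rseq.h hunk above clears the rseq_cs field through a helper because the ABI stores it as a 64-bit value: on 64-bit builds a single store to .ptr clears it, while 32-bit builds go through the 32-bit union view. A standalone sketch of that idea; the union layout here is illustrative only (the real uapi header also accounts for endianness and padding):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the 64-bit rseq_cs field. */
union cs_ptr {
	uint64_t ptr64;
	struct {
		uint32_t ptr32;
		uint32_t high;
	} ptr;
};

int main(void)
{
	union cs_ptr cs = { .ptr64 = 0x1122334455667788ULL };

#ifdef __LP64__
	cs.ptr64 = 0;           /* 64-bit: one store clears the whole field */
#else
	cs.ptr.ptr32 = 0;       /* 32-bit: clear through the 32-bit view... */
	cs.ptr.high = 0;        /* ...and zero the upper half explicitly */
#endif
	printf("rseq_cs now %llu\n", (unsigned long long)cs.ptr64);
	return 0;
}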