commit     f33ecec9bb5199c5a4dd296af604f70273d2636e
tree       ba12665fe8f7952a64013bef9125bfac27ff1fda
parent     0ea3286e2df74c9ec2fadbf91170cd3edd14e3e5
parent     2d2ccf24939cf369f7473c7e4ea309891be91848
author     Radim Krčmář <rkrcmar@redhat.com>  2018-05-26 07:45:49 -0400
committer  Radim Krčmář <rkrcmar@redhat.com>  2018-05-26 07:45:49 -0400

    Merge branch 'x86/hyperv' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

    To resolve conflicts with the PV TLB flush series.

 387 files changed, 4052 insertions, 1810 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 640f65e79ef1..8e69345c37cc 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -244,3 +244,11 @@ Description: read only
 Returns 1 if the psl timebase register is synchronized
 with the core timebase register, 0 otherwise.
 Users: https://github.com/ibm-capi/libcxl
+
+What: /sys/class/cxl/<card>/tunneled_ops_supported
+Date: May 2018
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+Returns 1 if tunneled operations are supported in capi mode,
+0 otherwise.
+Users: https://github.com/ibm-capi/libcxl
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index d2b6fda3d67b..ab2fe0eda1d7 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -145,7 +145,7 @@ feature enabled.]
 
 In this mode ``intel_pstate`` registers utilization update callbacks with the
 CPU scheduler in order to run a P-state selection algorithm, either
-``powersave`` or ``performance``, depending on the ``scaling_cur_freq`` policy
+``powersave`` or ``performance``, depending on the ``scaling_governor`` policy
 setting in ``sysfs``. The current CPU frequency information to be made
 available from the ``scaling_cur_freq`` policy attribute in ``sysfs`` is
 periodically updated by those utilization update callbacks too.
diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst
index 1e5c0f00cb2f..dbf5acd49f35 100644
--- a/Documentation/admin-guide/pm/sleep-states.rst
+++ b/Documentation/admin-guide/pm/sleep-states.rst
@@ -15,7 +15,7 @@ Sleep States That Can Be Supported
 ==================================
 
 Depending on its configuration and the capabilities of the platform it runs on,
-the Linux kernel can support up to four system sleep states, includig
+the Linux kernel can support up to four system sleep states, including
 hibernation and up to three variants of system suspend. The sleep states that
 can be supported by the kernel are listed below.
 
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 4bcd4b7f79f9..3d01948ea061 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -264,7 +264,10 @@ i) Constructor
 data device, but just remove the mapping.
 
 read_only: Don't allow any changes to be made to the pool
-metadata.
+metadata. This mode is only available after the
+thin-pool has been created and first used in full
+read/write mode. It cannot be specified on initial
+thin-pool creation.
 
 error_if_no_space: Error IOs, instead of queueing, if no space.
 
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index f4006d3c9fdf..c760ecb81381 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -30,7 +30,6 @@ compatible:
 Optional properties:
 - dma-coherent : Present if dma operations are coherent
 - clocks : a list of phandle + clock specifier pairs
-- resets : a list of phandle + reset specifier pairs
 - target-supply : regulator for SATA target power
 - phys : reference to the SATA PHY node
 - phy-names : must be "sata-phy"
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt
index 557fa765adcb..5d2519af4bb5 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.txt
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt
@@ -38,7 +38,7 @@ Display Timings
 require specific display timings. The panel-timing subnode expresses those
 timings as specified in the timing subnode section of the display timing
 bindings defined in
-Documentation/devicetree/bindings/display/display-timing.txt.
+Documentation/devicetree/bindings/display/panel/display-timing.txt.
 
 
 Connectivity
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index aadfb236d53a..61315eaa7660 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -26,6 +26,7 @@ Required Properties:
 - "renesas,dmac-r8a7794" (R-Car E2)
 - "renesas,dmac-r8a7795" (R-Car H3)
 - "renesas,dmac-r8a7796" (R-Car M3-W)
+- "renesas,dmac-r8a77965" (R-Car M3-N)
 - "renesas,dmac-r8a77970" (R-Car V3M)
 - "renesas,dmac-r8a77980" (R-Car V3H)
 
diff --git a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
index 93c3a6ae32f9..ac71daa46195 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
@@ -5,7 +5,9 @@ Required properties:
 - compatible: Must contain one or more of the following:
 - "renesas,rcar-gen3-canfd" for R-Car Gen3 compatible controller.
 - "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller.
-- "renesas,r8a7796-canfd" for R8A7796 (R-Car M3) compatible controller.
+- "renesas,r8a7796-canfd" for R8A7796 (R-Car M3-W) compatible controller.
+- "renesas,r8a77970-canfd" for R8A77970 (R-Car V3M) compatible controller.
+- "renesas,r8a77980-canfd" for R8A77980 (R-Car V3H) compatible controller.
 
 When compatible with the generic version, nodes must list the
 SoC-specific version corresponding to the platform first, followed by the
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index c306f55d335b..890526dbfc26 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -18,6 +18,7 @@ Required properties:
 
 - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
 - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
+- "renesas,etheravb-r8a77965" for the R8A77965 SoC.
 - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
 - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
 - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index ed5eb547afc8..64bc5c2a76da 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -56,9 +56,9 @@ pins it needs, and how they should be configured, with regard to muxer
 configuration, drive strength and pullups. If one of these options is
 not set, its actual value will be unspecified.
 
-This driver supports the generic pin multiplexing and configuration
-bindings. For details on each properties, you can refer to
+Allwinner A1X Pin Controller supports the generic pin multiplexing and
+configuration bindings. For details on each properties, you can refer to
 ./pinctrl-bindings.txt.
 
 Required sub-node properties:
 - pins
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index a006ea4d065f..106808b55b6d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -43,6 +43,8 @@ Required properties:
 - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
 - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
 - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
+- "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART.
+- "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART.
 - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
 - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
 - "renesas,scif-r8a77980" for R8A77980 (R-Car V3H) SCIF compatible UART.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index b5f978a4cac6..a38d8bfae19c 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -182,6 +182,7 @@ karo Ka-Ro electronics GmbH
 keithkoep Keith & Koep GmbH
 keymile Keymile GmbH
 khadas Khadas
+kiebackpeter Kieback & Peter GmbH
 kinetic Kinetic Technologies
 kingnovel Kingnovel Technology Co., Ltd.
 kosagi Sutajio Ko-Usagi PTE Ltd.
diff --git a/Documentation/devicetree/overlay-notes.txt b/Documentation/devicetree/overlay-notes.txt
index a4feb6dde8cd..725fb8d255c1 100644
--- a/Documentation/devicetree/overlay-notes.txt
+++ b/Documentation/devicetree/overlay-notes.txt
@@ -98,6 +98,14 @@ Finally, if you need to remove all overlays in one-go, just call
 of_overlay_remove_all() which will remove every single one in the correct
 order.
 
+In addition, there is the option to register notifiers that get called on
+overlay operations. See of_overlay_notifier_register/unregister and
+enum of_overlay_notify_action for details.
+
+Note that a notifier callback is not supposed to store pointers to a device
+tree node or its content beyond OF_OVERLAY_POST_REMOVE corresponding to the
+respective node it received.
+
 Overlay DTS Format
 ------------------
 
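As a rough illustration of the notifier interface mentioned in the hunk above, a driver might register a callback like the sketch below. This is only an assumed usage pattern: the callback payload is not dereferenced, and any of_overlay_notify_action values other than OF_OVERLAY_POST_REMOVE (which the hunk names) are assumed rather than taken from this change.

/* Hedged sketch of an overlay notifier; names other than those in the
 * documentation hunk above are illustrative assumptions. */
#include <linux/notifier.h>
#include <linux/of.h>

static int my_overlay_notify(struct notifier_block *nb,
			     unsigned long action, void *arg)
{
	if (action == OF_OVERLAY_POST_REMOVE) {
		/* Drop any cached pointers to the affected device tree
		 * nodes here; they must not be kept past this point. */
	}
	return NOTIFY_OK;
}

static struct notifier_block my_overlay_nb = {
	.notifier_call = my_overlay_notify,
};

/* At init: of_overlay_notifier_register(&my_overlay_nb);
 * At exit: of_overlay_notifier_unregister(&my_overlay_nb); */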
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index d4f33eb805dd..ab022dcd0911 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -72,8 +72,8 @@ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
 
 flag || value || meaning
 ==================================================================================
-KVM_HINTS_DEDICATED || 0 || guest checks this feature bit to
-|| || determine if there is vCPU pinning
-|| || and there is no vCPU over-commitment,
+KVM_HINTS_REALTIME || 0 || guest checks this feature bit to
+|| || determine that vCPUs are never
+|| || preempted for an unlimited time,
 || || allowing optimizations
 ----------------------------------------------------------------------------------
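For reference, a guest typically tests the renamed hint through the paravirtual CPUID helpers rather than raw CPUID. The sketch below assumes the x86 guest-side helpers kvm_para_available() and kvm_para_has_hint(), which are not part of this hunk.

/* Hedged sketch: guest-side check of the KVM_HINTS_REALTIME bit, assuming
 * the x86 kvm_para helpers exist in this form. */
#include <asm/kvm_para.h>

static bool guest_never_preempted_indefinitely(void)
{
	return kvm_para_available() &&
	       kvm_para_has_hint(KVM_HINTS_REALTIME);
}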
diff --git a/MAINTAINERS b/MAINTAINERS
index df6e9bb2559a..d155d1e0dbc2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -137,9 +137,9 @@ Maintainers List (try to look for most precise areas first)
 -----------------------------------
 
 3C59X NETWORK DRIVER
-M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
+M: Steffen Klassert <klassert@kernel.org>
 L: netdev@vger.kernel.org
-S: Maintained
+S: Odd Fixes
 F: Documentation/networking/vortex.txt
 F: drivers/net/ethernet/3com/3c59x.c
 
@@ -3691,7 +3691,6 @@ F: drivers/cpufreq/arm_big_little_dt.c
 
 CPU POWER MONITORING SUBSYSTEM
 M: Thomas Renninger <trenn@suse.com>
-M: Shuah Khan <shuahkh@osg.samsung.com>
 M: Shuah Khan <shuah@kernel.org>
 L: linux-pm@vger.kernel.org
 S: Maintained
@@ -7696,10 +7695,10 @@ F: include/linux/sunrpc/
 F: include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
-M: Shuah Khan <shuahkh@osg.samsung.com>
 M: Shuah Khan <shuah@kernel.org>
 L: linux-kselftest@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
+Q: https://patchwork.kernel.org/project/linux-kselftest/list/
 S: Maintained
 F: tools/testing/selftests/
 F: Documentation/dev-tools/kselftest*
@@ -9873,7 +9872,7 @@ F: include/linux/platform_data/nxp-nci.h
 F: Documentation/devicetree/bindings/net/nfc/
 
 NFS, SUNRPC, AND LOCKD CLIENTS
-M: Trond Myklebust <trond.myklebust@primarydata.com>
+M: Trond Myklebust <trond.myklebust@hammerspace.com>
 M: Anna Schumaker <anna.schumaker@netapp.com>
 L: linux-nfs@vger.kernel.org
 W: http://client.linux-nfs.org
@@ -12222,7 +12221,7 @@ F: Documentation/s390/vfio-ccw.txt
 F: include/uapi/linux/vfio_ccw.h
 
 S390 ZCRYPT DRIVER
-M: Harald Freudenberger <freude@de.ibm.com>
+M: Harald Freudenberger <freude@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 W: http://www.ibm.com/developerworks/linux/linux390/
 S: Supported
@@ -13266,6 +13265,12 @@ M: Jan-Benedict Glaw <jbglaw@lug-owl.de>
 S: Maintained
 F: arch/alpha/kernel/srm_env.c
 
+ST STM32 I2C/SMBUS DRIVER
+M: Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/busses/i2c-stm32*
+
 STABLE BRANCH
 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L: stable@vger.kernel.org
@@ -14650,7 +14655,6 @@ F: drivers/usb/common/usb-otg-fsm.c
 
 USB OVER IP DRIVER
 M: Valentina Manea <valentina.manea.m@gmail.com>
-M: Shuah Khan <shuahkh@osg.samsung.com>
 M: Shuah Khan <shuah@kernel.org>
 L: linux-usb@vger.kernel.org
 S: Maintained
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8e0d665c8d53..75dd23acf133 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -464,6 +464,10 @@ config GCC_PLUGIN_LATENT_ENTROPY
 config GCC_PLUGIN_STRUCTLEAK
 	bool "Force initialization of variables containing userspace addresses"
 	depends on GCC_PLUGINS
+	# Currently STRUCTLEAK inserts initialization out of live scope of
+	# variables from KASAN point of view. This leads to KASAN false
+	# positive reports. Prohibit this combination for now.
+	depends on !KASAN_EXTRA
 	help
 	  This plugin zero-initializes any structures containing a
 	  __user attribute. This can prevent some classes of information
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index bf343195697e..54111ed218b1 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -303,7 +303,7 @@
 };
 
 can1: can@53fe4000 {
-compatible = "fsl,imx35-flexcan";
+compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
 reg = <0x53fe4000 0x1000>;
 clocks = <&clks 33>, <&clks 33>;
 clock-names = "ipg", "per";
@@ -312,7 +312,7 @@
 };
 
 can2: can@53fe8000 {
-compatible = "fsl,imx35-flexcan";
+compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
 reg = <0x53fe8000 0x1000>;
 clocks = <&clks 34>, <&clks 34>;
 clock-names = "ipg", "per";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 7d647d043f52..3d65c0192f69 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -551,7 +551,7 @@
 };
 
 can1: can@53fc8000 {
-compatible = "fsl,imx53-flexcan";
+compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
 reg = <0x53fc8000 0x4000>;
 interrupts = <82>;
 clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
@@ -561,7 +561,7 @@
 };
 
 can2: can@53fcc000 {
-compatible = "fsl,imx53-flexcan";
+compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
 reg = <0x53fcc000 0x4000>;
 interrupts = <83>;
 clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 707a1f06dc5d..f675162663f0 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	return kvm_ksym_ref(__kvm_hyp_vector);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 30014a9f8f2b..ea690b3562af 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -75,6 +75,7 @@
 #define ARM_CPU_IMP_CAVIUM 0x43
 #define ARM_CPU_IMP_BRCM 0x42
 #define ARM_CPU_IMP_QCOM 0x51
+#define ARM_CPU_IMP_NVIDIA 0x4E
 
 #define ARM_CPU_PART_AEM_V8 0xD0F
 #define ARM_CPU_PART_FOUNDATION 0xD00
@@ -99,6 +100,9 @@
 #define QCOM_CPU_PART_FALKOR 0xC00
 #define QCOM_CPU_PART_KRYO 0x200
 
+#define NVIDIA_CPU_PART_DENVER 0x003
+#define NVIDIA_CPU_PART_CARMEL 0x004
+
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -114,6 +118,8 @@
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
+#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 082110993647..6128992c2ded 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a900befadfe8..e4a1182deff7 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -316,6 +316,7 @@ static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
 	{},
 };
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9f3c47acf8ff..1b18b4722420 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -646,8 +646,10 @@ static int keep_initrd __initdata;
 
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd)
+	if (!keep_initrd) {
 		free_reserved_area((void *)start, (void *)end, 0, "initrd");
+		memblock_free(__virt_to_phys(start), end - start);
+	}
 }
 
 static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 9abddde372ab..b2dabd06659d 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -69,17 +69,30 @@ struct dyn_arch_ftrace {
 #endif
 
 #if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__)
-#ifdef PPC64_ELF_ABI_v1
+/*
+ * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
+ * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
+ * those.
+ */
 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+#ifdef PPC64_ELF_ABI_v1
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+	/* We need to skip past the initial dot, and the __se_sys alias */
+	return !strcmp(sym + 1, name) ||
+		(!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) ||
+		(!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) ||
+		(!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) ||
+		(!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4));
+}
+#else
 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 {
-	/*
-	 * Compare the symbol name with the system call name. Skip the .sys or .SyS
-	 * prefix from the symbol name and the sys prefix from the system call name and
-	 * just match the rest. This is only needed on ppc64 since symbol names on
-	 * 32bit do not start with a period so the generic function will work.
-	 */
-	return !strcmp(sym + 4, name + 3);
+	return !strcmp(sym, name) ||
+		(!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
+		(!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
+		(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
+		(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
 }
 #endif
 #endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 4185f1c96125..3f109a3e3edb 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -165,7 +165,6 @@ struct paca_struct {
 	u64 saved_msr;		/* MSR saved here by enter_rtas */
 	u16 trap_save;		/* Used when bad stack is encountered */
 	u8 irq_soft_mask;	/* mask for irq soft masking */
-	u8 soft_enabled;	/* irq soft-enable flag */
 	u8 irq_happened;	/* irq happened while soft-disabled */
 	u8 io_sync;		/* writel() needs spin_unlock sync */
 	u8 irq_work_pending;	/* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 9f421641a35c..16b077801a5f 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -91,6 +91,7 @@ extern int start_topology_update(void);
 extern int stop_topology_update(void);
 extern int prrn_is_enabled(void);
 extern int find_and_online_cpu_nid(int cpu);
+extern int timed_topology_update(int nsecs);
 #else
 static inline int start_topology_update(void)
 {
@@ -108,16 +109,12 @@ static inline int find_and_online_cpu_nid(int cpu)
 {
 	return 0;
 }
+static inline int timed_topology_update(int nsecs)
+{
+	return 0;
+}
 #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
 
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
-#if defined(CONFIG_PPC_SPLPAR)
-extern int timed_topology_update(int nsecs);
-#else
-#define timed_topology_update(nsecs)
-#endif /* CONFIG_PPC_SPLPAR */
-#endif /* CONFIG_HOTPLUG_CPU || CONFIG_NEED_MULTIPLE_NODES */
-
 #include <asm-generic/topology.h>
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 1bceb95f422d..5584247f5029 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
 	return count;
 }
 
+/*
+ * This can be called in the panic path with interrupts off, so use
+ * mdelay in that case.
+ */
 static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
 {
 	s64 rc = OPAL_BUSY;
@@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_write_nvram(__pa(buf), count, off);
 		if (rc == OPAL_BUSY_EVENT) {
-			msleep(OPAL_BUSY_DELAY_MS);
+			if (in_interrupt() || irqs_disabled())
+				mdelay(OPAL_BUSY_DELAY_MS);
+			else
+				msleep(OPAL_BUSY_DELAY_MS);
 			opal_poll_events(NULL);
 		} else if (rc == OPAL_BUSY) {
-			msleep(OPAL_BUSY_DELAY_MS);
+			if (in_interrupt() || irqs_disabled())
+				mdelay(OPAL_BUSY_DELAY_MS);
+			else
+				msleep(OPAL_BUSY_DELAY_MS);
 		}
 	}
 
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 6176fe9795ca..941d8cc6c9f5 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -261,9 +261,9 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_TABLES_ARP=y
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -284,7 +284,7 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -305,7 +305,7 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_TABLES_BRIDGE=y
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
@@ -604,7 +604,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index c105bcc6d7a6..eb6f75f24208 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -259,9 +259,9 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_TABLES_ARP=y
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -282,7 +282,7 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -303,7 +303,7 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_TABLES_BRIDGE=y
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index e8077f0971f8..2bf01ba44107 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -67,6 +68,8 @@
 
 .previous
 
+GEN_BR_THUNK %r14
+
 .text
 /*
  * The CRC-32 function(s) use these calling conventions:
@@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16)
 
 .Ldone:
 VLGVF %r2,%v2,3
-br %r14
+BR_EX %r14
 
 .previous
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
index d8c67a58c0c5..7d6f568bd3ad 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -14,6 +14,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -76,6 +77,7 @@
 
 .previous
 
+GEN_BR_THUNK %r14
 
 .text
 
@@ -264,6 +266,6 @@ crc32_le_vgfm_generic:
 
 .Ldone:
 VLGVF %r2,%v2,2
-br %r14
+BR_EX %r14
 
 .previous
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
new file mode 100644
index 000000000000..a01f81186e86
--- /dev/null
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_NOSPEC_ASM_H
+#define _ASM_S390_NOSPEC_ASM_H
+
+#include <asm/alternative-asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/dwarf.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EXPOLINE
+
+_LC_BR_R1 = __LC_BR_R1
+
+/*
+ * The expoline macros are used to create thunks in the same format
+ * as gcc generates them. The 'comdat' section flag makes sure that
+ * the various thunks are merged into a single copy.
+ */
+.macro __THUNK_PROLOG_NAME name
+.pushsection .text.\name,"axG",@progbits,\name,comdat
+.globl \name
+.hidden \name
+.type \name,@function
+\name:
+CFI_STARTPROC
+.endm
+
+.macro __THUNK_EPILOG
+CFI_ENDPROC
+.popsection
+.endm
+
+.macro __THUNK_PROLOG_BR r1,r2
+__THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+.endm
+
+.macro __THUNK_PROLOG_BC d0,r1,r2
+__THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+.endm
+
+.macro __THUNK_BR r1,r2
+jg __s390x_indirect_jump_r\r2\()use_r\r1
+.endm
+
+.macro __THUNK_BC d0,r1,r2
+jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+.endm
+
+.macro __THUNK_BRASL r1,r2,r3
+brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
+.endm
+
+.macro __DECODE_RR expand,reg,ruse
+.set __decode_fail,1
+.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+.ifc \reg,%r\r1
+.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+.ifc \ruse,%r\r2
+\expand \r1,\r2
+.set __decode_fail,0
+.endif
+.endr
+.endif
+.endr
+.if __decode_fail == 1
+.error "__DECODE_RR failed"
+.endif
+.endm
+
+.macro __DECODE_RRR expand,rsave,rtarget,ruse
+.set __decode_fail,1
+.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+.ifc \rsave,%r\r1
+.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+.ifc \rtarget,%r\r2
+.irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+.ifc \ruse,%r\r3
+\expand \r1,\r2,\r3
+.set __decode_fail,0
+.endif
+.endr
+.endif
+.endr
+.endif
+.endr
+.if __decode_fail == 1
+.error "__DECODE_RRR failed"
+.endif
+.endm
+
+.macro __DECODE_DRR expand,disp,reg,ruse
+.set __decode_fail,1
+.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+.ifc \reg,%r\r1
+.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+.ifc \ruse,%r\r2
+\expand \disp,\r1,\r2
+.set __decode_fail,0
+.endif
+.endr
+.endif
+.endr
+.if __decode_fail == 1
+.error "__DECODE_DRR failed"
+.endif
+.endm
+
+.macro __THUNK_EX_BR reg,ruse
+# Be very careful when adding instructions to this macro!
+# The ALTERNATIVE replacement code has a .+10 which targets
+# the "br \reg" after the code has been patched.
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+exrl 0,555f
+j .
+#else
+.ifc \reg,%r1
+ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
+j .
+.else
+larl \ruse,555f
+ex 0,0(\ruse)
+j .
+.endif
+#endif
+555: br \reg
+.endm
+
+.macro __THUNK_EX_BC disp,reg,ruse
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+exrl 0,556f
+j .
+#else
+larl \ruse,556f
+ex 0,0(\ruse)
+j .
+#endif
+556: b \disp(\reg)
+.endm
+
+.macro GEN_BR_THUNK reg,ruse=%r1
+__DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
+__THUNK_EX_BR \reg,\ruse
+__THUNK_EPILOG
+.endm
+
+.macro GEN_B_THUNK disp,reg,ruse=%r1
+__DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
+__THUNK_EX_BC \disp,\reg,\ruse
+__THUNK_EPILOG
+.endm
+
+.macro BR_EX reg,ruse=%r1
+557: __DECODE_RR __THUNK_BR,\reg,\ruse
+.pushsection .s390_indirect_branches,"a",@progbits
+.long 557b-.
+.popsection
+.endm
+
+.macro B_EX disp,reg,ruse=%r1
+558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
+.pushsection .s390_indirect_branches,"a",@progbits
+.long 558b-.
+.popsection
+.endm
+
+.macro BASR_EX rsave,rtarget,ruse=%r1
+559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
+.pushsection .s390_indirect_branches,"a",@progbits
+.long 559b-.
+.popsection
+.endm
+
+#else
+.macro GEN_BR_THUNK reg,ruse=%r1
+.endm
+
+.macro GEN_B_THUNK disp,reg,ruse=%r1
+.endm
+
+.macro BR_EX reg,ruse=%r1
+br \reg
+.endm
+
+.macro B_EX disp,reg,ruse=%r1
+b \disp(\reg)
+.endm
+
+.macro BASR_EX rsave,rtarget,ruse=%r1
+basr \rsave,\rtarget
+.endm
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_NOSPEC_ASM_H */
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h
index e297bcfc476f..6090670df51f 100644
--- a/arch/s390/include/asm/purgatory.h
+++ b/arch/s390/include/asm/purgatory.h
@@ -13,5 +13,11 @@
 
 int verify_sha256_digest(void);
 
+extern u64 kernel_entry;
+extern u64 kernel_type;
+
+extern u64 crash_start;
+extern u64 crash_size;
+
 #endif /* __ASSEMBLY__ */
 #endif /* _S390_PURGATORY_H_ */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 84ea6225efb4..f92dd8ed3884 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -65,6 +65,7 @@ obj-y += nospec-branch.o
 
 extra-y += head.o head64.o vmlinux.lds
 
+obj-$(CONFIG_SYSFS) += nospec-sysfs.o
 CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
 
 obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index eb2a5c0443cd..11aea745a2a6 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -181,6 +181,7 @@ int main(void)
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
 	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
 	OFFSET(__LC_GMAP, lowcore, gmap);
+	OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
 	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
 	/* hardware defined lowcore locations 0x1000 - 0x18ff */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f6c56009e822..b65874b0b412 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -9,18 +9,22 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/sigp.h>
 
+GEN_BR_THUNK %r9
+GEN_BR_THUNK %r14
+
 ENTRY(s390_base_mcck_handler)
 basr %r13,0
 0: lg %r15,__LC_PANIC_STACK # load panic stack
 aghi %r15,-STACK_FRAME_OVERHEAD
 larl %r1,s390_base_mcck_handler_fn
-lg %r1,0(%r1)
-ltgr %r1,%r1
+lg %r9,0(%r1)
+ltgr %r9,%r9
 jz 1f
-basr %r14,%r1
+BASR_EX %r14,%r9
 1: la %r1,4095
 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
 lpswe __LC_MCK_OLD_PSW
@@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler)
 basr %r13,0
 0: aghi %r15,-STACK_FRAME_OVERHEAD
 larl %r1,s390_base_ext_handler_fn
-lg %r1,0(%r1)
-ltgr %r1,%r1
+lg %r9,0(%r1)
+ltgr %r9,%r9
 jz 1f
-basr %r14,%r1
+BASR_EX %r14,%r9
 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
 lpswe __LC_EXT_OLD_PSW
@@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler)
 basr %r13,0
 0: aghi %r15,-STACK_FRAME_OVERHEAD
 larl %r1,s390_base_pgm_handler_fn
-lg %r1,0(%r1)
-ltgr %r1,%r1
+lg %r9,0(%r1)
+ltgr %r9,%r9
 jz 1f
-basr %r14,%r1
+BASR_EX %r14,%r9
 lmg %r0,%r15,__LC_SAVE_AREA_SYNC
 lpswe __LC_PGM_OLD_PSW
 1: lpswe disabled_wait_psw-0b(%r13)
@@ -117,7 +121,7 @@ ENTRY(diag308_reset)
 larl %r4,.Lcontinue_psw # Restore PSW flags
 lpswe 0(%r4)
 .Lcontinue:
-br %r14
+BR_EX %r14
 .align 16
 .Lrestart_psw:
 .long 0x00080000,0x80000000 + .Lrestart_part2
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3f22f139a041..f03402efab4b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -28,6 +28,7 @@
 #include <asm/setup.h>
 #include <asm/nmi.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
 
 __PT_R0 = __PT_GPRS
 __PT_R1 = __PT_GPRS + 8
@@ -183,67 +184,9 @@ _LPP_OFFSET = __LC_LPP
 "jnz .+8; .long 0xb2e8d000", 82
 .endm
 
-#ifdef CONFIG_EXPOLINE
-
-.macro GEN_BR_THUNK name,reg,tmp
-.section .text.\name,"axG",@progbits,\name,comdat
-.globl \name
-.hidden \name
-.type \name,@function
-\name:
-CFI_STARTPROC
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-exrl 0,0f
-#else
-larl \tmp,0f
-ex 0,0(\tmp)
-#endif
-j .
-0: br \reg
-CFI_ENDPROC
-.endm
-
-GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
-GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
-GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
-
-.macro BASR_R14_R9
-0: brasl %r14,__s390x_indirect_jump_r1use_r9
-.pushsection .s390_indirect_branches,"a",@progbits
-.long 0b-.
-.popsection
-.endm
-
-.macro BR_R1USE_R14
-0: jg __s390x_indirect_jump_r1use_r14
-.pushsection .s390_indirect_branches,"a",@progbits
-.long 0b-.
-.popsection
-.endm
-
-.macro BR_R11USE_R14
-0: jg __s390x_indirect_jump_r11use_r14
-.pushsection .s390_indirect_branches,"a",@progbits
-.long 0b-.
-.popsection
-.endm
-
-#else /* CONFIG_EXPOLINE */
-
-.macro BASR_R14_R9
-basr %r14,%r9
-.endm
-
-.macro BR_R1USE_R14
-br %r14
-.endm
-
-.macro BR_R11USE_R14
-br %r14
-.endm
-
-#endif /* CONFIG_EXPOLINE */
-
+GEN_BR_THUNK %r9
+GEN_BR_THUNK %r14
+GEN_BR_THUNK %r14,%r11
 
 .section .kprobes.text, "ax"
 .Ldummy:
@@ -260,7 +203,7 @@ _LPP_OFFSET = __LC_LPP
 ENTRY(__bpon)
 .globl __bpon
 BPON
-BR_R1USE_R14
+BR_EX %r14
 
 /*
  * Scheduler resume function, called by switch_to
@@ -284,7 +227,7 @@ ENTRY(__switch_to)
 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
-BR_R1USE_R14
+BR_EX %r14
 
 .L__critical_start:
 
@@ -351,7 +294,7 @@ sie_exit:
 xgr %r5,%r5
 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code
-BR_R1USE_R14
+BR_EX %r14
 .Lsie_fault:
 lghi %r14,-EFAULT
 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code
@@ -410,7 +353,7 @@ ENTRY(system_call)
 lgf %r9,0(%r8,%r10) # get system call add.
 TSTMSK __TI_flags(%r12),_TIF_TRACE
 jnz .Lsysc_tracesys
-BASR_R14_R9 # call sys_xxxx
+BASR_EX %r14,%r9 # call sys_xxxx
 stg %r2,__PT_R2(%r11) # store return value
 
 .Lsysc_return:
@@ -595,7 +538,7 @@ ENTRY(system_call)
 lmg %r3,%r7,__PT_R3(%r11)
 stg %r7,STACK_FRAME_OVERHEAD(%r15)
 lg %r2,__PT_ORIG_GPR2(%r11)
-BASR_R14_R9 # call sys_xxx
+BASR_EX %r14,%r9 # call sys_xxx
 stg %r2,__PT_R2(%r11) # store return value
 .Lsysc_tracenogo:
 TSTMSK __TI_flags(%r12),_TIF_TRACE
@@ -619,7 +562,7 @@ ENTRY(ret_from_fork)
 lmg %r9,%r10,__PT_R9(%r11) # load gprs
 ENTRY(kernel_thread_starter)
 la %r2,0(%r10)
-BASR_R14_R9
+BASR_EX %r14,%r9
 j .Lsysc_tracenogo
 
 /*
@@ -701,7 +644,7 @@ ENTRY(pgm_check_handler)
701 | je .Lpgm_return | 644 | je .Lpgm_return |
702 | lgf %r9,0(%r10,%r1) # load address of handler routine | 645 | lgf %r9,0(%r10,%r1) # load address of handler routine |
703 | lgr %r2,%r11 # pass pointer to pt_regs | 646 | lgr %r2,%r11 # pass pointer to pt_regs |
704 | BASR_R14_R9 # branch to interrupt-handler | 647 | BASR_EX %r14,%r9 # branch to interrupt-handler |
705 | .Lpgm_return: | 648 | .Lpgm_return: |
706 | LOCKDEP_SYS_EXIT | 649 | LOCKDEP_SYS_EXIT |
707 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 650 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
@@ -1019,7 +962,7 @@ ENTRY(psw_idle) | |||
1019 | stpt __TIMER_IDLE_ENTER(%r2) | 962 | stpt __TIMER_IDLE_ENTER(%r2) |
1020 | .Lpsw_idle_lpsw: | 963 | .Lpsw_idle_lpsw: |
1021 | lpswe __SF_EMPTY(%r15) | 964 | lpswe __SF_EMPTY(%r15) |
1022 | BR_R1USE_R14 | 965 | BR_EX %r14 |
1023 | .Lpsw_idle_end: | 966 | .Lpsw_idle_end: |
1024 | 967 | ||
1025 | /* | 968 | /* |
@@ -1061,7 +1004,7 @@ ENTRY(save_fpu_regs) | |||
1061 | .Lsave_fpu_regs_done: | 1004 | .Lsave_fpu_regs_done: |
1062 | oi __LC_CPU_FLAGS+7,_CIF_FPU | 1005 | oi __LC_CPU_FLAGS+7,_CIF_FPU |
1063 | .Lsave_fpu_regs_exit: | 1006 | .Lsave_fpu_regs_exit: |
1064 | BR_R1USE_R14 | 1007 | BR_EX %r14 |
1065 | .Lsave_fpu_regs_end: | 1008 | .Lsave_fpu_regs_end: |
1066 | EXPORT_SYMBOL(save_fpu_regs) | 1009 | EXPORT_SYMBOL(save_fpu_regs) |
1067 | 1010 | ||
@@ -1107,7 +1050,7 @@ load_fpu_regs: | |||
1107 | .Lload_fpu_regs_done: | 1050 | .Lload_fpu_regs_done: |
1108 | ni __LC_CPU_FLAGS+7,255-_CIF_FPU | 1051 | ni __LC_CPU_FLAGS+7,255-_CIF_FPU |
1109 | .Lload_fpu_regs_exit: | 1052 | .Lload_fpu_regs_exit: |
1110 | BR_R1USE_R14 | 1053 | BR_EX %r14 |
1111 | .Lload_fpu_regs_end: | 1054 | .Lload_fpu_regs_end: |
1112 | 1055 | ||
1113 | .L__critical_end: | 1056 | .L__critical_end: |
@@ -1322,7 +1265,7 @@ cleanup_critical: | |||
1322 | jl 0f | 1265 | jl 0f |
1323 | clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end | 1266 | clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end |
1324 | jl .Lcleanup_load_fpu_regs | 1267 | jl .Lcleanup_load_fpu_regs |
1325 | 0: BR_R11USE_R14 | 1268 | 0: BR_EX %r14 |
1326 | 1269 | ||
1327 | .align 8 | 1270 | .align 8 |
1328 | .Lcleanup_table: | 1271 | .Lcleanup_table: |
@@ -1358,7 +1301,7 @@ cleanup_critical: | |||
1358 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE | 1301 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE |
1359 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 1302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
1360 | larl %r9,sie_exit # skip forward to sie_exit | 1303 | larl %r9,sie_exit # skip forward to sie_exit |
1361 | BR_R11USE_R14 | 1304 | BR_EX %r14 |
1362 | #endif | 1305 | #endif |
1363 | 1306 | ||
1364 | .Lcleanup_system_call: | 1307 | .Lcleanup_system_call: |
@@ -1412,7 +1355,7 @@ cleanup_critical: | |||
1412 | stg %r15,56(%r11) # r15 stack pointer | 1355 | stg %r15,56(%r11) # r15 stack pointer |
1413 | # set new psw address and exit | 1356 | # set new psw address and exit |
1414 | larl %r9,.Lsysc_do_svc | 1357 | larl %r9,.Lsysc_do_svc |
1415 | BR_R11USE_R14 | 1358 | BR_EX %r14,%r11 |
1416 | .Lcleanup_system_call_insn: | 1359 | .Lcleanup_system_call_insn: |
1417 | .quad system_call | 1360 | .quad system_call |
1418 | .quad .Lsysc_stmg | 1361 | .quad .Lsysc_stmg |
@@ -1424,7 +1367,7 @@ cleanup_critical: | |||
1424 | 1367 | ||
1425 | .Lcleanup_sysc_tif: | 1368 | .Lcleanup_sysc_tif: |
1426 | larl %r9,.Lsysc_tif | 1369 | larl %r9,.Lsysc_tif |
1427 | BR_R11USE_R14 | 1370 | BR_EX %r14,%r11 |
1428 | 1371 | ||
1429 | .Lcleanup_sysc_restore: | 1372 | .Lcleanup_sysc_restore: |
1430 | # check if stpt has been executed | 1373 | # check if stpt has been executed |
@@ -1441,14 +1384,14 @@ cleanup_critical: | |||
1441 | mvc 0(64,%r11),__PT_R8(%r9) | 1384 | mvc 0(64,%r11),__PT_R8(%r9) |
1442 | lmg %r0,%r7,__PT_R0(%r9) | 1385 | lmg %r0,%r7,__PT_R0(%r9) |
1443 | 1: lmg %r8,%r9,__LC_RETURN_PSW | 1386 | 1: lmg %r8,%r9,__LC_RETURN_PSW |
1444 | BR_R11USE_R14 | 1387 | BR_EX %r14,%r11 |
1445 | .Lcleanup_sysc_restore_insn: | 1388 | .Lcleanup_sysc_restore_insn: |
1446 | .quad .Lsysc_exit_timer | 1389 | .quad .Lsysc_exit_timer |
1447 | .quad .Lsysc_done - 4 | 1390 | .quad .Lsysc_done - 4 |
1448 | 1391 | ||
1449 | .Lcleanup_io_tif: | 1392 | .Lcleanup_io_tif: |
1450 | larl %r9,.Lio_tif | 1393 | larl %r9,.Lio_tif |
1451 | BR_R11USE_R14 | 1394 | BR_EX %r14,%r11 |
1452 | 1395 | ||
1453 | .Lcleanup_io_restore: | 1396 | .Lcleanup_io_restore: |
1454 | # check if stpt has been executed | 1397 | # check if stpt has been executed |
@@ -1462,7 +1405,7 @@ cleanup_critical: | |||
1462 | mvc 0(64,%r11),__PT_R8(%r9) | 1405 | mvc 0(64,%r11),__PT_R8(%r9) |
1463 | lmg %r0,%r7,__PT_R0(%r9) | 1406 | lmg %r0,%r7,__PT_R0(%r9) |
1464 | 1: lmg %r8,%r9,__LC_RETURN_PSW | 1407 | 1: lmg %r8,%r9,__LC_RETURN_PSW |
1465 | BR_R11USE_R14 | 1408 | BR_EX %r14,%r11 |
1466 | .Lcleanup_io_restore_insn: | 1409 | .Lcleanup_io_restore_insn: |
1467 | .quad .Lio_exit_timer | 1410 | .quad .Lio_exit_timer |
1468 | .quad .Lio_done - 4 | 1411 | .quad .Lio_done - 4 |
@@ -1515,17 +1458,17 @@ cleanup_critical: | |||
1515 | # prepare return psw | 1458 | # prepare return psw |
1516 | nihh %r8,0xfcfd # clear irq & wait state bits | 1459 | nihh %r8,0xfcfd # clear irq & wait state bits |
1517 | lg %r9,48(%r11) # return from psw_idle | 1460 | lg %r9,48(%r11) # return from psw_idle |
1518 | BR_R11USE_R14 | 1461 | BR_EX %r14,%r11 |
1519 | .Lcleanup_idle_insn: | 1462 | .Lcleanup_idle_insn: |
1520 | .quad .Lpsw_idle_lpsw | 1463 | .quad .Lpsw_idle_lpsw |
1521 | 1464 | ||
1522 | .Lcleanup_save_fpu_regs: | 1465 | .Lcleanup_save_fpu_regs: |
1523 | larl %r9,save_fpu_regs | 1466 | larl %r9,save_fpu_regs |
1524 | BR_R11USE_R14 | 1467 | BR_EX %r14,%r11 |
1525 | 1468 | ||
1526 | .Lcleanup_load_fpu_regs: | 1469 | .Lcleanup_load_fpu_regs: |
1527 | larl %r9,load_fpu_regs | 1470 | larl %r9,load_fpu_regs |
1528 | BR_R11USE_R14 | 1471 | BR_EX %r14,%r11 |
1529 | 1472 | ||
1530 | /* | 1473 | /* |
1531 | * Integer constants | 1474 | * Integer constants |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 94f2099bceb0..3d17c41074ca 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -176,10 +176,9 @@ void do_softirq_own_stack(void) | |||
176 | new -= STACK_FRAME_OVERHEAD; | 176 | new -= STACK_FRAME_OVERHEAD; |
177 | ((struct stack_frame *) new)->back_chain = old; | 177 | ((struct stack_frame *) new)->back_chain = old; |
178 | asm volatile(" la 15,0(%0)\n" | 178 | asm volatile(" la 15,0(%0)\n" |
179 | " basr 14,%2\n" | 179 | " brasl 14,__do_softirq\n" |
180 | " la 15,0(%1)\n" | 180 | " la 15,0(%1)\n" |
181 | : : "a" (new), "a" (old), | 181 | : : "a" (new), "a" (old) |
182 | "a" (__do_softirq) | ||
183 | : "0", "1", "2", "3", "4", "5", "14", | 182 | : "0", "1", "2", "3", "4", "5", "14", |
184 | "cc", "memory" ); | 183 | "cc", "memory" ); |
185 | } else { | 184 | } else { |
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 82df7d80fab2..27110f3294ed 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
@@ -9,13 +9,17 @@ | |||
9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
10 | #include <asm/asm-offsets.h> | 10 | #include <asm/asm-offsets.h> |
11 | #include <asm/ftrace.h> | 11 | #include <asm/ftrace.h> |
12 | #include <asm/nospec-insn.h> | ||
12 | #include <asm/ptrace.h> | 13 | #include <asm/ptrace.h> |
13 | #include <asm/export.h> | 14 | #include <asm/export.h> |
14 | 15 | ||
16 | GEN_BR_THUNK %r1 | ||
17 | GEN_BR_THUNK %r14 | ||
18 | |||
15 | .section .kprobes.text, "ax" | 19 | .section .kprobes.text, "ax" |
16 | 20 | ||
17 | ENTRY(ftrace_stub) | 21 | ENTRY(ftrace_stub) |
18 | br %r14 | 22 | BR_EX %r14 |
19 | 23 | ||
20 | #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) | 24 | #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) |
21 | #define STACK_PTREGS (STACK_FRAME_OVERHEAD) | 25 | #define STACK_PTREGS (STACK_FRAME_OVERHEAD) |
@@ -23,7 +27,7 @@ ENTRY(ftrace_stub) | |||
23 | #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) | 27 | #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) |
24 | 28 | ||
25 | ENTRY(_mcount) | 29 | ENTRY(_mcount) |
26 | br %r14 | 30 | BR_EX %r14 |
27 | 31 | ||
28 | EXPORT_SYMBOL(_mcount) | 32 | EXPORT_SYMBOL(_mcount) |
29 | 33 | ||
@@ -53,7 +57,7 @@ ENTRY(ftrace_caller) | |||
53 | #endif | 57 | #endif |
54 | lgr %r3,%r14 | 58 | lgr %r3,%r14 |
55 | la %r5,STACK_PTREGS(%r15) | 59 | la %r5,STACK_PTREGS(%r15) |
56 | basr %r14,%r1 | 60 | BASR_EX %r14,%r1 |
57 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 61 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
58 | # The j instruction gets runtime patched to a nop instruction. | 62 | # The j instruction gets runtime patched to a nop instruction. |
59 | # See ftrace_enable_ftrace_graph_caller. | 63 | # See ftrace_enable_ftrace_graph_caller. |
@@ -68,7 +72,7 @@ ftrace_graph_caller_end: | |||
68 | #endif | 72 | #endif |
69 | lg %r1,(STACK_PTREGS_PSW+8)(%r15) | 73 | lg %r1,(STACK_PTREGS_PSW+8)(%r15) |
70 | lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) | 74 | lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) |
71 | br %r1 | 75 | BR_EX %r1 |
72 | 76 | ||
73 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 77 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
74 | 78 | ||
@@ -81,6 +85,6 @@ ENTRY(return_to_handler) | |||
81 | aghi %r15,STACK_FRAME_OVERHEAD | 85 | aghi %r15,STACK_FRAME_OVERHEAD |
82 | lgr %r14,%r2 | 86 | lgr %r14,%r2 |
83 | lmg %r2,%r5,32(%r15) | 87 | lmg %r2,%r5,32(%r15) |
84 | br %r14 | 88 | BR_EX %r14 |
85 | 89 | ||
86 | #endif | 90 | #endif |
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index 46d49a11663f..8ad6a7128b3a 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c | |||
@@ -1,7 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/device.h> | 3 | #include <linux/device.h> |
4 | #include <linux/cpu.h> | ||
5 | #include <asm/nospec-branch.h> | 4 | #include <asm/nospec-branch.h> |
6 | 5 | ||
7 | static int __init nobp_setup_early(char *str) | 6 | static int __init nobp_setup_early(char *str) |
@@ -44,24 +43,6 @@ static int __init nospec_report(void) | |||
44 | } | 43 | } |
45 | arch_initcall(nospec_report); | 44 | arch_initcall(nospec_report); |
46 | 45 | ||
47 | #ifdef CONFIG_SYSFS | ||
48 | ssize_t cpu_show_spectre_v1(struct device *dev, | ||
49 | struct device_attribute *attr, char *buf) | ||
50 | { | ||
51 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); | ||
52 | } | ||
53 | |||
54 | ssize_t cpu_show_spectre_v2(struct device *dev, | ||
55 | struct device_attribute *attr, char *buf) | ||
56 | { | ||
57 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) | ||
58 | return sprintf(buf, "Mitigation: execute trampolines\n"); | ||
59 | if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) | ||
60 | return sprintf(buf, "Mitigation: limited branch prediction.\n"); | ||
61 | return sprintf(buf, "Vulnerable\n"); | ||
62 | } | ||
63 | #endif | ||
64 | |||
65 | #ifdef CONFIG_EXPOLINE | 46 | #ifdef CONFIG_EXPOLINE |
66 | 47 | ||
67 | int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF); | 48 | int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF); |
@@ -112,7 +93,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end) | |||
112 | s32 *epo; | 93 | s32 *epo; |
113 | 94 | ||
114 | /* Second part of the instruction replace is always a nop */ | 95 | /* Second part of the instruction replace is always a nop */ |
115 | memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4); | ||
116 | for (epo = start; epo < end; epo++) { | 96 | for (epo = start; epo < end; epo++) { |
117 | instr = (u8 *) epo + *epo; | 97 | instr = (u8 *) epo + *epo; |
118 | if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) | 98 | if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) |
@@ -133,18 +113,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end) | |||
133 | br = thunk + (*(int *)(thunk + 2)) * 2; | 113 | br = thunk + (*(int *)(thunk + 2)) * 2; |
134 | else | 114 | else |
135 | continue; | 115 | continue; |
136 | if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0) | 116 | /* Check for unconditional branch 0x07f? or 0x47f???? */ |
117 | if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0) | ||
137 | continue; | 118 | continue; |
119 | |||
120 | memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4); | ||
138 | switch (type) { | 121 | switch (type) { |
139 | case BRCL_EXPOLINE: | 122 | case BRCL_EXPOLINE: |
140 | /* brcl to thunk, replace with br + nop */ | ||
141 | insnbuf[0] = br[0]; | 123 | insnbuf[0] = br[0]; |
142 | insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); | 124 | insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); |
125 | if (br[0] == 0x47) { | ||
126 | /* brcl to b, replace with bc + nopr */ | ||
127 | insnbuf[2] = br[2]; | ||
128 | insnbuf[3] = br[3]; | ||
129 | } else { | ||
130 | /* brcl to br, replace with bcr + nop */ | ||
131 | } | ||
143 | break; | 132 | break; |
144 | case BRASL_EXPOLINE: | 133 | case BRASL_EXPOLINE: |
145 | /* brasl to thunk, replace with basr + nop */ | ||
146 | insnbuf[0] = 0x0d; | ||
147 | insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); | 134 | insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); |
135 | if (br[0] == 0x47) { | ||
136 | /* brasl to b, replace with bas + nopr */ | ||
137 | insnbuf[0] = 0x4d; | ||
138 | insnbuf[2] = br[2]; | ||
139 | insnbuf[3] = br[3]; | ||
140 | } else { | ||
141 | /* brasl to br, replace with basr + nop */ | ||
142 | insnbuf[0] = 0x0d; | ||
143 | } | ||
148 | break; | 144 | break; |
149 | } | 145 | } |
150 | 146 | ||
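
Side note on the hunk above: the new mask test accepts both possible thunk endings, the two-byte "br %rX" (0x07fX) and the four-byte "bc 15,D(%rX)" (0x47fX ????). A minimal user-space sketch of just that recognition test follows; the byte patterns are taken from the hunk, while the function name and sample byte sequences are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Sketch of the branch-recognition test added in __nospec_revert() above:
 * the thunk body must end in an unconditional branch, either "br %rX"
 * (0x07fX, two bytes) or "bc 15,D(%rX)" (0x47fX ????, four bytes).
 * The 0xbf mask lets both the 0x07 and 0x47 opcodes pass. */
static bool is_uncond_branch(const uint8_t *br)
{
	return (br[0] & 0xbf) == 0x07 && (br[1] & 0xf0) == 0xf0;
}

int main(void)
{
	const uint8_t br_r1[] = { 0x07, 0xf1 };             /* br %r1 */
	const uint8_t bc_r1[] = { 0x47, 0xf0, 0x10, 0x00 }; /* bc 15,0(%r1), four-byte form */
	const uint8_t other[] = { 0x07, 0x01 };             /* bcr 0,%r1: not unconditional */

	printf("%d %d %d\n", is_uncond_branch(br_r1),
	       is_uncond_branch(bc_r1), is_uncond_branch(other));
	return 0;
}
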
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c new file mode 100644 index 000000000000..8affad5f18cb --- /dev/null +++ b/arch/s390/kernel/nospec-sysfs.c | |||
@@ -0,0 +1,21 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <linux/device.h> | ||
3 | #include <linux/cpu.h> | ||
4 | #include <asm/facility.h> | ||
5 | #include <asm/nospec-branch.h> | ||
6 | |||
7 | ssize_t cpu_show_spectre_v1(struct device *dev, | ||
8 | struct device_attribute *attr, char *buf) | ||
9 | { | ||
10 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); | ||
11 | } | ||
12 | |||
13 | ssize_t cpu_show_spectre_v2(struct device *dev, | ||
14 | struct device_attribute *attr, char *buf) | ||
15 | { | ||
16 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) | ||
17 | return sprintf(buf, "Mitigation: execute trampolines\n"); | ||
18 | if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) | ||
19 | return sprintf(buf, "Mitigation: limited branch prediction\n"); | ||
20 | return sprintf(buf, "Vulnerable\n"); | ||
21 | } | ||
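
These two show handlers back the per-CPU mitigation files exposed through sysfs. A small user-space reader is sketched below; the path is an assumption based on the generic CPU vulnerabilities interface (/sys/devices/system/cpu/vulnerabilities/), so adjust it if your kernel lays things out differently.

#include <stdio.h>

/* Read the spectre_v2 mitigation string that cpu_show_spectre_v2() produces.
 * The path below is assumed, not taken from this patch. */
int main(void)
{
	const char *path = "/sys/devices/system/cpu/vulnerabilities/spectre_v2";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);   /* e.g. "Mitigation: execute trampolines" */
	fclose(f);
	return 0;
}
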
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 1c9ddd7aa5ec..0292d68e7dde 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -753,6 +753,10 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
753 | */ | 753 | */ |
754 | rate = 0; | 754 | rate = 0; |
755 | if (attr->freq) { | 755 | if (attr->freq) { |
756 | if (!attr->sample_freq) { | ||
757 | err = -EINVAL; | ||
758 | goto out; | ||
759 | } | ||
756 | rate = freq_to_sample_rate(&si, attr->sample_freq); | 760 | rate = freq_to_sample_rate(&si, attr->sample_freq); |
757 | rate = hw_limit_rate(&si, rate); | 761 | rate = hw_limit_rate(&si, rate); |
758 | attr->freq = 0; | 762 | attr->freq = 0; |
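
The added check rejects frequency-based events whose sample_freq is zero before the value reaches freq_to_sample_rate(). Presumably this keeps a zero from ever becoming a divisor during the frequency-to-rate conversion; the sketch below only mirrors the shape of the guard with a stand-in helper and made-up numbers, not the kernel's actual implementation.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for freq_to_sample_rate(): illustrative only. A zero sample_freq
 * would otherwise end up as a divisor somewhere in this conversion. */
static uint64_t freq_to_sample_rate_stub(uint64_t cpu_hz, uint64_t sample_freq)
{
	return cpu_hz / sample_freq;
}

static int event_init_stub(int freq_mode, uint64_t sample_freq)
{
	uint64_t rate = 0;

	if (freq_mode) {
		if (!sample_freq)
			return -EINVAL;   /* mirrors the added guard */
		rate = freq_to_sample_rate_stub(4000000000ULL, sample_freq);
	}
	printf("rate=%llu\n", (unsigned long long)rate);
	return 0;
}

int main(void)
{
	return event_init_stub(1, 0) == -EINVAL ? 0 : 1;
}
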
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 73cc3750f0d3..7f14adf512c6 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -7,8 +7,11 @@ | |||
7 | 7 | ||
8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
9 | #include <asm/asm-offsets.h> | 9 | #include <asm/asm-offsets.h> |
10 | #include <asm/nospec-insn.h> | ||
10 | #include <asm/sigp.h> | 11 | #include <asm/sigp.h> |
11 | 12 | ||
13 | GEN_BR_THUNK %r9 | ||
14 | |||
12 | # | 15 | # |
13 | # Issue "store status" for the current CPU to its prefix page | 16 | # Issue "store status" for the current CPU to its prefix page |
14 | # and call passed function afterwards | 17 | # and call passed function afterwards |
@@ -67,9 +70,9 @@ ENTRY(store_status) | |||
67 | st %r4,0(%r1) | 70 | st %r4,0(%r1) |
68 | st %r5,4(%r1) | 71 | st %r5,4(%r1) |
69 | stg %r2,8(%r1) | 72 | stg %r2,8(%r1) |
70 | lgr %r1,%r2 | 73 | lgr %r9,%r2 |
71 | lgr %r2,%r3 | 74 | lgr %r2,%r3 |
72 | br %r1 | 75 | BR_EX %r9 |
73 | 76 | ||
74 | .section .bss | 77 | .section .bss |
75 | .align 8 | 78 | .align 8 |
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index e99187149f17..a049a7b9d6e8 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/ptrace.h> | 13 | #include <asm/ptrace.h> |
14 | #include <asm/thread_info.h> | 14 | #include <asm/thread_info.h> |
15 | #include <asm/asm-offsets.h> | 15 | #include <asm/asm-offsets.h> |
16 | #include <asm/nospec-insn.h> | ||
16 | #include <asm/sigp.h> | 17 | #include <asm/sigp.h> |
17 | 18 | ||
18 | /* | 19 | /* |
@@ -24,6 +25,8 @@ | |||
24 | * (see below) in the resume process. | 25 | * (see below) in the resume process. |
25 | * This function runs with disabled interrupts. | 26 | * This function runs with disabled interrupts. |
26 | */ | 27 | */ |
28 | GEN_BR_THUNK %r14 | ||
29 | |||
27 | .section .text | 30 | .section .text |
28 | ENTRY(swsusp_arch_suspend) | 31 | ENTRY(swsusp_arch_suspend) |
29 | stmg %r6,%r15,__SF_GPRS(%r15) | 32 | stmg %r6,%r15,__SF_GPRS(%r15) |
@@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend) | |||
103 | spx 0x318(%r1) | 106 | spx 0x318(%r1) |
104 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | 107 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) |
105 | lghi %r2,0 | 108 | lghi %r2,0 |
106 | br %r14 | 109 | BR_EX %r14 |
107 | 110 | ||
108 | /* | 111 | /* |
109 | * Restore saved memory image to correct place and restore register context. | 112 | * Restore saved memory image to correct place and restore register context. |
@@ -197,11 +200,10 @@ pgm_check_entry: | |||
197 | larl %r15,init_thread_union | 200 | larl %r15,init_thread_union |
198 | ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) | 201 | ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) |
199 | larl %r2,.Lpanic_string | 202 | larl %r2,.Lpanic_string |
200 | larl %r3,sclp_early_printk | ||
201 | lghi %r1,0 | 203 | lghi %r1,0 |
202 | sam31 | 204 | sam31 |
203 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE | 205 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE |
204 | basr %r14,%r3 | 206 | brasl %r14,sclp_early_printk |
205 | larl %r3,.Ldisabled_wait_31 | 207 | larl %r3,.Ldisabled_wait_31 |
206 | lpsw 0(%r3) | 208 | lpsw 0(%r3) |
207 | 4: | 209 | 4: |
@@ -267,7 +269,7 @@ restore_registers: | |||
267 | /* Return 0 */ | 269 | /* Return 0 */ |
268 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | 270 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) |
269 | lghi %r2,0 | 271 | lghi %r2,0 |
270 | br %r14 | 272 | BR_EX %r14 |
271 | 273 | ||
272 | .section .data..nosave,"aw",@progbits | 274 | .section .data..nosave,"aw",@progbits |
273 | .align 8 | 275 | .align 8 |
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S index 495c9c4bacc7..2311f15be9cf 100644 --- a/arch/s390/lib/mem.S +++ b/arch/s390/lib/mem.S | |||
@@ -7,6 +7,9 @@ | |||
7 | 7 | ||
8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
9 | #include <asm/export.h> | 9 | #include <asm/export.h> |
10 | #include <asm/nospec-insn.h> | ||
11 | |||
12 | GEN_BR_THUNK %r14 | ||
10 | 13 | ||
11 | /* | 14 | /* |
12 | * void *memmove(void *dest, const void *src, size_t n) | 15 | * void *memmove(void *dest, const void *src, size_t n) |
@@ -33,14 +36,14 @@ ENTRY(memmove) | |||
33 | .Lmemmove_forward_remainder: | 36 | .Lmemmove_forward_remainder: |
34 | larl %r5,.Lmemmove_mvc | 37 | larl %r5,.Lmemmove_mvc |
35 | ex %r4,0(%r5) | 38 | ex %r4,0(%r5) |
36 | br %r14 | 39 | BR_EX %r14 |
37 | .Lmemmove_reverse: | 40 | .Lmemmove_reverse: |
38 | ic %r0,0(%r4,%r3) | 41 | ic %r0,0(%r4,%r3) |
39 | stc %r0,0(%r4,%r1) | 42 | stc %r0,0(%r4,%r1) |
40 | brctg %r4,.Lmemmove_reverse | 43 | brctg %r4,.Lmemmove_reverse |
41 | ic %r0,0(%r4,%r3) | 44 | ic %r0,0(%r4,%r3) |
42 | stc %r0,0(%r4,%r1) | 45 | stc %r0,0(%r4,%r1) |
43 | br %r14 | 46 | BR_EX %r14 |
44 | .Lmemmove_mvc: | 47 | .Lmemmove_mvc: |
45 | mvc 0(1,%r1),0(%r3) | 48 | mvc 0(1,%r1),0(%r3) |
46 | EXPORT_SYMBOL(memmove) | 49 | EXPORT_SYMBOL(memmove) |
@@ -77,7 +80,7 @@ ENTRY(memset) | |||
77 | .Lmemset_clear_remainder: | 80 | .Lmemset_clear_remainder: |
78 | larl %r3,.Lmemset_xc | 81 | larl %r3,.Lmemset_xc |
79 | ex %r4,0(%r3) | 82 | ex %r4,0(%r3) |
80 | br %r14 | 83 | BR_EX %r14 |
81 | .Lmemset_fill: | 84 | .Lmemset_fill: |
82 | cghi %r4,1 | 85 | cghi %r4,1 |
83 | lgr %r1,%r2 | 86 | lgr %r1,%r2 |
@@ -95,10 +98,10 @@ ENTRY(memset) | |||
95 | stc %r3,0(%r1) | 98 | stc %r3,0(%r1) |
96 | larl %r5,.Lmemset_mvc | 99 | larl %r5,.Lmemset_mvc |
97 | ex %r4,0(%r5) | 100 | ex %r4,0(%r5) |
98 | br %r14 | 101 | BR_EX %r14 |
99 | .Lmemset_fill_exit: | 102 | .Lmemset_fill_exit: |
100 | stc %r3,0(%r1) | 103 | stc %r3,0(%r1) |
101 | br %r14 | 104 | BR_EX %r14 |
102 | .Lmemset_xc: | 105 | .Lmemset_xc: |
103 | xc 0(1,%r1),0(%r1) | 106 | xc 0(1,%r1),0(%r1) |
104 | .Lmemset_mvc: | 107 | .Lmemset_mvc: |
@@ -121,7 +124,7 @@ ENTRY(memcpy) | |||
121 | .Lmemcpy_remainder: | 124 | .Lmemcpy_remainder: |
122 | larl %r5,.Lmemcpy_mvc | 125 | larl %r5,.Lmemcpy_mvc |
123 | ex %r4,0(%r5) | 126 | ex %r4,0(%r5) |
124 | br %r14 | 127 | BR_EX %r14 |
125 | .Lmemcpy_loop: | 128 | .Lmemcpy_loop: |
126 | mvc 0(256,%r1),0(%r3) | 129 | mvc 0(256,%r1),0(%r3) |
127 | la %r1,256(%r1) | 130 | la %r1,256(%r1) |
@@ -159,10 +162,10 @@ ENTRY(__memset\bits) | |||
159 | \insn %r3,0(%r1) | 162 | \insn %r3,0(%r1) |
160 | larl %r5,.L__memset_mvc\bits | 163 | larl %r5,.L__memset_mvc\bits |
161 | ex %r4,0(%r5) | 164 | ex %r4,0(%r5) |
162 | br %r14 | 165 | BR_EX %r14 |
163 | .L__memset_exit\bits: | 166 | .L__memset_exit\bits: |
164 | \insn %r3,0(%r2) | 167 | \insn %r3,0(%r2) |
165 | br %r14 | 168 | BR_EX %r14 |
166 | .L__memset_mvc\bits: | 169 | .L__memset_mvc\bits: |
167 | mvc \bytes(1,%r1),0(%r1) | 170 | mvc \bytes(1,%r1),0(%r1) |
168 | .endm | 171 | .endm |
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S index 25bb4643c4f4..9f794869c1b0 100644 --- a/arch/s390/net/bpf_jit.S +++ b/arch/s390/net/bpf_jit.S | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
12 | #include <asm/nospec-insn.h> | ||
12 | #include "bpf_jit.h" | 13 | #include "bpf_jit.h" |
13 | 14 | ||
14 | /* | 15 | /* |
@@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos); \ | |||
54 | clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ | 55 | clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ |
55 | jh sk_load_##NAME##_slow; \ | 56 | jh sk_load_##NAME##_slow; \ |
56 | LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ | 57 | LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ |
57 | b OFF_OK(%r6); /* Return */ \ | 58 | B_EX OFF_OK,%r6; /* Return */ \ |
58 | \ | 59 | \ |
59 | sk_load_##NAME##_slow:; \ | 60 | sk_load_##NAME##_slow:; \ |
60 | lgr %r2,%r7; /* Arg1 = skb pointer */ \ | 61 | lgr %r2,%r7; /* Arg1 = skb pointer */ \ |
@@ -64,11 +65,14 @@ sk_load_##NAME##_slow:; \ | |||
64 | brasl %r14,skb_copy_bits; /* Get data from skb */ \ | 65 | brasl %r14,skb_copy_bits; /* Get data from skb */ \ |
65 | LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp buffer */ \ | 66 | LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp buffer */ \ |
66 | ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ | 67 | ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ |
67 | br %r6; /* Return */ | 68 | BR_EX %r6; /* Return */ |
68 | 69 | ||
69 | sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ | 70 | sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ |
70 | sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ | 71 | sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ |
71 | 72 | ||
73 | GEN_BR_THUNK %r6 | ||
74 | GEN_B_THUNK OFF_OK,%r6 | ||
75 | |||
72 | /* | 76 | /* |
73 | * Load 1 byte from SKB (optimized version) | 77 | * Load 1 byte from SKB (optimized version) |
74 | */ | 78 | */ |
@@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos) | |||
80 | clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? | 84 | clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? |
81 | jnl sk_load_byte_slow | 85 | jnl sk_load_byte_slow |
82 | llgc %r14,0(%r3,%r12) # Get byte from skb | 86 | llgc %r14,0(%r3,%r12) # Get byte from skb |
83 | b OFF_OK(%r6) # Return OK | 87 | B_EX OFF_OK,%r6 # Return OK |
84 | 88 | ||
85 | sk_load_byte_slow: | 89 | sk_load_byte_slow: |
86 | lgr %r2,%r7 # Arg1 = skb pointer | 90 | lgr %r2,%r7 # Arg1 = skb pointer |
@@ -90,7 +94,7 @@ sk_load_byte_slow: | |||
90 | brasl %r14,skb_copy_bits # Get data from skb | 94 | brasl %r14,skb_copy_bits # Get data from skb |
91 | llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer | 95 | llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer |
92 | ltgr %r2,%r2 # Set cc to (%r2 != 0) | 96 | ltgr %r2,%r2 # Set cc to (%r2 != 0) |
93 | br %r6 # Return cc | 97 | BR_EX %r6 # Return cc |
94 | 98 | ||
95 | #define sk_negative_common(NAME, SIZE, LOAD) \ | 99 | #define sk_negative_common(NAME, SIZE, LOAD) \ |
96 | sk_load_##NAME##_slow_neg:; \ | 100 | sk_load_##NAME##_slow_neg:; \ |
@@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:; \ | |||
104 | jz bpf_error; \ | 108 | jz bpf_error; \ |
105 | LOAD %r14,0(%r2); /* Get data from pointer */ \ | 109 | LOAD %r14,0(%r2); /* Get data from pointer */ \ |
106 | xr %r3,%r3; /* Set cc to zero */ \ | 110 | xr %r3,%r3; /* Set cc to zero */ \ |
107 | br %r6; /* Return cc */ | 111 | BR_EX %r6; /* Return cc */ |
108 | 112 | ||
109 | sk_negative_common(word, 4, llgf) | 113 | sk_negative_common(word, 4, llgf) |
110 | sk_negative_common(half, 2, llgh) | 114 | sk_negative_common(half, 2, llgh) |
@@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc) | |||
113 | bpf_error: | 117 | bpf_error: |
114 | # force a return 0 from jit handler | 118 | # force a return 0 from jit handler |
115 | ltgr %r15,%r15 # Set condition code | 119 | ltgr %r15,%r15 # Set condition code |
116 | br %r6 | 120 | BR_EX %r6 |
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 78a19c93b380..dd2bcf0e7d00 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <linux/bpf.h> | 25 | #include <linux/bpf.h> |
26 | #include <asm/cacheflush.h> | 26 | #include <asm/cacheflush.h> |
27 | #include <asm/dis.h> | 27 | #include <asm/dis.h> |
28 | #include <asm/facility.h> | ||
29 | #include <asm/nospec-branch.h> | ||
28 | #include <asm/set_memory.h> | 30 | #include <asm/set_memory.h> |
29 | #include "bpf_jit.h" | 31 | #include "bpf_jit.h" |
30 | 32 | ||
@@ -41,6 +43,8 @@ struct bpf_jit { | |||
41 | int base_ip; /* Base address for literal pool */ | 43 | int base_ip; /* Base address for literal pool */ |
42 | int ret0_ip; /* Address of return 0 */ | 44 | int ret0_ip; /* Address of return 0 */ |
43 | int exit_ip; /* Address of exit */ | 45 | int exit_ip; /* Address of exit */ |
46 | int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ | ||
47 | int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ | ||
44 | int tail_call_start; /* Tail call start offset */ | 48 | int tail_call_start; /* Tail call start offset */ |
45 | int labels[1]; /* Labels for local jumps */ | 49 | int labels[1]; /* Labels for local jumps */ |
46 | }; | 50 | }; |
@@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) | |||
250 | REG_SET_SEEN(b2); \ | 254 | REG_SET_SEEN(b2); \ |
251 | }) | 255 | }) |
252 | 256 | ||
257 | #define EMIT6_PCREL_RILB(op, b, target) \ | ||
258 | ({ \ | ||
259 | int rel = (target - jit->prg) / 2; \ | ||
260 | _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \ | ||
261 | REG_SET_SEEN(b); \ | ||
262 | }) | ||
263 | |||
264 | #define EMIT6_PCREL_RIL(op, target) \ | ||
265 | ({ \ | ||
266 | int rel = (target - jit->prg) / 2; \ | ||
267 | _EMIT6(op | rel >> 16, rel & 0xffff); \ | ||
268 | }) | ||
269 | |||
253 | #define _EMIT6_IMM(op, imm) \ | 270 | #define _EMIT6_IMM(op, imm) \ |
254 | ({ \ | 271 | ({ \ |
255 | unsigned int __imm = (imm); \ | 272 | unsigned int __imm = (imm); \ |
@@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) | |||
469 | EMIT4(0xb9040000, REG_2, BPF_REG_0); | 486 | EMIT4(0xb9040000, REG_2, BPF_REG_0); |
470 | /* Restore registers */ | 487 | /* Restore registers */ |
471 | save_restore_regs(jit, REGS_RESTORE, stack_depth); | 488 | save_restore_regs(jit, REGS_RESTORE, stack_depth); |
489 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { | ||
490 | jit->r14_thunk_ip = jit->prg; | ||
491 | /* Generate __s390_indirect_jump_r14 thunk */ | ||
492 | if (test_facility(35)) { | ||
493 | /* exrl %r0,.+10 */ | ||
494 | EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); | ||
495 | } else { | ||
496 | /* larl %r1,.+14 */ | ||
497 | EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); | ||
498 | /* ex 0,0(%r1) */ | ||
499 | EMIT4_DISP(0x44000000, REG_0, REG_1, 0); | ||
500 | } | ||
501 | /* j . */ | ||
502 | EMIT4_PCREL(0xa7f40000, 0); | ||
503 | } | ||
472 | /* br %r14 */ | 504 | /* br %r14 */ |
473 | _EMIT2(0x07fe); | 505 | _EMIT2(0x07fe); |
506 | |||
507 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable && | ||
508 | (jit->seen & SEEN_FUNC)) { | ||
509 | jit->r1_thunk_ip = jit->prg; | ||
510 | /* Generate __s390_indirect_jump_r1 thunk */ | ||
511 | if (test_facility(35)) { | ||
512 | /* exrl %r0,.+10 */ | ||
513 | EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); | ||
514 | /* j . */ | ||
515 | EMIT4_PCREL(0xa7f40000, 0); | ||
516 | /* br %r1 */ | ||
517 | _EMIT2(0x07f1); | ||
518 | } else { | ||
519 | /* larl %r1,.+14 */ | ||
520 | EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); | ||
521 | /* ex 0,S390_lowcore.br_r1_trampoline */ | ||
522 | EMIT4_DISP(0x44000000, REG_0, REG_0, | ||
523 | offsetof(struct lowcore, br_r1_trampoline)); | ||
524 | /* j . */ | ||
525 | EMIT4_PCREL(0xa7f40000, 0); | ||
526 | } | ||
527 | } | ||
474 | } | 528 | } |
475 | 529 | ||
476 | /* | 530 | /* |
@@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i | |||
966 | /* lg %w1,<d(imm)>(%l) */ | 1020 | /* lg %w1,<d(imm)>(%l) */ |
967 | EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, | 1021 | EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, |
968 | EMIT_CONST_U64(func)); | 1022 | EMIT_CONST_U64(func)); |
969 | /* basr %r14,%w1 */ | 1023 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { |
970 | EMIT2(0x0d00, REG_14, REG_W1); | 1024 | /* brasl %r14,__s390_indirect_jump_r1 */ |
1025 | EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); | ||
1026 | } else { | ||
1027 | /* basr %r14,%w1 */ | ||
1028 | EMIT2(0x0d00, REG_14, REG_W1); | ||
1029 | } | ||
971 | /* lgr %b0,%r2: load return value into %b0 */ | 1030 | /* lgr %b0,%r2: load return value into %b0 */ |
972 | EMIT4(0xb9040000, BPF_REG_0, REG_2); | 1031 | EMIT4(0xb9040000, BPF_REG_0, REG_2); |
973 | if ((jit->seen & SEEN_SKB) && | 1032 | if ((jit->seen & SEEN_SKB) && |
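
The new EMIT6_PCREL_RILB/EMIT6_PCREL_RIL helpers and the brasl to the generated r1 thunk all rely on the same arithmetic: s390 RIL-format branches encode a signed offset counted in halfwords from the start of the instruction, so the JIT keeps byte positions in jit->prg and halves the distance to the target when emitting. A toy model of that calculation is sketched below; the buffer positions are made up and no real machine encoding is produced.

#include <stdio.h>

/* Toy model of the PC-relative arithmetic used by the EMIT6_PCREL_* macros
 * above: the distance between the current emit position and the target is a
 * byte count, and the instruction wants it in halfwords. */
struct jit { unsigned int prg; };

static int pcrel_halfwords(const struct jit *jit, unsigned int target)
{
	return ((int)target - (int)jit->prg) / 2;
}

int main(void)
{
	struct jit jit = { .prg = 0x40 };   /* current emit position (bytes), assumed */
	unsigned int r1_thunk_ip = 0x90;    /* thunk position (bytes), assumed */

	printf("brasl to thunk: %d halfwords\n", pcrel_halfwords(&jit, r1_thunk_ip));
	printf("self branch 'j .': %d halfwords\n", pcrel_halfwords(&jit, jit.prg));
	return 0;
}
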
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 97fe29316476..1851eaeee131 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -9,6 +9,7 @@ config SUPERH | |||
9 | select HAVE_IDE if HAS_IOPORT_MAP | 9 | select HAVE_IDE if HAS_IOPORT_MAP |
10 | select HAVE_MEMBLOCK | 10 | select HAVE_MEMBLOCK |
11 | select HAVE_MEMBLOCK_NODE_MAP | 11 | select HAVE_MEMBLOCK_NODE_MAP |
12 | select NO_BOOTMEM | ||
12 | select ARCH_DISCARD_MEMBLOCK | 13 | select ARCH_DISCARD_MEMBLOCK |
13 | select HAVE_OPROFILE | 14 | select HAVE_OPROFILE |
14 | select HAVE_GENERIC_DMA_COHERENT | 15 | select HAVE_GENERIC_DMA_COHERENT |
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c index 4205f6d42b69..a5bd03642678 100644 --- a/arch/sh/kernel/cpu/sh2/probe.c +++ b/arch/sh/kernel/cpu/sh2/probe.c | |||
@@ -43,7 +43,11 @@ void __ref cpu_probe(void) | |||
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #if defined(CONFIG_CPU_J2) | 45 | #if defined(CONFIG_CPU_J2) |
46 | #if defined(CONFIG_SMP) | ||
46 | unsigned cpu = hard_smp_processor_id(); | 47 | unsigned cpu = hard_smp_processor_id(); |
48 | #else | ||
49 | unsigned cpu = 0; | ||
50 | #endif | ||
47 | if (cpu == 0) of_scan_flat_dt(scan_cache, NULL); | 51 | if (cpu == 0) of_scan_flat_dt(scan_cache, NULL); |
48 | if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu); | 52 | if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu); |
49 | if (cpu != 0) return; | 53 | if (cpu != 0) return; |
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index d34e998b809f..c286cf5da6e7 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/ioport.h> | 11 | #include <linux/ioport.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/initrd.h> | 13 | #include <linux/initrd.h> |
14 | #include <linux/bootmem.h> | ||
15 | #include <linux/console.h> | 14 | #include <linux/console.h> |
16 | #include <linux/root_dev.h> | 15 | #include <linux/root_dev.h> |
17 | #include <linux/utsname.h> | 16 | #include <linux/utsname.h> |
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 8ce98691d822..f1b44697ad68 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
@@ -59,7 +59,9 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
59 | 59 | ||
60 | split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); | 60 | split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); |
61 | 61 | ||
62 | *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset); | 62 | *dma_handle = virt_to_phys(ret); |
63 | if (!WARN_ON(!dev)) | ||
64 | *dma_handle -= PFN_PHYS(dev->dma_pfn_offset); | ||
63 | 65 | ||
64 | return ret_nocache; | 66 | return ret_nocache; |
65 | } | 67 | } |
@@ -69,9 +71,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size, | |||
69 | unsigned long attrs) | 71 | unsigned long attrs) |
70 | { | 72 | { |
71 | int order = get_order(size); | 73 | int order = get_order(size); |
72 | unsigned long pfn = (dma_handle >> PAGE_SHIFT) + dev->dma_pfn_offset; | 74 | unsigned long pfn = dma_handle >> PAGE_SHIFT; |
73 | int k; | 75 | int k; |
74 | 76 | ||
77 | if (!WARN_ON(!dev)) | ||
78 | pfn += dev->dma_pfn_offset; | ||
79 | |||
75 | for (k = 0; k < (1 << order); k++) | 80 | for (k = 0; k < (1 << order); k++) |
76 | __free_pages(pfn_to_page(pfn + k), 0); | 81 | __free_pages(pfn_to_page(pfn + k), 0); |
77 | 82 | ||
@@ -143,7 +148,7 @@ int __init platform_resource_setup_memory(struct platform_device *pdev, | |||
143 | if (!memsize) | 148 | if (!memsize) |
144 | return 0; | 149 | return 0; |
145 | 150 | ||
146 | buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL); | 151 | buf = dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL); |
147 | if (!buf) { | 152 | if (!buf) { |
148 | pr_warning("%s: unable to allocate memory\n", name); | 153 | pr_warning("%s: unable to allocate memory\n", name); |
149 | return -ENOMEM; | 154 | return -ENOMEM; |
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index ce0bbaa7e404..4034035fbede 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -211,59 +211,15 @@ void __init allocate_pgdat(unsigned int nid) | |||
211 | 211 | ||
212 | NODE_DATA(nid) = __va(phys); | 212 | NODE_DATA(nid) = __va(phys); |
213 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | 213 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); |
214 | |||
215 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | ||
216 | #endif | 214 | #endif |
217 | 215 | ||
218 | NODE_DATA(nid)->node_start_pfn = start_pfn; | 216 | NODE_DATA(nid)->node_start_pfn = start_pfn; |
219 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; | 217 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; |
220 | } | 218 | } |
221 | 219 | ||
222 | static void __init bootmem_init_one_node(unsigned int nid) | ||
223 | { | ||
224 | unsigned long total_pages, paddr; | ||
225 | unsigned long end_pfn; | ||
226 | struct pglist_data *p; | ||
227 | |||
228 | p = NODE_DATA(nid); | ||
229 | |||
230 | /* Nothing to do.. */ | ||
231 | if (!p->node_spanned_pages) | ||
232 | return; | ||
233 | |||
234 | end_pfn = pgdat_end_pfn(p); | ||
235 | |||
236 | total_pages = bootmem_bootmap_pages(p->node_spanned_pages); | ||
237 | |||
238 | paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); | ||
239 | if (!paddr) | ||
240 | panic("Can't allocate bootmap for nid[%d]\n", nid); | ||
241 | |||
242 | init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); | ||
243 | |||
244 | free_bootmem_with_active_regions(nid, end_pfn); | ||
245 | |||
246 | /* | ||
247 | * XXX Handle initial reservations for the system memory node | ||
248 | * only for the moment, we'll refactor this later for handling | ||
249 | * reservations in other nodes. | ||
250 | */ | ||
251 | if (nid == 0) { | ||
252 | struct memblock_region *reg; | ||
253 | |||
254 | /* Reserve the sections we're already using. */ | ||
255 | for_each_memblock(reserved, reg) { | ||
256 | reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | sparse_memory_present_with_active_regions(nid); | ||
261 | } | ||
262 | |||
263 | static void __init do_init_bootmem(void) | 220 | static void __init do_init_bootmem(void) |
264 | { | 221 | { |
265 | struct memblock_region *reg; | 222 | struct memblock_region *reg; |
266 | int i; | ||
267 | 223 | ||
268 | /* Add active regions with valid PFNs. */ | 224 | /* Add active regions with valid PFNs. */ |
269 | for_each_memblock(memory, reg) { | 225 | for_each_memblock(memory, reg) { |
@@ -279,9 +235,12 @@ static void __init do_init_bootmem(void) | |||
279 | 235 | ||
280 | plat_mem_setup(); | 236 | plat_mem_setup(); |
281 | 237 | ||
282 | for_each_online_node(i) | 238 | for_each_memblock(memory, reg) { |
283 | bootmem_init_one_node(i); | 239 | int nid = memblock_get_region_node(reg); |
284 | 240 | ||
241 | memory_present(nid, memblock_region_memory_base_pfn(reg), | ||
242 | memblock_region_memory_end_pfn(reg)); | ||
243 | } | ||
285 | sparse_init(); | 244 | sparse_init(); |
286 | } | 245 | } |
287 | 246 | ||
@@ -322,7 +281,6 @@ void __init paging_init(void) | |||
322 | { | 281 | { |
323 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 282 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
324 | unsigned long vaddr, end; | 283 | unsigned long vaddr, end; |
325 | int nid; | ||
326 | 284 | ||
327 | sh_mv.mv_mem_init(); | 285 | sh_mv.mv_mem_init(); |
328 | 286 | ||
@@ -377,21 +335,7 @@ void __init paging_init(void) | |||
377 | kmap_coherent_init(); | 335 | kmap_coherent_init(); |
378 | 336 | ||
379 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 337 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
380 | 338 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | |
381 | for_each_online_node(nid) { | ||
382 | pg_data_t *pgdat = NODE_DATA(nid); | ||
383 | unsigned long low, start_pfn; | ||
384 | |||
385 | start_pfn = pgdat->bdata->node_min_pfn; | ||
386 | low = pgdat->bdata->node_low_pfn; | ||
387 | |||
388 | if (max_zone_pfns[ZONE_NORMAL] < low) | ||
389 | max_zone_pfns[ZONE_NORMAL] = low; | ||
390 | |||
391 | printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n", | ||
392 | nid, start_pfn, low); | ||
393 | } | ||
394 | |||
395 | free_area_init_nodes(max_zone_pfns); | 339 | free_area_init_nodes(max_zone_pfns); |
396 | } | 340 | } |
397 | 341 | ||
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 05713d190247..830e8b3684e4 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * for more details. | 8 | * for more details. |
9 | */ | 9 | */ |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/bootmem.h> | ||
12 | #include <linux/memblock.h> | 11 | #include <linux/memblock.h> |
13 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
14 | #include <linux/numa.h> | 13 | #include <linux/numa.h> |
@@ -26,9 +25,7 @@ EXPORT_SYMBOL_GPL(node_data); | |||
26 | */ | 25 | */ |
27 | void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | 26 | void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) |
28 | { | 27 | { |
29 | unsigned long bootmap_pages; | ||
30 | unsigned long start_pfn, end_pfn; | 28 | unsigned long start_pfn, end_pfn; |
31 | unsigned long bootmem_paddr; | ||
32 | 29 | ||
33 | /* Don't allow bogus node assignment */ | 30 | /* Don't allow bogus node assignment */ |
34 | BUG_ON(nid >= MAX_NUMNODES || nid <= 0); | 31 | BUG_ON(nid >= MAX_NUMNODES || nid <= 0); |
@@ -48,25 +45,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | |||
48 | SMP_CACHE_BYTES, end)); | 45 | SMP_CACHE_BYTES, end)); |
49 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | 46 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); |
50 | 47 | ||
51 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | ||
52 | NODE_DATA(nid)->node_start_pfn = start_pfn; | 48 | NODE_DATA(nid)->node_start_pfn = start_pfn; |
53 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; | 49 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; |
54 | 50 | ||
55 | /* Node-local bootmap */ | ||
56 | bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); | ||
57 | bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, | ||
58 | PAGE_SIZE, end); | ||
59 | init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, | ||
60 | start_pfn, end_pfn); | ||
61 | |||
62 | free_bootmem_with_active_regions(nid, end_pfn); | ||
63 | |||
64 | /* Reserve the pgdat and bootmap space with the bootmem allocator */ | ||
65 | reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT, | ||
66 | sizeof(struct pglist_data), BOOTMEM_DEFAULT); | ||
67 | reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr, | ||
68 | bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); | ||
69 | |||
70 | /* It's up */ | 51 | /* It's up */ |
71 | node_set_online(nid); | 52 | node_set_online(nid); |
72 | 53 | ||
diff --git a/arch/x86/entry/vdso/vdso32/vdso-fakesections.c b/arch/x86/entry/vdso/vdso32/vdso-fakesections.c deleted file mode 100644 index 541468e25265..000000000000 --- a/arch/x86/entry/vdso/vdso32/vdso-fakesections.c +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include "../vdso-fakesections.c" | ||
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index a6006e7bb729..45b2b1c93d04 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/cpu.h> | 27 | #include <linux/cpu.h> |
28 | #include <linux/bitops.h> | 28 | #include <linux/bitops.h> |
29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
30 | #include <linux/nospec.h> | ||
30 | 31 | ||
31 | #include <asm/apic.h> | 32 | #include <asm/apic.h> |
32 | #include <asm/stacktrace.h> | 33 | #include <asm/stacktrace.h> |
@@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) | |||
304 | 305 | ||
305 | config = attr->config; | 306 | config = attr->config; |
306 | 307 | ||
307 | cache_type = (config >> 0) & 0xff; | 308 | cache_type = (config >> 0) & 0xff; |
308 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | 309 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) |
309 | return -EINVAL; | 310 | return -EINVAL; |
311 | cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX); | ||
310 | 312 | ||
311 | cache_op = (config >> 8) & 0xff; | 313 | cache_op = (config >> 8) & 0xff; |
312 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | 314 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) |
313 | return -EINVAL; | 315 | return -EINVAL; |
316 | cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX); | ||
314 | 317 | ||
315 | cache_result = (config >> 16) & 0xff; | 318 | cache_result = (config >> 16) & 0xff; |
316 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | 319 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) |
317 | return -EINVAL; | 320 | return -EINVAL; |
321 | cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX); | ||
318 | 322 | ||
319 | val = hw_cache_event_ids[cache_type][cache_op][cache_result]; | 323 | val = hw_cache_event_ids[cache_type][cache_op][cache_result]; |
320 | 324 | ||
@@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event) | |||
421 | if (attr->config >= x86_pmu.max_events) | 425 | if (attr->config >= x86_pmu.max_events) |
422 | return -EINVAL; | 426 | return -EINVAL; |
423 | 427 | ||
428 | attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); | ||
429 | |||
424 | /* | 430 | /* |
425 | * The generic map: | 431 | * The generic map: |
426 | */ | 432 | */ |
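
The array_index_nospec() calls inserted above clamp an already bounds-checked index with a branchless mask, so a mispredicted check cannot steer a speculative out-of-bounds table lookup. The stand-alone sketch below shows the generic mask trick; it mirrors the idea behind <linux/nospec.h> but is not the kernel's (possibly arch-specific) implementation.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* ~0UL when index < size, 0UL otherwise (assumes size fits in a signed long),
 * computed without a branch so speculation cannot skip it. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - index - 1)) >> (BITS_PER_LONG - 1);
}

static unsigned long clamp_index(unsigned long index, unsigned long size)
{
	return index & index_mask(index, size);
}

int main(void)
{
	unsigned long size = 8;

	printf("%lu %lu %lu\n",
	       clamp_index(3, size),    /* 3: in bounds, unchanged   */
	       clamp_index(7, size),    /* 7: in bounds, unchanged   */
	       clamp_index(9, size));   /* 0: out of bounds, clamped */
	return 0;
}
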
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 9aca448bb8e6..9f8084f18d58 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c | |||
@@ -92,6 +92,7 @@ | |||
92 | #include <linux/module.h> | 92 | #include <linux/module.h> |
93 | #include <linux/slab.h> | 93 | #include <linux/slab.h> |
94 | #include <linux/perf_event.h> | 94 | #include <linux/perf_event.h> |
95 | #include <linux/nospec.h> | ||
95 | #include <asm/cpu_device_id.h> | 96 | #include <asm/cpu_device_id.h> |
96 | #include <asm/intel-family.h> | 97 | #include <asm/intel-family.h> |
97 | #include "../perf_event.h" | 98 | #include "../perf_event.h" |
@@ -302,6 +303,7 @@ static int cstate_pmu_event_init(struct perf_event *event) | |||
302 | } else if (event->pmu == &cstate_pkg_pmu) { | 303 | } else if (event->pmu == &cstate_pkg_pmu) { |
303 | if (cfg >= PERF_CSTATE_PKG_EVENT_MAX) | 304 | if (cfg >= PERF_CSTATE_PKG_EVENT_MAX) |
304 | return -EINVAL; | 305 | return -EINVAL; |
306 | cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX); | ||
305 | if (!pkg_msr[cfg].attr) | 307 | if (!pkg_msr[cfg].attr) |
306 | return -EINVAL; | 308 | return -EINVAL; |
307 | event->hw.event_base = pkg_msr[cfg].msr; | 309 | event->hw.event_base = pkg_msr[cfg].msr; |
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index e7edf19e64c2..b4771a6ddbc1 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c | |||
@@ -1,5 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/perf_event.h> | 2 | #include <linux/perf_event.h> |
3 | #include <linux/nospec.h> | ||
3 | #include <asm/intel-family.h> | 4 | #include <asm/intel-family.h> |
4 | 5 | ||
5 | enum perf_msr_id { | 6 | enum perf_msr_id { |
@@ -158,9 +159,6 @@ static int msr_event_init(struct perf_event *event) | |||
158 | if (event->attr.type != event->pmu->type) | 159 | if (event->attr.type != event->pmu->type) |
159 | return -ENOENT; | 160 | return -ENOENT; |
160 | 161 | ||
161 | if (cfg >= PERF_MSR_EVENT_MAX) | ||
162 | return -EINVAL; | ||
163 | |||
164 | /* unsupported modes and filters */ | 162 | /* unsupported modes and filters */ |
165 | if (event->attr.exclude_user || | 163 | if (event->attr.exclude_user || |
166 | event->attr.exclude_kernel || | 164 | event->attr.exclude_kernel || |
@@ -171,6 +169,11 @@ static int msr_event_init(struct perf_event *event) | |||
171 | event->attr.sample_period) /* no sampling */ | 169 | event->attr.sample_period) /* no sampling */ |
172 | return -EINVAL; | 170 | return -EINVAL; |
173 | 171 | ||
172 | if (cfg >= PERF_MSR_EVENT_MAX) | ||
173 | return -EINVAL; | ||
174 | |||
175 | cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX); | ||
176 | |||
174 | if (!msr[cfg].attr) | 177 | if (!msr[cfg].attr) |
175 | return -EINVAL; | 178 | return -EINVAL; |
176 | 179 | ||
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile index 367a8203cfcf..b173d404e3df 100644 --- a/arch/x86/hyperv/Makefile +++ b/arch/x86/hyperv/Makefile | |||
@@ -1 +1,2 @@ | |||
1 | obj-y := hv_init.o mmu.o | 1 | obj-y := hv_init.o mmu.o |
2 | obj-$(CONFIG_X86_64) += hv_apic.o | ||
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c new file mode 100644 index 000000000000..f68855499391 --- /dev/null +++ b/arch/x86/hyperv/hv_apic.c | |||
@@ -0,0 +1,256 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | /* | ||
4 | * Hyper-V specific APIC code. | ||
5 | * | ||
6 | * Copyright (C) 2018, Microsoft, Inc. | ||
7 | * | ||
8 | * Author : K. Y. Srinivasan <kys@microsoft.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published | ||
12 | * by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
17 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
18 | * details. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | #include <linux/version.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/clockchips.h> | ||
27 | #include <linux/hyperv.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/cpuhotplug.h> | ||
30 | #include <asm/hypervisor.h> | ||
31 | #include <asm/mshyperv.h> | ||
32 | #include <asm/apic.h> | ||
33 | |||
34 | static struct apic orig_apic; | ||
35 | |||
36 | static u64 hv_apic_icr_read(void) | ||
37 | { | ||
38 | u64 reg_val; | ||
39 | |||
40 | rdmsrl(HV_X64_MSR_ICR, reg_val); | ||
41 | return reg_val; | ||
42 | } | ||
43 | |||
44 | static void hv_apic_icr_write(u32 low, u32 id) | ||
45 | { | ||
46 | u64 reg_val; | ||
47 | |||
48 | reg_val = SET_APIC_DEST_FIELD(id); | ||
49 | reg_val = reg_val << 32; | ||
50 | reg_val |= low; | ||
51 | |||
52 | wrmsrl(HV_X64_MSR_ICR, reg_val); | ||
53 | } | ||
54 | |||
55 | static u32 hv_apic_read(u32 reg) | ||
56 | { | ||
57 | u32 reg_val, hi; | ||
58 | |||
59 | switch (reg) { | ||
60 | case APIC_EOI: | ||
61 | rdmsr(HV_X64_MSR_EOI, reg_val, hi); | ||
62 | return reg_val; | ||
63 | case APIC_TASKPRI: | ||
64 | rdmsr(HV_X64_MSR_TPR, reg_val, hi); | ||
65 | return reg_val; | ||
66 | |||
67 | default: | ||
68 | return native_apic_mem_read(reg); | ||
69 | } | ||
70 | } | ||
71 | |||
72 | static void hv_apic_write(u32 reg, u32 val) | ||
73 | { | ||
74 | switch (reg) { | ||
75 | case APIC_EOI: | ||
76 | wrmsr(HV_X64_MSR_EOI, val, 0); | ||
77 | break; | ||
78 | case APIC_TASKPRI: | ||
79 | wrmsr(HV_X64_MSR_TPR, val, 0); | ||
80 | break; | ||
81 | default: | ||
82 | native_apic_mem_write(reg, val); | ||
83 | } | ||
84 | } | ||
85 | |||
86 | static void hv_apic_eoi_write(u32 reg, u32 val) | ||
87 | { | ||
88 | wrmsr(HV_X64_MSR_EOI, val, 0); | ||
89 | } | ||
90 | |||
91 | /* | ||
92 | * IPI implementation on Hyper-V. | ||
93 | */ | ||
94 | static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) | ||
95 | { | ||
96 | struct ipi_arg_ex **arg; | ||
97 | struct ipi_arg_ex *ipi_arg; | ||
98 | unsigned long flags; | ||
99 | int nr_bank = 0; | ||
100 | int ret = 1; | ||
101 | |||
102 | local_irq_save(flags); | ||
103 | arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); | ||
104 | |||
105 | ipi_arg = *arg; | ||
106 | if (unlikely(!ipi_arg)) | ||
107 | goto ipi_mask_ex_done; | ||
108 | |||
109 | ipi_arg->vector = vector; | ||
110 | ipi_arg->reserved = 0; | ||
111 | ipi_arg->vp_set.valid_bank_mask = 0; | ||
112 | |||
113 | if (!cpumask_equal(mask, cpu_present_mask)) { | ||
114 | ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; | ||
115 | nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); | ||
116 | } | ||
117 | if (!nr_bank) | ||
118 | ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; | ||
119 | |||
120 | ret = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank, | ||
121 | ipi_arg, NULL); | ||
122 | |||
123 | ipi_mask_ex_done: | ||
124 | local_irq_restore(flags); | ||
125 | return ((ret == 0) ? true : false); | ||
126 | } | ||
127 | |||
128 | static bool __send_ipi_mask(const struct cpumask *mask, int vector) | ||
129 | { | ||
130 | int cur_cpu, vcpu; | ||
131 | struct ipi_arg_non_ex **arg; | ||
132 | struct ipi_arg_non_ex *ipi_arg; | ||
133 | int ret = 1; | ||
134 | unsigned long flags; | ||
135 | |||
136 | if (cpumask_empty(mask)) | ||
137 | return true; | ||
138 | |||
139 | if (!hv_hypercall_pg) | ||
140 | return false; | ||
141 | |||
142 | if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) | ||
143 | return false; | ||
144 | |||
145 | if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) | ||
146 | return __send_ipi_mask_ex(mask, vector); | ||
147 | |||
148 | local_irq_save(flags); | ||
149 | arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); | ||
150 | |||
151 | ipi_arg = *arg; | ||
152 | if (unlikely(!ipi_arg)) | ||
153 | goto ipi_mask_done; | ||
154 | |||
155 | ipi_arg->vector = vector; | ||
156 | ipi_arg->reserved = 0; | ||
157 | ipi_arg->cpu_mask = 0; | ||
158 | |||
159 | for_each_cpu(cur_cpu, mask) { | ||
160 | vcpu = hv_cpu_number_to_vp_number(cur_cpu); | ||
161 | /* | ||
162 | * This particular version of the IPI hypercall can | ||
163 | 		 * only target up to 64 CPUs. | ||
164 | */ | ||
165 | if (vcpu >= 64) | ||
166 | goto ipi_mask_done; | ||
167 | |||
168 | __set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask); | ||
169 | } | ||
170 | |||
171 | ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL); | ||
172 | |||
173 | ipi_mask_done: | ||
174 | local_irq_restore(flags); | ||
175 | return ((ret == 0) ? true : false); | ||
176 | } | ||
177 | |||
178 | static bool __send_ipi_one(int cpu, int vector) | ||
179 | { | ||
180 | struct cpumask mask = CPU_MASK_NONE; | ||
181 | |||
182 | cpumask_set_cpu(cpu, &mask); | ||
183 | return __send_ipi_mask(&mask, vector); | ||
184 | } | ||
185 | |||
186 | static void hv_send_ipi(int cpu, int vector) | ||
187 | { | ||
188 | if (!__send_ipi_one(cpu, vector)) | ||
189 | orig_apic.send_IPI(cpu, vector); | ||
190 | } | ||
191 | |||
192 | static void hv_send_ipi_mask(const struct cpumask *mask, int vector) | ||
193 | { | ||
194 | if (!__send_ipi_mask(mask, vector)) | ||
195 | orig_apic.send_IPI_mask(mask, vector); | ||
196 | } | ||
197 | |||
198 | static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) | ||
199 | { | ||
200 | unsigned int this_cpu = smp_processor_id(); | ||
201 | struct cpumask new_mask; | ||
202 | const struct cpumask *local_mask; | ||
203 | |||
204 | cpumask_copy(&new_mask, mask); | ||
205 | cpumask_clear_cpu(this_cpu, &new_mask); | ||
206 | local_mask = &new_mask; | ||
207 | if (!__send_ipi_mask(local_mask, vector)) | ||
208 | orig_apic.send_IPI_mask_allbutself(mask, vector); | ||
209 | } | ||
210 | |||
211 | static void hv_send_ipi_allbutself(int vector) | ||
212 | { | ||
213 | hv_send_ipi_mask_allbutself(cpu_online_mask, vector); | ||
214 | } | ||
215 | |||
216 | static void hv_send_ipi_all(int vector) | ||
217 | { | ||
218 | if (!__send_ipi_mask(cpu_online_mask, vector)) | ||
219 | orig_apic.send_IPI_all(vector); | ||
220 | } | ||
221 | |||
222 | static void hv_send_ipi_self(int vector) | ||
223 | { | ||
224 | if (!__send_ipi_one(smp_processor_id(), vector)) | ||
225 | orig_apic.send_IPI_self(vector); | ||
226 | } | ||
227 | |||
228 | void __init hv_apic_init(void) | ||
229 | { | ||
230 | if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) { | ||
231 | if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) | ||
232 | pr_info("Hyper-V: Using ext hypercalls for IPI\n"); | ||
233 | else | ||
234 | pr_info("Hyper-V: Using IPI hypercalls\n"); | ||
235 | /* | ||
236 | * Set the IPI entry points. | ||
237 | */ | ||
238 | orig_apic = *apic; | ||
239 | |||
240 | apic->send_IPI = hv_send_ipi; | ||
241 | apic->send_IPI_mask = hv_send_ipi_mask; | ||
242 | apic->send_IPI_mask_allbutself = hv_send_ipi_mask_allbutself; | ||
243 | apic->send_IPI_allbutself = hv_send_ipi_allbutself; | ||
244 | apic->send_IPI_all = hv_send_ipi_all; | ||
245 | apic->send_IPI_self = hv_send_ipi_self; | ||
246 | } | ||
247 | |||
248 | if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) { | ||
249 | pr_info("Hyper-V: Using MSR based APIC access\n"); | ||
250 | apic_set_eoi_write(hv_apic_eoi_write); | ||
251 | apic->read = hv_apic_read; | ||
252 | apic->write = hv_apic_write; | ||
253 | apic->icr_write = hv_apic_icr_write; | ||
254 | apic->icr_read = hv_apic_icr_read; | ||
255 | } | ||
256 | } | ||
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index cfecc2272f2d..4c431e1c1eff 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c | |||
@@ -91,12 +91,19 @@ EXPORT_SYMBOL_GPL(hv_vp_index); | |||
91 | struct hv_vp_assist_page **hv_vp_assist_page; | 91 | struct hv_vp_assist_page **hv_vp_assist_page; |
92 | EXPORT_SYMBOL_GPL(hv_vp_assist_page); | 92 | EXPORT_SYMBOL_GPL(hv_vp_assist_page); |
93 | 93 | ||
94 | void __percpu **hyperv_pcpu_input_arg; | ||
95 | EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg); | ||
96 | |||
94 | u32 hv_max_vp_index; | 97 | u32 hv_max_vp_index; |
95 | 98 | ||
96 | static int hv_cpu_init(unsigned int cpu) | 99 | static int hv_cpu_init(unsigned int cpu) |
97 | { | 100 | { |
98 | u64 msr_vp_index; | 101 | u64 msr_vp_index; |
99 | struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; | 102 | struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; |
103 | void **input_arg; | ||
104 | |||
105 | input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); | ||
106 | *input_arg = page_address(alloc_page(GFP_KERNEL)); | ||
100 | 107 | ||
101 | hv_get_vp_index(msr_vp_index); | 108 | hv_get_vp_index(msr_vp_index); |
102 | 109 | ||
@@ -217,6 +224,16 @@ static int hv_cpu_die(unsigned int cpu) | |||
217 | { | 224 | { |
218 | struct hv_reenlightenment_control re_ctrl; | 225 | struct hv_reenlightenment_control re_ctrl; |
219 | unsigned int new_cpu; | 226 | unsigned int new_cpu; |
227 | unsigned long flags; | ||
228 | void **input_arg; | ||
229 | void *input_pg = NULL; | ||
230 | |||
231 | local_irq_save(flags); | ||
232 | input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); | ||
233 | input_pg = *input_arg; | ||
234 | *input_arg = NULL; | ||
235 | local_irq_restore(flags); | ||
236 | free_page((unsigned long)input_pg); | ||
220 | 237 | ||
221 | if (hv_vp_assist_page && hv_vp_assist_page[cpu]) | 238 | if (hv_vp_assist_page && hv_vp_assist_page[cpu]) |
222 | wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0); | 239 | wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0); |
@@ -242,8 +259,9 @@ static int hv_cpu_die(unsigned int cpu) | |||
242 | * | 259 | * |
243 | * 1. Setup the hypercall page. | 260 | * 1. Setup the hypercall page. |
244 | * 2. Register Hyper-V specific clocksource. | 261 | * 2. Register Hyper-V specific clocksource. |
262 | * 3. Setup Hyper-V specific APIC entry points. | ||
245 | */ | 263 | */ |
246 | void hyperv_init(void) | 264 | void __init hyperv_init(void) |
247 | { | 265 | { |
248 | u64 guest_id, required_msrs; | 266 | u64 guest_id, required_msrs; |
249 | union hv_x64_msr_hypercall_contents hypercall_msr; | 267 | union hv_x64_msr_hypercall_contents hypercall_msr; |
@@ -259,6 +277,16 @@ void hyperv_init(void) | |||
259 | if ((ms_hyperv.features & required_msrs) != required_msrs) | 277 | if ((ms_hyperv.features & required_msrs) != required_msrs) |
260 | return; | 278 | return; |
261 | 279 | ||
280 | /* | ||
281 | * Allocate the per-CPU state for the hypercall input arg. | ||
282 | * If this allocation fails, we will not be able to set up the | ||
283 | * (per-CPU) hypercall input page and thus this failure is | ||
284 | * fatal on Hyper-V. | ||
285 | */ | ||
286 | hyperv_pcpu_input_arg = alloc_percpu(void *); | ||
287 | |||
288 | BUG_ON(hyperv_pcpu_input_arg == NULL); | ||
289 | |||
262 | /* Allocate percpu VP index */ | 290 | /* Allocate percpu VP index */ |
263 | hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index), | 291 | hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index), |
264 | GFP_KERNEL); | 292 | GFP_KERNEL); |
@@ -296,7 +324,7 @@ void hyperv_init(void) | |||
296 | hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg); | 324 | hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg); |
297 | wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); | 325 | wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); |
298 | 326 | ||
299 | hyper_alloc_mmu(); | 327 | hv_apic_init(); |
300 | 328 | ||
301 | /* | 329 | /* |
302 | * Register Hyper-V specific clocksource. | 330 | * Register Hyper-V specific clocksource. |
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 56c9ebac946f..5f053d7d1bd9 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c | |||
@@ -25,20 +25,13 @@ struct hv_flush_pcpu { | |||
25 | struct hv_flush_pcpu_ex { | 25 | struct hv_flush_pcpu_ex { |
26 | u64 address_space; | 26 | u64 address_space; |
27 | u64 flags; | 27 | u64 flags; |
28 | struct { | 28 | struct hv_vpset hv_vp_set; |
29 | u64 format; | ||
30 | u64 valid_bank_mask; | ||
31 | u64 bank_contents[]; | ||
32 | } hv_vp_set; | ||
33 | u64 gva_list[]; | 29 | u64 gva_list[]; |
34 | }; | 30 | }; |
35 | 31 | ||
36 | /* Each gva in gva_list encodes up to 4096 pages to flush */ | 32 | /* Each gva in gva_list encodes up to 4096 pages to flush */ |
37 | #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) | 33 | #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) |
38 | 34 | ||
39 | static struct hv_flush_pcpu __percpu **pcpu_flush; | ||
40 | |||
41 | static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex; | ||
42 | 35 | ||
43 | /* | 36 | /* |
44 | * Fills in gva_list starting from offset. Returns the number of items added. | 37 | * Fills in gva_list starting from offset. Returns the number of items added. |
@@ -70,41 +63,6 @@ static inline int fill_gva_list(u64 gva_list[], int offset, | |||
70 | return gva_n - offset; | 63 | return gva_n - offset; |
71 | } | 64 | } |
72 | 65 | ||
73 | /* Return the number of banks in the resulting vp_set */ | ||
74 | static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush, | ||
75 | const struct cpumask *cpus) | ||
76 | { | ||
77 | int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; | ||
78 | |||
79 | /* valid_bank_mask can represent up to 64 banks */ | ||
80 | if (hv_max_vp_index / 64 >= 64) | ||
81 | return 0; | ||
82 | |||
83 | /* | ||
84 | * Clear all banks up to the maximum possible bank; hv_flush_pcpu_ex | ||
85 | * structs are not cleared between calls, so we would otherwise risk | ||
86 | * flushing unneeded vCPUs. | ||
87 | */ | ||
88 | for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) | ||
89 | flush->hv_vp_set.bank_contents[vcpu_bank] = 0; | ||
90 | |||
91 | /* | ||
92 | * Some banks may end up being empty but this is acceptable. | ||
93 | */ | ||
94 | for_each_cpu(cpu, cpus) { | ||
95 | vcpu = hv_cpu_number_to_vp_number(cpu); | ||
96 | vcpu_bank = vcpu / 64; | ||
97 | vcpu_offset = vcpu % 64; | ||
98 | __set_bit(vcpu_offset, (unsigned long *) | ||
99 | &flush->hv_vp_set.bank_contents[vcpu_bank]); | ||
100 | if (vcpu_bank >= nr_bank) | ||
101 | nr_bank = vcpu_bank + 1; | ||
102 | } | ||
103 | flush->hv_vp_set.valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); | ||
104 | |||
105 | return nr_bank; | ||
106 | } | ||
107 | |||
108 | static void hyperv_flush_tlb_others(const struct cpumask *cpus, | 66 | static void hyperv_flush_tlb_others(const struct cpumask *cpus, |
109 | const struct flush_tlb_info *info) | 67 | const struct flush_tlb_info *info) |
110 | { | 68 | { |
@@ -116,7 +74,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, | |||
116 | 74 | ||
117 | trace_hyperv_mmu_flush_tlb_others(cpus, info); | 75 | trace_hyperv_mmu_flush_tlb_others(cpus, info); |
118 | 76 | ||
119 | if (!pcpu_flush || !hv_hypercall_pg) | 77 | if (!hv_hypercall_pg) |
120 | goto do_native; | 78 | goto do_native; |
121 | 79 | ||
122 | if (cpumask_empty(cpus)) | 80 | if (cpumask_empty(cpus)) |
@@ -124,10 +82,8 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, | |||
124 | 82 | ||
125 | local_irq_save(flags); | 83 | local_irq_save(flags); |
126 | 84 | ||
127 | flush_pcpu = this_cpu_ptr(pcpu_flush); | 85 | flush_pcpu = (struct hv_flush_pcpu **) |
128 | 86 | this_cpu_ptr(hyperv_pcpu_input_arg); | |
129 | if (unlikely(!*flush_pcpu)) | ||
130 | *flush_pcpu = page_address(alloc_page(GFP_ATOMIC)); | ||
131 | 87 | ||
132 | flush = *flush_pcpu; | 88 | flush = *flush_pcpu; |
133 | 89 | ||
@@ -203,7 +159,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
203 | 159 | ||
204 | trace_hyperv_mmu_flush_tlb_others(cpus, info); | 160 | trace_hyperv_mmu_flush_tlb_others(cpus, info); |
205 | 161 | ||
206 | if (!pcpu_flush_ex || !hv_hypercall_pg) | 162 | if (!hv_hypercall_pg) |
207 | goto do_native; | 163 | goto do_native; |
208 | 164 | ||
209 | if (cpumask_empty(cpus)) | 165 | if (cpumask_empty(cpus)) |
@@ -211,10 +167,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
211 | 167 | ||
212 | local_irq_save(flags); | 168 | local_irq_save(flags); |
213 | 169 | ||
214 | flush_pcpu = this_cpu_ptr(pcpu_flush_ex); | 170 | flush_pcpu = (struct hv_flush_pcpu_ex **) |
215 | 171 | this_cpu_ptr(hyperv_pcpu_input_arg); | |
216 | if (unlikely(!*flush_pcpu)) | ||
217 | *flush_pcpu = page_address(alloc_page(GFP_ATOMIC)); | ||
218 | 172 | ||
219 | flush = *flush_pcpu; | 173 | flush = *flush_pcpu; |
220 | 174 | ||
@@ -239,8 +193,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
239 | flush->hv_vp_set.valid_bank_mask = 0; | 193 | flush->hv_vp_set.valid_bank_mask = 0; |
240 | 194 | ||
241 | if (!cpumask_equal(cpus, cpu_present_mask)) { | 195 | if (!cpumask_equal(cpus, cpu_present_mask)) { |
242 | flush->hv_vp_set.format = HV_GENERIC_SET_SPARCE_4K; | 196 | flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K; |
243 | nr_bank = cpumask_to_vp_set(flush, cpus); | 197 | nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus); |
244 | } | 198 | } |
245 | 199 | ||
246 | if (!nr_bank) { | 200 | if (!nr_bank) { |
@@ -296,14 +250,3 @@ void hyperv_setup_mmu_ops(void) | |||
296 | pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex; | 250 | pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex; |
297 | } | 251 | } |
298 | } | 252 | } |
299 | |||
300 | void hyper_alloc_mmu(void) | ||
301 | { | ||
302 | if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED)) | ||
303 | return; | ||
304 | |||
305 | if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) | ||
306 | pcpu_flush = alloc_percpu(struct hv_flush_pcpu *); | ||
307 | else | ||
308 | pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *); | ||
309 | } | ||
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index a8897615354e..ff3f56bdd5d7 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h | |||
@@ -164,6 +164,11 @@ | |||
164 | */ | 164 | */ |
165 | #define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9) | 165 | #define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9) |
166 | 166 | ||
167 | /* | ||
168 | * Recommend using cluster IPI hypercalls. | ||
169 | */ | ||
170 | #define HV_X64_CLUSTER_IPI_RECOMMENDED (1 << 10) | ||
171 | |||
167 | /* Recommend using the newer ExProcessorMasks interface */ | 172 | /* Recommend using the newer ExProcessorMasks interface */ |
168 | #define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) | 173 | #define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) |
169 | 174 | ||
@@ -332,12 +337,17 @@ struct hv_tsc_emulation_status { | |||
332 | #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ | 337 | #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ |
333 | (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) | 338 | (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) |
334 | 339 | ||
340 | #define HV_IPI_LOW_VECTOR 0x10 | ||
341 | #define HV_IPI_HIGH_VECTOR 0xff | ||
342 | |||
335 | /* Declare the various hypercall operations. */ | 343 | /* Declare the various hypercall operations. */ |
336 | #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 | 344 | #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 |
337 | #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 | 345 | #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 |
338 | #define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 | 346 | #define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 |
347 | #define HVCALL_SEND_IPI 0x000b | ||
339 | #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 | 348 | #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 |
340 | #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 | 349 | #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 |
350 | #define HVCALL_SEND_IPI_EX 0x0015 | ||
341 | #define HVCALL_POST_MESSAGE 0x005c | 351 | #define HVCALL_POST_MESSAGE 0x005c |
342 | #define HVCALL_SIGNAL_EVENT 0x005d | 352 | #define HVCALL_SIGNAL_EVENT 0x005d |
343 | 353 | ||
@@ -363,7 +373,7 @@ struct hv_tsc_emulation_status { | |||
363 | #define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) | 373 | #define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) |
364 | 374 | ||
365 | enum HV_GENERIC_SET_FORMAT { | 375 | enum HV_GENERIC_SET_FORMAT { |
366 | HV_GENERIC_SET_SPARCE_4K, | 376 | HV_GENERIC_SET_SPARSE_4K, |
367 | HV_GENERIC_SET_ALL, | 377 | HV_GENERIC_SET_ALL, |
368 | }; | 378 | }; |
369 | 379 | ||
@@ -713,4 +723,22 @@ struct hv_enlightened_vmcs { | |||
713 | #define HV_STIMER_AUTOENABLE (1ULL << 3) | 723 | #define HV_STIMER_AUTOENABLE (1ULL << 3) |
714 | #define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) | 724 | #define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) |
715 | 725 | ||
726 | struct ipi_arg_non_ex { | ||
727 | u32 vector; | ||
728 | u32 reserved; | ||
729 | u64 cpu_mask; | ||
730 | }; | ||
731 | |||
732 | struct hv_vpset { | ||
733 | u64 format; | ||
734 | u64 valid_bank_mask; | ||
735 | u64 bank_contents[]; | ||
736 | }; | ||
737 | |||
738 | struct ipi_arg_ex { | ||
739 | u32 vector; | ||
740 | u32 reserved; | ||
741 | struct hv_vpset vp_set; | ||
742 | }; | ||
743 | |||
716 | #endif | 744 | #endif |
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index b90e79610cf7..997192131b7b 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
@@ -122,6 +122,7 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {} | |||
122 | #if IS_ENABLED(CONFIG_HYPERV) | 122 | #if IS_ENABLED(CONFIG_HYPERV) |
123 | extern struct clocksource *hyperv_cs; | 123 | extern struct clocksource *hyperv_cs; |
124 | extern void *hv_hypercall_pg; | 124 | extern void *hv_hypercall_pg; |
125 | extern void __percpu **hyperv_pcpu_input_arg; | ||
125 | 126 | ||
126 | static inline u64 hv_do_hypercall(u64 control, void *input, void *output) | 127 | static inline u64 hv_do_hypercall(u64 control, void *input, void *output) |
127 | { | 128 | { |
@@ -258,9 +259,41 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number) | |||
258 | return hv_vp_index[cpu_number]; | 259 | return hv_vp_index[cpu_number]; |
259 | } | 260 | } |
260 | 261 | ||
261 | void hyperv_init(void); | 262 | static inline int cpumask_to_vpset(struct hv_vpset *vpset, |
263 | const struct cpumask *cpus) | ||
264 | { | ||
265 | int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; | ||
266 | |||
267 | /* valid_bank_mask can represent up to 64 banks */ | ||
268 | if (hv_max_vp_index / 64 >= 64) | ||
269 | return 0; | ||
270 | |||
271 | /* | ||
272 | * Clear all banks up to the maximum possible bank; hv_flush_pcpu_ex | ||
273 | * structs are not cleared between calls, so we would otherwise risk | ||
274 | * flushing unneeded vCPUs. | ||
275 | */ | ||
276 | for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) | ||
277 | vpset->bank_contents[vcpu_bank] = 0; | ||
278 | |||
279 | /* | ||
280 | * Some banks may end up being empty but this is acceptable. | ||
281 | */ | ||
282 | for_each_cpu(cpu, cpus) { | ||
283 | vcpu = hv_cpu_number_to_vp_number(cpu); | ||
284 | vcpu_bank = vcpu / 64; | ||
285 | vcpu_offset = vcpu % 64; | ||
286 | __set_bit(vcpu_offset, (unsigned long *) | ||
287 | &vpset->bank_contents[vcpu_bank]); | ||
288 | if (vcpu_bank >= nr_bank) | ||
289 | nr_bank = vcpu_bank + 1; | ||
290 | } | ||
291 | vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); | ||
292 | return nr_bank; | ||
293 | } | ||
294 | |||
295 | void __init hyperv_init(void); | ||
262 | void hyperv_setup_mmu_ops(void); | 296 | void hyperv_setup_mmu_ops(void); |
263 | void hyper_alloc_mmu(void); | ||
264 | void hyperv_report_panic(struct pt_regs *regs, long err); | 297 | void hyperv_report_panic(struct pt_regs *regs, long err); |
265 | bool hv_is_hyperv_initialized(void); | 298 | bool hv_is_hyperv_initialized(void); |
266 | void hyperv_cleanup(void); | 299 | void hyperv_cleanup(void); |
@@ -269,6 +302,13 @@ void hyperv_reenlightenment_intr(struct pt_regs *regs); | |||
269 | void set_hv_tscchange_cb(void (*cb)(void)); | 302 | void set_hv_tscchange_cb(void (*cb)(void)); |
270 | void clear_hv_tscchange_cb(void); | 303 | void clear_hv_tscchange_cb(void); |
271 | void hyperv_stop_tsc_emulation(void); | 304 | void hyperv_stop_tsc_emulation(void); |
305 | |||
306 | #ifdef CONFIG_X86_64 | ||
307 | void hv_apic_init(void); | ||
308 | #else | ||
309 | static inline void hv_apic_init(void) {} | ||
310 | #endif | ||
311 | |||
272 | #else /* CONFIG_HYPERV */ | 312 | #else /* CONFIG_HYPERV */ |
273 | static inline void hyperv_init(void) {} | 313 | static inline void hyperv_init(void) {} |
274 | static inline bool hv_is_hyperv_initialized(void) { return false; } | 314 | static inline bool hv_is_hyperv_initialized(void) { return false; } |
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 4c851ebb3ceb..0ede697c3961 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #define KVM_FEATURE_PV_TLB_FLUSH 9 | 29 | #define KVM_FEATURE_PV_TLB_FLUSH 9 |
30 | #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 | 30 | #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 |
31 | 31 | ||
32 | #define KVM_HINTS_DEDICATED 0 | 32 | #define KVM_HINTS_REALTIME 0 |
33 | 33 | ||
34 | /* The last 8 bits are used to indicate how to interpret the flags field | 34 | /* The last 8 bits are used to indicate how to interpret the flags field |
35 | * in pvclock structure. If no bits are set, all flags are ignored. | 35 | * in pvclock structure. If no bits are set, all flags are ignored. |
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index c88e0b127810..b481b95bd8f6 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c | |||
@@ -14,8 +14,11 @@ | |||
14 | #include <asm/amd_nb.h> | 14 | #include <asm/amd_nb.h> |
15 | 15 | ||
16 | #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 | 16 | #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 |
17 | #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 | ||
17 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 | 18 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 |
18 | #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 | 19 | #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 |
20 | #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb | ||
21 | #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec | ||
19 | 22 | ||
20 | /* Protect the PCI config register pairs used for SMN and DF indirect access. */ | 23 | /* Protect the PCI config register pairs used for SMN and DF indirect access. */ |
21 | static DEFINE_MUTEX(smn_mutex); | 24 | static DEFINE_MUTEX(smn_mutex); |
@@ -24,6 +27,7 @@ static u32 *flush_words; | |||
24 | 27 | ||
25 | static const struct pci_device_id amd_root_ids[] = { | 28 | static const struct pci_device_id amd_root_ids[] = { |
26 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, | 29 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, |
30 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) }, | ||
27 | {} | 31 | {} |
28 | }; | 32 | }; |
29 | 33 | ||
@@ -39,6 +43,7 @@ const struct pci_device_id amd_nb_misc_ids[] = { | |||
39 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, | 43 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, |
40 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, | 44 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, |
41 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, | 45 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, |
46 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, | ||
42 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, | 47 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, |
43 | {} | 48 | {} |
44 | }; | 49 | }; |
@@ -51,6 +56,7 @@ static const struct pci_device_id amd_nb_link_ids[] = { | |||
51 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, | 56 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, |
52 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, | 57 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, |
53 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, | 58 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, |
59 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, | ||
54 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, | 60 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, |
55 | {} | 61 | {} |
56 | }; | 62 | }; |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 7867417cfaff..5b2300b818af 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -457,7 +457,7 @@ static void __init sev_map_percpu_data(void) | |||
457 | static void __init kvm_smp_prepare_cpus(unsigned int max_cpus) | 457 | static void __init kvm_smp_prepare_cpus(unsigned int max_cpus) |
458 | { | 458 | { |
459 | native_smp_prepare_cpus(max_cpus); | 459 | native_smp_prepare_cpus(max_cpus); |
460 | if (kvm_para_has_hint(KVM_HINTS_DEDICATED)) | 460 | if (kvm_para_has_hint(KVM_HINTS_REALTIME)) |
461 | static_branch_disable(&virt_spin_lock_key); | 461 | static_branch_disable(&virt_spin_lock_key); |
462 | } | 462 | } |
463 | 463 | ||
@@ -553,7 +553,7 @@ static void __init kvm_guest_init(void) | |||
553 | } | 553 | } |
554 | 554 | ||
555 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && | 555 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && |
556 | !kvm_para_has_hint(KVM_HINTS_DEDICATED) && | 556 | !kvm_para_has_hint(KVM_HINTS_REALTIME) && |
557 | kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) | 557 | kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) |
558 | pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; | 558 | pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; |
559 | 559 | ||
@@ -649,7 +649,7 @@ static __init int kvm_setup_pv_tlb_flush(void) | |||
649 | int cpu; | 649 | int cpu; |
650 | 650 | ||
651 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && | 651 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && |
652 | !kvm_para_has_hint(KVM_HINTS_DEDICATED) && | 652 | !kvm_para_has_hint(KVM_HINTS_REALTIME) && |
653 | kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { | 653 | kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { |
654 | for_each_possible_cpu(cpu) { | 654 | for_each_possible_cpu(cpu) { |
655 | zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), | 655 | zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), |
@@ -745,7 +745,7 @@ void __init kvm_spinlock_init(void) | |||
745 | if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) | 745 | if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) |
746 | return; | 746 | return; |
747 | 747 | ||
748 | if (kvm_para_has_hint(KVM_HINTS_DEDICATED)) | 748 | if (kvm_para_has_hint(KVM_HINTS_REALTIME)) |
749 | return; | 749 | return; |
750 | 750 | ||
751 | __pv_init_lock_hash(); | 751 | __pv_init_lock_hash(); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ba55be9b5c27..b7bf9ac9b6d1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -114,7 +114,7 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); | |||
114 | static bool __read_mostly report_ignored_msrs = true; | 114 | static bool __read_mostly report_ignored_msrs = true; |
115 | module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); | 115 | module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); |
116 | 116 | ||
117 | unsigned int min_timer_period_us = 500; | 117 | unsigned int min_timer_period_us = 200; |
118 | module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); | 118 | module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); |
119 | 119 | ||
120 | static bool __read_mostly kvmclock_periodic_sync = true; | 120 | static bool __read_mostly kvmclock_periodic_sync = true; |
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 826898701045..19c1ff542387 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c | |||
@@ -65,6 +65,19 @@ static void __init xen_hvm_init_mem_mapping(void) | |||
65 | { | 65 | { |
66 | early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE); | 66 | early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE); |
67 | HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn)); | 67 | HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn)); |
68 | |||
69 | /* | ||
70 | * The virtual address of the shared_info page has changed, so | ||
71 | * the vcpu_info pointer for VCPU 0 is now stale. | ||
72 | * | ||
73 | * The prepare_boot_cpu callback will re-initialize it via | ||
74 | * xen_vcpu_setup, but we can't rely on that to be called for | ||
75 | * old Xen versions (xen_have_vector_callback == 0). | ||
76 | * | ||
77 | * It is, in any case, bad to have a stale vcpu_info pointer, | ||
78 | * so reset it now. | ||
79 | */ | ||
80 | xen_vcpu_info_reset(0); | ||
68 | } | 81 | } |
69 | 82 | ||
70 | static void __init init_hvm_pv_info(void) | 83 | static void __init init_hvm_pv_info(void) |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index d33e7dbe3129..2d76106788a3 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) | |||
42 | } | 42 | } |
43 | EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); | 43 | EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); |
44 | 44 | ||
45 | static void xen_flush_tlb_all(void) | 45 | static noinline void xen_flush_tlb_all(void) |
46 | { | 46 | { |
47 | struct mmuext_op *op; | 47 | struct mmuext_op *op; |
48 | struct multicall_space mcs; | 48 | struct multicall_space mcs; |
49 | 49 | ||
50 | trace_xen_mmu_flush_tlb_all(0); | ||
51 | |||
52 | preempt_disable(); | 50 | preempt_disable(); |
53 | 51 | ||
54 | mcs = xen_mc_entry(sizeof(*op)); | 52 | mcs = xen_mc_entry(sizeof(*op)); |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 486c0a34d00b..2c30cabfda90 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
@@ -1310,13 +1310,11 @@ unsigned long xen_read_cr2_direct(void) | |||
1310 | return this_cpu_read(xen_vcpu_info.arch.cr2); | 1310 | return this_cpu_read(xen_vcpu_info.arch.cr2); |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | static void xen_flush_tlb(void) | 1313 | static noinline void xen_flush_tlb(void) |
1314 | { | 1314 | { |
1315 | struct mmuext_op *op; | 1315 | struct mmuext_op *op; |
1316 | struct multicall_space mcs; | 1316 | struct multicall_space mcs; |
1317 | 1317 | ||
1318 | trace_xen_mmu_flush_tlb(0); | ||
1319 | |||
1320 | preempt_disable(); | 1318 | preempt_disable(); |
1321 | 1319 | ||
1322 | mcs = xen_mc_entry(sizeof(*op)); | 1320 | mcs = xen_mc_entry(sizeof(*op)); |
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 514aaf948ea9..3825df923480 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h | |||
@@ -56,6 +56,10 @@ acpi_status acpi_ns_initialize_objects(void); | |||
56 | 56 | ||
57 | acpi_status acpi_ns_initialize_devices(u32 flags); | 57 | acpi_status acpi_ns_initialize_devices(u32 flags); |
58 | 58 | ||
59 | acpi_status | ||
60 | acpi_ns_init_one_package(acpi_handle obj_handle, | ||
61 | u32 level, void *context, void **return_value); | ||
62 | |||
59 | /* | 63 | /* |
60 | * nsload - Namespace loading | 64 | * nsload - Namespace loading |
61 | */ | 65 | */ |
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 99d92cb32803..f85c6f3271f6 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c | |||
@@ -174,6 +174,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, | |||
174 | return_ACPI_STATUS(status); | 174 | return_ACPI_STATUS(status); |
175 | } | 175 | } |
176 | 176 | ||
177 | /* Complete the initialization/resolution of package objects */ | ||
178 | |||
179 | status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT, | ||
180 | ACPI_UINT32_MAX, 0, | ||
181 | acpi_ns_init_one_package, NULL, NULL, | ||
182 | NULL); | ||
183 | |||
177 | /* Parameter Data (optional) */ | 184 | /* Parameter Data (optional) */ |
178 | 185 | ||
179 | if (parameter_node) { | 186 | if (parameter_node) { |
@@ -430,6 +437,13 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
430 | return_ACPI_STATUS(status); | 437 | return_ACPI_STATUS(status); |
431 | } | 438 | } |
432 | 439 | ||
440 | /* Complete the initialization/resolution of package objects */ | ||
441 | |||
442 | status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT, | ||
443 | ACPI_UINT32_MAX, 0, | ||
444 | acpi_ns_init_one_package, NULL, NULL, | ||
445 | NULL); | ||
446 | |||
433 | /* Store the ddb_handle into the Target operand */ | 447 | /* Store the ddb_handle into the Target operand */ |
434 | 448 | ||
435 | status = acpi_ex_store(ddb_handle, target, walk_state); | 449 | status = acpi_ex_store(ddb_handle, target, walk_state); |
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 77f2b5f4948a..d77257d1c827 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c | |||
@@ -242,6 +242,58 @@ error_exit: | |||
242 | 242 | ||
243 | /******************************************************************************* | 243 | /******************************************************************************* |
244 | * | 244 | * |
245 | * FUNCTION: acpi_ns_init_one_package | ||
246 | * | ||
247 | * PARAMETERS: obj_handle - Node | ||
248 | * level - Current nesting level | ||
249 | * context - Not used | ||
250 | * return_value - Not used | ||
251 | * | ||
252 | * RETURN: Status | ||
253 | * | ||
254 | * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every package | ||
255 | * within the namespace. Used during dynamic load of an SSDT. | ||
256 | * | ||
257 | ******************************************************************************/ | ||
258 | |||
259 | acpi_status | ||
260 | acpi_ns_init_one_package(acpi_handle obj_handle, | ||
261 | u32 level, void *context, void **return_value) | ||
262 | { | ||
263 | acpi_status status; | ||
264 | union acpi_operand_object *obj_desc; | ||
265 | struct acpi_namespace_node *node = | ||
266 | (struct acpi_namespace_node *)obj_handle; | ||
267 | |||
268 | obj_desc = acpi_ns_get_attached_object(node); | ||
269 | if (!obj_desc) { | ||
270 | return (AE_OK); | ||
271 | } | ||
272 | |||
273 | /* Exit if package is already initialized */ | ||
274 | |||
275 | if (obj_desc->package.flags & AOPOBJ_DATA_VALID) { | ||
276 | return (AE_OK); | ||
277 | } | ||
278 | |||
279 | status = acpi_ds_get_package_arguments(obj_desc); | ||
280 | if (ACPI_FAILURE(status)) { | ||
281 | return (AE_OK); | ||
282 | } | ||
283 | |||
284 | status = | ||
285 | acpi_ut_walk_package_tree(obj_desc, NULL, | ||
286 | acpi_ds_init_package_element, NULL); | ||
287 | if (ACPI_FAILURE(status)) { | ||
288 | return (AE_OK); | ||
289 | } | ||
290 | |||
291 | obj_desc->package.flags |= AOPOBJ_DATA_VALID; | ||
292 | return (AE_OK); | ||
293 | } | ||
294 | |||
295 | /******************************************************************************* | ||
296 | * | ||
245 | * FUNCTION: acpi_ns_init_one_object | 297 | * FUNCTION: acpi_ns_init_one_object |
246 | * | 298 | * |
247 | * PARAMETERS: obj_handle - Node | 299 | * PARAMETERS: obj_handle - Node |
@@ -360,27 +412,11 @@ acpi_ns_init_one_object(acpi_handle obj_handle, | |||
360 | 412 | ||
361 | case ACPI_TYPE_PACKAGE: | 413 | case ACPI_TYPE_PACKAGE: |
362 | 414 | ||
363 | info->package_init++; | 415 | /* Complete the initialization/resolution of the package object */ |
364 | status = acpi_ds_get_package_arguments(obj_desc); | ||
365 | if (ACPI_FAILURE(status)) { | ||
366 | break; | ||
367 | } | ||
368 | |||
369 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE, | ||
370 | "%s: Completing resolution of Package elements\n", | ||
371 | ACPI_GET_FUNCTION_NAME)); | ||
372 | 416 | ||
373 | /* | 417 | info->package_init++; |
374 | * Resolve all named references in package objects (and all | 418 | status = |
375 | * sub-packages). This action has been deferred until the entire | 419 | acpi_ns_init_one_package(obj_handle, level, NULL, NULL); |
376 | * namespace has been loaded, in order to support external and | ||
377 | * forward references from individual package elements (05/2017). | ||
378 | */ | ||
379 | status = acpi_ut_walk_package_tree(obj_desc, NULL, | ||
380 | acpi_ds_init_package_element, | ||
381 | NULL); | ||
382 | |||
383 | obj_desc->package.flags |= AOPOBJ_DATA_VALID; | ||
384 | break; | 420 | break; |
385 | 421 | ||
386 | default: | 422 | default: |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 1ff17799769d..6389c88b3500 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -698,7 +698,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, | |||
698 | 698 | ||
699 | DPRINTK("ENTER\n"); | 699 | DPRINTK("ENTER\n"); |
700 | 700 | ||
701 | ahci_stop_engine(ap); | 701 | hpriv->stop_engine(ap); |
702 | 702 | ||
703 | rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), | 703 | rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), |
704 | deadline, &online, NULL); | 704 | deadline, &online, NULL); |
@@ -724,7 +724,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, | |||
724 | bool online; | 724 | bool online; |
725 | int rc; | 725 | int rc; |
726 | 726 | ||
727 | ahci_stop_engine(ap); | 727 | hpriv->stop_engine(ap); |
728 | 728 | ||
729 | /* clear D2H reception area to properly wait for D2H FIS */ | 729 | /* clear D2H reception area to properly wait for D2H FIS */ |
730 | ata_tf_init(link->device, &tf); | 730 | ata_tf_init(link->device, &tf); |
@@ -788,7 +788,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, | |||
788 | 788 | ||
789 | DPRINTK("ENTER\n"); | 789 | DPRINTK("ENTER\n"); |
790 | 790 | ||
791 | ahci_stop_engine(ap); | 791 | hpriv->stop_engine(ap); |
792 | 792 | ||
793 | for (i = 0; i < 2; i++) { | 793 | for (i = 0; i < 2; i++) { |
794 | u16 val; | 794 | u16 val; |
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 4356ef1d28a8..824bd399f02e 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h | |||
@@ -350,7 +350,6 @@ struct ahci_host_priv { | |||
350 | u32 em_msg_type; /* EM message type */ | 350 | u32 em_msg_type; /* EM message type */ |
351 | bool got_runtime_pm; /* Did we do pm_runtime_get? */ | 351 | bool got_runtime_pm; /* Did we do pm_runtime_get? */ |
352 | struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ | 352 | struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ |
353 | struct reset_control *rsts; /* Optional */ | ||
354 | struct regulator **target_pwrs; /* Optional */ | 353 | struct regulator **target_pwrs; /* Optional */ |
355 | /* | 354 | /* |
356 | * If platform uses PHYs. There is a 1:1 relation between the port number and | 355 | * If platform uses PHYs. There is a 1:1 relation between the port number and |
@@ -366,6 +365,13 @@ struct ahci_host_priv { | |||
366 | * be overridden anytime before the host is activated. | 365 | * be overridden anytime before the host is activated. |
367 | */ | 366 | */ |
368 | void (*start_engine)(struct ata_port *ap); | 367 | void (*start_engine)(struct ata_port *ap); |
368 | /* | ||
369 | * Optional ahci_stop_engine override. If not set, this gets set to the | ||
370 | * default ahci_stop_engine during ahci_save_initial_config; it can | ||
371 | * be overridden anytime before the host is activated. | ||
372 | */ | ||
373 | int (*stop_engine)(struct ata_port *ap); | ||
374 | |||
369 | irqreturn_t (*irq_handler)(int irq, void *dev_instance); | 375 | irqreturn_t (*irq_handler)(int irq, void *dev_instance); |
370 | 376 | ||
371 | /* only required for per-port MSI(-X) support */ | 377 | /* only required for per-port MSI(-X) support */ |
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index de7128d81e9c..0045dacd814b 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c | |||
@@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) | |||
62 | writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); | 62 | writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); |
63 | } | 63 | } |
64 | 64 | ||
65 | /** | ||
66 | * ahci_mvebu_stop_engine | ||
67 | * | ||
68 | * @ap: Target ata port | ||
69 | * | ||
70 | * Errata Ref#226 - SATA Disk HOT swap issue when connected through | ||
71 | * Port Multiplier in FIS-based Switching mode. | ||
72 | * | ||
73 | * To avoid the issue, by design, bits [11:8, 0] of register PxFBS | ||
74 | * are cleared when Port Command and Status (0x18) bit[0] changes | ||
75 | * its value from 1 to 0, i.e. the falling edge of Port Command | ||
76 | * and Status bit[0] sends a PULSE that resets PxFBS | ||
77 | * bits [11:8, 0]. | ||
78 | * | ||
79 | * This function overrides "ahci_stop_engine" from libahci.c by | ||
80 | * adding the mvebu workaround (WA): save the PxFBS value before | ||
81 | * writing 0 to PxCMD ST, then restore the PxFBS value. | ||
82 | * | ||
83 | * Return: 0 on success; Error code otherwise. | ||
84 | */ | ||
85 | int ahci_mvebu_stop_engine(struct ata_port *ap) | ||
86 | { | ||
87 | void __iomem *port_mmio = ahci_port_base(ap); | ||
88 | u32 tmp, port_fbs; | ||
89 | |||
90 | tmp = readl(port_mmio + PORT_CMD); | ||
91 | |||
92 | /* check if the HBA is idle */ | ||
93 | if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) | ||
94 | return 0; | ||
95 | |||
96 | /* save the port PxFBS register for later restore */ | ||
97 | port_fbs = readl(port_mmio + PORT_FBS); | ||
98 | |||
99 | /* setting HBA to idle */ | ||
100 | tmp &= ~PORT_CMD_START; | ||
101 | writel(tmp, port_mmio + PORT_CMD); | ||
102 | |||
103 | /* | ||
104 | * The PxCMD bit #15 signal doesn't clear PxFBS, so | ||
105 | * restore the PxFBS register right after clearing PxCMD ST; | ||
106 | * there is no need to wait for PxCMD bit #15. | ||
107 | */ | ||
108 | writel(port_fbs, port_mmio + PORT_FBS); | ||
109 | |||
110 | /* wait for engine to stop. This could be as long as 500 msec */ | ||
111 | tmp = ata_wait_register(ap, port_mmio + PORT_CMD, | ||
112 | PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); | ||
113 | if (tmp & PORT_CMD_LIST_ON) | ||
114 | return -EIO; | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
65 | #ifdef CONFIG_PM_SLEEP | 119 | #ifdef CONFIG_PM_SLEEP |
66 | static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state) | 120 | static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state) |
67 | { | 121 | { |
@@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev) | |||
112 | if (rc) | 166 | if (rc) |
113 | return rc; | 167 | return rc; |
114 | 168 | ||
169 | hpriv->stop_engine = ahci_mvebu_stop_engine; | ||
170 | |||
115 | if (of_device_is_compatible(pdev->dev.of_node, | 171 | if (of_device_is_compatible(pdev->dev.of_node, |
116 | "marvell,armada-380-ahci")) { | 172 | "marvell,armada-380-ahci")) { |
117 | dram = mv_mbus_dram_info(); | 173 | dram = mv_mbus_dram_info(); |
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 2685f28160f7..cfdef4d44ae9 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c | |||
@@ -96,7 +96,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, | |||
96 | 96 | ||
97 | DPRINTK("ENTER\n"); | 97 | DPRINTK("ENTER\n"); |
98 | 98 | ||
99 | ahci_stop_engine(ap); | 99 | hpriv->stop_engine(ap); |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * There is an erratum on ls1021a Rev1.0 and Rev2.0 which is: | 102 | * There is an erratum on ls1021a Rev1.0 and Rev2.0 which is: |
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index c2b5941d9184..ad58da7c9aff 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c | |||
@@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) | |||
165 | PORT_CMD_ISSUE, 0x0, 1, 100)) | 165 | PORT_CMD_ISSUE, 0x0, 1, 100)) |
166 | return -EBUSY; | 166 | return -EBUSY; |
167 | 167 | ||
168 | ahci_stop_engine(ap); | 168 | hpriv->stop_engine(ap); |
169 | ahci_start_fis_rx(ap); | 169 | ahci_start_fis_rx(ap); |
170 | 170 | ||
171 | /* | 171 | /* |
@@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class, | |||
421 | portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR); | 421 | portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR); |
422 | portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI); | 422 | portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI); |
423 | 423 | ||
424 | ahci_stop_engine(ap); | 424 | hpriv->stop_engine(ap); |
425 | 425 | ||
426 | rc = xgene_ahci_do_hardreset(link, deadline, &online); | 426 | rc = xgene_ahci_do_hardreset(link, deadline, &online); |
427 | 427 | ||
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 7adcf3caabd0..e5d90977caec 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) | |||
560 | if (!hpriv->start_engine) | 560 | if (!hpriv->start_engine) |
561 | hpriv->start_engine = ahci_start_engine; | 561 | hpriv->start_engine = ahci_start_engine; |
562 | 562 | ||
563 | if (!hpriv->stop_engine) | ||
564 | hpriv->stop_engine = ahci_stop_engine; | ||
565 | |||
563 | if (!hpriv->irq_handler) | 566 | if (!hpriv->irq_handler) |
564 | hpriv->irq_handler = ahci_single_level_irq_intr; | 567 | hpriv->irq_handler = ahci_single_level_irq_intr; |
565 | } | 568 | } |
@@ -897,9 +900,10 @@ static void ahci_start_port(struct ata_port *ap) | |||
897 | static int ahci_deinit_port(struct ata_port *ap, const char **emsg) | 900 | static int ahci_deinit_port(struct ata_port *ap, const char **emsg) |
898 | { | 901 | { |
899 | int rc; | 902 | int rc; |
903 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
900 | 904 | ||
901 | /* disable DMA */ | 905 | /* disable DMA */ |
902 | rc = ahci_stop_engine(ap); | 906 | rc = hpriv->stop_engine(ap); |
903 | if (rc) { | 907 | if (rc) { |
904 | *emsg = "failed to stop engine"; | 908 | *emsg = "failed to stop engine"; |
905 | return rc; | 909 | return rc; |
@@ -1310,7 +1314,7 @@ int ahci_kick_engine(struct ata_port *ap) | |||
1310 | int busy, rc; | 1314 | int busy, rc; |
1311 | 1315 | ||
1312 | /* stop engine */ | 1316 | /* stop engine */ |
1313 | rc = ahci_stop_engine(ap); | 1317 | rc = hpriv->stop_engine(ap); |
1314 | if (rc) | 1318 | if (rc) |
1315 | goto out_restart; | 1319 | goto out_restart; |
1316 | 1320 | ||
@@ -1549,7 +1553,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, | |||
1549 | 1553 | ||
1550 | DPRINTK("ENTER\n"); | 1554 | DPRINTK("ENTER\n"); |
1551 | 1555 | ||
1552 | ahci_stop_engine(ap); | 1556 | hpriv->stop_engine(ap); |
1553 | 1557 | ||
1554 | /* clear D2H reception area to properly wait for D2H FIS */ | 1558 | /* clear D2H reception area to properly wait for D2H FIS */ |
1555 | ata_tf_init(link->device, &tf); | 1559 | ata_tf_init(link->device, &tf); |
@@ -2075,14 +2079,14 @@ void ahci_error_handler(struct ata_port *ap) | |||
2075 | 2079 | ||
2076 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) { | 2080 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) { |
2077 | /* restart engine */ | 2081 | /* restart engine */ |
2078 | ahci_stop_engine(ap); | 2082 | hpriv->stop_engine(ap); |
2079 | hpriv->start_engine(ap); | 2083 | hpriv->start_engine(ap); |
2080 | } | 2084 | } |
2081 | 2085 | ||
2082 | sata_pmp_error_handler(ap); | 2086 | sata_pmp_error_handler(ap); |
2083 | 2087 | ||
2084 | if (!ata_dev_enabled(ap->link.device)) | 2088 | if (!ata_dev_enabled(ap->link.device)) |
2085 | ahci_stop_engine(ap); | 2089 | hpriv->stop_engine(ap); |
2086 | } | 2090 | } |
2087 | EXPORT_SYMBOL_GPL(ahci_error_handler); | 2091 | EXPORT_SYMBOL_GPL(ahci_error_handler); |
2088 | 2092 | ||
@@ -2129,7 +2133,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) | |||
2129 | return; | 2133 | return; |
2130 | 2134 | ||
2131 | /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */ | 2135 | /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */ |
2132 | rc = ahci_stop_engine(ap); | 2136 | rc = hpriv->stop_engine(ap); |
2133 | if (rc) | 2137 | if (rc) |
2134 | return; | 2138 | return; |
2135 | 2139 | ||
@@ -2189,7 +2193,7 @@ static void ahci_enable_fbs(struct ata_port *ap) | |||
2189 | return; | 2193 | return; |
2190 | } | 2194 | } |
2191 | 2195 | ||
2192 | rc = ahci_stop_engine(ap); | 2196 | rc = hpriv->stop_engine(ap); |
2193 | if (rc) | 2197 | if (rc) |
2194 | return; | 2198 | return; |
2195 | 2199 | ||
@@ -2222,7 +2226,7 @@ static void ahci_disable_fbs(struct ata_port *ap) | |||
2222 | return; | 2226 | return; |
2223 | } | 2227 | } |
2224 | 2228 | ||
2225 | rc = ahci_stop_engine(ap); | 2229 | rc = hpriv->stop_engine(ap); |
2226 | if (rc) | 2230 | if (rc) |
2227 | return; | 2231 | return; |
2228 | 2232 | ||
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 46a762442dc5..30cc8f1a31e1 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/phy/phy.h> | 25 | #include <linux/phy/phy.h> |
26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
27 | #include <linux/of_platform.h> | 27 | #include <linux/of_platform.h> |
28 | #include <linux/reset.h> | ||
29 | #include "ahci.h" | 28 | #include "ahci.h" |
30 | 29 | ||
31 | static void ahci_host_stop(struct ata_host *host); | 30 | static void ahci_host_stop(struct ata_host *host); |
@@ -196,8 +195,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators); | |||
196 | * following order: | 195 | * following order: |
197 | * 1) Regulator | 196 | * 1) Regulator |
198 | * 2) Clocks (through ahci_platform_enable_clks) | 197 | * 2) Clocks (through ahci_platform_enable_clks) |
199 | * 3) Resets | 198 | * 3) Phys |
200 | * 4) Phys | ||
201 | * | 199 | * |
202 | * If resource enabling fails at any point the previous enabled resources | 200 | * If resource enabling fails at any point the previous enabled resources |
203 | * are disabled in reverse order. | 201 | * are disabled in reverse order. |
@@ -217,19 +215,12 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv) | |||
217 | if (rc) | 215 | if (rc) |
218 | goto disable_regulator; | 216 | goto disable_regulator; |
219 | 217 | ||
220 | rc = reset_control_deassert(hpriv->rsts); | ||
221 | if (rc) | ||
222 | goto disable_clks; | ||
223 | |||
224 | rc = ahci_platform_enable_phys(hpriv); | 218 | rc = ahci_platform_enable_phys(hpriv); |
225 | if (rc) | 219 | if (rc) |
226 | goto disable_resets; | 220 | goto disable_clks; |
227 | 221 | ||
228 | return 0; | 222 | return 0; |
229 | 223 | ||
230 | disable_resets: | ||
231 | reset_control_assert(hpriv->rsts); | ||
232 | |||
233 | disable_clks: | 224 | disable_clks: |
234 | ahci_platform_disable_clks(hpriv); | 225 | ahci_platform_disable_clks(hpriv); |
235 | 226 | ||
@@ -248,15 +239,12 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_resources); | |||
248 | * following order: | 239 | * following order: |
249 | * 1) Phys | 240 | * 1) Phys |
250 | * 2) Clocks (through ahci_platform_disable_clks) | 241 | * 2) Clocks (through ahci_platform_disable_clks) |
251 | * 3) Resets | 242 | * 3) Regulator |
252 | * 4) Regulator | ||
253 | */ | 243 | */ |
254 | void ahci_platform_disable_resources(struct ahci_host_priv *hpriv) | 244 | void ahci_platform_disable_resources(struct ahci_host_priv *hpriv) |
255 | { | 245 | { |
256 | ahci_platform_disable_phys(hpriv); | 246 | ahci_platform_disable_phys(hpriv); |
257 | 247 | ||
258 | reset_control_assert(hpriv->rsts); | ||
259 | |||
260 | ahci_platform_disable_clks(hpriv); | 248 | ahci_platform_disable_clks(hpriv); |
261 | 249 | ||
262 | ahci_platform_disable_regulators(hpriv); | 250 | ahci_platform_disable_regulators(hpriv); |
@@ -405,12 +393,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) | |||
405 | hpriv->clks[i] = clk; | 393 | hpriv->clks[i] = clk; |
406 | } | 394 | } |
407 | 395 | ||
408 | hpriv->rsts = devm_reset_control_array_get_optional_shared(dev); | ||
409 | if (IS_ERR(hpriv->rsts)) { | ||
410 | rc = PTR_ERR(hpriv->rsts); | ||
411 | goto err_out; | ||
412 | } | ||
413 | |||
414 | hpriv->nports = child_nodes = of_get_child_count(dev->of_node); | 396 | hpriv->nports = child_nodes = of_get_child_count(dev->of_node); |
415 | 397 | ||
416 | /* | 398 | /* |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8bc71ca61e7f..68596bd4cf06 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4549,6 +4549,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4549 | ATA_HORKAGE_ZERO_AFTER_TRIM | | 4549 | ATA_HORKAGE_ZERO_AFTER_TRIM | |
4550 | ATA_HORKAGE_NOLPM, }, | 4550 | ATA_HORKAGE_NOLPM, }, |
4551 | 4551 | ||
4552 | /* This specific Samsung model/firmware-rev does not handle LPM well */ | ||
4553 | { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, | ||
4554 | |||
4555 | /* Sandisk devices which are known to not handle LPM well */ | ||
4556 | { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, | ||
4557 | |||
4552 | /* devices that don't properly handle queued TRIM commands */ | 4558 | /* devices that don't properly handle queued TRIM commands */ |
4553 | { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4559 | { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
4554 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4560 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index c016829a38fd..513b260bcff1 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) | |||
175 | { } | 175 | { } |
176 | #endif /* CONFIG_PM */ | 176 | #endif /* CONFIG_PM */ |
177 | 177 | ||
178 | static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, | 178 | static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, |
179 | va_list args) | 179 | const char *fmt, va_list args) |
180 | { | 180 | { |
181 | ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, | 181 | ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, |
182 | ATA_EH_DESC_LEN - ehi->desc_len, | 182 | ATA_EH_DESC_LEN - ehi->desc_len, |
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index aafb8cc03523..e67815b896fc 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c | |||
@@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, | |||
410 | int rc; | 410 | int rc; |
411 | int retry = 100; | 411 | int retry = 100; |
412 | 412 | ||
413 | ahci_stop_engine(ap); | 413 | hpriv->stop_engine(ap); |
414 | 414 | ||
415 | /* clear D2H reception area to properly wait for D2H FIS */ | 415 | /* clear D2H reception area to properly wait for D2H FIS */ |
416 | ata_tf_init(link->device, &tf); | 416 | ata_tf_init(link->device, &tf); |
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 4b1995e2d044..010ca101d412 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -285,13 +285,13 @@ static const struct sil24_cerr_info { | |||
285 | [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET, | 285 | [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET, |
286 | "protocol mismatch" }, | 286 | "protocol mismatch" }, |
287 | [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET, | 287 | [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET, |
288 | "data directon mismatch" }, | 288 | "data direction mismatch" }, |
289 | [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET, | 289 | [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET, |
290 | "ran out of SGEs while writing" }, | 290 | "ran out of SGEs while writing" }, |
291 | [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET, | 291 | [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET, |
292 | "ran out of SGEs while reading" }, | 292 | "ran out of SGEs while reading" }, |
293 | [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET, | 293 | [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET, |
294 | "invalid data directon for ATAPI CDB" }, | 294 | "invalid data direction for ATAPI CDB" }, |
295 | [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, | 295 | [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, |
296 | "SGT not on qword boundary" }, | 296 | "SGT not on qword boundary" }, |
297 | [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, | 297 | [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, |
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index d97c05690faa..4e46dc9e41ad 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c | |||
@@ -191,7 +191,7 @@ static char *res_strings[] = { | |||
191 | "reserved 37", | 191 | "reserved 37", |
192 | "reserved 38", | 192 | "reserved 38", |
193 | "reserved 39", | 193 | "reserved 39", |
194 | "reseverd 40", | 194 | "reserved 40", |
195 | "reserved 41", | 195 | "reserved 41", |
196 | "reserved 42", | 196 | "reserved 42", |
197 | "reserved 43", | 197 | "reserved 43", |
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 1ef67db03c8e..9c9a22958717 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/io.h> | 28 | #include <asm/io.h> |
29 | #include <linux/atomic.h> | 29 | #include <linux/atomic.h> |
30 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | #include <linux/nospec.h> | ||
31 | 32 | ||
32 | #include "uPD98401.h" | 33 | #include "uPD98401.h" |
33 | #include "uPD98402.h" | 34 | #include "uPD98402.h" |
@@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) | |||
1458 | return -EFAULT; | 1459 | return -EFAULT; |
1459 | if (pool < 0 || pool > ZATM_LAST_POOL) | 1460 | if (pool < 0 || pool > ZATM_LAST_POOL) |
1460 | return -EINVAL; | 1461 | return -EINVAL; |
1462 | pool = array_index_nospec(pool, | ||
1463 | ZATM_LAST_POOL + 1); | ||
1461 | spin_lock_irqsave(&zatm_dev->lock, flags); | 1464 | spin_lock_irqsave(&zatm_dev->lock, flags); |
1462 | info = zatm_dev->pool_info[pool]; | 1465 | info = zatm_dev->pool_info[pool]; |
1463 | if (cmd == ZATM_GETPOOLZ) { | 1466 | if (cmd == ZATM_GETPOOLZ) { |
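
The zatm.c hunk above inserts array_index_nospec() after the bounds check so that a mispredicted branch cannot speculatively index pool_info[] out of range (Spectre v1). Below is a hedged user-space analogue of the kernel's generic mask (names invented for illustration); real kernel code should use <linux/nospec.h> rather than open-code this.

    #include <stdio.h>
    #include <stddef.h>

    /* Branch-free clamp: returns index when index < size, 0 otherwise.
     * Mirrors the kernel's generic array_index_mask_nospec() formula. */
    static size_t index_nospec(size_t index, size_t size)
    {
            long mask = ~(long)(index | (size - index - 1)) >> (sizeof(long) * 8 - 1);

            return index & (size_t)mask;
    }

    int main(void)
    {
            int pool_info[8] = { 0 };
            size_t pool = 3;        /* imagine this arrived as an ioctl argument */

            if (pool >= sizeof(pool_info) / sizeof(pool_info[0]))
                    return 1;       /* architectural bounds check */
            pool = index_nospec(pool, sizeof(pool_info) / sizeof(pool_info[0]));
            printf("pool_info[%zu] = %d\n", pool, pool_info[pool]);
            return 0;
    }
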
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8e8b04cc569a..33b36fea1d73 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -2366,7 +2366,9 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) | |||
2366 | osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd", | 2366 | osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd", |
2367 | "copyup"); | 2367 | "copyup"); |
2368 | osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0, | 2368 | osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0, |
2369 | obj_req->copyup_bvecs, bytes); | 2369 | obj_req->copyup_bvecs, |
2370 | obj_req->copyup_bvec_count, | ||
2371 | bytes); | ||
2370 | 2372 | ||
2371 | switch (obj_req->img_request->op_type) { | 2373 | switch (obj_req->img_request->op_type) { |
2372 | case OBJ_OP_WRITE: | 2374 | case OBJ_OP_WRITE: |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c8c8b0b8d333..b937cc1e2c07 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -231,6 +231,7 @@ static const struct usb_device_id blacklist_table[] = { | |||
231 | { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, | 231 | { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, |
232 | { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, | 232 | { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, |
233 | { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, | 233 | { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, |
234 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, | ||
234 | { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, | 235 | { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, |
235 | { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, | 236 | { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, |
236 | { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, | 237 | { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, |
@@ -263,7 +264,6 @@ static const struct usb_device_id blacklist_table[] = { | |||
263 | { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, | 264 | { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, |
264 | 265 | ||
265 | /* QCA ROME chipset */ | 266 | /* QCA ROME chipset */ |
266 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME }, | ||
267 | { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, | 267 | { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, |
268 | { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, | 268 | { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, |
269 | { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME }, | 269 | { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME }, |
@@ -399,6 +399,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { | |||
399 | DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), | 399 | DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), |
400 | }, | 400 | }, |
401 | }, | 401 | }, |
402 | { | ||
403 | /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */ | ||
404 | .matches = { | ||
405 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
406 | DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), | ||
407 | }, | ||
408 | }, | ||
402 | {} | 409 | {} |
403 | }; | 410 | }; |
404 | 411 | ||
@@ -2852,6 +2859,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) | |||
2852 | } | 2859 | } |
2853 | #endif | 2860 | #endif |
2854 | 2861 | ||
2862 | static void btusb_check_needs_reset_resume(struct usb_interface *intf) | ||
2863 | { | ||
2864 | if (dmi_check_system(btusb_needs_reset_resume_table)) | ||
2865 | interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; | ||
2866 | } | ||
2867 | |||
2855 | static int btusb_probe(struct usb_interface *intf, | 2868 | static int btusb_probe(struct usb_interface *intf, |
2856 | const struct usb_device_id *id) | 2869 | const struct usb_device_id *id) |
2857 | { | 2870 | { |
@@ -2974,9 +2987,6 @@ static int btusb_probe(struct usb_interface *intf, | |||
2974 | hdev->send = btusb_send_frame; | 2987 | hdev->send = btusb_send_frame; |
2975 | hdev->notify = btusb_notify; | 2988 | hdev->notify = btusb_notify; |
2976 | 2989 | ||
2977 | if (dmi_check_system(btusb_needs_reset_resume_table)) | ||
2978 | interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; | ||
2979 | |||
2980 | #ifdef CONFIG_PM | 2990 | #ifdef CONFIG_PM |
2981 | err = btusb_config_oob_wake(hdev); | 2991 | err = btusb_config_oob_wake(hdev); |
2982 | if (err) | 2992 | if (err) |
@@ -3064,6 +3074,7 @@ static int btusb_probe(struct usb_interface *intf, | |||
3064 | data->setup_on_usb = btusb_setup_qca; | 3074 | data->setup_on_usb = btusb_setup_qca; |
3065 | hdev->set_bdaddr = btusb_set_bdaddr_ath3012; | 3075 | hdev->set_bdaddr = btusb_set_bdaddr_ath3012; |
3066 | set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); | 3076 | set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); |
3077 | btusb_check_needs_reset_resume(intf); | ||
3067 | } | 3078 | } |
3068 | 3079 | ||
3069 | #ifdef CONFIG_BT_HCIBTUSB_RTL | 3080 | #ifdef CONFIG_BT_HCIBTUSB_RTL |
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index c381c8e396fc..79d8c84693a1 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c | |||
@@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty | |||
195 | return 0; | 195 | return 0; |
196 | } | 196 | } |
197 | 197 | ||
198 | int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | 198 | static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) |
199 | { | 199 | { |
200 | size_t i; | 200 | size_t i; |
201 | u32 *gp; | 201 | u32 *gp; |
@@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge) | |||
470 | return 0; | 470 | return 0; |
471 | } | 471 | } |
472 | 472 | ||
473 | void null_cache_flush(void) | 473 | static void null_cache_flush(void) |
474 | { | 474 | { |
475 | mb(); | 475 | mb(); |
476 | } | 476 | } |
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 41492e980ef4..34968a381d0f 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig | |||
@@ -266,15 +266,13 @@ config COMMON_CLK_STM32MP157 | |||
266 | Support for stm32mp157 SoC family clocks | 266 | Support for stm32mp157 SoC family clocks |
267 | 267 | ||
268 | config COMMON_CLK_STM32F | 268 | config COMMON_CLK_STM32F |
269 | bool "Clock driver for stm32f4 and stm32f7 SoC families" | 269 | def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746) |
270 | depends on MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746 | ||
271 | help | 270 | help |
272 | ---help--- | 271 | ---help--- |
273 | Support for stm32f4 and stm32f7 SoC families clocks | 272 | Support for stm32f4 and stm32f7 SoC families clocks |
274 | 273 | ||
275 | config COMMON_CLK_STM32H7 | 274 | config COMMON_CLK_STM32H7 |
276 | bool "Clock driver for stm32h7 SoC family" | 275 | def_bool COMMON_CLK && MACH_STM32H743 |
277 | depends on MACH_STM32H743 | ||
278 | help | 276 | help |
279 | ---help--- | 277 | ---help--- |
280 | Support for stm32h7 SoC family clocks | 278 | Support for stm32h7 SoC family clocks |
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 114ecbb94ec5..12320118f8de 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c | |||
@@ -464,7 +464,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) | |||
464 | clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000); | 464 | clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000); |
465 | 465 | ||
466 | /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */ | 466 | /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */ |
467 | clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]); | 467 | clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]); |
468 | clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]); | 468 | clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]); |
469 | clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]); | 469 | clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]); |
470 | clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]); | 470 | clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]); |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index de55c7d57438..96b35b8b3606 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ | |||
20 | 20 | ||
21 | config ARM_ARMADA_37XX_CPUFREQ | 21 | config ARM_ARMADA_37XX_CPUFREQ |
22 | tristate "Armada 37xx CPUFreq support" | 22 | tristate "Armada 37xx CPUFreq support" |
23 | depends on ARCH_MVEBU | 23 | depends on ARCH_MVEBU && CPUFREQ_DT |
24 | help | 24 | help |
25 | This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. | 25 | This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. |
26 | The Armada 37xx PMU supports 4 frequency and VDD levels. | 26 | The Armada 37xx PMU supports 4 frequency and VDD levels. |
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 77e485557498..6f693b7d5220 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c | |||
@@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) | |||
384 | if (set) | 384 | if (set) |
385 | reg |= bit; | 385 | reg |= bit; |
386 | else | 386 | else |
387 | reg &= bit; | 387 | reg &= ~bit; |
388 | iowrite32(reg, addr); | 388 | iowrite32(reg, addr); |
389 | 389 | ||
390 | spin_unlock_irqrestore(&gpio->lock, flags); | 390 | spin_unlock_irqrestore(&gpio->lock, flags); |
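
The one-character gpio-aspeed fix above is a classic bit-manipulation slip: reg &= bit keeps only the targeted bit and wipes every other one, while the intended reg &= ~bit clears just that bit. A tiny user-space illustration with made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int reg = 0xf0;        /* pretend this was read back from hardware */
            unsigned int bit = 1u << 5;     /* 0x20, the bit we want to clear */

            printf("reg &= bit  -> 0x%02x (every other bit lost)\n", reg & bit);
            printf("reg &= ~bit -> 0x%02x (only bit 5 cleared)\n", reg & ~bit);
            return 0;
    }
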
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c index 1948724d8c36..25d16b2af1c3 100644 --- a/drivers/gpio/gpio-pci-idio-16.c +++ b/drivers/gpio/gpio-pci-idio-16.c | |||
@@ -116,9 +116,9 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip, | |||
116 | unsigned long word_mask; | 116 | unsigned long word_mask; |
117 | const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0); | 117 | const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0); |
118 | unsigned long port_state; | 118 | unsigned long port_state; |
119 | u8 __iomem ports[] = { | 119 | void __iomem *ports[] = { |
120 | idio16gpio->reg->out0_7, idio16gpio->reg->out8_15, | 120 | &idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15, |
121 | idio16gpio->reg->in0_7, idio16gpio->reg->in8_15, | 121 | &idio16gpio->reg->in0_7, &idio16gpio->reg->in8_15, |
122 | }; | 122 | }; |
123 | 123 | ||
124 | /* clear bits array to a clean slate */ | 124 | /* clear bits array to a clean slate */ |
@@ -143,7 +143,7 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip, | |||
143 | } | 143 | } |
144 | 144 | ||
145 | /* read bits from current gpio port */ | 145 | /* read bits from current gpio port */ |
146 | port_state = ioread8(ports + i); | 146 | port_state = ioread8(ports[i]); |
147 | 147 | ||
148 | /* store acquired bits at respective bits array offset */ | 148 | /* store acquired bits at respective bits array offset */ |
149 | bits[word_index] |= port_state << word_offset; | 149 | bits[word_index] |= port_state << word_offset; |
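
The idio-16 hunk above (and the idio-24 hunks that follow) fix the same mistake: the ports[] array used to hold register values of type u8 __iomem, so ioread8(ports + i) read from the address of the array element rather than from the device register. With the array holding addresses, each element must be dereferenced as ports[i]. A hedged user-space analogue, with plain pointers standing in for MMIO registers and all names invented:

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t reg_out0_7 = 0x11, reg_out8_15 = 0x22;  /* stand-ins for device registers */

    int main(void)
    {
            /* Array of pointers to the registers: read through ports[i];
             * "ports + i" would only be the address of the i-th array slot. */
            volatile uint8_t *ports[] = { &reg_out0_7, &reg_out8_15 };
            size_t i;

            for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
                    printf("port %zu = 0x%02x\n", i, (unsigned int)*ports[i]);
            return 0;
    }
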
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c index 835607ecf658..f953541e7890 100644 --- a/drivers/gpio/gpio-pcie-idio-24.c +++ b/drivers/gpio/gpio-pcie-idio-24.c | |||
@@ -206,10 +206,10 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip, | |||
206 | unsigned long word_mask; | 206 | unsigned long word_mask; |
207 | const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0); | 207 | const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0); |
208 | unsigned long port_state; | 208 | unsigned long port_state; |
209 | u8 __iomem ports[] = { | 209 | void __iomem *ports[] = { |
210 | idio24gpio->reg->out0_7, idio24gpio->reg->out8_15, | 210 | &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15, |
211 | idio24gpio->reg->out16_23, idio24gpio->reg->in0_7, | 211 | &idio24gpio->reg->out16_23, &idio24gpio->reg->in0_7, |
212 | idio24gpio->reg->in8_15, idio24gpio->reg->in16_23, | 212 | &idio24gpio->reg->in8_15, &idio24gpio->reg->in16_23, |
213 | }; | 213 | }; |
214 | const unsigned long out_mode_mask = BIT(1); | 214 | const unsigned long out_mode_mask = BIT(1); |
215 | 215 | ||
@@ -217,7 +217,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip, | |||
217 | bitmap_zero(bits, chip->ngpio); | 217 | bitmap_zero(bits, chip->ngpio); |
218 | 218 | ||
219 | /* get bits are evaluated a gpio port register at a time */ | 219 | /* get bits are evaluated a gpio port register at a time */ |
220 | for (i = 0; i < ARRAY_SIZE(ports); i++) { | 220 | for (i = 0; i < ARRAY_SIZE(ports) + 1; i++) { |
221 | /* gpio offset in bits array */ | 221 | /* gpio offset in bits array */ |
222 | bits_offset = i * gpio_reg_size; | 222 | bits_offset = i * gpio_reg_size; |
223 | 223 | ||
@@ -236,7 +236,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip, | |||
236 | 236 | ||
237 | /* read bits from current gpio port (port 6 is TTL GPIO) */ | 237 | /* read bits from current gpio port (port 6 is TTL GPIO) */ |
238 | if (i < 6) | 238 | if (i < 6) |
239 | port_state = ioread8(ports + i); | 239 | port_state = ioread8(ports[i]); |
240 | else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask) | 240 | else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask) |
241 | port_state = ioread8(&idio24gpio->reg->ttl_out0_7); | 241 | port_state = ioread8(&idio24gpio->reg->ttl_out0_7); |
242 | else | 242 | else |
@@ -301,9 +301,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip, | |||
301 | const unsigned long port_mask = GENMASK(gpio_reg_size, 0); | 301 | const unsigned long port_mask = GENMASK(gpio_reg_size, 0); |
302 | unsigned long flags; | 302 | unsigned long flags; |
303 | unsigned int out_state; | 303 | unsigned int out_state; |
304 | u8 __iomem ports[] = { | 304 | void __iomem *ports[] = { |
305 | idio24gpio->reg->out0_7, idio24gpio->reg->out8_15, | 305 | &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15, |
306 | idio24gpio->reg->out16_23 | 306 | &idio24gpio->reg->out16_23 |
307 | }; | 307 | }; |
308 | const unsigned long out_mode_mask = BIT(1); | 308 | const unsigned long out_mode_mask = BIT(1); |
309 | const unsigned int ttl_offset = 48; | 309 | const unsigned int ttl_offset = 48; |
@@ -327,9 +327,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip, | |||
327 | raw_spin_lock_irqsave(&idio24gpio->lock, flags); | 327 | raw_spin_lock_irqsave(&idio24gpio->lock, flags); |
328 | 328 | ||
329 | /* process output lines */ | 329 | /* process output lines */ |
330 | out_state = ioread8(ports + i) & ~gpio_mask; | 330 | out_state = ioread8(ports[i]) & ~gpio_mask; |
331 | out_state |= (*bits >> bits_offset) & gpio_mask; | 331 | out_state |= (*bits >> bits_offset) & gpio_mask; |
332 | iowrite8(out_state, ports + i); | 332 | iowrite8(out_state, ports[i]); |
333 | 333 | ||
334 | raw_spin_unlock_irqrestore(&idio24gpio->lock, flags); | 334 | raw_spin_unlock_irqrestore(&idio24gpio->lock, flags); |
335 | } | 335 | } |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 43aeb07343ec..d8ccb500872f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -497,7 +497,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) | |||
497 | struct gpiohandle_request handlereq; | 497 | struct gpiohandle_request handlereq; |
498 | struct linehandle_state *lh; | 498 | struct linehandle_state *lh; |
499 | struct file *file; | 499 | struct file *file; |
500 | int fd, i, ret; | 500 | int fd, i, count = 0, ret; |
501 | u32 lflags; | 501 | u32 lflags; |
502 | 502 | ||
503 | if (copy_from_user(&handlereq, ip, sizeof(handlereq))) | 503 | if (copy_from_user(&handlereq, ip, sizeof(handlereq))) |
@@ -558,6 +558,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) | |||
558 | if (ret) | 558 | if (ret) |
559 | goto out_free_descs; | 559 | goto out_free_descs; |
560 | lh->descs[i] = desc; | 560 | lh->descs[i] = desc; |
561 | count = i; | ||
561 | 562 | ||
562 | if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) | 563 | if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) |
563 | set_bit(FLAG_ACTIVE_LOW, &desc->flags); | 564 | set_bit(FLAG_ACTIVE_LOW, &desc->flags); |
@@ -628,7 +629,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) | |||
628 | out_put_unused_fd: | 629 | out_put_unused_fd: |
629 | put_unused_fd(fd); | 630 | put_unused_fd(fd); |
630 | out_free_descs: | 631 | out_free_descs: |
631 | for (; i >= 0; i--) | 632 | for (i = 0; i < count; i++) |
632 | gpiod_free(lh->descs[i]); | 633 | gpiod_free(lh->descs[i]); |
633 | kfree(lh->label); | 634 | kfree(lh->label); |
634 | out_free_lh: | 635 | out_free_lh: |
@@ -902,7 +903,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) | |||
902 | desc = &gdev->descs[offset]; | 903 | desc = &gdev->descs[offset]; |
903 | ret = gpiod_request(desc, le->label); | 904 | ret = gpiod_request(desc, le->label); |
904 | if (ret) | 905 | if (ret) |
905 | goto out_free_desc; | 906 | goto out_free_label; |
906 | le->desc = desc; | 907 | le->desc = desc; |
907 | le->eflags = eflags; | 908 | le->eflags = eflags; |
908 | 909 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 09d35051fdd6..3fabf9f97022 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -419,9 +419,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id) | |||
419 | 419 | ||
420 | if (other) { | 420 | if (other) { |
421 | signed long r; | 421 | signed long r; |
422 | r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); | 422 | r = dma_fence_wait(other, true); |
423 | if (r < 0) { | 423 | if (r < 0) { |
424 | DRM_ERROR("Error (%ld) waiting for fence!\n", r); | 424 | if (r != -ERESTARTSYS) |
425 | DRM_ERROR("Error (%ld) waiting for fence!\n", r); | ||
426 | |||
425 | return r; | 427 | return r; |
426 | } | 428 | } |
427 | } | 429 | } |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index ace9ad578ca0..4304d9e408b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | |||
@@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, | |||
83 | enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? | 83 | enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? |
84 | I2C_MOT_TRUE : I2C_MOT_FALSE; | 84 | I2C_MOT_TRUE : I2C_MOT_FALSE; |
85 | enum ddc_result res; | 85 | enum ddc_result res; |
86 | ssize_t read_bytes; | 86 | uint32_t read_bytes = msg->size; |
87 | 87 | ||
88 | if (WARN_ON(msg->size > 16)) | 88 | if (WARN_ON(msg->size > 16)) |
89 | return -E2BIG; | 89 | return -E2BIG; |
90 | 90 | ||
91 | switch (msg->request & ~DP_AUX_I2C_MOT) { | 91 | switch (msg->request & ~DP_AUX_I2C_MOT) { |
92 | case DP_AUX_NATIVE_READ: | 92 | case DP_AUX_NATIVE_READ: |
93 | read_bytes = dal_ddc_service_read_dpcd_data( | 93 | res = dal_ddc_service_read_dpcd_data( |
94 | TO_DM_AUX(aux)->ddc_service, | 94 | TO_DM_AUX(aux)->ddc_service, |
95 | false, | 95 | false, |
96 | I2C_MOT_UNDEF, | 96 | I2C_MOT_UNDEF, |
97 | msg->address, | 97 | msg->address, |
98 | msg->buffer, | 98 | msg->buffer, |
99 | msg->size); | 99 | msg->size, |
100 | return read_bytes; | 100 | &read_bytes); |
101 | break; | ||
101 | case DP_AUX_NATIVE_WRITE: | 102 | case DP_AUX_NATIVE_WRITE: |
102 | res = dal_ddc_service_write_dpcd_data( | 103 | res = dal_ddc_service_write_dpcd_data( |
103 | TO_DM_AUX(aux)->ddc_service, | 104 | TO_DM_AUX(aux)->ddc_service, |
@@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, | |||
108 | msg->size); | 109 | msg->size); |
109 | break; | 110 | break; |
110 | case DP_AUX_I2C_READ: | 111 | case DP_AUX_I2C_READ: |
111 | read_bytes = dal_ddc_service_read_dpcd_data( | 112 | res = dal_ddc_service_read_dpcd_data( |
112 | TO_DM_AUX(aux)->ddc_service, | 113 | TO_DM_AUX(aux)->ddc_service, |
113 | true, | 114 | true, |
114 | mot, | 115 | mot, |
115 | msg->address, | 116 | msg->address, |
116 | msg->buffer, | 117 | msg->buffer, |
117 | msg->size); | 118 | msg->size, |
118 | return read_bytes; | 119 | &read_bytes); |
120 | break; | ||
119 | case DP_AUX_I2C_WRITE: | 121 | case DP_AUX_I2C_WRITE: |
120 | res = dal_ddc_service_write_dpcd_data( | 122 | res = dal_ddc_service_write_dpcd_data( |
121 | TO_DM_AUX(aux)->ddc_service, | 123 | TO_DM_AUX(aux)->ddc_service, |
@@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, | |||
137 | r == DDC_RESULT_SUCESSFULL); | 139 | r == DDC_RESULT_SUCESSFULL); |
138 | #endif | 140 | #endif |
139 | 141 | ||
140 | return msg->size; | 142 | if (res != DDC_RESULT_SUCESSFULL) |
143 | return -EIO; | ||
144 | return read_bytes; | ||
141 | } | 145 | } |
142 | 146 | ||
143 | static enum drm_connector_status | 147 | static enum drm_connector_status |
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 985fe8c22875..10a5807a7e8b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | |||
@@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1( | |||
70 | struct bios_parser *bp, | 70 | struct bios_parser *bp, |
71 | struct dc_firmware_info *info); | 71 | struct dc_firmware_info *info); |
72 | 72 | ||
73 | static enum bp_result get_firmware_info_v3_2( | ||
74 | struct bios_parser *bp, | ||
75 | struct dc_firmware_info *info); | ||
76 | |||
73 | static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, | 77 | static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, |
74 | struct atom_display_object_path_v2 *object); | 78 | struct atom_display_object_path_v2 *object); |
75 | 79 | ||
@@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info( | |||
1321 | case 3: | 1325 | case 3: |
1322 | switch (revision.minor) { | 1326 | switch (revision.minor) { |
1323 | case 1: | 1327 | case 1: |
1324 | case 2: | ||
1325 | result = get_firmware_info_v3_1(bp, info); | 1328 | result = get_firmware_info_v3_1(bp, info); |
1326 | break; | 1329 | break; |
1330 | case 2: | ||
1331 | result = get_firmware_info_v3_2(bp, info); | ||
1332 | break; | ||
1327 | default: | 1333 | default: |
1328 | break; | 1334 | break; |
1329 | } | 1335 | } |
@@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1( | |||
1383 | return BP_RESULT_OK; | 1389 | return BP_RESULT_OK; |
1384 | } | 1390 | } |
1385 | 1391 | ||
1392 | static enum bp_result get_firmware_info_v3_2( | ||
1393 | struct bios_parser *bp, | ||
1394 | struct dc_firmware_info *info) | ||
1395 | { | ||
1396 | struct atom_firmware_info_v3_2 *firmware_info; | ||
1397 | struct atom_display_controller_info_v4_1 *dce_info = NULL; | ||
1398 | struct atom_common_table_header *header; | ||
1399 | struct atom_data_revision revision; | ||
1400 | struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL; | ||
1401 | struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL; | ||
1402 | |||
1403 | if (!info) | ||
1404 | return BP_RESULT_BADINPUT; | ||
1405 | |||
1406 | firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2, | ||
1407 | DATA_TABLES(firmwareinfo)); | ||
1408 | |||
1409 | dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1, | ||
1410 | DATA_TABLES(dce_info)); | ||
1411 | |||
1412 | if (!firmware_info || !dce_info) | ||
1413 | return BP_RESULT_BADBIOSTABLE; | ||
1414 | |||
1415 | memset(info, 0, sizeof(*info)); | ||
1416 | |||
1417 | header = GET_IMAGE(struct atom_common_table_header, | ||
1418 | DATA_TABLES(smu_info)); | ||
1419 | get_atom_data_table_revision(header, &revision); | ||
1420 | |||
1421 | if (revision.minor == 2) { | ||
1422 | /* Vega12 */ | ||
1423 | smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, | ||
1424 | DATA_TABLES(smu_info)); | ||
1425 | |||
1426 | if (!smu_info_v3_2) | ||
1427 | return BP_RESULT_BADBIOSTABLE; | ||
1428 | |||
1429 | info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; | ||
1430 | } else if (revision.minor == 3) { | ||
1431 | /* Vega20 */ | ||
1432 | smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, | ||
1433 | DATA_TABLES(smu_info)); | ||
1434 | |||
1435 | if (!smu_info_v3_3) | ||
1436 | return BP_RESULT_BADBIOSTABLE; | ||
1437 | |||
1438 | info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; | ||
1439 | } | ||
1440 | |||
1441 | // We need to convert from 10KHz units into KHz units. | ||
1442 | info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10; | ||
1443 | |||
1444 | /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */ | ||
1445 | info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10; | ||
1446 | /* Hardcode frequency if BIOS gives no DCE Ref Clk */ | ||
1447 | if (info->pll_info.crystal_frequency == 0) { | ||
1448 | if (revision.minor == 2) | ||
1449 | info->pll_info.crystal_frequency = 27000; | ||
1450 | else if (revision.minor == 3) | ||
1451 | info->pll_info.crystal_frequency = 100000; | ||
1452 | } | ||
1453 | /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/ | ||
1454 | info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10; | ||
1455 | info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10; | ||
1456 | |||
1457 | /* Get GPU PLL VCO Clock */ | ||
1458 | if (bp->cmd_tbl.get_smu_clock_info != NULL) { | ||
1459 | if (revision.minor == 2) | ||
1460 | info->smu_gpu_pll_output_freq = | ||
1461 | bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10; | ||
1462 | else if (revision.minor == 3) | ||
1463 | info->smu_gpu_pll_output_freq = | ||
1464 | bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10; | ||
1465 | } | ||
1466 | |||
1467 | return BP_RESULT_OK; | ||
1468 | } | ||
1469 | |||
1386 | static enum bp_result bios_parser_get_encoder_cap_info( | 1470 | static enum bp_result bios_parser_get_encoder_cap_info( |
1387 | struct dc_bios *dcb, | 1471 | struct dc_bios *dcb, |
1388 | struct graphics_object_id object_id, | 1472 | struct graphics_object_id object_id, |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 49c2face1e7a..ae48d603ebd6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | |||
@@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data( | |||
629 | return ret; | 629 | return ret; |
630 | } | 630 | } |
631 | 631 | ||
632 | ssize_t dal_ddc_service_read_dpcd_data( | 632 | enum ddc_result dal_ddc_service_read_dpcd_data( |
633 | struct ddc_service *ddc, | 633 | struct ddc_service *ddc, |
634 | bool i2c, | 634 | bool i2c, |
635 | enum i2c_mot_mode mot, | 635 | enum i2c_mot_mode mot, |
636 | uint32_t address, | 636 | uint32_t address, |
637 | uint8_t *data, | 637 | uint8_t *data, |
638 | uint32_t len) | 638 | uint32_t len, |
639 | uint32_t *read) | ||
639 | { | 640 | { |
640 | struct aux_payload read_payload = { | 641 | struct aux_payload read_payload = { |
641 | .i2c_over_aux = i2c, | 642 | .i2c_over_aux = i2c, |
@@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data( | |||
652 | .mot = mot | 653 | .mot = mot |
653 | }; | 654 | }; |
654 | 655 | ||
656 | *read = 0; | ||
657 | |||
655 | if (len > DEFAULT_AUX_MAX_DATA_SIZE) { | 658 | if (len > DEFAULT_AUX_MAX_DATA_SIZE) { |
656 | BREAK_TO_DEBUGGER(); | 659 | BREAK_TO_DEBUGGER(); |
657 | return DDC_RESULT_FAILED_INVALID_OPERATION; | 660 | return DDC_RESULT_FAILED_INVALID_OPERATION; |
@@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data( | |||
661 | ddc->ctx->i2caux, | 664 | ddc->ctx->i2caux, |
662 | ddc->ddc_pin, | 665 | ddc->ddc_pin, |
663 | &command)) { | 666 | &command)) { |
664 | return (ssize_t)command.payloads->length; | 667 | *read = command.payloads->length; |
668 | return DDC_RESULT_SUCESSFULL; | ||
665 | } | 669 | } |
666 | 670 | ||
667 | return DDC_RESULT_FAILED_OPERATION; | 671 | return DDC_RESULT_FAILED_OPERATION; |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index ade5b8ee9c3c..132eef3826e2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c | |||
@@ -66,8 +66,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc) | |||
66 | { | 66 | { |
67 | struct dc *core_dc = dc; | 67 | struct dc *core_dc = dc; |
68 | 68 | ||
69 | struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state), | 69 | struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), |
70 | GFP_KERNEL); | 70 | GFP_KERNEL); |
71 | 71 | ||
72 | if (NULL == plane_state) | 72 | if (NULL == plane_state) |
73 | return NULL; | 73 | return NULL; |
@@ -120,7 +120,7 @@ static void dc_plane_state_free(struct kref *kref) | |||
120 | { | 120 | { |
121 | struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); | 121 | struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); |
122 | destruct(plane_state); | 122 | destruct(plane_state); |
123 | kfree(plane_state); | 123 | kvfree(plane_state); |
124 | } | 124 | } |
125 | 125 | ||
126 | void dc_plane_state_release(struct dc_plane_state *plane_state) | 126 | void dc_plane_state_release(struct dc_plane_state *plane_state) |
@@ -136,7 +136,7 @@ void dc_gamma_retain(struct dc_gamma *gamma) | |||
136 | static void dc_gamma_free(struct kref *kref) | 136 | static void dc_gamma_free(struct kref *kref) |
137 | { | 137 | { |
138 | struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount); | 138 | struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount); |
139 | kfree(gamma); | 139 | kvfree(gamma); |
140 | } | 140 | } |
141 | 141 | ||
142 | void dc_gamma_release(struct dc_gamma **gamma) | 142 | void dc_gamma_release(struct dc_gamma **gamma) |
@@ -147,7 +147,7 @@ void dc_gamma_release(struct dc_gamma **gamma) | |||
147 | 147 | ||
148 | struct dc_gamma *dc_create_gamma(void) | 148 | struct dc_gamma *dc_create_gamma(void) |
149 | { | 149 | { |
150 | struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL); | 150 | struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL); |
151 | 151 | ||
152 | if (gamma == NULL) | 152 | if (gamma == NULL) |
153 | goto alloc_fail; | 153 | goto alloc_fail; |
@@ -167,7 +167,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf) | |||
167 | static void dc_transfer_func_free(struct kref *kref) | 167 | static void dc_transfer_func_free(struct kref *kref) |
168 | { | 168 | { |
169 | struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount); | 169 | struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount); |
170 | kfree(tf); | 170 | kvfree(tf); |
171 | } | 171 | } |
172 | 172 | ||
173 | void dc_transfer_func_release(struct dc_transfer_func *tf) | 173 | void dc_transfer_func_release(struct dc_transfer_func *tf) |
@@ -177,7 +177,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf) | |||
177 | 177 | ||
178 | struct dc_transfer_func *dc_create_transfer_func(void) | 178 | struct dc_transfer_func *dc_create_transfer_func(void) |
179 | { | 179 | { |
180 | struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL); | 180 | struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL); |
181 | 181 | ||
182 | if (tf == NULL) | 182 | if (tf == NULL) |
183 | goto alloc_fail; | 183 | goto alloc_fail; |
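
The dc_surface.c hunk above (and the color_gamma.c hunks further below) trade kzalloc()/kfree() for kvzalloc()/kvfree(). As a hedged kernel-style sketch, not standalone-buildable: kvzalloc() first attempts a physically contiguous kmalloc and falls back to vmalloc for larger requests, so sizable tables like these no longer fail under memory fragmentation; anything allocated this way must be released with kvfree(), which copes with either backing store.

    /* Sketch only - mirrors the allocation pattern used in the patch above. */
    struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);

    if (!gamma)
            return NULL;

    /* ... initialise and use gamma ... */

    kvfree(gamma);  /* works for both kmalloc- and vmalloc-backed memory */
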
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 090b7a8dd67b..30b3a08b91be 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | |||
@@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data( | |||
102 | uint8_t *read_buf, | 102 | uint8_t *read_buf, |
103 | uint32_t read_size); | 103 | uint32_t read_size); |
104 | 104 | ||
105 | ssize_t dal_ddc_service_read_dpcd_data( | 105 | enum ddc_result dal_ddc_service_read_dpcd_data( |
106 | struct ddc_service *ddc, | 106 | struct ddc_service *ddc, |
107 | bool i2c, | 107 | bool i2c, |
108 | enum i2c_mot_mode mot, | 108 | enum i2c_mot_mode mot, |
109 | uint32_t address, | 109 | uint32_t address, |
110 | uint8_t *data, | 110 | uint8_t *data, |
111 | uint32_t len); | 111 | uint32_t len, |
112 | uint32_t *read); | ||
112 | 113 | ||
113 | enum ddc_result dal_ddc_service_write_dpcd_data( | 114 | enum ddc_result dal_ddc_service_write_dpcd_data( |
114 | struct ddc_service *ddc, | 115 | struct ddc_service *ddc, |
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 9831cb5eaa7c..9b0a04f99ac8 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h | |||
@@ -113,9 +113,14 @@ | |||
113 | 113 | ||
114 | #define AI_GREENLAND_P_A0 1 | 114 | #define AI_GREENLAND_P_A0 1 |
115 | #define AI_GREENLAND_P_A1 2 | 115 | #define AI_GREENLAND_P_A1 2 |
116 | #define AI_UNKNOWN 0xFF | ||
116 | 117 | ||
117 | #define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN) | 118 | #define AI_VEGA12_P_A0 20 |
118 | #define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN) | 119 | #define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0) |
120 | #define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0) | ||
121 | |||
122 | #define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) | ||
123 | #define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) | ||
119 | 124 | ||
120 | /* DCN1_0 */ | 125 | /* DCN1_0 */ |
121 | #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ | 126 | #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ |
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index e7e374f56864..b3747a019deb 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c | |||
@@ -1093,19 +1093,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | |||
1093 | 1093 | ||
1094 | output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; | 1094 | output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; |
1095 | 1095 | ||
1096 | rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), | 1096 | rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), |
1097 | GFP_KERNEL); | 1097 | GFP_KERNEL); |
1098 | if (!rgb_user) | 1098 | if (!rgb_user) |
1099 | goto rgb_user_alloc_fail; | 1099 | goto rgb_user_alloc_fail; |
1100 | rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), | 1100 | rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), |
1101 | GFP_KERNEL); | 1101 | GFP_KERNEL); |
1102 | if (!rgb_regamma) | 1102 | if (!rgb_regamma) |
1103 | goto rgb_regamma_alloc_fail; | 1103 | goto rgb_regamma_alloc_fail; |
1104 | axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), | 1104 | axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), |
1105 | GFP_KERNEL); | 1105 | GFP_KERNEL); |
1106 | if (!axix_x) | 1106 | if (!axix_x) |
1107 | goto axix_x_alloc_fail; | 1107 | goto axix_x_alloc_fail; |
1108 | coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); | 1108 | coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); |
1109 | if (!coeff) | 1109 | if (!coeff) |
1110 | goto coeff_alloc_fail; | 1110 | goto coeff_alloc_fail; |
1111 | 1111 | ||
@@ -1157,13 +1157,13 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | |||
1157 | 1157 | ||
1158 | ret = true; | 1158 | ret = true; |
1159 | 1159 | ||
1160 | kfree(coeff); | 1160 | kvfree(coeff); |
1161 | coeff_alloc_fail: | 1161 | coeff_alloc_fail: |
1162 | kfree(axix_x); | 1162 | kvfree(axix_x); |
1163 | axix_x_alloc_fail: | 1163 | axix_x_alloc_fail: |
1164 | kfree(rgb_regamma); | 1164 | kvfree(rgb_regamma); |
1165 | rgb_regamma_alloc_fail: | 1165 | rgb_regamma_alloc_fail: |
1166 | kfree(rgb_user); | 1166 | kvfree(rgb_user); |
1167 | rgb_user_alloc_fail: | 1167 | rgb_user_alloc_fail: |
1168 | return ret; | 1168 | return ret; |
1169 | } | 1169 | } |
@@ -1192,19 +1192,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, | |||
1192 | 1192 | ||
1193 | input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; | 1193 | input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; |
1194 | 1194 | ||
1195 | rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), | 1195 | rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), |
1196 | GFP_KERNEL); | 1196 | GFP_KERNEL); |
1197 | if (!rgb_user) | 1197 | if (!rgb_user) |
1198 | goto rgb_user_alloc_fail; | 1198 | goto rgb_user_alloc_fail; |
1199 | curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), | 1199 | curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), |
1200 | GFP_KERNEL); | 1200 | GFP_KERNEL); |
1201 | if (!curve) | 1201 | if (!curve) |
1202 | goto curve_alloc_fail; | 1202 | goto curve_alloc_fail; |
1203 | axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), | 1203 | axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), |
1204 | GFP_KERNEL); | 1204 | GFP_KERNEL); |
1205 | if (!axix_x) | 1205 | if (!axix_x) |
1206 | goto axix_x_alloc_fail; | 1206 | goto axix_x_alloc_fail; |
1207 | coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); | 1207 | coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); |
1208 | if (!coeff) | 1208 | if (!coeff) |
1209 | goto coeff_alloc_fail; | 1209 | goto coeff_alloc_fail; |
1210 | 1210 | ||
@@ -1246,13 +1246,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, | |||
1246 | 1246 | ||
1247 | ret = true; | 1247 | ret = true; |
1248 | 1248 | ||
1249 | kfree(coeff); | 1249 | kvfree(coeff); |
1250 | coeff_alloc_fail: | 1250 | coeff_alloc_fail: |
1251 | kfree(axix_x); | 1251 | kvfree(axix_x); |
1252 | axix_x_alloc_fail: | 1252 | axix_x_alloc_fail: |
1253 | kfree(curve); | 1253 | kvfree(curve); |
1254 | curve_alloc_fail: | 1254 | curve_alloc_fail: |
1255 | kfree(rgb_user); | 1255 | kvfree(rgb_user); |
1256 | rgb_user_alloc_fail: | 1256 | rgb_user_alloc_fail: |
1257 | 1257 | ||
1258 | return ret; | 1258 | return ret; |
@@ -1281,8 +1281,9 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, | |||
1281 | } | 1281 | } |
1282 | ret = true; | 1282 | ret = true; |
1283 | } else if (trans == TRANSFER_FUNCTION_PQ) { | 1283 | } else if (trans == TRANSFER_FUNCTION_PQ) { |
1284 | rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + | 1284 | rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * |
1285 | _EXTRA_POINTS), GFP_KERNEL); | 1285 | (MAX_HW_POINTS + _EXTRA_POINTS), |
1286 | GFP_KERNEL); | ||
1286 | if (!rgb_regamma) | 1287 | if (!rgb_regamma) |
1287 | goto rgb_regamma_alloc_fail; | 1288 | goto rgb_regamma_alloc_fail; |
1288 | points->end_exponent = 7; | 1289 | points->end_exponent = 7; |
@@ -1302,11 +1303,12 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, | |||
1302 | } | 1303 | } |
1303 | ret = true; | 1304 | ret = true; |
1304 | 1305 | ||
1305 | kfree(rgb_regamma); | 1306 | kvfree(rgb_regamma); |
1306 | } else if (trans == TRANSFER_FUNCTION_SRGB || | 1307 | } else if (trans == TRANSFER_FUNCTION_SRGB || |
1307 | trans == TRANSFER_FUNCTION_BT709) { | 1308 | trans == TRANSFER_FUNCTION_BT709) { |
1308 | rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + | 1309 | rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * |
1309 | _EXTRA_POINTS), GFP_KERNEL); | 1310 | (MAX_HW_POINTS + _EXTRA_POINTS), |
1311 | GFP_KERNEL); | ||
1310 | if (!rgb_regamma) | 1312 | if (!rgb_regamma) |
1311 | goto rgb_regamma_alloc_fail; | 1313 | goto rgb_regamma_alloc_fail; |
1312 | points->end_exponent = 0; | 1314 | points->end_exponent = 0; |
@@ -1324,7 +1326,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, | |||
1324 | } | 1326 | } |
1325 | ret = true; | 1327 | ret = true; |
1326 | 1328 | ||
1327 | kfree(rgb_regamma); | 1329 | kvfree(rgb_regamma); |
1328 | } | 1330 | } |
1329 | rgb_regamma_alloc_fail: | 1331 | rgb_regamma_alloc_fail: |
1330 | return ret; | 1332 | return ret; |
@@ -1348,8 +1350,9 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, | |||
1348 | } | 1350 | } |
1349 | ret = true; | 1351 | ret = true; |
1350 | } else if (trans == TRANSFER_FUNCTION_PQ) { | 1352 | } else if (trans == TRANSFER_FUNCTION_PQ) { |
1351 | rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + | 1353 | rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * |
1352 | _EXTRA_POINTS), GFP_KERNEL); | 1354 | (MAX_HW_POINTS + _EXTRA_POINTS), |
1355 | GFP_KERNEL); | ||
1353 | if (!rgb_degamma) | 1356 | if (!rgb_degamma) |
1354 | goto rgb_degamma_alloc_fail; | 1357 | goto rgb_degamma_alloc_fail; |
1355 | 1358 | ||
@@ -1364,11 +1367,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, | |||
1364 | } | 1367 | } |
1365 | ret = true; | 1368 | ret = true; |
1366 | 1369 | ||
1367 | kfree(rgb_degamma); | 1370 | kvfree(rgb_degamma); |
1368 | } else if (trans == TRANSFER_FUNCTION_SRGB || | 1371 | } else if (trans == TRANSFER_FUNCTION_SRGB || |
1369 | trans == TRANSFER_FUNCTION_BT709) { | 1372 | trans == TRANSFER_FUNCTION_BT709) { |
1370 | rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + | 1373 | rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * |
1371 | _EXTRA_POINTS), GFP_KERNEL); | 1374 | (MAX_HW_POINTS + _EXTRA_POINTS), |
1375 | GFP_KERNEL); | ||
1372 | if (!rgb_degamma) | 1376 | if (!rgb_degamma) |
1373 | goto rgb_degamma_alloc_fail; | 1377 | goto rgb_degamma_alloc_fail; |
1374 | 1378 | ||
@@ -1382,7 +1386,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, | |||
1382 | } | 1386 | } |
1383 | ret = true; | 1387 | ret = true; |
1384 | 1388 | ||
1385 | kfree(rgb_degamma); | 1389 | kvfree(rgb_degamma); |
1386 | } | 1390 | } |
1387 | points->end_exponent = 0; | 1391 | points->end_exponent = 0; |
1388 | points->x_point_at_y1_red = 1; | 1392 | points->x_point_at_y1_red = 1; |
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 0f5ad54d3fd3..de177ce8ca80 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h | |||
@@ -501,6 +501,32 @@ enum atom_cooling_solution_id{ | |||
501 | LIQUID_COOLING = 0x01 | 501 | LIQUID_COOLING = 0x01 |
502 | }; | 502 | }; |
503 | 503 | ||
504 | struct atom_firmware_info_v3_2 { | ||
505 | struct atom_common_table_header table_header; | ||
506 | uint32_t firmware_revision; | ||
507 | uint32_t bootup_sclk_in10khz; | ||
508 | uint32_t bootup_mclk_in10khz; | ||
509 | uint32_t firmware_capability; // enum atombios_firmware_capability | ||
510 | uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */ | ||
511 | uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address | ||
512 | uint16_t bootup_vddc_mv; | ||
513 | uint16_t bootup_vddci_mv; | ||
514 | uint16_t bootup_mvddc_mv; | ||
515 | uint16_t bootup_vddgfx_mv; | ||
516 | uint8_t mem_module_id; | ||
517 | uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */ | ||
518 | uint8_t reserved1[2]; | ||
519 | uint32_t mc_baseaddr_high; | ||
520 | uint32_t mc_baseaddr_low; | ||
521 | uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def | ||
522 | uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id | ||
523 | uint8_t board_i2c_feature_slave_addr; | ||
524 | uint8_t reserved3; | ||
525 | uint16_t bootup_mvddq_mv; | ||
526 | uint16_t bootup_mvpp_mv; | ||
527 | uint32_t zfbstartaddrin16mb; | ||
528 | uint32_t reserved2[3]; | ||
529 | }; | ||
504 | 530 | ||
505 | /* | 531 | /* |
506 | *************************************************************************** | 532 | *************************************************************************** |
@@ -1169,7 +1195,29 @@ struct atom_gfx_info_v2_2 | |||
1169 | uint32_t rlc_gpu_timer_refclk; | 1195 | uint32_t rlc_gpu_timer_refclk; |
1170 | }; | 1196 | }; |
1171 | 1197 | ||
1172 | 1198 | struct atom_gfx_info_v2_3 { | |
1199 | struct atom_common_table_header table_header; | ||
1200 | uint8_t gfxip_min_ver; | ||
1201 | uint8_t gfxip_max_ver; | ||
1202 | uint8_t max_shader_engines; | ||
1203 | uint8_t max_tile_pipes; | ||
1204 | uint8_t max_cu_per_sh; | ||
1205 | uint8_t max_sh_per_se; | ||
1206 | uint8_t max_backends_per_se; | ||
1207 | uint8_t max_texture_channel_caches; | ||
1208 | uint32_t regaddr_cp_dma_src_addr; | ||
1209 | uint32_t regaddr_cp_dma_src_addr_hi; | ||
1210 | uint32_t regaddr_cp_dma_dst_addr; | ||
1211 | uint32_t regaddr_cp_dma_dst_addr_hi; | ||
1212 | uint32_t regaddr_cp_dma_command; | ||
1213 | uint32_t regaddr_cp_status; | ||
1214 | uint32_t regaddr_rlc_gpu_clock_32; | ||
1215 | uint32_t rlc_gpu_timer_refclk; | ||
1216 | uint8_t active_cu_per_sh; | ||
1217 | uint8_t active_rb_per_se; | ||
1218 | uint16_t gcgoldenoffset; | ||
1219 | uint32_t rm21_sram_vmin_value; | ||
1220 | }; | ||
1173 | 1221 | ||
1174 | /* | 1222 | /* |
1175 | *************************************************************************** | 1223 | *************************************************************************** |
@@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1 | |||
1198 | uint8_t fw_ctf_polarity; // GPIO polarity for CTF | 1246 | uint8_t fw_ctf_polarity; // GPIO polarity for CTF |
1199 | }; | 1247 | }; |
1200 | 1248 | ||
1249 | struct atom_smu_info_v3_2 { | ||
1250 | struct atom_common_table_header table_header; | ||
1251 | uint8_t smuip_min_ver; | ||
1252 | uint8_t smuip_max_ver; | ||
1253 | uint8_t smu_rsd1; | ||
1254 | uint8_t gpuclk_ss_mode; | ||
1255 | uint16_t sclk_ss_percentage; | ||
1256 | uint16_t sclk_ss_rate_10hz; | ||
1257 | uint16_t gpuclk_ss_percentage; // in unit of 0.001% | ||
1258 | uint16_t gpuclk_ss_rate_10hz; | ||
1259 | uint32_t core_refclk_10khz; | ||
1260 | uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid | ||
1261 | uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching | ||
1262 | uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid | ||
1263 | uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event | ||
1264 | uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid | ||
1265 | uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event | ||
1266 | uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid | ||
1267 | uint8_t fw_ctf_polarity; // GPIO polarity for CTF | ||
1268 | uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid | ||
1269 | uint8_t pcc_gpio_polarity; // GPIO polarity for CTF | ||
1270 | uint16_t smugoldenoffset; | ||
1271 | uint32_t gpupll_vco_freq_10khz; | ||
1272 | uint32_t bootup_smnclk_10khz; | ||
1273 | uint32_t bootup_socclk_10khz; | ||
1274 | uint32_t bootup_mp0clk_10khz; | ||
1275 | uint32_t bootup_mp1clk_10khz; | ||
1276 | uint32_t bootup_lclk_10khz; | ||
1277 | uint32_t bootup_dcefclk_10khz; | ||
1278 | uint32_t ctf_threshold_override_value; | ||
1279 | uint32_t reserved[5]; | ||
1280 | }; | ||
1281 | |||
1282 | struct atom_smu_info_v3_3 { | ||
1283 | struct atom_common_table_header table_header; | ||
1284 | uint8_t smuip_min_ver; | ||
1285 | uint8_t smuip_max_ver; | ||
1286 | uint8_t smu_rsd1; | ||
1287 | uint8_t gpuclk_ss_mode; | ||
1288 | uint16_t sclk_ss_percentage; | ||
1289 | uint16_t sclk_ss_rate_10hz; | ||
1290 | uint16_t gpuclk_ss_percentage; // in unit of 0.001% | ||
1291 | uint16_t gpuclk_ss_rate_10hz; | ||
1292 | uint32_t core_refclk_10khz; | ||
1293 | uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid | ||
1294 | uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching | ||
1295 | uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid | ||
1296 | uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event | ||
1297 | uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid | ||
1298 | uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event | ||
1299 | uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid | ||
1300 | uint8_t fw_ctf_polarity; // GPIO polarity for CTF | ||
1301 | uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid | ||
1302 | uint8_t pcc_gpio_polarity; // GPIO polarity for CTF | ||
1303 | uint16_t smugoldenoffset; | ||
1304 | uint32_t gpupll_vco_freq_10khz; | ||
1305 | uint32_t bootup_smnclk_10khz; | ||
1306 | uint32_t bootup_socclk_10khz; | ||
1307 | uint32_t bootup_mp0clk_10khz; | ||
1308 | uint32_t bootup_mp1clk_10khz; | ||
1309 | uint32_t bootup_lclk_10khz; | ||
1310 | uint32_t bootup_dcefclk_10khz; | ||
1311 | uint32_t ctf_threshold_override_value; | ||
1312 | uint32_t syspll3_0_vco_freq_10khz; | ||
1313 | uint32_t syspll3_1_vco_freq_10khz; | ||
1314 | uint32_t bootup_fclk_10khz; | ||
1315 | uint32_t bootup_waflclk_10khz; | ||
1316 | uint32_t reserved[3]; | ||
1317 | }; | ||
1318 | |||
1201 | /* | 1319 | /* |
1202 | *************************************************************************** | 1320 | *************************************************************************** |
1203 | Data Table smc_dpm_info structure | 1321 | Data Table smc_dpm_info structure |
@@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1 | |||
1283 | uint32_t boardreserved[10]; | 1401 | uint32_t boardreserved[10]; |
1284 | }; | 1402 | }; |
1285 | 1403 | ||
1286 | |||
1287 | /* | 1404 | /* |
1288 | *************************************************************************** | 1405 | *************************************************************************** |
1289 | Data Table asic_profiling_info structure | 1406 | Data Table asic_profiling_info structure |
@@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id | |||
1864 | SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK | 1981 | SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK |
1865 | }; | 1982 | }; |
1866 | 1983 | ||
1984 | enum atom_smu11_syspll_id { | ||
1985 | SMU11_SYSPLL0_ID = 0, | ||
1986 | SMU11_SYSPLL1_0_ID = 1, | ||
1987 | SMU11_SYSPLL1_1_ID = 2, | ||
1988 | SMU11_SYSPLL1_2_ID = 3, | ||
1989 | SMU11_SYSPLL2_ID = 4, | ||
1990 | SMU11_SYSPLL3_0_ID = 5, | ||
1991 | SMU11_SYSPLL3_1_ID = 6, | ||
1992 | }; | ||
1993 | |||
1994 | |||
1995 | enum atom_smu11_syspll0_clock_id { | ||
1996 | SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK | ||
1997 | SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK | ||
1998 | SMU11_SYSPLL0_DCLK_ID = 2, // DCLK | ||
1999 | SMU11_SYSPLL0_VCLK_ID = 3, // VCLK | ||
2000 | SMU11_SYSPLL0_ECLK_ID = 4, // ECLK | ||
2001 | SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK | ||
2002 | }; | ||
2003 | |||
2004 | |||
2005 | enum atom_smu11_syspll1_0_clock_id { | ||
2006 | SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a | ||
2007 | }; | ||
2008 | |||
2009 | enum atom_smu11_syspll1_1_clock_id { | ||
2010 | SMU11_SYSPLL1_0_UCLKB_ID = 0, // UCLK_b | ||
2011 | }; | ||
2012 | |||
2013 | enum atom_smu11_syspll1_2_clock_id { | ||
2014 | SMU11_SYSPLL1_0_FCLK_ID = 0, // FCLK | ||
2015 | }; | ||
2016 | |||
2017 | enum atom_smu11_syspll2_clock_id { | ||
2018 | SMU11_SYSPLL2_GFXCLK_ID = 0, // GFXCLK | ||
2019 | }; | ||
2020 | |||
2021 | enum atom_smu11_syspll3_0_clock_id { | ||
2022 | SMU11_SYSPLL3_0_WAFCLK_ID = 0, // WAFCLK | ||
2023 | SMU11_SYSPLL3_0_DISPCLK_ID = 1, // DISPCLK | ||
2024 | SMU11_SYSPLL3_0_DPREFCLK_ID = 2, // DPREFCLK | ||
2025 | }; | ||
2026 | |||
2027 | enum atom_smu11_syspll3_1_clock_id { | ||
2028 | SMU11_SYSPLL3_1_MP1CLK_ID = 0, // MP1CLK | ||
2029 | SMU11_SYSPLL3_1_SMNCLK_ID = 1, // SMNCLK | ||
2030 | SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK | ||
2031 | }; | ||
2032 | |||
1867 | struct atom_get_smu_clock_info_output_parameters_v3_1 | 2033 | struct atom_get_smu_clock_info_output_parameters_v3_1 |
1868 | { | 2034 | { |
1869 | union { | 2035 | union { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 26fbeafc3c96..18b5b2ff47fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -79,12 +79,13 @@ | |||
79 | #define PCIE_BUS_CLK 10000 | 79 | #define PCIE_BUS_CLK 10000 |
80 | #define TCLK (PCIE_BUS_CLK / 10) | 80 | #define TCLK (PCIE_BUS_CLK / 10) |
81 | 81 | ||
82 | static const struct profile_mode_setting smu7_profiling[5] = | 82 | static const struct profile_mode_setting smu7_profiling[6] = |
83 | {{1, 0, 100, 30, 1, 0, 100, 10}, | 83 | {{1, 0, 100, 30, 1, 0, 100, 10}, |
84 | {1, 10, 0, 30, 0, 0, 0, 0}, | 84 | {1, 10, 0, 30, 0, 0, 0, 0}, |
85 | {0, 0, 0, 0, 1, 10, 16, 31}, | 85 | {0, 0, 0, 0, 1, 10, 16, 31}, |
86 | {1, 0, 11, 50, 1, 0, 100, 10}, | 86 | {1, 0, 11, 50, 1, 0, 100, 10}, |
87 | {1, 0, 5, 30, 0, 0, 0, 0}, | 87 | {1, 0, 5, 30, 0, 0, 0, 0}, |
88 | {0, 0, 0, 0, 0, 0, 0, 0}, | ||
88 | }; | 89 | }; |
89 | 90 | ||
90 | /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ | 91 | /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ |
@@ -4864,6 +4865,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) | |||
4864 | len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting); | 4865 | len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting); |
4865 | 4866 | ||
4866 | for (i = 0; i < len; i++) { | 4867 | for (i = 0; i < len; i++) { |
4868 | if (i == hwmgr->power_profile_mode) { | ||
4869 | size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n", | ||
4870 | i, profile_name[i], "*", | ||
4871 | data->current_profile_setting.sclk_up_hyst, | ||
4872 | data->current_profile_setting.sclk_down_hyst, | ||
4873 | data->current_profile_setting.sclk_activity, | ||
4874 | data->current_profile_setting.mclk_up_hyst, | ||
4875 | data->current_profile_setting.mclk_down_hyst, | ||
4876 | data->current_profile_setting.mclk_activity); | ||
4877 | continue; | ||
4878 | } | ||
4867 | if (smu7_profiling[i].bupdate_sclk) | 4879 | if (smu7_profiling[i].bupdate_sclk) |
4868 | size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ", | 4880 | size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ", |
4869 | i, profile_name[i], smu7_profiling[i].sclk_up_hyst, | 4881 | i, profile_name[i], smu7_profiling[i].sclk_up_hyst, |
@@ -4883,24 +4895,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) | |||
4883 | "-", "-", "-"); | 4895 | "-", "-", "-"); |
4884 | } | 4896 | } |
4885 | 4897 | ||
4886 | size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n", | ||
4887 | i, profile_name[i], | ||
4888 | data->custom_profile_setting.sclk_up_hyst, | ||
4889 | data->custom_profile_setting.sclk_down_hyst, | ||
4890 | data->custom_profile_setting.sclk_activity, | ||
4891 | data->custom_profile_setting.mclk_up_hyst, | ||
4892 | data->custom_profile_setting.mclk_down_hyst, | ||
4893 | data->custom_profile_setting.mclk_activity); | ||
4894 | |||
4895 | size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n", | ||
4896 | "*", "CURRENT", | ||
4897 | data->current_profile_setting.sclk_up_hyst, | ||
4898 | data->current_profile_setting.sclk_down_hyst, | ||
4899 | data->current_profile_setting.sclk_activity, | ||
4900 | data->current_profile_setting.mclk_up_hyst, | ||
4901 | data->current_profile_setting.mclk_down_hyst, | ||
4902 | data->current_profile_setting.mclk_activity); | ||
4903 | |||
4904 | return size; | 4898 | return size; |
4905 | } | 4899 | } |
4906 | 4900 | ||
@@ -4939,16 +4933,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint | |||
4939 | if (size < 8) | 4933 | if (size < 8) |
4940 | return -EINVAL; | 4934 | return -EINVAL; |
4941 | 4935 | ||
4942 | data->custom_profile_setting.bupdate_sclk = input[0]; | 4936 | tmp.bupdate_sclk = input[0]; |
4943 | data->custom_profile_setting.sclk_up_hyst = input[1]; | 4937 | tmp.sclk_up_hyst = input[1]; |
4944 | data->custom_profile_setting.sclk_down_hyst = input[2]; | 4938 | tmp.sclk_down_hyst = input[2]; |
4945 | data->custom_profile_setting.sclk_activity = input[3]; | 4939 | tmp.sclk_activity = input[3]; |
4946 | data->custom_profile_setting.bupdate_mclk = input[4]; | 4940 | tmp.bupdate_mclk = input[4]; |
4947 | data->custom_profile_setting.mclk_up_hyst = input[5]; | 4941 | tmp.mclk_up_hyst = input[5]; |
4948 | data->custom_profile_setting.mclk_down_hyst = input[6]; | 4942 | tmp.mclk_down_hyst = input[6]; |
4949 | data->custom_profile_setting.mclk_activity = input[7]; | 4943 | tmp.mclk_activity = input[7]; |
4950 | if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) { | 4944 | if (!smum_update_dpm_settings(hwmgr, &tmp)) { |
4951 | memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting)); | 4945 | memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting)); |
4952 | hwmgr->power_profile_mode = mode; | 4946 | hwmgr->power_profile_mode = mode; |
4953 | } | 4947 | } |
4954 | break; | 4948 | break; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index f40179c9ca97..b8d0bb378595 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | |||
@@ -325,7 +325,6 @@ struct smu7_hwmgr { | |||
325 | uint16_t mem_latency_high; | 325 | uint16_t mem_latency_high; |
326 | uint16_t mem_latency_low; | 326 | uint16_t mem_latency_low; |
327 | uint32_t vr_config; | 327 | uint32_t vr_config; |
328 | struct profile_mode_setting custom_profile_setting; | ||
329 | struct profile_mode_setting current_profile_setting; | 328 | struct profile_mode_setting current_profile_setting; |
330 | }; | 329 | }; |
331 | 330 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 03bc7453f3b1..d9e92e306535 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | |||
@@ -852,12 +852,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) | |||
852 | { | 852 | { |
853 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 853 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
854 | 854 | ||
855 | n = (n & 0xff) << 8; | ||
856 | |||
857 | if (data->power_containment_features & | 855 | if (data->power_containment_features & |
858 | POWERCONTAINMENT_FEATURE_PkgPwrLimit) | 856 | POWERCONTAINMENT_FEATURE_PkgPwrLimit) |
859 | return smum_send_msg_to_smc_with_parameter(hwmgr, | 857 | return smum_send_msg_to_smc_with_parameter(hwmgr, |
860 | PPSMC_MSG_PkgPwrSetLimit, n); | 858 | PPSMC_MSG_PkgPwrSetLimit, n<<8); |
861 | return 0; | 859 | return 0; |
862 | } | 860 | } |
863 | 861 | ||
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3aa65bdecb0e..684ac626ac53 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig | |||
@@ -74,6 +74,7 @@ config DRM_SIL_SII8620 | |||
74 | tristate "Silicon Image SII8620 HDMI/MHL bridge" | 74 | tristate "Silicon Image SII8620 HDMI/MHL bridge" |
75 | depends on OF && RC_CORE | 75 | depends on OF && RC_CORE |
76 | select DRM_KMS_HELPER | 76 | select DRM_KMS_HELPER |
77 | imply EXTCON | ||
77 | help | 78 | help |
78 | Silicon Image SII8620 HDMI/MHL bridge chip driver. | 79 | Silicon Image SII8620 HDMI/MHL bridge chip driver. |
79 | 80 | ||
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 7d25c42f22db..c825c76edc1d 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -155,6 +155,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) | |||
155 | state->connectors[i].state); | 155 | state->connectors[i].state); |
156 | state->connectors[i].ptr = NULL; | 156 | state->connectors[i].ptr = NULL; |
157 | state->connectors[i].state = NULL; | 157 | state->connectors[i].state = NULL; |
158 | state->connectors[i].old_state = NULL; | ||
159 | state->connectors[i].new_state = NULL; | ||
158 | drm_connector_put(connector); | 160 | drm_connector_put(connector); |
159 | } | 161 | } |
160 | 162 | ||
@@ -169,6 +171,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) | |||
169 | 171 | ||
170 | state->crtcs[i].ptr = NULL; | 172 | state->crtcs[i].ptr = NULL; |
171 | state->crtcs[i].state = NULL; | 173 | state->crtcs[i].state = NULL; |
174 | state->crtcs[i].old_state = NULL; | ||
175 | state->crtcs[i].new_state = NULL; | ||
172 | } | 176 | } |
173 | 177 | ||
174 | for (i = 0; i < config->num_total_plane; i++) { | 178 | for (i = 0; i < config->num_total_plane; i++) { |
@@ -181,6 +185,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) | |||
181 | state->planes[i].state); | 185 | state->planes[i].state); |
182 | state->planes[i].ptr = NULL; | 186 | state->planes[i].ptr = NULL; |
183 | state->planes[i].state = NULL; | 187 | state->planes[i].state = NULL; |
188 | state->planes[i].old_state = NULL; | ||
189 | state->planes[i].new_state = NULL; | ||
184 | } | 190 | } |
185 | 191 | ||
186 | for (i = 0; i < state->num_private_objs; i++) { | 192 | for (i = 0; i < state->num_private_objs; i++) { |
@@ -190,6 +196,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) | |||
190 | state->private_objs[i].state); | 196 | state->private_objs[i].state); |
191 | state->private_objs[i].ptr = NULL; | 197 | state->private_objs[i].ptr = NULL; |
192 | state->private_objs[i].state = NULL; | 198 | state->private_objs[i].state = NULL; |
199 | state->private_objs[i].old_state = NULL; | ||
200 | state->private_objs[i].new_state = NULL; | ||
193 | } | 201 | } |
194 | state->num_private_objs = 0; | 202 | state->num_private_objs = 0; |
195 | 203 | ||
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a1b9338736e3..c2c21d839727 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -716,7 +716,7 @@ static void remove_compat_control_link(struct drm_device *dev) | |||
716 | if (!minor) | 716 | if (!minor) |
717 | return; | 717 | return; |
718 | 718 | ||
719 | name = kasprintf(GFP_KERNEL, "controlD%d", minor->index); | 719 | name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64); |
720 | if (!name) | 720 | if (!name) |
721 | return; | 721 | return; |
722 | 722 | ||
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index 39ac15ce4702..9e2ae02f31e0 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c | |||
@@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev, | |||
65 | return -EINVAL; | 65 | return -EINVAL; |
66 | 66 | ||
67 | /* overflow checks for 32bit size calculations */ | 67 | /* overflow checks for 32bit size calculations */ |
68 | /* NOTE: DIV_ROUND_UP() can overflow */ | 68 | if (args->bpp > U32_MAX - 8) |
69 | return -EINVAL; | ||
69 | cpp = DIV_ROUND_UP(args->bpp, 8); | 70 | cpp = DIV_ROUND_UP(args->bpp, 8); |
70 | if (!cpp || cpp > 0xffffffffU / args->width) | 71 | if (cpp > U32_MAX / args->width) |
71 | return -EINVAL; | 72 | return -EINVAL; |
72 | stride = cpp * args->width; | 73 | stride = cpp * args->width; |
73 | if (args->height > 0xffffffffU / stride) | 74 | if (args->height > U32_MAX / stride) |
74 | return -EINVAL; | 75 | return -EINVAL; |
75 | 76 | ||
76 | /* test for wrap-around */ | 77 | /* test for wrap-around */ |
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index e394799979a6..6d9b9453707c 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c | |||
@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) | |||
212 | return -ENOMEM; | 212 | return -ENOMEM; |
213 | 213 | ||
214 | filp->private_data = priv; | 214 | filp->private_data = priv; |
215 | filp->f_mode |= FMODE_UNSIGNED_OFFSET; | ||
215 | priv->filp = filp; | 216 | priv->filp = filp; |
216 | priv->pid = get_pid(task_pid(current)); | 217 | priv->pid = get_pid(task_pid(current)); |
217 | priv->minor = minor; | 218 | priv->minor = minor; |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index abd84cbcf1c2..09c4bc0b1859 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
@@ -954,8 +954,6 @@ static int hdmi_create_connector(struct drm_encoder *encoder) | |||
954 | drm_mode_connector_attach_encoder(connector, encoder); | 954 | drm_mode_connector_attach_encoder(connector, encoder); |
955 | 955 | ||
956 | if (hdata->bridge) { | 956 | if (hdata->bridge) { |
957 | encoder->bridge = hdata->bridge; | ||
958 | hdata->bridge->encoder = encoder; | ||
959 | ret = drm_bridge_attach(encoder, hdata->bridge, NULL); | 957 | ret = drm_bridge_attach(encoder, hdata->bridge, NULL); |
960 | if (ret) | 958 | if (ret) |
961 | DRM_ERROR("Failed to attach bridge\n"); | 959 | DRM_ERROR("Failed to attach bridge\n"); |
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 257299ec95c4..272c79f5f5bf 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
@@ -473,7 +473,7 @@ static void vp_video_buffer(struct mixer_context *ctx, | |||
473 | chroma_addr[1] = chroma_addr[0] + 0x40; | 473 | chroma_addr[1] = chroma_addr[0] + 0x40; |
474 | } else { | 474 | } else { |
475 | luma_addr[1] = luma_addr[0] + fb->pitches[0]; | 475 | luma_addr[1] = luma_addr[0] + fb->pitches[0]; |
476 | chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; | 476 | chroma_addr[1] = chroma_addr[0] + fb->pitches[1]; |
477 | } | 477 | } |
478 | } else { | 478 | } else { |
479 | luma_addr[1] = 0; | 479 | luma_addr[1] = 0; |
@@ -482,6 +482,7 @@ static void vp_video_buffer(struct mixer_context *ctx, | |||
482 | 482 | ||
483 | spin_lock_irqsave(&ctx->reg_slock, flags); | 483 | spin_lock_irqsave(&ctx->reg_slock, flags); |
484 | 484 | ||
485 | vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); | ||
485 | /* interlace or progressive scan mode */ | 486 | /* interlace or progressive scan mode */ |
486 | val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); | 487 | val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); |
487 | vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); | 488 | vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); |
@@ -495,21 +496,23 @@ static void vp_video_buffer(struct mixer_context *ctx, | |||
495 | vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | | 496 | vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | |
496 | VP_IMG_VSIZE(fb->height)); | 497 | VP_IMG_VSIZE(fb->height)); |
497 | /* chroma plane for NV12/NV21 is half the height of the luma plane */ | 498 | /* chroma plane for NV12/NV21 is half the height of the luma plane */ |
498 | vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | | 499 | vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) | |
499 | VP_IMG_VSIZE(fb->height / 2)); | 500 | VP_IMG_VSIZE(fb->height / 2)); |
500 | 501 | ||
501 | vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w); | 502 | vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w); |
502 | vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); | ||
503 | vp_reg_write(ctx, VP_SRC_H_POSITION, | 503 | vp_reg_write(ctx, VP_SRC_H_POSITION, |
504 | VP_SRC_H_POSITION_VAL(state->src.x)); | 504 | VP_SRC_H_POSITION_VAL(state->src.x)); |
505 | vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); | ||
506 | |||
507 | vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w); | 505 | vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w); |
508 | vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x); | 506 | vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x); |
507 | |||
509 | if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { | 508 | if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { |
509 | vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2); | ||
510 | vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2); | ||
510 | vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2); | 511 | vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2); |
511 | vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2); | 512 | vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2); |
512 | } else { | 513 | } else { |
514 | vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); | ||
515 | vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); | ||
513 | vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h); | 516 | vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h); |
514 | vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y); | 517 | vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y); |
515 | } | 518 | } |
@@ -699,6 +702,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) | |||
699 | 702 | ||
700 | /* interlace scan need to check shadow register */ | 703 | /* interlace scan need to check shadow register */ |
701 | if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { | 704 | if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { |
705 | if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && | ||
706 | vp_reg_read(ctx, VP_SHADOW_UPDATE)) | ||
707 | goto out; | ||
708 | |||
709 | base = mixer_reg_read(ctx, MXR_CFG); | ||
710 | shadow = mixer_reg_read(ctx, MXR_CFG_S); | ||
711 | if (base != shadow) | ||
712 | goto out; | ||
713 | |||
702 | base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); | 714 | base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); |
703 | shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); | 715 | shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); |
704 | if (base != shadow) | 716 | if (base != shadow) |
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h index c311f571bdf9..189cfa2470a8 100644 --- a/drivers/gpu/drm/exynos/regs-mixer.h +++ b/drivers/gpu/drm/exynos/regs-mixer.h | |||
@@ -47,6 +47,7 @@ | |||
47 | #define MXR_MO 0x0304 | 47 | #define MXR_MO 0x0304 |
48 | #define MXR_RESOLUTION 0x0310 | 48 | #define MXR_RESOLUTION 0x0310 |
49 | 49 | ||
50 | #define MXR_CFG_S 0x2004 | ||
50 | #define MXR_GRAPHIC0_BASE_S 0x2024 | 51 | #define MXR_GRAPHIC0_BASE_S 0x2024 |
51 | #define MXR_GRAPHIC1_BASE_S 0x2044 | 52 | #define MXR_GRAPHIC1_BASE_S 0x2044 |
52 | 53 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index d596a8302ca3..854bd51b9478 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
@@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, | |||
778 | I915_USERPTR_UNSYNCHRONIZED)) | 778 | I915_USERPTR_UNSYNCHRONIZED)) |
779 | return -EINVAL; | 779 | return -EINVAL; |
780 | 780 | ||
781 | if (!args->user_size) | ||
782 | return -EINVAL; | ||
783 | |||
781 | if (offset_in_page(args->user_ptr | args->user_size)) | 784 | if (offset_in_page(args->user_ptr | args->user_size)) |
782 | return -EINVAL; | 785 | return -EINVAL; |
783 | 786 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e6a8c0ee7df1..8a69a9275e28 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -7326,6 +7326,9 @@ enum { | |||
7326 | #define SLICE_ECO_CHICKEN0 _MMIO(0x7308) | 7326 | #define SLICE_ECO_CHICKEN0 _MMIO(0x7308) |
7327 | #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) | 7327 | #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) |
7328 | 7328 | ||
7329 | #define GEN9_WM_CHICKEN3 _MMIO(0x5588) | ||
7330 | #define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9) | ||
7331 | |||
7329 | /* WaCatErrorRejectionIssue */ | 7332 | /* WaCatErrorRejectionIssue */ |
7330 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) | 7333 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) |
7331 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) | 7334 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) |
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 32d24c69da3c..704ddb4d3ca7 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c | |||
@@ -2302,9 +2302,44 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state) | |||
2302 | return 0; | 2302 | return 0; |
2303 | } | 2303 | } |
2304 | 2304 | ||
2305 | static int skl_dpll0_vco(struct intel_atomic_state *intel_state) | ||
2306 | { | ||
2307 | struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); | ||
2308 | struct intel_crtc *crtc; | ||
2309 | struct intel_crtc_state *crtc_state; | ||
2310 | int vco, i; | ||
2311 | |||
2312 | vco = intel_state->cdclk.logical.vco; | ||
2313 | if (!vco) | ||
2314 | vco = dev_priv->skl_preferred_vco_freq; | ||
2315 | |||
2316 | for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { | ||
2317 | if (!crtc_state->base.enable) | ||
2318 | continue; | ||
2319 | |||
2320 | if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) | ||
2321 | continue; | ||
2322 | |||
2323 | /* | ||
2324 | * DPLL0 VCO may need to be adjusted to get the correct | ||
2325 | * clock for eDP. This will affect cdclk as well. | ||
2326 | */ | ||
2327 | switch (crtc_state->port_clock / 2) { | ||
2328 | case 108000: | ||
2329 | case 216000: | ||
2330 | vco = 8640000; | ||
2331 | break; | ||
2332 | default: | ||
2333 | vco = 8100000; | ||
2334 | break; | ||
2335 | } | ||
2336 | } | ||
2337 | |||
2338 | return vco; | ||
2339 | } | ||
2340 | |||
2305 | static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) | 2341 | static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) |
2306 | { | 2342 | { |
2307 | struct drm_i915_private *dev_priv = to_i915(state->dev); | ||
2308 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | 2343 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
2309 | int min_cdclk, cdclk, vco; | 2344 | int min_cdclk, cdclk, vco; |
2310 | 2345 | ||
@@ -2312,9 +2347,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) | |||
2312 | if (min_cdclk < 0) | 2347 | if (min_cdclk < 0) |
2313 | return min_cdclk; | 2348 | return min_cdclk; |
2314 | 2349 | ||
2315 | vco = intel_state->cdclk.logical.vco; | 2350 | vco = skl_dpll0_vco(intel_state); |
2316 | if (!vco) | ||
2317 | vco = dev_priv->skl_preferred_vco_freq; | ||
2318 | 2351 | ||
2319 | /* | 2352 | /* |
2320 | * FIXME should also account for plane ratio | 2353 | * FIXME should also account for plane ratio |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3b48fd2561fe..56004ffbd8bb 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -15178,6 +15178,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
15178 | memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); | 15178 | memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); |
15179 | if (crtc_state->base.active) { | 15179 | if (crtc_state->base.active) { |
15180 | intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); | 15180 | intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); |
15181 | crtc->base.mode.hdisplay = crtc_state->pipe_src_w; | ||
15182 | crtc->base.mode.vdisplay = crtc_state->pipe_src_h; | ||
15181 | intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); | 15183 | intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); |
15182 | WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); | 15184 | WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); |
15183 | 15185 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9a4a51e79fa1..b7b4cfdeb974 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1881,26 +1881,6 @@ found: | |||
1881 | reduce_m_n); | 1881 | reduce_m_n); |
1882 | } | 1882 | } |
1883 | 1883 | ||
1884 | /* | ||
1885 | * DPLL0 VCO may need to be adjusted to get the correct | ||
1886 | * clock for eDP. This will affect cdclk as well. | ||
1887 | */ | ||
1888 | if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) { | ||
1889 | int vco; | ||
1890 | |||
1891 | switch (pipe_config->port_clock / 2) { | ||
1892 | case 108000: | ||
1893 | case 216000: | ||
1894 | vco = 8640000; | ||
1895 | break; | ||
1896 | default: | ||
1897 | vco = 8100000; | ||
1898 | break; | ||
1899 | } | ||
1900 | |||
1901 | to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco; | ||
1902 | } | ||
1903 | |||
1904 | if (!HAS_DDI(dev_priv)) | 1884 | if (!HAS_DDI(dev_priv)) |
1905 | intel_dp_set_clock(encoder, pipe_config); | 1885 | intel_dp_set_clock(encoder, pipe_config); |
1906 | 1886 | ||
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 4ba139c27fba..f7c25828d3bb 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -1149,6 +1149,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) | |||
1149 | WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, | 1149 | WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, |
1150 | GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); | 1150 | GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); |
1151 | 1151 | ||
1152 | /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ | ||
1153 | if (IS_GEN9_LP(dev_priv)) | ||
1154 | WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); | ||
1155 | |||
1152 | /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ | 1156 | /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ |
1153 | ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); | 1157 | ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); |
1154 | if (ret) | 1158 | if (ret) |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index e3a5f673ff67..8704f7f8d072 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -884,6 +884,7 @@ static void execlists_submission_tasklet(unsigned long data) | |||
884 | 884 | ||
885 | head = execlists->csb_head; | 885 | head = execlists->csb_head; |
886 | tail = READ_ONCE(buf[write_idx]); | 886 | tail = READ_ONCE(buf[write_idx]); |
887 | rmb(); /* Hopefully paired with a wmb() in HW */ | ||
887 | } | 888 | } |
888 | GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n", | 889 | GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n", |
889 | engine->name, | 890 | engine->name, |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index d35d2d50f595..8691c86f579c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -326,7 +326,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder, | |||
326 | 326 | ||
327 | I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); | 327 | I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); |
328 | POSTING_READ(lvds_encoder->reg); | 328 | POSTING_READ(lvds_encoder->reg); |
329 | if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000)) | 329 | |
330 | if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000)) | ||
330 | DRM_ERROR("timed out waiting for panel to power on\n"); | 331 | DRM_ERROR("timed out waiting for panel to power on\n"); |
331 | 332 | ||
332 | intel_panel_enable_backlight(pipe_config, conn_state); | 333 | intel_panel_enable_backlight(pipe_config, conn_state); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 6f402c4f2bdd..ab61c038f42c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -214,7 +214,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, | |||
214 | INIT_LIST_HEAD(&nvbo->entry); | 214 | INIT_LIST_HEAD(&nvbo->entry); |
215 | INIT_LIST_HEAD(&nvbo->vma_list); | 215 | INIT_LIST_HEAD(&nvbo->vma_list); |
216 | nvbo->bo.bdev = &drm->ttm.bdev; | 216 | nvbo->bo.bdev = &drm->ttm.bdev; |
217 | nvbo->cli = cli; | ||
218 | 217 | ||
219 | /* This is confusing, and doesn't actually mean we want an uncached | 218 | /* This is confusing, and doesn't actually mean we want an uncached |
220 | * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated | 219 | * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index be8e00b49cde..73c48440d4d7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h | |||
@@ -26,8 +26,6 @@ struct nouveau_bo { | |||
26 | 26 | ||
27 | struct list_head vma_list; | 27 | struct list_head vma_list; |
28 | 28 | ||
29 | struct nouveau_cli *cli; | ||
30 | |||
31 | unsigned contig:1; | 29 | unsigned contig:1; |
32 | unsigned page:5; | 30 | unsigned page:5; |
33 | unsigned kind:8; | 31 | unsigned kind:8; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index dff51a0ee028..8c093ca4222e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c | |||
@@ -63,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
63 | struct ttm_mem_reg *reg) | 63 | struct ttm_mem_reg *reg) |
64 | { | 64 | { |
65 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 65 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
66 | struct nouveau_drm *drm = nvbo->cli->drm; | 66 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
67 | struct nouveau_mem *mem; | 67 | struct nouveau_mem *mem; |
68 | int ret; | 68 | int ret; |
69 | 69 | ||
@@ -103,7 +103,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, | |||
103 | struct ttm_mem_reg *reg) | 103 | struct ttm_mem_reg *reg) |
104 | { | 104 | { |
105 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 105 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
106 | struct nouveau_drm *drm = nvbo->cli->drm; | 106 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
107 | struct nouveau_mem *mem; | 107 | struct nouveau_mem *mem; |
108 | int ret; | 108 | int ret; |
109 | 109 | ||
@@ -131,7 +131,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man, | |||
131 | struct ttm_mem_reg *reg) | 131 | struct ttm_mem_reg *reg) |
132 | { | 132 | { |
133 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 133 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
134 | struct nouveau_drm *drm = nvbo->cli->drm; | 134 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
135 | struct nouveau_mem *mem; | 135 | struct nouveau_mem *mem; |
136 | int ret; | 136 | int ret; |
137 | 137 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 8bd739cfd00d..2b3ccd850750 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -3264,10 +3264,11 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
3264 | 3264 | ||
3265 | drm_connector_unregister(&mstc->connector); | 3265 | drm_connector_unregister(&mstc->connector); |
3266 | 3266 | ||
3267 | drm_modeset_lock_all(drm->dev); | ||
3268 | drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector); | 3267 | drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector); |
3268 | |||
3269 | drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL); | ||
3269 | mstc->port = NULL; | 3270 | mstc->port = NULL; |
3270 | drm_modeset_unlock_all(drm->dev); | 3271 | drm_modeset_unlock(&drm->dev->mode_config.connection_mutex); |
3271 | 3272 | ||
3272 | drm_connector_unreference(&mstc->connector); | 3273 | drm_connector_unreference(&mstc->connector); |
3273 | } | 3274 | } |
@@ -3277,9 +3278,7 @@ nv50_mstm_register_connector(struct drm_connector *connector) | |||
3277 | { | 3278 | { |
3278 | struct nouveau_drm *drm = nouveau_drm(connector->dev); | 3279 | struct nouveau_drm *drm = nouveau_drm(connector->dev); |
3279 | 3280 | ||
3280 | drm_modeset_lock_all(drm->dev); | ||
3281 | drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector); | 3281 | drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector); |
3282 | drm_modeset_unlock_all(drm->dev); | ||
3283 | 3282 | ||
3284 | drm_connector_register(connector); | 3283 | drm_connector_register(connector); |
3285 | } | 3284 | } |
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 5e2e65e88847..7f3ac6b13b56 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c | |||
@@ -828,6 +828,12 @@ static void dispc_ovl_set_scale_coef(struct dispc_device *dispc, | |||
828 | h_coef = dispc_ovl_get_scale_coef(fir_hinc, true); | 828 | h_coef = dispc_ovl_get_scale_coef(fir_hinc, true); |
829 | v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps); | 829 | v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps); |
830 | 830 | ||
831 | if (!h_coef || !v_coef) { | ||
832 | dev_err(&dispc->pdev->dev, "%s: failed to find scale coefs\n", | ||
833 | __func__); | ||
834 | return; | ||
835 | } | ||
836 | |||
831 | for (i = 0; i < 8; i++) { | 837 | for (i = 0; i < 8; i++) { |
832 | u32 h, hv; | 838 | u32 h, hv; |
833 | 839 | ||
@@ -2342,7 +2348,7 @@ static int dispc_ovl_calc_scaling_24xx(struct dispc_device *dispc, | |||
2342 | } | 2348 | } |
2343 | 2349 | ||
2344 | if (in_width > maxsinglelinewidth) { | 2350 | if (in_width > maxsinglelinewidth) { |
2345 | DSSERR("Cannot scale max input width exceeded"); | 2351 | DSSERR("Cannot scale max input width exceeded\n"); |
2346 | return -EINVAL; | 2352 | return -EINVAL; |
2347 | } | 2353 | } |
2348 | return 0; | 2354 | return 0; |
@@ -2424,13 +2430,13 @@ again: | |||
2424 | } | 2430 | } |
2425 | 2431 | ||
2426 | if (in_width > (maxsinglelinewidth * 2)) { | 2432 | if (in_width > (maxsinglelinewidth * 2)) { |
2427 | DSSERR("Cannot setup scaling"); | 2433 | DSSERR("Cannot setup scaling\n"); |
2428 | DSSERR("width exceeds maximum width possible"); | 2434 | DSSERR("width exceeds maximum width possible\n"); |
2429 | return -EINVAL; | 2435 | return -EINVAL; |
2430 | } | 2436 | } |
2431 | 2437 | ||
2432 | if (in_width > maxsinglelinewidth && *five_taps) { | 2438 | if (in_width > maxsinglelinewidth && *five_taps) { |
2433 | DSSERR("cannot setup scaling with five taps"); | 2439 | DSSERR("cannot setup scaling with five taps\n"); |
2434 | return -EINVAL; | 2440 | return -EINVAL; |
2435 | } | 2441 | } |
2436 | return 0; | 2442 | return 0; |
@@ -2472,7 +2478,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc, | |||
2472 | in_width > maxsinglelinewidth && ++*decim_x); | 2478 | in_width > maxsinglelinewidth && ++*decim_x); |
2473 | 2479 | ||
2474 | if (in_width > maxsinglelinewidth) { | 2480 | if (in_width > maxsinglelinewidth) { |
2475 | DSSERR("Cannot scale width exceeds max line width"); | 2481 | DSSERR("Cannot scale width exceeds max line width\n"); |
2476 | return -EINVAL; | 2482 | return -EINVAL; |
2477 | } | 2483 | } |
2478 | 2484 | ||
@@ -2490,7 +2496,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc, | |||
2490 | * bandwidth. Despite what theory says this appears to | 2496 | * bandwidth. Despite what theory says this appears to |
2491 | * be true also for 16-bit color formats. | 2497 | * be true also for 16-bit color formats. |
2492 | */ | 2498 | */ |
2493 | DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)", *decim_x); | 2499 | DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)\n", *decim_x); |
2494 | 2500 | ||
2495 | return -EINVAL; | 2501 | return -EINVAL; |
2496 | } | 2502 | } |
@@ -4633,7 +4639,7 @@ static int dispc_errata_i734_wa_init(struct dispc_device *dispc) | |||
4633 | i734_buf.size, &i734_buf.paddr, | 4639 | i734_buf.size, &i734_buf.paddr, |
4634 | GFP_KERNEL); | 4640 | GFP_KERNEL); |
4635 | if (!i734_buf.vaddr) { | 4641 | if (!i734_buf.vaddr) { |
4636 | dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed", | 4642 | dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed\n", |
4637 | __func__); | 4643 | __func__); |
4638 | return -ENOMEM; | 4644 | return -ENOMEM; |
4639 | } | 4645 | } |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 97c88861d67a..5879f45f6fc9 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c | |||
@@ -679,7 +679,7 @@ static int hdmi_audio_config(struct device *dev, | |||
679 | struct omap_dss_audio *dss_audio) | 679 | struct omap_dss_audio *dss_audio) |
680 | { | 680 | { |
681 | struct omap_hdmi *hd = dev_get_drvdata(dev); | 681 | struct omap_hdmi *hd = dev_get_drvdata(dev); |
682 | int ret; | 682 | int ret = 0; |
683 | 683 | ||
684 | mutex_lock(&hd->lock); | 684 | mutex_lock(&hd->lock); |
685 | 685 | ||
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 35ed2add6189..813ba42f2753 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | |||
@@ -922,8 +922,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) | |||
922 | { | 922 | { |
923 | const struct hdmi4_features *features; | 923 | const struct hdmi4_features *features; |
924 | struct resource *res; | 924 | struct resource *res; |
925 | const struct soc_device_attribute *soc; | ||
925 | 926 | ||
926 | features = soc_device_match(hdmi4_soc_devices)->data; | 927 | soc = soc_device_match(hdmi4_soc_devices); |
928 | if (!soc) | ||
929 | return -ENODEV; | ||
930 | |||
931 | features = soc->data; | ||
927 | core->cts_swmode = features->cts_swmode; | 932 | core->cts_swmode = features->cts_swmode; |
928 | core->audio_use_mclk = features->audio_use_mclk; | 933 | core->audio_use_mclk = features->audio_use_mclk; |
929 | 934 | ||
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index d28da9ac3e90..ae1a001d1b83 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c | |||
@@ -671,7 +671,7 @@ static int hdmi_audio_config(struct device *dev, | |||
671 | struct omap_dss_audio *dss_audio) | 671 | struct omap_dss_audio *dss_audio) |
672 | { | 672 | { |
673 | struct omap_hdmi *hd = dev_get_drvdata(dev); | 673 | struct omap_hdmi *hd = dev_get_drvdata(dev); |
674 | int ret; | 674 | int ret = 0; |
675 | 675 | ||
676 | mutex_lock(&hd->lock); | 676 | mutex_lock(&hd->lock); |
677 | 677 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index a0d7b1d905e8..5cde26ac937b 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c | |||
@@ -121,6 +121,9 @@ static int omap_connector_get_modes(struct drm_connector *connector) | |||
121 | if (dssdrv->read_edid) { | 121 | if (dssdrv->read_edid) { |
122 | void *edid = kzalloc(MAX_EDID, GFP_KERNEL); | 122 | void *edid = kzalloc(MAX_EDID, GFP_KERNEL); |
123 | 123 | ||
124 | if (!edid) | ||
125 | return 0; | ||
126 | |||
124 | if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) && | 127 | if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) && |
125 | drm_edid_is_valid(edid)) { | 128 | drm_edid_is_valid(edid)) { |
126 | drm_mode_connector_update_edid_property( | 129 | drm_mode_connector_update_edid_property( |
@@ -139,6 +142,9 @@ static int omap_connector_get_modes(struct drm_connector *connector) | |||
139 | struct drm_display_mode *mode = drm_mode_create(dev); | 142 | struct drm_display_mode *mode = drm_mode_create(dev); |
140 | struct videomode vm = {0}; | 143 | struct videomode vm = {0}; |
141 | 144 | ||
145 | if (!mode) | ||
146 | return 0; | ||
147 | |||
142 | dssdrv->get_timings(dssdev, &vm); | 148 | dssdrv->get_timings(dssdev, &vm); |
143 | 149 | ||
144 | drm_display_mode_from_videomode(&vm, mode); | 150 | drm_display_mode_from_videomode(&vm, mode); |
@@ -200,6 +206,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector, | |||
200 | if (!r) { | 206 | if (!r) { |
201 | /* check if vrefresh is still valid */ | 207 | /* check if vrefresh is still valid */ |
202 | new_mode = drm_mode_duplicate(dev, mode); | 208 | new_mode = drm_mode_duplicate(dev, mode); |
209 | |||
210 | if (!new_mode) | ||
211 | return MODE_BAD; | ||
212 | |||
203 | new_mode->clock = vm.pixelclock / 1000; | 213 | new_mode->clock = vm.pixelclock / 1000; |
204 | new_mode->vrefresh = 0; | 214 | new_mode->vrefresh = 0; |
205 | if (mode->vrefresh == drm_mode_vrefresh(new_mode)) | 215 | if (mode->vrefresh == drm_mode_vrefresh(new_mode)) |
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f9fa1c90b35c..401c02e9e6b2 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | |||
@@ -401,12 +401,16 @@ int tiler_unpin(struct tiler_block *block) | |||
401 | struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, | 401 | struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, |
402 | u16 h, u16 align) | 402 | u16 h, u16 align) |
403 | { | 403 | { |
404 | struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL); | 404 | struct tiler_block *block; |
405 | u32 min_align = 128; | 405 | u32 min_align = 128; |
406 | int ret; | 406 | int ret; |
407 | unsigned long flags; | 407 | unsigned long flags; |
408 | u32 slot_bytes; | 408 | u32 slot_bytes; |
409 | 409 | ||
410 | block = kzalloc(sizeof(*block), GFP_KERNEL); | ||
411 | if (!block) | ||
412 | return ERR_PTR(-ENOMEM); | ||
413 | |||
410 | BUG_ON(!validfmt(fmt)); | 414 | BUG_ON(!validfmt(fmt)); |
411 | 415 | ||
412 | /* convert width/height to slots */ | 416 | /* convert width/height to slots */ |
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index d7f7bc9f061a..817be3c41863 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c | |||
@@ -90,7 +90,7 @@ static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset, | |||
90 | { | 90 | { |
91 | int i; | 91 | int i; |
92 | unsigned long index; | 92 | unsigned long index; |
93 | bool area_free; | 93 | bool area_free = false; |
94 | unsigned long slots_per_band = PAGE_SIZE / slot_bytes; | 94 | unsigned long slots_per_band = PAGE_SIZE / slot_bytes; |
95 | unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0; | 95 | unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0; |
96 | unsigned long curr_bit = bit_offset; | 96 | unsigned long curr_bit = bit_offset; |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index f0481b7b60c5..06c94e3a5f15 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -910,7 +910,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, | |||
910 | while (npages >= HPAGE_PMD_NR) { | 910 | while (npages >= HPAGE_PMD_NR) { |
911 | gfp_t huge_flags = gfp_flags; | 911 | gfp_t huge_flags = gfp_flags; |
912 | 912 | ||
913 | huge_flags |= GFP_TRANSHUGE; | 913 | huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | |
914 | __GFP_KSWAPD_RECLAIM; | ||
914 | huge_flags &= ~__GFP_MOVABLE; | 915 | huge_flags &= ~__GFP_MOVABLE; |
915 | huge_flags &= ~__GFP_COMP; | 916 | huge_flags &= ~__GFP_COMP; |
916 | p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); | 917 | p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); |
@@ -1027,11 +1028,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | |||
1027 | GFP_USER | GFP_DMA32, "uc dma", 0); | 1028 | GFP_USER | GFP_DMA32, "uc dma", 0); |
1028 | 1029 | ||
1029 | ttm_page_pool_init_locked(&_manager->wc_pool_huge, | 1030 | ttm_page_pool_init_locked(&_manager->wc_pool_huge, |
1030 | GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP), | 1031 | (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | |
1032 | __GFP_KSWAPD_RECLAIM) & | ||
1033 | ~(__GFP_MOVABLE | __GFP_COMP), | ||
1031 | "wc huge", order); | 1034 | "wc huge", order); |
1032 | 1035 | ||
1033 | ttm_page_pool_init_locked(&_manager->uc_pool_huge, | 1036 | ttm_page_pool_init_locked(&_manager->uc_pool_huge, |
1034 | GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP) | 1037 | (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | |
1038 | __GFP_KSWAPD_RECLAIM) & | ||
1039 | ~(__GFP_MOVABLE | __GFP_COMP) | ||
1035 | , "uc huge", order); | 1040 | , "uc huge", order); |
1036 | 1041 | ||
1037 | _manager->options.max_size = max_pages; | 1042 | _manager->options.max_size = max_pages; |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 8a25d1974385..f63d99c302e4 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | |||
@@ -910,7 +910,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge) | |||
910 | gfp_flags |= __GFP_ZERO; | 910 | gfp_flags |= __GFP_ZERO; |
911 | 911 | ||
912 | if (huge) { | 912 | if (huge) { |
913 | gfp_flags |= GFP_TRANSHUGE; | 913 | gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | |
914 | __GFP_KSWAPD_RECLAIM; | ||
914 | gfp_flags &= ~__GFP_MOVABLE; | 915 | gfp_flags &= ~__GFP_MOVABLE; |
915 | gfp_flags &= ~__GFP_COMP; | 916 | gfp_flags &= ~__GFP_COMP; |
916 | } | 917 | } |
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 72c9dbd81d7f..f185812970da 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c | |||
@@ -96,7 +96,6 @@ struct vc4_dpi { | |||
96 | struct platform_device *pdev; | 96 | struct platform_device *pdev; |
97 | 97 | ||
98 | struct drm_encoder *encoder; | 98 | struct drm_encoder *encoder; |
99 | struct drm_connector *connector; | ||
100 | 99 | ||
101 | void __iomem *regs; | 100 | void __iomem *regs; |
102 | 101 | ||
@@ -164,14 +163,31 @@ static void vc4_dpi_encoder_disable(struct drm_encoder *encoder) | |||
164 | 163 | ||
165 | static void vc4_dpi_encoder_enable(struct drm_encoder *encoder) | 164 | static void vc4_dpi_encoder_enable(struct drm_encoder *encoder) |
166 | { | 165 | { |
166 | struct drm_device *dev = encoder->dev; | ||
167 | struct drm_display_mode *mode = &encoder->crtc->mode; | 167 | struct drm_display_mode *mode = &encoder->crtc->mode; |
168 | struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder); | 168 | struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder); |
169 | struct vc4_dpi *dpi = vc4_encoder->dpi; | 169 | struct vc4_dpi *dpi = vc4_encoder->dpi; |
170 | struct drm_connector_list_iter conn_iter; | ||
171 | struct drm_connector *connector = NULL, *connector_scan; | ||
170 | u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE; | 172 | u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE; |
171 | int ret; | 173 | int ret; |
172 | 174 | ||
173 | if (dpi->connector->display_info.num_bus_formats) { | 175 | /* Look up the connector attached to DPI so we can get the |
174 | u32 bus_format = dpi->connector->display_info.bus_formats[0]; | 176 | * bus_format. Ideally the bridge would tell us the |
177 | * bus_format we want, but it doesn't yet, so assume that it's | ||
178 | * uniform throughout the bridge chain. | ||
179 | */ | ||
180 | drm_connector_list_iter_begin(dev, &conn_iter); | ||
181 | drm_for_each_connector_iter(connector_scan, &conn_iter) { | ||
182 | if (connector_scan->encoder == encoder) { | ||
183 | connector = connector_scan; | ||
184 | break; | ||
185 | } | ||
186 | } | ||
187 | drm_connector_list_iter_end(&conn_iter); | ||
188 | |||
189 | if (connector && connector->display_info.num_bus_formats) { | ||
190 | u32 bus_format = connector->display_info.bus_formats[0]; | ||
175 | 191 | ||
176 | switch (bus_format) { | 192 | switch (bus_format) { |
177 | case MEDIA_BUS_FMT_RGB888_1X24: | 193 | case MEDIA_BUS_FMT_RGB888_1X24: |
@@ -199,6 +215,9 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder) | |||
199 | DRM_ERROR("Unknown media bus format %d\n", bus_format); | 215 | DRM_ERROR("Unknown media bus format %d\n", bus_format); |
200 | break; | 216 | break; |
201 | } | 217 | } |
218 | } else { | ||
219 | /* Default to 24bit if no connector found. */ | ||
220 | dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT); | ||
202 | } | 221 | } |
203 | 222 | ||
204 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) | 223 | if (mode->flags & DRM_MODE_FLAG_NHSYNC) |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 94b99c90425a..7c95ed5c5cac 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
@@ -130,6 +130,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) | |||
130 | struct vc4_file *vc4file = file->driver_priv; | 130 | struct vc4_file *vc4file = file->driver_priv; |
131 | 131 | ||
132 | vc4_perfmon_close_file(vc4file); | 132 | vc4_perfmon_close_file(vc4file); |
133 | kfree(vc4file); | ||
133 | } | 134 | } |
134 | 135 | ||
135 | static const struct vm_operations_struct vc4_vm_ops = { | 136 | static const struct vm_operations_struct vc4_vm_ops = { |
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index ce39390be389..13dcaad06798 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
@@ -503,7 +503,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, | |||
503 | * the scl fields here. | 503 | * the scl fields here. |
504 | */ | 504 | */ |
505 | if (num_planes == 1) { | 505 | if (num_planes == 1) { |
506 | scl0 = vc4_get_scl_field(state, 1); | 506 | scl0 = vc4_get_scl_field(state, 0); |
507 | scl1 = scl0; | 507 | scl1 = scl0; |
508 | } else { | 508 | } else { |
509 | scl0 = vc4_get_scl_field(state, 1); | 509 | scl0 = vc4_get_scl_field(state, 1); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 648f8127f65a..3d667e903beb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -482,6 +482,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, | |||
482 | return ret; | 482 | return ret; |
483 | } | 483 | } |
484 | 484 | ||
485 | vps->dmabuf_size = size; | ||
486 | |||
485 | /* | 487 | /* |
486 | * TTM already thinks the buffer is pinned, but make sure the | 488 | * TTM already thinks the buffer is pinned, but make sure the |
487 | * pin_count is upped. | 489 | * pin_count is upped. |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 60252fd796f6..0000434a1fbd 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -462,10 +462,11 @@ config HID_LENOVO | |||
462 | select NEW_LEDS | 462 | select NEW_LEDS |
463 | select LEDS_CLASS | 463 | select LEDS_CLASS |
464 | ---help--- | 464 | ---help--- |
465 | Support for Lenovo devices that are not fully compliant with HID standard. | 465 | Support for IBM/Lenovo devices that are not fully compliant with HID standard. |
466 | 466 | ||
467 | Say Y if you want support for the non-compliant features of the Lenovo | 467 | Say Y if you want support for horizontal scrolling of the IBM/Lenovo |
468 | Thinkpad standalone keyboards, e.g: | 468 | Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad |
469 | standalone keyboards, e.g: | ||
469 | - ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint | 470 | - ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint |
470 | configuration) | 471 | configuration) |
471 | - ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys) | 472 | - ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys) |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 0b5cc910f62e..46f5ecd11bf7 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -552,6 +552,13 @@ | |||
552 | #define USB_VENDOR_ID_HUION 0x256c | 552 | #define USB_VENDOR_ID_HUION 0x256c |
553 | #define USB_DEVICE_ID_HUION_TABLET 0x006e | 553 | #define USB_DEVICE_ID_HUION_TABLET 0x006e |
554 | 554 | ||
555 | #define USB_VENDOR_ID_IBM 0x04b3 | ||
556 | #define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100 | ||
557 | #define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO 0x3103 | ||
558 | #define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL 0x3105 | ||
559 | #define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL 0x3108 | ||
560 | #define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO 0x3109 | ||
561 | |||
555 | #define USB_VENDOR_ID_IDEACOM 0x1cb6 | 562 | #define USB_VENDOR_ID_IDEACOM 0x1cb6 |
556 | #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 | 563 | #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 |
557 | #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 | 564 | #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 |
@@ -684,6 +691,7 @@ | |||
684 | #define USB_DEVICE_ID_LENOVO_TPKBD 0x6009 | 691 | #define USB_DEVICE_ID_LENOVO_TPKBD 0x6009 |
685 | #define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047 | 692 | #define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047 |
686 | #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 | 693 | #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 |
694 | #define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049 | ||
687 | #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 | 695 | #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 |
688 | #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 | 696 | #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 |
689 | #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 | 697 | #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 |
@@ -964,6 +972,7 @@ | |||
964 | #define USB_DEVICE_ID_SIS817_TOUCH 0x0817 | 972 | #define USB_DEVICE_ID_SIS817_TOUCH 0x0817 |
965 | #define USB_DEVICE_ID_SIS_TS 0x1013 | 973 | #define USB_DEVICE_ID_SIS_TS 0x1013 |
966 | #define USB_DEVICE_ID_SIS1030_TOUCH 0x1030 | 974 | #define USB_DEVICE_ID_SIS1030_TOUCH 0x1030 |
975 | #define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb | ||
967 | 976 | ||
968 | #define USB_VENDOR_ID_SKYCABLE 0x1223 | 977 | #define USB_VENDOR_ID_SKYCABLE 0x1223 |
969 | #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 | 978 | #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 |
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 1ac4ff4d57a6..643b6eb54442 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c | |||
@@ -6,6 +6,17 @@ | |||
6 | * | 6 | * |
7 | * Copyright (c) 2012 Bernhard Seibold | 7 | * Copyright (c) 2012 Bernhard Seibold |
8 | * Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk> | 8 | * Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk> |
9 | * | ||
10 | * Linux IBM/Lenovo Scrollpoint mouse driver: | ||
11 | * - IBM Scrollpoint III | ||
12 | * - IBM Scrollpoint Pro | ||
13 | * - IBM Scrollpoint Optical | ||
14 | * - IBM Scrollpoint Optical 800dpi | ||
15 | * - IBM Scrollpoint Optical 800dpi Pro | ||
16 | * - Lenovo Scrollpoint Optical | ||
17 | * | ||
18 | * Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com> | ||
19 | * Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com> | ||
9 | */ | 20 | */ |
10 | 21 | ||
11 | /* | 22 | /* |
@@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev, | |||
160 | return 0; | 171 | return 0; |
161 | } | 172 | } |
162 | 173 | ||
174 | static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev, | ||
175 | struct hid_input *hi, struct hid_field *field, | ||
176 | struct hid_usage *usage, unsigned long **bit, int *max) | ||
177 | { | ||
178 | if (usage->hid == HID_GD_Z) { | ||
179 | hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL); | ||
180 | return 1; | ||
181 | } | ||
182 | return 0; | ||
183 | } | ||
184 | |||
163 | static int lenovo_input_mapping(struct hid_device *hdev, | 185 | static int lenovo_input_mapping(struct hid_device *hdev, |
164 | struct hid_input *hi, struct hid_field *field, | 186 | struct hid_input *hi, struct hid_field *field, |
165 | struct hid_usage *usage, unsigned long **bit, int *max) | 187 | struct hid_usage *usage, unsigned long **bit, int *max) |
@@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev, | |||
172 | case USB_DEVICE_ID_LENOVO_CBTKBD: | 194 | case USB_DEVICE_ID_LENOVO_CBTKBD: |
173 | return lenovo_input_mapping_cptkbd(hdev, hi, field, | 195 | return lenovo_input_mapping_cptkbd(hdev, hi, field, |
174 | usage, bit, max); | 196 | usage, bit, max); |
197 | case USB_DEVICE_ID_IBM_SCROLLPOINT_III: | ||
198 | case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO: | ||
199 | case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL: | ||
200 | case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL: | ||
201 | case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO: | ||
202 | case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL: | ||
203 | return lenovo_input_mapping_scrollpoint(hdev, hi, field, | ||
204 | usage, bit, max); | ||
175 | default: | 205 | default: |
176 | return 0; | 206 | return 0; |
177 | } | 207 | } |
@@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = { | |||
883 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, | 913 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, |
884 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, | 914 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, |
885 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, | 915 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, |
916 | { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) }, | ||
917 | { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) }, | ||
918 | { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) }, | ||
919 | { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) }, | ||
920 | { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) }, | ||
921 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) }, | ||
886 | { } | 922 | { } |
887 | }; | 923 | }; |
888 | 924 | ||
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 963328674e93..cc33622253aa 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
@@ -174,6 +174,8 @@ static const struct i2c_hid_quirks { | |||
174 | I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, | 174 | I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, |
175 | { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, | 175 | { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, |
176 | I2C_HID_QUIRK_RESEND_REPORT_DESCR }, | 176 | I2C_HID_QUIRK_RESEND_REPORT_DESCR }, |
177 | { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH, | ||
178 | I2C_HID_QUIRK_RESEND_REPORT_DESCR }, | ||
177 | { 0, 0 } | 179 | { 0, 0 } |
178 | }; | 180 | }; |
179 | 181 | ||
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index 157b44aacdff..acc2536c8094 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c | |||
@@ -77,21 +77,21 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf, | |||
77 | struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data; | 77 | struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data; |
78 | int curr_hid_dev = client_data->cur_hid_dev; | 78 | int curr_hid_dev = client_data->cur_hid_dev; |
79 | 79 | ||
80 | if (data_len < sizeof(struct hostif_msg_hdr)) { | ||
81 | dev_err(&client_data->cl_device->dev, | ||
82 | "[hid-ish]: error, received %u which is less than data header %u\n", | ||
83 | (unsigned int)data_len, | ||
84 | (unsigned int)sizeof(struct hostif_msg_hdr)); | ||
85 | ++client_data->bad_recv_cnt; | ||
86 | ish_hw_reset(hid_ishtp_cl->dev); | ||
87 | return; | ||
88 | } | ||
89 | |||
90 | payload = recv_buf + sizeof(struct hostif_msg_hdr); | 80 | payload = recv_buf + sizeof(struct hostif_msg_hdr); |
91 | total_len = data_len; | 81 | total_len = data_len; |
92 | cur_pos = 0; | 82 | cur_pos = 0; |
93 | 83 | ||
94 | do { | 84 | do { |
85 | if (cur_pos + sizeof(struct hostif_msg) > total_len) { | ||
86 | dev_err(&client_data->cl_device->dev, | ||
87 | "[hid-ish]: error, received %u which is less than data header %u\n", | ||
88 | (unsigned int)data_len, | ||
89 | (unsigned int)sizeof(struct hostif_msg_hdr)); | ||
90 | ++client_data->bad_recv_cnt; | ||
91 | ish_hw_reset(hid_ishtp_cl->dev); | ||
92 | break; | ||
93 | } | ||
94 | |||
95 | recv_msg = (struct hostif_msg *)(recv_buf + cur_pos); | 95 | recv_msg = (struct hostif_msg *)(recv_buf + cur_pos); |
96 | payload_len = recv_msg->hdr.size; | 96 | payload_len = recv_msg->hdr.size; |
97 | 97 | ||
@@ -412,9 +412,7 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id, | |||
412 | { | 412 | { |
413 | struct ishtp_hid_data *hid_data = hid->driver_data; | 413 | struct ishtp_hid_data *hid_data = hid->driver_data; |
414 | struct ishtp_cl_data *client_data = hid_data->client_data; | 414 | struct ishtp_cl_data *client_data = hid_data->client_data; |
415 | static unsigned char buf[10]; | 415 | struct hostif_msg_to_sensor msg = {}; |
416 | unsigned int len; | ||
417 | struct hostif_msg_to_sensor *msg = (struct hostif_msg_to_sensor *)buf; | ||
418 | int rv; | 416 | int rv; |
419 | int i; | 417 | int i; |
420 | 418 | ||
@@ -426,14 +424,11 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id, | |||
426 | return; | 424 | return; |
427 | } | 425 | } |
428 | 426 | ||
429 | len = sizeof(struct hostif_msg_to_sensor); | 427 | msg.hdr.command = (report_type == HID_FEATURE_REPORT) ? |
430 | |||
431 | memset(msg, 0, sizeof(struct hostif_msg_to_sensor)); | ||
432 | msg->hdr.command = (report_type == HID_FEATURE_REPORT) ? | ||
433 | HOSTIF_GET_FEATURE_REPORT : HOSTIF_GET_INPUT_REPORT; | 428 | HOSTIF_GET_FEATURE_REPORT : HOSTIF_GET_INPUT_REPORT; |
434 | for (i = 0; i < client_data->num_hid_devices; ++i) { | 429 | for (i = 0; i < client_data->num_hid_devices; ++i) { |
435 | if (hid == client_data->hid_sensor_hubs[i]) { | 430 | if (hid == client_data->hid_sensor_hubs[i]) { |
436 | msg->hdr.device_id = | 431 | msg.hdr.device_id = |
437 | client_data->hid_devices[i].dev_id; | 432 | client_data->hid_devices[i].dev_id; |
438 | break; | 433 | break; |
439 | } | 434 | } |
@@ -442,8 +437,9 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id, | |||
442 | if (i == client_data->num_hid_devices) | 437 | if (i == client_data->num_hid_devices) |
443 | return; | 438 | return; |
444 | 439 | ||
445 | msg->report_id = report_id; | 440 | msg.report_id = report_id; |
446 | rv = ishtp_cl_send(client_data->hid_ishtp_cl, buf, len); | 441 | rv = ishtp_cl_send(client_data->hid_ishtp_cl, (uint8_t *)&msg, |
442 | sizeof(msg)); | ||
447 | if (rv) | 443 | if (rv) |
448 | hid_ishtp_trace(client_data, "%s hid %p send failed\n", | 444 | hid_ishtp_trace(client_data, "%s hid %p send failed\n", |
449 | __func__, hid); | 445 | __func__, hid); |
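
The hid_ishtp_get_report() change above drops a function-scoped static buf[10] that every caller shared and instead builds the request in a zero-initialized struct on the stack, sending exactly sizeof(msg) bytes. A minimal sketch of the same pattern, with an illustrative struct layout and a send_bytes() stand-in rather than the real ISHTP call:

#include <stdint.h>
#include <stdio.h>

struct msg_hdr { uint8_t command; uint8_t device_id; };
struct msg_to_sensor { struct msg_hdr hdr; uint8_t report_id; };

/* Stand-in for ishtp_cl_send(): just dumps the bytes it was handed. */
static int send_bytes(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("(%zu bytes)\n", len);
	return 0;
}

int main(void)
{
	/* On-stack, zero-initialized: no sharing between callers, and
	 * sizeof(msg) always matches the struct, unlike a static buf[10]. */
	struct msg_to_sensor msg = {0};

	msg.hdr.command = 0x11;
	msg.hdr.device_id = 3;
	msg.report_id = 1;

	return send_bytes((const uint8_t *)&msg, sizeof(msg));
}

Keeping the message on the stack also removes the possibility of two concurrent requests scribbling over the same static storage.
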
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index f272cdd9bd55..2623a567ffba 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c | |||
@@ -418,7 +418,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev, | |||
418 | list_del(&device->device_link); | 418 | list_del(&device->device_link); |
419 | spin_unlock_irqrestore(&dev->device_list_lock, flags); | 419 | spin_unlock_irqrestore(&dev->device_list_lock, flags); |
420 | dev_err(dev->devc, "Failed to register ISHTP client device\n"); | 420 | dev_err(dev->devc, "Failed to register ISHTP client device\n"); |
421 | kfree(device); | 421 | put_device(&device->dev); |
422 | return NULL; | 422 | return NULL; |
423 | } | 423 | } |
424 | 424 | ||
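
The one-line bus.c fix above follows the usual driver-model rule: once the device object has been initialized and registration attempted, its memory belongs to the refcount and must be dropped with put_device() so the release callback runs, rather than freed directly. A toy refcounting sketch of why the error path wants put() instead of free() (simplified, hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
	printf("release: freeing object\n");
	free(o);
}

static void get_obj(struct obj *o) { o->refcount++; }

static void put_obj(struct obj *o)
{
	if (--o->refcount == 0)
		o->release(o);	/* the only place the memory is freed */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	if (!o)
		return 1;
	o->refcount = 1;	/* reference taken at "registration" time */
	o->release = obj_release;

	get_obj(o);		/* e.g. a bus core holding its own reference */
	put_obj(o);

	/* Error path: drop the last reference instead of calling free(o),
	 * otherwise the release callback (and any cleanup in it) is skipped. */
	put_obj(o);
	return 0;
}
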
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index b54ef1ffcbec..ee7a37eb159a 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c | |||
@@ -1213,8 +1213,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom, | |||
1213 | devres->root = root; | 1213 | devres->root = root; |
1214 | 1214 | ||
1215 | error = sysfs_create_group(devres->root, group); | 1215 | error = sysfs_create_group(devres->root, group); |
1216 | if (error) | 1216 | if (error) { |
1217 | devres_free(devres); | ||
1217 | return error; | 1218 | return error; |
1219 | } | ||
1218 | 1220 | ||
1219 | devres_add(&wacom->hdev->dev, devres); | 1221 | devres_add(&wacom->hdev->dev, devres); |
1220 | 1222 | ||
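
The wacom hunk closes a small leak: a resource node obtained from devres_alloc() is only handed over to the device by devres_add(), so if sysfs_create_group() fails first, the node has to be released by the caller. A rough user-space sketch of that ownership rule, with toy helpers in place of the devres API:

#include <stdio.h>
#include <stdlib.h>

/* Toy resource node: either handed to the "device" or freed by the caller. */
struct res_node { const char *name; };

static struct res_node *res_alloc(const char *name)
{
	struct res_node *r = malloc(sizeof(*r));
	if (r)
		r->name = name;
	return r;
}

static int create_group(int fail) { return fail ? -1 : 0; }

static int demo(int fail)
{
	struct res_node *r = res_alloc("sysfs group");
	if (!r)
		return -1;

	if (create_group(fail)) {
		free(r);	/* not yet owned by the device: free it here */
		return -1;
	}

	printf("devres_add(%s): device now owns the node\n", r->name);
	free(r);		/* stands in for the device releasing it later */
	return 0;
}

int main(void)
{
	demo(0);
	demo(1);
	return 0;
}
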
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index f249a4428458..6ec307c93ece 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -272,7 +272,7 @@ config SENSORS_K8TEMP | |||
272 | 272 | ||
273 | config SENSORS_K10TEMP | 273 | config SENSORS_K10TEMP |
274 | tristate "AMD Family 10h+ temperature sensor" | 274 | tristate "AMD Family 10h+ temperature sensor" |
275 | depends on X86 && PCI | 275 | depends on X86 && PCI && AMD_NB |
276 | help | 276 | help |
277 | If you say yes here you get support for the temperature | 277 | If you say yes here you get support for the temperature |
278 | sensor(s) inside your CPU. Supported are later revisions of | 278 | sensor(s) inside your CPU. Supported are later revisions of |
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index d2cc55e21374..3b73dee6fdc6 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <asm/amd_nb.h> | ||
26 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
27 | 28 | ||
28 | MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor"); | 29 | MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor"); |
@@ -40,8 +41,8 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); | |||
40 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 | 41 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 |
41 | #endif | 42 | #endif |
42 | 43 | ||
43 | #ifndef PCI_DEVICE_ID_AMD_17H_RR_NB | 44 | #ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 |
44 | #define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0 | 45 | #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb |
45 | #endif | 46 | #endif |
46 | 47 | ||
47 | /* CPUID function 0x80000001, ebx */ | 48 | /* CPUID function 0x80000001, ebx */ |
@@ -63,10 +64,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); | |||
63 | #define NB_CAP_HTC 0x00000400 | 64 | #define NB_CAP_HTC 0x00000400 |
64 | 65 | ||
65 | /* | 66 | /* |
66 | * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE | 67 | * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL |
67 | * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature | 68 | * and REG_REPORTED_TEMPERATURE have been moved to |
68 | * Control] | 69 | * D0F0xBC_xD820_0C64 [Hardware Temperature Control] |
70 | * D0F0xBC_xD820_0CA4 [Reported Temperature Control] | ||
69 | */ | 71 | */ |
72 | #define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64 | ||
70 | #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 | 73 | #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 |
71 | 74 | ||
72 | /* F17h M01h Access through SMN */ | 75 |
@@ -74,6 +77,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); | |||
74 | 77 | ||
75 | struct k10temp_data { | 78 | struct k10temp_data { |
76 | struct pci_dev *pdev; | 79 | struct pci_dev *pdev; |
80 | void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); | ||
77 | void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); | 81 | void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); |
78 | int temp_offset; | 82 | int temp_offset; |
79 | u32 temp_adjust_mask; | 83 | u32 temp_adjust_mask; |
@@ -98,6 +102,11 @@ static const struct tctl_offset tctl_offset_table[] = { | |||
98 | { 0x17, "AMD Ryzen Threadripper 1910", 10000 }, | 102 | { 0x17, "AMD Ryzen Threadripper 1910", 10000 }, |
99 | }; | 103 | }; |
100 | 104 | ||
105 | static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval) | ||
106 | { | ||
107 | pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval); | ||
108 | } | ||
109 | |||
101 | static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval) | 110 | static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval) |
102 | { | 111 | { |
103 | pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval); | 112 | pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval); |
@@ -114,6 +123,12 @@ static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn, | |||
114 | mutex_unlock(&nb_smu_ind_mutex); | 123 | mutex_unlock(&nb_smu_ind_mutex); |
115 | } | 124 | } |
116 | 125 | ||
126 | static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval) | ||
127 | { | ||
128 | amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8, | ||
129 | F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval); | ||
130 | } | ||
131 | |||
117 | static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) | 132 | static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) |
118 | { | 133 | { |
119 | amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8, | 134 | amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8, |
@@ -122,8 +137,8 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) | |||
122 | 137 | ||
123 | static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) | 138 | static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) |
124 | { | 139 | { |
125 | amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60, | 140 | amd_smn_read(amd_pci_dev_to_node_id(pdev), |
126 | F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); | 141 | F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); |
127 | } | 142 | } |
128 | 143 | ||
129 | static ssize_t temp1_input_show(struct device *dev, | 144 | static ssize_t temp1_input_show(struct device *dev, |
@@ -160,8 +175,7 @@ static ssize_t show_temp_crit(struct device *dev, | |||
160 | u32 regval; | 175 | u32 regval; |
161 | int value; | 176 | int value; |
162 | 177 | ||
163 | pci_read_config_dword(data->pdev, | 178 | data->read_htcreg(data->pdev, ®val); |
164 | REG_HARDWARE_THERMAL_CONTROL, ®val); | ||
165 | value = ((regval >> 16) & 0x7f) * 500 + 52000; | 179 | value = ((regval >> 16) & 0x7f) * 500 + 52000; |
166 | if (show_hyst) | 180 | if (show_hyst) |
167 | value -= ((regval >> 24) & 0xf) * 500; | 181 | value -= ((regval >> 24) & 0xf) * 500; |
@@ -181,13 +195,18 @@ static umode_t k10temp_is_visible(struct kobject *kobj, | |||
181 | struct pci_dev *pdev = data->pdev; | 195 | struct pci_dev *pdev = data->pdev; |
182 | 196 | ||
183 | if (index >= 2) { | 197 | if (index >= 2) { |
184 | u32 reg_caps, reg_htc; | 198 | u32 reg; |
199 | |||
200 | if (!data->read_htcreg) | ||
201 | return 0; | ||
185 | 202 | ||
186 | pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, | 203 | pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, |
187 | ®_caps); | 204 | ®); |
188 | pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, | 205 | if (!(reg & NB_CAP_HTC)) |
189 | ®_htc); | 206 | return 0; |
190 | if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE)) | 207 | |
208 | data->read_htcreg(data->pdev, ®); | ||
209 | if (!(reg & HTC_ENABLE)) | ||
191 | return 0; | 210 | return 0; |
192 | } | 211 | } |
193 | return attr->mode; | 212 | return attr->mode; |
@@ -268,11 +287,13 @@ static int k10temp_probe(struct pci_dev *pdev, | |||
268 | 287 | ||
269 | if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || | 288 | if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || |
270 | boot_cpu_data.x86_model == 0x70)) { | 289 | boot_cpu_data.x86_model == 0x70)) { |
290 | data->read_htcreg = read_htcreg_nb_f15; | ||
271 | data->read_tempreg = read_tempreg_nb_f15; | 291 | data->read_tempreg = read_tempreg_nb_f15; |
272 | } else if (boot_cpu_data.x86 == 0x17) { | 292 | } else if (boot_cpu_data.x86 == 0x17) { |
273 | data->temp_adjust_mask = 0x80000; | 293 | data->temp_adjust_mask = 0x80000; |
274 | data->read_tempreg = read_tempreg_nb_f17; | 294 | data->read_tempreg = read_tempreg_nb_f17; |
275 | } else { | 295 | } else { |
296 | data->read_htcreg = read_htcreg_pci; | ||
276 | data->read_tempreg = read_tempreg_pci; | 297 | data->read_tempreg = read_tempreg_pci; |
277 | } | 298 | } |
278 | 299 | ||
@@ -302,7 +323,7 @@ static const struct pci_device_id k10temp_id_table[] = { | |||
302 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, | 323 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, |
303 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, | 324 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, |
304 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, | 325 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, |
305 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) }, | 326 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, |
306 | {} | 327 | {} |
307 | }; | 328 | }; |
308 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); | 329 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); |
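
The k10temp changes route both temperature and HTC register accesses through per-family function pointers chosen at probe time, and k10temp_is_visible() now hides the temp*_crit attributes when no read_htcreg helper was installed (the Family 17h case). A compact sketch of that dispatch pattern; the register values below are made up:

#include <stdint.h>
#include <stdio.h>

struct chip_data {
	void (*read_htcreg)(uint32_t *regval);	/* NULL if HTC is not readable */
	void (*read_tempreg)(uint32_t *regval);
};

static void read_tempreg_pci(uint32_t *v)    { *v = 0x30000000; }  /* fake value */
static void read_htcreg_pci(uint32_t *v)     { *v = 0x00010000; }
static void read_tempreg_nb_f17(uint32_t *v) { *v = 0x28000000; }

static int htc_visible(const struct chip_data *d)
{
	uint32_t reg;

	if (!d->read_htcreg)	/* no helper installed: hide temp*_crit */
		return 0;
	d->read_htcreg(&reg);
	return 1;
}

int main(void)
{
	struct chip_data f15 = { read_htcreg_pci, read_tempreg_pci };
	struct chip_data f17 = { NULL, read_tempreg_nb_f17 };	/* no HTC path */

	printf("f15 htc visible: %d\n", htc_visible(&f15));
	printf("f17 htc visible: %d\n", htc_visible(&f17));
	return 0;
}
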
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index fd36c39ddf4e..0cdba29ae0a9 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
@@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) | |||
209 | i2c_dw_disable_int(dev); | 209 | i2c_dw_disable_int(dev); |
210 | 210 | ||
211 | /* Enable the adapter */ | 211 | /* Enable the adapter */ |
212 | __i2c_dw_enable_and_wait(dev, true); | 212 | __i2c_dw_enable(dev, true); |
213 | |||
214 | /* Dummy read to avoid the register getting stuck on Bay Trail */ | ||
215 | dw_readl(dev, DW_IC_ENABLE_STATUS); | ||
213 | 216 | ||
214 | /* Clear and enable interrupts */ | 217 | /* Clear and enable interrupts */ |
215 | dw_readl(dev, DW_IC_CLR_INTR); | 218 | dw_readl(dev, DW_IC_CLR_INTR); |
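
The designware hunk stops polling for the enable to complete and instead performs one throwaway read of DW_IC_ENABLE_STATUS, which the patch ties to the register getting stuck on Bay Trail hardware. A stripped-down sketch of the resulting sequence, with the MMIO helpers reduced to an array-backed stub:

#include <stdint.h>
#include <stdio.h>

#define DW_IC_ENABLE		0x6c
#define DW_IC_ENABLE_STATUS	0x9c

static uint32_t regs[0x100 / 4];

static void dw_writel(uint32_t val, uint32_t off) { regs[off / 4] = val; }
static uint32_t dw_readl(uint32_t off)            { return regs[off / 4]; }

static void xfer_init(void)
{
	/* Enable the adapter without polling for completion ... */
	dw_writel(1, DW_IC_ENABLE);

	/* ... then issue one throwaway read of ENABLE_STATUS; the value is
	 * ignored, the read itself is what the workaround relies on. */
	(void)dw_readl(DW_IC_ENABLE_STATUS);
}

int main(void)
{
	xfer_init();
	printf("enable=%u\n", dw_readl(DW_IC_ENABLE));
	return 0;
}
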
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 2aa0e83174c5..dae8ac618a52 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c | |||
@@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap, | |||
564 | * TODO: We could potentially loop and retry in the case | 564 | * TODO: We could potentially loop and retry in the case |
565 | * of MSP_TWI_XFER_TIMEOUT. | 565 | * of MSP_TWI_XFER_TIMEOUT. |
566 | */ | 566 | */ |
567 | return -1; | 567 | return -EIO; |
568 | } | 568 | } |
569 | 569 | ||
570 | return 0; | 570 | return num; |
571 | } | 571 | } |
572 | 572 | ||
573 | static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter) | 573 | static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter) |
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c index e4be86b3de9a..7235c7302bb7 100644 --- a/drivers/i2c/busses/i2c-viperboard.c +++ b/drivers/i2c/busses/i2c-viperboard.c | |||
@@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, | |||
337 | } | 337 | } |
338 | mutex_unlock(&vb->lock); | 338 | mutex_unlock(&vb->lock); |
339 | } | 339 | } |
340 | return 0; | 340 | return num; |
341 | error: | 341 | error: |
342 | mutex_unlock(&vb->lock); | 342 | mutex_unlock(&vb->lock); |
343 | return error; | 343 | return error; |
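
Both i2c hunks above (pmcmsp and viperboard) fix the master_xfer() return convention: an adapter reports the number of messages it completed on success and a negative errno on failure, never a bare 0 or -1. A tiny sketch of a caller relying on that contract, using a toy adapter rather than a real bus driver:

#include <errno.h>
#include <stdio.h>

struct i2c_msg { int addr; int len; };

/* Toy xfer: mimics the fixed convention - number of messages on success,
 * negative errno on failure. */
static int toy_master_xfer(struct i2c_msg *msgs, int num, int fail)
{
	if (fail)
		return -EIO;
	return num;
}

int main(void)
{
	struct i2c_msg msgs[2] = { { 0x50, 1 }, { 0x50, 4 } };

	int ret = toy_master_xfer(msgs, 2, 0);
	if (ret == 2)
		printf("both messages transferred\n");

	ret = toy_master_xfer(msgs, 2, 1);
	if (ret < 0)
		printf("transfer failed: %d\n", ret);
	return 0;
}
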
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index a9126b3cda61..7c3b4740b94b 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c | |||
@@ -445,10 +445,17 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, | |||
445 | msgs[1].buf = buffer; | 445 | msgs[1].buf = buffer; |
446 | 446 | ||
447 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | 447 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); |
448 | if (ret < 0) | 448 | if (ret < 0) { |
449 | dev_err(&client->adapter->dev, "i2c read failed\n"); | 449 | /* Getting a NACK is unfortunately normal with some DSDTs */
450 | else | 450 | if (ret == -EREMOTEIO) |
451 | dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", | ||
452 | data_len, client->addr, cmd, ret); | ||
453 | else | ||
454 | dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", | ||
455 | data_len, client->addr, cmd, ret); | ||
456 | } else { | ||
451 | memcpy(data, buffer, data_len); | 457 | memcpy(data, buffer, data_len); |
458 | } | ||
452 | 459 | ||
453 | kfree(buffer); | 460 | kfree(buffer); |
454 | return ret; | 461 | return ret; |
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 4e63c6f6c04d..d030ce3025a6 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c | |||
@@ -250,7 +250,9 @@ void bch_debug_exit(void) | |||
250 | 250 | ||
251 | int __init bch_debug_init(struct kobject *kobj) | 251 | int __init bch_debug_init(struct kobject *kobj) |
252 | { | 252 | { |
253 | bcache_debug = debugfs_create_dir("bcache", NULL); | 253 | if (!IS_ENABLED(CONFIG_DEBUG_FS)) |
254 | return 0; | ||
254 | 255 | ||
256 | bcache_debug = debugfs_create_dir("bcache", NULL); | ||
255 | return IS_ERR_OR_NULL(bcache_debug); | 257 | return IS_ERR_OR_NULL(bcache_debug); |
256 | } | 258 | } |
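
The bcache hunk returns early from bch_debug_init() when debugfs is compiled out, so the IS_ERR_OR_NULL() check on the (stubbed) directory no longer fails module init. A sketch of the same compile-time guard, with the config symbol faked as a plain define:

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(CONFIG_DEBUG_FS). */
#define CONFIG_DEBUG_FS_ENABLED 0

static void *debugfs_create_dir_stub(const char *name)
{
	(void)name;
	return NULL;	/* what the stubbed-out debugfs API yields */
}

static int debug_init(void)
{
	if (!CONFIG_DEBUG_FS_ENABLED)
		return 0;	/* nothing to do, and nothing to misreport */

	return debugfs_create_dir_stub("bcache") == NULL;
}

int main(void)
{
	printf("debug_init() = %d\n", debug_init());
	return 0;
}
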
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 12aa9ca21d8c..dc385b70e4c3 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -1681,8 +1681,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign | |||
1681 | 1681 | ||
1682 | if (block_size <= KMALLOC_MAX_SIZE && | 1682 | if (block_size <= KMALLOC_MAX_SIZE && |
1683 | (block_size < PAGE_SIZE || !is_power_of_2(block_size))) { | 1683 | (block_size < PAGE_SIZE || !is_power_of_2(block_size))) { |
1684 | snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", c->block_size); | 1684 | unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE); |
1685 | c->slab_cache = kmem_cache_create(slab_name, c->block_size, ARCH_KMALLOC_MINALIGN, | 1685 | snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size); |
1686 | c->slab_cache = kmem_cache_create(slab_name, block_size, align, | ||
1686 | SLAB_RECLAIM_ACCOUNT, NULL); | 1687 | SLAB_RECLAIM_ACCOUNT, NULL); |
1687 | if (!c->slab_cache) { | 1688 | if (!c->slab_cache) { |
1688 | r = -ENOMEM; | 1689 | r = -ENOMEM; |
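
The dm-bufio change sizes the slab alignment from the block size itself: the largest power of two that divides block_size, capped at PAGE_SIZE, i.e. min(1 << __ffs(block_size), PAGE_SIZE). A short worked example of that computation (user-space ffs() is 1-based, hence the -1):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define PAGE_SIZE 4096u

/* Same idea as min(1U << __ffs(block_size), (unsigned)PAGE_SIZE). */
static unsigned slab_align(unsigned block_size)
{
	unsigned low_bit = 1u << (ffs((int)block_size) - 1);

	return low_bit < PAGE_SIZE ? low_bit : PAGE_SIZE;
}

int main(void)
{
	unsigned sizes[] = { 512, 1536, 3072, 4096, 12288 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("block_size=%5u -> align=%u\n", sizes[i], slab_align(sizes[i]));
	/* e.g. 1536 (0x600) aligns to 512, 3072 (0xc00) to 1024, 12288 to 4096 */
	return 0;
}
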
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c index 1d0af0a21fc7..84814e819e4c 100644 --- a/drivers/md/dm-cache-background-tracker.c +++ b/drivers/md/dm-cache-background-tracker.c | |||
@@ -166,7 +166,7 @@ static bool max_work_reached(struct background_tracker *b) | |||
166 | atomic_read(&b->pending_demotes) >= b->max_work; | 166 | atomic_read(&b->pending_demotes) >= b->max_work; |
167 | } | 167 | } |
168 | 168 | ||
169 | struct bt_work *alloc_work(struct background_tracker *b) | 169 | static struct bt_work *alloc_work(struct background_tracker *b) |
170 | { | 170 | { |
171 | if (max_work_reached(b)) | 171 | if (max_work_reached(b)) |
172 | return NULL; | 172 | return NULL; |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 77d9fe58dae2..514fb4aec5d1 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
@@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str | |||
2440 | unsigned i; | 2440 | unsigned i; |
2441 | for (i = 0; i < ic->journal_sections; i++) | 2441 | for (i = 0; i < ic->journal_sections; i++) |
2442 | kvfree(sl[i]); | 2442 | kvfree(sl[i]); |
2443 | kfree(sl); | 2443 | kvfree(sl); |
2444 | } | 2444 | } |
2445 | 2445 | ||
2446 | static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl) | 2446 | static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl) |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 580c49cc8079..5903e492bb34 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -23,6 +23,8 @@ | |||
23 | 23 | ||
24 | #define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ | 24 | #define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ |
25 | 25 | ||
26 | #define MAX_NR_MIRRORS (DM_KCOPYD_MAX_REGIONS + 1) | ||
27 | |||
26 | #define DM_RAID1_HANDLE_ERRORS 0x01 | 28 | #define DM_RAID1_HANDLE_ERRORS 0x01 |
27 | #define DM_RAID1_KEEP_LOG 0x02 | 29 | #define DM_RAID1_KEEP_LOG 0x02 |
28 | #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) | 30 | #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) |
@@ -255,7 +257,7 @@ static int mirror_flush(struct dm_target *ti) | |||
255 | unsigned long error_bits; | 257 | unsigned long error_bits; |
256 | 258 | ||
257 | unsigned int i; | 259 | unsigned int i; |
258 | struct dm_io_region io[ms->nr_mirrors]; | 260 | struct dm_io_region io[MAX_NR_MIRRORS]; |
259 | struct mirror *m; | 261 | struct mirror *m; |
260 | struct dm_io_request io_req = { | 262 | struct dm_io_request io_req = { |
261 | .bi_op = REQ_OP_WRITE, | 263 | .bi_op = REQ_OP_WRITE, |
@@ -651,7 +653,7 @@ static void write_callback(unsigned long error, void *context) | |||
651 | static void do_write(struct mirror_set *ms, struct bio *bio) | 653 | static void do_write(struct mirror_set *ms, struct bio *bio) |
652 | { | 654 | { |
653 | unsigned int i; | 655 | unsigned int i; |
654 | struct dm_io_region io[ms->nr_mirrors], *dest = io; | 656 | struct dm_io_region io[MAX_NR_MIRRORS], *dest = io; |
655 | struct mirror *m; | 657 | struct mirror *m; |
656 | struct dm_io_request io_req = { | 658 | struct dm_io_request io_req = { |
657 | .bi_op = REQ_OP_WRITE, | 659 | .bi_op = REQ_OP_WRITE, |
@@ -1083,7 +1085,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1083 | argc -= args_used; | 1085 | argc -= args_used; |
1084 | 1086 | ||
1085 | if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 || | 1087 | if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 || |
1086 | nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) { | 1088 | nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) { |
1087 | ti->error = "Invalid number of mirrors"; | 1089 | ti->error = "Invalid number of mirrors"; |
1088 | dm_dirty_log_destroy(dl); | 1090 | dm_dirty_log_destroy(dl); |
1089 | return -EINVAL; | 1091 | return -EINVAL; |
@@ -1404,7 +1406,7 @@ static void mirror_status(struct dm_target *ti, status_type_t type, | |||
1404 | int num_feature_args = 0; | 1406 | int num_feature_args = 0; |
1405 | struct mirror_set *ms = (struct mirror_set *) ti->private; | 1407 | struct mirror_set *ms = (struct mirror_set *) ti->private; |
1406 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); | 1408 | struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); |
1407 | char buffer[ms->nr_mirrors + 1]; | 1409 | char buffer[MAX_NR_MIRRORS + 1]; |
1408 | 1410 | ||
1409 | switch (type) { | 1411 | switch (type) { |
1410 | case STATUSTYPE_INFO: | 1412 | case STATUSTYPE_INFO: |
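
The dm-raid1 hunks replace variable-length arrays sized by ms->nr_mirrors with fixed arrays of MAX_NR_MIRRORS entries, which is safe because the constructor already rejects tables with more than DM_KCOPYD_MAX_REGIONS + 1 mirrors. A minimal sketch of trading a VLA for a bounded fixed array; the constants below are illustrative, not the kernel's:

#include <stdio.h>

#define KCOPYD_MAX_REGIONS 8
#define MAX_NR_MIRRORS (KCOPYD_MAX_REGIONS + 1)

struct io_region { int mirror; long sector; };

static int do_write(unsigned nr_mirrors)
{
	/* Fixed upper bound instead of 'struct io_region io[nr_mirrors]':
	 * no stack growth controlled by runtime input. */
	struct io_region io[MAX_NR_MIRRORS];

	if (nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS)
		return -1;	/* same bound the table constructor enforces */

	for (unsigned i = 0; i < nr_mirrors; i++)
		io[i] = (struct io_region){ .mirror = (int)i, .sector = 0 };

	printf("queued writes to %u mirrors\n", nr_mirrors);
	return 0;
}

int main(void)
{
	do_write(3);
	do_write(42);	/* rejected */
	return 0;
}
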
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4ea404dbcf0b..0a7b0107ca78 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1020,7 +1020,8 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) | |||
1020 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); | 1020 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); |
1021 | 1021 | ||
1022 | static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, | 1022 | static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, |
1023 | sector_t sector, int *srcu_idx) | 1023 | sector_t sector, int *srcu_idx) |
1024 | __acquires(md->io_barrier) | ||
1024 | { | 1025 | { |
1025 | struct dm_table *map; | 1026 | struct dm_table *map; |
1026 | struct dm_target *ti; | 1027 | struct dm_target *ti; |
@@ -1037,7 +1038,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, | |||
1037 | } | 1038 | } |
1038 | 1039 | ||
1039 | static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, | 1040 | static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, |
1040 | long nr_pages, void **kaddr, pfn_t *pfn) | 1041 | long nr_pages, void **kaddr, pfn_t *pfn) |
1041 | { | 1042 | { |
1042 | struct mapped_device *md = dax_get_private(dax_dev); | 1043 | struct mapped_device *md = dax_get_private(dax_dev); |
1043 | sector_t sector = pgoff * PAGE_SECTORS; | 1044 | sector_t sector = pgoff * PAGE_SECTORS; |
@@ -1065,7 +1066,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, | |||
1065 | } | 1066 | } |
1066 | 1067 | ||
1067 | static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, | 1068 | static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, |
1068 | void *addr, size_t bytes, struct iov_iter *i) | 1069 | void *addr, size_t bytes, struct iov_iter *i) |
1069 | { | 1070 | { |
1070 | struct mapped_device *md = dax_get_private(dax_dev); | 1071 | struct mapped_device *md = dax_get_private(dax_dev); |
1071 | sector_t sector = pgoff * PAGE_SECTORS; | 1072 | sector_t sector = pgoff * PAGE_SECTORS; |
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index a4c9c8297a6d..918d4fb742d1 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
@@ -717,6 +717,7 @@ struct cxl { | |||
717 | bool perst_select_user; | 717 | bool perst_select_user; |
718 | bool perst_same_image; | 718 | bool perst_same_image; |
719 | bool psl_timebase_synced; | 719 | bool psl_timebase_synced; |
720 | bool tunneled_ops_supported; | ||
720 | 721 | ||
721 | /* | 722 | /* |
722 | * number of contexts mapped on to this card. Possible values are: | 723 | * number of contexts mapped on to this card. Possible values are: |
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 83f1d08058fc..4d6736f9d463 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
@@ -1742,6 +1742,15 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) | |||
1742 | /* Required for devices using CAPP DMA mode, harmless for others */ | 1742 | /* Required for devices using CAPP DMA mode, harmless for others */ |
1743 | pci_set_master(dev); | 1743 | pci_set_master(dev); |
1744 | 1744 | ||
1745 | adapter->tunneled_ops_supported = false; | ||
1746 | |||
1747 | if (cxl_is_power9()) { | ||
1748 | if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1)) | ||
1749 | dev_info(&dev->dev, "Tunneled operations unsupported\n"); | ||
1750 | else | ||
1751 | adapter->tunneled_ops_supported = true; | ||
1752 | } | ||
1753 | |||
1745 | if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) | 1754 | if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) |
1746 | goto err; | 1755 | goto err; |
1747 | 1756 | ||
@@ -1768,6 +1777,9 @@ static void cxl_deconfigure_adapter(struct cxl *adapter) | |||
1768 | { | 1777 | { |
1769 | struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); | 1778 | struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); |
1770 | 1779 | ||
1780 | if (cxl_is_power9()) | ||
1781 | pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0); | ||
1782 | |||
1771 | cxl_native_release_psl_err_irq(adapter); | 1783 | cxl_native_release_psl_err_irq(adapter); |
1772 | cxl_unmap_adapter_regs(adapter); | 1784 | cxl_unmap_adapter_regs(adapter); |
1773 | 1785 | ||
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 95285b7f636f..4b5a4c5d3c01 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c | |||
@@ -78,6 +78,15 @@ static ssize_t psl_timebase_synced_show(struct device *device, | |||
78 | return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); | 78 | return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); |
79 | } | 79 | } |
80 | 80 | ||
81 | static ssize_t tunneled_ops_supported_show(struct device *device, | ||
82 | struct device_attribute *attr, | ||
83 | char *buf) | ||
84 | { | ||
85 | struct cxl *adapter = to_cxl_adapter(device); | ||
86 | |||
87 | return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported); | ||
88 | } | ||
89 | |||
81 | static ssize_t reset_adapter_store(struct device *device, | 90 | static ssize_t reset_adapter_store(struct device *device, |
82 | struct device_attribute *attr, | 91 | struct device_attribute *attr, |
83 | const char *buf, size_t count) | 92 | const char *buf, size_t count) |
@@ -183,6 +192,7 @@ static struct device_attribute adapter_attrs[] = { | |||
183 | __ATTR_RO(base_image), | 192 | __ATTR_RO(base_image), |
184 | __ATTR_RO(image_loaded), | 193 | __ATTR_RO(image_loaded), |
185 | __ATTR_RO(psl_timebase_synced), | 194 | __ATTR_RO(psl_timebase_synced), |
195 | __ATTR_RO(tunneled_ops_supported), | ||
186 | __ATTR_RW(load_image_on_perst), | 196 | __ATTR_RW(load_image_on_perst), |
187 | __ATTR_RW(perst_reloads_same_image), | 197 | __ATTR_RW(perst_reloads_same_image), |
188 | __ATTR(reset, S_IWUSR, NULL, reset_adapter_store), | 198 | __ATTR(reset, S_IWUSR, NULL, reset_adapter_store), |
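
Together with the cxl.h and pci.c hunks, the sysfs change exports the tunneled_ops_supported flag (probed on POWER9 via pnv_pci_set_tunnel_bar()) as the read-only attribute documented at the top of this merge. A trivial sketch of formatting such a boolean attribute the same way the other show() helpers do:

#include <stdbool.h>
#include <stdio.h>

struct adapter { bool tunneled_ops_supported; };

/* Mirrors the show() pattern: print the flag as "0" or "1" plus newline. */
static int tunneled_ops_supported_show(const struct adapter *a,
				       char *buf, unsigned long size)
{
	return snprintf(buf, size, "%i\n", a->tunneled_ops_supported);
}

int main(void)
{
	struct adapter a = { .tunneled_ops_supported = true };
	char buf[8];

	tunneled_ops_supported_show(&a, buf, sizeof(buf));
	printf("%s", buf);
	return 0;
}
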
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 0c125f207aea..33053b0d1fdf 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
@@ -518,7 +518,7 @@ static int at24_get_pdata(struct device *dev, struct at24_platform_data *pdata) | |||
518 | if (of_node && of_match_device(at24_of_match, dev)) | 518 | if (of_node && of_match_device(at24_of_match, dev)) |
519 | cdata = of_device_get_match_data(dev); | 519 | cdata = of_device_get_match_data(dev); |
520 | else if (id) | 520 | else if (id) |
521 | cdata = (void *)&id->driver_data; | 521 | cdata = (void *)id->driver_data; |
522 | else | 522 | else |
523 | cdata = acpi_device_get_match_data(dev); | 523 | cdata = acpi_device_get_match_data(dev); |
524 | 524 | ||
diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c index 9c159f0dd9a6..321137158ff3 100644 --- a/drivers/mtd/nand/onenand/omap2.c +++ b/drivers/mtd/nand/onenand/omap2.c | |||
@@ -375,56 +375,42 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, | |||
375 | { | 375 | { |
376 | struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); | 376 | struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); |
377 | struct onenand_chip *this = mtd->priv; | 377 | struct onenand_chip *this = mtd->priv; |
378 | dma_addr_t dma_src, dma_dst; | 378 | struct device *dev = &c->pdev->dev; |
379 | int bram_offset; | ||
380 | void *buf = (void *)buffer; | 379 | void *buf = (void *)buffer; |
380 | dma_addr_t dma_src, dma_dst; | ||
381 | int bram_offset, err; | ||
381 | size_t xtra; | 382 | size_t xtra; |
382 | int ret; | ||
383 | 383 | ||
384 | bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; | 384 | bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; |
385 | if (bram_offset & 3 || (size_t)buf & 3 || count < 384) | 385 | /* |
386 | goto out_copy; | 386 | * If the buffer address is not DMA-able, len is not long enough to make |
387 | 387 | * DMA transfers profitable or panic_write() may be in an interrupt | |
388 | /* panic_write() may be in an interrupt context */ | 388 | * context, fall back to PIO mode. |
389 | if (in_interrupt() || oops_in_progress) | 389 | */ |
390 | if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || | ||
391 | count < 384 || in_interrupt() || oops_in_progress ) | ||
390 | goto out_copy; | 392 | goto out_copy; |
391 | 393 | ||
392 | if (buf >= high_memory) { | ||
393 | struct page *p1; | ||
394 | |||
395 | if (((size_t)buf & PAGE_MASK) != | ||
396 | ((size_t)(buf + count - 1) & PAGE_MASK)) | ||
397 | goto out_copy; | ||
398 | p1 = vmalloc_to_page(buf); | ||
399 | if (!p1) | ||
400 | goto out_copy; | ||
401 | buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK); | ||
402 | } | ||
403 | |||
404 | xtra = count & 3; | 394 | xtra = count & 3; |
405 | if (xtra) { | 395 | if (xtra) { |
406 | count -= xtra; | 396 | count -= xtra; |
407 | memcpy(buf + count, this->base + bram_offset + count, xtra); | 397 | memcpy(buf + count, this->base + bram_offset + count, xtra); |
408 | } | 398 | } |
409 | 399 | ||
400 | dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE); | ||
410 | dma_src = c->phys_base + bram_offset; | 401 | dma_src = c->phys_base + bram_offset; |
411 | dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE); | ||
412 | if (dma_mapping_error(&c->pdev->dev, dma_dst)) { | ||
413 | dev_err(&c->pdev->dev, | ||
414 | "Couldn't DMA map a %d byte buffer\n", | ||
415 | count); | ||
416 | goto out_copy; | ||
417 | } | ||
418 | 402 | ||
419 | ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); | 403 | if (dma_mapping_error(dev, dma_dst)) { |
420 | dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE); | 404 | dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count); |
421 | |||
422 | if (ret) { | ||
423 | dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); | ||
424 | goto out_copy; | 405 | goto out_copy; |
425 | } | 406 | } |
426 | 407 | ||
427 | return 0; | 408 | err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); |
409 | dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE); | ||
410 | if (!err) | ||
411 | return 0; | ||
412 | |||
413 | dev_err(dev, "timeout waiting for DMA\n"); | ||
428 | 414 | ||
429 | out_copy: | 415 | out_copy: |
430 | memcpy(buf, this->base + bram_offset, count); | 416 | memcpy(buf, this->base + bram_offset, count); |
@@ -437,49 +423,34 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, | |||
437 | { | 423 | { |
438 | struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); | 424 | struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); |
439 | struct onenand_chip *this = mtd->priv; | 425 | struct onenand_chip *this = mtd->priv; |
440 | dma_addr_t dma_src, dma_dst; | 426 | struct device *dev = &c->pdev->dev; |
441 | int bram_offset; | ||
442 | void *buf = (void *)buffer; | 427 | void *buf = (void *)buffer; |
443 | int ret; | 428 | dma_addr_t dma_src, dma_dst; |
429 | int bram_offset, err; | ||
444 | 430 | ||
445 | bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; | 431 | bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; |
446 | if (bram_offset & 3 || (size_t)buf & 3 || count < 384) | 432 | /* |
447 | goto out_copy; | 433 | * If the buffer address is not DMA-able, len is not long enough to make |
448 | 434 | * DMA transfers profitable or panic_write() may be in an interrupt | |
449 | /* panic_write() may be in an interrupt context */ | 435 | * context, fall back to PIO mode. |
450 | if (in_interrupt() || oops_in_progress) | 436 | */ |
437 | if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || | ||
438 | count < 384 || in_interrupt() || oops_in_progress ) | ||
451 | goto out_copy; | 439 | goto out_copy; |
452 | 440 | ||
453 | if (buf >= high_memory) { | 441 | dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE); |
454 | struct page *p1; | ||
455 | |||
456 | if (((size_t)buf & PAGE_MASK) != | ||
457 | ((size_t)(buf + count - 1) & PAGE_MASK)) | ||
458 | goto out_copy; | ||
459 | p1 = vmalloc_to_page(buf); | ||
460 | if (!p1) | ||
461 | goto out_copy; | ||
462 | buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK); | ||
463 | } | ||
464 | |||
465 | dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE); | ||
466 | dma_dst = c->phys_base + bram_offset; | 442 | dma_dst = c->phys_base + bram_offset; |
467 | if (dma_mapping_error(&c->pdev->dev, dma_src)) { | 443 | if (dma_mapping_error(dev, dma_src)) { |
468 | dev_err(&c->pdev->dev, | 444 | dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count); |
469 | "Couldn't DMA map a %d byte buffer\n", | ||
470 | count); | ||
471 | return -1; | ||
472 | } | ||
473 | |||
474 | ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); | ||
475 | dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE); | ||
476 | |||
477 | if (ret) { | ||
478 | dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); | ||
479 | goto out_copy; | 445 | goto out_copy; |
480 | } | 446 | } |
481 | 447 | ||
482 | return 0; | 448 | err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); |
449 | dma_unmap_page(dev, dma_src, count, DMA_TO_DEVICE); | ||
450 | if (!err) | ||
451 | return 0; | ||
452 | |||
453 | dev_err(dev, "timeout waiting for DMA\n"); | ||
483 | 454 | ||
484 | out_copy: | 455 | out_copy: |
485 | memcpy(this->base + bram_offset, buf, count); | 456 | memcpy(this->base + bram_offset, buf, count); |
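
Both OneNAND bufferram paths above now funnel every case where DMA is not worthwhile or not allowed (buffer not virt_addr_valid(), misaligned, under 384 bytes, or running in interrupt/oops context) into the memcpy() fallback, instead of open-coding highmem and vmalloc handling. A condensed sketch of that decision; the 384-byte threshold comes from the driver, the predicates are stubbed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DMA_MIN_LEN 384		/* below this, DMA setup costs more than PIO */

static bool addr_dma_able(const void *p) { return ((uintptr_t)p & 3) == 0; }
static bool in_atomic_context(void)      { return false; }

static int read_bufferram(void *dst, const void *src, size_t count)
{
	if (!addr_dma_able(dst) || count < DMA_MIN_LEN || in_atomic_context()) {
		memcpy(dst, src, count);	/* PIO fallback */
		return 0;
	}
	/* ...map dst, program the DMA engine, wait, unmap... */
	printf("DMA transfer of %zu bytes\n", count);
	return 0;
}

int main(void)
{
	static char src[512] = "onenand buffer ram";
	static char dst[512];

	read_bufferram(dst, src, 64);	/* too small: PIO */
	read_bufferram(dst, src, 512);	/* DMA path */
	return 0;
}
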
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 1d779a35ac8e..ebb1d141b900 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c | |||
@@ -1074,7 +1074,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, | |||
1074 | return ret; | 1074 | return ret; |
1075 | 1075 | ||
1076 | ret = marvell_nfc_wait_op(chip, | 1076 | ret = marvell_nfc_wait_op(chip, |
1077 | chip->data_interface.timings.sdr.tPROG_max); | 1077 | PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); |
1078 | return ret; | 1078 | return ret; |
1079 | } | 1079 | } |
1080 | 1080 | ||
@@ -1194,11 +1194,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk, | |||
1194 | NDCB0_CMD2(NAND_CMD_READSTART); | 1194 | NDCB0_CMD2(NAND_CMD_READSTART); |
1195 | 1195 | ||
1196 | /* | 1196 | /* |
1197 | * Trigger the naked read operation only on the last chunk. | 1197 | * Trigger the monolithic read on the first chunk, then naked read on |
1198 | * Otherwise, use monolithic read. | 1198 | * intermediate chunks and finally a last naked read on the last chunk. |
1199 | */ | 1199 | */ |
1200 | if (lt->nchunks == 1 || (chunk < lt->nchunks - 1)) | 1200 | if (chunk == 0) |
1201 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW); | 1201 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW); |
1202 | else if (chunk < lt->nchunks - 1) | ||
1203 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW); | ||
1202 | else | 1204 | else |
1203 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); | 1205 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); |
1204 | 1206 | ||
@@ -1408,6 +1410,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk, | |||
1408 | struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); | 1410 | struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); |
1409 | struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); | 1411 | struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); |
1410 | const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; | 1412 | const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; |
1413 | u32 xtype; | ||
1411 | int ret; | 1414 | int ret; |
1412 | struct marvell_nfc_op nfc_op = { | 1415 | struct marvell_nfc_op nfc_op = { |
1413 | .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD, | 1416 | .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD, |
@@ -1423,7 +1426,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk, | |||
1423 | * last naked write. | 1426 | * last naked write. |
1424 | */ | 1427 | */ |
1425 | if (chunk == 0) { | 1428 | if (chunk == 0) { |
1426 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) | | 1429 | if (lt->nchunks == 1) |
1430 | xtype = XTYPE_MONOLITHIC_RW; | ||
1431 | else | ||
1432 | xtype = XTYPE_WRITE_DISPATCH; | ||
1433 | |||
1434 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) | | ||
1427 | NDCB0_ADDR_CYC(marvell_nand->addr_cyc) | | 1435 | NDCB0_ADDR_CYC(marvell_nand->addr_cyc) | |
1428 | NDCB0_CMD1(NAND_CMD_SEQIN); | 1436 | NDCB0_CMD1(NAND_CMD_SEQIN); |
1429 | nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page); | 1437 | nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page); |
@@ -1494,7 +1502,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd, | |||
1494 | } | 1502 | } |
1495 | 1503 | ||
1496 | ret = marvell_nfc_wait_op(chip, | 1504 | ret = marvell_nfc_wait_op(chip, |
1497 | chip->data_interface.timings.sdr.tPROG_max); | 1505 | PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); |
1498 | 1506 | ||
1499 | marvell_nfc_disable_hw_ecc(chip); | 1507 | marvell_nfc_disable_hw_ecc(chip); |
1500 | 1508 | ||
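
The two PSEC_TO_MSEC() wrappers added above matter because the SDR timing fields (tPROG_max here) are stored in picoseconds while marvell_nfc_wait_op() expects a timeout in milliseconds, so the raw value was wildly out of range. A quick illustration of the conversion; PSEC_TO_MSEC is assumed here to round up, DIV_ROUND_UP-style:

#include <stdint.h>
#include <stdio.h>

/* Assumed to behave like the kernel's helper: picoseconds -> milliseconds,
 * rounding up so a non-zero timing never becomes a zero timeout. */
#define PSEC_TO_MSEC(ps) (((ps) + 999999999ULL) / 1000000000ULL)

int main(void)
{
	uint64_t tPROG_max_ps = 600000000ULL;	/* 600 us, a typical tPROG_max */

	printf("raw value    : %llu \"ms\" (nonsense if passed unconverted)\n",
	       (unsigned long long)tPROG_max_ps);
	printf("PSEC_TO_MSEC : %llu ms\n",
	       (unsigned long long)PSEC_TO_MSEC(tPROG_max_ps));
	return 0;
}
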
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 72f3a89da513..f28c3a555861 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c | |||
@@ -706,12 +706,17 @@ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo) | |||
706 | */ | 706 | */ |
707 | int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) | 707 | int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) |
708 | { | 708 | { |
709 | const struct nand_sdr_timings *timings; | ||
709 | u8 status = 0; | 710 | u8 status = 0; |
710 | int ret; | 711 | int ret; |
711 | 712 | ||
712 | if (!chip->exec_op) | 713 | if (!chip->exec_op) |
713 | return -ENOTSUPP; | 714 | return -ENOTSUPP; |
714 | 715 | ||
716 | /* Wait tWB before polling the STATUS reg. */ | ||
717 | timings = nand_get_sdr_timings(&chip->data_interface); | ||
718 | ndelay(PSEC_TO_NSEC(timings->tWB_max)); | ||
719 | |||
715 | ret = nand_status_op(chip, NULL); | 720 | ret = nand_status_op(chip, NULL); |
716 | if (ret) | 721 | if (ret) |
717 | return ret; | 722 | return ret; |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 1ed9529e7bd1..5eb0df2e5464 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info) | |||
450 | { | 450 | { |
451 | int i; | 451 | int i; |
452 | 452 | ||
453 | if (!client_info->slave) | 453 | if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst)) |
454 | return; | 454 | return; |
455 | 455 | ||
456 | for (i = 0; i < RLB_ARP_BURST_SIZE; i++) { | 456 | for (i = 0; i < RLB_ARP_BURST_SIZE; i++) { |
@@ -943,6 +943,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], | |||
943 | skb->priority = TC_PRIO_CONTROL; | 943 | skb->priority = TC_PRIO_CONTROL; |
944 | skb->dev = slave->dev; | 944 | skb->dev = slave->dev; |
945 | 945 | ||
946 | netdev_dbg(slave->bond->dev, | ||
947 | "Send learning packet: dev %s mac %pM vlan %d\n", | ||
948 | slave->dev->name, mac_addr, vid); | ||
949 | |||
946 | if (vid) | 950 | if (vid) |
947 | __vlan_hwaccel_put_tag(skb, vlan_proto, vid); | 951 | __vlan_hwaccel_put_tag(skb, vlan_proto, vid); |
948 | 952 | ||
@@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data) | |||
965 | u8 *mac_addr = data->mac_addr; | 969 | u8 *mac_addr = data->mac_addr; |
966 | struct bond_vlan_tag *tags; | 970 | struct bond_vlan_tag *tags; |
967 | 971 | ||
968 | if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) { | 972 | if (is_vlan_dev(upper) && |
969 | if (strict_match && | 973 | bond->nest_level == vlan_get_encap_level(upper) - 1) { |
970 | ether_addr_equal_64bits(mac_addr, | 974 | if (upper->addr_assign_type == NET_ADDR_STOLEN) { |
971 | upper->dev_addr)) { | ||
972 | alb_send_lp_vid(slave, mac_addr, | 975 | alb_send_lp_vid(slave, mac_addr, |
973 | vlan_dev_vlan_proto(upper), | 976 | vlan_dev_vlan_proto(upper), |
974 | vlan_dev_vlan_id(upper)); | 977 | vlan_dev_vlan_id(upper)); |
975 | } else if (!strict_match) { | 978 | } else { |
976 | alb_send_lp_vid(slave, upper->dev_addr, | 979 | alb_send_lp_vid(slave, upper->dev_addr, |
977 | vlan_dev_vlan_proto(upper), | 980 | vlan_dev_vlan_proto(upper), |
978 | vlan_dev_vlan_id(upper)); | 981 | vlan_dev_vlan_id(upper)); |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 718e4914e3a0..1f1e97b26f95 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1738,6 +1738,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, | |||
1738 | if (bond_mode_uses_xmit_hash(bond)) | 1738 | if (bond_mode_uses_xmit_hash(bond)) |
1739 | bond_update_slave_arr(bond, NULL); | 1739 | bond_update_slave_arr(bond, NULL); |
1740 | 1740 | ||
1741 | bond->nest_level = dev_get_nest_level(bond_dev); | ||
1742 | |||
1741 | netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", | 1743 | netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", |
1742 | slave_dev->name, | 1744 | slave_dev->name, |
1743 | bond_is_active_slave(new_slave) ? "an active" : "a backup", | 1745 | bond_is_active_slave(new_slave) ? "an active" : "a backup", |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index b1779566c5bb..3c71f1cb205f 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
@@ -605,7 +605,7 @@ void can_bus_off(struct net_device *dev) | |||
605 | { | 605 | { |
606 | struct can_priv *priv = netdev_priv(dev); | 606 | struct can_priv *priv = netdev_priv(dev); |
607 | 607 | ||
608 | netdev_dbg(dev, "bus-off\n"); | 608 | netdev_info(dev, "bus-off\n"); |
609 | 609 | ||
610 | netif_carrier_off(dev); | 610 | netif_carrier_off(dev); |
611 | 611 | ||
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 634c51e6b8ae..d53a45bf2a72 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -200,6 +200,7 @@ | |||
200 | #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ | 200 | #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ |
201 | #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ | 201 | #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ |
202 | #define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */ | 202 | #define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */ |
203 | #define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */ | ||
203 | 204 | ||
204 | /* Structure of the message buffer */ | 205 | /* Structure of the message buffer */ |
205 | struct flexcan_mb { | 206 | struct flexcan_mb { |
@@ -288,6 +289,12 @@ struct flexcan_priv { | |||
288 | 289 | ||
289 | static const struct flexcan_devtype_data fsl_p1010_devtype_data = { | 290 | static const struct flexcan_devtype_data fsl_p1010_devtype_data = { |
290 | .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | | 291 | .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | |
292 | FLEXCAN_QUIRK_BROKEN_PERR_STATE | | ||
293 | FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN, | ||
294 | }; | ||
295 | |||
296 | static const struct flexcan_devtype_data fsl_imx25_devtype_data = { | ||
297 | .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | | ||
291 | FLEXCAN_QUIRK_BROKEN_PERR_STATE, | 298 | FLEXCAN_QUIRK_BROKEN_PERR_STATE, |
292 | }; | 299 | }; |
293 | 300 | ||
@@ -1251,9 +1258,9 @@ static void unregister_flexcandev(struct net_device *dev) | |||
1251 | static const struct of_device_id flexcan_of_match[] = { | 1258 | static const struct of_device_id flexcan_of_match[] = { |
1252 | { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, | 1259 | { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, |
1253 | { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, | 1260 | { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, |
1254 | { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, }, | 1261 | { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, }, |
1255 | { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, }, | 1262 | { .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, }, |
1256 | { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, }, | 1263 | { .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, }, |
1257 | { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, | 1264 | { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, |
1258 | { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, | 1265 | { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, |
1259 | { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, }, | 1266 | { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, }, |
@@ -1337,18 +1344,13 @@ static int flexcan_probe(struct platform_device *pdev) | |||
1337 | 1344 | ||
1338 | priv = netdev_priv(dev); | 1345 | priv = netdev_priv(dev); |
1339 | 1346 | ||
1340 | if (of_property_read_bool(pdev->dev.of_node, "big-endian")) { | 1347 | if (of_property_read_bool(pdev->dev.of_node, "big-endian") || |
1348 | devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) { | ||
1341 | priv->read = flexcan_read_be; | 1349 | priv->read = flexcan_read_be; |
1342 | priv->write = flexcan_write_be; | 1350 | priv->write = flexcan_write_be; |
1343 | } else { | 1351 | } else { |
1344 | if (of_device_is_compatible(pdev->dev.of_node, | 1352 | priv->read = flexcan_read_le; |
1345 | "fsl,p1010-flexcan")) { | 1353 | priv->write = flexcan_write_le; |
1346 | priv->read = flexcan_read_be; | ||
1347 | priv->write = flexcan_write_be; | ||
1348 | } else { | ||
1349 | priv->read = flexcan_read_le; | ||
1350 | priv->write = flexcan_write_le; | ||
1351 | } | ||
1352 | } | 1354 | } |
1353 | 1355 | ||
1354 | priv->can.clock.freq = clock_freq; | 1356 | priv->can.clock.freq = clock_freq; |
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 5590c559a8ca..53e320c92a8b 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c | |||
@@ -91,6 +91,7 @@ | |||
91 | #define HI3110_STAT_BUSOFF BIT(2) | 91 | #define HI3110_STAT_BUSOFF BIT(2) |
92 | #define HI3110_STAT_ERRP BIT(3) | 92 | #define HI3110_STAT_ERRP BIT(3) |
93 | #define HI3110_STAT_ERRW BIT(4) | 93 | #define HI3110_STAT_ERRW BIT(4) |
94 | #define HI3110_STAT_TXMTY BIT(7) | ||
94 | 95 | ||
95 | #define HI3110_BTR0_SJW_SHIFT 6 | 96 | #define HI3110_BTR0_SJW_SHIFT 6 |
96 | #define HI3110_BTR0_BRP_SHIFT 0 | 97 | #define HI3110_BTR0_BRP_SHIFT 0 |
@@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net, | |||
427 | struct hi3110_priv *priv = netdev_priv(net); | 428 | struct hi3110_priv *priv = netdev_priv(net); |
428 | struct spi_device *spi = priv->spi; | 429 | struct spi_device *spi = priv->spi; |
429 | 430 | ||
431 | mutex_lock(&priv->hi3110_lock); | ||
430 | bec->txerr = hi3110_read(spi, HI3110_READ_TEC); | 432 | bec->txerr = hi3110_read(spi, HI3110_READ_TEC); |
431 | bec->rxerr = hi3110_read(spi, HI3110_READ_REC); | 433 | bec->rxerr = hi3110_read(spi, HI3110_READ_REC); |
434 | mutex_unlock(&priv->hi3110_lock); | ||
432 | 435 | ||
433 | return 0; | 436 | return 0; |
434 | } | 437 | } |
@@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) | |||
735 | } | 738 | } |
736 | } | 739 | } |
737 | 740 | ||
738 | if (intf == 0) | 741 | if (priv->tx_len && statf & HI3110_STAT_TXMTY) { |
739 | break; | ||
740 | |||
741 | if (intf & HI3110_INT_TXCPLT) { | ||
742 | net->stats.tx_packets++; | 742 | net->stats.tx_packets++; |
743 | net->stats.tx_bytes += priv->tx_len - 1; | 743 | net->stats.tx_bytes += priv->tx_len - 1; |
744 | can_led_event(net, CAN_LED_EVENT_TX); | 744 | can_led_event(net, CAN_LED_EVENT_TX); |
@@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) | |||
748 | } | 748 | } |
749 | netif_wake_queue(net); | 749 | netif_wake_queue(net); |
750 | } | 750 | } |
751 | |||
752 | if (intf == 0) | ||
753 | break; | ||
751 | } | 754 | } |
752 | mutex_unlock(&priv->hi3110_lock); | 755 | mutex_unlock(&priv->hi3110_lock); |
753 | return IRQ_HANDLED; | 756 | return IRQ_HANDLED; |
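
The hi311x change takes the driver mutex around the paired TEC/REC reads so the reported txerr/rxerr values form one consistent snapshot instead of racing with the interrupt thread, and it completes TX based on the TXMTY status bit. A small pthread sketch of why the paired reads want the lock; the registers are plain variables here, not SPI transfers:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned tec, rec;	/* stand-ins for the TEC/REC registers */

struct berr { unsigned txerr, rxerr; };

static struct berr get_berr_counter(void)
{
	struct berr b;

	/* Both reads under one lock: the IRQ/worker path that updates the
	 * counters cannot slip in between them. */
	pthread_mutex_lock(&lock);
	b.txerr = tec;
	b.rxerr = rec;
	pthread_mutex_unlock(&lock);
	return b;
}

static void bus_error_seen(void)
{
	pthread_mutex_lock(&lock);
	tec += 8;
	rec += 1;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	bus_error_seen();
	struct berr b = get_berr_counter();
	printf("txerr=%u rxerr=%u\n", b.txerr, b.rxerr);
	return 0;
}
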
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 63587b8e6825..daed57d3d209 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, | |||
1179 | 1179 | ||
1180 | skb = alloc_can_skb(priv->netdev, &cf); | 1180 | skb = alloc_can_skb(priv->netdev, &cf); |
1181 | if (!skb) { | 1181 | if (!skb) { |
1182 | stats->tx_dropped++; | 1182 | stats->rx_dropped++; |
1183 | return; | 1183 | return; |
1184 | } | 1184 | } |
1185 | 1185 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 3d2091099f7f..5b4374f21d76 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -3370,6 +3370,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3370 | .num_internal_phys = 5, | 3370 | .num_internal_phys = 5, |
3371 | .max_vid = 4095, | 3371 | .max_vid = 4095, |
3372 | .port_base_addr = 0x10, | 3372 | .port_base_addr = 0x10, |
3373 | .phy_base_addr = 0x0, | ||
3373 | .global1_addr = 0x1b, | 3374 | .global1_addr = 0x1b, |
3374 | .global2_addr = 0x1c, | 3375 | .global2_addr = 0x1c, |
3375 | .age_time_coeff = 15000, | 3376 | .age_time_coeff = 15000, |
@@ -3391,6 +3392,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3391 | .num_internal_phys = 0, | 3392 | .num_internal_phys = 0, |
3392 | .max_vid = 4095, | 3393 | .max_vid = 4095, |
3393 | .port_base_addr = 0x10, | 3394 | .port_base_addr = 0x10, |
3395 | .phy_base_addr = 0x0, | ||
3394 | .global1_addr = 0x1b, | 3396 | .global1_addr = 0x1b, |
3395 | .global2_addr = 0x1c, | 3397 | .global2_addr = 0x1c, |
3396 | .age_time_coeff = 15000, | 3398 | .age_time_coeff = 15000, |
@@ -3410,6 +3412,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3410 | .num_internal_phys = 8, | 3412 | .num_internal_phys = 8, |
3411 | .max_vid = 4095, | 3413 | .max_vid = 4095, |
3412 | .port_base_addr = 0x10, | 3414 | .port_base_addr = 0x10, |
3415 | .phy_base_addr = 0x0, | ||
3413 | .global1_addr = 0x1b, | 3416 | .global1_addr = 0x1b, |
3414 | .global2_addr = 0x1c, | 3417 | .global2_addr = 0x1c, |
3415 | .age_time_coeff = 15000, | 3418 | .age_time_coeff = 15000, |
@@ -3431,6 +3434,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3431 | .num_internal_phys = 5, | 3434 | .num_internal_phys = 5, |
3432 | .max_vid = 4095, | 3435 | .max_vid = 4095, |
3433 | .port_base_addr = 0x10, | 3436 | .port_base_addr = 0x10, |
3437 | .phy_base_addr = 0x0, | ||
3434 | .global1_addr = 0x1b, | 3438 | .global1_addr = 0x1b, |
3435 | .global2_addr = 0x1c, | 3439 | .global2_addr = 0x1c, |
3436 | .age_time_coeff = 15000, | 3440 | .age_time_coeff = 15000, |
@@ -3452,6 +3456,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3452 | .num_internal_phys = 0, | 3456 | .num_internal_phys = 0, |
3453 | .max_vid = 4095, | 3457 | .max_vid = 4095, |
3454 | .port_base_addr = 0x10, | 3458 | .port_base_addr = 0x10, |
3459 | .phy_base_addr = 0x0, | ||
3455 | .global1_addr = 0x1b, | 3460 | .global1_addr = 0x1b, |
3456 | .global2_addr = 0x1c, | 3461 | .global2_addr = 0x1c, |
3457 | .age_time_coeff = 15000, | 3462 | .age_time_coeff = 15000, |
@@ -3472,6 +3477,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3472 | .num_gpio = 11, | 3477 | .num_gpio = 11, |
3473 | .max_vid = 4095, | 3478 | .max_vid = 4095, |
3474 | .port_base_addr = 0x10, | 3479 | .port_base_addr = 0x10, |
3480 | .phy_base_addr = 0x10, | ||
3475 | .global1_addr = 0x1b, | 3481 | .global1_addr = 0x1b, |
3476 | .global2_addr = 0x1c, | 3482 | .global2_addr = 0x1c, |
3477 | .age_time_coeff = 3750, | 3483 | .age_time_coeff = 3750, |
@@ -3493,6 +3499,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3493 | .num_internal_phys = 5, | 3499 | .num_internal_phys = 5, |
3494 | .max_vid = 4095, | 3500 | .max_vid = 4095, |
3495 | .port_base_addr = 0x10, | 3501 | .port_base_addr = 0x10, |
3502 | .phy_base_addr = 0x0, | ||
3496 | .global1_addr = 0x1b, | 3503 | .global1_addr = 0x1b, |
3497 | .global2_addr = 0x1c, | 3504 | .global2_addr = 0x1c, |
3498 | .age_time_coeff = 15000, | 3505 | .age_time_coeff = 15000, |
@@ -3514,6 +3521,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3514 | .num_internal_phys = 0, | 3521 | .num_internal_phys = 0, |
3515 | .max_vid = 4095, | 3522 | .max_vid = 4095, |
3516 | .port_base_addr = 0x10, | 3523 | .port_base_addr = 0x10, |
3524 | .phy_base_addr = 0x0, | ||
3517 | .global1_addr = 0x1b, | 3525 | .global1_addr = 0x1b, |
3518 | .global2_addr = 0x1c, | 3526 | .global2_addr = 0x1c, |
3519 | .age_time_coeff = 15000, | 3527 | .age_time_coeff = 15000, |
@@ -3535,6 +3543,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3535 | .num_internal_phys = 5, | 3543 | .num_internal_phys = 5, |
3536 | .max_vid = 4095, | 3544 | .max_vid = 4095, |
3537 | .port_base_addr = 0x10, | 3545 | .port_base_addr = 0x10, |
3546 | .phy_base_addr = 0x0, | ||
3538 | .global1_addr = 0x1b, | 3547 | .global1_addr = 0x1b, |
3539 | .global2_addr = 0x1c, | 3548 | .global2_addr = 0x1c, |
3540 | .age_time_coeff = 15000, | 3549 | .age_time_coeff = 15000, |
@@ -3557,6 +3566,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3557 | .num_gpio = 15, | 3566 | .num_gpio = 15, |
3558 | .max_vid = 4095, | 3567 | .max_vid = 4095, |
3559 | .port_base_addr = 0x10, | 3568 | .port_base_addr = 0x10, |
3569 | .phy_base_addr = 0x0, | ||
3560 | .global1_addr = 0x1b, | 3570 | .global1_addr = 0x1b, |
3561 | .global2_addr = 0x1c, | 3571 | .global2_addr = 0x1c, |
3562 | .age_time_coeff = 15000, | 3572 | .age_time_coeff = 15000, |
@@ -3578,6 +3588,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3578 | .num_internal_phys = 5, | 3588 | .num_internal_phys = 5, |
3579 | .max_vid = 4095, | 3589 | .max_vid = 4095, |
3580 | .port_base_addr = 0x10, | 3590 | .port_base_addr = 0x10, |
3591 | .phy_base_addr = 0x0, | ||
3581 | .global1_addr = 0x1b, | 3592 | .global1_addr = 0x1b, |
3582 | .global2_addr = 0x1c, | 3593 | .global2_addr = 0x1c, |
3583 | .age_time_coeff = 15000, | 3594 | .age_time_coeff = 15000, |
@@ -3600,6 +3611,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3600 | .num_gpio = 15, | 3611 | .num_gpio = 15, |
3601 | .max_vid = 4095, | 3612 | .max_vid = 4095, |
3602 | .port_base_addr = 0x10, | 3613 | .port_base_addr = 0x10, |
3614 | .phy_base_addr = 0x0, | ||
3603 | .global1_addr = 0x1b, | 3615 | .global1_addr = 0x1b, |
3604 | .global2_addr = 0x1c, | 3616 | .global2_addr = 0x1c, |
3605 | .age_time_coeff = 15000, | 3617 | .age_time_coeff = 15000, |
@@ -3621,6 +3633,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3621 | .num_internal_phys = 0, | 3633 | .num_internal_phys = 0, |
3622 | .max_vid = 4095, | 3634 | .max_vid = 4095, |
3623 | .port_base_addr = 0x10, | 3635 | .port_base_addr = 0x10, |
3636 | .phy_base_addr = 0x0, | ||
3624 | .global1_addr = 0x1b, | 3637 | .global1_addr = 0x1b, |
3625 | .global2_addr = 0x1c, | 3638 | .global2_addr = 0x1c, |
3626 | .age_time_coeff = 15000, | 3639 | .age_time_coeff = 15000, |
@@ -3641,6 +3654,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3641 | .num_gpio = 16, | 3654 | .num_gpio = 16, |
3642 | .max_vid = 8191, | 3655 | .max_vid = 8191, |
3643 | .port_base_addr = 0x0, | 3656 | .port_base_addr = 0x0, |
3657 | .phy_base_addr = 0x0, | ||
3644 | .global1_addr = 0x1b, | 3658 | .global1_addr = 0x1b, |
3645 | .global2_addr = 0x1c, | 3659 | .global2_addr = 0x1c, |
3646 | .tag_protocol = DSA_TAG_PROTO_DSA, | 3660 | .tag_protocol = DSA_TAG_PROTO_DSA, |
@@ -3663,6 +3677,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3663 | .num_gpio = 16, | 3677 | .num_gpio = 16, |
3664 | .max_vid = 8191, | 3678 | .max_vid = 8191, |
3665 | .port_base_addr = 0x0, | 3679 | .port_base_addr = 0x0, |
3680 | .phy_base_addr = 0x0, | ||
3666 | .global1_addr = 0x1b, | 3681 | .global1_addr = 0x1b, |
3667 | .global2_addr = 0x1c, | 3682 | .global2_addr = 0x1c, |
3668 | .age_time_coeff = 3750, | 3683 | .age_time_coeff = 3750, |
@@ -3684,6 +3699,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3684 | .num_internal_phys = 11, | 3699 | .num_internal_phys = 11, |
3685 | .max_vid = 8191, | 3700 | .max_vid = 8191, |
3686 | .port_base_addr = 0x0, | 3701 | .port_base_addr = 0x0, |
3702 | .phy_base_addr = 0x0, | ||
3687 | .global1_addr = 0x1b, | 3703 | .global1_addr = 0x1b, |
3688 | .global2_addr = 0x1c, | 3704 | .global2_addr = 0x1c, |
3689 | .age_time_coeff = 3750, | 3705 | .age_time_coeff = 3750, |
@@ -3707,6 +3723,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3707 | .num_gpio = 15, | 3723 | .num_gpio = 15, |
3708 | .max_vid = 4095, | 3724 | .max_vid = 4095, |
3709 | .port_base_addr = 0x10, | 3725 | .port_base_addr = 0x10, |
3726 | .phy_base_addr = 0x0, | ||
3710 | .global1_addr = 0x1b, | 3727 | .global1_addr = 0x1b, |
3711 | .global2_addr = 0x1c, | 3728 | .global2_addr = 0x1c, |
3712 | .age_time_coeff = 15000, | 3729 | .age_time_coeff = 15000, |
@@ -3730,6 +3747,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3730 | .num_gpio = 16, | 3747 | .num_gpio = 16, |
3731 | .max_vid = 8191, | 3748 | .max_vid = 8191, |
3732 | .port_base_addr = 0x0, | 3749 | .port_base_addr = 0x0, |
3750 | .phy_base_addr = 0x0, | ||
3733 | .global1_addr = 0x1b, | 3751 | .global1_addr = 0x1b, |
3734 | .global2_addr = 0x1c, | 3752 | .global2_addr = 0x1c, |
3735 | .age_time_coeff = 3750, | 3753 | .age_time_coeff = 3750, |
@@ -3753,6 +3771,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3753 | .num_gpio = 15, | 3771 | .num_gpio = 15, |
3754 | .max_vid = 4095, | 3772 | .max_vid = 4095, |
3755 | .port_base_addr = 0x10, | 3773 | .port_base_addr = 0x10, |
3774 | .phy_base_addr = 0x0, | ||
3756 | .global1_addr = 0x1b, | 3775 | .global1_addr = 0x1b, |
3757 | .global2_addr = 0x1c, | 3776 | .global2_addr = 0x1c, |
3758 | .age_time_coeff = 15000, | 3777 | .age_time_coeff = 15000, |
@@ -3776,6 +3795,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3776 | .num_gpio = 15, | 3795 | .num_gpio = 15, |
3777 | .max_vid = 4095, | 3796 | .max_vid = 4095, |
3778 | .port_base_addr = 0x10, | 3797 | .port_base_addr = 0x10, |
3798 | .phy_base_addr = 0x0, | ||
3779 | .global1_addr = 0x1b, | 3799 | .global1_addr = 0x1b, |
3780 | .global2_addr = 0x1c, | 3800 | .global2_addr = 0x1c, |
3781 | .age_time_coeff = 15000, | 3801 | .age_time_coeff = 15000, |
@@ -3798,6 +3818,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3798 | .num_gpio = 11, | 3818 | .num_gpio = 11, |
3799 | .max_vid = 4095, | 3819 | .max_vid = 4095, |
3800 | .port_base_addr = 0x10, | 3820 | .port_base_addr = 0x10, |
3821 | .phy_base_addr = 0x10, | ||
3801 | .global1_addr = 0x1b, | 3822 | .global1_addr = 0x1b, |
3802 | .global2_addr = 0x1c, | 3823 | .global2_addr = 0x1c, |
3803 | .age_time_coeff = 3750, | 3824 | .age_time_coeff = 3750, |
@@ -3820,6 +3841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3820 | .num_internal_phys = 5, | 3841 | .num_internal_phys = 5, |
3821 | .max_vid = 4095, | 3842 | .max_vid = 4095, |
3822 | .port_base_addr = 0x10, | 3843 | .port_base_addr = 0x10, |
3844 | .phy_base_addr = 0x0, | ||
3823 | .global1_addr = 0x1b, | 3845 | .global1_addr = 0x1b, |
3824 | .global2_addr = 0x1c, | 3846 | .global2_addr = 0x1c, |
3825 | .age_time_coeff = 15000, | 3847 | .age_time_coeff = 15000, |
@@ -3841,6 +3863,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3841 | .num_internal_phys = 5, | 3863 | .num_internal_phys = 5, |
3842 | .max_vid = 4095, | 3864 | .max_vid = 4095, |
3843 | .port_base_addr = 0x10, | 3865 | .port_base_addr = 0x10, |
3866 | .phy_base_addr = 0x0, | ||
3844 | .global1_addr = 0x1b, | 3867 | .global1_addr = 0x1b, |
3845 | .global2_addr = 0x1c, | 3868 | .global2_addr = 0x1c, |
3846 | .age_time_coeff = 15000, | 3869 | .age_time_coeff = 15000, |
@@ -3863,6 +3886,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3863 | .num_gpio = 15, | 3886 | .num_gpio = 15, |
3864 | .max_vid = 4095, | 3887 | .max_vid = 4095, |
3865 | .port_base_addr = 0x10, | 3888 | .port_base_addr = 0x10, |
3889 | .phy_base_addr = 0x0, | ||
3866 | .global1_addr = 0x1b, | 3890 | .global1_addr = 0x1b, |
3867 | .global2_addr = 0x1c, | 3891 | .global2_addr = 0x1c, |
3868 | .age_time_coeff = 15000, | 3892 | .age_time_coeff = 15000, |
@@ -3885,6 +3909,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3885 | .num_gpio = 16, | 3909 | .num_gpio = 16, |
3886 | .max_vid = 8191, | 3910 | .max_vid = 8191, |
3887 | .port_base_addr = 0x0, | 3911 | .port_base_addr = 0x0, |
3912 | .phy_base_addr = 0x0, | ||
3888 | .global1_addr = 0x1b, | 3913 | .global1_addr = 0x1b, |
3889 | .global2_addr = 0x1c, | 3914 | .global2_addr = 0x1c, |
3890 | .age_time_coeff = 3750, | 3915 | .age_time_coeff = 3750, |
@@ -3907,6 +3932,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { | |||
3907 | .num_gpio = 16, | 3932 | .num_gpio = 16, |
3908 | .max_vid = 8191, | 3933 | .max_vid = 8191, |
3909 | .port_base_addr = 0x0, | 3934 | .port_base_addr = 0x0, |
3935 | .phy_base_addr = 0x0, | ||
3910 | .global1_addr = 0x1b, | 3936 | .global1_addr = 0x1b, |
3911 | .global2_addr = 0x1c, | 3937 | .global2_addr = 0x1c, |
3912 | .age_time_coeff = 3750, | 3938 | .age_time_coeff = 3750, |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 80490f66bc06..12b7f4649b25 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h | |||
@@ -114,6 +114,7 @@ struct mv88e6xxx_info { | |||
114 | unsigned int num_gpio; | 114 | unsigned int num_gpio; |
115 | unsigned int max_vid; | 115 | unsigned int max_vid; |
116 | unsigned int port_base_addr; | 116 | unsigned int port_base_addr; |
117 | unsigned int phy_base_addr; | ||
117 | unsigned int global1_addr; | 118 | unsigned int global1_addr; |
118 | unsigned int global2_addr; | 119 | unsigned int global2_addr; |
119 | unsigned int age_time_coeff; | 120 | unsigned int age_time_coeff; |
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 0ce627fded48..8d22d66d84b7 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c | |||
@@ -1118,7 +1118,7 @@ int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip, | |||
1118 | err = irq; | 1118 | err = irq; |
1119 | goto out; | 1119 | goto out; |
1120 | } | 1120 | } |
1121 | bus->irq[chip->info->port_base_addr + phy] = irq; | 1121 | bus->irq[chip->info->phy_base_addr + phy] = irq; |
1122 | } | 1122 | } |
1123 | return 0; | 1123 | return 0; |
1124 | out: | 1124 | out: |
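The new phy_base_addr field decouples the SMI address of the internal PHYs from that of the port registers; the IRQ mapping above previously reused port_base_addr for both, which only happened to work on chips where the two bases coincide. Most entries in the table set it to 0x0, while the two entries that set 0x10 keep their PHYs at the same base as the port window. A rough sketch of the relationship (the table entry and helper below are illustrative, not taken from the driver):

	#include <linux/phy.h>
	#include "chip.h"	/* struct mv88e6xxx_info */

	/* made-up example entry: PHYs at SMI address 0x0, ports at 0x10 */
	static const struct mv88e6xxx_info example_info = {
		.port_base_addr = 0x10,
		.phy_base_addr  = 0x0,
	};

	/* the PHY's MDIO address, not its port offset, indexes bus->irq[] */
	static void example_map_phy_irq(struct mii_bus *bus, int phy, int irq)
	{
		bus->irq[example_info.phy_base_addr + phy] = irq;
	}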
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 32f6d2e24d66..1a1a6380c128 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
@@ -95,6 +95,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self) | |||
95 | /*rss rings */ | 95 | /*rss rings */ |
96 | cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); | 96 | cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); |
97 | cfg->vecs = min(cfg->vecs, num_online_cpus()); | 97 | cfg->vecs = min(cfg->vecs, num_online_cpus()); |
98 | cfg->vecs = min(cfg->vecs, self->irqvecs); | ||
98 | /* cfg->vecs should be power of 2 for RSS */ | 99 | /* cfg->vecs should be power of 2 for RSS */ |
99 | if (cfg->vecs >= 8U) | 100 | if (cfg->vecs >= 8U) |
100 | cfg->vecs = 8U; | 101 | cfg->vecs = 8U; |
@@ -246,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self) | |||
246 | 247 | ||
247 | self->ndev->hw_features |= aq_hw_caps->hw_features; | 248 | self->ndev->hw_features |= aq_hw_caps->hw_features; |
248 | self->ndev->features = aq_hw_caps->hw_features; | 249 | self->ndev->features = aq_hw_caps->hw_features; |
250 | self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM | | ||
251 | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO; | ||
249 | self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; | 252 | self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; |
250 | self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 253 | self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
251 | 254 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 219b550d1665..faa533a0ec47 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h | |||
@@ -80,6 +80,7 @@ struct aq_nic_s { | |||
80 | 80 | ||
81 | struct pci_dev *pdev; | 81 | struct pci_dev *pdev; |
82 | unsigned int msix_entry_mask; | 82 | unsigned int msix_entry_mask; |
83 | u32 irqvecs; | ||
83 | }; | 84 | }; |
84 | 85 | ||
85 | static inline struct device *aq_nic_get_dev(struct aq_nic_s *self) | 86 | static inline struct device *aq_nic_get_dev(struct aq_nic_s *self) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index ecc6306f940f..a50e08bb4748 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | |||
@@ -267,16 +267,16 @@ static int aq_pci_probe(struct pci_dev *pdev, | |||
267 | numvecs = min(numvecs, num_online_cpus()); | 267 | numvecs = min(numvecs, num_online_cpus()); |
268 | /*enable interrupts */ | 268 | /*enable interrupts */ |
269 | #if !AQ_CFG_FORCE_LEGACY_INT | 269 | #if !AQ_CFG_FORCE_LEGACY_INT |
270 | err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, | 270 | numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs, |
271 | PCI_IRQ_MSIX); | 271 | PCI_IRQ_MSIX | PCI_IRQ_MSI | |
272 | 272 | PCI_IRQ_LEGACY); | |
273 | if (err < 0) { | 273 | |
274 | err = pci_alloc_irq_vectors(self->pdev, 1, 1, | 274 | if (numvecs < 0) { |
275 | PCI_IRQ_MSI | PCI_IRQ_LEGACY); | 275 | err = numvecs; |
276 | if (err < 0) | 276 | goto err_hwinit; |
277 | goto err_hwinit; | ||
278 | } | 277 | } |
279 | #endif | 278 | #endif |
279 | self->irqvecs = numvecs; | ||
280 | 280 | ||
281 | /* net device init */ | 281 | /* net device init */ |
282 | aq_nic_cfg_start(self); | 282 | aq_nic_cfg_start(self); |
@@ -298,9 +298,9 @@ err_free_aq_hw: | |||
298 | kfree(self->aq_hw); | 298 | kfree(self->aq_hw); |
299 | err_ioremap: | 299 | err_ioremap: |
300 | free_netdev(ndev); | 300 | free_netdev(ndev); |
301 | err_pci_func: | ||
302 | pci_release_regions(pdev); | ||
303 | err_ndev: | 301 | err_ndev: |
302 | pci_release_regions(pdev); | ||
303 | err_pci_func: | ||
304 | pci_disable_device(pdev); | 304 | pci_disable_device(pdev); |
305 | return err; | 305 | return err; |
306 | } | 306 | } |
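The probe path now requests its vectors with a single pci_alloc_irq_vectors() call that may fall back from MSI-X to MSI to legacy INTx, records how many vectors were actually granted in self->irqvecs (which aq_nic_cfg_start() uses to clamp cfg->vecs), and swaps the err_ndev/err_pci_func labels so pci_release_regions() runs before pci_disable_device() on the unwind path. A minimal sketch of the allocation pattern, assuming a hypothetical example_alloc_irqs() helper:

	#include <linux/pci.h>

	static int example_alloc_irqs(struct pci_dev *pdev, unsigned int wanted,
				      u32 *granted)
	{
		/* one call covers all fallback modes; returns vectors granted */
		int nvecs = pci_alloc_irq_vectors(pdev, 1, wanted,
						  PCI_IRQ_MSIX | PCI_IRQ_MSI |
						  PCI_IRQ_LEGACY);
		if (nvecs < 0)
			return nvecs;	/* no interrupt scheme available */

		*granted = nvecs;	/* callers size their queues off this */
		return 0;
	}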
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 08bbb639be1a..9f59b1270a7c 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -8733,14 +8733,15 @@ static void tg3_free_consistent(struct tg3 *tp) | |||
8733 | tg3_mem_rx_release(tp); | 8733 | tg3_mem_rx_release(tp); |
8734 | tg3_mem_tx_release(tp); | 8734 | tg3_mem_tx_release(tp); |
8735 | 8735 | ||
8736 | /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */ | 8736 | /* tp->hw_stats can be referenced safely: |
8737 | tg3_full_lock(tp, 0); | 8737 | * 1. under rtnl_lock |
8738 | * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set. | ||
8739 | */ | ||
8738 | if (tp->hw_stats) { | 8740 | if (tp->hw_stats) { |
8739 | dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), | 8741 | dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), |
8740 | tp->hw_stats, tp->stats_mapping); | 8742 | tp->hw_stats, tp->stats_mapping); |
8741 | tp->hw_stats = NULL; | 8743 | tp->hw_stats = NULL; |
8742 | } | 8744 | } |
8743 | tg3_full_unlock(tp); | ||
8744 | } | 8745 | } |
8745 | 8746 | ||
8746 | /* | 8747 | /* |
@@ -14178,7 +14179,7 @@ static void tg3_get_stats64(struct net_device *dev, | |||
14178 | struct tg3 *tp = netdev_priv(dev); | 14179 | struct tg3 *tp = netdev_priv(dev); |
14179 | 14180 | ||
14180 | spin_lock_bh(&tp->lock); | 14181 | spin_lock_bh(&tp->lock); |
14181 | if (!tp->hw_stats) { | 14182 | if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) { |
14182 | *stats = tp->net_stats_prev; | 14183 | *stats = tp->net_stats_prev; |
14183 | spin_unlock_bh(&tp->lock); | 14184 | spin_unlock_bh(&tp->lock); |
14184 | return; | 14185 | return; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 24d2865b8806..005283c7cdfe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -3433,8 +3433,8 @@ static int adap_config_hma(struct adapter *adapter) | |||
3433 | sgl = adapter->hma.sgt->sgl; | 3433 | sgl = adapter->hma.sgt->sgl; |
3434 | node = dev_to_node(adapter->pdev_dev); | 3434 | node = dev_to_node(adapter->pdev_dev); |
3435 | for_each_sg(sgl, iter, sgt->orig_nents, i) { | 3435 | for_each_sg(sgl, iter, sgt->orig_nents, i) { |
3436 | newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL, | 3436 | newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL | |
3437 | page_order); | 3437 | __GFP_ZERO, page_order); |
3438 | if (!newpage) { | 3438 | if (!newpage) { |
3439 | dev_err(adapter->pdev_dev, | 3439 | dev_err(adapter->pdev_dev, |
3440 | "Not enough memory for HMA page allocation\n"); | 3440 | "Not enough memory for HMA page allocation\n"); |
@@ -5474,6 +5474,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5474 | } | 5474 | } |
5475 | spin_lock_init(&adapter->mbox_lock); | 5475 | spin_lock_init(&adapter->mbox_lock); |
5476 | INIT_LIST_HEAD(&adapter->mlist.list); | 5476 | INIT_LIST_HEAD(&adapter->mlist.list); |
5477 | adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; | ||
5477 | pci_set_drvdata(pdev, adapter); | 5478 | pci_set_drvdata(pdev, adapter); |
5478 | 5479 | ||
5479 | if (func != ent->driver_data) { | 5480 | if (func != ent->driver_data) { |
@@ -5508,8 +5509,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5508 | goto out_free_adapter; | 5509 | goto out_free_adapter; |
5509 | } | 5510 | } |
5510 | 5511 | ||
5511 | adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; | ||
5512 | |||
5513 | /* PCI device has been enabled */ | 5512 | /* PCI device has been enabled */ |
5514 | adapter->flags |= DEV_ENABLED; | 5513 | adapter->flags |= DEV_ENABLED; |
5515 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); | 5514 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); |
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 5909a4407e38..7c511f144ed6 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c | |||
@@ -1014,10 +1014,10 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, | |||
1014 | desc = ICE_CTL_Q_DESC(cq->rq, ntc); | 1014 | desc = ICE_CTL_Q_DESC(cq->rq, ntc); |
1015 | desc_idx = ntc; | 1015 | desc_idx = ntc; |
1016 | 1016 | ||
1017 | cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); | ||
1017 | flags = le16_to_cpu(desc->flags); | 1018 | flags = le16_to_cpu(desc->flags); |
1018 | if (flags & ICE_AQ_FLAG_ERR) { | 1019 | if (flags & ICE_AQ_FLAG_ERR) { |
1019 | ret_code = ICE_ERR_AQ_ERROR; | 1020 | ret_code = ICE_ERR_AQ_ERROR; |
1020 | cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); | ||
1021 | ice_debug(hw, ICE_DBG_AQ_MSG, | 1021 | ice_debug(hw, ICE_DBG_AQ_MSG, |
1022 | "Control Receive Queue Event received with error 0x%x\n", | 1022 | "Control Receive Queue Event received with error 0x%x\n", |
1023 | cq->rq_last_status); | 1023 | cq->rq_last_status); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 68af127987bc..cead23e3db0c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | |||
@@ -943,8 +943,8 @@ err2: | |||
943 | kfree(ipsec->ip_tbl); | 943 | kfree(ipsec->ip_tbl); |
944 | kfree(ipsec->rx_tbl); | 944 | kfree(ipsec->rx_tbl); |
945 | kfree(ipsec->tx_tbl); | 945 | kfree(ipsec->tx_tbl); |
946 | kfree(ipsec); | ||
946 | err1: | 947 | err1: |
947 | kfree(adapter->ipsec); | ||
948 | netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); | 948 | netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); |
949 | } | 949 | } |
950 | 950 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 3123267dfba9..9592f3e3e42e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | |||
@@ -3427,6 +3427,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) | |||
3427 | hw->phy.sfp_setup_needed = false; | 3427 | hw->phy.sfp_setup_needed = false; |
3428 | } | 3428 | } |
3429 | 3429 | ||
3430 | if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) | ||
3431 | return status; | ||
3432 | |||
3430 | /* Reset PHY */ | 3433 | /* Reset PHY */ |
3431 | if (!hw->phy.reset_disable && hw->phy.ops.reset) | 3434 | if (!hw->phy.reset_disable && hw->phy.ops.reset) |
3432 | hw->phy.ops.reset(hw); | 3435 | hw->phy.ops.reset(hw); |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e3d04f226d57..850f8af95e49 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -4137,7 +4137,7 @@ out_drop: | |||
4137 | return NETDEV_TX_OK; | 4137 | return NETDEV_TX_OK; |
4138 | } | 4138 | } |
4139 | 4139 | ||
4140 | static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 4140 | static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
4141 | { | 4141 | { |
4142 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 4142 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
4143 | struct ixgbevf_ring *tx_ring; | 4143 | struct ixgbevf_ring *tx_ring; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index a30a2e95d13f..f11b45001cad 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -1027,6 +1027,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev, | |||
1027 | if (!coal->tx_max_coalesced_frames_irq) | 1027 | if (!coal->tx_max_coalesced_frames_irq) |
1028 | return -EINVAL; | 1028 | return -EINVAL; |
1029 | 1029 | ||
1030 | if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME || | ||
1031 | coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME || | ||
1032 | coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME || | ||
1033 | coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) { | ||
1034 | netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n", | ||
1035 | __func__, MLX4_EN_MAX_COAL_TIME); | ||
1036 | return -ERANGE; | ||
1037 | } | ||
1038 | |||
1039 | if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS || | ||
1040 | coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) { | ||
1041 | netdev_info(dev, "%s: maximum coalesced frames supported is %d\n", | ||
1042 | __func__, MLX4_EN_MAX_COAL_PKTS); | ||
1043 | return -ERANGE; | ||
1044 | } | ||
1045 | |||
1030 | priv->rx_frames = (coal->rx_max_coalesced_frames == | 1046 | priv->rx_frames = (coal->rx_max_coalesced_frames == |
1031 | MLX4_EN_AUTO_CONF) ? | 1047 | MLX4_EN_AUTO_CONF) ? |
1032 | MLX4_EN_RX_COAL_TARGET : | 1048 | MLX4_EN_RX_COAL_TARGET : |
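The new checks bound the ethtool coalescing knobs by what the hardware fields can hold (MLX4_EN_MAX_COAL_TIME and MLX4_EN_MAX_COAL_PKTS, both U16_MAX in the mlx4_en.h hunk below) and reject anything larger with -ERANGE instead of silently truncating. As a hypothetical illustration, a request such as "ethtool -C eth0 rx-usecs 70000" on an mlx4 port would now fail with a numerical-result-out-of-range error rather than wrapping to a small value.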
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e0adac4a9a19..9670b33fc9b1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -3324,12 +3324,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
3324 | MAX_TX_RINGS, GFP_KERNEL); | 3324 | MAX_TX_RINGS, GFP_KERNEL); |
3325 | if (!priv->tx_ring[t]) { | 3325 | if (!priv->tx_ring[t]) { |
3326 | err = -ENOMEM; | 3326 | err = -ENOMEM; |
3327 | goto err_free_tx; | 3327 | goto out; |
3328 | } | 3328 | } |
3329 | priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * | 3329 | priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * |
3330 | MAX_TX_RINGS, GFP_KERNEL); | 3330 | MAX_TX_RINGS, GFP_KERNEL); |
3331 | if (!priv->tx_cq[t]) { | 3331 | if (!priv->tx_cq[t]) { |
3332 | kfree(priv->tx_ring[t]); | ||
3333 | err = -ENOMEM; | 3332 | err = -ENOMEM; |
3334 | goto out; | 3333 | goto out; |
3335 | } | 3334 | } |
@@ -3582,11 +3581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
3582 | 3581 | ||
3583 | return 0; | 3582 | return 0; |
3584 | 3583 | ||
3585 | err_free_tx: | ||
3586 | while (t--) { | ||
3587 | kfree(priv->tx_ring[t]); | ||
3588 | kfree(priv->tx_cq[t]); | ||
3589 | } | ||
3590 | out: | 3584 | out: |
3591 | mlx4_en_destroy_netdev(dev); | 3585 | mlx4_en_destroy_netdev(dev); |
3592 | return err; | 3586 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f7c81133594f..ace6545f82e6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -132,6 +132,9 @@ | |||
132 | #define MLX4_EN_TX_COAL_PKTS 16 | 132 | #define MLX4_EN_TX_COAL_PKTS 16 |
133 | #define MLX4_EN_TX_COAL_TIME 0x10 | 133 | #define MLX4_EN_TX_COAL_TIME 0x10 |
134 | 134 | ||
135 | #define MLX4_EN_MAX_COAL_PKTS U16_MAX | ||
136 | #define MLX4_EN_MAX_COAL_TIME U16_MAX | ||
137 | |||
135 | #define MLX4_EN_RX_RATE_LOW 400000 | 138 | #define MLX4_EN_RX_RATE_LOW 400000 |
136 | #define MLX4_EN_RX_COAL_TIME_LOW 0 | 139 | #define MLX4_EN_RX_COAL_TIME_LOW 0 |
137 | #define MLX4_EN_RX_RATE_HIGH 450000 | 140 | #define MLX4_EN_RX_RATE_HIGH 450000 |
@@ -552,8 +555,8 @@ struct mlx4_en_priv { | |||
552 | u16 rx_usecs_low; | 555 | u16 rx_usecs_low; |
553 | u32 pkt_rate_high; | 556 | u32 pkt_rate_high; |
554 | u16 rx_usecs_high; | 557 | u16 rx_usecs_high; |
555 | u16 sample_interval; | 558 | u32 sample_interval; |
556 | u16 adaptive_rx_coal; | 559 | u32 adaptive_rx_coal; |
557 | u32 msg_enable; | 560 | u32 msg_enable; |
558 | u32 loopback_ok; | 561 | u32 loopback_ok; |
559 | u32 validate_loopback; | 562 | u32 validate_loopback; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3c534fc43400..b94276db3ce9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -1261,6 +1261,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
1261 | f->mask); | 1261 | f->mask); |
1262 | addr_type = key->addr_type; | 1262 | addr_type = key->addr_type; |
1263 | 1263 | ||
1264 | /* the HW doesn't support frag first/later */ | ||
1265 | if (mask->flags & FLOW_DIS_FIRST_FRAG) | ||
1266 | return -EOPNOTSUPP; | ||
1267 | |||
1264 | if (mask->flags & FLOW_DIS_IS_FRAGMENT) { | 1268 | if (mask->flags & FLOW_DIS_IS_FRAGMENT) { |
1265 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); | 1269 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); |
1266 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, | 1270 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c1c94974e16b..1814f803bd2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -34,6 +34,9 @@ | |||
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/mlx5/driver.h> | 35 | #include <linux/mlx5/driver.h> |
36 | #include <linux/mlx5/cmd.h> | 36 | #include <linux/mlx5/cmd.h> |
37 | #ifdef CONFIG_RFS_ACCEL | ||
38 | #include <linux/cpu_rmap.h> | ||
39 | #endif | ||
37 | #include "mlx5_core.h" | 40 | #include "mlx5_core.h" |
38 | #include "fpga/core.h" | 41 | #include "fpga/core.h" |
39 | #include "eswitch.h" | 42 | #include "eswitch.h" |
@@ -923,3 +926,28 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, | |||
923 | MLX5_SET(query_eq_in, in, eq_number, eq->eqn); | 926 | MLX5_SET(query_eq_in, in, eq_number, eq->eqn); |
924 | return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); | 927 | return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); |
925 | } | 928 | } |
929 | |||
930 | /* This function should only be called after mlx5_cmd_force_teardown_hca */ | ||
931 | void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) | ||
932 | { | ||
933 | struct mlx5_eq_table *table = &dev->priv.eq_table; | ||
934 | struct mlx5_eq *eq; | ||
935 | |||
936 | #ifdef CONFIG_RFS_ACCEL | ||
937 | if (dev->rmap) { | ||
938 | free_irq_cpu_rmap(dev->rmap); | ||
939 | dev->rmap = NULL; | ||
940 | } | ||
941 | #endif | ||
942 | list_for_each_entry(eq, &table->comp_eqs_list, list) | ||
943 | free_irq(eq->irqn, eq); | ||
944 | |||
945 | free_irq(table->pages_eq.irqn, &table->pages_eq); | ||
946 | free_irq(table->async_eq.irqn, &table->async_eq); | ||
947 | free_irq(table->cmd_eq.irqn, &table->cmd_eq); | ||
948 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
949 | if (MLX5_CAP_GEN(dev, pg)) | ||
950 | free_irq(table->pfault_eq.irqn, &table->pfault_eq); | ||
951 | #endif | ||
952 | pci_free_irq_vectors(dev->pdev); | ||
953 | } | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 332bc56306bf..1352d13eedb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
@@ -2175,26 +2175,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, | |||
2175 | memset(vf_stats, 0, sizeof(*vf_stats)); | 2175 | memset(vf_stats, 0, sizeof(*vf_stats)); |
2176 | vf_stats->rx_packets = | 2176 | vf_stats->rx_packets = |
2177 | MLX5_GET_CTR(out, received_eth_unicast.packets) + | 2177 | MLX5_GET_CTR(out, received_eth_unicast.packets) + |
2178 | MLX5_GET_CTR(out, received_ib_unicast.packets) + | ||
2178 | MLX5_GET_CTR(out, received_eth_multicast.packets) + | 2179 | MLX5_GET_CTR(out, received_eth_multicast.packets) + |
2180 | MLX5_GET_CTR(out, received_ib_multicast.packets) + | ||
2179 | MLX5_GET_CTR(out, received_eth_broadcast.packets); | 2181 | MLX5_GET_CTR(out, received_eth_broadcast.packets); |
2180 | 2182 | ||
2181 | vf_stats->rx_bytes = | 2183 | vf_stats->rx_bytes = |
2182 | MLX5_GET_CTR(out, received_eth_unicast.octets) + | 2184 | MLX5_GET_CTR(out, received_eth_unicast.octets) + |
2185 | MLX5_GET_CTR(out, received_ib_unicast.octets) + | ||
2183 | MLX5_GET_CTR(out, received_eth_multicast.octets) + | 2186 | MLX5_GET_CTR(out, received_eth_multicast.octets) + |
2187 | MLX5_GET_CTR(out, received_ib_multicast.octets) + | ||
2184 | MLX5_GET_CTR(out, received_eth_broadcast.octets); | 2188 | MLX5_GET_CTR(out, received_eth_broadcast.octets); |
2185 | 2189 | ||
2186 | vf_stats->tx_packets = | 2190 | vf_stats->tx_packets = |
2187 | MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + | 2191 | MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + |
2192 | MLX5_GET_CTR(out, transmitted_ib_unicast.packets) + | ||
2188 | MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + | 2193 | MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + |
2194 | MLX5_GET_CTR(out, transmitted_ib_multicast.packets) + | ||
2189 | MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); | 2195 | MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); |
2190 | 2196 | ||
2191 | vf_stats->tx_bytes = | 2197 | vf_stats->tx_bytes = |
2192 | MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + | 2198 | MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + |
2199 | MLX5_GET_CTR(out, transmitted_ib_unicast.octets) + | ||
2193 | MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + | 2200 | MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + |
2201 | MLX5_GET_CTR(out, transmitted_ib_multicast.octets) + | ||
2194 | MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); | 2202 | MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); |
2195 | 2203 | ||
2196 | vf_stats->multicast = | 2204 | vf_stats->multicast = |
2197 | MLX5_GET_CTR(out, received_eth_multicast.packets); | 2205 | MLX5_GET_CTR(out, received_eth_multicast.packets) + |
2206 | MLX5_GET_CTR(out, received_ib_multicast.packets); | ||
2198 | 2207 | ||
2199 | vf_stats->broadcast = | 2208 | vf_stats->broadcast = |
2200 | MLX5_GET_CTR(out, received_eth_broadcast.packets); | 2209 | MLX5_GET_CTR(out, received_eth_broadcast.packets); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 63a8ea31601c..e2c465b0b3f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -1587,6 +1587,14 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) | |||
1587 | 1587 | ||
1588 | mlx5_enter_error_state(dev, true); | 1588 | mlx5_enter_error_state(dev, true); |
1589 | 1589 | ||
1590 | /* Some platforms require freeing the IRQs in the shutdown | ||
1591 | * flow. If they aren't freed they can't be allocated after | ||
1592 | * kexec. There is no need to cleanup the mlx5_core software | ||
1593 | * contexts. | ||
1594 | */ | ||
1595 | mlx5_irq_clear_affinity_hints(dev); | ||
1596 | mlx5_core_eq_free_irqs(dev); | ||
1597 | |||
1590 | return 0; | 1598 | return 0; |
1591 | } | 1599 | } |
1592 | 1600 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 7d001fe6e631..023882d9a22e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
@@ -128,6 +128,8 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, | |||
128 | u32 *out, int outlen); | 128 | u32 *out, int outlen); |
129 | int mlx5_start_eqs(struct mlx5_core_dev *dev); | 129 | int mlx5_start_eqs(struct mlx5_core_dev *dev); |
130 | void mlx5_stop_eqs(struct mlx5_core_dev *dev); | 130 | void mlx5_stop_eqs(struct mlx5_core_dev *dev); |
131 | /* This function should only be called after mlx5_cmd_force_teardown_hca */ | ||
132 | void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev); | ||
131 | struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); | 133 | struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); |
132 | u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); | 134 | u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); |
133 | void mlx5_cq_tasklet_cb(unsigned long data); | 135 | void mlx5_cq_tasklet_cb(unsigned long data); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 93ea56620a24..e13ac3b8dff7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c | |||
@@ -1100,11 +1100,11 @@ err_emad_init: | |||
1100 | err_alloc_lag_mapping: | 1100 | err_alloc_lag_mapping: |
1101 | mlxsw_ports_fini(mlxsw_core); | 1101 | mlxsw_ports_fini(mlxsw_core); |
1102 | err_ports_init: | 1102 | err_ports_init: |
1103 | mlxsw_bus->fini(bus_priv); | ||
1104 | err_bus_init: | ||
1105 | if (!reload) | 1103 | if (!reload) |
1106 | devlink_resources_unregister(devlink, NULL); | 1104 | devlink_resources_unregister(devlink, NULL); |
1107 | err_register_resources: | 1105 | err_register_resources: |
1106 | mlxsw_bus->fini(bus_priv); | ||
1107 | err_bus_init: | ||
1108 | if (!reload) | 1108 | if (!reload) |
1109 | devlink_free(devlink); | 1109 | devlink_free(devlink); |
1110 | err_devlink_alloc: | 1110 | err_devlink_alloc: |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index a997e34bcec2..84e3b9f5abb1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c | |||
@@ -52,8 +52,6 @@ | |||
52 | 52 | ||
53 | #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL | 53 | #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL |
54 | 54 | ||
55 | #define NFP_FLOWER_FRAME_HEADROOM 158 | ||
56 | |||
57 | static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn) | 55 | static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn) |
58 | { | 56 | { |
59 | return "FLOWER"; | 57 | return "FLOWER"; |
@@ -559,22 +557,6 @@ static void nfp_flower_clean(struct nfp_app *app) | |||
559 | app->priv = NULL; | 557 | app->priv = NULL; |
560 | } | 558 | } |
561 | 559 | ||
562 | static int | ||
563 | nfp_flower_check_mtu(struct nfp_app *app, struct net_device *netdev, | ||
564 | int new_mtu) | ||
565 | { | ||
566 | /* The flower fw reserves NFP_FLOWER_FRAME_HEADROOM bytes of the | ||
567 | * supported max MTU to allow for appending tunnel headers. To prevent | ||
568 | * unexpected behaviour this needs to be accounted for. | ||
569 | */ | ||
570 | if (new_mtu > netdev->max_mtu - NFP_FLOWER_FRAME_HEADROOM) { | ||
571 | nfp_err(app->cpp, "New MTU (%d) is not valid\n", new_mtu); | ||
572 | return -EINVAL; | ||
573 | } | ||
574 | |||
575 | return 0; | ||
576 | } | ||
577 | |||
578 | static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv) | 560 | static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv) |
579 | { | 561 | { |
580 | bool ret; | 562 | bool ret; |
@@ -656,7 +638,6 @@ const struct nfp_app_type app_flower = { | |||
656 | .init = nfp_flower_init, | 638 | .init = nfp_flower_init, |
657 | .clean = nfp_flower_clean, | 639 | .clean = nfp_flower_clean, |
658 | 640 | ||
659 | .check_mtu = nfp_flower_check_mtu, | ||
660 | .repr_change_mtu = nfp_flower_repr_change_mtu, | 641 | .repr_change_mtu = nfp_flower_repr_change_mtu, |
661 | 642 | ||
662 | .vnic_alloc = nfp_flower_vnic_alloc, | 643 | .vnic_alloc = nfp_flower_vnic_alloc, |
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 27364b7572fc..b092894dd128 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c | |||
@@ -1170,7 +1170,7 @@ static void *nixge_get_nvmem_address(struct device *dev) | |||
1170 | 1170 | ||
1171 | cell = nvmem_cell_get(dev, "address"); | 1171 | cell = nvmem_cell_get(dev, "address"); |
1172 | if (IS_ERR(cell)) | 1172 | if (IS_ERR(cell)) |
1173 | return cell; | 1173 | return NULL; |
1174 | 1174 | ||
1175 | mac = nvmem_cell_read(cell, &cell_size); | 1175 | mac = nvmem_cell_read(cell, &cell_size); |
1176 | nvmem_cell_put(cell); | 1176 | nvmem_cell_put(cell); |
@@ -1183,7 +1183,7 @@ static int nixge_probe(struct platform_device *pdev) | |||
1183 | struct nixge_priv *priv; | 1183 | struct nixge_priv *priv; |
1184 | struct net_device *ndev; | 1184 | struct net_device *ndev; |
1185 | struct resource *dmares; | 1185 | struct resource *dmares; |
1186 | const char *mac_addr; | 1186 | const u8 *mac_addr; |
1187 | int err; | 1187 | int err; |
1188 | 1188 | ||
1189 | ndev = alloc_etherdev(sizeof(*priv)); | 1189 | ndev = alloc_etherdev(sizeof(*priv)); |
@@ -1202,10 +1202,12 @@ static int nixge_probe(struct platform_device *pdev) | |||
1202 | ndev->max_mtu = NIXGE_JUMBO_MTU; | 1202 | ndev->max_mtu = NIXGE_JUMBO_MTU; |
1203 | 1203 | ||
1204 | mac_addr = nixge_get_nvmem_address(&pdev->dev); | 1204 | mac_addr = nixge_get_nvmem_address(&pdev->dev); |
1205 | if (mac_addr && is_valid_ether_addr(mac_addr)) | 1205 | if (mac_addr && is_valid_ether_addr(mac_addr)) { |
1206 | ether_addr_copy(ndev->dev_addr, mac_addr); | 1206 | ether_addr_copy(ndev->dev_addr, mac_addr); |
1207 | else | 1207 | kfree(mac_addr); |
1208 | } else { | ||
1208 | eth_hw_addr_random(ndev); | 1209 | eth_hw_addr_random(ndev); |
1210 | } | ||
1209 | 1211 | ||
1210 | priv = netdev_priv(ndev); | 1212 | priv = netdev_priv(ndev); |
1211 | priv->ndev = ndev; | 1213 | priv->ndev = ndev; |
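nvmem_cell_read() hands back a kmalloc()'d buffer that the caller owns, so the probe path now frees it once the MAC has been copied, and the lookup helper returns NULL on error rather than an ERR_PTR that is_valid_ether_addr() would dereference. A self-contained sketch of the intended pattern (example_set_mac() is a made-up helper, not the driver's code):

	#include <linux/device.h>
	#include <linux/etherdevice.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/slab.h>

	static int example_set_mac(struct device *dev, struct net_device *ndev)
	{
		struct nvmem_cell *cell;
		size_t len;
		u8 *mac;

		cell = nvmem_cell_get(dev, "address");
		if (IS_ERR(cell))
			return PTR_ERR(cell);

		mac = nvmem_cell_read(cell, &len);	/* kmalloc()'d buffer */
		nvmem_cell_put(cell);
		if (IS_ERR(mac))
			return PTR_ERR(mac);

		if (len >= ETH_ALEN && is_valid_ether_addr(mac))
			ether_addr_copy(ndev->dev_addr, mac);
		else
			eth_hw_addr_random(ndev);

		kfree(mac);	/* caller owns the nvmem buffer */
		return 0;
	}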
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e874504e8b28..8667799d0069 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c | |||
@@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn) | |||
115 | 115 | ||
116 | void qed_l2_setup(struct qed_hwfn *p_hwfn) | 116 | void qed_l2_setup(struct qed_hwfn *p_hwfn) |
117 | { | 117 | { |
118 | if (p_hwfn->hw_info.personality != QED_PCI_ETH && | 118 | if (!QED_IS_L2_PERSONALITY(p_hwfn)) |
119 | p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) | ||
120 | return; | 119 | return; |
121 | 120 | ||
122 | mutex_init(&p_hwfn->p_l2_info->lock); | 121 | mutex_init(&p_hwfn->p_l2_info->lock); |
@@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn) | |||
126 | { | 125 | { |
127 | u32 i; | 126 | u32 i; |
128 | 127 | ||
129 | if (p_hwfn->hw_info.personality != QED_PCI_ETH && | 128 | if (!QED_IS_L2_PERSONALITY(p_hwfn)) |
130 | p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) | ||
131 | return; | 129 | return; |
132 | 130 | ||
133 | if (!p_hwfn->p_l2_info) | 131 | if (!p_hwfn->p_l2_info) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9854aa9139af..7870ae2a6f7e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -680,7 +680,7 @@ static int qed_nic_stop(struct qed_dev *cdev) | |||
680 | tasklet_disable(p_hwfn->sp_dpc); | 680 | tasklet_disable(p_hwfn->sp_dpc); |
681 | p_hwfn->b_sp_dpc_enabled = false; | 681 | p_hwfn->b_sp_dpc_enabled = false; |
682 | DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, | 682 | DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, |
683 | "Disabled sp taskelt [hwfn %d] at %p\n", | 683 | "Disabled sp tasklet [hwfn %d] at %p\n", |
684 | i, p_hwfn->sp_dpc); | 684 | i, p_hwfn->sp_dpc); |
685 | } | 685 | } |
686 | } | 686 | } |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 50b142fad6b8..1900bf7e67d1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c | |||
@@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev) | |||
238 | } | 238 | } |
239 | 239 | ||
240 | if (!found) { | 240 | if (!found) { |
241 | event_node = kzalloc(sizeof(*event_node), GFP_KERNEL); | 241 | event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC); |
242 | if (!event_node) { | 242 | if (!event_node) { |
243 | DP_NOTICE(edev, | 243 | DP_NOTICE(edev, |
244 | "qedr: Could not allocate memory for rdma work\n"); | 244 | "qedr: Could not allocate memory for rdma work\n"); |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 604ae78381ae..c7aac1fc99e8 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -4981,6 +4981,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) | |||
4981 | static void rtl_pll_power_up(struct rtl8169_private *tp) | 4981 | static void rtl_pll_power_up(struct rtl8169_private *tp) |
4982 | { | 4982 | { |
4983 | rtl_generic_op(tp, tp->pll_power_ops.up); | 4983 | rtl_generic_op(tp, tp->pll_power_ops.up); |
4984 | |||
4985 | /* give MAC/PHY some time to resume */ | ||
4986 | msleep(20); | ||
4984 | } | 4987 | } |
4985 | 4988 | ||
4986 | static void rtl_init_pll_power_ops(struct rtl8169_private *tp) | 4989 | static void rtl_init_pll_power_ops(struct rtl8169_private *tp) |
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f081de4f38d7..88c12474a0c3 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -3443,7 +3443,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, | |||
3443 | 3443 | ||
3444 | len = (val & RCR_ENTRY_L2_LEN) >> | 3444 | len = (val & RCR_ENTRY_L2_LEN) >> |
3445 | RCR_ENTRY_L2_LEN_SHIFT; | 3445 | RCR_ENTRY_L2_LEN_SHIFT; |
3446 | len -= ETH_FCS_LEN; | 3446 | append_size = len + ETH_HLEN + ETH_FCS_LEN; |
3447 | 3447 | ||
3448 | addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << | 3448 | addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << |
3449 | RCR_ENTRY_PKT_BUF_ADDR_SHIFT; | 3449 | RCR_ENTRY_PKT_BUF_ADDR_SHIFT; |
@@ -3453,7 +3453,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, | |||
3453 | RCR_ENTRY_PKTBUFSZ_SHIFT]; | 3453 | RCR_ENTRY_PKTBUFSZ_SHIFT]; |
3454 | 3454 | ||
3455 | off = addr & ~PAGE_MASK; | 3455 | off = addr & ~PAGE_MASK; |
3456 | append_size = rcr_size; | ||
3457 | if (num_rcr == 1) { | 3456 | if (num_rcr == 1) { |
3458 | int ptype; | 3457 | int ptype; |
3459 | 3458 | ||
@@ -3466,7 +3465,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, | |||
3466 | else | 3465 | else |
3467 | skb_checksum_none_assert(skb); | 3466 | skb_checksum_none_assert(skb); |
3468 | } else if (!(val & RCR_ENTRY_MULTI)) | 3467 | } else if (!(val & RCR_ENTRY_MULTI)) |
3469 | append_size = len - skb->len; | 3468 | append_size = append_size - skb->len; |
3470 | 3469 | ||
3471 | niu_rx_skb_append(skb, page, off, append_size, rcr_size); | 3470 | niu_rx_skb_append(skb, page, off, append_size, rcr_size); |
3472 | if ((page->index + rp->rbr_block_size) - rcr_size == addr) { | 3471 | if ((page->index + rp->rbr_block_size) - rcr_size == addr) { |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ecc84954c511..da07ccdf84bf 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -1840,7 +1840,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev, | |||
1840 | goto rx_handler_failed; | 1840 | goto rx_handler_failed; |
1841 | } | 1841 | } |
1842 | 1842 | ||
1843 | ret = netdev_upper_dev_link(vf_netdev, ndev, NULL); | 1843 | ret = netdev_master_upper_dev_link(vf_netdev, ndev, |
1844 | NULL, NULL, NULL); | ||
1844 | if (ret != 0) { | 1845 | if (ret != 0) { |
1845 | netdev_err(vf_netdev, | 1846 | netdev_err(vf_netdev, |
1846 | "can not set master device %s (err = %d)\n", | 1847 | "can not set master device %s (err = %d)\n", |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 6b127be781d9..e7ca5b5f39ed 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -1288,7 +1288,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | |||
1288 | rndis_device->link_state ? "down" : "up"); | 1288 | rndis_device->link_state ? "down" : "up"); |
1289 | 1289 | ||
1290 | if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) | 1290 | if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) |
1291 | return net_device; | 1291 | goto out; |
1292 | 1292 | ||
1293 | rndis_filter_query_link_speed(rndis_device, net_device); | 1293 | rndis_filter_query_link_speed(rndis_device, net_device); |
1294 | 1294 | ||
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 9fb9b565a002..4f684cbcdc57 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c | |||
@@ -1045,7 +1045,7 @@ static int atusb_probe(struct usb_interface *interface, | |||
1045 | atusb->tx_dr.bRequest = ATUSB_TX; | 1045 | atusb->tx_dr.bRequest = ATUSB_TX; |
1046 | atusb->tx_dr.wValue = cpu_to_le16(0); | 1046 | atusb->tx_dr.wValue = cpu_to_le16(0); |
1047 | 1047 | ||
1048 | atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC); | 1048 | atusb->tx_urb = usb_alloc_urb(0, GFP_KERNEL); |
1049 | if (!atusb->tx_urb) | 1049 | if (!atusb->tx_urb) |
1050 | goto fail; | 1050 | goto fail; |
1051 | 1051 | ||
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 55a22c761808..de0d7f28a181 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c | |||
@@ -1267,7 +1267,7 @@ mcr20a_probe(struct spi_device *spi) | |||
1267 | ret = mcr20a_get_platform_data(spi, pdata); | 1267 | ret = mcr20a_get_platform_data(spi, pdata); |
1268 | if (ret < 0) { | 1268 | if (ret < 0) { |
1269 | dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n"); | 1269 | dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n"); |
1270 | return ret; | 1270 | goto free_pdata; |
1271 | } | 1271 | } |
1272 | 1272 | ||
1273 | /* init reset gpio */ | 1273 | /* init reset gpio */ |
@@ -1275,7 +1275,7 @@ mcr20a_probe(struct spi_device *spi) | |||
1275 | ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio, | 1275 | ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio, |
1276 | GPIOF_OUT_INIT_HIGH, "reset"); | 1276 | GPIOF_OUT_INIT_HIGH, "reset"); |
1277 | if (ret) | 1277 | if (ret) |
1278 | return ret; | 1278 | goto free_pdata; |
1279 | } | 1279 | } |
1280 | 1280 | ||
1281 | /* reset mcr20a */ | 1281 | /* reset mcr20a */ |
@@ -1291,7 +1291,8 @@ mcr20a_probe(struct spi_device *spi) | |||
1291 | hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops); | 1291 | hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops); |
1292 | if (!hw) { | 1292 | if (!hw) { |
1293 | dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n"); | 1293 | dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n"); |
1294 | return -ENOMEM; | 1294 | ret = -ENOMEM; |
1295 | goto free_pdata; | ||
1295 | } | 1296 | } |
1296 | 1297 | ||
1297 | /* init mcr20a local data */ | 1298 | /* init mcr20a local data */ |
@@ -1308,8 +1309,10 @@ mcr20a_probe(struct spi_device *spi) | |||
1308 | /* init buf */ | 1309 | /* init buf */ |
1309 | lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL); | 1310 | lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL); |
1310 | 1311 | ||
1311 | if (!lp->buf) | 1312 | if (!lp->buf) { |
1312 | return -ENOMEM; | 1313 | ret = -ENOMEM; |
1314 | goto free_dev; | ||
1315 | } | ||
1313 | 1316 | ||
1314 | mcr20a_setup_tx_spi_messages(lp); | 1317 | mcr20a_setup_tx_spi_messages(lp); |
1315 | mcr20a_setup_rx_spi_messages(lp); | 1318 | mcr20a_setup_rx_spi_messages(lp); |
@@ -1366,6 +1369,8 @@ mcr20a_probe(struct spi_device *spi) | |||
1366 | 1369 | ||
1367 | free_dev: | 1370 | free_dev: |
1368 | ieee802154_free_hw(lp->hw); | 1371 | ieee802154_free_hw(lp->hw); |
1372 | free_pdata: | ||
1373 | kfree(pdata); | ||
1369 | 1374 | ||
1370 | return ret; | 1375 | return ret; |
1371 | } | 1376 | } |
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 3bb6b66dc7bf..f9c25912eb98 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c | |||
@@ -720,6 +720,15 @@ static struct phy_driver broadcom_drivers[] = { | |||
720 | .get_strings = bcm_phy_get_strings, | 720 | .get_strings = bcm_phy_get_strings, |
721 | .get_stats = bcm53xx_phy_get_stats, | 721 | .get_stats = bcm53xx_phy_get_stats, |
722 | .probe = bcm53xx_phy_probe, | 722 | .probe = bcm53xx_phy_probe, |
723 | }, { | ||
724 | .phy_id = PHY_ID_BCM89610, | ||
725 | .phy_id_mask = 0xfffffff0, | ||
726 | .name = "Broadcom BCM89610", | ||
727 | .features = PHY_GBIT_FEATURES, | ||
728 | .flags = PHY_HAS_INTERRUPT, | ||
729 | .config_init = bcm54xx_config_init, | ||
730 | .ack_interrupt = bcm_phy_ack_intr, | ||
731 | .config_intr = bcm_phy_config_intr, | ||
723 | } }; | 732 | } }; |
724 | 733 | ||
725 | module_phy_driver(broadcom_drivers); | 734 | module_phy_driver(broadcom_drivers); |
@@ -741,6 +750,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = { | |||
741 | { PHY_ID_BCMAC131, 0xfffffff0 }, | 750 | { PHY_ID_BCMAC131, 0xfffffff0 }, |
742 | { PHY_ID_BCM5241, 0xfffffff0 }, | 751 | { PHY_ID_BCM5241, 0xfffffff0 }, |
743 | { PHY_ID_BCM5395, 0xfffffff0 }, | 752 | { PHY_ID_BCM5395, 0xfffffff0 }, |
753 | { PHY_ID_BCM89610, 0xfffffff0 }, | ||
744 | { } | 754 | { } |
745 | }; | 755 | }; |
746 | 756 | ||
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 0381da78d228..fd6c23f69c2f 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
@@ -125,7 +125,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, | |||
125 | if (id->base.br_nominal) { | 125 | if (id->base.br_nominal) { |
126 | if (id->base.br_nominal != 255) { | 126 | if (id->base.br_nominal != 255) { |
127 | br_nom = id->base.br_nominal * 100; | 127 | br_nom = id->base.br_nominal * 100; |
128 | br_min = br_nom + id->base.br_nominal * id->ext.br_min; | 128 | br_min = br_nom - id->base.br_nominal * id->ext.br_min; |
129 | br_max = br_nom + id->base.br_nominal * id->ext.br_max; | 129 | br_max = br_nom + id->base.br_nominal * id->ext.br_max; |
130 | } else if (id->ext.br_max) { | 130 | } else if (id->ext.br_max) { |
131 | br_nom = 250 * id->ext.br_max; | 131 | br_nom = 250 * id->ext.br_max; |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 96d26cfae90b..4a017a0d71ea 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -3236,6 +3236,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
3236 | GENL_SET_ERR_MSG(info,"MAC is no valid source addr"); | 3236 | GENL_SET_ERR_MSG(info,"MAC is no valid source addr"); |
3237 | NL_SET_BAD_ATTR(info->extack, | 3237 | NL_SET_BAD_ATTR(info->extack, |
3238 | info->attrs[HWSIM_ATTR_PERM_ADDR]); | 3238 | info->attrs[HWSIM_ATTR_PERM_ADDR]); |
3239 | kfree(hwname); | ||
3239 | return -EINVAL; | 3240 | return -EINVAL; |
3240 | } | 3241 | } |
3241 | 3242 | ||
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index a3771c5729f5..99b857e5a7a9 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -99,6 +99,7 @@ static struct class *nvme_subsys_class; | |||
99 | 99 | ||
100 | static void nvme_ns_remove(struct nvme_ns *ns); | 100 | static void nvme_ns_remove(struct nvme_ns *ns); |
101 | static int nvme_revalidate_disk(struct gendisk *disk); | 101 | static int nvme_revalidate_disk(struct gendisk *disk); |
102 | static void nvme_put_subsystem(struct nvme_subsystem *subsys); | ||
102 | 103 | ||
103 | int nvme_reset_ctrl(struct nvme_ctrl *ctrl) | 104 | int nvme_reset_ctrl(struct nvme_ctrl *ctrl) |
104 | { | 105 | { |
@@ -117,7 +118,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) | |||
117 | ret = nvme_reset_ctrl(ctrl); | 118 | ret = nvme_reset_ctrl(ctrl); |
118 | if (!ret) { | 119 | if (!ret) { |
119 | flush_work(&ctrl->reset_work); | 120 | flush_work(&ctrl->reset_work); |
120 | if (ctrl->state != NVME_CTRL_LIVE) | 121 | if (ctrl->state != NVME_CTRL_LIVE && |
122 | ctrl->state != NVME_CTRL_ADMIN_ONLY) | ||
121 | ret = -ENETRESET; | 123 | ret = -ENETRESET; |
122 | } | 124 | } |
123 | 125 | ||
@@ -350,6 +352,7 @@ static void nvme_free_ns_head(struct kref *ref) | |||
350 | ida_simple_remove(&head->subsys->ns_ida, head->instance); | 352 | ida_simple_remove(&head->subsys->ns_ida, head->instance); |
351 | list_del_init(&head->entry); | 353 | list_del_init(&head->entry); |
352 | cleanup_srcu_struct(&head->srcu); | 354 | cleanup_srcu_struct(&head->srcu); |
355 | nvme_put_subsystem(head->subsys); | ||
353 | kfree(head); | 356 | kfree(head); |
354 | } | 357 | } |
355 | 358 | ||
@@ -2861,6 +2864,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, | |||
2861 | goto out_cleanup_srcu; | 2864 | goto out_cleanup_srcu; |
2862 | 2865 | ||
2863 | list_add_tail(&head->entry, &ctrl->subsys->nsheads); | 2866 | list_add_tail(&head->entry, &ctrl->subsys->nsheads); |
2867 | |||
2868 | kref_get(&ctrl->subsys->ref); | ||
2869 | |||
2864 | return head; | 2870 | return head; |
2865 | out_cleanup_srcu: | 2871 | out_cleanup_srcu: |
2866 | cleanup_srcu_struct(&head->srcu); | 2872 | cleanup_srcu_struct(&head->srcu); |
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 7ded7a51c430..17d2f7cf3fed 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -84,6 +84,11 @@ enum nvme_quirks { | |||
84 | * Supports the LightNVM command set if indicated in vs[1]. | 84 | * Supports the LightNVM command set if indicated in vs[1]. |
85 | */ | 85 | */ |
86 | NVME_QUIRK_LIGHTNVM = (1 << 6), | 86 | NVME_QUIRK_LIGHTNVM = (1 << 6), |
87 | |||
88 | /* | ||
89 | * Set MEDIUM priority on SQ creation | ||
90 | */ | ||
91 | NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), | ||
87 | }; | 92 | }; |
88 | 93 | ||
89 | /* | 94 | /* |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index fbc71fac6f1e..17a0190bd88f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1093,10 +1093,19 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, | |||
1093 | static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, | 1093 | static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, |
1094 | struct nvme_queue *nvmeq) | 1094 | struct nvme_queue *nvmeq) |
1095 | { | 1095 | { |
1096 | struct nvme_ctrl *ctrl = &dev->ctrl; | ||
1096 | struct nvme_command c; | 1097 | struct nvme_command c; |
1097 | int flags = NVME_QUEUE_PHYS_CONTIG; | 1098 | int flags = NVME_QUEUE_PHYS_CONTIG; |
1098 | 1099 | ||
1099 | /* | 1100 | /* |
1101 | * Some drives have a bug that auto-enables WRRU if MEDIUM isn't | ||
1102 | * set. Since URGENT priority is zero, it makes all queues | ||
1103 | * URGENT. | ||
1104 | */ | ||
1105 | if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) | ||
1106 | flags |= NVME_SQ_PRIO_MEDIUM; | ||
1107 | |||
1108 | /* | ||
1100 | * Note: we (ab)use the fact that the prp fields survive if no data | 1109 | * Note: we (ab)use the fact that the prp fields survive if no data |
1101 | * is attached to the request. | 1110 | * is attached to the request. |
1102 | */ | 1111 | */ |
@@ -2701,7 +2710,8 @@ static const struct pci_device_id nvme_id_table[] = { | |||
2701 | .driver_data = NVME_QUIRK_STRIPE_SIZE | | 2710 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
2702 | NVME_QUIRK_DEALLOCATE_ZEROES, }, | 2711 | NVME_QUIRK_DEALLOCATE_ZEROES, }, |
2703 | { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ | 2712 | { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ |
2704 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS }, | 2713 | .driver_data = NVME_QUIRK_NO_DEEPEST_PS | |
2714 | NVME_QUIRK_MEDIUM_PRIO_SQ }, | ||
2705 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ | 2715 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ |
2706 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, | 2716 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, |
2707 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ | 2717 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ |
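For orientation, the SQ priority sits in bits 2:1 of the Create I/O Submission Queue flags and URGENT is the all-zeroes encoding, which is why a drive that mishandles the default ends up treating every queue as URGENT. A hedged sketch of the encoding and the quirk check; the local names mirror the NVME_* constants in the hunks and should be checked against include/linux/nvme.h before reuse:

    #define QUIRK_MEDIUM_PRIO_SQ    (1 << 7)    /* mirrors NVME_QUIRK_MEDIUM_PRIO_SQ */

    /* QPRIO field, bits 2:1 of the queue flags:
     * 00b Urgent, 01b High, 10b Medium, 11b Low. */
    enum {
            SQ_PRIO_URGENT = (0 << 1),      /* the implicit all-zeroes default */
            SQ_PRIO_HIGH   = (1 << 1),
            SQ_PRIO_MEDIUM = (2 << 1),
            SQ_PRIO_LOW    = (3 << 1),
    };

    /* Quirked controllers get an explicit MEDIUM priority; everyone else
     * keeps the spec default. */
    static int sq_prio_flags(unsigned long quirks)
    {
            return (quirks & QUIRK_MEDIUM_PRIO_SQ) ? SQ_PRIO_MEDIUM : SQ_PRIO_URGENT;
    }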
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index b35fe88f1851..7baa53e5b1d7 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
@@ -102,12 +102,28 @@ static DEFINE_IDR(ovcs_idr); | |||
102 | 102 | ||
103 | static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain); | 103 | static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain); |
104 | 104 | ||
105 | /** | ||
106 | * of_overlay_notifier_register() - Register notifier for overlay operations | ||
107 | * @nb: Notifier block to register | ||
108 | * | ||
109 | * Register for notification on overlay operations on device tree nodes. The | ||
110 | * reported actions are defined by @of_reconfig_change. The notifier callback | ||
111 | * furthermore receives a pointer to the affected device tree node. | ||
112 | * | ||
113 | * Note that a notifier callback is not supposed to store pointers to a device | ||
114 | * tree node or its content beyond @OF_OVERLAY_POST_REMOVE corresponding to the | ||
115 | * respective node it received. | ||
116 | */ | ||
105 | int of_overlay_notifier_register(struct notifier_block *nb) | 117 | int of_overlay_notifier_register(struct notifier_block *nb) |
106 | { | 118 | { |
107 | return blocking_notifier_chain_register(&overlay_notify_chain, nb); | 119 | return blocking_notifier_chain_register(&overlay_notify_chain, nb); |
108 | } | 120 | } |
109 | EXPORT_SYMBOL_GPL(of_overlay_notifier_register); | 121 | EXPORT_SYMBOL_GPL(of_overlay_notifier_register); |
110 | 122 | ||
123 | /** | ||
124 | * of_overlay_notifier_unregister() - Unregister notifier for overlay operations | ||
125 | * @nb: Notifier block to unregister | ||
126 | */ | ||
111 | int of_overlay_notifier_unregister(struct notifier_block *nb) | 127 | int of_overlay_notifier_unregister(struct notifier_block *nb) |
112 | { | 128 | { |
113 | return blocking_notifier_chain_unregister(&overlay_notify_chain, nb); | 129 | return blocking_notifier_chain_unregister(&overlay_notify_chain, nb); |
@@ -671,17 +687,13 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs) | |||
671 | of_node_put(ovcs->fragments[i].overlay); | 687 | of_node_put(ovcs->fragments[i].overlay); |
672 | } | 688 | } |
673 | kfree(ovcs->fragments); | 689 | kfree(ovcs->fragments); |
674 | |||
675 | /* | 690 | /* |
676 | * TODO | 691 | * There should be no live pointers into ovcs->overlay_tree and |
677 | * | 692 | * ovcs->fdt due to the policy that overlay notifiers are not allowed |
678 | * would like to: kfree(ovcs->overlay_tree); | 693 | * to retain pointers into the overlay devicetree. |
679 | * but can not since drivers may have pointers into this data | ||
680 | * | ||
681 | * would like to: kfree(ovcs->fdt); | ||
682 | * but can not since drivers may have pointers into this data | ||
683 | */ | 694 | */ |
684 | 695 | kfree(ovcs->overlay_tree); | |
696 | kfree(ovcs->fdt); | ||
685 | kfree(ovcs); | 697 | kfree(ovcs); |
686 | } | 698 | } |
687 | 699 | ||
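The comment rewrite above leans on the policy spelled out in the new kernel-doc: overlay notifier callbacks must not keep pointers into the overlay tree past OF_OVERLAY_POST_REMOVE, which is what makes the unconditional kfree() of ovcs->overlay_tree and ovcs->fdt safe. A hedged sketch of a notifier that follows that rule; only the OF_OVERLAY_* actions, the notify data type and the registration call come from the kernel API, the rest is illustrative:

    static int my_overlay_notify(struct notifier_block *nb,
                                 unsigned long action, void *arg)
    {
            struct of_overlay_notify_data *nd = arg;

            switch (action) {
            case OF_OVERLAY_POST_APPLY:
                    /* Inspect nd->overlay here, but copy out anything that
                     * must outlive the overlay. */
                    break;
            case OF_OVERLAY_POST_REMOVE:
                    /* Last chance to drop state derived from nd->overlay;
                     * the tree may be freed once this returns. */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_overlay_nb = {
            .notifier_call = my_overlay_notify,
    };

    /* registration: of_overlay_notifier_register(&my_overlay_nb); */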
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a04197ce767d..dbfe7c4f3776 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1910,7 +1910,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1910 | EXPORT_SYMBOL(pci_pme_active); | 1910 | EXPORT_SYMBOL(pci_pme_active); |
1911 | 1911 | ||
1912 | /** | 1912 | /** |
1913 | * pci_enable_wake - enable PCI device as wakeup event source | 1913 | * __pci_enable_wake - enable PCI device as wakeup event source |
1914 | * @dev: PCI device affected | 1914 | * @dev: PCI device affected |
1915 | * @state: PCI state from which device will issue wakeup events | 1915 | * @state: PCI state from which device will issue wakeup events |
1916 | * @enable: True to enable event generation; false to disable | 1916 | * @enable: True to enable event generation; false to disable |
@@ -1928,7 +1928,7 @@ EXPORT_SYMBOL(pci_pme_active); | |||
1928 | * Error code depending on the platform is returned if both the platform and | 1928 | * Error code depending on the platform is returned if both the platform and |
1929 | * the native mechanism fail to enable the generation of wake-up events | 1929 | * the native mechanism fail to enable the generation of wake-up events |
1930 | */ | 1930 | */ |
1931 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | 1931 | static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) |
1932 | { | 1932 | { |
1933 | int ret = 0; | 1933 | int ret = 0; |
1934 | 1934 | ||
@@ -1969,6 +1969,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) | |||
1969 | 1969 | ||
1970 | return ret; | 1970 | return ret; |
1971 | } | 1971 | } |
1972 | |||
1973 | /** | ||
1974 | * pci_enable_wake - change wakeup settings for a PCI device | ||
1975 | * @pci_dev: Target device | ||
1976 | * @state: PCI state from which device will issue wakeup events | ||
1977 | * @enable: Whether or not to enable event generation | ||
1978 | * | ||
1979 | * If @enable is set, check device_may_wakeup() for the device before calling | ||
1980 | * __pci_enable_wake() for it. | ||
1981 | */ | ||
1982 | int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) | ||
1983 | { | ||
1984 | if (enable && !device_may_wakeup(&pci_dev->dev)) | ||
1985 | return -EINVAL; | ||
1986 | |||
1987 | return __pci_enable_wake(pci_dev, state, enable); | ||
1988 | } | ||
1972 | EXPORT_SYMBOL(pci_enable_wake); | 1989 | EXPORT_SYMBOL(pci_enable_wake); |
1973 | 1990 | ||
1974 | /** | 1991 | /** |
@@ -1981,9 +1998,9 @@ EXPORT_SYMBOL(pci_enable_wake); | |||
1981 | * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI | 1998 | * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI |
1982 | * ordering constraints. | 1999 | * ordering constraints. |
1983 | * | 2000 | * |
1984 | * This function only returns error code if the device is not capable of | 2001 | * This function only returns error code if the device is not allowed to wake |
1985 | * generating PME# from both D3_hot and D3_cold, and the platform is unable to | 2002 | * up the system from sleep or it is not capable of generating PME# from both |
1986 | * enable wake-up power for it. | 2003 | * D3_hot and D3_cold and the platform is unable to enable wake-up power for it. |
1987 | */ | 2004 | */ |
1988 | int pci_wake_from_d3(struct pci_dev *dev, bool enable) | 2005 | int pci_wake_from_d3(struct pci_dev *dev, bool enable) |
1989 | { | 2006 | { |
@@ -2114,7 +2131,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev) | |||
2114 | 2131 | ||
2115 | dev->runtime_d3cold = target_state == PCI_D3cold; | 2132 | dev->runtime_d3cold = target_state == PCI_D3cold; |
2116 | 2133 | ||
2117 | pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); | 2134 | __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); |
2118 | 2135 | ||
2119 | error = pci_set_power_state(dev, target_state); | 2136 | error = pci_set_power_state(dev, target_state); |
2120 | 2137 | ||
@@ -2138,16 +2155,16 @@ bool pci_dev_run_wake(struct pci_dev *dev) | |||
2138 | { | 2155 | { |
2139 | struct pci_bus *bus = dev->bus; | 2156 | struct pci_bus *bus = dev->bus; |
2140 | 2157 | ||
2141 | if (device_can_wakeup(&dev->dev)) | ||
2142 | return true; | ||
2143 | |||
2144 | if (!dev->pme_support) | 2158 | if (!dev->pme_support) |
2145 | return false; | 2159 | return false; |
2146 | 2160 | ||
2147 | /* PME-capable in principle, but not from the target power state */ | 2161 | /* PME-capable in principle, but not from the target power state */ |
2148 | if (!pci_pme_capable(dev, pci_target_state(dev, false))) | 2162 | if (!pci_pme_capable(dev, pci_target_state(dev, true))) |
2149 | return false; | 2163 | return false; |
2150 | 2164 | ||
2165 | if (device_can_wakeup(&dev->dev)) | ||
2166 | return true; | ||
2167 | |||
2151 | while (bus->parent) { | 2168 | while (bus->parent) { |
2152 | struct pci_dev *bridge = bus->self; | 2169 | struct pci_dev *bridge = bus->self; |
2153 | 2170 | ||
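The pci.c changes split the old helper in two: __pci_enable_wake() keeps the unconditional behaviour for internal callers such as pci_finish_runtime_suspend(), while the exported pci_enable_wake() now rejects arming wakeup for a device whose wakeup policy (the sysfs "wakeup" attribute) forbids it. A sketch of how a driver suspend hook typically combines capability and policy; this is an illustrative caller, not code from this patch:

    /* Arm PME only if policy allows it, then drop to D3hot. */
    static int my_pci_suspend(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            pci_save_state(pdev);
            pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(dev));
            return pci_set_power_state(pdev, PCI_D3hot);
    }

With the new check in place, a caller that passes enable=true for a device whose wakeup is disabled gets -EINVAL instead of PME being programmed behind the policy's back.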
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index b1ae1618fefe..fee9225ca559 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1622,22 +1622,30 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
1622 | 1622 | ||
1623 | if (!need_valid_mask) { | 1623 | if (!need_valid_mask) { |
1624 | irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, | 1624 | irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, |
1625 | chip->ngpio, NUMA_NO_NODE); | 1625 | community->npins, NUMA_NO_NODE); |
1626 | if (irq_base < 0) { | 1626 | if (irq_base < 0) { |
1627 | dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); | 1627 | dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); |
1628 | return irq_base; | 1628 | return irq_base; |
1629 | } | 1629 | } |
1630 | } else { | ||
1631 | irq_base = 0; | ||
1632 | } | 1630 | } |
1633 | 1631 | ||
1634 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base, | 1632 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, |
1635 | handle_bad_irq, IRQ_TYPE_NONE); | 1633 | handle_bad_irq, IRQ_TYPE_NONE); |
1636 | if (ret) { | 1634 | if (ret) { |
1637 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); | 1635 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); |
1638 | return ret; | 1636 | return ret; |
1639 | } | 1637 | } |
1640 | 1638 | ||
1639 | if (!need_valid_mask) { | ||
1640 | for (i = 0; i < community->ngpio_ranges; i++) { | ||
1641 | range = &community->gpio_ranges[i]; | ||
1642 | |||
1643 | irq_domain_associate_many(chip->irq.domain, irq_base, | ||
1644 | range->base, range->npins); | ||
1645 | irq_base += range->npins; | ||
1646 | } | ||
1647 | } | ||
1648 | |||
1641 | gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq, | 1649 | gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq, |
1642 | chv_gpio_irq_handler); | 1650 | chv_gpio_irq_handler); |
1643 | return 0; | 1651 | return 0; |
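With the hunk above, gpiochip_irqchip_add() always starts its own mapping at 0 and, when the whole pin space is usable (!need_valid_mask), the driver then binds the preallocated descriptors to the hardware numbers range by range with irq_domain_associate_many(), so gaps in the pin numbering do not leave holes in the Linux IRQ range. A reduced example of that association step with made-up ranges (two blocks of hwirqs with a hole between them):

    /* Illustrative: bind two hardware ranges with a hole between them to a
     * contiguous block of Linux IRQs starting at irq_base. */
    static void associate_ranges(struct gpio_chip *chip, unsigned int irq_base)
    {
            unsigned int virq = irq_base;

            irq_domain_associate_many(chip->irq.domain, virq, 0, 15);      /* hwirqs 0..14 */
            virq += 15;
            irq_domain_associate_many(chip->irq.domain, virq, 45, 30);     /* hwirqs 45..74 */
    }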
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 8870a4100164..fee3435a6f15 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c | |||
@@ -36,6 +36,27 @@ | |||
36 | .npins = ((e) - (s) + 1), \ | 36 | .npins = ((e) - (s) + 1), \ |
37 | } | 37 | } |
38 | 38 | ||
39 | #define SPTH_GPP(r, s, e, g) \ | ||
40 | { \ | ||
41 | .reg_num = (r), \ | ||
42 | .base = (s), \ | ||
43 | .size = ((e) - (s) + 1), \ | ||
44 | .gpio_base = (g), \ | ||
45 | } | ||
46 | |||
47 | #define SPTH_COMMUNITY(b, s, e, g) \ | ||
48 | { \ | ||
49 | .barno = (b), \ | ||
50 | .padown_offset = SPT_PAD_OWN, \ | ||
51 | .padcfglock_offset = SPT_PADCFGLOCK, \ | ||
52 | .hostown_offset = SPT_HOSTSW_OWN, \ | ||
53 | .ie_offset = SPT_GPI_IE, \ | ||
54 | .pin_base = (s), \ | ||
55 | .npins = ((e) - (s) + 1), \ | ||
56 | .gpps = (g), \ | ||
57 | .ngpps = ARRAY_SIZE(g), \ | ||
58 | } | ||
59 | |||
39 | /* Sunrisepoint-LP */ | 60 | /* Sunrisepoint-LP */ |
40 | static const struct pinctrl_pin_desc sptlp_pins[] = { | 61 | static const struct pinctrl_pin_desc sptlp_pins[] = { |
41 | /* GPP_A */ | 62 | /* GPP_A */ |
@@ -531,10 +552,28 @@ static const struct intel_function spth_functions[] = { | |||
531 | FUNCTION("i2c2", spth_i2c2_groups), | 552 | FUNCTION("i2c2", spth_i2c2_groups), |
532 | }; | 553 | }; |
533 | 554 | ||
555 | static const struct intel_padgroup spth_community0_gpps[] = { | ||
556 | SPTH_GPP(0, 0, 23, 0), /* GPP_A */ | ||
557 | SPTH_GPP(1, 24, 47, 24), /* GPP_B */ | ||
558 | }; | ||
559 | |||
560 | static const struct intel_padgroup spth_community1_gpps[] = { | ||
561 | SPTH_GPP(0, 48, 71, 48), /* GPP_C */ | ||
562 | SPTH_GPP(1, 72, 95, 72), /* GPP_D */ | ||
563 | SPTH_GPP(2, 96, 108, 96), /* GPP_E */ | ||
564 | SPTH_GPP(3, 109, 132, 120), /* GPP_F */ | ||
565 | SPTH_GPP(4, 133, 156, 144), /* GPP_G */ | ||
566 | SPTH_GPP(5, 157, 180, 168), /* GPP_H */ | ||
567 | }; | ||
568 | |||
569 | static const struct intel_padgroup spth_community3_gpps[] = { | ||
570 | SPTH_GPP(0, 181, 191, 192), /* GPP_I */ | ||
571 | }; | ||
572 | |||
534 | static const struct intel_community spth_communities[] = { | 573 | static const struct intel_community spth_communities[] = { |
535 | SPT_COMMUNITY(0, 0, 47), | 574 | SPTH_COMMUNITY(0, 0, 47, spth_community0_gpps), |
536 | SPT_COMMUNITY(1, 48, 180), | 575 | SPTH_COMMUNITY(1, 48, 180, spth_community1_gpps), |
537 | SPT_COMMUNITY(2, 181, 191), | 576 | SPTH_COMMUNITY(2, 181, 191, spth_community3_gpps), |
538 | }; | 577 | }; |
539 | 578 | ||
540 | static const struct intel_pinctrl_soc_data spth_soc_data = { | 579 | static const struct intel_pinctrl_soc_data spth_soc_data = { |
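The SPTH_GPP() rows carve the pin space into pad groups and pin each group's GPIO numbering to a 24-wide window (fourth argument), so a short group like GPP_E (13 pins) still leaves GPP_F starting at GPIO 120 rather than 109. A small helper showing the arithmetic implied by the table; the function name is illustrative:

    /* GPIO number for a pin inside its pad group:
     * e.g. pin 109 (first pin of GPP_F) -> 120 + (109 - 109) = 120. */
    static unsigned int spth_pin_to_gpio(const struct intel_padgroup *gpp,
                                         unsigned int pin)
    {
            return gpp->gpio_base + (pin - gpp->base);
    }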
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c index 4b91ff74779b..99a6ceac8e53 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c | |||
@@ -898,7 +898,7 @@ static struct meson_bank meson_axg_periphs_banks[] = { | |||
898 | 898 | ||
899 | static struct meson_bank meson_axg_aobus_banks[] = { | 899 | static struct meson_bank meson_axg_aobus_banks[] = { |
900 | /* name first last irq pullen pull dir out in */ | 900 | /* name first last irq pullen pull dir out in */ |
901 | BANK("AO", GPIOAO_0, GPIOAO_9, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), | 901 | BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0), |
902 | }; | 902 | }; |
903 | 903 | ||
904 | static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = { | 904 | static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = { |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index bc309c5327ff..566644bb496a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -168,8 +168,8 @@ config DELL_WMI | |||
168 | depends on DMI | 168 | depends on DMI |
169 | depends on INPUT | 169 | depends on INPUT |
170 | depends on ACPI_VIDEO || ACPI_VIDEO = n | 170 | depends on ACPI_VIDEO || ACPI_VIDEO = n |
171 | depends on DELL_SMBIOS | ||
171 | select DELL_WMI_DESCRIPTOR | 172 | select DELL_WMI_DESCRIPTOR |
172 | select DELL_SMBIOS | ||
173 | select INPUT_SPARSEKMAP | 173 | select INPUT_SPARSEKMAP |
174 | ---help--- | 174 | ---help--- |
175 | Say Y here if you want to support WMI-based hotkeys on Dell laptops. | 175 | Say Y here if you want to support WMI-based hotkeys on Dell laptops. |
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 439991d71b14..4c14ce428e92 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) | |||
141 | int i; | 141 | int i; |
142 | 142 | ||
143 | for (i = 0; i < nr_queues; i++) { | 143 | for (i = 0; i < nr_queues; i++) { |
144 | q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); | 144 | q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL); |
145 | if (!q) | 145 | if (!q) |
146 | return -ENOMEM; | 146 | return -ENOMEM; |
147 | 147 | ||
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
456 | { | 456 | { |
457 | struct ciw *ciw; | 457 | struct ciw *ciw; |
458 | struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; | 458 | struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; |
459 | int rc; | ||
460 | 459 | ||
461 | memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); | 460 | memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); |
462 | memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); | 461 | memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); |
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
493 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); | 492 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); |
494 | if (!ciw) { | 493 | if (!ciw) { |
495 | DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); | 494 | DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); |
496 | rc = -EINVAL; | 495 | return -EINVAL; |
497 | goto out_err; | ||
498 | } | 496 | } |
499 | irq_ptr->equeue = *ciw; | 497 | irq_ptr->equeue = *ciw; |
500 | 498 | ||
501 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); | 499 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); |
502 | if (!ciw) { | 500 | if (!ciw) { |
503 | DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); | 501 | DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); |
504 | rc = -EINVAL; | 502 | return -EINVAL; |
505 | goto out_err; | ||
506 | } | 503 | } |
507 | irq_ptr->aqueue = *ciw; | 504 | irq_ptr->aqueue = *ciw; |
508 | 505 | ||
@@ -512,9 +509,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
512 | init_data->cdev->handler = qdio_int_handler; | 509 | init_data->cdev->handler = qdio_int_handler; |
513 | spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); | 510 | spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); |
514 | return 0; | 511 | return 0; |
515 | out_err: | ||
516 | qdio_release_memory(irq_ptr); | ||
517 | return rc; | ||
518 | } | 512 | } |
519 | 513 | ||
520 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | 514 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, |
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 2c7550797ec2..dce92b2a895d 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c | |||
@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp) | |||
715 | * and stores the result to ccwchain list. @cp must have been | 715 | * and stores the result to ccwchain list. @cp must have been |
716 | * initialized by a previous call with cp_init(). Otherwise, undefined | 716 | * initialized by a previous call with cp_init(). Otherwise, undefined |
717 | * behavior occurs. | 717 | * behavior occurs. |
718 | * For each chain composing the channel program: | ||
719 | * - On entry ch_len holds the count of CCWs to be translated. | ||
720 | * - On exit ch_len is adjusted to the count of successfully translated CCWs. | ||
721 | * This allows cp_free to find in ch_len the count of CCWs to free in a chain. | ||
718 | * | 722 | * |
719 | * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced | 723 | * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced |
720 | * as helpers to do ccw chain translation inside the kernel. Basically | 724 | * as helpers to do ccw chain translation inside the kernel. Basically |
@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp) | |||
749 | for (idx = 0; idx < len; idx++) { | 753 | for (idx = 0; idx < len; idx++) { |
750 | ret = ccwchain_fetch_one(chain, idx, cp); | 754 | ret = ccwchain_fetch_one(chain, idx, cp); |
751 | if (ret) | 755 | if (ret) |
752 | return ret; | 756 | goto out_err; |
753 | } | 757 | } |
754 | } | 758 | } |
755 | 759 | ||
756 | return 0; | 760 | return 0; |
761 | out_err: | ||
762 | /* Only clean up the chain elements that were actually translated. */ | ||
763 | chain->ch_len = idx; | ||
764 | list_for_each_entry_continue(chain, &cp->ccwchain_list, next) { | ||
765 | chain->ch_len = 0; | ||
766 | } | ||
767 | return ret; | ||
757 | } | 768 | } |
758 | 769 | ||
759 | /** | 770 | /** |
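The cp_prefetch() error path now records how far translation got: the failing chain keeps ch_len equal to the number of CCWs actually translated, and every later chain is marked empty, so cp_free() only unpins what was really set up. The two hunks, consolidated into one sketch with translate_one() standing in for ccwchain_fetch_one():

    static int prefetch_all(struct channel_program *cp)
    {
            struct ccwchain *chain;
            int len, idx, ret = 0;

            list_for_each_entry(chain, &cp->ccwchain_list, next) {
                    len = chain->ch_len;
                    for (idx = 0; idx < len; idx++) {
                            ret = translate_one(chain, idx, cp);
                            if (ret)
                                    goto out_err;
                    }
            }
            return 0;

    out_err:
            /* Trim the failing chain to the translated count and mark the
             * untouched chains empty so the free path skips them. */
            chain->ch_len = idx;
            list_for_each_entry_continue(chain, &cp->ccwchain_list, next)
                    chain->ch_len = 0;
            return ret;
    }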
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 0156c9623c35..d62ddd63f4fe 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -724,6 +724,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, | |||
724 | int wait; | 724 | int wait; |
725 | unsigned long flags = 0; | 725 | unsigned long flags = 0; |
726 | unsigned long mflags = 0; | 726 | unsigned long mflags = 0; |
727 | struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *) | ||
728 | fibptr->hw_fib_va; | ||
727 | 729 | ||
728 | fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); | 730 | fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); |
729 | if (callback) { | 731 | if (callback) { |
@@ -734,11 +736,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, | |||
734 | wait = 1; | 736 | wait = 1; |
735 | 737 | ||
736 | 738 | ||
737 | if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { | 739 | hbacmd->iu_type = command; |
738 | struct aac_hba_cmd_req *hbacmd = | ||
739 | (struct aac_hba_cmd_req *)fibptr->hw_fib_va; | ||
740 | 740 | ||
741 | hbacmd->iu_type = command; | 741 | if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { |
742 | /* bit1 of request_id must be 0 */ | 742 | /* bit1 of request_id must be 0 */ |
743 | hbacmd->request_id = | 743 | hbacmd->request_id = |
744 | cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); | 744 | cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); |
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index c374e3b5c678..777e5f1e52d1 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
@@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, | |||
609 | break; | 609 | break; |
610 | 610 | ||
611 | case BTSTAT_ABORTQUEUE: | 611 | case BTSTAT_ABORTQUEUE: |
612 | cmd->result = (DID_ABORT << 16); | 612 | cmd->result = (DID_BUS_BUSY << 16); |
613 | break; | 613 | break; |
614 | 614 | ||
615 | case BTSTAT_SCSIPARITY: | 615 | case BTSTAT_SCSIPARITY: |
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 1596d35498c5..6573152ce893 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c | |||
@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi, | |||
490 | 490 | ||
491 | static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) | 491 | static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) |
492 | { | 492 | { |
493 | if (!has_bspi(qspi) || (qspi->bspi_enabled)) | 493 | if (!has_bspi(qspi)) |
494 | return; | 494 | return; |
495 | 495 | ||
496 | qspi->bspi_enabled = 1; | 496 | qspi->bspi_enabled = 1; |
@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) | |||
505 | 505 | ||
506 | static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) | 506 | static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) |
507 | { | 507 | { |
508 | if (!has_bspi(qspi) || (!qspi->bspi_enabled)) | 508 | if (!has_bspi(qspi)) |
509 | return; | 509 | return; |
510 | 510 | ||
511 | qspi->bspi_enabled = 0; | 511 | qspi->bspi_enabled = 0; |
@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) | |||
519 | 519 | ||
520 | static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) | 520 | static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) |
521 | { | 521 | { |
522 | u32 data = 0; | 522 | u32 rd = 0; |
523 | u32 wr = 0; | ||
523 | 524 | ||
524 | if (qspi->curr_cs == cs) | ||
525 | return; | ||
526 | if (qspi->base[CHIP_SELECT]) { | 525 | if (qspi->base[CHIP_SELECT]) { |
527 | data = bcm_qspi_read(qspi, CHIP_SELECT, 0); | 526 | rd = bcm_qspi_read(qspi, CHIP_SELECT, 0); |
528 | data = (data & ~0xff) | (1 << cs); | 527 | wr = (rd & ~0xff) | (1 << cs); |
529 | bcm_qspi_write(qspi, CHIP_SELECT, 0, data); | 528 | if (rd == wr) |
529 | return; | ||
530 | bcm_qspi_write(qspi, CHIP_SELECT, 0, wr); | ||
530 | usleep_range(10, 20); | 531 | usleep_range(10, 20); |
531 | } | 532 | } |
533 | |||
534 | dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs); | ||
532 | qspi->curr_cs = cs; | 535 | qspi->curr_cs = cs; |
533 | } | 536 | } |
534 | 537 | ||
@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) | |||
755 | dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); | 758 | dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); |
756 | } | 759 | } |
757 | mspi_cdram = MSPI_CDRAM_CONT_BIT; | 760 | mspi_cdram = MSPI_CDRAM_CONT_BIT; |
758 | mspi_cdram |= (~(1 << spi->chip_select) & | 761 | |
759 | MSPI_CDRAM_PCS); | 762 | if (has_bspi(qspi)) |
763 | mspi_cdram &= ~1; | ||
764 | else | ||
765 | mspi_cdram |= (~(1 << spi->chip_select) & | ||
766 | MSPI_CDRAM_PCS); | ||
767 | |||
760 | mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 : | 768 | mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 : |
761 | MSPI_CDRAM_BITSE_BIT); | 769 | MSPI_CDRAM_BITSE_BIT); |
762 | 770 | ||
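Two separate fixes meet in the chip-select hunk: the register is updated from a read-modify-write of the live value rather than the cached curr_cs, and the write is skipped only when the computed value already matches the hardware. A generic version of that write-only-if-changed idiom; the helper is illustrative and not part of the driver:

    /* Update the low byte of a register only when it actually changes;
     * returns true if a write was issued. */
    static bool update_cs_field(void __iomem *reg, unsigned int cs)
    {
            u32 rd = readl(reg);
            u32 wr = (rd & ~0xff) | BIT(cs);

            if (wr == rd)
                    return false;
            writel(wr, reg);
            return true;
    }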
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 1431cb98fe40..3094d818cf06 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c | |||
@@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id) | |||
184 | struct bcm2835aux_spi *bs = spi_master_get_devdata(master); | 184 | struct bcm2835aux_spi *bs = spi_master_get_devdata(master); |
185 | irqreturn_t ret = IRQ_NONE; | 185 | irqreturn_t ret = IRQ_NONE; |
186 | 186 | ||
187 | /* IRQ may be shared, so return if our interrupts are disabled */ | ||
188 | if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) & | ||
189 | (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE))) | ||
190 | return ret; | ||
191 | |||
187 | /* check if we have data to read */ | 192 | /* check if we have data to read */ |
188 | while (bs->rx_len && | 193 | while (bs->rx_len && |
189 | (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & | 194 | (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & |
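The early return addresses a shared interrupt line: the AUX block's SPI and mini-UART share one IRQ, so the handler must report IRQ_NONE when none of its own enable bits are set instead of claiming the other device's interrupt. The canonical shape of a shared-IRQ handler, sketched with placeholder register and bit names:

    static irqreturn_t my_shared_irq(int irq, void *dev_id)
    {
            struct my_dev *dev = dev_id;
            u32 status = readl(dev->regs + MY_IRQ_STATUS);

            /* Not ours: let the other handlers on this line run. */
            if (!(status & MY_IRQ_SOURCES))
                    return IRQ_NONE;

            /* ... acknowledge and service the event ... */
            return IRQ_HANDLED;
    }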
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 5c9516ae4942..4a001634023e 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c | |||
@@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) | |||
313 | 313 | ||
314 | while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && | 314 | while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && |
315 | (xspi->tx_bytes > 0)) { | 315 | (xspi->tx_bytes > 0)) { |
316 | |||
317 | /* When the xspi is busy, bytes may fail to be sent and the SPI | ||
318 | * controller may not work properly, so add a one-byte delay | ||
319 | */ | ||
320 | if (cdns_spi_read(xspi, CDNS_SPI_ISR) & | ||
321 | CDNS_SPI_IXR_TXFULL) | ||
322 | usleep_range(10, 20); | ||
323 | |||
316 | if (xspi->txbuf) | 324 | if (xspi->txbuf) |
317 | cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); | 325 | cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); |
318 | else | 326 | else |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 6f57592a7f95..a056ee88a960 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -1701,7 +1701,7 @@ static struct platform_driver spi_imx_driver = { | |||
1701 | }; | 1701 | }; |
1702 | module_platform_driver(spi_imx_driver); | 1702 | module_platform_driver(spi_imx_driver); |
1703 | 1703 | ||
1704 | MODULE_DESCRIPTION("SPI Master Controller driver"); | 1704 | MODULE_DESCRIPTION("SPI Controller driver"); |
1705 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); | 1705 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); |
1706 | MODULE_LICENSE("GPL"); | 1706 | MODULE_LICENSE("GPL"); |
1707 | MODULE_ALIAS("platform:" DRIVER_NAME); | 1707 | MODULE_ALIAS("platform:" DRIVER_NAME); |
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 513ec6c6e25b..0ae7defd3492 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h | |||
@@ -38,7 +38,7 @@ struct driver_data { | |||
38 | 38 | ||
39 | /* SSP register addresses */ | 39 | /* SSP register addresses */ |
40 | void __iomem *ioaddr; | 40 | void __iomem *ioaddr; |
41 | u32 ssdr_physical; | 41 | phys_addr_t ssdr_physical; |
42 | 42 | ||
43 | /* SSP masks*/ | 43 | /* SSP masks*/ |
44 | u32 dma_cr1; | 44 | u32 dma_cr1; |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index ae086aab57d5..8171eedbfc90 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
@@ -283,6 +283,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, | |||
283 | } | 283 | } |
284 | 284 | ||
285 | k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); | 285 | k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); |
286 | brps = min_t(int, brps, 32); | ||
286 | 287 | ||
287 | scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps); | 288 | scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps); |
288 | sh_msiof_write(p, TSCR, scr); | 289 | sh_msiof_write(p, TSCR, scr); |
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c index 8a7f24dd9315..0c19fcd56a0d 100644 --- a/drivers/thermal/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c | |||
@@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv) | |||
194 | return -EFAULT; | 194 | return -EFAULT; |
195 | } | 195 | } |
196 | 196 | ||
197 | priv->priv = obj; | ||
197 | obj->max_state = p->package.count - 1; | 198 | obj->max_state = p->package.count - 1; |
198 | obj->cdev = | 199 | obj->cdev = |
199 | thermal_cooling_device_register(acpi_device_bid(priv->adev), | 200 | thermal_cooling_device_register(acpi_device_bid(priv->adev), |
@@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv) | |||
201 | if (IS_ERR(obj->cdev)) | 202 | if (IS_ERR(obj->cdev)) |
202 | result = PTR_ERR(obj->cdev); | 203 | result = PTR_ERR(obj->cdev); |
203 | 204 | ||
204 | priv->priv = obj; | ||
205 | |||
206 | kfree(buf.pointer); | 205 | kfree(buf.pointer); |
207 | /* TODO: add ACPI notification support */ | 206 | /* TODO: add ACPI notification support */ |
208 | 207 | ||
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index ed805c7c5ace..ac83f721db24 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c | |||
@@ -185,6 +185,7 @@ | |||
185 | * @regulator: pointer to the TMU regulator structure. | 185 | * @regulator: pointer to the TMU regulator structure. |
186 | * @reg_conf: pointer to structure to register with core thermal. | 186 | * @reg_conf: pointer to structure to register with core thermal. |
187 | * @ntrip: number of supported trip points. | 187 | * @ntrip: number of supported trip points. |
188 | * @enabled: current status of TMU device | ||
188 | * @tmu_initialize: SoC specific TMU initialization method | 189 | * @tmu_initialize: SoC specific TMU initialization method |
189 | * @tmu_control: SoC specific TMU control method | 190 | * @tmu_control: SoC specific TMU control method |
190 | * @tmu_read: SoC specific TMU temperature read method | 191 | * @tmu_read: SoC specific TMU temperature read method |
@@ -205,6 +206,7 @@ struct exynos_tmu_data { | |||
205 | struct regulator *regulator; | 206 | struct regulator *regulator; |
206 | struct thermal_zone_device *tzd; | 207 | struct thermal_zone_device *tzd; |
207 | unsigned int ntrip; | 208 | unsigned int ntrip; |
209 | bool enabled; | ||
208 | 210 | ||
209 | int (*tmu_initialize)(struct platform_device *pdev); | 211 | int (*tmu_initialize)(struct platform_device *pdev); |
210 | void (*tmu_control)(struct platform_device *pdev, bool on); | 212 | void (*tmu_control)(struct platform_device *pdev, bool on); |
@@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) | |||
398 | mutex_lock(&data->lock); | 400 | mutex_lock(&data->lock); |
399 | clk_enable(data->clk); | 401 | clk_enable(data->clk); |
400 | data->tmu_control(pdev, on); | 402 | data->tmu_control(pdev, on); |
403 | data->enabled = on; | ||
401 | clk_disable(data->clk); | 404 | clk_disable(data->clk); |
402 | mutex_unlock(&data->lock); | 405 | mutex_unlock(&data->lock); |
403 | } | 406 | } |
@@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) | |||
889 | static int exynos_get_temp(void *p, int *temp) | 892 | static int exynos_get_temp(void *p, int *temp) |
890 | { | 893 | { |
891 | struct exynos_tmu_data *data = p; | 894 | struct exynos_tmu_data *data = p; |
895 | int value, ret = 0; | ||
892 | 896 | ||
893 | if (!data || !data->tmu_read) | 897 | if (!data || !data->tmu_read || !data->enabled) |
894 | return -EINVAL; | 898 | return -EINVAL; |
895 | 899 | ||
896 | mutex_lock(&data->lock); | 900 | mutex_lock(&data->lock); |
897 | clk_enable(data->clk); | 901 | clk_enable(data->clk); |
898 | 902 | ||
899 | *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS; | 903 | value = data->tmu_read(data); |
904 | if (value < 0) | ||
905 | ret = value; | ||
906 | else | ||
907 | *temp = code_to_temp(data, value) * MCELSIUS; | ||
900 | 908 | ||
901 | clk_disable(data->clk); | 909 | clk_disable(data->clk); |
902 | mutex_unlock(&data->lock); | 910 | mutex_unlock(&data->lock); |
903 | 911 | ||
904 | return 0; | 912 | return ret; |
905 | } | 913 | } |
906 | 914 | ||
907 | #ifdef CONFIG_THERMAL_EMULATION | 915 | #ifdef CONFIG_THERMAL_EMULATION |
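exynos_get_temp() now refuses to touch the hardware while the TMU is disabled and forwards a negative value from tmu_read() instead of converting it as though it were a temperature code. The same "validate, read, propagate" shape in a driver-agnostic sketch (all names illustrative):

    static int my_get_temp(void *p, int *temp)
    {
            struct my_sensor *s = p;
            int value;

            if (!s || !s->enabled)
                    return -EINVAL;

            value = s->read_raw(s);
            if (value < 0)
                    return value;   /* propagate the hardware error */

            *temp = s->code_to_mcelsius(s, value);
            return 0;
    }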
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 72ebbc908e19..32cd52ca8318 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, | |||
354 | 354 | ||
355 | slot_id = 0; | 355 | slot_id = 0; |
356 | for (i = 0; i < MAX_HC_SLOTS; i++) { | 356 | for (i = 0; i < MAX_HC_SLOTS; i++) { |
357 | if (!xhci->devs[i]) | 357 | if (!xhci->devs[i] || !xhci->devs[i]->udev) |
358 | continue; | 358 | continue; |
359 | speed = xhci->devs[i]->udev->speed; | 359 | speed = xhci->devs[i]->udev->speed; |
360 | if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3)) | 360 | if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3)) |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index e7f99d55922a..15a42cee0a9c 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -2524,8 +2524,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd) | |||
2524 | { | 2524 | { |
2525 | struct musb *musb = hcd_to_musb(hcd); | 2525 | struct musb *musb = hcd_to_musb(hcd); |
2526 | u8 devctl; | 2526 | u8 devctl; |
2527 | int ret; | ||
2527 | 2528 | ||
2528 | musb_port_suspend(musb, true); | 2529 | ret = musb_port_suspend(musb, true); |
2530 | if (ret) | ||
2531 | return ret; | ||
2529 | 2532 | ||
2530 | if (!is_host_active(musb)) | 2533 | if (!is_host_active(musb)) |
2531 | return 0; | 2534 | return 0; |
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h index 72392bbcd0a4..2999845632ce 100644 --- a/drivers/usb/musb/musb_host.h +++ b/drivers/usb/musb/musb_host.h | |||
@@ -67,7 +67,7 @@ extern void musb_host_rx(struct musb *, u8); | |||
67 | extern void musb_root_disconnect(struct musb *musb); | 67 | extern void musb_root_disconnect(struct musb *musb); |
68 | extern void musb_host_resume_root_hub(struct musb *musb); | 68 | extern void musb_host_resume_root_hub(struct musb *musb); |
69 | extern void musb_host_poke_root_hub(struct musb *musb); | 69 | extern void musb_host_poke_root_hub(struct musb *musb); |
70 | extern void musb_port_suspend(struct musb *musb, bool do_suspend); | 70 | extern int musb_port_suspend(struct musb *musb, bool do_suspend); |
71 | extern void musb_port_reset(struct musb *musb, bool do_reset); | 71 | extern void musb_port_reset(struct musb *musb, bool do_reset); |
72 | extern void musb_host_finish_resume(struct work_struct *work); | 72 | extern void musb_host_finish_resume(struct work_struct *work); |
73 | #else | 73 | #else |
@@ -99,7 +99,10 @@ static inline void musb_root_disconnect(struct musb *musb) {} | |||
99 | static inline void musb_host_resume_root_hub(struct musb *musb) {} | 99 | static inline void musb_host_resume_root_hub(struct musb *musb) {} |
100 | static inline void musb_host_poll_rh_status(struct musb *musb) {} | 100 | static inline void musb_host_poll_rh_status(struct musb *musb) {} |
101 | static inline void musb_host_poke_root_hub(struct musb *musb) {} | 101 | static inline void musb_host_poke_root_hub(struct musb *musb) {} |
102 | static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {} | 102 | static inline int musb_port_suspend(struct musb *musb, bool do_suspend) |
103 | { | ||
104 | return 0; | ||
105 | } | ||
103 | static inline void musb_port_reset(struct musb *musb, bool do_reset) {} | 106 | static inline void musb_port_reset(struct musb *musb, bool do_reset) {} |
104 | static inline void musb_host_finish_resume(struct work_struct *work) {} | 107 | static inline void musb_host_finish_resume(struct work_struct *work) {} |
105 | #endif | 108 | #endif |
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 5165d2b07ade..2f8dd9826e94 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c | |||
@@ -48,14 +48,14 @@ void musb_host_finish_resume(struct work_struct *work) | |||
48 | spin_unlock_irqrestore(&musb->lock, flags); | 48 | spin_unlock_irqrestore(&musb->lock, flags); |
49 | } | 49 | } |
50 | 50 | ||
51 | void musb_port_suspend(struct musb *musb, bool do_suspend) | 51 | int musb_port_suspend(struct musb *musb, bool do_suspend) |
52 | { | 52 | { |
53 | struct usb_otg *otg = musb->xceiv->otg; | 53 | struct usb_otg *otg = musb->xceiv->otg; |
54 | u8 power; | 54 | u8 power; |
55 | void __iomem *mbase = musb->mregs; | 55 | void __iomem *mbase = musb->mregs; |
56 | 56 | ||
57 | if (!is_host_active(musb)) | 57 | if (!is_host_active(musb)) |
58 | return; | 58 | return 0; |
59 | 59 | ||
60 | /* NOTE: this doesn't necessarily put PHY into low power mode, | 60 | /* NOTE: this doesn't necessarily put PHY into low power mode, |
61 | * turning off its clock; that's a function of PHY integration and | 61 | * turning off its clock; that's a function of PHY integration and |
@@ -66,16 +66,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) | |||
66 | if (do_suspend) { | 66 | if (do_suspend) { |
67 | int retries = 10000; | 67 | int retries = 10000; |
68 | 68 | ||
69 | power &= ~MUSB_POWER_RESUME; | 69 | if (power & MUSB_POWER_RESUME) |
70 | power |= MUSB_POWER_SUSPENDM; | 70 | return -EBUSY; |
71 | musb_writeb(mbase, MUSB_POWER, power); | ||
72 | 71 | ||
73 | /* Needed for OPT A tests */ | 72 | if (!(power & MUSB_POWER_SUSPENDM)) { |
74 | power = musb_readb(mbase, MUSB_POWER); | 73 | power |= MUSB_POWER_SUSPENDM; |
75 | while (power & MUSB_POWER_SUSPENDM) { | 74 | musb_writeb(mbase, MUSB_POWER, power); |
75 | |||
76 | /* Needed for OPT A tests */ | ||
76 | power = musb_readb(mbase, MUSB_POWER); | 77 | power = musb_readb(mbase, MUSB_POWER); |
77 | if (retries-- < 1) | 78 | while (power & MUSB_POWER_SUSPENDM) { |
78 | break; | 79 | power = musb_readb(mbase, MUSB_POWER); |
80 | if (retries-- < 1) | ||
81 | break; | ||
82 | } | ||
79 | } | 83 | } |
80 | 84 | ||
81 | musb_dbg(musb, "Root port suspended, power %02x", power); | 85 | musb_dbg(musb, "Root port suspended, power %02x", power); |
@@ -111,6 +115,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) | |||
111 | schedule_delayed_work(&musb->finish_resume_work, | 115 | schedule_delayed_work(&musb->finish_resume_work, |
112 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); | 116 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); |
113 | } | 117 | } |
118 | return 0; | ||
114 | } | 119 | } |
115 | 120 | ||
116 | void musb_port_reset(struct musb *musb, bool do_reset) | 121 | void musb_port_reset(struct musb *musb, bool do_reset) |
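Turning musb_port_suspend() into an int lets musb_bus_suspend() refuse a root-hub suspend while resume signalling is still in progress (-EBUSY) instead of silently clearing MUSB_POWER_RESUME, and the !HOST stub keeps the same signature so callers stay free of #ifdefs. The void-to-int conversion pattern, reduced to a generic sketch with hypothetical names:

    #ifdef CONFIG_MY_HOST
    int my_port_suspend(struct my_dev *dev, bool do_suspend);
    #else
    static inline int my_port_suspend(struct my_dev *dev, bool do_suspend)
    {
            return 0;       /* compiled out: nothing to do, never fails */
    }
    #endif

    static int my_bus_suspend(struct my_dev *dev)
    {
            int ret = my_port_suspend(dev, true);

            if (ret)
                    return ret;     /* e.g. -EBUSY while resume is pending */
            /* ... remainder of the suspend sequence ... */
            return 0;
    }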
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h index 14a72357800a..35618ceb2791 100644 --- a/drivers/usb/usbip/stub.h +++ b/drivers/usb/usbip/stub.h | |||
@@ -73,6 +73,7 @@ struct bus_id_priv { | |||
73 | struct stub_device *sdev; | 73 | struct stub_device *sdev; |
74 | struct usb_device *udev; | 74 | struct usb_device *udev; |
75 | char shutdown_busid; | 75 | char shutdown_busid; |
76 | spinlock_t busid_lock; | ||
76 | }; | 77 | }; |
77 | 78 | ||
78 | /* stub_priv is allocated from stub_priv_cache */ | 79 | /* stub_priv is allocated from stub_priv_cache */ |
@@ -83,6 +84,7 @@ extern struct usb_device_driver stub_driver; | |||
83 | 84 | ||
84 | /* stub_main.c */ | 85 | /* stub_main.c */ |
85 | struct bus_id_priv *get_busid_priv(const char *busid); | 86 | struct bus_id_priv *get_busid_priv(const char *busid); |
87 | void put_busid_priv(struct bus_id_priv *bid); | ||
86 | int del_match_busid(char *busid); | 88 | int del_match_busid(char *busid); |
87 | void stub_device_cleanup_urbs(struct stub_device *sdev); | 89 | void stub_device_cleanup_urbs(struct stub_device *sdev); |
88 | 90 | ||
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index dd8ef36ab10e..c0d6ff1baa72 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c | |||
@@ -300,9 +300,9 @@ static int stub_probe(struct usb_device *udev) | |||
300 | struct stub_device *sdev = NULL; | 300 | struct stub_device *sdev = NULL; |
301 | const char *udev_busid = dev_name(&udev->dev); | 301 | const char *udev_busid = dev_name(&udev->dev); |
302 | struct bus_id_priv *busid_priv; | 302 | struct bus_id_priv *busid_priv; |
303 | int rc; | 303 | int rc = 0; |
304 | 304 | ||
305 | dev_dbg(&udev->dev, "Enter\n"); | 305 | dev_dbg(&udev->dev, "Enter probe\n"); |
306 | 306 | ||
307 | /* check we should claim or not by busid_table */ | 307 | /* check we should claim or not by busid_table */ |
308 | busid_priv = get_busid_priv(udev_busid); | 308 | busid_priv = get_busid_priv(udev_busid); |
@@ -317,13 +317,15 @@ static int stub_probe(struct usb_device *udev) | |||
317 | * other matched drivers by the driver core. | 317 | * other matched drivers by the driver core. |
318 | * See driver_probe_device() in driver/base/dd.c | 318 | * See driver_probe_device() in driver/base/dd.c |
319 | */ | 319 | */ |
320 | return -ENODEV; | 320 | rc = -ENODEV; |
321 | goto call_put_busid_priv; | ||
321 | } | 322 | } |
322 | 323 | ||
323 | if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { | 324 | if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { |
324 | dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", | 325 | dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", |
325 | udev_busid); | 326 | udev_busid); |
326 | return -ENODEV; | 327 | rc = -ENODEV; |
328 | goto call_put_busid_priv; | ||
327 | } | 329 | } |
328 | 330 | ||
329 | if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { | 331 | if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { |
@@ -331,13 +333,16 @@ static int stub_probe(struct usb_device *udev) | |||
331 | "%s is attached on vhci_hcd... skip!\n", | 333 | "%s is attached on vhci_hcd... skip!\n", |
332 | udev_busid); | 334 | udev_busid); |
333 | 335 | ||
334 | return -ENODEV; | 336 | rc = -ENODEV; |
337 | goto call_put_busid_priv; | ||
335 | } | 338 | } |
336 | 339 | ||
337 | /* ok, this is my device */ | 340 | /* ok, this is my device */ |
338 | sdev = stub_device_alloc(udev); | 341 | sdev = stub_device_alloc(udev); |
339 | if (!sdev) | 342 | if (!sdev) { |
340 | return -ENOMEM; | 343 | rc = -ENOMEM; |
344 | goto call_put_busid_priv; | ||
345 | } | ||
341 | 346 | ||
342 | dev_info(&udev->dev, | 347 | dev_info(&udev->dev, |
343 | "usbip-host: register new device (bus %u dev %u)\n", | 348 | "usbip-host: register new device (bus %u dev %u)\n", |
@@ -369,7 +374,9 @@ static int stub_probe(struct usb_device *udev) | |||
369 | } | 374 | } |
370 | busid_priv->status = STUB_BUSID_ALLOC; | 375 | busid_priv->status = STUB_BUSID_ALLOC; |
371 | 376 | ||
372 | return 0; | 377 | rc = 0; |
378 | goto call_put_busid_priv; | ||
379 | |||
373 | err_files: | 380 | err_files: |
374 | usb_hub_release_port(udev->parent, udev->portnum, | 381 | usb_hub_release_port(udev->parent, udev->portnum, |
375 | (struct usb_dev_state *) udev); | 382 | (struct usb_dev_state *) udev); |
@@ -379,6 +386,9 @@ err_port: | |||
379 | 386 | ||
380 | busid_priv->sdev = NULL; | 387 | busid_priv->sdev = NULL; |
381 | stub_device_free(sdev); | 388 | stub_device_free(sdev); |
389 | |||
390 | call_put_busid_priv: | ||
391 | put_busid_priv(busid_priv); | ||
382 | return rc; | 392 | return rc; |
383 | } | 393 | } |
384 | 394 | ||
@@ -404,7 +414,7 @@ static void stub_disconnect(struct usb_device *udev) | |||
404 | struct bus_id_priv *busid_priv; | 414 | struct bus_id_priv *busid_priv; |
405 | int rc; | 415 | int rc; |
406 | 416 | ||
407 | dev_dbg(&udev->dev, "Enter\n"); | 417 | dev_dbg(&udev->dev, "Enter disconnect\n"); |
408 | 418 | ||
409 | busid_priv = get_busid_priv(udev_busid); | 419 | busid_priv = get_busid_priv(udev_busid); |
410 | if (!busid_priv) { | 420 | if (!busid_priv) { |
@@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_device *udev) | |||
417 | /* get stub_device */ | 427 | /* get stub_device */ |
418 | if (!sdev) { | 428 | if (!sdev) { |
419 | dev_err(&udev->dev, "could not get device"); | 429 | dev_err(&udev->dev, "could not get device"); |
420 | return; | 430 | goto call_put_busid_priv; |
421 | } | 431 | } |
422 | 432 | ||
423 | dev_set_drvdata(&udev->dev, NULL); | 433 | dev_set_drvdata(&udev->dev, NULL); |
@@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_device *udev) | |||
432 | (struct usb_dev_state *) udev); | 442 | (struct usb_dev_state *) udev); |
433 | if (rc) { | 443 | if (rc) { |
434 | dev_dbg(&udev->dev, "unable to release port\n"); | 444 | dev_dbg(&udev->dev, "unable to release port\n"); |
435 | return; | 445 | goto call_put_busid_priv; |
436 | } | 446 | } |
437 | 447 | ||
438 | /* If usb reset is called from event handler */ | 448 | /* If usb reset is called from event handler */ |
439 | if (usbip_in_eh(current)) | 449 | if (usbip_in_eh(current)) |
440 | return; | 450 | goto call_put_busid_priv; |
441 | 451 | ||
442 | /* shutdown the current connection */ | 452 | /* shutdown the current connection */ |
443 | shutdown_busid(busid_priv); | 453 | shutdown_busid(busid_priv); |
@@ -448,12 +458,11 @@ static void stub_disconnect(struct usb_device *udev) | |||
448 | busid_priv->sdev = NULL; | 458 | busid_priv->sdev = NULL; |
449 | stub_device_free(sdev); | 459 | stub_device_free(sdev); |
450 | 460 | ||
451 | if (busid_priv->status == STUB_BUSID_ALLOC) { | 461 | if (busid_priv->status == STUB_BUSID_ALLOC) |
452 | busid_priv->status = STUB_BUSID_ADDED; | 462 | busid_priv->status = STUB_BUSID_ADDED; |
453 | } else { | 463 | |
454 | busid_priv->status = STUB_BUSID_OTHER; | 464 | call_put_busid_priv: |
455 | del_match_busid((char *)udev_busid); | 465 | put_busid_priv(busid_priv); |
456 | } | ||
457 | } | 466 | } |
458 | 467 | ||
459 | #ifdef CONFIG_PM | 468 | #ifdef CONFIG_PM |
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index d41d0cdeec0f..bf8a5feb0ee9 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #define DRIVER_DESC "USB/IP Host Driver" | 14 | #define DRIVER_DESC "USB/IP Host Driver" |
15 | 15 | ||
16 | struct kmem_cache *stub_priv_cache; | 16 | struct kmem_cache *stub_priv_cache; |
17 | |||
17 | /* | 18 | /* |
18 | * busid_tables defines matching busids that usbip can grab. A user can change | 19 | * busid_tables defines matching busids that usbip can grab. A user can change |
19 | * dynamically what device is locally used and what device is exported to a | 20 | * dynamically what device is locally used and what device is exported to a |
@@ -25,6 +26,8 @@ static spinlock_t busid_table_lock; | |||
25 | 26 | ||
26 | static void init_busid_table(void) | 27 | static void init_busid_table(void) |
27 | { | 28 | { |
29 | int i; | ||
30 | |||
28 | /* | 31 | /* |
29 | * This also sets the bus_table[i].status to | 32 | * This also sets the bus_table[i].status to |
30 | * STUB_BUSID_OTHER, which is 0. | 33 | * STUB_BUSID_OTHER, which is 0. |
@@ -32,6 +35,9 @@ static void init_busid_table(void) | |||
32 | memset(busid_table, 0, sizeof(busid_table)); | 35 | memset(busid_table, 0, sizeof(busid_table)); |
33 | 36 | ||
34 | spin_lock_init(&busid_table_lock); | 37 | spin_lock_init(&busid_table_lock); |
38 | |||
39 | for (i = 0; i < MAX_BUSID; i++) | ||
40 | spin_lock_init(&busid_table[i].busid_lock); | ||
35 | } | 41 | } |
36 | 42 | ||
37 | /* | 43 | /* |
@@ -43,15 +49,20 @@ static int get_busid_idx(const char *busid) | |||
43 | int i; | 49 | int i; |
44 | int idx = -1; | 50 | int idx = -1; |
45 | 51 | ||
46 | for (i = 0; i < MAX_BUSID; i++) | 52 | for (i = 0; i < MAX_BUSID; i++) { |
53 | spin_lock(&busid_table[i].busid_lock); | ||
47 | if (busid_table[i].name[0]) | 54 | if (busid_table[i].name[0]) |
48 | if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { | 55 | if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { |
49 | idx = i; | 56 | idx = i; |
57 | spin_unlock(&busid_table[i].busid_lock); | ||
50 | break; | 58 | break; |
51 | } | 59 | } |
60 | spin_unlock(&busid_table[i].busid_lock); | ||
61 | } | ||
52 | return idx; | 62 | return idx; |
53 | } | 63 | } |
54 | 64 | ||
65 | /* Returns holding busid_lock. Should call put_busid_priv() to unlock */ | ||
55 | struct bus_id_priv *get_busid_priv(const char *busid) | 66 | struct bus_id_priv *get_busid_priv(const char *busid) |
56 | { | 67 | { |
57 | int idx; | 68 | int idx; |
@@ -59,13 +70,22 @@ struct bus_id_priv *get_busid_priv(const char *busid) | |||
59 | 70 | ||
60 | spin_lock(&busid_table_lock); | 71 | spin_lock(&busid_table_lock); |
61 | idx = get_busid_idx(busid); | 72 | idx = get_busid_idx(busid); |
62 | if (idx >= 0) | 73 | if (idx >= 0) { |
63 | bid = &(busid_table[idx]); | 74 | bid = &(busid_table[idx]); |
75 | /* get busid_lock before returning */ | ||
76 | spin_lock(&bid->busid_lock); | ||
77 | } | ||
64 | spin_unlock(&busid_table_lock); | 78 | spin_unlock(&busid_table_lock); |
65 | 79 | ||
66 | return bid; | 80 | return bid; |
67 | } | 81 | } |
68 | 82 | ||
83 | void put_busid_priv(struct bus_id_priv *bid) | ||
84 | { | ||
85 | if (bid) | ||
86 | spin_unlock(&bid->busid_lock); | ||
87 | } | ||
88 | |||
69 | static int add_match_busid(char *busid) | 89 | static int add_match_busid(char *busid) |
70 | { | 90 | { |
71 | int i; | 91 | int i; |
@@ -78,15 +98,19 @@ static int add_match_busid(char *busid) | |||
78 | goto out; | 98 | goto out; |
79 | } | 99 | } |
80 | 100 | ||
81 | for (i = 0; i < MAX_BUSID; i++) | 101 | for (i = 0; i < MAX_BUSID; i++) { |
102 | spin_lock(&busid_table[i].busid_lock); | ||
82 | if (!busid_table[i].name[0]) { | 103 | if (!busid_table[i].name[0]) { |
83 | strlcpy(busid_table[i].name, busid, BUSID_SIZE); | 104 | strlcpy(busid_table[i].name, busid, BUSID_SIZE); |
84 | if ((busid_table[i].status != STUB_BUSID_ALLOC) && | 105 | if ((busid_table[i].status != STUB_BUSID_ALLOC) && |
85 | (busid_table[i].status != STUB_BUSID_REMOV)) | 106 | (busid_table[i].status != STUB_BUSID_REMOV)) |
86 | busid_table[i].status = STUB_BUSID_ADDED; | 107 | busid_table[i].status = STUB_BUSID_ADDED; |
87 | ret = 0; | 108 | ret = 0; |
109 | spin_unlock(&busid_table[i].busid_lock); | ||
88 | break; | 110 | break; |
89 | } | 111 | } |
112 | spin_unlock(&busid_table[i].busid_lock); | ||
113 | } | ||
90 | 114 | ||
91 | out: | 115 | out: |
92 | spin_unlock(&busid_table_lock); | 116 | spin_unlock(&busid_table_lock); |
@@ -107,6 +131,8 @@ int del_match_busid(char *busid) | |||
107 | /* found */ | 131 | /* found */ |
108 | ret = 0; | 132 | ret = 0; |
109 | 133 | ||
134 | spin_lock(&busid_table[idx].busid_lock); | ||
135 | |||
110 | if (busid_table[idx].status == STUB_BUSID_OTHER) | 136 | if (busid_table[idx].status == STUB_BUSID_OTHER) |
111 | memset(busid_table[idx].name, 0, BUSID_SIZE); | 137 | memset(busid_table[idx].name, 0, BUSID_SIZE); |
112 | 138 | ||
@@ -114,6 +140,7 @@ int del_match_busid(char *busid) | |||
114 | (busid_table[idx].status != STUB_BUSID_ADDED)) | 140 | (busid_table[idx].status != STUB_BUSID_ADDED)) |
115 | busid_table[idx].status = STUB_BUSID_REMOV; | 141 | busid_table[idx].status = STUB_BUSID_REMOV; |
116 | 142 | ||
143 | spin_unlock(&busid_table[idx].busid_lock); | ||
117 | out: | 144 | out: |
118 | spin_unlock(&busid_table_lock); | 145 | spin_unlock(&busid_table_lock); |
119 | 146 | ||
@@ -126,9 +153,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf) | |||
126 | char *out = buf; | 153 | char *out = buf; |
127 | 154 | ||
128 | spin_lock(&busid_table_lock); | 155 | spin_lock(&busid_table_lock); |
129 | for (i = 0; i < MAX_BUSID; i++) | 156 | for (i = 0; i < MAX_BUSID; i++) { |
157 | spin_lock(&busid_table[i].busid_lock); | ||
130 | if (busid_table[i].name[0]) | 158 | if (busid_table[i].name[0]) |
131 | out += sprintf(out, "%s ", busid_table[i].name); | 159 | out += sprintf(out, "%s ", busid_table[i].name); |
160 | spin_unlock(&busid_table[i].busid_lock); | ||
161 | } | ||
132 | spin_unlock(&busid_table_lock); | 162 | spin_unlock(&busid_table_lock); |
133 | out += sprintf(out, "\n"); | 163 | out += sprintf(out, "\n"); |
134 | 164 | ||
@@ -169,6 +199,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf, | |||
169 | } | 199 | } |
170 | static DRIVER_ATTR_RW(match_busid); | 200 | static DRIVER_ATTR_RW(match_busid); |
171 | 201 | ||
202 | static int do_rebind(char *busid, struct bus_id_priv *busid_priv) | ||
203 | { | ||
204 | int ret; | ||
205 | |||
206 | /* device_attach() callers should hold parent lock for USB */ | ||
207 | if (busid_priv->udev->dev.parent) | ||
208 | device_lock(busid_priv->udev->dev.parent); | ||
209 | ret = device_attach(&busid_priv->udev->dev); | ||
210 | if (busid_priv->udev->dev.parent) | ||
211 | device_unlock(busid_priv->udev->dev.parent); | ||
212 | if (ret < 0) { | ||
213 | dev_err(&busid_priv->udev->dev, "rebind failed\n"); | ||
214 | return ret; | ||
215 | } | ||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static void stub_device_rebind(void) | ||
220 | { | ||
221 | #if IS_MODULE(CONFIG_USBIP_HOST) | ||
222 | struct bus_id_priv *busid_priv; | ||
223 | int i; | ||
224 | |||
225 | /* update status to STUB_BUSID_OTHER so probe ignores the device */ | ||
226 | spin_lock(&busid_table_lock); | ||
227 | for (i = 0; i < MAX_BUSID; i++) { | ||
228 | if (busid_table[i].name[0] && | ||
229 | busid_table[i].shutdown_busid) { | ||
230 | busid_priv = &(busid_table[i]); | ||
231 | busid_priv->status = STUB_BUSID_OTHER; | ||
232 | } | ||
233 | } | ||
234 | spin_unlock(&busid_table_lock); | ||
235 | |||
236 | /* now run rebind - no need to hold locks. driver files are removed */ | ||
237 | for (i = 0; i < MAX_BUSID; i++) { | ||
238 | if (busid_table[i].name[0] && | ||
239 | busid_table[i].shutdown_busid) { | ||
240 | busid_priv = &(busid_table[i]); | ||
241 | do_rebind(busid_table[i].name, busid_priv); | ||
242 | } | ||
243 | } | ||
244 | #endif | ||
245 | } | ||
246 | |||
172 | static ssize_t rebind_store(struct device_driver *dev, const char *buf, | 247 | static ssize_t rebind_store(struct device_driver *dev, const char *buf, |
173 | size_t count) | 248 | size_t count) |
174 | { | 249 | { |
@@ -186,16 +261,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf, | |||
186 | if (!bid) | 261 | if (!bid) |
187 | return -ENODEV; | 262 | return -ENODEV; |
188 | 263 | ||
189 | /* device_attach() callers should hold parent lock for USB */ | 264 | /* mark the device for deletion so probe ignores it during rescan */ |
190 | if (bid->udev->dev.parent) | 265 | bid->status = STUB_BUSID_OTHER; |
191 | device_lock(bid->udev->dev.parent); | 266 | /* release the busid lock */ |
192 | ret = device_attach(&bid->udev->dev); | 267 | put_busid_priv(bid); |
193 | if (bid->udev->dev.parent) | 268 | |
194 | device_unlock(bid->udev->dev.parent); | 269 | ret = do_rebind((char *) buf, bid); |
195 | if (ret < 0) { | 270 | if (ret < 0) |
196 | dev_err(&bid->udev->dev, "rebind failed\n"); | ||
197 | return ret; | 271 | return ret; |
198 | } | 272 | |
273 | /* delete device from busid_table */ | ||
274 | del_match_busid((char *) buf); | ||
199 | 275 | ||
200 | return count; | 276 | return count; |
201 | } | 277 | } |
@@ -317,6 +393,9 @@ static void __exit usbip_host_exit(void) | |||
317 | */ | 393 | */ |
318 | usb_deregister_device_driver(&stub_driver); | 394 | usb_deregister_device_driver(&stub_driver); |
319 | 395 | ||
396 | /* initiate scan to attach devices */ | ||
397 | stub_device_rebind(); | ||
398 | |||
320 | kmem_cache_destroy(stub_priv_cache); | 399 | kmem_cache_destroy(stub_priv_cache); |
321 | } | 400 | } |
322 | 401 | ||
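The stub_main.c changes add a per-entry busid_lock, and get_busid_priv() now returns with that lock held; every caller, including each early-exit path in stub_probe() and stub_disconnect(), must release it through put_busid_priv(), which is why those functions were restructured around a single call_put_busid_priv label. The pairing, reduced to its skeleton (entry_is_usable() is a hypothetical check):

    static int use_entry(const char *busid)
    {
            struct bus_id_priv *bid;
            int rc = 0;

            bid = get_busid_priv(busid);    /* returns with busid_lock held */
            if (!bid)
                    return -ENODEV;

            if (!entry_is_usable(bid)) {
                    rc = -ENODEV;
                    goto out_put;
            }

            /* ... operate on bid while the lock is held ... */

    out_put:
            put_busid_priv(bid);            /* single unlock for all paths */
            return rc;
    }

The rebind_store() rework follows the same rule: it drops the lock with put_busid_priv() before calling do_rebind(), since do_rebind() ends up in device_attach(), which can sleep.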
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c index 3bedfed608a2..7587fb665ff1 100644 --- a/fs/afs/addr_list.c +++ b/fs/afs/addr_list.c | |||
@@ -121,7 +121,7 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, | |||
121 | p = text; | 121 | p = text; |
122 | do { | 122 | do { |
123 | struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs]; | 123 | struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs]; |
124 | char tdelim = delim; | 124 | const char *q, *stop; |
125 | 125 | ||
126 | if (*p == delim) { | 126 | if (*p == delim) { |
127 | p++; | 127 | p++; |
@@ -130,28 +130,33 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, | |||
130 | 130 | ||
131 | if (*p == '[') { | 131 | if (*p == '[') { |
132 | p++; | 132 | p++; |
133 | tdelim = ']'; | 133 | q = memchr(p, ']', end - p); |
134 | } else { | ||
135 | for (q = p; q < end; q++) | ||
136 | if (*q == '+' || *q == delim) | ||
137 | break; | ||
134 | } | 138 | } |
135 | 139 | ||
136 | if (in4_pton(p, end - p, | 140 | if (in4_pton(p, q - p, |
137 | (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3], | 141 | (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3], |
138 | tdelim, &p)) { | 142 | -1, &stop)) { |
139 | srx->transport.sin6.sin6_addr.s6_addr32[0] = 0; | 143 | srx->transport.sin6.sin6_addr.s6_addr32[0] = 0; |
140 | srx->transport.sin6.sin6_addr.s6_addr32[1] = 0; | 144 | srx->transport.sin6.sin6_addr.s6_addr32[1] = 0; |
141 | srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); | 145 | srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); |
142 | } else if (in6_pton(p, end - p, | 146 | } else if (in6_pton(p, q - p, |
143 | srx->transport.sin6.sin6_addr.s6_addr, | 147 | srx->transport.sin6.sin6_addr.s6_addr, |
144 | tdelim, &p)) { | 148 | -1, &stop)) { |
145 | /* Nothing to do */ | 149 | /* Nothing to do */ |
146 | } else { | 150 | } else { |
147 | goto bad_address; | 151 | goto bad_address; |
148 | } | 152 | } |
149 | 153 | ||
150 | if (tdelim == ']') { | 154 | if (stop != q) |
151 | if (p == end || *p != ']') | 155 | goto bad_address; |
152 | goto bad_address; | 156 | |
157 | p = q; | ||
158 | if (q < end && *q == ']') | ||
153 | p++; | 159 | p++; |
154 | } | ||
155 | 160 | ||
156 | if (p < end) { | 161 | if (p < end) { |
157 | if (*p == '+') { | 162 | if (*p == '+') { |
diff --git a/fs/afs/callback.c b/fs/afs/callback.c index abd9a84f4e88..571437dcb252 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c | |||
@@ -23,36 +23,55 @@ | |||
23 | /* | 23 | /* |
24 | * Set up an interest-in-callbacks record for a volume on a server and | 24 | * Set up an interest-in-callbacks record for a volume on a server and |
25 | * register it with the server. | 25 | * register it with the server. |
26 | * - Called with volume->server_sem held. | 26 | * - Called with vnode->io_lock held. |
27 | */ | 27 | */ |
28 | int afs_register_server_cb_interest(struct afs_vnode *vnode, | 28 | int afs_register_server_cb_interest(struct afs_vnode *vnode, |
29 | struct afs_server_entry *entry) | 29 | struct afs_server_list *slist, |
30 | unsigned int index) | ||
30 | { | 31 | { |
31 | struct afs_cb_interest *cbi = entry->cb_interest, *vcbi, *new, *x; | 32 | struct afs_server_entry *entry = &slist->servers[index]; |
33 | struct afs_cb_interest *cbi, *vcbi, *new, *old; | ||
32 | struct afs_server *server = entry->server; | 34 | struct afs_server *server = entry->server; |
33 | 35 | ||
34 | again: | 36 | again: |
37 | if (vnode->cb_interest && | ||
38 | likely(vnode->cb_interest == entry->cb_interest)) | ||
39 | return 0; | ||
40 | |||
41 | read_lock(&slist->lock); | ||
42 | cbi = afs_get_cb_interest(entry->cb_interest); | ||
43 | read_unlock(&slist->lock); | ||
44 | |||
35 | vcbi = vnode->cb_interest; | 45 | vcbi = vnode->cb_interest; |
36 | if (vcbi) { | 46 | if (vcbi) { |
37 | if (vcbi == cbi) | 47 | if (vcbi == cbi) { |
48 | afs_put_cb_interest(afs_v2net(vnode), cbi); | ||
38 | return 0; | 49 | return 0; |
50 | } | ||
39 | 51 | ||
52 | /* Use a new interest in the server list for the same server | ||
53 | * rather than an old one that's still attached to a vnode. | ||
54 | */ | ||
40 | if (cbi && vcbi->server == cbi->server) { | 55 | if (cbi && vcbi->server == cbi->server) { |
41 | write_seqlock(&vnode->cb_lock); | 56 | write_seqlock(&vnode->cb_lock); |
42 | vnode->cb_interest = afs_get_cb_interest(cbi); | 57 | old = vnode->cb_interest; |
58 | vnode->cb_interest = cbi; | ||
43 | write_sequnlock(&vnode->cb_lock); | 59 | write_sequnlock(&vnode->cb_lock); |
44 | afs_put_cb_interest(afs_v2net(vnode), cbi); | 60 | afs_put_cb_interest(afs_v2net(vnode), old); |
45 | return 0; | 61 | return 0; |
46 | } | 62 | } |
47 | 63 | ||
64 | /* Re-use the one attached to the vnode. */ | ||
48 | if (!cbi && vcbi->server == server) { | 65 | if (!cbi && vcbi->server == server) { |
49 | afs_get_cb_interest(vcbi); | 66 | write_lock(&slist->lock); |
50 | x = cmpxchg(&entry->cb_interest, cbi, vcbi); | 67 | if (entry->cb_interest) { |
51 | if (x != cbi) { | 68 | write_unlock(&slist->lock); |
52 | cbi = x; | 69 | afs_put_cb_interest(afs_v2net(vnode), cbi); |
53 | afs_put_cb_interest(afs_v2net(vnode), vcbi); | ||
54 | goto again; | 70 | goto again; |
55 | } | 71 | } |
72 | |||
73 | entry->cb_interest = cbi; | ||
74 | write_unlock(&slist->lock); | ||
56 | return 0; | 75 | return 0; |
57 | } | 76 | } |
58 | } | 77 | } |
@@ -72,13 +91,16 @@ again: | |||
72 | list_add_tail(&new->cb_link, &server->cb_interests); | 91 | list_add_tail(&new->cb_link, &server->cb_interests); |
73 | write_unlock(&server->cb_break_lock); | 92 | write_unlock(&server->cb_break_lock); |
74 | 93 | ||
75 | x = cmpxchg(&entry->cb_interest, cbi, new); | 94 | write_lock(&slist->lock); |
76 | if (x == cbi) { | 95 | if (!entry->cb_interest) { |
96 | entry->cb_interest = afs_get_cb_interest(new); | ||
77 | cbi = new; | 97 | cbi = new; |
98 | new = NULL; | ||
78 | } else { | 99 | } else { |
79 | cbi = x; | 100 | cbi = afs_get_cb_interest(entry->cb_interest); |
80 | afs_put_cb_interest(afs_v2net(vnode), new); | ||
81 | } | 101 | } |
102 | write_unlock(&slist->lock); | ||
103 | afs_put_cb_interest(afs_v2net(vnode), new); | ||
82 | } | 104 | } |
83 | 105 | ||
84 | ASSERT(cbi); | 106 | ASSERT(cbi); |
@@ -88,11 +110,14 @@ again: | |||
88 | */ | 110 | */ |
89 | write_seqlock(&vnode->cb_lock); | 111 | write_seqlock(&vnode->cb_lock); |
90 | 112 | ||
91 | vnode->cb_interest = afs_get_cb_interest(cbi); | 113 | old = vnode->cb_interest; |
114 | vnode->cb_interest = cbi; | ||
92 | vnode->cb_s_break = cbi->server->cb_s_break; | 115 | vnode->cb_s_break = cbi->server->cb_s_break; |
116 | vnode->cb_v_break = vnode->volume->cb_v_break; | ||
93 | clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); | 117 | clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); |
94 | 118 | ||
95 | write_sequnlock(&vnode->cb_lock); | 119 | write_sequnlock(&vnode->cb_lock); |
120 | afs_put_cb_interest(afs_v2net(vnode), old); | ||
96 | return 0; | 121 | return 0; |
97 | } | 122 | } |
98 | 123 | ||
@@ -171,13 +196,24 @@ static void afs_break_one_callback(struct afs_server *server, | |||
171 | if (cbi->vid != fid->vid) | 196 | if (cbi->vid != fid->vid) |
172 | continue; | 197 | continue; |
173 | 198 | ||
174 | data.volume = NULL; | 199 | if (fid->vnode == 0 && fid->unique == 0) { |
175 | data.fid = *fid; | 200 | /* The callback break applies to an entire volume. */ |
176 | inode = ilookup5_nowait(cbi->sb, fid->vnode, afs_iget5_test, &data); | 201 | struct afs_super_info *as = AFS_FS_S(cbi->sb); |
177 | if (inode) { | 202 | struct afs_volume *volume = as->volume; |
178 | vnode = AFS_FS_I(inode); | 203 | |
179 | afs_break_callback(vnode); | 204 | write_lock(&volume->cb_break_lock); |
180 | iput(inode); | 205 | volume->cb_v_break++; |
206 | write_unlock(&volume->cb_break_lock); | ||
207 | } else { | ||
208 | data.volume = NULL; | ||
209 | data.fid = *fid; | ||
210 | inode = ilookup5_nowait(cbi->sb, fid->vnode, | ||
211 | afs_iget5_test, &data); | ||
212 | if (inode) { | ||
213 | vnode = AFS_FS_I(inode); | ||
214 | afs_break_callback(vnode); | ||
215 | iput(inode); | ||
216 | } | ||
181 | } | 217 | } |
182 | } | 218 | } |
183 | 219 | ||
@@ -195,6 +231,8 @@ void afs_break_callbacks(struct afs_server *server, size_t count, | |||
195 | ASSERT(server != NULL); | 231 | ASSERT(server != NULL); |
196 | ASSERTCMP(count, <=, AFSCBMAX); | 232 | ASSERTCMP(count, <=, AFSCBMAX); |
197 | 233 | ||
234 | /* TODO: Sort the callback break list by volume ID */ | ||
235 | |||
198 | for (; count > 0; callbacks++, count--) { | 236 | for (; count > 0; callbacks++, count--) { |
199 | _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", | 237 | _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", |
200 | callbacks->fid.vid, | 238 | callbacks->fid.vid, |
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 357de908df3a..c332c95a6940 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c | |||
@@ -133,21 +133,10 @@ bool afs_cm_incoming_call(struct afs_call *call) | |||
133 | } | 133 | } |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * clean up a cache manager call | 136 | * Clean up a cache manager call. |
137 | */ | 137 | */ |
138 | static void afs_cm_destructor(struct afs_call *call) | 138 | static void afs_cm_destructor(struct afs_call *call) |
139 | { | 139 | { |
140 | _enter(""); | ||
141 | |||
142 | /* Break the callbacks here so that we do it after the final ACK is | ||
143 | * received. The step number here must match the final number in | ||
144 | * afs_deliver_cb_callback(). | ||
145 | */ | ||
146 | if (call->unmarshall == 5) { | ||
147 | ASSERT(call->cm_server && call->count && call->request); | ||
148 | afs_break_callbacks(call->cm_server, call->count, call->request); | ||
149 | } | ||
150 | |||
151 | kfree(call->buffer); | 140 | kfree(call->buffer); |
152 | call->buffer = NULL; | 141 | call->buffer = NULL; |
153 | } | 142 | } |
@@ -161,14 +150,14 @@ static void SRXAFSCB_CallBack(struct work_struct *work) | |||
161 | 150 | ||
162 | _enter(""); | 151 | _enter(""); |
163 | 152 | ||
164 | /* be sure to send the reply *before* attempting to spam the AFS server | 153 | /* We need to break the callbacks before sending the reply as the |
165 | * with FSFetchStatus requests on the vnodes with broken callbacks lest | 154 | * server holds up change visibility till it receives our reply so as |
166 | * the AFS server get into a vicious cycle of trying to break further | 155 | * to maintain cache coherency. |
167 | * callbacks because it hadn't received completion of the CBCallBack op | 156 | */ |
168 | * yet */ | 157 | if (call->cm_server) |
169 | afs_send_empty_reply(call); | 158 | afs_break_callbacks(call->cm_server, call->count, call->request); |
170 | 159 | ||
171 | afs_break_callbacks(call->cm_server, call->count, call->request); | 160 | afs_send_empty_reply(call); |
172 | afs_put_call(call); | 161 | afs_put_call(call); |
173 | _leave(""); | 162 | _leave(""); |
174 | } | 163 | } |
@@ -180,7 +169,6 @@ static int afs_deliver_cb_callback(struct afs_call *call) | |||
180 | { | 169 | { |
181 | struct afs_callback_break *cb; | 170 | struct afs_callback_break *cb; |
182 | struct sockaddr_rxrpc srx; | 171 | struct sockaddr_rxrpc srx; |
183 | struct afs_server *server; | ||
184 | __be32 *bp; | 172 | __be32 *bp; |
185 | int ret, loop; | 173 | int ret, loop; |
186 | 174 | ||
@@ -267,15 +255,6 @@ static int afs_deliver_cb_callback(struct afs_call *call) | |||
267 | 255 | ||
268 | call->offset = 0; | 256 | call->offset = 0; |
269 | call->unmarshall++; | 257 | call->unmarshall++; |
270 | |||
271 | /* Record that the message was unmarshalled successfully so | ||
272 | * that the call destructor can know do the callback breaking | ||
273 | * work, even if the final ACK isn't received. | ||
274 | * | ||
275 | * If the step number changes, then afs_cm_destructor() must be | ||
276 | * updated also. | ||
277 | */ | ||
278 | call->unmarshall++; | ||
279 | case 5: | 258 | case 5: |
280 | break; | 259 | break; |
281 | } | 260 | } |
@@ -286,10 +265,9 @@ static int afs_deliver_cb_callback(struct afs_call *call) | |||
286 | /* we'll need the file server record as that tells us which set of | 265 | /* we'll need the file server record as that tells us which set of |
287 | * vnodes to operate upon */ | 266 | * vnodes to operate upon */ |
288 | rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); | 267 | rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); |
289 | server = afs_find_server(call->net, &srx); | 268 | call->cm_server = afs_find_server(call->net, &srx); |
290 | if (!server) | 269 | if (!call->cm_server) |
291 | return -ENOTCONN; | 270 | trace_afs_cm_no_server(call, &srx); |
292 | call->cm_server = server; | ||
293 | 271 | ||
294 | return afs_queue_call_work(call); | 272 | return afs_queue_call_work(call); |
295 | } | 273 | } |
@@ -303,7 +281,8 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work) | |||
303 | 281 | ||
304 | _enter("{%p}", call->cm_server); | 282 | _enter("{%p}", call->cm_server); |
305 | 283 | ||
306 | afs_init_callback_state(call->cm_server); | 284 | if (call->cm_server) |
285 | afs_init_callback_state(call->cm_server); | ||
307 | afs_send_empty_reply(call); | 286 | afs_send_empty_reply(call); |
308 | afs_put_call(call); | 287 | afs_put_call(call); |
309 | _leave(""); | 288 | _leave(""); |
@@ -315,7 +294,6 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work) | |||
315 | static int afs_deliver_cb_init_call_back_state(struct afs_call *call) | 294 | static int afs_deliver_cb_init_call_back_state(struct afs_call *call) |
316 | { | 295 | { |
317 | struct sockaddr_rxrpc srx; | 296 | struct sockaddr_rxrpc srx; |
318 | struct afs_server *server; | ||
319 | int ret; | 297 | int ret; |
320 | 298 | ||
321 | _enter(""); | 299 | _enter(""); |
@@ -328,10 +306,9 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call) | |||
328 | 306 | ||
329 | /* we'll need the file server record as that tells us which set of | 307 | /* we'll need the file server record as that tells us which set of |
330 | * vnodes to operate upon */ | 308 | * vnodes to operate upon */ |
331 | server = afs_find_server(call->net, &srx); | 309 | call->cm_server = afs_find_server(call->net, &srx); |
332 | if (!server) | 310 | if (!call->cm_server) |
333 | return -ENOTCONN; | 311 | trace_afs_cm_no_server(call, &srx); |
334 | call->cm_server = server; | ||
335 | 312 | ||
336 | return afs_queue_call_work(call); | 313 | return afs_queue_call_work(call); |
337 | } | 314 | } |
@@ -341,8 +318,6 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call) | |||
341 | */ | 318 | */ |
342 | static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) | 319 | static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) |
343 | { | 320 | { |
344 | struct sockaddr_rxrpc srx; | ||
345 | struct afs_server *server; | ||
346 | struct afs_uuid *r; | 321 | struct afs_uuid *r; |
347 | unsigned loop; | 322 | unsigned loop; |
348 | __be32 *b; | 323 | __be32 *b; |
@@ -398,11 +373,11 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) | |||
398 | 373 | ||
399 | /* we'll need the file server record as that tells us which set of | 374 | /* we'll need the file server record as that tells us which set of |
400 | * vnodes to operate upon */ | 375 | * vnodes to operate upon */ |
401 | rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); | 376 | rcu_read_lock(); |
402 | server = afs_find_server(call->net, &srx); | 377 | call->cm_server = afs_find_server_by_uuid(call->net, call->request); |
403 | if (!server) | 378 | rcu_read_unlock(); |
404 | return -ENOTCONN; | 379 | if (!call->cm_server) |
405 | call->cm_server = server; | 380 | trace_afs_cm_no_server_u(call, call->request); |
406 | 381 | ||
407 | return afs_queue_call_work(call); | 382 | return afs_queue_call_work(call); |
408 | } | 383 | } |
diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 5889f70d4d27..7d623008157f 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c | |||
@@ -180,6 +180,7 @@ static int afs_dir_open(struct inode *inode, struct file *file) | |||
180 | * get reclaimed during the iteration. | 180 | * get reclaimed during the iteration. |
181 | */ | 181 | */ |
182 | static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) | 182 | static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) |
183 | __acquires(&dvnode->validate_lock) | ||
183 | { | 184 | { |
184 | struct afs_read *req; | 185 | struct afs_read *req; |
185 | loff_t i_size; | 186 | loff_t i_size; |
@@ -261,18 +262,21 @@ retry: | |||
261 | /* If we're going to reload, we need to lock all the pages to prevent | 262 | /* If we're going to reload, we need to lock all the pages to prevent |
262 | * races. | 263 | * races. |
263 | */ | 264 | */ |
264 | if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { | 265 | ret = -ERESTARTSYS; |
265 | ret = -ERESTARTSYS; | 266 | if (down_read_killable(&dvnode->validate_lock) < 0) |
266 | for (i = 0; i < req->nr_pages; i++) | 267 | goto error; |
267 | if (lock_page_killable(req->pages[i]) < 0) | ||
268 | goto error_unlock; | ||
269 | 268 | ||
270 | if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) | 269 | if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) |
271 | goto success; | 270 | goto success; |
271 | |||
272 | up_read(&dvnode->validate_lock); | ||
273 | if (down_write_killable(&dvnode->validate_lock) < 0) | ||
274 | goto error; | ||
272 | 275 | ||
276 | if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { | ||
273 | ret = afs_fetch_data(dvnode, key, req); | 277 | ret = afs_fetch_data(dvnode, key, req); |
274 | if (ret < 0) | 278 | if (ret < 0) |
275 | goto error_unlock_all; | 279 | goto error_unlock; |
276 | 280 | ||
277 | task_io_account_read(PAGE_SIZE * req->nr_pages); | 281 | task_io_account_read(PAGE_SIZE * req->nr_pages); |
278 | 282 | ||
@@ -284,33 +288,26 @@ retry: | |||
284 | for (i = 0; i < req->nr_pages; i++) | 288 | for (i = 0; i < req->nr_pages; i++) |
285 | if (!afs_dir_check_page(dvnode, req->pages[i], | 289 | if (!afs_dir_check_page(dvnode, req->pages[i], |
286 | req->actual_len)) | 290 | req->actual_len)) |
287 | goto error_unlock_all; | 291 | goto error_unlock; |
288 | 292 | ||
289 | // TODO: Trim excess pages | 293 | // TODO: Trim excess pages |
290 | 294 | ||
291 | set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags); | 295 | set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags); |
292 | } | 296 | } |
293 | 297 | ||
298 | downgrade_write(&dvnode->validate_lock); | ||
294 | success: | 299 | success: |
295 | i = req->nr_pages; | ||
296 | while (i > 0) | ||
297 | unlock_page(req->pages[--i]); | ||
298 | return req; | 300 | return req; |
299 | 301 | ||
300 | error_unlock_all: | ||
301 | i = req->nr_pages; | ||
302 | error_unlock: | 302 | error_unlock: |
303 | while (i > 0) | 303 | up_write(&dvnode->validate_lock); |
304 | unlock_page(req->pages[--i]); | ||
305 | error: | 304 | error: |
306 | afs_put_read(req); | 305 | afs_put_read(req); |
307 | _leave(" = %d", ret); | 306 | _leave(" = %d", ret); |
308 | return ERR_PTR(ret); | 307 | return ERR_PTR(ret); |
309 | 308 | ||
310 | content_has_grown: | 309 | content_has_grown: |
311 | i = req->nr_pages; | 310 | up_write(&dvnode->validate_lock); |
312 | while (i > 0) | ||
313 | unlock_page(req->pages[--i]); | ||
314 | afs_put_read(req); | 311 | afs_put_read(req); |
315 | goto retry; | 312 | goto retry; |
316 | } | 313 | } |
@@ -473,6 +470,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, | |||
473 | } | 470 | } |
474 | 471 | ||
475 | out: | 472 | out: |
473 | up_read(&dvnode->validate_lock); | ||
476 | afs_put_read(req); | 474 | afs_put_read(req); |
477 | _leave(" = %d", ret); | 475 | _leave(" = %d", ret); |
478 | return ret; | 476 | return ret; |
@@ -1143,7 +1141,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
1143 | ret = -ERESTARTSYS; | 1141 | ret = -ERESTARTSYS; |
1144 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { | 1142 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { |
1145 | while (afs_select_fileserver(&fc)) { | 1143 | while (afs_select_fileserver(&fc)) { |
1146 | fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; | 1144 | fc.cb_break = afs_calc_vnode_cb_break(dvnode); |
1147 | afs_fs_create(&fc, dentry->d_name.name, mode, data_version, | 1145 | afs_fs_create(&fc, dentry->d_name.name, mode, data_version, |
1148 | &newfid, &newstatus, &newcb); | 1146 | &newfid, &newstatus, &newcb); |
1149 | } | 1147 | } |
@@ -1213,7 +1211,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) | |||
1213 | ret = -ERESTARTSYS; | 1211 | ret = -ERESTARTSYS; |
1214 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { | 1212 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { |
1215 | while (afs_select_fileserver(&fc)) { | 1213 | while (afs_select_fileserver(&fc)) { |
1216 | fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; | 1214 | fc.cb_break = afs_calc_vnode_cb_break(dvnode); |
1217 | afs_fs_remove(&fc, dentry->d_name.name, true, | 1215 | afs_fs_remove(&fc, dentry->d_name.name, true, |
1218 | data_version); | 1216 | data_version); |
1219 | } | 1217 | } |
@@ -1316,7 +1314,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) | |||
1316 | ret = -ERESTARTSYS; | 1314 | ret = -ERESTARTSYS; |
1317 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { | 1315 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { |
1318 | while (afs_select_fileserver(&fc)) { | 1316 | while (afs_select_fileserver(&fc)) { |
1319 | fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; | 1317 | fc.cb_break = afs_calc_vnode_cb_break(dvnode); |
1320 | afs_fs_remove(&fc, dentry->d_name.name, false, | 1318 | afs_fs_remove(&fc, dentry->d_name.name, false, |
1321 | data_version); | 1319 | data_version); |
1322 | } | 1320 | } |
@@ -1373,7 +1371,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
1373 | ret = -ERESTARTSYS; | 1371 | ret = -ERESTARTSYS; |
1374 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { | 1372 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { |
1375 | while (afs_select_fileserver(&fc)) { | 1373 | while (afs_select_fileserver(&fc)) { |
1376 | fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; | 1374 | fc.cb_break = afs_calc_vnode_cb_break(dvnode); |
1377 | afs_fs_create(&fc, dentry->d_name.name, mode, data_version, | 1375 | afs_fs_create(&fc, dentry->d_name.name, mode, data_version, |
1378 | &newfid, &newstatus, &newcb); | 1376 | &newfid, &newstatus, &newcb); |
1379 | } | 1377 | } |
@@ -1443,8 +1441,8 @@ static int afs_link(struct dentry *from, struct inode *dir, | |||
1443 | } | 1441 | } |
1444 | 1442 | ||
1445 | while (afs_select_fileserver(&fc)) { | 1443 | while (afs_select_fileserver(&fc)) { |
1446 | fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; | 1444 | fc.cb_break = afs_calc_vnode_cb_break(dvnode); |
1447 | fc.cb_break_2 = vnode->cb_break + vnode->cb_s_break; | 1445 | fc.cb_break_2 = afs_calc_vnode_cb_break(vnode); |
1448 | afs_fs_link(&fc, vnode, dentry->d_name.name, data_version); | 1446 | afs_fs_link(&fc, vnode, dentry->d_name.name, data_version); |
1449 | } | 1447 | } |
1450 | 1448 | ||
@@ -1512,7 +1510,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, | |||
1512 | ret = -ERESTARTSYS; | 1510 | ret = -ERESTARTSYS; |
1513 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { | 1511 | if (afs_begin_vnode_operation(&fc, dvnode, key)) { |
1514 | while (afs_select_fileserver(&fc)) { | 1512 | while (afs_select_fileserver(&fc)) { |
1515 | fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; | 1513 | fc.cb_break = afs_calc_vnode_cb_break(dvnode); |
1516 | afs_fs_symlink(&fc, dentry->d_name.name, | 1514 | afs_fs_symlink(&fc, dentry->d_name.name, |
1517 | content, data_version, | 1515 | content, data_version, |
1518 | &newfid, &newstatus); | 1516 | &newfid, &newstatus); |
@@ -1588,8 +1586,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1588 | } | 1586 | } |
1589 | } | 1587 | } |
1590 | while (afs_select_fileserver(&fc)) { | 1588 | while (afs_select_fileserver(&fc)) { |
1591 | fc.cb_break = orig_dvnode->cb_break + orig_dvnode->cb_s_break; | 1589 | fc.cb_break = afs_calc_vnode_cb_break(orig_dvnode); |
1592 | fc.cb_break_2 = new_dvnode->cb_break + new_dvnode->cb_s_break; | 1590 | fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode); |
1593 | afs_fs_rename(&fc, old_dentry->d_name.name, | 1591 | afs_fs_rename(&fc, old_dentry->d_name.name, |
1594 | new_dvnode, new_dentry->d_name.name, | 1592 | new_dvnode, new_dentry->d_name.name, |
1595 | orig_data_version, new_data_version); | 1593 | orig_data_version, new_data_version); |
diff --git a/fs/afs/file.c b/fs/afs/file.c index c24c08016dd9..7d4f26198573 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c | |||
@@ -238,7 +238,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de | |||
238 | ret = -ERESTARTSYS; | 238 | ret = -ERESTARTSYS; |
239 | if (afs_begin_vnode_operation(&fc, vnode, key)) { | 239 | if (afs_begin_vnode_operation(&fc, vnode, key)) { |
240 | while (afs_select_fileserver(&fc)) { | 240 | while (afs_select_fileserver(&fc)) { |
241 | fc.cb_break = vnode->cb_break + vnode->cb_s_break; | 241 | fc.cb_break = afs_calc_vnode_cb_break(vnode); |
242 | afs_fs_fetch_data(&fc, desc); | 242 | afs_fs_fetch_data(&fc, desc); |
243 | } | 243 | } |
244 | 244 | ||
diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 7a0e017070ec..dc62d15a964b 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c | |||
@@ -86,7 +86,7 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key, | |||
86 | ret = -ERESTARTSYS; | 86 | ret = -ERESTARTSYS; |
87 | if (afs_begin_vnode_operation(&fc, vnode, key)) { | 87 | if (afs_begin_vnode_operation(&fc, vnode, key)) { |
88 | while (afs_select_fileserver(&fc)) { | 88 | while (afs_select_fileserver(&fc)) { |
89 | fc.cb_break = vnode->cb_break + vnode->cb_s_break; | 89 | fc.cb_break = afs_calc_vnode_cb_break(vnode); |
90 | afs_fs_set_lock(&fc, type); | 90 | afs_fs_set_lock(&fc, type); |
91 | } | 91 | } |
92 | 92 | ||
@@ -117,7 +117,7 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key) | |||
117 | ret = -ERESTARTSYS; | 117 | ret = -ERESTARTSYS; |
118 | if (afs_begin_vnode_operation(&fc, vnode, key)) { | 118 | if (afs_begin_vnode_operation(&fc, vnode, key)) { |
119 | while (afs_select_current_fileserver(&fc)) { | 119 | while (afs_select_current_fileserver(&fc)) { |
120 | fc.cb_break = vnode->cb_break + vnode->cb_s_break; | 120 | fc.cb_break = afs_calc_vnode_cb_break(vnode); |
121 | afs_fs_extend_lock(&fc); | 121 | afs_fs_extend_lock(&fc); |
122 | } | 122 | } |
123 | 123 | ||
@@ -148,7 +148,7 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key) | |||
148 | ret = -ERESTARTSYS; | 148 | ret = -ERESTARTSYS; |
149 | if (afs_begin_vnode_operation(&fc, vnode, key)) { | 149 | if (afs_begin_vnode_operation(&fc, vnode, key)) { |
150 | while (afs_select_current_fileserver(&fc)) { | 150 | while (afs_select_current_fileserver(&fc)) { |
151 | fc.cb_break = vnode->cb_break + vnode->cb_s_break; | 151 | fc.cb_break = afs_calc_vnode_cb_break(vnode); |
152 | afs_fs_release_lock(&fc); | 152 | afs_fs_release_lock(&fc); |
153 | } | 153 | } |
154 | 154 | ||
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index efacdb7c1dee..b273e1d60478 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c | |||
@@ -134,6 +134,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, | |||
134 | struct afs_read *read_req) | 134 | struct afs_read *read_req) |
135 | { | 135 | { |
136 | const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp; | 136 | const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp; |
137 | bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus); | ||
137 | u64 data_version, size; | 138 | u64 data_version, size; |
138 | u32 type, abort_code; | 139 | u32 type, abort_code; |
139 | u8 flags = 0; | 140 | u8 flags = 0; |
@@ -142,13 +143,32 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, | |||
142 | if (vnode) | 143 | if (vnode) |
143 | write_seqlock(&vnode->cb_lock); | 144 | write_seqlock(&vnode->cb_lock); |
144 | 145 | ||
146 | abort_code = ntohl(xdr->abort_code); | ||
147 | |||
145 | if (xdr->if_version != htonl(AFS_FSTATUS_VERSION)) { | 148 | if (xdr->if_version != htonl(AFS_FSTATUS_VERSION)) { |
149 | if (xdr->if_version == htonl(0) && | ||
150 | abort_code != 0 && | ||
151 | inline_error) { | ||
152 | /* The OpenAFS fileserver has a bug in FS.InlineBulkStatus | ||
153 | * whereby it doesn't set the interface version in the error | ||
154 | * case. | ||
155 | */ | ||
156 | status->abort_code = abort_code; | ||
157 | ret = 0; | ||
158 | goto out; | ||
159 | } | ||
160 | |||
146 | pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); | 161 | pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); |
147 | goto bad; | 162 | goto bad; |
148 | } | 163 | } |
149 | 164 | ||
165 | if (abort_code != 0 && inline_error) { | ||
166 | status->abort_code = abort_code; | ||
167 | ret = 0; | ||
168 | goto out; | ||
169 | } | ||
170 | |||
150 | type = ntohl(xdr->type); | 171 | type = ntohl(xdr->type); |
151 | abort_code = ntohl(xdr->abort_code); | ||
152 | switch (type) { | 172 | switch (type) { |
153 | case AFS_FTYPE_FILE: | 173 | case AFS_FTYPE_FILE: |
154 | case AFS_FTYPE_DIR: | 174 | case AFS_FTYPE_DIR: |
@@ -165,13 +185,6 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, | |||
165 | } | 185 | } |
166 | status->type = type; | 186 | status->type = type; |
167 | break; | 187 | break; |
168 | case AFS_FTYPE_INVALID: | ||
169 | if (abort_code != 0) { | ||
170 | status->abort_code = abort_code; | ||
171 | ret = 0; | ||
172 | goto out; | ||
173 | } | ||
174 | /* Fall through */ | ||
175 | default: | 188 | default: |
176 | goto bad; | 189 | goto bad; |
177 | } | 190 | } |
@@ -248,7 +261,7 @@ static void xdr_decode_AFSCallBack(struct afs_call *call, | |||
248 | 261 | ||
249 | write_seqlock(&vnode->cb_lock); | 262 | write_seqlock(&vnode->cb_lock); |
250 | 263 | ||
251 | if (call->cb_break == (vnode->cb_break + cbi->server->cb_s_break)) { | 264 | if (call->cb_break == afs_cb_break_sum(vnode, cbi)) { |
252 | vnode->cb_version = ntohl(*bp++); | 265 | vnode->cb_version = ntohl(*bp++); |
253 | cb_expiry = ntohl(*bp++); | 266 | cb_expiry = ntohl(*bp++); |
254 | vnode->cb_type = ntohl(*bp++); | 267 | vnode->cb_type = ntohl(*bp++); |
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 06194cfe9724..479b7fdda124 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
@@ -108,7 +108,7 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode) | |||
108 | ret = -ERESTARTSYS; | 108 | ret = -ERESTARTSYS; |
109 | if (afs_begin_vnode_operation(&fc, vnode, key)) { | 109 | if (afs_begin_vnode_operation(&fc, vnode, key)) { |
110 | while (afs_select_fileserver(&fc)) { | 110 | while (afs_select_fileserver(&fc)) { |
111 | fc.cb_break = vnode->cb_break + vnode->cb_s_break; | 111 | fc.cb_break = afs_calc_vnode_cb_break(vnode); |
112 | afs_fs_fetch_file_status(&fc, NULL, new_inode); | 112 | afs_fs_fetch_file_status(&fc, NULL, new_inode); |
113 | } | 113 | } |
114 | 114 | ||
@@ -393,15 +393,18 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) | |||
393 | read_seqlock_excl(&vnode->cb_lock); | 393 | read_seqlock_excl(&vnode->cb_lock); |
394 | 394 | ||
395 | if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { | 395 | if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { |
396 | if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break) { | 396 | if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break || |
397 | vnode->cb_v_break != vnode->volume->cb_v_break) { | ||
397 | vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; | 398 | vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; |
399 | vnode->cb_v_break = vnode->volume->cb_v_break; | ||
400 | valid = false; | ||
398 | } else if (vnode->status.type == AFS_FTYPE_DIR && | 401 | } else if (vnode->status.type == AFS_FTYPE_DIR && |
399 | test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) && | 402 | test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) && |
400 | vnode->cb_expires_at - 10 > now) { | 403 | vnode->cb_expires_at - 10 > now) { |
401 | valid = true; | 404 | valid = true; |
402 | } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) && | 405 | } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) && |
403 | vnode->cb_expires_at - 10 > now) { | 406 | vnode->cb_expires_at - 10 > now) { |
404 | valid = true; | 407 | valid = true; |
405 | } | 408 | } |
406 | } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { | 409 | } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { |
407 | valid = true; | 410 | valid = true; |
@@ -415,7 +418,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) | |||
415 | if (valid) | 418 | if (valid) |
416 | goto valid; | 419 | goto valid; |
417 | 420 | ||
418 | mutex_lock(&vnode->validate_lock); | 421 | down_write(&vnode->validate_lock); |
419 | 422 | ||
420 | /* if the promise has expired, we need to check the server again to get | 423 | /* if the promise has expired, we need to check the server again to get |
421 | * a new promise - note that if the (parent) directory's metadata was | 424 | * a new promise - note that if the (parent) directory's metadata was |
@@ -444,13 +447,13 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) | |||
444 | * different */ | 447 | * different */ |
445 | if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) | 448 | if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) |
446 | afs_zap_data(vnode); | 449 | afs_zap_data(vnode); |
447 | mutex_unlock(&vnode->validate_lock); | 450 | up_write(&vnode->validate_lock); |
448 | valid: | 451 | valid: |
449 | _leave(" = 0"); | 452 | _leave(" = 0"); |
450 | return 0; | 453 | return 0; |
451 | 454 | ||
452 | error_unlock: | 455 | error_unlock: |
453 | mutex_unlock(&vnode->validate_lock); | 456 | up_write(&vnode->validate_lock); |
454 | _leave(" = %d", ret); | 457 | _leave(" = %d", ret); |
455 | return ret; | 458 | return ret; |
456 | } | 459 | } |
@@ -574,7 +577,7 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr) | |||
574 | ret = -ERESTARTSYS; | 577 | ret = -ERESTARTSYS; |
575 | if (afs_begin_vnode_operation(&fc, vnode, key)) { | 578 | if (afs_begin_vnode_operation(&fc, vnode, key)) { |
576 | while (afs_select_fileserver(&fc)) { | 579 | while (afs_select_fileserver(&fc)) { |
577 | fc.cb_break = vnode->cb_break + vnode->cb_s_break; | 580 | fc.cb_break = afs_calc_vnode_cb_break(vnode); |
578 | afs_fs_setattr(&fc, attr); | 581 | afs_fs_setattr(&fc, attr); |
579 | } | 582 | } |
580 | 583 | ||
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index f8086ec95e24..e3f8a46663db 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
@@ -396,6 +396,7 @@ struct afs_server { | |||
396 | #define AFS_SERVER_FL_PROBED 5 /* The fileserver has been probed */ | 396 | #define AFS_SERVER_FL_PROBED 5 /* The fileserver has been probed */ |
397 | #define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */ | 397 | #define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */ |
398 | #define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */ | 398 | #define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */ |
399 | #define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */ | ||
399 | atomic_t usage; | 400 | atomic_t usage; |
400 | u32 addr_version; /* Address list version */ | 401 | u32 addr_version; /* Address list version */ |
401 | 402 | ||
@@ -433,6 +434,7 @@ struct afs_server_list { | |||
433 | unsigned short index; /* Server currently in use */ | 434 | unsigned short index; /* Server currently in use */ |
434 | unsigned short vnovol_mask; /* Servers to be skipped due to VNOVOL */ | 435 | unsigned short vnovol_mask; /* Servers to be skipped due to VNOVOL */ |
435 | unsigned int seq; /* Set to ->servers_seq when installed */ | 436 | unsigned int seq; /* Set to ->servers_seq when installed */ |
437 | rwlock_t lock; | ||
436 | struct afs_server_entry servers[]; | 438 | struct afs_server_entry servers[]; |
437 | }; | 439 | }; |
438 | 440 | ||
@@ -459,6 +461,9 @@ struct afs_volume { | |||
459 | rwlock_t servers_lock; /* Lock for ->servers */ | 461 | rwlock_t servers_lock; /* Lock for ->servers */ |
460 | unsigned int servers_seq; /* Incremented each time ->servers changes */ | 462 | unsigned int servers_seq; /* Incremented each time ->servers changes */ |
461 | 463 | ||
464 | unsigned cb_v_break; /* Break-everything counter. */ | ||
465 | rwlock_t cb_break_lock; | ||
466 | |||
462 | afs_voltype_t type; /* type of volume */ | 467 | afs_voltype_t type; /* type of volume */ |
463 | short error; | 468 | short error; |
464 | char type_force; /* force volume type (suppress R/O -> R/W) */ | 469 | char type_force; /* force volume type (suppress R/O -> R/W) */ |
@@ -494,7 +499,7 @@ struct afs_vnode { | |||
494 | #endif | 499 | #endif |
495 | struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */ | 500 | struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */ |
496 | struct mutex io_lock; /* Lock for serialising I/O on this mutex */ | 501 | struct mutex io_lock; /* Lock for serialising I/O on this mutex */ |
497 | struct mutex validate_lock; /* lock for validating this vnode */ | 502 | struct rw_semaphore validate_lock; /* lock for validating this vnode */ |
498 | spinlock_t wb_lock; /* lock for wb_keys */ | 503 | spinlock_t wb_lock; /* lock for wb_keys */ |
499 | spinlock_t lock; /* waitqueue/flags lock */ | 504 | spinlock_t lock; /* waitqueue/flags lock */ |
500 | unsigned long flags; | 505 | unsigned long flags; |
@@ -519,6 +524,7 @@ struct afs_vnode { | |||
519 | /* outstanding callback notification on this file */ | 524 | /* outstanding callback notification on this file */ |
520 | struct afs_cb_interest *cb_interest; /* Server on which this resides */ | 525 | struct afs_cb_interest *cb_interest; /* Server on which this resides */ |
521 | unsigned int cb_s_break; /* Mass break counter on ->server */ | 526 | unsigned int cb_s_break; /* Mass break counter on ->server */ |
527 | unsigned int cb_v_break; /* Mass break counter on ->volume */ | ||
522 | unsigned int cb_break; /* Break counter on vnode */ | 528 | unsigned int cb_break; /* Break counter on vnode */ |
523 | seqlock_t cb_lock; /* Lock for ->cb_interest, ->status, ->cb_*break */ | 529 | seqlock_t cb_lock; /* Lock for ->cb_interest, ->status, ->cb_*break */ |
524 | 530 | ||
@@ -648,16 +654,29 @@ extern void afs_init_callback_state(struct afs_server *); | |||
648 | extern void afs_break_callback(struct afs_vnode *); | 654 | extern void afs_break_callback(struct afs_vnode *); |
649 | extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*); | 655 | extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*); |
650 | 656 | ||
651 | extern int afs_register_server_cb_interest(struct afs_vnode *, struct afs_server_entry *); | 657 | extern int afs_register_server_cb_interest(struct afs_vnode *, |
658 | struct afs_server_list *, unsigned int); | ||
652 | extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *); | 659 | extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *); |
653 | extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *); | 660 | extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *); |
654 | 661 | ||
655 | static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi) | 662 | static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi) |
656 | { | 663 | { |
657 | refcount_inc(&cbi->usage); | 664 | if (cbi) |
665 | refcount_inc(&cbi->usage); | ||
658 | return cbi; | 666 | return cbi; |
659 | } | 667 | } |
660 | 668 | ||
669 | static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode) | ||
670 | { | ||
671 | return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break; | ||
672 | } | ||
673 | |||
674 | static inline unsigned int afs_cb_break_sum(struct afs_vnode *vnode, | ||
675 | struct afs_cb_interest *cbi) | ||
676 | { | ||
677 | return vnode->cb_break + cbi->server->cb_s_break + vnode->volume->cb_v_break; | ||
678 | } | ||
679 | |||
661 | /* | 680 | /* |
662 | * cell.c | 681 | * cell.c |
663 | */ | 682 | */ |
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index ac0feac9d746..e065bc0768e6 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c | |||
@@ -179,7 +179,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) | |||
179 | */ | 179 | */ |
180 | if (fc->flags & AFS_FS_CURSOR_VNOVOL) { | 180 | if (fc->flags & AFS_FS_CURSOR_VNOVOL) { |
181 | fc->ac.error = -EREMOTEIO; | 181 | fc->ac.error = -EREMOTEIO; |
182 | goto failed; | 182 | goto next_server; |
183 | } | 183 | } |
184 | 184 | ||
185 | write_lock(&vnode->volume->servers_lock); | 185 | write_lock(&vnode->volume->servers_lock); |
@@ -201,7 +201,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) | |||
201 | */ | 201 | */ |
202 | if (vnode->volume->servers == fc->server_list) { | 202 | if (vnode->volume->servers == fc->server_list) { |
203 | fc->ac.error = -EREMOTEIO; | 203 | fc->ac.error = -EREMOTEIO; |
204 | goto failed; | 204 | goto next_server; |
205 | } | 205 | } |
206 | 206 | ||
207 | /* Try again */ | 207 | /* Try again */ |
@@ -350,8 +350,8 @@ use_server: | |||
350 | * break request before we've finished decoding the reply and | 350 | * break request before we've finished decoding the reply and |
351 | * installing the vnode. | 351 | * installing the vnode. |
352 | */ | 352 | */ |
353 | fc->ac.error = afs_register_server_cb_interest( | 353 | fc->ac.error = afs_register_server_cb_interest(vnode, fc->server_list, |
354 | vnode, &fc->server_list->servers[fc->index]); | 354 | fc->index); |
355 | if (fc->ac.error < 0) | 355 | if (fc->ac.error < 0) |
356 | goto failed; | 356 | goto failed; |
357 | 357 | ||
@@ -369,8 +369,16 @@ use_server: | |||
369 | if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) { | 369 | if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) { |
370 | fc->ac.alist = afs_get_addrlist(alist); | 370 | fc->ac.alist = afs_get_addrlist(alist); |
371 | 371 | ||
372 | if (!afs_probe_fileserver(fc)) | 372 | if (!afs_probe_fileserver(fc)) { |
373 | goto failed; | 373 | switch (fc->ac.error) { |
374 | case -ENOMEM: | ||
375 | case -ERESTARTSYS: | ||
376 | case -EINTR: | ||
377 | goto failed; | ||
378 | default: | ||
379 | goto next_server; | ||
380 | } | ||
381 | } | ||
374 | } | 382 | } |
375 | 383 | ||
376 | if (!fc->ac.alist) | 384 | if (!fc->ac.alist) |
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 5c6263972ec9..08735948f15d 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
@@ -41,6 +41,7 @@ int afs_open_socket(struct afs_net *net) | |||
41 | { | 41 | { |
42 | struct sockaddr_rxrpc srx; | 42 | struct sockaddr_rxrpc srx; |
43 | struct socket *socket; | 43 | struct socket *socket; |
44 | unsigned int min_level; | ||
44 | int ret; | 45 | int ret; |
45 | 46 | ||
46 | _enter(""); | 47 | _enter(""); |
@@ -60,6 +61,12 @@ int afs_open_socket(struct afs_net *net) | |||
60 | srx.transport.sin6.sin6_family = AF_INET6; | 61 | srx.transport.sin6.sin6_family = AF_INET6; |
61 | srx.transport.sin6.sin6_port = htons(AFS_CM_PORT); | 62 | srx.transport.sin6.sin6_port = htons(AFS_CM_PORT); |
62 | 63 | ||
64 | min_level = RXRPC_SECURITY_ENCRYPT; | ||
65 | ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL, | ||
66 | (void *)&min_level, sizeof(min_level)); | ||
67 | if (ret < 0) | ||
68 | goto error_2; | ||
69 | |||
63 | ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); | 70 | ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); |
64 | if (ret == -EADDRINUSE) { | 71 | if (ret == -EADDRINUSE) { |
65 | srx.transport.sin6.sin6_port = 0; | 72 | srx.transport.sin6.sin6_port = 0; |
@@ -482,8 +489,12 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
482 | state = READ_ONCE(call->state); | 489 | state = READ_ONCE(call->state); |
483 | switch (ret) { | 490 | switch (ret) { |
484 | case 0: | 491 | case 0: |
485 | if (state == AFS_CALL_CL_PROC_REPLY) | 492 | if (state == AFS_CALL_CL_PROC_REPLY) { |
493 | if (call->cbi) | ||
494 | set_bit(AFS_SERVER_FL_MAY_HAVE_CB, | ||
495 | &call->cbi->server->flags); | ||
486 | goto call_complete; | 496 | goto call_complete; |
497 | } | ||
487 | ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY); | 498 | ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY); |
488 | goto done; | 499 | goto done; |
489 | case -EINPROGRESS: | 500 | case -EINPROGRESS: |
@@ -493,11 +504,6 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
493 | case -ECONNABORTED: | 504 | case -ECONNABORTED: |
494 | ASSERTCMP(state, ==, AFS_CALL_COMPLETE); | 505 | ASSERTCMP(state, ==, AFS_CALL_COMPLETE); |
495 | goto done; | 506 | goto done; |
496 | case -ENOTCONN: | ||
497 | abort_code = RX_CALL_DEAD; | ||
498 | rxrpc_kernel_abort_call(call->net->socket, call->rxcall, | ||
499 | abort_code, ret, "KNC"); | ||
500 | goto local_abort; | ||
501 | case -ENOTSUPP: | 507 | case -ENOTSUPP: |
502 | abort_code = RXGEN_OPCODE; | 508 | abort_code = RXGEN_OPCODE; |
503 | rxrpc_kernel_abort_call(call->net->socket, call->rxcall, | 509 | rxrpc_kernel_abort_call(call->net->socket, call->rxcall, |
diff --git a/fs/afs/security.c b/fs/afs/security.c index cea2fff313dc..1992b0ffa543 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c | |||
@@ -147,8 +147,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, | |||
147 | break; | 147 | break; |
148 | } | 148 | } |
149 | 149 | ||
150 | if (cb_break != (vnode->cb_break + | 150 | if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest)) { |
151 | vnode->cb_interest->server->cb_s_break)) { | ||
152 | changed = true; | 151 | changed = true; |
153 | break; | 152 | break; |
154 | } | 153 | } |
@@ -178,7 +177,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, | |||
178 | } | 177 | } |
179 | } | 178 | } |
180 | 179 | ||
181 | if (cb_break != (vnode->cb_break + vnode->cb_interest->server->cb_s_break)) | 180 | if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest)) |
182 | goto someone_else_changed_it; | 181 | goto someone_else_changed_it; |
183 | 182 | ||
184 | /* We need a ref on any permits list we want to copy as we'll have to | 183 | /* We need a ref on any permits list we want to copy as we'll have to |
@@ -257,7 +256,7 @@ found: | |||
257 | 256 | ||
258 | spin_lock(&vnode->lock); | 257 | spin_lock(&vnode->lock); |
259 | zap = rcu_access_pointer(vnode->permit_cache); | 258 | zap = rcu_access_pointer(vnode->permit_cache); |
260 | if (cb_break == (vnode->cb_break + vnode->cb_interest->server->cb_s_break) && | 259 | if (cb_break == afs_cb_break_sum(vnode, vnode->cb_interest) && |
261 | zap == permits) | 260 | zap == permits) |
262 | rcu_assign_pointer(vnode->permit_cache, replacement); | 261 | rcu_assign_pointer(vnode->permit_cache, replacement); |
263 | else | 262 | else |
diff --git a/fs/afs/server.c b/fs/afs/server.c index 629c74986cff..3af4625e2f8c 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
@@ -67,12 +67,6 @@ struct afs_server *afs_find_server(struct afs_net *net, | |||
67 | sizeof(struct in6_addr)); | 67 | sizeof(struct in6_addr)); |
68 | if (diff == 0) | 68 | if (diff == 0) |
69 | goto found; | 69 | goto found; |
70 | if (diff < 0) { | ||
71 | // TODO: Sort the list | ||
72 | //if (i == alist->nr_ipv4) | ||
73 | // goto not_found; | ||
74 | break; | ||
75 | } | ||
76 | } | 70 | } |
77 | } | 71 | } |
78 | } else { | 72 | } else { |
@@ -87,17 +81,10 @@ struct afs_server *afs_find_server(struct afs_net *net, | |||
87 | (u32 __force)b->sin6_addr.s6_addr32[3]); | 81 | (u32 __force)b->sin6_addr.s6_addr32[3]); |
88 | if (diff == 0) | 82 | if (diff == 0) |
89 | goto found; | 83 | goto found; |
90 | if (diff < 0) { | ||
91 | // TODO: Sort the list | ||
92 | //if (i == 0) | ||
93 | // goto not_found; | ||
94 | break; | ||
95 | } | ||
96 | } | 84 | } |
97 | } | 85 | } |
98 | } | 86 | } |
99 | 87 | ||
100 | //not_found: | ||
101 | server = NULL; | 88 | server = NULL; |
102 | found: | 89 | found: |
103 | if (server && !atomic_inc_not_zero(&server->usage)) | 90 | if (server && !atomic_inc_not_zero(&server->usage)) |
@@ -395,14 +382,16 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server) | |||
395 | struct afs_addr_list *alist = rcu_access_pointer(server->addresses); | 382 | struct afs_addr_list *alist = rcu_access_pointer(server->addresses); |
396 | struct afs_addr_cursor ac = { | 383 | struct afs_addr_cursor ac = { |
397 | .alist = alist, | 384 | .alist = alist, |
398 | .addr = &alist->addrs[0], | ||
399 | .start = alist->index, | 385 | .start = alist->index, |
400 | .index = alist->index, | 386 | .index = 0, |
387 | .addr = &alist->addrs[alist->index], | ||
401 | .error = 0, | 388 | .error = 0, |
402 | }; | 389 | }; |
403 | _enter("%p", server); | 390 | _enter("%p", server); |
404 | 391 | ||
405 | afs_fs_give_up_all_callbacks(net, server, &ac, NULL); | 392 | if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags)) |
393 | afs_fs_give_up_all_callbacks(net, server, &ac, NULL); | ||
394 | |||
406 | call_rcu(&server->rcu, afs_server_rcu); | 395 | call_rcu(&server->rcu, afs_server_rcu); |
407 | afs_dec_servers_outstanding(net); | 396 | afs_dec_servers_outstanding(net); |
408 | } | 397 | } |
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 0f8dc4c8f07c..8a5760aa5832 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c | |||
@@ -49,6 +49,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, | |||
49 | goto error; | 49 | goto error; |
50 | 50 | ||
51 | refcount_set(&slist->usage, 1); | 51 | refcount_set(&slist->usage, 1); |
52 | rwlock_init(&slist->lock); | ||
52 | 53 | ||
53 | /* Make sure a record exists for each server in the list. */ | 54 | /* Make sure a record exists for each server in the list. */ |
54 | for (i = 0; i < vldb->nr_servers; i++) { | 55 | for (i = 0; i < vldb->nr_servers; i++) { |
@@ -64,9 +65,11 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, | |||
64 | goto error_2; | 65 | goto error_2; |
65 | } | 66 | } |
66 | 67 | ||
67 | /* Insertion-sort by server pointer */ | 68 | /* Insertion-sort by UUID */ |
68 | for (j = 0; j < slist->nr_servers; j++) | 69 | for (j = 0; j < slist->nr_servers; j++) |
69 | if (slist->servers[j].server >= server) | 70 | if (memcmp(&slist->servers[j].server->uuid, |
71 | &server->uuid, | ||
72 | sizeof(server->uuid)) >= 0) | ||
70 | break; | 73 | break; |
71 | if (j < slist->nr_servers) { | 74 | if (j < slist->nr_servers) { |
72 | if (slist->servers[j].server == server) { | 75 | if (slist->servers[j].server == server) { |
diff --git a/fs/afs/super.c b/fs/afs/super.c index 65081ec3c36e..9e5d7966621c 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c | |||
@@ -590,7 +590,7 @@ static void afs_i_init_once(void *_vnode) | |||
590 | memset(vnode, 0, sizeof(*vnode)); | 590 | memset(vnode, 0, sizeof(*vnode)); |
591 | inode_init_once(&vnode->vfs_inode); | 591 | inode_init_once(&vnode->vfs_inode); |
592 | mutex_init(&vnode->io_lock); | 592 | mutex_init(&vnode->io_lock); |
593 | mutex_init(&vnode->validate_lock); | 593 | init_rwsem(&vnode->validate_lock); |
594 | spin_lock_init(&vnode->wb_lock); | 594 | spin_lock_init(&vnode->wb_lock); |
595 | spin_lock_init(&vnode->lock); | 595 | spin_lock_init(&vnode->lock); |
596 | INIT_LIST_HEAD(&vnode->wb_keys); | 596 | INIT_LIST_HEAD(&vnode->wb_keys); |
@@ -688,7 +688,7 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
688 | if (afs_begin_vnode_operation(&fc, vnode, key)) { | 688 | if (afs_begin_vnode_operation(&fc, vnode, key)) { |
689 | fc.flags |= AFS_FS_CURSOR_NO_VSLEEP; | 689 | fc.flags |= AFS_FS_CURSOR_NO_VSLEEP; |
690 | while (afs_select_fileserver(&fc)) { | 690 | while (afs_select_fileserver(&fc)) { |
691 | fc.cb_break = vnode->cb_break + vnode->cb_s_break; | 691 | fc.cb_break = afs_calc_vnode_cb_break(vnode); |
692 | afs_fs_get_volume_status(&fc, &vs); | 692 | afs_fs_get_volume_status(&fc, &vs); |
693 | } | 693 | } |
694 | 694 | ||
diff --git a/fs/afs/write.c b/fs/afs/write.c index c164698dc304..8b39e6ebb40b 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
@@ -351,7 +351,7 @@ found_key: | |||
351 | ret = -ERESTARTSYS; | 351 | ret = -ERESTARTSYS; |
352 | if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) { | 352 | if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) { |
353 | while (afs_select_fileserver(&fc)) { | 353 | while (afs_select_fileserver(&fc)) { |
354 | fc.cb_break = vnode->cb_break + vnode->cb_s_break; | 354 | fc.cb_break = afs_calc_vnode_cb_break(vnode); |
355 | afs_fs_store_data(&fc, mapping, first, last, offset, to); | 355 | afs_fs_store_data(&fc, mapping, first, last, offset, to); |
356 | } | 356 | } |
357 | 357 | ||
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index f85040d73e3d..cf0e45b10121 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
@@ -70,69 +70,104 @@ static __le32 ceph_flags_sys2wire(u32 flags) | |||
70 | */ | 70 | */ |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * Calculate the length sum of direct io vectors that can | 73 | * How many pages to get in one call to iov_iter_get_pages(). This |
74 | * be combined into one page vector. | 74 | * determines the size of the on-stack array used as a buffer. |
75 | */ | 75 | */ |
76 | static size_t dio_get_pagev_size(const struct iov_iter *it) | 76 | #define ITER_GET_BVECS_PAGES 64 |
77 | |||
78 | static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize, | ||
79 | struct bio_vec *bvecs) | ||
77 | { | 80 | { |
78 | const struct iovec *iov = it->iov; | 81 | size_t size = 0; |
79 | const struct iovec *iovend = iov + it->nr_segs; | 82 | int bvec_idx = 0; |
80 | size_t size; | 83 | |
81 | 84 | if (maxsize > iov_iter_count(iter)) | |
82 | size = iov->iov_len - it->iov_offset; | 85 | maxsize = iov_iter_count(iter); |
83 | /* | 86 | |
84 | * An iov can be page vectored when both the current tail | 87 | while (size < maxsize) { |
85 | * and the next base are page aligned. | 88 | struct page *pages[ITER_GET_BVECS_PAGES]; |
86 | */ | 89 | ssize_t bytes; |
87 | while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) && | 90 | size_t start; |
88 | (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) { | 91 | int idx = 0; |
89 | size += iov->iov_len; | 92 | |
90 | } | 93 | bytes = iov_iter_get_pages(iter, pages, maxsize - size, |
91 | dout("dio_get_pagevlen len = %zu\n", size); | 94 | ITER_GET_BVECS_PAGES, &start); |
92 | return size; | 95 | if (bytes < 0) |
96 | return size ?: bytes; | ||
97 | |||
98 | iov_iter_advance(iter, bytes); | ||
99 | size += bytes; | ||
100 | |||
101 | for ( ; bytes; idx++, bvec_idx++) { | ||
102 | struct bio_vec bv = { | ||
103 | .bv_page = pages[idx], | ||
104 | .bv_len = min_t(int, bytes, PAGE_SIZE - start), | ||
105 | .bv_offset = start, | ||
106 | }; | ||
107 | |||
108 | bvecs[bvec_idx] = bv; | ||
109 | bytes -= bv.bv_len; | ||
110 | start = 0; | ||
111 | } | ||
112 | } | ||
113 | |||
114 | return size; | ||
93 | } | 115 | } |
94 | 116 | ||
95 | /* | 117 | /* |
96 | * Allocate a page vector based on (@it, @nbytes). | 118 | * iov_iter_get_pages() only considers one iov_iter segment, no matter |
97 | * The return value is the tuple describing a page vector, | 119 | * what maxsize or maxpages are given. For ITER_BVEC that is a single |
98 | * that is (@pages, @page_align, @num_pages). | 120 | * page. |
121 | * | ||
122 | * Attempt to get up to @maxsize bytes worth of pages from @iter. | ||
123 | * Return the number of bytes in the created bio_vec array, or an error. | ||
99 | */ | 124 | */ |
100 | static struct page ** | 125 | static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize, |
101 | dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes, | 126 | struct bio_vec **bvecs, int *num_bvecs) |
102 | size_t *page_align, int *num_pages) | ||
103 | { | 127 | { |
104 | struct iov_iter tmp_it = *it; | 128 | struct bio_vec *bv; |
105 | size_t align; | 129 | size_t orig_count = iov_iter_count(iter); |
106 | struct page **pages; | 130 | ssize_t bytes; |
107 | int ret = 0, idx, npages; | 131 | int npages; |
108 | 132 | ||
109 | align = (unsigned long)(it->iov->iov_base + it->iov_offset) & | 133 | iov_iter_truncate(iter, maxsize); |
110 | (PAGE_SIZE - 1); | 134 | npages = iov_iter_npages(iter, INT_MAX); |
111 | npages = calc_pages_for(align, nbytes); | 135 | iov_iter_reexpand(iter, orig_count); |
112 | pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL); | ||
113 | if (!pages) | ||
114 | return ERR_PTR(-ENOMEM); | ||
115 | 136 | ||
116 | for (idx = 0; idx < npages; ) { | 137 | /* |
117 | size_t start; | 138 | * __iter_get_bvecs() may populate only part of the array -- zero it |
118 | ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes, | 139 | * out. |
119 | npages - idx, &start); | 140 | */ |
120 | if (ret < 0) | 141 | bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO); |
121 | goto fail; | 142 | if (!bv) |
143 | return -ENOMEM; | ||
122 | 144 | ||
123 | iov_iter_advance(&tmp_it, ret); | 145 | bytes = __iter_get_bvecs(iter, maxsize, bv); |
124 | nbytes -= ret; | 146 | if (bytes < 0) { |
125 | idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE; | 147 | /* |
148 | * No pages were pinned -- just free the array. | ||
149 | */ | ||
150 | kvfree(bv); | ||
151 | return bytes; | ||
126 | } | 152 | } |
127 | 153 | ||
128 | BUG_ON(nbytes != 0); | 154 | *bvecs = bv; |
129 | *num_pages = npages; | 155 | *num_bvecs = npages; |
130 | *page_align = align; | 156 | return bytes; |
131 | dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); | 157 | } |
132 | return pages; | 158 | |
133 | fail: | 159 | static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty) |
134 | ceph_put_page_vector(pages, idx, false); | 160 | { |
135 | return ERR_PTR(ret); | 161 | int i; |
162 | |||
163 | for (i = 0; i < num_bvecs; i++) { | ||
164 | if (bvecs[i].bv_page) { | ||
165 | if (should_dirty) | ||
166 | set_page_dirty_lock(bvecs[i].bv_page); | ||
167 | put_page(bvecs[i].bv_page); | ||
168 | } | ||
169 | } | ||
170 | kvfree(bvecs); | ||
136 | } | 171 | } |
137 | 172 | ||
138 | /* | 173 | /* |
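The helpers above are meant to be used as a pair: iter_get_bvecs_alloc() pins the pages behind an iov_iter and wraps them in a freshly allocated bio_vec array, while put_bvecs() drops the page references (dirtying them first if requested) and frees that array. A minimal caller, sketched along the lines of the direct-I/O loop further down rather than copied from it:

    struct bio_vec *bvecs;
    int num_bvecs;
    ssize_t len;

    len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_bvecs);
    if (len < 0)
        return len;                 /* nothing was pinned, nothing to free */

    /* ... attach (bvecs, num_bvecs, len) to the OSD request and submit ... */

    put_bvecs(bvecs, num_bvecs, should_dirty);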
@@ -746,11 +781,12 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) | |||
746 | struct inode *inode = req->r_inode; | 781 | struct inode *inode = req->r_inode; |
747 | struct ceph_aio_request *aio_req = req->r_priv; | 782 | struct ceph_aio_request *aio_req = req->r_priv; |
748 | struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); | 783 | struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); |
749 | int num_pages = calc_pages_for((u64)osd_data->alignment, | ||
750 | osd_data->length); | ||
751 | 784 | ||
752 | dout("ceph_aio_complete_req %p rc %d bytes %llu\n", | 785 | BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS); |
753 | inode, rc, osd_data->length); | 786 | BUG_ON(!osd_data->num_bvecs); |
787 | |||
788 | dout("ceph_aio_complete_req %p rc %d bytes %u\n", | ||
789 | inode, rc, osd_data->bvec_pos.iter.bi_size); | ||
754 | 790 | ||
755 | if (rc == -EOLDSNAPC) { | 791 | if (rc == -EOLDSNAPC) { |
756 | struct ceph_aio_work *aio_work; | 792 | struct ceph_aio_work *aio_work; |
@@ -768,9 +804,10 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) | |||
768 | } else if (!aio_req->write) { | 804 | } else if (!aio_req->write) { |
769 | if (rc == -ENOENT) | 805 | if (rc == -ENOENT) |
770 | rc = 0; | 806 | rc = 0; |
771 | if (rc >= 0 && osd_data->length > rc) { | 807 | if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) { |
772 | int zoff = osd_data->alignment + rc; | 808 | struct iov_iter i; |
773 | int zlen = osd_data->length - rc; | 809 | int zlen = osd_data->bvec_pos.iter.bi_size - rc; |
810 | |||
774 | /* | 811 | /* |
775 | * If read is satisfied by single OSD request, | 812 | * If read is satisfied by single OSD request, |
776 | * it can pass EOF. Otherwise read is within | 813 | * it can pass EOF. Otherwise read is within |
@@ -785,13 +822,16 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) | |||
785 | aio_req->total_len = rc + zlen; | 822 | aio_req->total_len = rc + zlen; |
786 | } | 823 | } |
787 | 824 | ||
788 | if (zlen > 0) | 825 | iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs, |
789 | ceph_zero_page_vector_range(zoff, zlen, | 826 | osd_data->num_bvecs, |
790 | osd_data->pages); | 827 | osd_data->bvec_pos.iter.bi_size); |
828 | iov_iter_advance(&i, rc); | ||
829 | iov_iter_zero(zlen, &i); | ||
791 | } | 830 | } |
792 | } | 831 | } |
793 | 832 | ||
794 | ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty); | 833 | put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs, |
834 | aio_req->should_dirty); | ||
795 | ceph_osdc_put_request(req); | 835 | ceph_osdc_put_request(req); |
796 | 836 | ||
797 | if (rc < 0) | 837 | if (rc < 0) |
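Note how the completion path pads a short read now: instead of zeroing a page vector by offset, it builds a throwaway ITER_BVEC iterator over the same bio_vec array, advances it past the bytes the OSD actually returned, and zeroes whatever remains. Stripped of the ceph field names, the pattern is:

    struct iov_iter i;

    iov_iter_bvec(&i, ITER_BVEC, bvecs, num_bvecs, total_len);
    iov_iter_advance(&i, bytes_returned);          /* skip the valid data */
    iov_iter_zero(total_len - bytes_returned, &i); /* zero-fill the tail */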
@@ -879,7 +919,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, | |||
879 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); | 919 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); |
880 | struct ceph_vino vino; | 920 | struct ceph_vino vino; |
881 | struct ceph_osd_request *req; | 921 | struct ceph_osd_request *req; |
882 | struct page **pages; | 922 | struct bio_vec *bvecs; |
883 | struct ceph_aio_request *aio_req = NULL; | 923 | struct ceph_aio_request *aio_req = NULL; |
884 | int num_pages = 0; | 924 | int num_pages = 0; |
885 | int flags; | 925 | int flags; |
@@ -914,10 +954,14 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, | |||
914 | } | 954 | } |
915 | 955 | ||
916 | while (iov_iter_count(iter) > 0) { | 956 | while (iov_iter_count(iter) > 0) { |
917 | u64 size = dio_get_pagev_size(iter); | 957 | u64 size = iov_iter_count(iter); |
918 | size_t start = 0; | ||
919 | ssize_t len; | 958 | ssize_t len; |
920 | 959 | ||
960 | if (write) | ||
961 | size = min_t(u64, size, fsc->mount_options->wsize); | ||
962 | else | ||
963 | size = min_t(u64, size, fsc->mount_options->rsize); | ||
964 | |||
921 | vino = ceph_vino(inode); | 965 | vino = ceph_vino(inode); |
922 | req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, | 966 | req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, |
923 | vino, pos, &size, 0, | 967 | vino, pos, &size, 0, |
@@ -933,18 +977,14 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, | |||
933 | break; | 977 | break; |
934 | } | 978 | } |
935 | 979 | ||
936 | if (write) | 980 | len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages); |
937 | size = min_t(u64, size, fsc->mount_options->wsize); | 981 | if (len < 0) { |
938 | else | ||
939 | size = min_t(u64, size, fsc->mount_options->rsize); | ||
940 | |||
941 | len = size; | ||
942 | pages = dio_get_pages_alloc(iter, len, &start, &num_pages); | ||
943 | if (IS_ERR(pages)) { | ||
944 | ceph_osdc_put_request(req); | 982 | ceph_osdc_put_request(req); |
945 | ret = PTR_ERR(pages); | 983 | ret = len; |
946 | break; | 984 | break; |
947 | } | 985 | } |
986 | if (len != size) | ||
987 | osd_req_op_extent_update(req, 0, len); | ||
948 | 988 | ||
949 | /* | 989 | /* |
950 | * To simplify error handling, allow AIO when IO within i_size | 990 | * To simplify error handling, allow AIO when IO within i_size |
@@ -977,8 +1017,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, | |||
977 | req->r_mtime = mtime; | 1017 | req->r_mtime = mtime; |
978 | } | 1018 | } |
979 | 1019 | ||
980 | osd_req_op_extent_osd_data_pages(req, 0, pages, len, start, | 1020 | osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len); |
981 | false, false); | ||
982 | 1021 | ||
983 | if (aio_req) { | 1022 | if (aio_req) { |
984 | aio_req->total_len += len; | 1023 | aio_req->total_len += len; |
@@ -991,7 +1030,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, | |||
991 | list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); | 1030 | list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs); |
992 | 1031 | ||
993 | pos += len; | 1032 | pos += len; |
994 | iov_iter_advance(iter, len); | ||
995 | continue; | 1033 | continue; |
996 | } | 1034 | } |
997 | 1035 | ||
@@ -1004,25 +1042,26 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, | |||
1004 | if (ret == -ENOENT) | 1042 | if (ret == -ENOENT) |
1005 | ret = 0; | 1043 | ret = 0; |
1006 | if (ret >= 0 && ret < len && pos + ret < size) { | 1044 | if (ret >= 0 && ret < len && pos + ret < size) { |
1045 | struct iov_iter i; | ||
1007 | int zlen = min_t(size_t, len - ret, | 1046 | int zlen = min_t(size_t, len - ret, |
1008 | size - pos - ret); | 1047 | size - pos - ret); |
1009 | ceph_zero_page_vector_range(start + ret, zlen, | 1048 | |
1010 | pages); | 1049 | iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages, |
1050 | len); | ||
1051 | iov_iter_advance(&i, ret); | ||
1052 | iov_iter_zero(zlen, &i); | ||
1011 | ret += zlen; | 1053 | ret += zlen; |
1012 | } | 1054 | } |
1013 | if (ret >= 0) | 1055 | if (ret >= 0) |
1014 | len = ret; | 1056 | len = ret; |
1015 | } | 1057 | } |
1016 | 1058 | ||
1017 | ceph_put_page_vector(pages, num_pages, should_dirty); | 1059 | put_bvecs(bvecs, num_pages, should_dirty); |
1018 | |||
1019 | ceph_osdc_put_request(req); | 1060 | ceph_osdc_put_request(req); |
1020 | if (ret < 0) | 1061 | if (ret < 0) |
1021 | break; | 1062 | break; |
1022 | 1063 | ||
1023 | pos += len; | 1064 | pos += len; |
1024 | iov_iter_advance(iter, len); | ||
1025 | |||
1026 | if (!write && pos >= size) | 1065 | if (!write && pos >= size) |
1027 | break; | 1066 | break; |
1028 | 1067 | ||
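Two ordering details in the rewritten loop are easy to miss. The wsize/rsize clamp moves above ceph_osdc_new_request(), so the OSD request is created with the already-bounded length, and when iter_get_bvecs_alloc() pins fewer bytes than asked for, osd_req_op_extent_update() shrinks the extent op to match. The explicit iov_iter_advance(iter, len) calls disappear because iter_get_bvecs_alloc() consumes the iterator as it pins pages. One iteration, condensed (error paths and unrelated arguments elided):

    u64 size = iov_iter_count(iter);

    size = min_t(u64, size, write ? fsc->mount_options->wsize
                                  : fsc->mount_options->rsize);
    req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, pos, &size, ...);

    len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
    if (len != size)
        osd_req_op_extent_update(req, 0, len);  /* trim the op to what was pinned */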
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f715609b13f3..5a5a0158cc8f 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -1047,6 +1047,18 @@ out: | |||
1047 | return rc; | 1047 | return rc; |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | /* | ||
1051 | * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync() | ||
1052 | * is a dummy operation. | ||
1053 | */ | ||
1054 | static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) | ||
1055 | { | ||
1056 | cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n", | ||
1057 | file, datasync); | ||
1058 | |||
1059 | return 0; | ||
1060 | } | ||
1061 | |||
1050 | static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, | 1062 | static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, |
1051 | struct file *dst_file, loff_t destoff, | 1063 | struct file *dst_file, loff_t destoff, |
1052 | size_t len, unsigned int flags) | 1064 | size_t len, unsigned int flags) |
@@ -1181,6 +1193,7 @@ const struct file_operations cifs_dir_ops = { | |||
1181 | .copy_file_range = cifs_copy_file_range, | 1193 | .copy_file_range = cifs_copy_file_range, |
1182 | .clone_file_range = cifs_clone_file_range, | 1194 | .clone_file_range = cifs_clone_file_range, |
1183 | .llseek = generic_file_llseek, | 1195 | .llseek = generic_file_llseek, |
1196 | .fsync = cifs_dir_fsync, | ||
1184 | }; | 1197 | }; |
1185 | 1198 | ||
1186 | static void | 1199 | static void |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a5aa158d535a..7a10a5d0731f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1977,14 +1977,6 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1977 | goto cifs_parse_mount_err; | 1977 | goto cifs_parse_mount_err; |
1978 | } | 1978 | } |
1979 | 1979 | ||
1980 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
1981 | if (vol->rdma && vol->sign) { | ||
1982 | cifs_dbg(VFS, "Currently SMB direct doesn't support signing." | ||
1983 | " This is being fixed\n"); | ||
1984 | goto cifs_parse_mount_err; | ||
1985 | } | ||
1986 | #endif | ||
1987 | |||
1988 | #ifndef CONFIG_KEYS | 1980 | #ifndef CONFIG_KEYS |
1989 | /* Multiuser mounts require CONFIG_KEYS support */ | 1981 | /* Multiuser mounts require CONFIG_KEYS support */ |
1990 | if (vol->multiuser) { | 1982 | if (vol->multiuser) { |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index b76b85881dcc..9c6d95ffca97 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -589,9 +589,15 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, | |||
589 | 589 | ||
590 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); | 590 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); |
591 | 591 | ||
592 | /* | ||
593 | * If ea_name is NULL (listxattr) and there are no EAs, return 0 as it's | ||
594 | * not an error. Otherwise, the specified ea_name was not found. | ||
595 | */ | ||
592 | if (!rc) | 596 | if (!rc) |
593 | rc = move_smb2_ea_to_cifs(ea_data, buf_size, smb2_data, | 597 | rc = move_smb2_ea_to_cifs(ea_data, buf_size, smb2_data, |
594 | SMB2_MAX_EA_BUF, ea_name); | 598 | SMB2_MAX_EA_BUF, ea_name); |
599 | else if (!ea_name && rc == -ENODATA) | ||
600 | rc = 0; | ||
595 | 601 | ||
596 | kfree(smb2_data); | 602 | kfree(smb2_data); |
597 | return rc; | 603 | return rc; |
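The added branch matters because the same helper backs both getxattr (ea_name set) and listxattr (ea_name NULL): an empty EA list is a perfectly valid listxattr result, while a missing named EA really is an error for getxattr. In effect:

    if (rc == -ENODATA && !ea_name)
        rc = 0;     /* listxattr: having no EAs at all is not an error */
    /* getxattr keeps -ENODATA: the requested EA does not exist */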
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 60db51bae0e3..0f48741a0130 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -730,19 +730,14 @@ neg_exit: | |||
730 | 730 | ||
731 | int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | 731 | int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) |
732 | { | 732 | { |
733 | int rc = 0; | 733 | int rc; |
734 | struct validate_negotiate_info_req vneg_inbuf; | 734 | struct validate_negotiate_info_req *pneg_inbuf; |
735 | struct validate_negotiate_info_rsp *pneg_rsp = NULL; | 735 | struct validate_negotiate_info_rsp *pneg_rsp = NULL; |
736 | u32 rsplen; | 736 | u32 rsplen; |
737 | u32 inbuflen; /* max of 4 dialects */ | 737 | u32 inbuflen; /* max of 4 dialects */ |
738 | 738 | ||
739 | cifs_dbg(FYI, "validate negotiate\n"); | 739 | cifs_dbg(FYI, "validate negotiate\n"); |
740 | 740 | ||
741 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
742 | if (tcon->ses->server->rdma) | ||
743 | return 0; | ||
744 | #endif | ||
745 | |||
746 | /* In SMB3.11 preauth integrity supersedes validate negotiate */ | 741 | /* In SMB3.11 preauth integrity supersedes validate negotiate */ |
747 | if (tcon->ses->server->dialect == SMB311_PROT_ID) | 742 | if (tcon->ses->server->dialect == SMB311_PROT_ID) |
748 | return 0; | 743 | return 0; |
@@ -765,63 +760,69 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
765 | if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) | 760 | if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) |
766 | cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n"); | 761 | cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n"); |
767 | 762 | ||
768 | vneg_inbuf.Capabilities = | 763 | pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS); |
764 | if (!pneg_inbuf) | ||
765 | return -ENOMEM; | ||
766 | |||
767 | pneg_inbuf->Capabilities = | ||
769 | cpu_to_le32(tcon->ses->server->vals->req_capabilities); | 768 | cpu_to_le32(tcon->ses->server->vals->req_capabilities); |
770 | memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid, | 769 | memcpy(pneg_inbuf->Guid, tcon->ses->server->client_guid, |
771 | SMB2_CLIENT_GUID_SIZE); | 770 | SMB2_CLIENT_GUID_SIZE); |
772 | 771 | ||
773 | if (tcon->ses->sign) | 772 | if (tcon->ses->sign) |
774 | vneg_inbuf.SecurityMode = | 773 | pneg_inbuf->SecurityMode = |
775 | cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); | 774 | cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); |
776 | else if (global_secflags & CIFSSEC_MAY_SIGN) | 775 | else if (global_secflags & CIFSSEC_MAY_SIGN) |
777 | vneg_inbuf.SecurityMode = | 776 | pneg_inbuf->SecurityMode = |
778 | cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); | 777 | cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); |
779 | else | 778 | else |
780 | vneg_inbuf.SecurityMode = 0; | 779 | pneg_inbuf->SecurityMode = 0; |
781 | 780 | ||
782 | 781 | ||
783 | if (strcmp(tcon->ses->server->vals->version_string, | 782 | if (strcmp(tcon->ses->server->vals->version_string, |
784 | SMB3ANY_VERSION_STRING) == 0) { | 783 | SMB3ANY_VERSION_STRING) == 0) { |
785 | vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID); | 784 | pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); |
786 | vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID); | 785 | pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); |
787 | vneg_inbuf.DialectCount = cpu_to_le16(2); | 786 | pneg_inbuf->DialectCount = cpu_to_le16(2); |
788 | /* structure is big enough for 3 dialects, sending only 2 */ | 787 | /* structure is big enough for 3 dialects, sending only 2 */ |
789 | inbuflen = sizeof(struct validate_negotiate_info_req) - 2; | 788 | inbuflen = sizeof(*pneg_inbuf) - |
789 | sizeof(pneg_inbuf->Dialects[0]); | ||
790 | } else if (strcmp(tcon->ses->server->vals->version_string, | 790 | } else if (strcmp(tcon->ses->server->vals->version_string, |
791 | SMBDEFAULT_VERSION_STRING) == 0) { | 791 | SMBDEFAULT_VERSION_STRING) == 0) { |
792 | vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID); | 792 | pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); |
793 | vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID); | 793 | pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); |
794 | vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID); | 794 | pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); |
795 | vneg_inbuf.DialectCount = cpu_to_le16(3); | 795 | pneg_inbuf->DialectCount = cpu_to_le16(3); |
796 | /* structure is big enough for 3 dialects */ | 796 | /* structure is big enough for 3 dialects */ |
797 | inbuflen = sizeof(struct validate_negotiate_info_req); | 797 | inbuflen = sizeof(*pneg_inbuf); |
798 | } else { | 798 | } else { |
799 | /* otherwise specific dialect was requested */ | 799 | /* otherwise specific dialect was requested */ |
800 | vneg_inbuf.Dialects[0] = | 800 | pneg_inbuf->Dialects[0] = |
801 | cpu_to_le16(tcon->ses->server->vals->protocol_id); | 801 | cpu_to_le16(tcon->ses->server->vals->protocol_id); |
802 | vneg_inbuf.DialectCount = cpu_to_le16(1); | 802 | pneg_inbuf->DialectCount = cpu_to_le16(1); |
803 | /* structure is big enough for 3 dialects, sending only 1 */ | 803 | /* structure is big enough for 3 dialects, sending only 1 */ |
804 | inbuflen = sizeof(struct validate_negotiate_info_req) - 4; | 804 | inbuflen = sizeof(*pneg_inbuf) - |
805 | sizeof(pneg_inbuf->Dialects[0]) * 2; | ||
805 | } | 806 | } |
806 | 807 | ||
807 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 808 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
808 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, | 809 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, |
809 | (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req), | 810 | (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen); |
810 | (char **)&pneg_rsp, &rsplen); | ||
811 | 811 | ||
812 | if (rc != 0) { | 812 | if (rc != 0) { |
813 | cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); | 813 | cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); |
814 | return -EIO; | 814 | rc = -EIO; |
815 | goto out_free_inbuf; | ||
815 | } | 816 | } |
816 | 817 | ||
817 | if (rsplen != sizeof(struct validate_negotiate_info_rsp)) { | 818 | rc = -EIO; |
819 | if (rsplen != sizeof(*pneg_rsp)) { | ||
818 | cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n", | 820 | cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n", |
819 | rsplen); | 821 | rsplen); |
820 | 822 | ||
821 | /* relax check since Mac returns max bufsize allowed on ioctl */ | 823 | /* relax check since Mac returns max bufsize allowed on ioctl */ |
822 | if ((rsplen > CIFSMaxBufSize) | 824 | if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp)) |
823 | || (rsplen < sizeof(struct validate_negotiate_info_rsp))) | 825 | goto out_free_rsp; |
824 | goto err_rsp_free; | ||
825 | } | 826 | } |
826 | 827 | ||
827 | /* check validate negotiate info response matches what we got earlier */ | 828 | /* check validate negotiate info response matches what we got earlier */ |
@@ -838,15 +839,17 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
838 | goto vneg_out; | 839 | goto vneg_out; |
839 | 840 | ||
840 | /* validate negotiate successful */ | 841 | /* validate negotiate successful */ |
842 | rc = 0; | ||
841 | cifs_dbg(FYI, "validate negotiate info successful\n"); | 843 | cifs_dbg(FYI, "validate negotiate info successful\n"); |
842 | kfree(pneg_rsp); | 844 | goto out_free_rsp; |
843 | return 0; | ||
844 | 845 | ||
845 | vneg_out: | 846 | vneg_out: |
846 | cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); | 847 | cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); |
847 | err_rsp_free: | 848 | out_free_rsp: |
848 | kfree(pneg_rsp); | 849 | kfree(pneg_rsp); |
849 | return -EIO; | 850 | out_free_inbuf: |
851 | kfree(pneg_inbuf); | ||
852 | return rc; | ||
850 | } | 853 | } |
851 | 854 | ||
852 | enum securityEnum | 855 | enum securityEnum |
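The smb3_validate_negotiate() rework moves the request buffer off the stack into a kmalloc'd allocation and converts the early returns into a single goto-based unwind, so the buffers are freed on every exit path; it also starts passing the computed inbuflen to SMB2_ioctl() instead of the full structure size, so only the dialects actually filled in are sent. Reduced to the error-handling skeleton (the helper names here are stand-ins, not the real calls):

    struct validate_negotiate_info_req *pneg_inbuf;
    int rc;

    pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
    if (!pneg_inbuf)
        return -ENOMEM;

    rc = send_request(pneg_inbuf);      /* stand-in for the SMB2_ioctl() call */
    if (rc)
        goto out_free_inbuf;

    rc = validate_response();           /* stand-in for the response checks */

out_free_inbuf:
    kfree(pneg_inbuf);
    return rc;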
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 513c357c734b..a6c0f54c48c3 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
@@ -588,6 +588,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
588 | return 0; | 588 | return 0; |
589 | 589 | ||
590 | out_put_hidden_dir: | 590 | out_put_hidden_dir: |
591 | cancel_delayed_work_sync(&sbi->sync_work); | ||
591 | iput(sbi->hidden_dir); | 592 | iput(sbi->hidden_dir); |
592 | out_put_root: | 593 | out_put_root: |
593 | dput(sb->s_root); | 594 | dput(sb->s_root); |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 01c6b3894406..7869622af22a 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -4250,10 +4250,11 @@ out: | |||
4250 | static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, | 4250 | static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, |
4251 | struct dentry *new_dentry, bool preserve) | 4251 | struct dentry *new_dentry, bool preserve) |
4252 | { | 4252 | { |
4253 | int error; | 4253 | int error, had_lock; |
4254 | struct inode *inode = d_inode(old_dentry); | 4254 | struct inode *inode = d_inode(old_dentry); |
4255 | struct buffer_head *old_bh = NULL; | 4255 | struct buffer_head *old_bh = NULL; |
4256 | struct inode *new_orphan_inode = NULL; | 4256 | struct inode *new_orphan_inode = NULL; |
4257 | struct ocfs2_lock_holder oh; | ||
4257 | 4258 | ||
4258 | if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) | 4259 | if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) |
4259 | return -EOPNOTSUPP; | 4260 | return -EOPNOTSUPP; |
@@ -4295,6 +4296,14 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, | |||
4295 | goto out; | 4296 | goto out; |
4296 | } | 4297 | } |
4297 | 4298 | ||
4299 | had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1, | ||
4300 | &oh); | ||
4301 | if (had_lock < 0) { | ||
4302 | error = had_lock; | ||
4303 | mlog_errno(error); | ||
4304 | goto out; | ||
4305 | } | ||
4306 | |||
4298 | /* If the security isn't preserved, we need to re-initialize them. */ | 4307 | /* If the security isn't preserved, we need to re-initialize them. */ |
4299 | if (!preserve) { | 4308 | if (!preserve) { |
4300 | error = ocfs2_init_security_and_acl(dir, new_orphan_inode, | 4309 | error = ocfs2_init_security_and_acl(dir, new_orphan_inode, |
@@ -4302,14 +4311,15 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, | |||
4302 | if (error) | 4311 | if (error) |
4303 | mlog_errno(error); | 4312 | mlog_errno(error); |
4304 | } | 4313 | } |
4305 | out: | ||
4306 | if (!error) { | 4314 | if (!error) { |
4307 | error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, | 4315 | error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, |
4308 | new_dentry); | 4316 | new_dentry); |
4309 | if (error) | 4317 | if (error) |
4310 | mlog_errno(error); | 4318 | mlog_errno(error); |
4311 | } | 4319 | } |
4320 | ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock); | ||
4312 | 4321 | ||
4322 | out: | ||
4313 | if (new_orphan_inode) { | 4323 | if (new_orphan_inode) { |
4314 | /* | 4324 | /* |
4315 | * We need to open_unlock the inode no matter whether we | 4325 | * We need to open_unlock the inode no matter whether we |
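ocfs2_reflink() now takes the cluster lock on the freshly created orphan inode through the lock-tracker API before re-initializing its security attributes and moving it into place. The tracker records in had_lock whether this task already held the lock, so the paired unlock does the right thing in the recursive case. The pairing, in isolation:

    struct ocfs2_lock_holder oh;
    int had_lock;

    had_lock = ocfs2_inode_lock_tracker(inode, NULL, 1, &oh);
    if (had_lock < 0)
        return had_lock;                        /* failed to take the lock */

    /* ... work on the inode under the cluster lock ... */

    ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);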
diff --git a/fs/proc/base.c b/fs/proc/base.c index 1b2ede6abcdf..1a76d751cf3c 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -261,7 +261,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
261 | * Inherently racy -- command line shares address space | 261 | * Inherently racy -- command line shares address space |
262 | * with code and data. | 262 | * with code and data. |
263 | */ | 263 | */ |
264 | rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0); | 264 | rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON); |
265 | if (rv <= 0) | 265 | if (rv <= 0) |
266 | goto out_free_page; | 266 | goto out_free_page; |
267 | 267 | ||
@@ -279,7 +279,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
279 | int nr_read; | 279 | int nr_read; |
280 | 280 | ||
281 | _count = min3(count, len, PAGE_SIZE); | 281 | _count = min3(count, len, PAGE_SIZE); |
282 | nr_read = access_remote_vm(mm, p, page, _count, 0); | 282 | nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON); |
283 | if (nr_read < 0) | 283 | if (nr_read < 0) |
284 | rv = nr_read; | 284 | rv = nr_read; |
285 | if (nr_read <= 0) | 285 | if (nr_read <= 0) |
@@ -325,7 +325,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
325 | bool final; | 325 | bool final; |
326 | 326 | ||
327 | _count = min3(count, len, PAGE_SIZE); | 327 | _count = min3(count, len, PAGE_SIZE); |
328 | nr_read = access_remote_vm(mm, p, page, _count, 0); | 328 | nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON); |
329 | if (nr_read < 0) | 329 | if (nr_read < 0) |
330 | rv = nr_read; | 330 | rv = nr_read; |
331 | if (nr_read <= 0) | 331 | if (nr_read <= 0) |
@@ -946,7 +946,7 @@ static ssize_t environ_read(struct file *file, char __user *buf, | |||
946 | max_len = min_t(size_t, PAGE_SIZE, count); | 946 | max_len = min_t(size_t, PAGE_SIZE, count); |
947 | this_len = min(max_len, this_len); | 947 | this_len = min(max_len, this_len); |
948 | 948 | ||
949 | retval = access_remote_vm(mm, (env_start + src), page, this_len, 0); | 949 | retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON); |
950 | 950 | ||
951 | if (retval <= 0) { | 951 | if (retval <= 0) { |
952 | ret = retval; | 952 | ret = retval; |
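FOLL_ANON (added to the gup flags in the mm.h hunk below) restricts the remote access to anonymous mappings. The /proc cmdline and environ readers now pass it so a process cannot point its arg/env range at a file-backed mapping, for instance one served by a user-space filesystem whose fault never completes, and thereby stall or manipulate whoever reads its /proc files. The enforcement itself lives in the gup path, outside this excerpt; it presumably amounts to a VMA check along these lines:

    /* sketch of the gup-side check; not part of the hunks shown here */
    if ((gup_flags & FOLL_ANON) && !vma_is_anonymous(vma))
        return -EFAULT;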
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index d1e82761de81..e64ecb9f2720 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
@@ -209,25 +209,34 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) | |||
209 | { | 209 | { |
210 | struct list_head *head = (struct list_head *)arg; | 210 | struct list_head *head = (struct list_head *)arg; |
211 | struct kcore_list *ent; | 211 | struct kcore_list *ent; |
212 | struct page *p; | ||
213 | |||
214 | if (!pfn_valid(pfn)) | ||
215 | return 1; | ||
216 | |||
217 | p = pfn_to_page(pfn); | ||
218 | if (!memmap_valid_within(pfn, p, page_zone(p))) | ||
219 | return 1; | ||
212 | 220 | ||
213 | ent = kmalloc(sizeof(*ent), GFP_KERNEL); | 221 | ent = kmalloc(sizeof(*ent), GFP_KERNEL); |
214 | if (!ent) | 222 | if (!ent) |
215 | return -ENOMEM; | 223 | return -ENOMEM; |
216 | ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT)); | 224 | ent->addr = (unsigned long)page_to_virt(p); |
217 | ent->size = nr_pages << PAGE_SHIFT; | 225 | ent->size = nr_pages << PAGE_SHIFT; |
218 | 226 | ||
219 | /* Sanity check: Can happen in 32bit arch...maybe */ | 227 | if (!virt_addr_valid(ent->addr)) |
220 | if (ent->addr < (unsigned long) __va(0)) | ||
221 | goto free_out; | 228 | goto free_out; |
222 | 229 | ||
223 | /* cut not-mapped area. ....from ppc-32 code. */ | 230 | /* cut not-mapped area. ....from ppc-32 code. */ |
224 | if (ULONG_MAX - ent->addr < ent->size) | 231 | if (ULONG_MAX - ent->addr < ent->size) |
225 | ent->size = ULONG_MAX - ent->addr; | 232 | ent->size = ULONG_MAX - ent->addr; |
226 | 233 | ||
227 | /* cut when vmalloc() area is higher than direct-map area */ | 234 | /* |
228 | if (VMALLOC_START > (unsigned long)__va(0)) { | 235 | * We've already checked virt_addr_valid so we know this address |
229 | if (ent->addr > VMALLOC_START) | 236 | * is a valid pointer, therefore we can check against it to determine |
230 | goto free_out; | 237 | * if we need to trim |
238 | */ | ||
239 | if (VMALLOC_START > ent->addr) { | ||
231 | if (VMALLOC_START - ent->addr < ent->size) | 240 | if (VMALLOC_START - ent->addr < ent->size) |
232 | ent->size = VMALLOC_START - ent->addr; | 241 | ent->size = VMALLOC_START - ent->addr; |
233 | } | 242 | } |
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index d3339dd48b1a..b324e01ccf2d 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #define PHY_ID_BCM54612E 0x03625e60 | 25 | #define PHY_ID_BCM54612E 0x03625e60 |
26 | #define PHY_ID_BCM54616S 0x03625d10 | 26 | #define PHY_ID_BCM54616S 0x03625d10 |
27 | #define PHY_ID_BCM57780 0x03625d90 | 27 | #define PHY_ID_BCM57780 0x03625d90 |
28 | #define PHY_ID_BCM89610 0x03625cd0 | ||
28 | 29 | ||
29 | #define PHY_ID_BCM7250 0xae025280 | 30 | #define PHY_ID_BCM7250 0xae025280 |
30 | #define PHY_ID_BCM7260 0xae025190 | 31 | #define PHY_ID_BCM7260 0xae025190 |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 528ccc943cee..96bb32285989 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
@@ -77,7 +77,10 @@ struct ceph_osd_data { | |||
77 | u32 bio_length; | 77 | u32 bio_length; |
78 | }; | 78 | }; |
79 | #endif /* CONFIG_BLOCK */ | 79 | #endif /* CONFIG_BLOCK */ |
80 | struct ceph_bvec_iter bvec_pos; | 80 | struct { |
81 | struct ceph_bvec_iter bvec_pos; | ||
82 | u32 num_bvecs; | ||
83 | }; | ||
81 | }; | 84 | }; |
82 | }; | 85 | }; |
83 | 86 | ||
@@ -412,6 +415,10 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, | |||
412 | struct ceph_bio_iter *bio_pos, | 415 | struct ceph_bio_iter *bio_pos, |
413 | u32 bio_length); | 416 | u32 bio_length); |
414 | #endif /* CONFIG_BLOCK */ | 417 | #endif /* CONFIG_BLOCK */ |
418 | void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, | ||
419 | unsigned int which, | ||
420 | struct bio_vec *bvecs, u32 num_bvecs, | ||
421 | u32 bytes); | ||
415 | void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, | 422 | void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, |
416 | unsigned int which, | 423 | unsigned int which, |
417 | struct ceph_bvec_iter *bvec_pos); | 424 | struct ceph_bvec_iter *bvec_pos); |
@@ -426,7 +433,8 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, | |||
426 | bool own_pages); | 433 | bool own_pages); |
427 | void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, | 434 | void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, |
428 | unsigned int which, | 435 | unsigned int which, |
429 | struct bio_vec *bvecs, u32 bytes); | 436 | struct bio_vec *bvecs, u32 num_bvecs, |
437 | u32 bytes); | ||
430 | extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, | 438 | extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, |
431 | unsigned int which, | 439 | unsigned int which, |
432 | struct page **pages, u64 length, | 440 | struct page **pages, u64 length, |
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index c1961761311d..2803264c512f 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
@@ -62,6 +62,7 @@ void *kthread_probe_data(struct task_struct *k); | |||
62 | int kthread_park(struct task_struct *k); | 62 | int kthread_park(struct task_struct *k); |
63 | void kthread_unpark(struct task_struct *k); | 63 | void kthread_unpark(struct task_struct *k); |
64 | void kthread_parkme(void); | 64 | void kthread_parkme(void); |
65 | void kthread_park_complete(struct task_struct *k); | ||
65 | 66 | ||
66 | int kthreadd(void *unused); | 67 | int kthreadd(void *unused); |
67 | extern struct task_struct *kthreadd_task; | 68 | extern struct task_struct *kthreadd_task; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 1ac1f06a4be6..c6fa9a255dbf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -2466,6 +2466,13 @@ static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, | |||
2466 | return VM_FAULT_NOPAGE; | 2466 | return VM_FAULT_NOPAGE; |
2467 | } | 2467 | } |
2468 | 2468 | ||
2469 | static inline vm_fault_t vmf_error(int err) | ||
2470 | { | ||
2471 | if (err == -ENOMEM) | ||
2472 | return VM_FAULT_OOM; | ||
2473 | return VM_FAULT_SIGBUS; | ||
2474 | } | ||
2475 | |||
2469 | struct page *follow_page_mask(struct vm_area_struct *vma, | 2476 | struct page *follow_page_mask(struct vm_area_struct *vma, |
2470 | unsigned long address, unsigned int foll_flags, | 2477 | unsigned long address, unsigned int foll_flags, |
2471 | unsigned int *page_mask); | 2478 | unsigned int *page_mask); |
@@ -2493,6 +2500,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, | |||
2493 | #define FOLL_MLOCK 0x1000 /* lock present pages */ | 2500 | #define FOLL_MLOCK 0x1000 /* lock present pages */ |
2494 | #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ | 2501 | #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ |
2495 | #define FOLL_COW 0x4000 /* internal GUP flag */ | 2502 | #define FOLL_COW 0x4000 /* internal GUP flag */ |
2503 | #define FOLL_ANON 0x8000 /* don't do file mappings */ | ||
2496 | 2504 | ||
2497 | static inline int vm_fault_to_errno(int vm_fault, int foll_flags) | 2505 | static inline int vm_fault_to_errno(int vm_fault, int foll_flags) |
2498 | { | 2506 | { |
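vmf_error() gives fault handlers a one-liner for mapping an errno from a helper into the matching VM_FAULT_* code: -ENOMEM becomes VM_FAULT_OOM, anything else VM_FAULT_SIGBUS. A hypothetical .fault handler using it (the helper name is illustrative only):

    static vm_fault_t example_fault(struct vm_fault *vmf)
    {
        struct page *page;
        int err;

        err = example_get_page(vmf, &page);   /* hypothetical helper, returns 0 or -errno */
        if (err)
            return vmf_error(err);            /* -ENOMEM -> VM_FAULT_OOM, else VM_FAULT_SIGBUS */

        vmf->page = page;
        return 0;
    }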
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index b5b43f94f311..01b990e4b228 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
@@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd); | |||
312 | ({ \ | 312 | ({ \ |
313 | int i, ret = 1; \ | 313 | int i, ret = 1; \ |
314 | for (i = 0; i < map_words(map); i++) { \ | 314 | for (i = 0; i < map_words(map); i++) { \ |
315 | if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ | 315 | if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \ |
316 | ret = 0; \ | 316 | ret = 0; \ |
317 | break; \ | 317 | break; \ |
318 | } \ | 318 | } \ |
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 5dad59b31244..17c919436f48 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h | |||
@@ -867,12 +867,18 @@ struct nand_op_instr { | |||
867 | * tBERS (during an erase) which all of them are u64 values that cannot be | 867 | * tBERS (during an erase) which all of them are u64 values that cannot be |
868 | * divided by usual kernel macros and must be handled with the special | 868 | * divided by usual kernel macros and must be handled with the special |
869 | * DIV_ROUND_UP_ULL() macro. | 869 | * DIV_ROUND_UP_ULL() macro. |
870 | * | ||
871 | * Cast to type of dividend is needed here to guarantee that the result won't | ||
872 | * be an unsigned long long when the dividend is an unsigned long (or smaller), | ||
873 | * which is what the compiler does when it sees ternary operator with 2 | ||
874 | * different return types (picks the largest type to make sure there's no | ||
875 | * loss). | ||
870 | */ | 876 | */ |
871 | #define __DIVIDE(dividend, divisor) ({ \ | 877 | #define __DIVIDE(dividend, divisor) ({ \ |
872 | sizeof(dividend) == sizeof(u32) ? \ | 878 | (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \ |
873 | DIV_ROUND_UP(dividend, divisor) : \ | 879 | DIV_ROUND_UP(dividend, divisor) : \ |
874 | DIV_ROUND_UP_ULL(dividend, divisor); \ | 880 | DIV_ROUND_UP_ULL(dividend, divisor)); \ |
875 | }) | 881 | }) |
876 | #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000) | 882 | #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000) |
877 | #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000) | 883 | #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000) |
878 | 884 | ||
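The point of the cast the new comment makes: a ternary whose arms have different types is evaluated in the wider common type, so the old macro produced an unsigned long long even when the dividend was a u32 and the DIV_ROUND_UP arm was taken, which can trip type-mismatch warnings at call sites expecting a 32-bit value. Casting the whole expression back to typeof(dividend) preserves the caller's type, and the `<= sizeof(unsigned long)` test lets native-width dividends use plain DIV_ROUND_UP. With a purely illustrative value:

    u32 tR_ps = 200000;                   /* 200 ns worth of picoseconds */
    u32 tR_ns = PSEC_TO_NSEC(tR_ps);      /* 200; stays a u32 with the new macro */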
diff --git a/include/linux/oom.h b/include/linux/oom.h index 5bad038ac012..6adac113e96d 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
@@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm) | |||
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | 97 | ||
98 | void __oom_reap_task_mm(struct mm_struct *mm); | ||
99 | |||
98 | extern unsigned long oom_badness(struct task_struct *p, | 100 | extern unsigned long oom_badness(struct task_struct *p, |
99 | struct mem_cgroup *memcg, const nodemask_t *nodemask, | 101 | struct mem_cgroup *memcg, const nodemask_t *nodemask, |
100 | unsigned long totalpages); | 102 | unsigned long totalpages); |
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 6bfd2b581f75..af8a61be2d8d 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/compiler.h> | 27 | #include <linux/compiler.h> |
28 | #include <linux/rbtree.h> | 28 | #include <linux/rbtree.h> |
29 | #include <linux/rcupdate.h> | ||
29 | 30 | ||
30 | /* | 31 | /* |
31 | * Please note - only struct rb_augment_callbacks and the prototypes for | 32 | * Please note - only struct rb_augment_callbacks and the prototypes for |
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h index ece43e882b56..7d012faa509a 100644 --- a/include/linux/rbtree_latch.h +++ b/include/linux/rbtree_latch.h | |||
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | #include <linux/rbtree.h> | 36 | #include <linux/rbtree.h> |
37 | #include <linux/seqlock.h> | 37 | #include <linux/seqlock.h> |
38 | #include <linux/rcupdate.h> | ||
38 | 39 | ||
39 | struct latch_tree_node { | 40 | struct latch_tree_node { |
40 | struct rb_node node[2]; | 41 | struct rb_node node[2]; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index b3d697f3b573..c2413703f45d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -112,17 +112,36 @@ struct task_group; | |||
112 | 112 | ||
113 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP | 113 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
114 | 114 | ||
115 | /* | ||
116 | * Special states are those that do not use the normal wait-loop pattern. See | ||
117 | * the comment with set_special_state(). | ||
118 | */ | ||
119 | #define is_special_task_state(state) \ | ||
120 | ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD)) | ||
121 | |||
115 | #define __set_current_state(state_value) \ | 122 | #define __set_current_state(state_value) \ |
116 | do { \ | 123 | do { \ |
124 | WARN_ON_ONCE(is_special_task_state(state_value));\ | ||
117 | current->task_state_change = _THIS_IP_; \ | 125 | current->task_state_change = _THIS_IP_; \ |
118 | current->state = (state_value); \ | 126 | current->state = (state_value); \ |
119 | } while (0) | 127 | } while (0) |
128 | |||
120 | #define set_current_state(state_value) \ | 129 | #define set_current_state(state_value) \ |
121 | do { \ | 130 | do { \ |
131 | WARN_ON_ONCE(is_special_task_state(state_value));\ | ||
122 | current->task_state_change = _THIS_IP_; \ | 132 | current->task_state_change = _THIS_IP_; \ |
123 | smp_store_mb(current->state, (state_value)); \ | 133 | smp_store_mb(current->state, (state_value)); \ |
124 | } while (0) | 134 | } while (0) |
125 | 135 | ||
136 | #define set_special_state(state_value) \ | ||
137 | do { \ | ||
138 | unsigned long flags; /* may shadow */ \ | ||
139 | WARN_ON_ONCE(!is_special_task_state(state_value)); \ | ||
140 | raw_spin_lock_irqsave(¤t->pi_lock, flags); \ | ||
141 | current->task_state_change = _THIS_IP_; \ | ||
142 | current->state = (state_value); \ | ||
143 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ | ||
144 | } while (0) | ||
126 | #else | 145 | #else |
127 | /* | 146 | /* |
128 | * set_current_state() includes a barrier so that the write of current->state | 147 | * set_current_state() includes a barrier so that the write of current->state |
@@ -144,8 +163,8 @@ struct task_group; | |||
144 | * | 163 | * |
145 | * The above is typically ordered against the wakeup, which does: | 164 | * The above is typically ordered against the wakeup, which does: |
146 | * | 165 | * |
147 | * need_sleep = false; | 166 | * need_sleep = false; |
148 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); | 167 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); |
149 | * | 168 | * |
150 | * Where wake_up_state() (and all other wakeup primitives) imply enough | 169 | * Where wake_up_state() (and all other wakeup primitives) imply enough |
151 | * barriers to order the store of the variable against wakeup. | 170 | * barriers to order the store of the variable against wakeup. |
@@ -154,12 +173,33 @@ struct task_group; | |||
154 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a | 173 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a |
155 | * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). | 174 | * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). |
156 | * | 175 | * |
157 | * This is obviously fine, since they both store the exact same value. | 176 | * However, with slightly different timing the wakeup TASK_RUNNING store can |
177 | * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not | ||
178 | * a problem either because that will result in one extra go around the loop | ||
179 | * and our @cond test will save the day. | ||
158 | * | 180 | * |
159 | * Also see the comments of try_to_wake_up(). | 181 | * Also see the comments of try_to_wake_up(). |
160 | */ | 182 | */ |
161 | #define __set_current_state(state_value) do { current->state = (state_value); } while (0) | 183 | #define __set_current_state(state_value) \ |
162 | #define set_current_state(state_value) smp_store_mb(current->state, (state_value)) | 184 | current->state = (state_value) |
185 | |||
186 | #define set_current_state(state_value) \ | ||
187 | smp_store_mb(current->state, (state_value)) | ||
188 | |||
189 | /* | ||
190 | * set_special_state() should be used for those states when the blocking task | ||
191 | * can not use the regular condition based wait-loop. In that case we must | ||
192 | * serialize against wakeups such that any possible in-flight TASK_RUNNING stores | ||
193 | * will not collide with our state change. | ||
194 | */ | ||
195 | #define set_special_state(state_value) \ | ||
196 | do { \ | ||
197 | unsigned long flags; /* may shadow */ \ | ||
198 | raw_spin_lock_irqsave(¤t->pi_lock, flags); \ | ||
199 | current->state = (state_value); \ | ||
200 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ | ||
201 | } while (0) | ||
202 | |||
163 | #endif | 203 | #endif |
164 | 204 | ||
165 | /* Task command name length: */ | 205 | /* Task command name length: */ |
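For contrast, the "normal wait-loop pattern" these comments refer to is the condition-based loop, which tolerates the race because a stray TASK_RUNNING store from a concurrent wakeup only costs one extra trip around the loop:

    for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (done)               /* the condition the waker sets before waking us */
            break;
        schedule();
    }
    __set_current_state(TASK_RUNNING);

TASK_STOPPED, TASK_TRACED and TASK_DEAD have no such retry loop, which is why set_special_state() wraps the store in pi_lock: a wakeup racing with the state change can no longer overwrite it with TASK_RUNNING.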
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index a7ce74c74e49..113d1ad1ced7 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h | |||
@@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void) | |||
280 | { | 280 | { |
281 | spin_lock_irq(¤t->sighand->siglock); | 281 | spin_lock_irq(¤t->sighand->siglock); |
282 | if (current->jobctl & JOBCTL_STOP_DEQUEUED) | 282 | if (current->jobctl & JOBCTL_STOP_DEQUEUED) |
283 | __set_current_state(TASK_STOPPED); | 283 | set_special_state(TASK_STOPPED); |
284 | spin_unlock_irq(¤t->sighand->siglock); | 284 | spin_unlock_irq(¤t->sighand->siglock); |
285 | 285 | ||
286 | schedule(); | 286 | schedule(); |
diff --git a/include/net/bonding.h b/include/net/bonding.h index f801fc940b29..b52235158836 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h | |||
@@ -198,6 +198,7 @@ struct bonding { | |||
198 | struct slave __rcu *primary_slave; | 198 | struct slave __rcu *primary_slave; |
199 | struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ | 199 | struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ |
200 | bool force_primary; | 200 | bool force_primary; |
201 | u32 nest_level; | ||
201 | s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ | 202 | s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ |
202 | int (*recv_probe)(const struct sk_buff *, struct bonding *, | 203 | int (*recv_probe)(const struct sk_buff *, struct bonding *, |
203 | struct slave *); | 204 | struct slave *); |
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 9a074776f70b..d1fcf2442a42 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h | |||
@@ -251,7 +251,7 @@ extern struct flow_dissector flow_keys_buf_dissector; | |||
251 | * This structure is used to hold a digest of the full flow keys. This is a | 251 | * This structure is used to hold a digest of the full flow keys. This is a |
252 | * larger "hash" of a flow to allow definitively matching specific flows where | 252 | * larger "hash" of a flow to allow definitively matching specific flows where |
253 | * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so | 253 | * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so |
254 | * that it can by used in CB of skb (see sch_choke for an example). | 254 | * that it can be used in CB of skb (see sch_choke for an example). |
255 | */ | 255 | */ |
256 | #define FLOW_KEYS_DIGEST_LEN 16 | 256 | #define FLOW_KEYS_DIGEST_LEN 16 |
257 | struct flow_keys_digest { | 257 | struct flow_keys_digest { |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index d2279b2d61aa..b2f3a0c018e7 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -2080,7 +2080,7 @@ struct ieee80211_txq { | |||
2080 | * virtual interface might not be given air time for the transmission of | 2080 | * virtual interface might not be given air time for the transmission of |
2081 | * the frame, as it is not synced with the AP/P2P GO yet, and thus the | 2081 | * the frame, as it is not synced with the AP/P2P GO yet, and thus the |
2082 | * deauthentication frame might not be transmitted. | 2082 | * deauthentication frame might not be transmitted. |
2083 | > | 2083 | * |
2084 | * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't | 2084 | * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't |
2085 | * support QoS NDP for AP probing - that's most likely a driver bug. | 2085 | * support QoS NDP for AP probing - that's most likely a driver bug. |
2086 | * | 2086 | * |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a872379b69da..45e75c36b738 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -375,6 +375,7 @@ struct xfrm_input_afinfo { | |||
375 | int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo); | 375 | int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo); |
376 | int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo); | 376 | int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo); |
377 | 377 | ||
378 | void xfrm_flush_gc(void); | ||
378 | void xfrm_state_delete_tunnel(struct xfrm_state *x); | 379 | void xfrm_state_delete_tunnel(struct xfrm_state *x); |
379 | 380 | ||
380 | struct xfrm_type { | 381 | struct xfrm_type { |
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index f0820554caa9..d0a341bc4540 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h | |||
@@ -575,6 +575,48 @@ TRACE_EVENT(afs_protocol_error, | |||
575 | __entry->call, __entry->error, __entry->where) | 575 | __entry->call, __entry->error, __entry->where) |
576 | ); | 576 | ); |
577 | 577 | ||
578 | TRACE_EVENT(afs_cm_no_server, | ||
579 | TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx), | ||
580 | |||
581 | TP_ARGS(call, srx), | ||
582 | |||
583 | TP_STRUCT__entry( | ||
584 | __field(unsigned int, call ) | ||
585 | __field(unsigned int, op_id ) | ||
586 | __field_struct(struct sockaddr_rxrpc, srx ) | ||
587 | ), | ||
588 | |||
589 | TP_fast_assign( | ||
590 | __entry->call = call->debug_id; | ||
591 | __entry->op_id = call->operation_ID; | ||
592 | memcpy(&__entry->srx, srx, sizeof(__entry->srx)); | ||
593 | ), | ||
594 | |||
595 | TP_printk("c=%08x op=%u %pISpc", | ||
596 | __entry->call, __entry->op_id, &__entry->srx.transport) | ||
597 | ); | ||
598 | |||
599 | TRACE_EVENT(afs_cm_no_server_u, | ||
600 | TP_PROTO(struct afs_call *call, const uuid_t *uuid), | ||
601 | |||
602 | TP_ARGS(call, uuid), | ||
603 | |||
604 | TP_STRUCT__entry( | ||
605 | __field(unsigned int, call ) | ||
606 | __field(unsigned int, op_id ) | ||
607 | __field_struct(uuid_t, uuid ) | ||
608 | ), | ||
609 | |||
610 | TP_fast_assign( | ||
611 | __entry->call = call->debug_id; | ||
612 | __entry->op_id = call->operation_ID; | ||
613 | memcpy(&__entry->uuid, uuid, sizeof(__entry->uuid)); | ||
614 | ), | ||
615 | |||
616 | TP_printk("c=%08x op=%u %pU", | ||
617 | __entry->call, __entry->op_id, &__entry->uuid) | ||
618 | ); | ||
619 | |||
578 | #endif /* _TRACE_AFS_H */ | 620 | #endif /* _TRACE_AFS_H */ |
579 | 621 | ||
580 | /* This part must be outside protection */ | 622 | /* This part must be outside protection */ |
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 9e96c2fe2793..077e664ac9a2 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #define _TRACE_RXRPC_H | 15 | #define _TRACE_RXRPC_H |
16 | 16 | ||
17 | #include <linux/tracepoint.h> | 17 | #include <linux/tracepoint.h> |
18 | #include <linux/errqueue.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Define enums for tracing information. | 21 | * Define enums for tracing information. |
@@ -210,6 +211,20 @@ enum rxrpc_congest_change { | |||
210 | rxrpc_cong_saw_nack, | 211 | rxrpc_cong_saw_nack, |
211 | }; | 212 | }; |
212 | 213 | ||
214 | enum rxrpc_tx_fail_trace { | ||
215 | rxrpc_tx_fail_call_abort, | ||
216 | rxrpc_tx_fail_call_ack, | ||
217 | rxrpc_tx_fail_call_data_frag, | ||
218 | rxrpc_tx_fail_call_data_nofrag, | ||
219 | rxrpc_tx_fail_call_final_resend, | ||
220 | rxrpc_tx_fail_conn_abort, | ||
221 | rxrpc_tx_fail_conn_challenge, | ||
222 | rxrpc_tx_fail_conn_response, | ||
223 | rxrpc_tx_fail_reject, | ||
224 | rxrpc_tx_fail_version_keepalive, | ||
225 | rxrpc_tx_fail_version_reply, | ||
226 | }; | ||
227 | |||
213 | #endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */ | 228 | #endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */ |
214 | 229 | ||
215 | /* | 230 | /* |
@@ -437,6 +452,19 @@ enum rxrpc_congest_change { | |||
437 | EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \ | 452 | EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \ |
438 | E_(RXRPC_CALL_NETWORK_ERROR, "NetError") | 453 | E_(RXRPC_CALL_NETWORK_ERROR, "NetError") |
439 | 454 | ||
455 | #define rxrpc_tx_fail_traces \ | ||
456 | EM(rxrpc_tx_fail_call_abort, "CallAbort") \ | ||
457 | EM(rxrpc_tx_fail_call_ack, "CallAck") \ | ||
458 | EM(rxrpc_tx_fail_call_data_frag, "CallDataFrag") \ | ||
459 | EM(rxrpc_tx_fail_call_data_nofrag, "CallDataNofrag") \ | ||
460 | EM(rxrpc_tx_fail_call_final_resend, "CallFinalResend") \ | ||
461 | EM(rxrpc_tx_fail_conn_abort, "ConnAbort") \ | ||
462 | EM(rxrpc_tx_fail_conn_challenge, "ConnChall") \ | ||
463 | EM(rxrpc_tx_fail_conn_response, "ConnResp") \ | ||
464 | EM(rxrpc_tx_fail_reject, "Reject") \ | ||
465 | EM(rxrpc_tx_fail_version_keepalive, "VerKeepalive") \ | ||
466 | E_(rxrpc_tx_fail_version_reply, "VerReply") | ||
467 | |||
440 | /* | 468 | /* |
441 | * Export enum symbols via userspace. | 469 | * Export enum symbols via userspace. |
442 | */ | 470 | */ |
@@ -460,6 +488,7 @@ rxrpc_propose_ack_traces; | |||
460 | rxrpc_propose_ack_outcomes; | 488 | rxrpc_propose_ack_outcomes; |
461 | rxrpc_congest_modes; | 489 | rxrpc_congest_modes; |
462 | rxrpc_congest_changes; | 490 | rxrpc_congest_changes; |
491 | rxrpc_tx_fail_traces; | ||
463 | 492 | ||
464 | /* | 493 | /* |
465 | * Now redefine the EM() and E_() macros to map the enums to the strings that | 494 | * Now redefine the EM() and E_() macros to map the enums to the strings that |
@@ -1374,6 +1403,62 @@ TRACE_EVENT(rxrpc_resend, | |||
1374 | __entry->anno) | 1403 | __entry->anno) |
1375 | ); | 1404 | ); |
1376 | 1405 | ||
1406 | TRACE_EVENT(rxrpc_rx_icmp, | ||
1407 | TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee, | ||
1408 | struct sockaddr_rxrpc *srx), | ||
1409 | |||
1410 | TP_ARGS(peer, ee, srx), | ||
1411 | |||
1412 | TP_STRUCT__entry( | ||
1413 | __field(unsigned int, peer ) | ||
1414 | __field_struct(struct sock_extended_err, ee ) | ||
1415 | __field_struct(struct sockaddr_rxrpc, srx ) | ||
1416 | ), | ||
1417 | |||
1418 | TP_fast_assign( | ||
1419 | __entry->peer = peer->debug_id; | ||
1420 | memcpy(&__entry->ee, ee, sizeof(__entry->ee)); | ||
1421 | memcpy(&__entry->srx, srx, sizeof(__entry->srx)); | ||
1422 | ), | ||
1423 | |||
1424 | TP_printk("P=%08x o=%u t=%u c=%u i=%u d=%u e=%d %pISp", | ||
1425 | __entry->peer, | ||
1426 | __entry->ee.ee_origin, | ||
1427 | __entry->ee.ee_type, | ||
1428 | __entry->ee.ee_code, | ||
1429 | __entry->ee.ee_info, | ||
1430 | __entry->ee.ee_data, | ||
1431 | __entry->ee.ee_errno, | ||
1432 | &__entry->srx.transport) | ||
1433 | ); | ||
1434 | |||
1435 | TRACE_EVENT(rxrpc_tx_fail, | ||
1436 | TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret, | ||
1437 | enum rxrpc_tx_fail_trace what), | ||
1438 | |||
1439 | TP_ARGS(debug_id, serial, ret, what), | ||
1440 | |||
1441 | TP_STRUCT__entry( | ||
1442 | __field(unsigned int, debug_id ) | ||
1443 | __field(rxrpc_serial_t, serial ) | ||
1444 | __field(int, ret ) | ||
1445 | __field(enum rxrpc_tx_fail_trace, what ) | ||
1446 | ), | ||
1447 | |||
1448 | TP_fast_assign( | ||
1449 | __entry->debug_id = debug_id; | ||
1450 | __entry->serial = serial; | ||
1451 | __entry->ret = ret; | ||
1452 | __entry->what = what; | ||
1453 | ), | ||
1454 | |||
1455 | TP_printk("c=%08x r=%x ret=%d %s", | ||
1456 | __entry->debug_id, | ||
1457 | __entry->serial, | ||
1458 | __entry->ret, | ||
1459 | __print_symbolic(__entry->what, rxrpc_tx_fail_traces)) | ||
1460 | ); | ||
1461 | |||
1377 | #endif /* _TRACE_RXRPC_H */ | 1462 | #endif /* _TRACE_RXRPC_H */ |
1378 | 1463 | ||
1379 | /* This part must be outside protection */ | 1464 | /* This part must be outside protection */ |
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 335d87242439..bbb08a3ef5cc 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h | |||
@@ -224,6 +224,8 @@ TRACE_EVENT(rpc_stats_latency, | |||
224 | TP_ARGS(task, backlog, rtt, execute), | 224 | TP_ARGS(task, backlog, rtt, execute), |
225 | 225 | ||
226 | TP_STRUCT__entry( | 226 | TP_STRUCT__entry( |
227 | __field(unsigned int, task_id) | ||
228 | __field(unsigned int, client_id) | ||
227 | __field(u32, xid) | 229 | __field(u32, xid) |
228 | __field(int, version) | 230 | __field(int, version) |
229 | __string(progname, task->tk_client->cl_program->name) | 231 | __string(progname, task->tk_client->cl_program->name) |
@@ -231,13 +233,11 @@ TRACE_EVENT(rpc_stats_latency, | |||
231 | __field(unsigned long, backlog) | 233 | __field(unsigned long, backlog) |
232 | __field(unsigned long, rtt) | 234 | __field(unsigned long, rtt) |
233 | __field(unsigned long, execute) | 235 | __field(unsigned long, execute) |
234 | __string(addr, | ||
235 | task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]) | ||
236 | __string(port, | ||
237 | task->tk_xprt->address_strings[RPC_DISPLAY_PORT]) | ||
238 | ), | 236 | ), |
239 | 237 | ||
240 | TP_fast_assign( | 238 | TP_fast_assign( |
239 | __entry->client_id = task->tk_client->cl_clid; | ||
240 | __entry->task_id = task->tk_pid; | ||
241 | __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); | 241 | __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); |
242 | __entry->version = task->tk_client->cl_vers; | 242 | __entry->version = task->tk_client->cl_vers; |
243 | __assign_str(progname, task->tk_client->cl_program->name) | 243 | __assign_str(progname, task->tk_client->cl_program->name) |
@@ -245,14 +245,10 @@ TRACE_EVENT(rpc_stats_latency, | |||
245 | __entry->backlog = ktime_to_us(backlog); | 245 | __entry->backlog = ktime_to_us(backlog); |
246 | __entry->rtt = ktime_to_us(rtt); | 246 | __entry->rtt = ktime_to_us(rtt); |
247 | __entry->execute = ktime_to_us(execute); | 247 | __entry->execute = ktime_to_us(execute); |
248 | __assign_str(addr, | ||
249 | task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]); | ||
250 | __assign_str(port, | ||
251 | task->tk_xprt->address_strings[RPC_DISPLAY_PORT]); | ||
252 | ), | 248 | ), |
253 | 249 | ||
254 | TP_printk("peer=[%s]:%s xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu", | 250 | TP_printk("task:%u@%d xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu", |
255 | __get_str(addr), __get_str(port), __entry->xid, | 251 | __entry->task_id, __entry->client_id, __entry->xid, |
256 | __get_str(progname), __entry->version, __get_str(procname), | 252 | __get_str(progname), __entry->version, __get_str(procname), |
257 | __entry->backlog, __entry->rtt, __entry->execute) | 253 | __entry->backlog, __entry->rtt, __entry->execute) |
258 | ); | 254 | ); |
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h index 7dd8f34c37df..fdcf88bcf0ea 100644 --- a/include/trace/events/xen.h +++ b/include/trace/events/xen.h | |||
@@ -352,22 +352,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd, | |||
352 | DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); | 352 | DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); |
353 | DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); | 353 | DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); |
354 | 354 | ||
355 | TRACE_EVENT(xen_mmu_flush_tlb_all, | ||
356 | TP_PROTO(int x), | ||
357 | TP_ARGS(x), | ||
358 | TP_STRUCT__entry(__array(char, x, 0)), | ||
359 | TP_fast_assign((void)x), | ||
360 | TP_printk("%s", "") | ||
361 | ); | ||
362 | |||
363 | TRACE_EVENT(xen_mmu_flush_tlb, | ||
364 | TP_PROTO(int x), | ||
365 | TP_ARGS(x), | ||
366 | TP_STRUCT__entry(__array(char, x, 0)), | ||
367 | TP_fast_assign((void)x), | ||
368 | TP_printk("%s", "") | ||
369 | ); | ||
370 | |||
371 | TRACE_EVENT(xen_mmu_flush_tlb_one_user, | 355 | TRACE_EVENT(xen_mmu_flush_tlb_one_user, |
372 | TP_PROTO(unsigned long addr), | 356 | TP_PROTO(unsigned long addr), |
373 | TP_ARGS(addr), | 357 | TP_ARGS(addr), |
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 15daf5e2638d..9c3630146cec 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h | |||
@@ -2698,6 +2698,8 @@ enum nl80211_attrs { | |||
2698 | #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS | 2698 | #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS |
2699 | #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS | 2699 | #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS |
2700 | 2700 | ||
2701 | #define NL80211_WIPHY_NAME_MAXLEN 128 | ||
2702 | |||
2701 | #define NL80211_MAX_SUPP_RATES 32 | 2703 | #define NL80211_MAX_SUPP_RATES 32 |
2702 | #define NL80211_MAX_SUPP_HT_RATES 77 | 2704 | #define NL80211_MAX_SUPP_HT_RATES 77 |
2703 | #define NL80211_MAX_SUPP_REG_RULES 64 | 2705 | #define NL80211_MAX_SUPP_REG_RULES 64 |
diff --git a/init/main.c b/init/main.c index b795aa341a3a..fd37315835b4 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -423,7 +423,7 @@ static noinline void __ref rest_init(void) | |||
423 | 423 | ||
424 | /* | 424 | /* |
425 | * Enable might_sleep() and smp_processor_id() checks. | 425 | * Enable might_sleep() and smp_processor_id() checks. |
426 | * They cannot be enabled earlier because with CONFIG_PRREMPT=y | 426 | * They cannot be enabled earlier because with CONFIG_PREEMPT=y |
427 | * kernel_thread() would trigger might_sleep() splats. With | 427 | * kernel_thread() would trigger might_sleep() splats. With |
428 | * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled | 428 | * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled |
429 | * already, but it's stuck on the kthreadd_done completion. | 429 | * already, but it's stuck on the kthreadd_done completion. |
@@ -1034,6 +1034,13 @@ __setup("rodata=", set_debug_rodata); | |||
1034 | static void mark_readonly(void) | 1034 | static void mark_readonly(void) |
1035 | { | 1035 | { |
1036 | if (rodata_enabled) { | 1036 | if (rodata_enabled) { |
1037 | /* | ||
1038 | * load_module() results in W+X mappings, which are cleaned up | ||
1039 | * with call_rcu_sched(). Let's make sure that queued work is | ||
1040 | * flushed so that we don't hit false positives looking for | ||
1041 | * insecure pages which are W+X. | ||
1042 | */ | ||
1043 | rcu_barrier_sched(); | ||
1037 | mark_rodata_ro(); | 1044 | mark_rodata_ro(); |
1038 | rodata_test(); | 1045 | rodata_test(); |
1039 | } else | 1046 | } else |
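The rcu_barrier_sched() added above pairs with the comment added to kernel/module.c further down: module loading defers the teardown of its temporary W+X init mappings through call_rcu_sched(), so mark_rodata_ro()'s W+X scan must first drain those queued callbacks. A minimal sketch of that producer/consumer ordering, with obj, cleanup_cb and check_invariant as hypothetical names:

        /* producer: defer cleanup past an RCU-sched grace period */
        call_rcu_sched(&obj->rcu, cleanup_cb);

        /*
         * consumer: before testing an invariant that the deferred cleanup
         * re-establishes (here: "no W+X mappings remain"), wait until every
         * callback queued so far has finished running.
         */
        rcu_barrier_sched();
        check_invariant();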
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ebfe9f29dae8..016ef9025827 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/cred.h> | 26 | #include <linux/cred.h> |
27 | #include <linux/timekeeping.h> | 27 | #include <linux/timekeeping.h> |
28 | #include <linux/ctype.h> | 28 | #include <linux/ctype.h> |
29 | #include <linux/nospec.h> | ||
29 | 30 | ||
30 | #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ | 31 | #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ |
31 | (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ | 32 | (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ |
@@ -102,12 +103,14 @@ const struct bpf_map_ops bpf_map_offload_ops = { | |||
102 | static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) | 103 | static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) |
103 | { | 104 | { |
104 | const struct bpf_map_ops *ops; | 105 | const struct bpf_map_ops *ops; |
106 | u32 type = attr->map_type; | ||
105 | struct bpf_map *map; | 107 | struct bpf_map *map; |
106 | int err; | 108 | int err; |
107 | 109 | ||
108 | if (attr->map_type >= ARRAY_SIZE(bpf_map_types)) | 110 | if (type >= ARRAY_SIZE(bpf_map_types)) |
109 | return ERR_PTR(-EINVAL); | 111 | return ERR_PTR(-EINVAL); |
110 | ops = bpf_map_types[attr->map_type]; | 112 | type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types)); |
113 | ops = bpf_map_types[type]; | ||
111 | if (!ops) | 114 | if (!ops) |
112 | return ERR_PTR(-EINVAL); | 115 | return ERR_PTR(-EINVAL); |
113 | 116 | ||
@@ -122,7 +125,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) | |||
122 | if (IS_ERR(map)) | 125 | if (IS_ERR(map)) |
123 | return map; | 126 | return map; |
124 | map->ops = ops; | 127 | map->ops = ops; |
125 | map->map_type = attr->map_type; | 128 | map->map_type = type; |
126 | return map; | 129 | return map; |
127 | } | 130 | } |
128 | 131 | ||
@@ -871,11 +874,17 @@ static const struct bpf_prog_ops * const bpf_prog_types[] = { | |||
871 | 874 | ||
872 | static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) | 875 | static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) |
873 | { | 876 | { |
874 | if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type]) | 877 | const struct bpf_prog_ops *ops; |
878 | |||
879 | if (type >= ARRAY_SIZE(bpf_prog_types)) | ||
880 | return -EINVAL; | ||
881 | type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); | ||
882 | ops = bpf_prog_types[type]; | ||
883 | if (!ops) | ||
875 | return -EINVAL; | 884 | return -EINVAL; |
876 | 885 | ||
877 | if (!bpf_prog_is_dev_bound(prog->aux)) | 886 | if (!bpf_prog_is_dev_bound(prog->aux)) |
878 | prog->aux->ops = bpf_prog_types[type]; | 887 | prog->aux->ops = ops; |
879 | else | 888 | else |
880 | prog->aux->ops = &bpf_offload_prog_ops; | 889 | prog->aux->ops = &bpf_offload_prog_ops; |
881 | prog->type = type; | 890 | prog->type = type; |
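Both bounds checks above follow the Spectre-v1 hardening idiom: validate the user-supplied index, then clamp it with array_index_nospec() so the CPU cannot speculatively dereference an out-of-bounds slot before the branch resolves. The same idiom recurs in kernel/events/ring_buffer.c, kernel/sched/autogroup.c and kernel/sched/core.c below. A minimal sketch, where table and struct my_ops are placeholders:

        #include <linux/nospec.h>

        static const struct my_ops *lookup(u32 idx)
        {
                if (idx >= ARRAY_SIZE(table))
                        return NULL;
                /* clamp idx under speculation as well as architecturally */
                idx = array_index_nospec(idx, ARRAY_SIZE(table));
                return table[idx];
        }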
diff --git a/kernel/compat.c b/kernel/compat.c index 6d21894806b4..92d8c98c0f57 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
@@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp) | |||
34 | { | 34 | { |
35 | struct compat_timex tx32; | 35 | struct compat_timex tx32; |
36 | 36 | ||
37 | memset(txc, 0, sizeof(struct timex)); | ||
37 | if (copy_from_user(&tx32, utp, sizeof(struct compat_timex))) | 38 | if (copy_from_user(&tx32, utp, sizeof(struct compat_timex))) |
38 | return -EFAULT; | 39 | return -EFAULT; |
39 | 40 | ||
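The memset() above is the usual defence when widening a smaller compat structure into its native counterpart: any native field that the conversion does not assign would otherwise hold stack garbage, which can leak back to userspace when the structure is later copied out. A hedged sketch with hypothetical structures:

        struct native_cfg { u32 a; u32 b; u64 added_later; };  /* hypothetical */
        struct compat_cfg { u32 a; u32 b; };                   /* hypothetical */

        static int get_compat_cfg(struct native_cfg *dst,
                                  const struct compat_cfg __user *src)
        {
                struct compat_cfg tmp;

                /* fields with no compat counterpart must not stay uninitialized */
                memset(dst, 0, sizeof(*dst));
                if (copy_from_user(&tmp, src, sizeof(tmp)))
                        return -EFAULT;
                dst->a = tmp.a;
                dst->b = tmp.b;
                return 0;
        }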
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 6c6b3c48db71..1d8ca9ea9979 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/circ_buf.h> | 15 | #include <linux/circ_buf.h> |
16 | #include <linux/poll.h> | 16 | #include <linux/poll.h> |
17 | #include <linux/nospec.h> | ||
17 | 18 | ||
18 | #include "internal.h" | 19 | #include "internal.h" |
19 | 20 | ||
@@ -867,8 +868,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff) | |||
867 | return NULL; | 868 | return NULL; |
868 | 869 | ||
869 | /* AUX space */ | 870 | /* AUX space */ |
870 | if (pgoff >= rb->aux_pgoff) | 871 | if (pgoff >= rb->aux_pgoff) { |
871 | return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]); | 872 | int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages); |
873 | return virt_to_page(rb->aux_pages[aux_pgoff]); | ||
874 | } | ||
872 | } | 875 | } |
873 | 876 | ||
874 | return __perf_mmap_to_page(rb, pgoff); | 877 | return __perf_mmap_to_page(rb, pgoff); |
diff --git a/kernel/kthread.c b/kernel/kthread.c index cd50e99202b0..2017a39ab490 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -55,7 +55,6 @@ enum KTHREAD_BITS { | |||
55 | KTHREAD_IS_PER_CPU = 0, | 55 | KTHREAD_IS_PER_CPU = 0, |
56 | KTHREAD_SHOULD_STOP, | 56 | KTHREAD_SHOULD_STOP, |
57 | KTHREAD_SHOULD_PARK, | 57 | KTHREAD_SHOULD_PARK, |
58 | KTHREAD_IS_PARKED, | ||
59 | }; | 58 | }; |
60 | 59 | ||
61 | static inline void set_kthread_struct(void *kthread) | 60 | static inline void set_kthread_struct(void *kthread) |
@@ -177,14 +176,12 @@ void *kthread_probe_data(struct task_struct *task) | |||
177 | 176 | ||
178 | static void __kthread_parkme(struct kthread *self) | 177 | static void __kthread_parkme(struct kthread *self) |
179 | { | 178 | { |
180 | __set_current_state(TASK_PARKED); | 179 | for (;;) { |
181 | while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { | 180 | set_current_state(TASK_PARKED); |
182 | if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) | 181 | if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) |
183 | complete(&self->parked); | 182 | break; |
184 | schedule(); | 183 | schedule(); |
185 | __set_current_state(TASK_PARKED); | ||
186 | } | 184 | } |
187 | clear_bit(KTHREAD_IS_PARKED, &self->flags); | ||
188 | __set_current_state(TASK_RUNNING); | 185 | __set_current_state(TASK_RUNNING); |
189 | } | 186 | } |
190 | 187 | ||
@@ -194,6 +191,11 @@ void kthread_parkme(void) | |||
194 | } | 191 | } |
195 | EXPORT_SYMBOL_GPL(kthread_parkme); | 192 | EXPORT_SYMBOL_GPL(kthread_parkme); |
196 | 193 | ||
194 | void kthread_park_complete(struct task_struct *k) | ||
195 | { | ||
196 | complete(&to_kthread(k)->parked); | ||
197 | } | ||
198 | |||
197 | static int kthread(void *_create) | 199 | static int kthread(void *_create) |
198 | { | 200 | { |
199 | /* Copy data: it's on kthread's stack */ | 201 | /* Copy data: it's on kthread's stack */ |
@@ -450,22 +452,15 @@ void kthread_unpark(struct task_struct *k) | |||
450 | { | 452 | { |
451 | struct kthread *kthread = to_kthread(k); | 453 | struct kthread *kthread = to_kthread(k); |
452 | 454 | ||
453 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | ||
454 | /* | 455 | /* |
455 | * We clear the IS_PARKED bit here as we don't wait | 456 | * Newly created kthread was parked when the CPU was offline. |
456 | * until the task has left the park code. So if we'd | 457 | * The binding was lost and we need to set it again. |
457 | * park before that happens we'd see the IS_PARKED bit | ||
458 | * which might be about to be cleared. | ||
459 | */ | 458 | */ |
460 | if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { | 459 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) |
461 | /* | 460 | __kthread_bind(k, kthread->cpu, TASK_PARKED); |
462 | * Newly created kthread was parked when the CPU was offline. | 461 | |
463 | * The binding was lost and we need to set it again. | 462 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
464 | */ | 463 | wake_up_state(k, TASK_PARKED); |
465 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) | ||
466 | __kthread_bind(k, kthread->cpu, TASK_PARKED); | ||
467 | wake_up_state(k, TASK_PARKED); | ||
468 | } | ||
469 | } | 464 | } |
470 | EXPORT_SYMBOL_GPL(kthread_unpark); | 465 | EXPORT_SYMBOL_GPL(kthread_unpark); |
471 | 466 | ||
@@ -488,12 +483,13 @@ int kthread_park(struct task_struct *k) | |||
488 | if (WARN_ON(k->flags & PF_EXITING)) | 483 | if (WARN_ON(k->flags & PF_EXITING)) |
489 | return -ENOSYS; | 484 | return -ENOSYS; |
490 | 485 | ||
491 | if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) { | 486 | if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))) |
492 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 487 | return -EBUSY; |
493 | if (k != current) { | 488 | |
494 | wake_up_process(k); | 489 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
495 | wait_for_completion(&kthread->parked); | 490 | if (k != current) { |
496 | } | 491 | wake_up_process(k); |
492 | wait_for_completion(&kthread->parked); | ||
497 | } | 493 | } |
498 | 494 | ||
499 | return 0; | 495 | return 0; |
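With this rework, kthread_park() returns only once the target thread has genuinely been scheduled out in TASK_PARKED; the completion now fires from finish_task_switch() (see the kernel/sched/core.c hunks further down). The consumer side of the API is unchanged; a sketch of a parking-aware thread function, where do_work() is a hypothetical work item:

        static int worker_fn(void *data)
        {
                while (!kthread_should_stop()) {
                        if (kthread_should_park()) {
                                /* sleeps in TASK_PARKED until kthread_unpark() */
                                kthread_parkme();
                                continue;
                        }
                        do_work(data);          /* hypothetical */
                }
                return 0;
        }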
diff --git a/kernel/module.c b/kernel/module.c index ce8066b88178..c9bea7f2b43e 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -3517,6 +3517,11 @@ static noinline int do_init_module(struct module *mod) | |||
3517 | * walking this with preempt disabled. In all the failure paths, we | 3517 | * walking this with preempt disabled. In all the failure paths, we |
3518 | * call synchronize_sched(), but we don't want to slow down the success | 3518 | * call synchronize_sched(), but we don't want to slow down the success |
3519 | * path, so use actual RCU here. | 3519 | * path, so use actual RCU here. |
3520 | * Note that module_alloc() on most architectures creates W+X page | ||
3521 | * mappings which won't be cleaned up until do_free_init() runs. Any | ||
3522 | * code such as mark_rodata_ro() which depends on those mappings to | ||
3523 | * be cleaned up needs to sync with the queued work - ie | ||
3524 | * rcu_barrier_sched() | ||
3520 | */ | 3525 | */ |
3521 | call_rcu_sched(&freeinit->rcu, do_free_init); | 3526 | call_rcu_sched(&freeinit->rcu, do_free_init); |
3522 | mutex_unlock(&module_mutex); | 3527 | mutex_unlock(&module_mutex); |
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index 6be6c575b6cd..2d4ff5353ded 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c | |||
@@ -2,6 +2,7 @@ | |||
2 | /* | 2 | /* |
3 | * Auto-group scheduling implementation: | 3 | * Auto-group scheduling implementation: |
4 | */ | 4 | */ |
5 | #include <linux/nospec.h> | ||
5 | #include "sched.h" | 6 | #include "sched.h" |
6 | 7 | ||
7 | unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; | 8 | unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; |
@@ -209,7 +210,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) | |||
209 | static unsigned long next = INITIAL_JIFFIES; | 210 | static unsigned long next = INITIAL_JIFFIES; |
210 | struct autogroup *ag; | 211 | struct autogroup *ag; |
211 | unsigned long shares; | 212 | unsigned long shares; |
212 | int err; | 213 | int err, idx; |
213 | 214 | ||
214 | if (nice < MIN_NICE || nice > MAX_NICE) | 215 | if (nice < MIN_NICE || nice > MAX_NICE) |
215 | return -EINVAL; | 216 | return -EINVAL; |
@@ -227,7 +228,9 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) | |||
227 | 228 | ||
228 | next = HZ / 10 + jiffies; | 229 | next = HZ / 10 + jiffies; |
229 | ag = autogroup_task_get(p); | 230 | ag = autogroup_task_get(p); |
230 | shares = scale_load(sched_prio_to_weight[nice + 20]); | 231 | |
232 | idx = array_index_nospec(nice + 20, 40); | ||
233 | shares = scale_load(sched_prio_to_weight[idx]); | ||
231 | 234 | ||
232 | down_write(&ag->lock); | 235 | down_write(&ag->lock); |
233 | err = sched_group_set_shares(ag->tg, shares); | 236 | err = sched_group_set_shares(ag->tg, shares); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5e10aaeebfcc..092f7c4de903 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -7,6 +7,9 @@ | |||
7 | */ | 7 | */ |
8 | #include "sched.h" | 8 | #include "sched.h" |
9 | 9 | ||
10 | #include <linux/kthread.h> | ||
11 | #include <linux/nospec.h> | ||
12 | |||
10 | #include <asm/switch_to.h> | 13 | #include <asm/switch_to.h> |
11 | #include <asm/tlb.h> | 14 | #include <asm/tlb.h> |
12 | 15 | ||
@@ -2718,20 +2721,28 @@ static struct rq *finish_task_switch(struct task_struct *prev) | |||
2718 | membarrier_mm_sync_core_before_usermode(mm); | 2721 | membarrier_mm_sync_core_before_usermode(mm); |
2719 | mmdrop(mm); | 2722 | mmdrop(mm); |
2720 | } | 2723 | } |
2721 | if (unlikely(prev_state == TASK_DEAD)) { | 2724 | if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) { |
2722 | if (prev->sched_class->task_dead) | 2725 | switch (prev_state) { |
2723 | prev->sched_class->task_dead(prev); | 2726 | case TASK_DEAD: |
2727 | if (prev->sched_class->task_dead) | ||
2728 | prev->sched_class->task_dead(prev); | ||
2724 | 2729 | ||
2725 | /* | 2730 | /* |
2726 | * Remove function-return probe instances associated with this | 2731 | * Remove function-return probe instances associated with this |
2727 | * task and put them back on the free list. | 2732 | * task and put them back on the free list. |
2728 | */ | 2733 | */ |
2729 | kprobe_flush_task(prev); | 2734 | kprobe_flush_task(prev); |
2735 | |||
2736 | /* Task is done with its stack. */ | ||
2737 | put_task_stack(prev); | ||
2730 | 2738 | ||
2731 | /* Task is done with its stack. */ | 2739 | put_task_struct(prev); |
2732 | put_task_stack(prev); | 2740 | break; |
2733 | 2741 | ||
2734 | put_task_struct(prev); | 2742 | case TASK_PARKED: |
2743 | kthread_park_complete(prev); | ||
2744 | break; | ||
2745 | } | ||
2735 | } | 2746 | } |
2736 | 2747 | ||
2737 | tick_nohz_task_switch(); | 2748 | tick_nohz_task_switch(); |
@@ -3498,23 +3509,8 @@ static void __sched notrace __schedule(bool preempt) | |||
3498 | 3509 | ||
3499 | void __noreturn do_task_dead(void) | 3510 | void __noreturn do_task_dead(void) |
3500 | { | 3511 | { |
3501 | /* | ||
3502 | * The setting of TASK_RUNNING by try_to_wake_up() may be delayed | ||
3503 | * when the following two conditions become true. | ||
3504 | * - There is race condition of mmap_sem (It is acquired by | ||
3505 | * exit_mm()), and | ||
3506 | * - SMI occurs before setting TASK_RUNINNG. | ||
3507 | * (or hypervisor of virtual machine switches to other guest) | ||
3508 | * As a result, we may become TASK_RUNNING after becoming TASK_DEAD | ||
3509 | * | ||
3510 | * To avoid it, we have to wait for releasing tsk->pi_lock which | ||
3511 | * is held by try_to_wake_up() | ||
3512 | */ | ||
3513 | raw_spin_lock_irq(¤t->pi_lock); | ||
3514 | raw_spin_unlock_irq(¤t->pi_lock); | ||
3515 | |||
3516 | /* Causes final put_task_struct in finish_task_switch(): */ | 3512 | /* Causes final put_task_struct in finish_task_switch(): */ |
3517 | __set_current_state(TASK_DEAD); | 3513 | set_special_state(TASK_DEAD); |
3518 | 3514 | ||
3519 | /* Tell freezer to ignore us: */ | 3515 | /* Tell freezer to ignore us: */ |
3520 | current->flags |= PF_NOFREEZE; | 3516 | current->flags |= PF_NOFREEZE; |
@@ -6928,11 +6924,15 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, | |||
6928 | struct cftype *cft, s64 nice) | 6924 | struct cftype *cft, s64 nice) |
6929 | { | 6925 | { |
6930 | unsigned long weight; | 6926 | unsigned long weight; |
6927 | int idx; | ||
6931 | 6928 | ||
6932 | if (nice < MIN_NICE || nice > MAX_NICE) | 6929 | if (nice < MIN_NICE || nice > MAX_NICE) |
6933 | return -ERANGE; | 6930 | return -ERANGE; |
6934 | 6931 | ||
6935 | weight = sched_prio_to_weight[NICE_TO_PRIO(nice) - MAX_RT_PRIO]; | 6932 | idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; |
6933 | idx = array_index_nospec(idx, 40); | ||
6934 | weight = sched_prio_to_weight[idx]; | ||
6935 | |||
6936 | return sched_group_set_shares(css_tg(css), scale_load(weight)); | 6936 | return sched_group_set_shares(css_tg(css), scale_load(weight)); |
6937 | } | 6937 | } |
6938 | #endif | 6938 | #endif |
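The TASK_DEAD/TASK_PARKED handling above, and the TASK_STOPPED/TASK_TRACED changes in kernel/signal.c below, rely on set_special_state() introduced by this series: unlike __set_current_state(), it serializes against ->pi_lock so a concurrent try_to_wake_up() cannot overwrite a state that must stick. Roughly, glossing over the debug annotations in the real definition:

        #define set_special_state(state_value)                                  \
                do {                                                            \
                        unsigned long flags;                                    \
                        raw_spin_lock_irqsave(&current->pi_lock, flags);        \
                        current->state = (state_value);                         \
                        raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
                } while (0)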
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index d2c6083304b4..e13df951aca7 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c | |||
@@ -305,7 +305,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, | |||
305 | * Do not reduce the frequency if the CPU has not been idle | 305 | * Do not reduce the frequency if the CPU has not been idle |
306 | * recently, as the reduction is likely to be premature then. | 306 | * recently, as the reduction is likely to be premature then. |
307 | */ | 307 | */ |
308 | if (busy && next_f < sg_policy->next_freq) { | 308 | if (busy && next_f < sg_policy->next_freq && |
309 | sg_policy->next_freq != UINT_MAX) { | ||
309 | next_f = sg_policy->next_freq; | 310 | next_f = sg_policy->next_freq; |
310 | 311 | ||
311 | /* Reset cached freq as next_freq has changed */ | 312 | /* Reset cached freq as next_freq has changed */ |
@@ -396,19 +397,6 @@ static void sugov_irq_work(struct irq_work *irq_work) | |||
396 | 397 | ||
397 | sg_policy = container_of(irq_work, struct sugov_policy, irq_work); | 398 | sg_policy = container_of(irq_work, struct sugov_policy, irq_work); |
398 | 399 | ||
399 | /* | ||
400 | * For RT tasks, the schedutil governor shoots the frequency to maximum. | ||
401 | * Special care must be taken to ensure that this kthread doesn't result | ||
402 | * in the same behavior. | ||
403 | * | ||
404 | * This is (mostly) guaranteed by the work_in_progress flag. The flag is | ||
405 | * updated only at the end of the sugov_work() function and before that | ||
406 | * the schedutil governor rejects all other frequency scaling requests. | ||
407 | * | ||
408 | * There is a very rare case though, where the RT thread yields right | ||
409 | * after the work_in_progress flag is cleared. The effects of that are | ||
410 | * neglected for now. | ||
411 | */ | ||
412 | kthread_queue_work(&sg_policy->worker, &sg_policy->work); | 400 | kthread_queue_work(&sg_policy->worker, &sg_policy->work); |
413 | } | 401 | } |
414 | 402 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 54dc31e7ab9b..79f574dba096 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -1854,7 +1854,6 @@ static int task_numa_migrate(struct task_struct *p) | |||
1854 | static void numa_migrate_preferred(struct task_struct *p) | 1854 | static void numa_migrate_preferred(struct task_struct *p) |
1855 | { | 1855 | { |
1856 | unsigned long interval = HZ; | 1856 | unsigned long interval = HZ; |
1857 | unsigned long numa_migrate_retry; | ||
1858 | 1857 | ||
1859 | /* This task has no NUMA fault statistics yet */ | 1858 | /* This task has no NUMA fault statistics yet */ |
1860 | if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) | 1859 | if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) |
@@ -1862,18 +1861,7 @@ static void numa_migrate_preferred(struct task_struct *p) | |||
1862 | 1861 | ||
1863 | /* Periodically retry migrating the task to the preferred node */ | 1862 | /* Periodically retry migrating the task to the preferred node */ |
1864 | interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); | 1863 | interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); |
1865 | numa_migrate_retry = jiffies + interval; | 1864 | p->numa_migrate_retry = jiffies + interval; |
1866 | |||
1867 | /* | ||
1868 | * Check that the new retry threshold is after the current one. If | ||
1869 | * the retry is in the future, it implies that wake_affine has | ||
1870 | * temporarily asked NUMA balancing to backoff from placement. | ||
1871 | */ | ||
1872 | if (numa_migrate_retry > p->numa_migrate_retry) | ||
1873 | return; | ||
1874 | |||
1875 | /* Safe to try placing the task on the preferred node */ | ||
1876 | p->numa_migrate_retry = numa_migrate_retry; | ||
1877 | 1865 | ||
1878 | /* Success if task is already running on preferred CPU */ | 1866 | /* Success if task is already running on preferred CPU */ |
1879 | if (task_node(p) == p->numa_preferred_nid) | 1867 | if (task_node(p) == p->numa_preferred_nid) |
@@ -5922,48 +5910,6 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p, | |||
5922 | return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; | 5910 | return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; |
5923 | } | 5911 | } |
5924 | 5912 | ||
5925 | #ifdef CONFIG_NUMA_BALANCING | ||
5926 | static void | ||
5927 | update_wa_numa_placement(struct task_struct *p, int prev_cpu, int target) | ||
5928 | { | ||
5929 | unsigned long interval; | ||
5930 | |||
5931 | if (!static_branch_likely(&sched_numa_balancing)) | ||
5932 | return; | ||
5933 | |||
5934 | /* If balancing has no preference then continue gathering data */ | ||
5935 | if (p->numa_preferred_nid == -1) | ||
5936 | return; | ||
5937 | |||
5938 | /* | ||
5939 | * If the wakeup is not affecting locality then it is neutral from | ||
5940 | * the perspective of NUMA balacing so continue gathering data. | ||
5941 | */ | ||
5942 | if (cpu_to_node(prev_cpu) == cpu_to_node(target)) | ||
5943 | return; | ||
5944 | |||
5945 | /* | ||
5946 | * Temporarily prevent NUMA balancing trying to place waker/wakee after | ||
5947 | * wakee has been moved by wake_affine. This will potentially allow | ||
5948 | * related tasks to converge and update their data placement. The | ||
5949 | * 4 * numa_scan_period is to allow the two-pass filter to migrate | ||
5950 | * hot data to the wakers node. | ||
5951 | */ | ||
5952 | interval = max(sysctl_numa_balancing_scan_delay, | ||
5953 | p->numa_scan_period << 2); | ||
5954 | p->numa_migrate_retry = jiffies + msecs_to_jiffies(interval); | ||
5955 | |||
5956 | interval = max(sysctl_numa_balancing_scan_delay, | ||
5957 | current->numa_scan_period << 2); | ||
5958 | current->numa_migrate_retry = jiffies + msecs_to_jiffies(interval); | ||
5959 | } | ||
5960 | #else | ||
5961 | static void | ||
5962 | update_wa_numa_placement(struct task_struct *p, int prev_cpu, int target) | ||
5963 | { | ||
5964 | } | ||
5965 | #endif | ||
5966 | |||
5967 | static int wake_affine(struct sched_domain *sd, struct task_struct *p, | 5913 | static int wake_affine(struct sched_domain *sd, struct task_struct *p, |
5968 | int this_cpu, int prev_cpu, int sync) | 5914 | int this_cpu, int prev_cpu, int sync) |
5969 | { | 5915 | { |
@@ -5979,7 +5925,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, | |||
5979 | if (target == nr_cpumask_bits) | 5925 | if (target == nr_cpumask_bits) |
5980 | return prev_cpu; | 5926 | return prev_cpu; |
5981 | 5927 | ||
5982 | update_wa_numa_placement(p, prev_cpu, target); | ||
5983 | schedstat_inc(sd->ttwu_move_affine); | 5928 | schedstat_inc(sd->ttwu_move_affine); |
5984 | schedstat_inc(p->se.statistics.nr_wakeups_affine); | 5929 | schedstat_inc(p->se.statistics.nr_wakeups_affine); |
5985 | return target; | 5930 | return target; |
@@ -9847,6 +9792,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) | |||
9847 | if (curr_cost > this_rq->max_idle_balance_cost) | 9792 | if (curr_cost > this_rq->max_idle_balance_cost) |
9848 | this_rq->max_idle_balance_cost = curr_cost; | 9793 | this_rq->max_idle_balance_cost = curr_cost; |
9849 | 9794 | ||
9795 | out: | ||
9850 | /* | 9796 | /* |
9851 | * While browsing the domains, we released the rq lock, a task could | 9797 | * While browsing the domains, we released the rq lock, a task could |
9852 | * have been enqueued in the meantime. Since we're not going idle, | 9798 | * have been enqueued in the meantime. Since we're not going idle, |
@@ -9855,7 +9801,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) | |||
9855 | if (this_rq->cfs.h_nr_running && !pulled_task) | 9801 | if (this_rq->cfs.h_nr_running && !pulled_task) |
9856 | pulled_task = 1; | 9802 | pulled_task = 1; |
9857 | 9803 | ||
9858 | out: | ||
9859 | /* Move the next balance forward */ | 9804 | /* Move the next balance forward */ |
9860 | if (time_after(this_rq->next_balance, next_balance)) | 9805 | if (time_after(this_rq->next_balance, next_balance)) |
9861 | this_rq->next_balance = next_balance; | 9806 | this_rq->next_balance = next_balance; |
diff --git a/kernel/signal.c b/kernel/signal.c index d4ccea599692..9c33163a6165 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1961,14 +1961,27 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) | |||
1961 | return; | 1961 | return; |
1962 | } | 1962 | } |
1963 | 1963 | ||
1964 | set_special_state(TASK_TRACED); | ||
1965 | |||
1964 | /* | 1966 | /* |
1965 | * We're committing to trapping. TRACED should be visible before | 1967 | * We're committing to trapping. TRACED should be visible before |
1966 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). | 1968 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). |
1967 | * Also, transition to TRACED and updates to ->jobctl should be | 1969 | * Also, transition to TRACED and updates to ->jobctl should be |
1968 | * atomic with respect to siglock and should be done after the arch | 1970 | * atomic with respect to siglock and should be done after the arch |
1969 | * hook as siglock is released and regrabbed across it. | 1971 | * hook as siglock is released and regrabbed across it. |
1972 | * | ||
1973 | * TRACER TRACEE | ||
1974 | * | ||
1975 | * ptrace_attach() | ||
1976 | * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) | ||
1977 | * do_wait() | ||
1978 | * set_current_state() smp_wmb(); | ||
1979 | * ptrace_do_wait() | ||
1980 | * wait_task_stopped() | ||
1981 | * task_stopped_code() | ||
1982 | * [L] task_is_traced() [S] task_clear_jobctl_trapping(); | ||
1970 | */ | 1983 | */ |
1971 | set_current_state(TASK_TRACED); | 1984 | smp_wmb(); |
1972 | 1985 | ||
1973 | current->last_siginfo = info; | 1986 | current->last_siginfo = info; |
1974 | current->exit_code = exit_code; | 1987 | current->exit_code = exit_code; |
@@ -2176,7 +2189,7 @@ static bool do_signal_stop(int signr) | |||
2176 | if (task_participate_group_stop(current)) | 2189 | if (task_participate_group_stop(current)) |
2177 | notify = CLD_STOPPED; | 2190 | notify = CLD_STOPPED; |
2178 | 2191 | ||
2179 | __set_current_state(TASK_STOPPED); | 2192 | set_special_state(TASK_STOPPED); |
2180 | spin_unlock_irq(¤t->sighand->siglock); | 2193 | spin_unlock_irq(¤t->sighand->siglock); |
2181 | 2194 | ||
2182 | /* | 2195 | /* |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index b7591261652d..64c0291b579c 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/smpboot.h> | 21 | #include <linux/smpboot.h> |
22 | #include <linux/atomic.h> | 22 | #include <linux/atomic.h> |
23 | #include <linux/nmi.h> | 23 | #include <linux/nmi.h> |
24 | #include <linux/sched/wake_q.h> | ||
24 | 25 | ||
25 | /* | 26 | /* |
26 | * Structure to determine completion condition and record errors. May | 27 | * Structure to determine completion condition and record errors. May |
@@ -65,27 +66,31 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done) | |||
65 | } | 66 | } |
66 | 67 | ||
67 | static void __cpu_stop_queue_work(struct cpu_stopper *stopper, | 68 | static void __cpu_stop_queue_work(struct cpu_stopper *stopper, |
68 | struct cpu_stop_work *work) | 69 | struct cpu_stop_work *work, |
70 | struct wake_q_head *wakeq) | ||
69 | { | 71 | { |
70 | list_add_tail(&work->list, &stopper->works); | 72 | list_add_tail(&work->list, &stopper->works); |
71 | wake_up_process(stopper->thread); | 73 | wake_q_add(wakeq, stopper->thread); |
72 | } | 74 | } |
73 | 75 | ||
74 | /* queue @work to @stopper. if offline, @work is completed immediately */ | 76 | /* queue @work to @stopper. if offline, @work is completed immediately */ |
75 | static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) | 77 | static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) |
76 | { | 78 | { |
77 | struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); | 79 | struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); |
80 | DEFINE_WAKE_Q(wakeq); | ||
78 | unsigned long flags; | 81 | unsigned long flags; |
79 | bool enabled; | 82 | bool enabled; |
80 | 83 | ||
81 | spin_lock_irqsave(&stopper->lock, flags); | 84 | spin_lock_irqsave(&stopper->lock, flags); |
82 | enabled = stopper->enabled; | 85 | enabled = stopper->enabled; |
83 | if (enabled) | 86 | if (enabled) |
84 | __cpu_stop_queue_work(stopper, work); | 87 | __cpu_stop_queue_work(stopper, work, &wakeq); |
85 | else if (work->done) | 88 | else if (work->done) |
86 | cpu_stop_signal_done(work->done); | 89 | cpu_stop_signal_done(work->done); |
87 | spin_unlock_irqrestore(&stopper->lock, flags); | 90 | spin_unlock_irqrestore(&stopper->lock, flags); |
88 | 91 | ||
92 | wake_up_q(&wakeq); | ||
93 | |||
89 | return enabled; | 94 | return enabled; |
90 | } | 95 | } |
91 | 96 | ||
@@ -229,6 +234,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, | |||
229 | { | 234 | { |
230 | struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1); | 235 | struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1); |
231 | struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); | 236 | struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); |
237 | DEFINE_WAKE_Q(wakeq); | ||
232 | int err; | 238 | int err; |
233 | retry: | 239 | retry: |
234 | spin_lock_irq(&stopper1->lock); | 240 | spin_lock_irq(&stopper1->lock); |
@@ -252,8 +258,8 @@ retry: | |||
252 | goto unlock; | 258 | goto unlock; |
253 | 259 | ||
254 | err = 0; | 260 | err = 0; |
255 | __cpu_stop_queue_work(stopper1, work1); | 261 | __cpu_stop_queue_work(stopper1, work1, &wakeq); |
256 | __cpu_stop_queue_work(stopper2, work2); | 262 | __cpu_stop_queue_work(stopper2, work2, &wakeq); |
257 | unlock: | 263 | unlock: |
258 | spin_unlock(&stopper2->lock); | 264 | spin_unlock(&stopper2->lock); |
259 | spin_unlock_irq(&stopper1->lock); | 265 | spin_unlock_irq(&stopper1->lock); |
@@ -263,6 +269,9 @@ unlock: | |||
263 | cpu_relax(); | 269 | cpu_relax(); |
264 | goto retry; | 270 | goto retry; |
265 | } | 271 | } |
272 | |||
273 | wake_up_q(&wakeq); | ||
274 | |||
266 | return err; | 275 | return err; |
267 | } | 276 | } |
268 | /** | 277 | /** |
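The stop_machine change is an instance of the wake_q pattern: while holding the stopper spinlock, only record which tasks need waking; issue the actual wake-ups after the lock is dropped, so no task is woken (and possibly preempts the waker) under a spinlock. A minimal sketch with a hypothetical worker structure:

        #include <linux/sched/wake_q.h>

        static void queue_item(struct worker *w, struct item *it)
        {
                DEFINE_WAKE_Q(wakeq);
                unsigned long flags;

                spin_lock_irqsave(&w->lock, flags);
                list_add_tail(&it->list, &w->items);
                wake_q_add(&wakeq, w->thread);          /* just record the task */
                spin_unlock_irqrestore(&w->lock, flags);

                wake_up_q(&wakeq);                      /* wake after unlocking */
        }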
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 1f951b3df60c..7d306b74230f 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -762,6 +762,9 @@ static int regex_match_full(char *str, struct regex *r, int len) | |||
762 | 762 | ||
763 | static int regex_match_front(char *str, struct regex *r, int len) | 763 | static int regex_match_front(char *str, struct regex *r, int len) |
764 | { | 764 | { |
765 | if (len < r->len) | ||
766 | return 0; | ||
767 | |||
765 | if (strncmp(str, r->pattern, r->len) == 0) | 768 | if (strncmp(str, r->pattern, r->len) == 0) |
766 | return 1; | 769 | return 1; |
767 | return 0; | 770 | return 0; |
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c index 5985a25e6cbc..5367ffa5c18f 100644 --- a/lib/find_bit_benchmark.c +++ b/lib/find_bit_benchmark.c | |||
@@ -132,7 +132,12 @@ static int __init find_bit_test(void) | |||
132 | test_find_next_bit(bitmap, BITMAP_LEN); | 132 | test_find_next_bit(bitmap, BITMAP_LEN); |
133 | test_find_next_zero_bit(bitmap, BITMAP_LEN); | 133 | test_find_next_zero_bit(bitmap, BITMAP_LEN); |
134 | test_find_last_bit(bitmap, BITMAP_LEN); | 134 | test_find_last_bit(bitmap, BITMAP_LEN); |
135 | test_find_first_bit(bitmap, BITMAP_LEN); | 135 | |
136 | /* | ||
137 | * test_find_first_bit() may take some time, so | ||
138 | * traverse only part of bitmap to avoid soft lockup. | ||
139 | */ | ||
140 | test_find_first_bit(bitmap, BITMAP_LEN / 10); | ||
136 | test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); | 141 | test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); |
137 | 142 | ||
138 | pr_err("\nStart testing find_bit() with sparse bitmap\n"); | 143 | pr_err("\nStart testing find_bit() with sparse bitmap\n"); |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index da9e10c827df..43e0cbedc3a0 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -1612,11 +1612,9 @@ static void set_iter_tags(struct radix_tree_iter *iter, | |||
1612 | static void __rcu **skip_siblings(struct radix_tree_node **nodep, | 1612 | static void __rcu **skip_siblings(struct radix_tree_node **nodep, |
1613 | void __rcu **slot, struct radix_tree_iter *iter) | 1613 | void __rcu **slot, struct radix_tree_iter *iter) |
1614 | { | 1614 | { |
1615 | void *sib = node_to_entry(slot - 1); | ||
1616 | |||
1617 | while (iter->index < iter->next_index) { | 1615 | while (iter->index < iter->next_index) { |
1618 | *nodep = rcu_dereference_raw(*slot); | 1616 | *nodep = rcu_dereference_raw(*slot); |
1619 | if (*nodep && *nodep != sib) | 1617 | if (*nodep && !is_sibling_entry(iter->node, *nodep)) |
1620 | return slot; | 1618 | return slot; |
1621 | slot++; | 1619 | slot++; |
1622 | iter->index = __radix_tree_iter_add(iter, 1); | 1620 | iter->index = __radix_tree_iter_add(iter, 1); |
@@ -1631,7 +1629,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot, | |||
1631 | struct radix_tree_iter *iter, unsigned flags) | 1629 | struct radix_tree_iter *iter, unsigned flags) |
1632 | { | 1630 | { |
1633 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; | 1631 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
1634 | struct radix_tree_node *node = rcu_dereference_raw(*slot); | 1632 | struct radix_tree_node *node; |
1635 | 1633 | ||
1636 | slot = skip_siblings(&node, slot, iter); | 1634 | slot = skip_siblings(&node, slot, iter); |
1637 | 1635 | ||
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 12fbaa445637..cc640588f145 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -714,7 +714,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
714 | 714 | ||
715 | phys_addr = swiotlb_tbl_map_single(dev, | 715 | phys_addr = swiotlb_tbl_map_single(dev, |
716 | __phys_to_dma(dev, io_tlb_start), | 716 | __phys_to_dma(dev, io_tlb_start), |
717 | 0, size, DMA_FROM_DEVICE, 0); | 717 | 0, size, DMA_FROM_DEVICE, attrs); |
718 | if (phys_addr == SWIOTLB_MAP_ERROR) | 718 | if (phys_addr == SWIOTLB_MAP_ERROR) |
719 | goto out_warn; | 719 | goto out_warn; |
720 | 720 | ||
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index de16f7869fb1..6cd7d0740005 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c | |||
@@ -331,23 +331,32 @@ static void noinline __init test_mem_optimisations(void) | |||
331 | unsigned int start, nbits; | 331 | unsigned int start, nbits; |
332 | 332 | ||
333 | for (start = 0; start < 1024; start += 8) { | 333 | for (start = 0; start < 1024; start += 8) { |
334 | memset(bmap1, 0x5a, sizeof(bmap1)); | ||
335 | memset(bmap2, 0x5a, sizeof(bmap2)); | ||
336 | for (nbits = 0; nbits < 1024 - start; nbits += 8) { | 334 | for (nbits = 0; nbits < 1024 - start; nbits += 8) { |
335 | memset(bmap1, 0x5a, sizeof(bmap1)); | ||
336 | memset(bmap2, 0x5a, sizeof(bmap2)); | ||
337 | |||
337 | bitmap_set(bmap1, start, nbits); | 338 | bitmap_set(bmap1, start, nbits); |
338 | __bitmap_set(bmap2, start, nbits); | 339 | __bitmap_set(bmap2, start, nbits); |
339 | if (!bitmap_equal(bmap1, bmap2, 1024)) | 340 | if (!bitmap_equal(bmap1, bmap2, 1024)) { |
340 | printk("set not equal %d %d\n", start, nbits); | 341 | printk("set not equal %d %d\n", start, nbits); |
341 | if (!__bitmap_equal(bmap1, bmap2, 1024)) | 342 | failed_tests++; |
343 | } | ||
344 | if (!__bitmap_equal(bmap1, bmap2, 1024)) { | ||
342 | printk("set not __equal %d %d\n", start, nbits); | 345 | printk("set not __equal %d %d\n", start, nbits); |
346 | failed_tests++; | ||
347 | } | ||
343 | 348 | ||
344 | bitmap_clear(bmap1, start, nbits); | 349 | bitmap_clear(bmap1, start, nbits); |
345 | __bitmap_clear(bmap2, start, nbits); | 350 | __bitmap_clear(bmap2, start, nbits); |
346 | if (!bitmap_equal(bmap1, bmap2, 1024)) | 351 | if (!bitmap_equal(bmap1, bmap2, 1024)) { |
347 | printk("clear not equal %d %d\n", start, nbits); | 352 | printk("clear not equal %d %d\n", start, nbits); |
348 | if (!__bitmap_equal(bmap1, bmap2, 1024)) | 353 | failed_tests++; |
354 | } | ||
355 | if (!__bitmap_equal(bmap1, bmap2, 1024)) { | ||
349 | printk("clear not __equal %d %d\n", start, | 356 | printk("clear not __equal %d %d\n", start, |
350 | nbits); | 357 | nbits); |
358 | failed_tests++; | ||
359 | } | ||
351 | } | 360 | } |
352 | } | 361 | } |
353 | } | 362 | } |
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 30c0cb8cc9bc..23920c5ff728 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -1669,19 +1669,22 @@ char *pointer_string(char *buf, char *end, const void *ptr, | |||
1669 | return number(buf, end, (unsigned long int)ptr, spec); | 1669 | return number(buf, end, (unsigned long int)ptr, spec); |
1670 | } | 1670 | } |
1671 | 1671 | ||
1672 | static bool have_filled_random_ptr_key __read_mostly; | 1672 | static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); |
1673 | static siphash_key_t ptr_key __read_mostly; | 1673 | static siphash_key_t ptr_key __read_mostly; |
1674 | 1674 | ||
1675 | static void fill_random_ptr_key(struct random_ready_callback *unused) | 1675 | static void enable_ptr_key_workfn(struct work_struct *work) |
1676 | { | 1676 | { |
1677 | get_random_bytes(&ptr_key, sizeof(ptr_key)); | 1677 | get_random_bytes(&ptr_key, sizeof(ptr_key)); |
1678 | /* | 1678 | /* Needs to run from preemptible context */ |
1679 | * have_filled_random_ptr_key==true is dependent on get_random_bytes(). | 1679 | static_branch_disable(¬_filled_random_ptr_key); |
1680 | * ptr_to_id() needs to see have_filled_random_ptr_key==true | 1680 | } |
1681 | * after get_random_bytes() returns. | 1681 | |
1682 | */ | 1682 | static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); |
1683 | smp_mb(); | 1683 | |
1684 | WRITE_ONCE(have_filled_random_ptr_key, true); | 1684 | static void fill_random_ptr_key(struct random_ready_callback *unused) |
1685 | { | ||
1686 | /* This may be in an interrupt handler. */ | ||
1687 | queue_work(system_unbound_wq, &enable_ptr_key_work); | ||
1685 | } | 1688 | } |
1686 | 1689 | ||
1687 | static struct random_ready_callback random_ready = { | 1690 | static struct random_ready_callback random_ready = { |
@@ -1695,7 +1698,8 @@ static int __init initialize_ptr_random(void) | |||
1695 | if (!ret) { | 1698 | if (!ret) { |
1696 | return 0; | 1699 | return 0; |
1697 | } else if (ret == -EALREADY) { | 1700 | } else if (ret == -EALREADY) { |
1698 | fill_random_ptr_key(&random_ready); | 1701 | /* This is in preemptible context */ |
1702 | enable_ptr_key_workfn(&enable_ptr_key_work); | ||
1699 | return 0; | 1703 | return 0; |
1700 | } | 1704 | } |
1701 | 1705 | ||
@@ -1709,7 +1713,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) | |||
1709 | unsigned long hashval; | 1713 | unsigned long hashval; |
1710 | const int default_width = 2 * sizeof(ptr); | 1714 | const int default_width = 2 * sizeof(ptr); |
1711 | 1715 | ||
1712 | if (unlikely(!have_filled_random_ptr_key)) { | 1716 | if (static_branch_unlikely(¬_filled_random_ptr_key)) { |
1713 | spec.field_width = default_width; | 1717 | spec.field_width = default_width; |
1714 | /* string length must be less than default_width */ | 1718 | /* string length must be less than default_width */ |
1715 | return string(buf, end, "(ptrval)", spec); | 1719 | return string(buf, end, "(ptrval)", spec); |
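The vsprintf change swaps a plain boolean for a static key, but the random-ready callback that flips it may run in hard-irq context while static_branch_disable() can sleep, so the flip is bounced through the unbound workqueue. The shape of the pattern, reduced to its parts (real_name() is a placeholder for the expensive path):

        static DEFINE_STATIC_KEY_TRUE(not_ready);

        static void enable_workfn(struct work_struct *work)
        {
                /* may sleep (patches code), so run from process context */
                static_branch_disable(&not_ready);
        }
        static DECLARE_WORK(enable_work, enable_workfn);

        static void ready_callback(void)        /* possibly hardirq context */
        {
                queue_work(system_unbound_wq, &enable_work);
        }

        static const char *lookup_name(void)
        {
                /* a single patched branch once the key has been flipped */
                if (static_branch_unlikely(&not_ready))
                        return "(not ready)";
                return real_name();             /* hypothetical */
        }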
diff --git a/mm/Kconfig b/mm/Kconfig index d5004d82a1d6..e14c01513bfd 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -636,6 +636,7 @@ config DEFERRED_STRUCT_PAGE_INIT | |||
636 | default n | 636 | default n |
637 | depends on NO_BOOTMEM | 637 | depends on NO_BOOTMEM |
638 | depends on !FLATMEM | 638 | depends on !FLATMEM |
639 | depends on !NEED_PER_CPU_KM | ||
639 | help | 640 | help |
640 | Ordinarily all struct pages are initialised during early boot in a | 641 | Ordinarily all struct pages are initialised during early boot in a |
641 | single thread. On very large machines this can take a considerable | 642 | single thread. On very large machines this can take a considerable |
diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c | |||
@@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) | |||

544 | if (vm_flags & (VM_IO | VM_PFNMAP)) | 544 | if (vm_flags & (VM_IO | VM_PFNMAP)) |
545 | return -EFAULT; | 545 | return -EFAULT; |
546 | 546 | ||
547 | if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) | ||
548 | return -EFAULT; | ||
549 | |||
547 | if (write) { | 550 | if (write) { |
548 | if (!(vm_flags & VM_WRITE)) { | 551 | if (!(vm_flags & VM_WRITE)) { |
549 | if (!(gup_flags & FOLL_FORCE)) | 552 | if (!(gup_flags & FOLL_FORCE)) |
diff --git a/mm/migrate.c b/mm/migrate.c index 568433023831..8c0af0f7cab1 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -528,14 +528,12 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
528 | int i; | 528 | int i; |
529 | int index = page_index(page); | 529 | int index = page_index(page); |
530 | 530 | ||
531 | for (i = 0; i < HPAGE_PMD_NR; i++) { | 531 | for (i = 1; i < HPAGE_PMD_NR; i++) { |
532 | pslot = radix_tree_lookup_slot(&mapping->i_pages, | 532 | pslot = radix_tree_lookup_slot(&mapping->i_pages, |
533 | index + i); | 533 | index + i); |
534 | radix_tree_replace_slot(&mapping->i_pages, pslot, | 534 | radix_tree_replace_slot(&mapping->i_pages, pslot, |
535 | newpage + i); | 535 | newpage + i); |
536 | } | 536 | } |
537 | } else { | ||
538 | radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); | ||
539 | } | 537 | } |
540 | 538 | ||
541 | /* | 539 | /* |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -1324,6 +1324,35 @@ static inline int mlock_future_check(struct mm_struct *mm, | |||
1324 | return 0; | 1324 | return 0; |
1325 | } | 1325 | } |
1326 | 1326 | ||
1327 | static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) | ||
1328 | { | ||
1329 | if (S_ISREG(inode->i_mode)) | ||
1330 | return inode->i_sb->s_maxbytes; | ||
1331 | |||
1332 | if (S_ISBLK(inode->i_mode)) | ||
1333 | return MAX_LFS_FILESIZE; | ||
1334 | |||
1335 | /* Special "we do even unsigned file positions" case */ | ||
1336 | if (file->f_mode & FMODE_UNSIGNED_OFFSET) | ||
1337 | return 0; | ||
1338 | |||
1339 | /* Yes, random drivers might want more. But I'm tired of buggy drivers */ | ||
1340 | return ULONG_MAX; | ||
1341 | } | ||
1342 | |||
1343 | static inline bool file_mmap_ok(struct file *file, struct inode *inode, | ||
1344 | unsigned long pgoff, unsigned long len) | ||
1345 | { | ||
1346 | u64 maxsize = file_mmap_size_max(file, inode); | ||
1347 | |||
1348 | if (maxsize && len > maxsize) | ||
1349 | return false; | ||
1350 | maxsize -= len; | ||
1351 | if (pgoff > maxsize >> PAGE_SHIFT) | ||
1352 | return false; | ||
1353 | return true; | ||
1354 | } | ||
1355 | |||
1327 | /* | 1356 | /* |
1328 | * The caller must hold down_write(¤t->mm->mmap_sem). | 1357 | * The caller must hold down_write(¤t->mm->mmap_sem). |
1329 | */ | 1358 | */ |
@@ -1409,6 +1438,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, | |||
1409 | struct inode *inode = file_inode(file); | 1438 | struct inode *inode = file_inode(file); |
1410 | unsigned long flags_mask; | 1439 | unsigned long flags_mask; |
1411 | 1440 | ||
1441 | if (!file_mmap_ok(file, inode, pgoff, len)) | ||
1442 | return -EOVERFLOW; | ||
1443 | |||
1412 | flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; | 1444 | flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; |
1413 | 1445 | ||
1414 | switch (flags & MAP_TYPE) { | 1446 | switch (flags & MAP_TYPE) { |
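file_mmap_ok() above never computes pgoff << PAGE_SHIFT + len, which could wrap; it bounds len first and then compares pgoff against the remaining headroom. The same check isolated, ignoring the maxsize == 0 "unsigned offsets allowed" special case:

        static bool mapping_fits(u64 maxsize, unsigned long pgoff,
                                 unsigned long len)
        {
                if (len > maxsize)
                        return false;
                /* len <= maxsize, so the subtraction cannot underflow */
                return pgoff <= (maxsize - len) >> PAGE_SHIFT;
        }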
@@ -3024,6 +3056,32 @@ void exit_mmap(struct mm_struct *mm) | |||
3024 | /* mm's last user has gone, and its about to be pulled down */ | 3056 | /* mm's last user has gone, and its about to be pulled down */ |
3025 | mmu_notifier_release(mm); | 3057 | mmu_notifier_release(mm); |
3026 | 3058 | ||
3059 | if (unlikely(mm_is_oom_victim(mm))) { | ||
3060 | /* | ||
3061 | * Manually reap the mm to free as much memory as possible. | ||
3062 | * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard | ||
3063 | * this mm from further consideration. Taking mm->mmap_sem for | ||
3064 | * write after setting MMF_OOM_SKIP will guarantee that the oom | ||
3065 | * reaper will not run on this mm again after mmap_sem is | ||
3066 | * dropped. | ||
3067 | * | ||
3068 | * Nothing can be holding mm->mmap_sem here and the above call | ||
3069 | * to mmu_notifier_release(mm) ensures mmu notifier callbacks in | ||
3070 | * __oom_reap_task_mm() will not block. | ||
3071 | * | ||
3072 | * This needs to be done before calling munlock_vma_pages_all(), | ||
3073 | * which clears VM_LOCKED, otherwise the oom reaper cannot | ||
3074 | * reliably test it. | ||
3075 | */ | ||
3076 | mutex_lock(&oom_lock); | ||
3077 | __oom_reap_task_mm(mm); | ||
3078 | mutex_unlock(&oom_lock); | ||
3079 | |||
3080 | set_bit(MMF_OOM_SKIP, &mm->flags); | ||
3081 | down_write(&mm->mmap_sem); | ||
3082 | up_write(&mm->mmap_sem); | ||
3083 | } | ||
3084 | |||
3027 | if (mm->locked_vm) { | 3085 | if (mm->locked_vm) { |
3028 | vma = mm->mmap; | 3086 | vma = mm->mmap; |
3029 | while (vma) { | 3087 | while (vma) { |
@@ -3045,24 +3103,6 @@ void exit_mmap(struct mm_struct *mm) | |||
3045 | /* update_hiwater_rss(mm) here? but nobody should be looking */ | 3103 | /* update_hiwater_rss(mm) here? but nobody should be looking */ |
3046 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ | 3104 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ |
3047 | unmap_vmas(&tlb, vma, 0, -1); | 3105 | unmap_vmas(&tlb, vma, 0, -1); |
3048 | |||
3049 | if (unlikely(mm_is_oom_victim(mm))) { | ||
3050 | /* | ||
3051 | * Wait for oom_reap_task() to stop working on this | ||
3052 | * mm. Because MMF_OOM_SKIP is already set before | ||
3053 | * calling down_read(), oom_reap_task() will not run | ||
3054 | * on this "mm" post up_write(). | ||
3055 | * | ||
3056 | * mm_is_oom_victim() cannot be set from under us | ||
3057 | * either because victim->mm is already set to NULL | ||
3058 | * under task_lock before calling mmput and oom_mm is | ||
3059 | * set not NULL by the OOM killer only if victim->mm | ||
3060 | * is found not NULL while holding the task_lock. | ||
3061 | */ | ||
3062 | set_bit(MMF_OOM_SKIP, &mm->flags); | ||
3063 | down_write(&mm->mmap_sem); | ||
3064 | up_write(&mm->mmap_sem); | ||
3065 | } | ||
3066 | free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); | 3106 | free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); |
3067 | tlb_finish_mmu(&tlb, 0, -1); | 3107 | tlb_finish_mmu(&tlb, 0, -1); |
3068 | 3108 | ||
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ff992fa8760a..8ba6cb88cf58 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -469,7 +469,6 @@ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm) | |||
469 | return false; | 469 | return false; |
470 | } | 470 | } |
471 | 471 | ||
472 | |||
473 | #ifdef CONFIG_MMU | 472 | #ifdef CONFIG_MMU |
474 | /* | 473 | /* |
475 | * OOM Reaper kernel thread which tries to reap the memory used by the OOM | 474 | * OOM Reaper kernel thread which tries to reap the memory used by the OOM |
@@ -480,16 +479,54 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); | |||
480 | static struct task_struct *oom_reaper_list; | 479 | static struct task_struct *oom_reaper_list; |
481 | static DEFINE_SPINLOCK(oom_reaper_lock); | 480 | static DEFINE_SPINLOCK(oom_reaper_lock); |
482 | 481 | ||
483 | static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) | 482 | void __oom_reap_task_mm(struct mm_struct *mm) |
484 | { | 483 | { |
485 | struct mmu_gather tlb; | ||
486 | struct vm_area_struct *vma; | 484 | struct vm_area_struct *vma; |
485 | |||
486 | /* | ||
487 | * Tell all users of get_user/copy_from_user etc... that the content | ||
488 | * is no longer stable. No barriers really needed because unmapping | ||
489 | * should imply barriers already and the reader would hit a page fault | ||
490 | * if it stumbled over a reaped memory. | ||
491 | */ | ||
492 | set_bit(MMF_UNSTABLE, &mm->flags); | ||
493 | |||
494 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { | ||
495 | if (!can_madv_dontneed_vma(vma)) | ||
496 | continue; | ||
497 | |||
498 | /* | ||
499 | * Only anonymous pages have a good chance to be dropped | ||
500 | * without additional steps which we cannot afford as we | ||
501 | * are OOM already. | ||
502 | * | ||
503 | * We do not even care about fs backed pages because all | ||
504 | * which are reclaimable have already been reclaimed and | ||
505 | * we do not want to block exit_mmap by keeping mm ref | ||
506 | * count elevated without a good reason. | ||
507 | */ | ||
508 | if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { | ||
509 | const unsigned long start = vma->vm_start; | ||
510 | const unsigned long end = vma->vm_end; | ||
511 | struct mmu_gather tlb; | ||
512 | |||
513 | tlb_gather_mmu(&tlb, mm, start, end); | ||
514 | mmu_notifier_invalidate_range_start(mm, start, end); | ||
515 | unmap_page_range(&tlb, vma, start, end, NULL); | ||
516 | mmu_notifier_invalidate_range_end(mm, start, end); | ||
517 | tlb_finish_mmu(&tlb, start, end); | ||
518 | } | ||
519 | } | ||
520 | } | ||
521 | |||
522 | static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) | ||
523 | { | ||
487 | bool ret = true; | 524 | bool ret = true; |
488 | 525 | ||
489 | /* | 526 | /* |
490 | * We have to make sure to not race with the victim exit path | 527 | * We have to make sure to not race with the victim exit path |
491 | * and cause premature new oom victim selection: | 528 | * and cause premature new oom victim selection: |
492 | * __oom_reap_task_mm exit_mm | 529 | * oom_reap_task_mm exit_mm |
493 | * mmget_not_zero | 530 | * mmget_not_zero |
494 | * mmput | 531 | * mmput |
495 | * atomic_dec_and_test | 532 | * atomic_dec_and_test |
@@ -534,39 +571,8 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) | |||
534 | 571 | ||
535 | trace_start_task_reaping(tsk->pid); | 572 | trace_start_task_reaping(tsk->pid); |
536 | 573 | ||
537 | /* | 574 | __oom_reap_task_mm(mm); |
538 | * Tell all users of get_user/copy_from_user etc... that the content | ||
539 | * is no longer stable. No barriers really needed because unmapping | ||
540 | * should imply barriers already and the reader would hit a page fault | ||
541 | * if it stumbled over a reaped memory. | ||
542 | */ | ||
543 | set_bit(MMF_UNSTABLE, &mm->flags); | ||
544 | |||
545 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { | ||
546 | if (!can_madv_dontneed_vma(vma)) | ||
547 | continue; | ||
548 | 575 | ||
549 | /* | ||
550 | * Only anonymous pages have a good chance to be dropped | ||
551 | * without additional steps which we cannot afford as we | ||
552 | * are OOM already. | ||
553 | * | ||
554 | * We do not even care about fs backed pages because all | ||
555 | * which are reclaimable have already been reclaimed and | ||
556 | * we do not want to block exit_mmap by keeping mm ref | ||
557 | * count elevated without a good reason. | ||
558 | */ | ||
559 | if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { | ||
560 | const unsigned long start = vma->vm_start; | ||
561 | const unsigned long end = vma->vm_end; | ||
562 | |||
563 | tlb_gather_mmu(&tlb, mm, start, end); | ||
564 | mmu_notifier_invalidate_range_start(mm, start, end); | ||
565 | unmap_page_range(&tlb, vma, start, end, NULL); | ||
566 | mmu_notifier_invalidate_range_end(mm, start, end); | ||
567 | tlb_finish_mmu(&tlb, start, end); | ||
568 | } | ||
569 | } | ||
570 | pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", | 576 | pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", |
571 | task_pid_nr(tsk), tsk->comm, | 577 | task_pid_nr(tsk), tsk->comm, |
572 | K(get_mm_counter(mm, MM_ANONPAGES)), | 578 | K(get_mm_counter(mm, MM_ANONPAGES)), |
@@ -587,14 +593,13 @@ static void oom_reap_task(struct task_struct *tsk) | |||
587 | struct mm_struct *mm = tsk->signal->oom_mm; | 593 | struct mm_struct *mm = tsk->signal->oom_mm; |
588 | 594 | ||
589 | /* Retry the down_read_trylock(mmap_sem) a few times */ | 595 | /* Retry the down_read_trylock(mmap_sem) a few times */ |
590 | while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm)) | 596 | while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm)) |
591 | schedule_timeout_idle(HZ/10); | 597 | schedule_timeout_idle(HZ/10); |
592 | 598 | ||
593 | if (attempts <= MAX_OOM_REAP_RETRIES || | 599 | if (attempts <= MAX_OOM_REAP_RETRIES || |
594 | test_bit(MMF_OOM_SKIP, &mm->flags)) | 600 | test_bit(MMF_OOM_SKIP, &mm->flags)) |
595 | goto done; | 601 | goto done; |
596 | 602 | ||
597 | |||
598 | pr_info("oom_reaper: unable to reap pid:%d (%s)\n", | 603 | pr_info("oom_reaper: unable to reap pid:%d (%s)\n", |
599 | task_pid_nr(tsk), tsk->comm); | 604 | task_pid_nr(tsk), tsk->comm); |
600 | debug_show_all_locks(); | 605 | debug_show_all_locks(); |
diff --git a/mm/sparse.c b/mm/sparse.c index 62eef264a7bd..73dc2fcc0eab 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -629,7 +629,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) | |||
629 | unsigned long pfn; | 629 | unsigned long pfn; |
630 | 630 | ||
631 | for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { | 631 | for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { |
632 | unsigned long section_nr = pfn_to_section_nr(start_pfn); | 632 | unsigned long section_nr = pfn_to_section_nr(pfn); |
633 | struct mem_section *ms; | 633 | struct mem_section *ms; |
634 | 634 | ||
635 | /* | 635 | /* |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 536332e988b8..a2b9518980ce 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -1161,7 +1161,7 @@ const char * const vmstat_text[] = { | |||
1161 | "nr_vmscan_immediate_reclaim", | 1161 | "nr_vmscan_immediate_reclaim", |
1162 | "nr_dirtied", | 1162 | "nr_dirtied", |
1163 | "nr_written", | 1163 | "nr_written", |
1164 | "nr_indirectly_reclaimable", | 1164 | "", /* nr_indirectly_reclaimable */ |
1165 | 1165 | ||
1166 | /* enum writeback_stat_item counters */ | 1166 | /* enum writeback_stat_item counters */ |
1167 | "nr_dirty_threshold", | 1167 | "nr_dirty_threshold", |
@@ -1740,6 +1740,10 @@ static int vmstat_show(struct seq_file *m, void *arg) | |||
1740 | unsigned long *l = arg; | 1740 | unsigned long *l = arg; |
1741 | unsigned long off = l - (unsigned long *)m->private; | 1741 | unsigned long off = l - (unsigned long *)m->private; |
1742 | 1742 | ||
1743 | /* Skip hidden vmstat items. */ | ||
1744 | if (*vmstat_text[off] == '\0') | ||
1745 | return 0; | ||
1746 | |||
1743 | seq_puts(m, vmstat_text[off]); | 1747 | seq_puts(m, vmstat_text[off]); |
1744 | seq_put_decimal_ull(m, " ", *l); | 1748 | seq_put_decimal_ull(m, " ", *l); |
1745 | seq_putc(m, '\n'); | 1749 | seq_putc(m, '\n'); |
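[Editor's note] The mm/vmstat.c change keeps the nr_indirectly_reclaimable slot in vmstat_text so the counter indices stay aligned, but blanks its name and teaches vmstat_show() to skip entries whose name is the empty string. A small sketch of that hide-by-empty-name idea, with placeholder item names:

    #include <stdio.h>

    static const char *const names[] = {
        "nr_dirtied",
        "nr_written",
        "",                     /* hidden item: slot kept, name blanked */
        "nr_dirty_threshold",
    };
    static const unsigned long values[] = { 10, 20, 30, 40 };

    int main(void)
    {
        for (unsigned long i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            /* Skip hidden items; because the slot still exists, the
             * name array and the value array stay index-aligned.
             */
            if (names[i][0] == '\0')
                continue;
            printf("%s %lu\n", names[i], values[i]);
        }
        return 0;
    }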
diff --git a/mm/z3fold.c b/mm/z3fold.c index c0bca6153b95..4b366d181f35 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
@@ -144,7 +144,8 @@ enum z3fold_page_flags { | |||
144 | PAGE_HEADLESS = 0, | 144 | PAGE_HEADLESS = 0, |
145 | MIDDLE_CHUNK_MAPPED, | 145 | MIDDLE_CHUNK_MAPPED, |
146 | NEEDS_COMPACTING, | 146 | NEEDS_COMPACTING, |
147 | PAGE_STALE | 147 | PAGE_STALE, |
148 | UNDER_RECLAIM | ||
148 | }; | 149 | }; |
149 | 150 | ||
150 | /***************** | 151 | /***************** |
@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, | |||
173 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); | 174 | clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); |
174 | clear_bit(NEEDS_COMPACTING, &page->private); | 175 | clear_bit(NEEDS_COMPACTING, &page->private); |
175 | clear_bit(PAGE_STALE, &page->private); | 176 | clear_bit(PAGE_STALE, &page->private); |
177 | clear_bit(UNDER_RECLAIM, &page->private); | ||
176 | 178 | ||
177 | spin_lock_init(&zhdr->page_lock); | 179 | spin_lock_init(&zhdr->page_lock); |
178 | kref_init(&zhdr->refcount); | 180 | kref_init(&zhdr->refcount); |
@@ -756,6 +758,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
756 | atomic64_dec(&pool->pages_nr); | 758 | atomic64_dec(&pool->pages_nr); |
757 | return; | 759 | return; |
758 | } | 760 | } |
761 | if (test_bit(UNDER_RECLAIM, &page->private)) { | ||
762 | z3fold_page_unlock(zhdr); | ||
763 | return; | ||
764 | } | ||
759 | if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { | 765 | if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { |
760 | z3fold_page_unlock(zhdr); | 766 | z3fold_page_unlock(zhdr); |
761 | return; | 767 | return; |
@@ -840,6 +846,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
840 | kref_get(&zhdr->refcount); | 846 | kref_get(&zhdr->refcount); |
841 | list_del_init(&zhdr->buddy); | 847 | list_del_init(&zhdr->buddy); |
842 | zhdr->cpu = -1; | 848 | zhdr->cpu = -1; |
849 | set_bit(UNDER_RECLAIM, &page->private); | ||
850 | break; | ||
843 | } | 851 | } |
844 | 852 | ||
845 | list_del_init(&page->lru); | 853 | list_del_init(&page->lru); |
@@ -887,25 +895,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
887 | goto next; | 895 | goto next; |
888 | } | 896 | } |
889 | next: | 897 | next: |
890 | spin_lock(&pool->lock); | ||
891 | if (test_bit(PAGE_HEADLESS, &page->private)) { | 898 | if (test_bit(PAGE_HEADLESS, &page->private)) { |
892 | if (ret == 0) { | 899 | if (ret == 0) { |
893 | spin_unlock(&pool->lock); | ||
894 | free_z3fold_page(page); | 900 | free_z3fold_page(page); |
895 | return 0; | 901 | return 0; |
896 | } | 902 | } |
897 | } else if (kref_put(&zhdr->refcount, release_z3fold_page)) { | 903 | spin_lock(&pool->lock); |
898 | atomic64_dec(&pool->pages_nr); | 904 | list_add(&page->lru, &pool->lru); |
905 | spin_unlock(&pool->lock); | ||
906 | } else { | ||
907 | z3fold_page_lock(zhdr); | ||
908 | clear_bit(UNDER_RECLAIM, &page->private); | ||
909 | if (kref_put(&zhdr->refcount, | ||
910 | release_z3fold_page_locked)) { | ||
911 | atomic64_dec(&pool->pages_nr); | ||
912 | return 0; | ||
913 | } | ||
914 | /* | ||
915 | * if we are here, the page is still not completely | ||
916 | * free. Take the global pool lock then to be able | ||
917 | * to add it back to the lru list | ||
918 | */ | ||
919 | spin_lock(&pool->lock); | ||
920 | list_add(&page->lru, &pool->lru); | ||
899 | spin_unlock(&pool->lock); | 921 | spin_unlock(&pool->lock); |
900 | return 0; | 922 | z3fold_page_unlock(zhdr); |
901 | } | 923 | } |
902 | 924 | ||
903 | /* | 925 | /* We started off locked to we need to lock the pool back */ |
904 | * Add to the beginning of LRU. | 926 | spin_lock(&pool->lock); |
905 | * Pool lock has to be kept here to ensure the page has | ||
906 | * not already been released | ||
907 | */ | ||
908 | list_add(&page->lru, &pool->lru); | ||
909 | } | 927 | } |
910 | spin_unlock(&pool->lock); | 928 | spin_unlock(&pool->lock); |
911 | return -EAGAIN; | 929 | return -EAGAIN; |
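[Editor's note] The mm/z3fold.c hunks introduce an UNDER_RECLAIM page flag: reclaim sets it while it evicts objects from a page, z3fold_free() backs off if it sees the bit instead of releasing a page reclaim still holds, and reclaim later clears the bit and drops the reference on the locked header itself. A minimal sketch of that flag-guarded free, using C11 atomics in place of the kernel's page->private bit helpers; the object structure and function names are illustrative only:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct object {
        atomic_bool under_reclaim;
        atomic_int  refcount;
    };

    static void put_object(struct object *o, const char *who)
    {
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
            printf("%s: last reference dropped, object released\n", who);
    }

    /* Free path: refuse to release an object that reclaim currently owns. */
    static void free_object(struct object *o)
    {
        if (atomic_load(&o->under_reclaim)) {
            printf("free: object under reclaim, leaving it to reclaim\n");
            return;
        }
        put_object(o, "free");
    }

    /* Reclaim path: mark the object busy, do the eviction work, then
     * clear the flag and drop the reference it has been holding.
     */
    static void reclaim_object(struct object *o)
    {
        atomic_store(&o->under_reclaim, true);
        /* ... eviction work would happen here ... */
        atomic_store(&o->under_reclaim, false);
        put_object(o, "reclaim");
    }

    int main(void)
    {
        struct object o;

        atomic_init(&o.under_reclaim, true);   /* reclaim already started */
        atomic_init(&o.refcount, 1);

        free_object(&o);       /* a concurrent free backs off */
        reclaim_object(&o);    /* reclaim finishes, clears the flag, releases */
        return 0;
    }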
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index 38aa6345bdfa..b718db2085b2 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | 17 | ||
18 | /** | 18 | /** |
19 | * p9_release_req_pages - Release pages after the transaction. | 19 | * p9_release_pages - Release pages after the transaction. |
20 | */ | 20 | */ |
21 | void p9_release_pages(struct page **pages, int nr_pages) | 21 | void p9_release_pages(struct page **pages, int nr_pages) |
22 | { | 22 | { |
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 0cfba919d167..848969fe7979 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c | |||
@@ -1092,8 +1092,8 @@ static struct p9_trans_module p9_fd_trans = { | |||
1092 | }; | 1092 | }; |
1093 | 1093 | ||
1094 | /** | 1094 | /** |
1095 | * p9_poll_proc - poll worker thread | 1095 | * p9_poll_workfn - poll worker thread |
1096 | * @a: thread state and arguments | 1096 | * @work: work queue |
1097 | * | 1097 | * |
1098 | * polls all v9fs transports for new events and queues the appropriate | 1098 | * polls all v9fs transports for new events and queues the appropriate |
1099 | * work to the work queue | 1099 | * work to the work queue |
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 6d8e3031978f..3d414acb7015 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c | |||
@@ -68,8 +68,6 @@ | |||
68 | * @pd: Protection Domain pointer | 68 | * @pd: Protection Domain pointer |
69 | * @qp: Queue Pair pointer | 69 | * @qp: Queue Pair pointer |
70 | * @cq: Completion Queue pointer | 70 | * @cq: Completion Queue pointer |
71 | * @dm_mr: DMA Memory Region pointer | ||
72 | * @lkey: The local access only memory region key | ||
73 | * @timeout: Number of uSecs to wait for connection management events | 71 | * @timeout: Number of uSecs to wait for connection management events |
74 | * @privport: Whether a privileged port may be used | 72 | * @privport: Whether a privileged port may be used |
75 | * @port: The port to use | 73 | * @port: The port to use |
@@ -632,7 +630,7 @@ static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma) | |||
632 | } | 630 | } |
633 | 631 | ||
634 | /** | 632 | /** |
635 | * trans_create_rdma - Transport method for creating atransport instance | 633 | * rdma_create_trans - Transport method for creating a transport instance |
636 | * @client: client instance | 634 | * @client: client instance |
637 | * @addr: IP address string | 635 | * @addr: IP address string |
638 | * @args: Mount options string | 636 | * @args: Mount options string |
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 3aa5a93ad107..4d0372263e5d 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -60,7 +60,6 @@ static atomic_t vp_pinned = ATOMIC_INIT(0); | |||
60 | 60 | ||
61 | /** | 61 | /** |
62 | * struct virtio_chan - per-instance transport information | 62 | * struct virtio_chan - per-instance transport information |
63 | * @initialized: whether the channel is initialized | ||
64 | * @inuse: whether the channel is in use | 63 | * @inuse: whether the channel is in use |
65 | * @lock: protects multiple elements within this structure | 64 | * @lock: protects multiple elements within this structure |
66 | * @client: client instance | 65 | * @client: client instance |
@@ -385,8 +384,8 @@ static int p9_get_mapped_pages(struct virtio_chan *chan, | |||
385 | * @uidata: user bffer that should be ued for zero copy read | 384 | * @uidata: user bffer that should be ued for zero copy read |
386 | * @uodata: user buffer that shoud be user for zero copy write | 385 | * @uodata: user buffer that shoud be user for zero copy write |
387 | * @inlen: read buffer size | 386 | * @inlen: read buffer size |
388 | * @olen: write buffer size | 387 | * @outlen: write buffer size |
389 | * @hdrlen: reader header size, This is the size of response protocol data | 388 | * @in_hdr_len: reader header size, This is the size of response protocol data |
390 | * | 389 | * |
391 | */ | 390 | */ |
392 | static int | 391 | static int |
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 086a4abdfa7c..0f19960390a6 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c | |||
@@ -485,7 +485,7 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, | |||
485 | 485 | ||
486 | static int xen_9pfs_front_resume(struct xenbus_device *dev) | 486 | static int xen_9pfs_front_resume(struct xenbus_device *dev) |
487 | { | 487 | { |
488 | dev_warn(&dev->dev, "suspsend/resume unsupported\n"); | 488 | dev_warn(&dev->dev, "suspend/resume unsupported\n"); |
489 | return 0; | 489 | return 0; |
490 | } | 490 | } |
491 | 491 | ||
diff --git a/net/atm/lec.c b/net/atm/lec.c index 01d5d20a6eb1..3138a869b5c0 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 }; | |||
41 | #include <linux/module.h> | 41 | #include <linux/module.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | 43 | ||
44 | /* Hardening for Spectre-v1 */ | ||
45 | #include <linux/nospec.h> | ||
46 | |||
44 | #include "lec.h" | 47 | #include "lec.h" |
45 | #include "lec_arpc.h" | 48 | #include "lec_arpc.h" |
46 | #include "resources.h" | 49 | #include "resources.h" |
@@ -687,8 +690,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) | |||
687 | bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); | 690 | bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); |
688 | if (bytes_left != 0) | 691 | if (bytes_left != 0) |
689 | pr_info("copy from user failed for %d bytes\n", bytes_left); | 692 | pr_info("copy from user failed for %d bytes\n", bytes_left); |
690 | if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || | 693 | if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF) |
691 | !dev_lec[ioc_data.dev_num]) | 694 | return -EINVAL; |
695 | ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF); | ||
696 | if (!dev_lec[ioc_data.dev_num]) | ||
692 | return -EINVAL; | 697 | return -EINVAL; |
693 | vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); | 698 | vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); |
694 | if (!vpriv) | 699 | if (!vpriv) |
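[Editor's note] The net/atm/lec.c change is Spectre-v1 hardening: the user-supplied dev_num is first range-checked and rejected with -EINVAL, then passed through array_index_nospec() before it is ever used to index dev_lec[], so a mispredicted branch cannot speculatively read out of bounds. A userspace sketch of the check-then-clamp ordering; clamp_index() here is only a stand-in for the kernel helper, which does the clamping branchlessly with a mask:

    #include <stdio.h>

    #define MAX_ITF 48

    static int devices[MAX_ITF];

    /* Illustrative stand-in for array_index_nospec(): once the bounds check
     * has passed architecturally, force any out-of-range value to 0 so even
     * a speculated access cannot run past the array.
     */
    static unsigned long clamp_index(unsigned long idx, unsigned long size)
    {
        return idx < size ? idx : 0;
    }

    static int lookup(long dev_num)
    {
        if (dev_num < 0 || dev_num >= MAX_ITF)
            return -1;                          /* reject before any array use */
        dev_num = clamp_index((unsigned long)dev_num, MAX_ITF);
        return devices[dev_num];
    }

    int main(void)
    {
        devices[3] = 42;
        printf("lookup(3)  -> %d\n", lookup(3));
        printf("lookup(99) -> %d\n", lookup(99));
        return 0;
    }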
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ea2a6c9fb7ce..d2667e5dddc3 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -157,10 +157,12 @@ static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, | |||
157 | #endif /* CONFIG_BLOCK */ | 157 | #endif /* CONFIG_BLOCK */ |
158 | 158 | ||
159 | static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data, | 159 | static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data, |
160 | struct ceph_bvec_iter *bvec_pos) | 160 | struct ceph_bvec_iter *bvec_pos, |
161 | u32 num_bvecs) | ||
161 | { | 162 | { |
162 | osd_data->type = CEPH_OSD_DATA_TYPE_BVECS; | 163 | osd_data->type = CEPH_OSD_DATA_TYPE_BVECS; |
163 | osd_data->bvec_pos = *bvec_pos; | 164 | osd_data->bvec_pos = *bvec_pos; |
165 | osd_data->num_bvecs = num_bvecs; | ||
164 | } | 166 | } |
165 | 167 | ||
166 | #define osd_req_op_data(oreq, whch, typ, fld) \ | 168 | #define osd_req_op_data(oreq, whch, typ, fld) \ |
@@ -237,6 +239,22 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, | |||
237 | EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); | 239 | EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); |
238 | #endif /* CONFIG_BLOCK */ | 240 | #endif /* CONFIG_BLOCK */ |
239 | 241 | ||
242 | void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, | ||
243 | unsigned int which, | ||
244 | struct bio_vec *bvecs, u32 num_bvecs, | ||
245 | u32 bytes) | ||
246 | { | ||
247 | struct ceph_osd_data *osd_data; | ||
248 | struct ceph_bvec_iter it = { | ||
249 | .bvecs = bvecs, | ||
250 | .iter = { .bi_size = bytes }, | ||
251 | }; | ||
252 | |||
253 | osd_data = osd_req_op_data(osd_req, which, extent, osd_data); | ||
254 | ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs); | ||
255 | } | ||
256 | EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs); | ||
257 | |||
240 | void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, | 258 | void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, |
241 | unsigned int which, | 259 | unsigned int which, |
242 | struct ceph_bvec_iter *bvec_pos) | 260 | struct ceph_bvec_iter *bvec_pos) |
@@ -244,7 +262,7 @@ void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, | |||
244 | struct ceph_osd_data *osd_data; | 262 | struct ceph_osd_data *osd_data; |
245 | 263 | ||
246 | osd_data = osd_req_op_data(osd_req, which, extent, osd_data); | 264 | osd_data = osd_req_op_data(osd_req, which, extent, osd_data); |
247 | ceph_osd_data_bvecs_init(osd_data, bvec_pos); | 265 | ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0); |
248 | } | 266 | } |
249 | EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos); | 267 | EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos); |
250 | 268 | ||
@@ -287,7 +305,8 @@ EXPORT_SYMBOL(osd_req_op_cls_request_data_pages); | |||
287 | 305 | ||
288 | void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, | 306 | void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, |
289 | unsigned int which, | 307 | unsigned int which, |
290 | struct bio_vec *bvecs, u32 bytes) | 308 | struct bio_vec *bvecs, u32 num_bvecs, |
309 | u32 bytes) | ||
291 | { | 310 | { |
292 | struct ceph_osd_data *osd_data; | 311 | struct ceph_osd_data *osd_data; |
293 | struct ceph_bvec_iter it = { | 312 | struct ceph_bvec_iter it = { |
@@ -296,7 +315,7 @@ void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, | |||
296 | }; | 315 | }; |
297 | 316 | ||
298 | osd_data = osd_req_op_data(osd_req, which, cls, request_data); | 317 | osd_data = osd_req_op_data(osd_req, which, cls, request_data); |
299 | ceph_osd_data_bvecs_init(osd_data, &it); | 318 | ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs); |
300 | osd_req->r_ops[which].cls.indata_len += bytes; | 319 | osd_req->r_ops[which].cls.indata_len += bytes; |
301 | osd_req->r_ops[which].indata_len += bytes; | 320 | osd_req->r_ops[which].indata_len += bytes; |
302 | } | 321 | } |
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h index b8d95cb71c25..44a7e16bf3b5 100644 --- a/net/ieee802154/6lowpan/6lowpan_i.h +++ b/net/ieee802154/6lowpan/6lowpan_i.h | |||
@@ -20,8 +20,8 @@ typedef unsigned __bitwise lowpan_rx_result; | |||
20 | struct frag_lowpan_compare_key { | 20 | struct frag_lowpan_compare_key { |
21 | u16 tag; | 21 | u16 tag; |
22 | u16 d_size; | 22 | u16 d_size; |
23 | const struct ieee802154_addr src; | 23 | struct ieee802154_addr src; |
24 | const struct ieee802154_addr dst; | 24 | struct ieee802154_addr dst; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | /* Equivalent of ipv4 struct ipq | 27 | /* Equivalent of ipv4 struct ipq |
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index 1790b65944b3..2cc224106b69 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c | |||
@@ -75,14 +75,14 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb, | |||
75 | { | 75 | { |
76 | struct netns_ieee802154_lowpan *ieee802154_lowpan = | 76 | struct netns_ieee802154_lowpan *ieee802154_lowpan = |
77 | net_ieee802154_lowpan(net); | 77 | net_ieee802154_lowpan(net); |
78 | struct frag_lowpan_compare_key key = { | 78 | struct frag_lowpan_compare_key key = {}; |
79 | .tag = cb->d_tag, | ||
80 | .d_size = cb->d_size, | ||
81 | .src = *src, | ||
82 | .dst = *dst, | ||
83 | }; | ||
84 | struct inet_frag_queue *q; | 79 | struct inet_frag_queue *q; |
85 | 80 | ||
81 | key.tag = cb->d_tag; | ||
82 | key.d_size = cb->d_size; | ||
83 | key.src = *src; | ||
84 | key.dst = *dst; | ||
85 | |||
86 | q = inet_frag_find(&ieee802154_lowpan->frags, &key); | 86 | q = inet_frag_find(&ieee802154_lowpan->frags, &key); |
87 | if (!q) | 87 | if (!q) |
88 | return NULL; | 88 | return NULL; |
@@ -372,7 +372,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type) | |||
372 | struct lowpan_frag_queue *fq; | 372 | struct lowpan_frag_queue *fq; |
373 | struct net *net = dev_net(skb->dev); | 373 | struct net *net = dev_net(skb->dev); |
374 | struct lowpan_802154_cb *cb = lowpan_802154_cb(skb); | 374 | struct lowpan_802154_cb *cb = lowpan_802154_cb(skb); |
375 | struct ieee802154_hdr hdr; | 375 | struct ieee802154_hdr hdr = {}; |
376 | int err; | 376 | int err; |
377 | 377 | ||
378 | if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) | 378 | if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) |
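[Editor's note] The 6lowpan hunks drop the const qualifiers from frag_lowpan_compare_key, build the key by zero-initializing the whole struct before assigning its fields, and zero the peeked header as well. A plausible reading (not spelled out in the diff itself) is that the key is hashed and compared as raw bytes by inet_frag_find(), so any uninitialized padding would make otherwise-equal keys miss. The sketch below shows why a byte-wise compare cares about padding; the struct layout is invented so that padding exists, and memset is used to make the zeroing explicit:

    #include <stdio.h>
    #include <string.h>

    struct key {
        unsigned short tag;      /* 2 bytes, then 2 bytes of padding */
        unsigned int   d_size;   /* starts at offset 4 */
    };

    int main(void)
    {
        struct key a, b;

        memset(&a, 0xff, sizeof(a));     /* simulate stale stack contents */
        memset(&b, 0x00, sizeof(b));     /* fully zeroed, padding included */

        a.tag = 1; a.d_size = 64;
        b.tag = 1; b.d_size = 64;

        /* Field-wise the keys are identical, but a raw byte compare (what a
         * hash-table lookup over the whole struct effectively does) still
         * differs because of a's uninitialized padding bytes.
         */
        printf("memcmp says the keys %s\n",
               memcmp(&a, &b, sizeof(a)) == 0 ? "match" : "differ");
        return 0;
    }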
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 05e47d777009..56a010622f70 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
775 | ipc.addr = faddr = daddr; | 775 | ipc.addr = faddr = daddr; |
776 | 776 | ||
777 | if (ipc.opt && ipc.opt->opt.srr) { | 777 | if (ipc.opt && ipc.opt->opt.srr) { |
778 | if (!daddr) | 778 | if (!daddr) { |
779 | return -EINVAL; | 779 | err = -EINVAL; |
780 | goto out_free; | ||
781 | } | ||
780 | faddr = ipc.opt->opt.faddr; | 782 | faddr = ipc.opt->opt.faddr; |
781 | } | 783 | } |
782 | tos = get_rttos(&ipc, inet); | 784 | tos = get_rttos(&ipc, inet); |
@@ -842,6 +844,7 @@ back_from_confirm: | |||
842 | 844 | ||
843 | out: | 845 | out: |
844 | ip_rt_put(rt); | 846 | ip_rt_put(rt); |
847 | out_free: | ||
845 | if (free) | 848 | if (free) |
846 | kfree(ipc.opt); | 849 | kfree(ipc.opt); |
847 | if (!err) { | 850 | if (!err) { |
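[Editor's note] The net/ipv4/ping.c change (and the matching udp.c hunk further down) converts an early "return -EINVAL" into "goto out_free" so the option block copied in from userspace is still freed on the error path. A compact sketch of that single-exit cleanup style; send_one() and its arguments are invented for the example:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int send_one(const char *opts, int have_daddr)
    {
        int err = 0;
        char *opt_copy = NULL;

        if (opts) {
            opt_copy = strdup(opts);      /* like the copied-in IP options */
            if (!opt_copy)
                return -1;
        }

        if (opt_copy && !have_daddr) {
            err = -2;                     /* invalid combination ...        */
            goto out_free;                /* ... but still free opt_copy    */
        }

        printf("sending with options: %s\n", opt_copy ? opt_copy : "(none)");

    out_free:
        free(opt_copy);
        return err;
    }

    int main(void)
    {
        send_one("srr", 1);
        send_one("srr", 0);   /* error path: no leak thanks to the goto */
        return 0;
    }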
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 1412a7baf0b9..29268efad247 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1375,6 +1375,7 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, | |||
1375 | fnhe->fnhe_gw = 0; | 1375 | fnhe->fnhe_gw = 0; |
1376 | fnhe->fnhe_pmtu = 0; | 1376 | fnhe->fnhe_pmtu = 0; |
1377 | fnhe->fnhe_expires = 0; | 1377 | fnhe->fnhe_expires = 0; |
1378 | fnhe->fnhe_mtu_locked = false; | ||
1378 | fnhe_flush_routes(fnhe); | 1379 | fnhe_flush_routes(fnhe); |
1379 | orig = NULL; | 1380 | orig = NULL; |
1380 | } | 1381 | } |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 24b5c59b1c53..b61a770884fa 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -401,9 +401,9 @@ static int compute_score(struct sock *sk, struct net *net, | |||
401 | bool dev_match = (sk->sk_bound_dev_if == dif || | 401 | bool dev_match = (sk->sk_bound_dev_if == dif || |
402 | sk->sk_bound_dev_if == sdif); | 402 | sk->sk_bound_dev_if == sdif); |
403 | 403 | ||
404 | if (exact_dif && !dev_match) | 404 | if (!dev_match) |
405 | return -1; | 405 | return -1; |
406 | if (sk->sk_bound_dev_if && dev_match) | 406 | if (sk->sk_bound_dev_if) |
407 | score += 4; | 407 | score += 4; |
408 | } | 408 | } |
409 | 409 | ||
@@ -952,8 +952,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
952 | sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); | 952 | sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); |
953 | 953 | ||
954 | if (ipc.opt && ipc.opt->opt.srr) { | 954 | if (ipc.opt && ipc.opt->opt.srr) { |
955 | if (!daddr) | 955 | if (!daddr) { |
956 | return -EINVAL; | 956 | err = -EINVAL; |
957 | goto out_free; | ||
958 | } | ||
957 | faddr = ipc.opt->opt.faddr; | 959 | faddr = ipc.opt->opt.faddr; |
958 | connected = 0; | 960 | connected = 0; |
959 | } | 961 | } |
@@ -1074,6 +1076,7 @@ do_append_data: | |||
1074 | 1076 | ||
1075 | out: | 1077 | out: |
1076 | ip_rt_put(rt); | 1078 | ip_rt_put(rt); |
1079 | out_free: | ||
1077 | if (free) | 1080 | if (free) |
1078 | kfree(ipc.opt); | 1081 | kfree(ipc.opt); |
1079 | if (!err) | 1082 | if (!err) |
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 6794ddf0547c..11e4e80cf7e9 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
@@ -34,16 +34,15 @@ config IPV6_ROUTE_INFO | |||
34 | bool "IPv6: Route Information (RFC 4191) support" | 34 | bool "IPv6: Route Information (RFC 4191) support" |
35 | depends on IPV6_ROUTER_PREF | 35 | depends on IPV6_ROUTER_PREF |
36 | ---help--- | 36 | ---help--- |
37 | This is experimental support of Route Information. | 37 | Support of Route Information. |
38 | 38 | ||
39 | If unsure, say N. | 39 | If unsure, say N. |
40 | 40 | ||
41 | config IPV6_OPTIMISTIC_DAD | 41 | config IPV6_OPTIMISTIC_DAD |
42 | bool "IPv6: Enable RFC 4429 Optimistic DAD" | 42 | bool "IPv6: Enable RFC 4429 Optimistic DAD" |
43 | ---help--- | 43 | ---help--- |
44 | This is experimental support for optimistic Duplicate | 44 | Support for optimistic Duplicate Address Detection. It allows for |
45 | Address Detection. It allows for autoconfigured addresses | 45 | autoconfigured addresses to be used more quickly. |
46 | to be used more quickly. | ||
47 | 46 | ||
48 | If unsure, say N. | 47 | If unsure, say N. |
49 | 48 | ||
@@ -280,7 +279,7 @@ config IPV6_MROUTE | |||
280 | depends on IPV6 | 279 | depends on IPV6 |
281 | select IP_MROUTE_COMMON | 280 | select IP_MROUTE_COMMON |
282 | ---help--- | 281 | ---help--- |
283 | Experimental support for IPv6 multicast forwarding. | 282 | Support for IPv6 multicast forwarding. |
284 | If unsure, say N. | 283 | If unsure, say N. |
285 | 284 | ||
286 | config IPV6_MROUTE_MULTIPLE_TABLES | 285 | config IPV6_MROUTE_MULTIPLE_TABLES |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index c214ffec02f0..ca957dd93a29 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -669,7 +669,7 @@ static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu) | |||
669 | else | 669 | else |
670 | mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr); | 670 | mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr); |
671 | 671 | ||
672 | dev->mtu = max_t(int, mtu, IPV6_MIN_MTU); | 672 | dev->mtu = max_t(int, mtu, IPV4_MIN_MTU); |
673 | } | 673 | } |
674 | 674 | ||
675 | /** | 675 | /** |
@@ -881,7 +881,7 @@ static void vti6_dev_setup(struct net_device *dev) | |||
881 | dev->priv_destructor = vti6_dev_free; | 881 | dev->priv_destructor = vti6_dev_free; |
882 | 882 | ||
883 | dev->type = ARPHRD_TUNNEL6; | 883 | dev->type = ARPHRD_TUNNEL6; |
884 | dev->min_mtu = IPV6_MIN_MTU; | 884 | dev->min_mtu = IPV4_MIN_MTU; |
885 | dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr); | 885 | dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr); |
886 | dev->flags |= IFF_NOARP; | 886 | dev->flags |= IFF_NOARP; |
887 | dev->addr_len = sizeof(struct in6_addr); | 887 | dev->addr_len = sizeof(struct in6_addr); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4ec76a87aeb8..ea0730028e5d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -148,9 +148,9 @@ static int compute_score(struct sock *sk, struct net *net, | |||
148 | bool dev_match = (sk->sk_bound_dev_if == dif || | 148 | bool dev_match = (sk->sk_bound_dev_if == dif || |
149 | sk->sk_bound_dev_if == sdif); | 149 | sk->sk_bound_dev_if == sdif); |
150 | 150 | ||
151 | if (exact_dif && !dev_match) | 151 | if (!dev_match) |
152 | return -1; | 152 | return -1; |
153 | if (sk->sk_bound_dev_if && dev_match) | 153 | if (sk->sk_bound_dev_if) |
154 | score++; | 154 | score++; |
155 | } | 155 | } |
156 | 156 | ||
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index f85f0d7480ac..4a46df8441c9 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -341,6 +341,9 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net) | |||
341 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); | 341 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); |
342 | unsigned int i; | 342 | unsigned int i; |
343 | 343 | ||
344 | xfrm_state_flush(net, IPSEC_PROTO_ANY, false); | ||
345 | xfrm_flush_gc(); | ||
346 | |||
344 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) | 347 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) |
345 | WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); | 348 | WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); |
346 | 349 | ||
diff --git a/net/key/af_key.c b/net/key/af_key.c index 7e2e7188e7f4..e62e52e8f141 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -437,6 +437,24 @@ static int verify_address_len(const void *p) | |||
437 | return 0; | 437 | return 0; |
438 | } | 438 | } |
439 | 439 | ||
440 | static inline int sadb_key_len(const struct sadb_key *key) | ||
441 | { | ||
442 | int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8); | ||
443 | |||
444 | return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes, | ||
445 | sizeof(uint64_t)); | ||
446 | } | ||
447 | |||
448 | static int verify_key_len(const void *p) | ||
449 | { | ||
450 | const struct sadb_key *key = p; | ||
451 | |||
452 | if (sadb_key_len(key) > key->sadb_key_len) | ||
453 | return -EINVAL; | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
440 | static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx) | 458 | static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx) |
441 | { | 459 | { |
442 | return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) + | 460 | return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) + |
@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void * | |||
533 | return -EINVAL; | 551 | return -EINVAL; |
534 | if (ext_hdrs[ext_type-1] != NULL) | 552 | if (ext_hdrs[ext_type-1] != NULL) |
535 | return -EINVAL; | 553 | return -EINVAL; |
536 | if (ext_type == SADB_EXT_ADDRESS_SRC || | 554 | switch (ext_type) { |
537 | ext_type == SADB_EXT_ADDRESS_DST || | 555 | case SADB_EXT_ADDRESS_SRC: |
538 | ext_type == SADB_EXT_ADDRESS_PROXY || | 556 | case SADB_EXT_ADDRESS_DST: |
539 | ext_type == SADB_X_EXT_NAT_T_OA) { | 557 | case SADB_EXT_ADDRESS_PROXY: |
558 | case SADB_X_EXT_NAT_T_OA: | ||
540 | if (verify_address_len(p)) | 559 | if (verify_address_len(p)) |
541 | return -EINVAL; | 560 | return -EINVAL; |
542 | } | 561 | break; |
543 | if (ext_type == SADB_X_EXT_SEC_CTX) { | 562 | case SADB_X_EXT_SEC_CTX: |
544 | if (verify_sec_ctx_len(p)) | 563 | if (verify_sec_ctx_len(p)) |
545 | return -EINVAL; | 564 | return -EINVAL; |
565 | break; | ||
566 | case SADB_EXT_KEY_AUTH: | ||
567 | case SADB_EXT_KEY_ENCRYPT: | ||
568 | if (verify_key_len(p)) | ||
569 | return -EINVAL; | ||
570 | break; | ||
571 | default: | ||
572 | break; | ||
546 | } | 573 | } |
547 | ext_hdrs[ext_type-1] = (void *) p; | 574 | ext_hdrs[ext_type-1] = (void *) p; |
548 | } | 575 | } |
@@ -1104,14 +1131,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, | |||
1104 | key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; | 1131 | key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; |
1105 | if (key != NULL && | 1132 | if (key != NULL && |
1106 | sa->sadb_sa_auth != SADB_X_AALG_NULL && | 1133 | sa->sadb_sa_auth != SADB_X_AALG_NULL && |
1107 | ((key->sadb_key_bits+7) / 8 == 0 || | 1134 | key->sadb_key_bits == 0) |
1108 | (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t))) | ||
1109 | return ERR_PTR(-EINVAL); | 1135 | return ERR_PTR(-EINVAL); |
1110 | key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1]; | 1136 | key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1]; |
1111 | if (key != NULL && | 1137 | if (key != NULL && |
1112 | sa->sadb_sa_encrypt != SADB_EALG_NULL && | 1138 | sa->sadb_sa_encrypt != SADB_EALG_NULL && |
1113 | ((key->sadb_key_bits+7) / 8 == 0 || | 1139 | key->sadb_key_bits == 0) |
1114 | (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t))) | ||
1115 | return ERR_PTR(-EINVAL); | 1140 | return ERR_PTR(-EINVAL); |
1116 | 1141 | ||
1117 | x = xfrm_state_alloc(net); | 1142 | x = xfrm_state_alloc(net); |
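[Editor's note] The net/key/af_key.c hunks add a verify_key_len() pass while the extension headers are parsed, and the later checks in pfkey_msg2xfrm_state() shrink to a simple "sadb_key_bits is zero" test. The rule being enforced: the key material (sadb_key_bits rounded up to bytes) plus the sadb_key header must fit within sadb_key_len, which PF_KEY expresses in 64-bit words. A standalone sketch of that arithmetic, with the struct reduced to the two fields the check needs (the real struct sadb_key has more members):

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct sadb_key_hdr {
        uint16_t sadb_key_len;    /* total length in 64-bit words */
        uint16_t sadb_key_bits;   /* key material length in bits  */
    };

    /* Minimum number of 64-bit words needed for header plus key material. */
    static int needed_words(const struct sadb_key_hdr *key)
    {
        int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);

        return DIV_ROUND_UP(sizeof(*key) + key_bytes, sizeof(uint64_t));
    }

    static int verify_key_len(const struct sadb_key_hdr *key)
    {
        return needed_words(key) > key->sadb_key_len ? -1 : 0;
    }

    int main(void)
    {
        struct sadb_key_hdr ok  = { .sadb_key_len = 3, .sadb_key_bits = 128 };
        struct sadb_key_hdr bad = { .sadb_key_len = 1, .sadb_key_bits = 128 };

        printf("128-bit key in 3 words: %s\n", verify_key_len(&ok)  ? "reject" : "accept");
        printf("128-bit key in 1 word:  %s\n", verify_key_len(&bad) ? "reject" : "accept");
        return 0;
    }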
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index cb80ebb38311..1beeea9549fa 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -930,6 +930,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |||
930 | if (size > llc->dev->mtu) | 930 | if (size > llc->dev->mtu) |
931 | size = llc->dev->mtu; | 931 | size = llc->dev->mtu; |
932 | copied = size - hdrlen; | 932 | copied = size - hdrlen; |
933 | rc = -EINVAL; | ||
934 | if (copied < 0) | ||
935 | goto release; | ||
933 | release_sock(sk); | 936 | release_sock(sk); |
934 | skb = sock_alloc_send_skb(sk, size, noblock, &rc); | 937 | skb = sock_alloc_send_skb(sk, size, noblock, &rc); |
935 | lock_sock(sk); | 938 | lock_sock(sk); |
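[Editor's note] The net/llc/af_llc.c fix validates a derived length: size is clamped to the device MTU, so if the MTU is smaller than the LLC header, "copied = size - hdrlen" goes negative and the old code carried that value onward; the new code bails out with -EINVAL. A tiny sketch of checking a computed payload size before using it (the numbers are arbitrary):

    #include <stdio.h>

    static int payload_room(int mtu, int hdrlen, int requested)
    {
        int size = requested > mtu ? mtu : requested;   /* clamp to the MTU      */
        int copied = size - hdrlen;                     /* room left for payload */

        if (copied < 0)
            return -1;                 /* even the header alone does not fit */
        return copied;
    }

    int main(void)
    {
        printf("mtu=1500 hdr=3 -> %d bytes of payload\n", payload_room(1500, 3, 2000));
        printf("mtu=2    hdr=3 -> %d (rejected)\n", payload_room(2, 3, 2000));
        return 0;
    }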
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 595c662a61e8..ac4295296514 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2010, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH |
11 | * Copyright (C) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -970,6 +971,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
970 | 971 | ||
971 | sta->ampdu_mlme.addba_req_num[tid] = 0; | 972 | sta->ampdu_mlme.addba_req_num[tid] = 0; |
972 | 973 | ||
974 | tid_tx->timeout = | ||
975 | le16_to_cpu(mgmt->u.action.u.addba_resp.timeout); | ||
976 | |||
973 | if (tid_tx->timeout) { | 977 | if (tid_tx->timeout) { |
974 | mod_timer(&tid_tx->session_timer, | 978 | mod_timer(&tid_tx->session_timer, |
975 | TU_TO_EXP_TIME(tid_tx->timeout)); | 979 | TU_TO_EXP_TIME(tid_tx->timeout)); |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 69449db7e283..233068756502 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) | 36 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) |
37 | #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) | 37 | #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) |
38 | #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) | 38 | #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) |
39 | #define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2) | ||
39 | #define IEEE80211_AUTH_MAX_TRIES 3 | 40 | #define IEEE80211_AUTH_MAX_TRIES 3 |
40 | #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) | 41 | #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) |
41 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) | 42 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) |
@@ -1787,7 +1788,7 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, | |||
1787 | params[ac].acm = acm; | 1788 | params[ac].acm = acm; |
1788 | params[ac].uapsd = uapsd; | 1789 | params[ac].uapsd = uapsd; |
1789 | 1790 | ||
1790 | if (params->cw_min == 0 || | 1791 | if (params[ac].cw_min == 0 || |
1791 | params[ac].cw_min > params[ac].cw_max) { | 1792 | params[ac].cw_min > params[ac].cw_max) { |
1792 | sdata_info(sdata, | 1793 | sdata_info(sdata, |
1793 | "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n", | 1794 | "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n", |
@@ -3814,16 +3815,19 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) | |||
3814 | tx_flags); | 3815 | tx_flags); |
3815 | 3816 | ||
3816 | if (tx_flags == 0) { | 3817 | if (tx_flags == 0) { |
3817 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; | 3818 | if (auth_data->algorithm == WLAN_AUTH_SAE) |
3818 | auth_data->timeout_started = true; | 3819 | auth_data->timeout = jiffies + |
3819 | run_again(sdata, auth_data->timeout); | 3820 | IEEE80211_AUTH_TIMEOUT_SAE; |
3821 | else | ||
3822 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; | ||
3820 | } else { | 3823 | } else { |
3821 | auth_data->timeout = | 3824 | auth_data->timeout = |
3822 | round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); | 3825 | round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); |
3823 | auth_data->timeout_started = true; | ||
3824 | run_again(sdata, auth_data->timeout); | ||
3825 | } | 3826 | } |
3826 | 3827 | ||
3828 | auth_data->timeout_started = true; | ||
3829 | run_again(sdata, auth_data->timeout); | ||
3830 | |||
3827 | return 0; | 3831 | return 0; |
3828 | } | 3832 | } |
3829 | 3833 | ||
@@ -3894,8 +3898,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) | |||
3894 | ifmgd->status_received = false; | 3898 | ifmgd->status_received = false; |
3895 | if (ifmgd->auth_data && ieee80211_is_auth(fc)) { | 3899 | if (ifmgd->auth_data && ieee80211_is_auth(fc)) { |
3896 | if (status_acked) { | 3900 | if (status_acked) { |
3897 | ifmgd->auth_data->timeout = | 3901 | if (ifmgd->auth_data->algorithm == |
3898 | jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; | 3902 | WLAN_AUTH_SAE) |
3903 | ifmgd->auth_data->timeout = | ||
3904 | jiffies + | ||
3905 | IEEE80211_AUTH_TIMEOUT_SAE; | ||
3906 | else | ||
3907 | ifmgd->auth_data->timeout = | ||
3908 | jiffies + | ||
3909 | IEEE80211_AUTH_TIMEOUT_SHORT; | ||
3899 | run_again(sdata, ifmgd->auth_data->timeout); | 3910 | run_again(sdata, ifmgd->auth_data->timeout); |
3900 | } else { | 3911 | } else { |
3901 | ifmgd->auth_data->timeout = jiffies - 1; | 3912 | ifmgd->auth_data->timeout = jiffies - 1; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 535de3161a78..05a265cd573d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 4 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
5 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> | 5 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> |
6 | * Copyright 2013-2014 Intel Mobile Communications GmbH | 6 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
7 | * Copyright (C) 2018 Intel Corporation | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -1135,7 +1136,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, | |||
1135 | } | 1136 | } |
1136 | 1137 | ||
1137 | /* reset session timer */ | 1138 | /* reset session timer */ |
1138 | if (reset_agg_timer && tid_tx->timeout) | 1139 | if (reset_agg_timer) |
1139 | tid_tx->last_tx = jiffies; | 1140 | tid_tx->last_tx = jiffies; |
1140 | 1141 | ||
1141 | return queued; | 1142 | return queued; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 55342c4d5cec..2e2dd88fc79f 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2606,13 +2606,13 @@ static int netlink_seq_show(struct seq_file *seq, void *v) | |||
2606 | { | 2606 | { |
2607 | if (v == SEQ_START_TOKEN) { | 2607 | if (v == SEQ_START_TOKEN) { |
2608 | seq_puts(seq, | 2608 | seq_puts(seq, |
2609 | "sk Eth Pid Groups " | 2609 | "sk Eth Pid Groups " |
2610 | "Rmem Wmem Dump Locks Drops Inode\n"); | 2610 | "Rmem Wmem Dump Locks Drops Inode\n"); |
2611 | } else { | 2611 | } else { |
2612 | struct sock *s = v; | 2612 | struct sock *s = v; |
2613 | struct netlink_sock *nlk = nlk_sk(s); | 2613 | struct netlink_sock *nlk = nlk_sk(s); |
2614 | 2614 | ||
2615 | seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n", | 2615 | seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8d %-8lu\n", |
2616 | s, | 2616 | s, |
2617 | s->sk_protocol, | 2617 | s->sk_protocol, |
2618 | nlk->portid, | 2618 | nlk->portid, |
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index d7da99a0b0b8..9696ef96b719 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c | |||
@@ -57,6 +57,8 @@ int nsh_pop(struct sk_buff *skb) | |||
57 | return -ENOMEM; | 57 | return -ENOMEM; |
58 | nh = (struct nshhdr *)(skb->data); | 58 | nh = (struct nshhdr *)(skb->data); |
59 | length = nsh_hdr_len(nh); | 59 | length = nsh_hdr_len(nh); |
60 | if (length < NSH_BASE_HDR_LEN) | ||
61 | return -EINVAL; | ||
60 | inner_proto = tun_p_to_eth_p(nh->np); | 62 | inner_proto = tun_p_to_eth_p(nh->np); |
61 | if (!pskb_may_pull(skb, length)) | 63 | if (!pskb_may_pull(skb, length)) |
62 | return -ENOMEM; | 64 | return -ENOMEM; |
@@ -90,6 +92,8 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, | |||
90 | if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN))) | 92 | if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN))) |
91 | goto out; | 93 | goto out; |
92 | nsh_len = nsh_hdr_len(nsh_hdr(skb)); | 94 | nsh_len = nsh_hdr_len(nsh_hdr(skb)); |
95 | if (nsh_len < NSH_BASE_HDR_LEN) | ||
96 | goto out; | ||
93 | if (unlikely(!pskb_may_pull(skb, nsh_len))) | 97 | if (unlikely(!pskb_may_pull(skb, nsh_len))) |
94 | goto out; | 98 | goto out; |
95 | 99 | ||
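[Editor's note] The net/nsh/nsh.c hunks distrust the header length decoded from the packet itself: nsh_hdr_len() comes from attacker-controlled bits, so both nsh_pop() and the GSO path now reject lengths smaller than NSH_BASE_HDR_LEN before pulling or skipping that many bytes. Below is a rough, NSH-like sketch of the validate-before-use pattern for a length field parsed from untrusted input; the field layout and constants are simplified for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define BASE_HDR_LEN 8

    /* Decode a total header length from the first byte (in 4-byte words),
     * then validate it before trusting it for any pull or skip.
     */
    static int parse_hdr_len(const unsigned char *pkt, size_t pkt_len)
    {
        size_t length;

        if (pkt_len < BASE_HDR_LEN)
            return -1;                    /* not even a base header present */

        length = (size_t)(pkt[0] & 0x3f) * 4;   /* length claimed by the packet */
        if (length < BASE_HDR_LEN)
            return -1;                    /* malformed: shorter than the base header */
        if (length > pkt_len)
            return -1;                    /* claims more bytes than we actually have */

        return (int)length;
    }

    int main(void)
    {
        unsigned char good[16] = { 0x03 };   /* 3 words = 12 bytes */
        unsigned char bad[16]  = { 0x01 };   /* 1 word  =  4 bytes, below the base size */

        printf("good packet: hdr len %d\n", parse_hdr_len(good, sizeof(good)));
        printf("bad packet:  hdr len %d\n", parse_hdr_len(bad, sizeof(bad)));
        return 0;
    }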
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 7322aa1e382e..492ab0c36f7c 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -1712,13 +1712,10 @@ static void nlattr_set(struct nlattr *attr, u8 val, | |||
1712 | 1712 | ||
1713 | /* The nlattr stream should already have been validated */ | 1713 | /* The nlattr stream should already have been validated */ |
1714 | nla_for_each_nested(nla, attr, rem) { | 1714 | nla_for_each_nested(nla, attr, rem) { |
1715 | if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) { | 1715 | if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) |
1716 | if (tbl[nla_type(nla)].next) | 1716 | nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl); |
1717 | tbl = tbl[nla_type(nla)].next; | 1717 | else |
1718 | nlattr_set(nla, val, tbl); | ||
1719 | } else { | ||
1720 | memset(nla_data(nla), val, nla_len(nla)); | 1718 | memset(nla_data(nla), val, nla_len(nla)); |
1721 | } | ||
1722 | 1719 | ||
1723 | if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE) | 1720 | if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE) |
1724 | *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK; | 1721 | *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK; |
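[Editor's note] The openvswitch/flow_netlink.c change stops nlattr_set() from overwriting its tbl argument when it descends into a nested attribute: the old code reassigned tbl, so later siblings in the same nla_for_each_nested() loop were looked up against the child table; the new code passes "tbl[...].next ?: tbl" to the recursive call and leaves tbl untouched. A small sketch of the same fix shape, with invented table and node structures:

    #include <stdio.h>

    struct table {
        const char         *name;
        const struct table *next;   /* child table for nested entries, if any */
    };

    static const struct table child = { "child table", NULL };
    static const struct table root  = { "root table",  &child };

    static void visit(int depth, int is_nested, const struct table *tbl)
    {
        if (is_nested) {
            /* Compute the child argument at the call site instead of
             * reassigning tbl: siblings processed after this one keep
             * seeing the table they were given (the original bug mutated it).
             */
            visit(depth + 1, 0, tbl->next ? tbl->next : tbl);
            return;
        }
        printf("depth %d resolved against \"%s\"\n", depth, tbl->name);
    }

    int main(void)
    {
        const struct table *tbl = &root;

        visit(0, 1, tbl);   /* a nested entry descends into the child table */
        visit(0, 0, tbl);   /* the next sibling still resolves via the root */
        return 0;
    }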
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 41bd496531d4..00192a996be0 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
@@ -137,13 +137,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev) | |||
137 | 137 | ||
138 | ret = rfkill_register(rfkill->rfkill_dev); | 138 | ret = rfkill_register(rfkill->rfkill_dev); |
139 | if (ret < 0) | 139 | if (ret < 0) |
140 | return ret; | 140 | goto err_destroy; |
141 | 141 | ||
142 | platform_set_drvdata(pdev, rfkill); | 142 | platform_set_drvdata(pdev, rfkill); |
143 | 143 | ||
144 | dev_info(&pdev->dev, "%s device registered.\n", rfkill->name); | 144 | dev_info(&pdev->dev, "%s device registered.\n", rfkill->name); |
145 | 145 | ||
146 | return 0; | 146 | return 0; |
147 | |||
148 | err_destroy: | ||
149 | rfkill_destroy(rfkill->rfkill_dev); | ||
150 | |||
151 | return ret; | ||
147 | } | 152 | } |
148 | 153 | ||
149 | static int rfkill_gpio_remove(struct platform_device *pdev) | 154 | static int rfkill_gpio_remove(struct platform_device *pdev) |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 9a2c8e7c000e..2b463047dd7b 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -313,7 +313,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, | |||
313 | memset(&cp, 0, sizeof(cp)); | 313 | memset(&cp, 0, sizeof(cp)); |
314 | cp.local = rx->local; | 314 | cp.local = rx->local; |
315 | cp.key = key; | 315 | cp.key = key; |
316 | cp.security_level = 0; | 316 | cp.security_level = rx->min_sec_level; |
317 | cp.exclusive = false; | 317 | cp.exclusive = false; |
318 | cp.upgrade = upgrade; | 318 | cp.upgrade = upgrade; |
319 | cp.service_id = srx->srx_service; | 319 | cp.service_id = srx->srx_service; |
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 90d7079e0aa9..19975d2ca9a2 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
@@ -476,6 +476,7 @@ enum rxrpc_call_flag { | |||
476 | RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ | 476 | RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ |
477 | RXRPC_CALL_PINGING, /* Ping in process */ | 477 | RXRPC_CALL_PINGING, /* Ping in process */ |
478 | RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ | 478 | RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ |
479 | RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */ | ||
479 | }; | 480 | }; |
480 | 481 | ||
481 | /* | 482 | /* |
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index c717152070df..1350f1be8037 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c | |||
@@ -40,7 +40,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, | |||
40 | } __attribute__((packed)) pkt; | 40 | } __attribute__((packed)) pkt; |
41 | struct rxrpc_ackinfo ack_info; | 41 | struct rxrpc_ackinfo ack_info; |
42 | size_t len; | 42 | size_t len; |
43 | int ioc; | 43 | int ret, ioc; |
44 | u32 serial, mtu, call_id, padding; | 44 | u32 serial, mtu, call_id, padding; |
45 | 45 | ||
46 | _enter("%d", conn->debug_id); | 46 | _enter("%d", conn->debug_id); |
@@ -135,10 +135,13 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, | |||
135 | break; | 135 | break; |
136 | } | 136 | } |
137 | 137 | ||
138 | kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); | 138 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); |
139 | conn->params.peer->last_tx_at = ktime_get_real(); | 139 | conn->params.peer->last_tx_at = ktime_get_real(); |
140 | if (ret < 0) | ||
141 | trace_rxrpc_tx_fail(conn->debug_id, serial, ret, | ||
142 | rxrpc_tx_fail_call_final_resend); | ||
143 | |||
140 | _leave(""); | 144 | _leave(""); |
141 | return; | ||
142 | } | 145 | } |
143 | 146 | ||
144 | /* | 147 | /* |
@@ -236,6 +239,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
236 | 239 | ||
237 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); | 240 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); |
238 | if (ret < 0) { | 241 | if (ret < 0) { |
242 | trace_rxrpc_tx_fail(conn->debug_id, serial, ret, | ||
243 | rxrpc_tx_fail_conn_abort); | ||
239 | _debug("sendmsg failed: %d", ret); | 244 | _debug("sendmsg failed: %d", ret); |
240 | return -EAGAIN; | 245 | return -EAGAIN; |
241 | } | 246 | } |
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 0410d2277ca2..b5fd6381313d 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c | |||
@@ -971,7 +971,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, | |||
971 | if (timo) { | 971 | if (timo) { |
972 | unsigned long now = jiffies, expect_rx_by; | 972 | unsigned long now = jiffies, expect_rx_by; |
973 | 973 | ||
974 | expect_rx_by = jiffies + timo; | 974 | expect_rx_by = now + timo; |
975 | WRITE_ONCE(call->expect_rx_by, expect_rx_by); | 975 | WRITE_ONCE(call->expect_rx_by, expect_rx_by); |
976 | rxrpc_reduce_call_timer(call, expect_rx_by, now, | 976 | rxrpc_reduce_call_timer(call, expect_rx_by, now, |
977 | rxrpc_timer_set_for_normal); | 977 | rxrpc_timer_set_for_normal); |
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index 93b5d910b4a1..8325f1b86840 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c | |||
@@ -71,7 +71,8 @@ static void rxrpc_send_version_request(struct rxrpc_local *local, | |||
71 | 71 | ||
72 | ret = kernel_sendmsg(local->socket, &msg, iov, 2, len); | 72 | ret = kernel_sendmsg(local->socket, &msg, iov, 2, len); |
73 | if (ret < 0) | 73 | if (ret < 0) |
74 | _debug("sendmsg failed: %d", ret); | 74 | trace_rxrpc_tx_fail(local->debug_id, 0, ret, |
75 | rxrpc_tx_fail_version_reply); | ||
75 | 76 | ||
76 | _leave(""); | 77 | _leave(""); |
77 | } | 78 | } |
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 8b54e9531d52..b493e6b62740 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c | |||
@@ -134,22 +134,49 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) | |||
134 | } | 134 | } |
135 | } | 135 | } |
136 | 136 | ||
137 | /* we want to receive ICMP errors */ | 137 | switch (local->srx.transport.family) { |
138 | opt = 1; | 138 | case AF_INET: |
139 | ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, | 139 | /* we want to receive ICMP errors */ |
140 | (char *) &opt, sizeof(opt)); | 140 | opt = 1; |
141 | if (ret < 0) { | 141 | ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, |
142 | _debug("setsockopt failed"); | 142 | (char *) &opt, sizeof(opt)); |
143 | goto error; | 143 | if (ret < 0) { |
144 | } | 144 | _debug("setsockopt failed"); |
145 | goto error; | ||
146 | } | ||
145 | 147 | ||
146 | /* we want to set the don't fragment bit */ | 148 | /* we want to set the don't fragment bit */ |
147 | opt = IP_PMTUDISC_DO; | 149 | opt = IP_PMTUDISC_DO; |
148 | ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, | 150 | ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, |
149 | (char *) &opt, sizeof(opt)); | 151 | (char *) &opt, sizeof(opt)); |
150 | if (ret < 0) { | 152 | if (ret < 0) { |
151 | _debug("setsockopt failed"); | 153 | _debug("setsockopt failed"); |
152 | goto error; | 154 | goto error; |
155 | } | ||
156 | break; | ||
157 | |||
158 | case AF_INET6: | ||
159 | /* we want to receive ICMP errors */ | ||
160 | opt = 1; | ||
161 | ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR, | ||
162 | (char *) &opt, sizeof(opt)); | ||
163 | if (ret < 0) { | ||
164 | _debug("setsockopt failed"); | ||
165 | goto error; | ||
166 | } | ||
167 | |||
168 | /* we want to set the don't fragment bit */ | ||
169 | opt = IPV6_PMTUDISC_DO; | ||
170 | ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, | ||
171 | (char *) &opt, sizeof(opt)); | ||
172 | if (ret < 0) { | ||
173 | _debug("setsockopt failed"); | ||
174 | goto error; | ||
175 | } | ||
176 | break; | ||
177 | |||
178 | default: | ||
179 | BUG(); | ||
153 | } | 180 | } |
154 | 181 | ||
155 | /* set the socket up */ | 182 | /* set the socket up */ |
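[Editor's note] The rxrpc/local_object.c hunk makes the UDP socket setup family-aware: an AF_INET transport gets IP_RECVERR plus IP_PMTUDISC_DO, an AF_INET6 transport gets the IPV6_* equivalents, and any other family is a BUG(), instead of always applying the IPv4 options. A userspace sketch of the same switch using plain setsockopt(); the kernel code uses kernel_setsockopt() with SOL_IP/SOL_IPV6, for which IPPROTO_IP/IPPROTO_IPV6 are the matching levels here, and error handling is trimmed:

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int configure(int fd, int family)
    {
        int on = 1, pmtu;

        switch (family) {
        case AF_INET:
            pmtu = IP_PMTUDISC_DO;                 /* set the don't-fragment bit */
            if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on)) < 0 ||
                setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu)) < 0)
                return -1;
            break;
        case AF_INET6:
            pmtu = IPV6_PMTUDISC_DO;
            if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on)) < 0 ||
                setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER, &pmtu, sizeof(pmtu)) < 0)
                return -1;
            break;
        default:
            return -1;                             /* unsupported address family */
        }
        return 0;
    }

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || configure(fd, AF_INET) < 0) {
            perror("socket setup");
            return 1;
        }
        printf("IPv4 socket configured for ICMP errors and path-MTU discovery\n");
        close(fd);
        return 0;
    }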
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 7f1fc04775b3..f03de1c59ba3 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c | |||
@@ -210,6 +210,9 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, | |||
210 | if (ping) | 210 | if (ping) |
211 | call->ping_time = now; | 211 | call->ping_time = now; |
212 | conn->params.peer->last_tx_at = ktime_get_real(); | 212 | conn->params.peer->last_tx_at = ktime_get_real(); |
213 | if (ret < 0) | ||
214 | trace_rxrpc_tx_fail(call->debug_id, serial, ret, | ||
215 | rxrpc_tx_fail_call_ack); | ||
213 | 216 | ||
214 | if (call->state < RXRPC_CALL_COMPLETE) { | 217 | if (call->state < RXRPC_CALL_COMPLETE) { |
215 | if (ret < 0) { | 218 | if (ret < 0) { |
@@ -294,6 +297,10 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call) | |||
294 | ret = kernel_sendmsg(conn->params.local->socket, | 297 | ret = kernel_sendmsg(conn->params.local->socket, |
295 | &msg, iov, 1, sizeof(pkt)); | 298 | &msg, iov, 1, sizeof(pkt)); |
296 | conn->params.peer->last_tx_at = ktime_get_real(); | 299 | conn->params.peer->last_tx_at = ktime_get_real(); |
300 | if (ret < 0) | ||
301 | trace_rxrpc_tx_fail(call->debug_id, serial, ret, | ||
302 | rxrpc_tx_fail_call_abort); | ||
303 | |||
297 | 304 | ||
298 | rxrpc_put_connection(conn); | 305 | rxrpc_put_connection(conn); |
299 | return ret; | 306 | return ret; |
@@ -387,6 +394,9 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, | |||
387 | conn->params.peer->last_tx_at = ktime_get_real(); | 394 | conn->params.peer->last_tx_at = ktime_get_real(); |
388 | 395 | ||
389 | up_read(&conn->params.local->defrag_sem); | 396 | up_read(&conn->params.local->defrag_sem); |
397 | if (ret < 0) | ||
398 | trace_rxrpc_tx_fail(call->debug_id, serial, ret, | ||
399 | rxrpc_tx_fail_call_data_nofrag); | ||
390 | if (ret == -EMSGSIZE) | 400 | if (ret == -EMSGSIZE) |
391 | goto send_fragmentable; | 401 | goto send_fragmentable; |
392 | 402 | ||
@@ -414,6 +424,17 @@ done: | |||
414 | rxrpc_timer_set_for_lost_ack); | 424 | rxrpc_timer_set_for_lost_ack); |
415 | } | 425 | } |
416 | } | 426 | } |
427 | |||
428 | if (sp->hdr.seq == 1 && | ||
429 | !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, | ||
430 | &call->flags)) { | ||
431 | unsigned long nowj = jiffies, expect_rx_by; | ||
432 | |||
433 | expect_rx_by = nowj + call->next_rx_timo; | ||
434 | WRITE_ONCE(call->expect_rx_by, expect_rx_by); | ||
435 | rxrpc_reduce_call_timer(call, expect_rx_by, nowj, | ||
436 | rxrpc_timer_set_for_normal); | ||
437 | } | ||
417 | } | 438 | } |
418 | 439 | ||
419 | rxrpc_set_keepalive(call); | 440 | rxrpc_set_keepalive(call); |
@@ -465,6 +486,10 @@ send_fragmentable: | |||
465 | #endif | 486 | #endif |
466 | } | 487 | } |
467 | 488 | ||
489 | if (ret < 0) | ||
490 | trace_rxrpc_tx_fail(call->debug_id, serial, ret, | ||
491 | rxrpc_tx_fail_call_data_frag); | ||
492 | |||
468 | up_write(&conn->params.local->defrag_sem); | 493 | up_write(&conn->params.local->defrag_sem); |
469 | goto done; | 494 | goto done; |
470 | } | 495 | } |
@@ -482,6 +507,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) | |||
482 | struct kvec iov[2]; | 507 | struct kvec iov[2]; |
483 | size_t size; | 508 | size_t size; |
484 | __be32 code; | 509 | __be32 code; |
510 | int ret; | ||
485 | 511 | ||
486 | _enter("%d", local->debug_id); | 512 | _enter("%d", local->debug_id); |
487 | 513 | ||
@@ -516,7 +542,10 @@ void rxrpc_reject_packets(struct rxrpc_local *local) | |||
516 | whdr.flags ^= RXRPC_CLIENT_INITIATED; | 542 | whdr.flags ^= RXRPC_CLIENT_INITIATED; |
517 | whdr.flags &= RXRPC_CLIENT_INITIATED; | 543 | whdr.flags &= RXRPC_CLIENT_INITIATED; |
518 | 544 | ||
519 | kernel_sendmsg(local->socket, &msg, iov, 2, size); | 545 | ret = kernel_sendmsg(local->socket, &msg, iov, 2, size); |
546 | if (ret < 0) | ||
547 | trace_rxrpc_tx_fail(local->debug_id, 0, ret, | ||
548 | rxrpc_tx_fail_reject); | ||
520 | } | 549 | } |
521 | 550 | ||
522 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 551 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); |
@@ -567,7 +596,8 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer) | |||
567 | 596 | ||
568 | ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len); | 597 | ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len); |
569 | if (ret < 0) | 598 | if (ret < 0) |
570 | _debug("sendmsg failed: %d", ret); | 599 | trace_rxrpc_tx_fail(peer->debug_id, 0, ret, |
600 | rxrpc_tx_fail_version_keepalive); | ||
571 | 601 | ||
572 | peer->last_tx_at = ktime_get_real(); | 602 | peer->last_tx_at = ktime_get_real(); |
573 | _leave(""); | 603 | _leave(""); |
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 78c2f95d1f22..0ed8b651cec2 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c | |||
@@ -28,39 +28,39 @@ static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *); | |||
28 | * Find the peer associated with an ICMP packet. | 28 | * Find the peer associated with an ICMP packet. |
29 | */ | 29 | */ |
30 | static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, | 30 | static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, |
31 | const struct sk_buff *skb) | 31 | const struct sk_buff *skb, |
32 | struct sockaddr_rxrpc *srx) | ||
32 | { | 33 | { |
33 | struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); | 34 | struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); |
34 | struct sockaddr_rxrpc srx; | ||
35 | 35 | ||
36 | _enter(""); | 36 | _enter(""); |
37 | 37 | ||
38 | memset(&srx, 0, sizeof(srx)); | 38 | memset(srx, 0, sizeof(*srx)); |
39 | srx.transport_type = local->srx.transport_type; | 39 | srx->transport_type = local->srx.transport_type; |
40 | srx.transport_len = local->srx.transport_len; | 40 | srx->transport_len = local->srx.transport_len; |
41 | srx.transport.family = local->srx.transport.family; | 41 | srx->transport.family = local->srx.transport.family; |
42 | 42 | ||
43 | /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice | 43 | /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice |
44 | * versa? | 44 | * versa? |
45 | */ | 45 | */ |
46 | switch (srx.transport.family) { | 46 | switch (srx->transport.family) { |
47 | case AF_INET: | 47 | case AF_INET: |
48 | srx.transport.sin.sin_port = serr->port; | 48 | srx->transport.sin.sin_port = serr->port; |
49 | switch (serr->ee.ee_origin) { | 49 | switch (serr->ee.ee_origin) { |
50 | case SO_EE_ORIGIN_ICMP: | 50 | case SO_EE_ORIGIN_ICMP: |
51 | _net("Rx ICMP"); | 51 | _net("Rx ICMP"); |
52 | memcpy(&srx.transport.sin.sin_addr, | 52 | memcpy(&srx->transport.sin.sin_addr, |
53 | skb_network_header(skb) + serr->addr_offset, | 53 | skb_network_header(skb) + serr->addr_offset, |
54 | sizeof(struct in_addr)); | 54 | sizeof(struct in_addr)); |
55 | break; | 55 | break; |
56 | case SO_EE_ORIGIN_ICMP6: | 56 | case SO_EE_ORIGIN_ICMP6: |
57 | _net("Rx ICMP6 on v4 sock"); | 57 | _net("Rx ICMP6 on v4 sock"); |
58 | memcpy(&srx.transport.sin.sin_addr, | 58 | memcpy(&srx->transport.sin.sin_addr, |
59 | skb_network_header(skb) + serr->addr_offset + 12, | 59 | skb_network_header(skb) + serr->addr_offset + 12, |
60 | sizeof(struct in_addr)); | 60 | sizeof(struct in_addr)); |
61 | break; | 61 | break; |
62 | default: | 62 | default: |
63 | memcpy(&srx.transport.sin.sin_addr, &ip_hdr(skb)->saddr, | 63 | memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr, |
64 | sizeof(struct in_addr)); | 64 | sizeof(struct in_addr)); |
65 | break; | 65 | break; |
66 | } | 66 | } |
@@ -68,25 +68,25 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, | |||
68 | 68 | ||
69 | #ifdef CONFIG_AF_RXRPC_IPV6 | 69 | #ifdef CONFIG_AF_RXRPC_IPV6 |
70 | case AF_INET6: | 70 | case AF_INET6: |
71 | srx.transport.sin6.sin6_port = serr->port; | 71 | srx->transport.sin6.sin6_port = serr->port; |
72 | switch (serr->ee.ee_origin) { | 72 | switch (serr->ee.ee_origin) { |
73 | case SO_EE_ORIGIN_ICMP6: | 73 | case SO_EE_ORIGIN_ICMP6: |
74 | _net("Rx ICMP6"); | 74 | _net("Rx ICMP6"); |
75 | memcpy(&srx.transport.sin6.sin6_addr, | 75 | memcpy(&srx->transport.sin6.sin6_addr, |
76 | skb_network_header(skb) + serr->addr_offset, | 76 | skb_network_header(skb) + serr->addr_offset, |
77 | sizeof(struct in6_addr)); | 77 | sizeof(struct in6_addr)); |
78 | break; | 78 | break; |
79 | case SO_EE_ORIGIN_ICMP: | 79 | case SO_EE_ORIGIN_ICMP: |
80 | _net("Rx ICMP on v6 sock"); | 80 | _net("Rx ICMP on v6 sock"); |
81 | srx.transport.sin6.sin6_addr.s6_addr32[0] = 0; | 81 | srx->transport.sin6.sin6_addr.s6_addr32[0] = 0; |
82 | srx.transport.sin6.sin6_addr.s6_addr32[1] = 0; | 82 | srx->transport.sin6.sin6_addr.s6_addr32[1] = 0; |
83 | srx.transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); | 83 | srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); |
84 | memcpy(srx.transport.sin6.sin6_addr.s6_addr + 12, | 84 | memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12, |
85 | skb_network_header(skb) + serr->addr_offset, | 85 | skb_network_header(skb) + serr->addr_offset, |
86 | sizeof(struct in_addr)); | 86 | sizeof(struct in_addr)); |
87 | break; | 87 | break; |
88 | default: | 88 | default: |
89 | memcpy(&srx.transport.sin6.sin6_addr, | 89 | memcpy(&srx->transport.sin6.sin6_addr, |
90 | &ipv6_hdr(skb)->saddr, | 90 | &ipv6_hdr(skb)->saddr, |
91 | sizeof(struct in6_addr)); | 91 | sizeof(struct in6_addr)); |
92 | break; | 92 | break; |
@@ -98,7 +98,7 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, | |||
98 | BUG(); | 98 | BUG(); |
99 | } | 99 | } |
100 | 100 | ||
101 | return rxrpc_lookup_peer_rcu(local, &srx); | 101 | return rxrpc_lookup_peer_rcu(local, srx); |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
@@ -146,6 +146,7 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *se | |||
146 | void rxrpc_error_report(struct sock *sk) | 146 | void rxrpc_error_report(struct sock *sk) |
147 | { | 147 | { |
148 | struct sock_exterr_skb *serr; | 148 | struct sock_exterr_skb *serr; |
149 | struct sockaddr_rxrpc srx; | ||
149 | struct rxrpc_local *local = sk->sk_user_data; | 150 | struct rxrpc_local *local = sk->sk_user_data; |
150 | struct rxrpc_peer *peer; | 151 | struct rxrpc_peer *peer; |
151 | struct sk_buff *skb; | 152 | struct sk_buff *skb; |
@@ -166,7 +167,7 @@ void rxrpc_error_report(struct sock *sk) | |||
166 | } | 167 | } |
167 | 168 | ||
168 | rcu_read_lock(); | 169 | rcu_read_lock(); |
169 | peer = rxrpc_lookup_peer_icmp_rcu(local, skb); | 170 | peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx); |
170 | if (peer && !rxrpc_get_peer_maybe(peer)) | 171 | if (peer && !rxrpc_get_peer_maybe(peer)) |
171 | peer = NULL; | 172 | peer = NULL; |
172 | if (!peer) { | 173 | if (!peer) { |
@@ -176,6 +177,8 @@ void rxrpc_error_report(struct sock *sk) | |||
176 | return; | 177 | return; |
177 | } | 178 | } |
178 | 179 | ||
180 | trace_rxrpc_rx_icmp(peer, &serr->ee, &srx); | ||
181 | |||
179 | if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP && | 182 | if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP && |
180 | serr->ee.ee_type == ICMP_DEST_UNREACH && | 183 | serr->ee.ee_type == ICMP_DEST_UNREACH && |
181 | serr->ee.ee_code == ICMP_FRAG_NEEDED)) { | 184 | serr->ee.ee_code == ICMP_FRAG_NEEDED)) { |
@@ -209,9 +212,6 @@ static void rxrpc_store_error(struct rxrpc_peer *peer, | |||
209 | 212 | ||
210 | ee = &serr->ee; | 213 | ee = &serr->ee; |
211 | 214 | ||
212 | _net("Rx Error o=%d t=%d c=%d e=%d", | ||
213 | ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno); | ||
214 | |||
215 | err = ee->ee_errno; | 215 | err = ee->ee_errno; |
216 | 216 | ||
217 | switch (ee->ee_origin) { | 217 | switch (ee->ee_origin) { |
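The peer_event.c hunk above threads a caller-allocated sockaddr through rxrpc_lookup_peer_icmp_rcu() so the same decoded ICMP source address can feed both the peer lookup and the new rx_icmp tracepoint. A minimal userspace sketch of that out-parameter pattern follows; all names here (lookup_peer, the fake peer struct) are invented for illustration and are not rxrpc code:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

struct peer { int id; };

/* Fill the caller-provided address from the "ICMP payload", then
 * return the matching peer (or NULL), so the caller can reuse the
 * same address for tracing. */
static struct peer *lookup_peer(const char *icmp_src, struct sockaddr_in *out)
{
	static struct peer known = { .id = 42 };

	memset(out, 0, sizeof(*out));
	out->sin_family = AF_INET;
	if (inet_pton(AF_INET, icmp_src, &out->sin_addr) != 1)
		return NULL;
	return &known;		/* pretend every parsed address matches */
}

int main(void)
{
	struct sockaddr_in srx;	/* caller-allocated, like srx in the patch */
	struct peer *peer = lookup_peer("192.0.2.1", &srx);
	char buf[INET_ADDRSTRLEN];

	if (peer)		/* same decoded address reused for the "trace" */
		printf("rx_icmp: peer=%d addr=%s\n", peer->id,
		       inet_ntop(AF_INET, &srx.sin_addr, buf, sizeof(buf)));
	return 0;
}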
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 588fea0dd362..6c0ae27fff84 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
@@ -664,7 +664,8 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) | |||
664 | 664 | ||
665 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); | 665 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); |
666 | if (ret < 0) { | 666 | if (ret < 0) { |
667 | _debug("sendmsg failed: %d", ret); | 667 | trace_rxrpc_tx_fail(conn->debug_id, serial, ret, |
668 | rxrpc_tx_fail_conn_challenge); | ||
668 | return -EAGAIN; | 669 | return -EAGAIN; |
669 | } | 670 | } |
670 | 671 | ||
@@ -719,7 +720,8 @@ static int rxkad_send_response(struct rxrpc_connection *conn, | |||
719 | 720 | ||
720 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len); | 721 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len); |
721 | if (ret < 0) { | 722 | if (ret < 0) { |
722 | _debug("sendmsg failed: %d", ret); | 723 | trace_rxrpc_tx_fail(conn->debug_id, serial, ret, |
724 | rxrpc_tx_fail_conn_response); | ||
723 | return -EAGAIN; | 725 | return -EAGAIN; |
724 | } | 726 | } |
725 | 727 | ||
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 206e802ccbdc..be01f9c5d963 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c | |||
@@ -223,6 +223,15 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
223 | 223 | ||
224 | ret = rxrpc_send_data_packet(call, skb, false); | 224 | ret = rxrpc_send_data_packet(call, skb, false); |
225 | if (ret < 0) { | 225 | if (ret < 0) { |
226 | switch (ret) { | ||
227 | case -ENETUNREACH: | ||
228 | case -EHOSTUNREACH: | ||
229 | case -ECONNREFUSED: | ||
230 | rxrpc_set_call_completion(call, | ||
231 | RXRPC_CALL_LOCAL_ERROR, | ||
232 | 0, ret); | ||
233 | goto out; | ||
234 | } | ||
226 | _debug("need instant resend %d", ret); | 235 | _debug("need instant resend %d", ret); |
227 | rxrpc_instant_resend(call, ix); | 236 | rxrpc_instant_resend(call, ix); |
228 | } else { | 237 | } else { |
@@ -241,6 +250,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
241 | rxrpc_timer_set_for_send); | 250 | rxrpc_timer_set_for_send); |
242 | } | 251 | } |
243 | 252 | ||
253 | out: | ||
244 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | 254 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); |
245 | _leave(""); | 255 | _leave(""); |
246 | } | 256 | } |
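The sendmsg.c change above stops scheduling an instant resend when the transmit error indicates the destination is simply unreachable; those errors now complete the call locally. A small, self-contained sketch of that error split (userspace only, function names invented):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hard routing errors: retransmitting cannot help, so fail the call. */
static bool send_error_is_fatal(int err)
{
	switch (err) {
	case ENETUNREACH:
	case EHOSTUNREACH:
	case ECONNREFUSED:
		return true;	/* complete the call with a local error */
	default:
		return false;	/* e.g. ENOBUFS: schedule an instant resend */
	}
}

int main(void)
{
	int errs[] = { ENETUNREACH, ENOBUFS, ECONNREFUSED };

	for (unsigned i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
		printf("err=%d -> %s\n", errs[i],
		       send_error_is_fatal(errs[i]) ? "complete call" : "resend");
	return 0;
}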
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index ddf69fc01bdf..6138d1d71900 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -121,7 +121,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, | |||
121 | return 0; | 121 | return 0; |
122 | 122 | ||
123 | if (!flags) { | 123 | if (!flags) { |
124 | tcf_idr_release(*a, bind); | 124 | if (exists) |
125 | tcf_idr_release(*a, bind); | ||
125 | return -EINVAL; | 126 | return -EINVAL; |
126 | } | 127 | } |
127 | 128 | ||
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index bbcbdce732cc..ad050d7d4b46 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c | |||
@@ -131,8 +131,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, | |||
131 | if (exists && bind) | 131 | if (exists && bind) |
132 | return 0; | 132 | return 0; |
133 | 133 | ||
134 | if (!lflags) | 134 | if (!lflags) { |
135 | if (exists) | ||
136 | tcf_idr_release(*a, bind); | ||
135 | return -EINVAL; | 137 | return -EINVAL; |
138 | } | ||
136 | 139 | ||
137 | if (!exists) { | 140 | if (!exists) { |
138 | ret = tcf_idr_create(tn, parm->index, est, a, | 141 | ret = tcf_idr_create(tn, parm->index, est, a, |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index b66754f52a9f..963e4bf0aab8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -152,8 +152,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, | |||
152 | NL_SET_ERR_MSG(extack, "TC classifier not found"); | 152 | NL_SET_ERR_MSG(extack, "TC classifier not found"); |
153 | err = -ENOENT; | 153 | err = -ENOENT; |
154 | } | 154 | } |
155 | goto errout; | ||
156 | #endif | 155 | #endif |
156 | goto errout; | ||
157 | } | 157 | } |
158 | tp->classify = tp->ops->classify; | 158 | tp->classify = tp->ops->classify; |
159 | tp->protocol = protocol; | 159 | tp->protocol = protocol; |
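The cls_api.c fix above moves the error-path goto outside the #endif: previously, on kernels built without module support, a missing classifier fell through and the code went on to dereference a NULL ops pointer. A hypothetical userspace reconstruction of why the goto must run in both configurations (HAVE_MODULES and the struct here are stand-ins, not the kernel symbols):

#include <errno.h>
#include <stdio.h>

struct ops { int (*classify)(void); };

static struct ops *find_ops(const char *kind)
{
	(void)kind;
	return NULL;			/* pretend the classifier is unknown */
}

static int create(const char *kind)
{
	struct ops *ops = find_ops(kind);
	int err = 0;

	if (!ops) {
#ifdef HAVE_MODULES
		/* with module support, the kernel tries request_module()
		 * and a second lookup here */
#endif
		err = -ENOENT;
		goto errout;	/* must run whether or not HAVE_MODULES is set;
				 * with the old placement inside the #ifdef,
				 * the !HAVE_MODULES build fell through below */
	}
	err = ops->classify();	/* would crash if reached with ops == NULL */
errout:
	return err;
}

int main(void)
{
	printf("create() = %d\n", create("unknown"));
	return 0;
}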
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 837806dd5799..a47179da24e6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1024,8 +1024,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) | |||
1024 | struct sctp_endpoint *ep; | 1024 | struct sctp_endpoint *ep; |
1025 | struct sctp_chunk *chunk; | 1025 | struct sctp_chunk *chunk; |
1026 | struct sctp_inq *inqueue; | 1026 | struct sctp_inq *inqueue; |
1027 | int state; | 1027 | int first_time = 1; /* is this the first time through the loop */ |
1028 | int error = 0; | 1028 | int error = 0; |
1029 | int state; | ||
1029 | 1030 | ||
1030 | /* The association should be held so we should be safe. */ | 1031 | /* The association should be held so we should be safe. */ |
1031 | ep = asoc->ep; | 1032 | ep = asoc->ep; |
@@ -1036,6 +1037,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) | |||
1036 | state = asoc->state; | 1037 | state = asoc->state; |
1037 | subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); | 1038 | subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); |
1038 | 1039 | ||
1040 | /* If the first chunk in the packet is AUTH, do special | ||
1041 | * processing specified in Section 6.3 of SCTP-AUTH spec | ||
1042 | */ | ||
1043 | if (first_time && subtype.chunk == SCTP_CID_AUTH) { | ||
1044 | struct sctp_chunkhdr *next_hdr; | ||
1045 | |||
1046 | next_hdr = sctp_inq_peek(inqueue); | ||
1047 | if (!next_hdr) | ||
1048 | goto normal; | ||
1049 | |||
1050 | /* If the next chunk is COOKIE-ECHO, skip the AUTH | ||
1051 | * chunk while saving a pointer to it so we can do | ||
1052 | * Authentication later (during cookie-echo | ||
1053 | * processing). | ||
1054 | */ | ||
1055 | if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { | ||
1056 | chunk->auth_chunk = skb_clone(chunk->skb, | ||
1057 | GFP_ATOMIC); | ||
1058 | chunk->auth = 1; | ||
1059 | continue; | ||
1060 | } | ||
1061 | } | ||
1062 | |||
1063 | normal: | ||
1039 | /* SCTP-AUTH, Section 6.3: | 1064 | /* SCTP-AUTH, Section 6.3: |
1040 | * The receiver has a list of chunk types which it expects | 1065 | * The receiver has a list of chunk types which it expects |
1041 | * to be received only after an AUTH-chunk. This list has | 1066 | * to be received only after an AUTH-chunk. This list has |
@@ -1074,6 +1099,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) | |||
1074 | /* If there is an error on chunk, discard this packet. */ | 1099 | /* If there is an error on chunk, discard this packet. */ |
1075 | if (error && chunk) | 1100 | if (error && chunk) |
1076 | chunk->pdiscard = 1; | 1101 | chunk->pdiscard = 1; |
1102 | |||
1103 | if (first_time) | ||
1104 | first_time = 0; | ||
1077 | } | 1105 | } |
1078 | sctp_association_put(asoc); | 1106 | sctp_association_put(asoc); |
1079 | } | 1107 | } |
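The associola.c hunk above handles the case where the first chunk of a packet is AUTH immediately followed by COOKIE-ECHO: the AUTH chunk is stashed so it can be verified later, once the cookie has rebuilt the association. A toy sketch of that control flow (the chunk model below is invented and is not SCTP code):

#include <stdbool.h>
#include <stdio.h>

enum { CID_DATA = 0x00, CID_COOKIE_ECHO = 0x0a, CID_AUTH = 0x0f };

int main(void)
{
	int packet[] = { CID_AUTH, CID_COOKIE_ECHO, CID_DATA };
	int n = sizeof(packet) / sizeof(packet[0]);
	int saved_auth = -1;		/* index of the deferred AUTH chunk */
	bool first_time = true;

	for (int i = 0; i < n; i++) {
		int type = packet[i];

		/* first chunk is AUTH and the next is COOKIE-ECHO:
		 * skip it now, verify it during cookie-echo processing */
		if (first_time && type == CID_AUTH &&
		    i + 1 < n && packet[i + 1] == CID_COOKIE_ECHO) {
			saved_auth = i;
			first_time = false;
			continue;
		}
		printf("process chunk 0x%02x (deferred auth index %d)\n",
		       type, saved_auth);
		first_time = false;
	}
	return 0;
}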
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 5a4fb1dc8400..e62addb60434 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -1152,7 +1152,7 @@ struct sctp_chunk *sctp_make_violation_max_retrans( | |||
1152 | const struct sctp_association *asoc, | 1152 | const struct sctp_association *asoc, |
1153 | const struct sctp_chunk *chunk) | 1153 | const struct sctp_chunk *chunk) |
1154 | { | 1154 | { |
1155 | static const char error[] = "Association exceeded its max_retans count"; | 1155 | static const char error[] = "Association exceeded its max_retrans count"; |
1156 | size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr); | 1156 | size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr); |
1157 | struct sctp_chunk *retval; | 1157 | struct sctp_chunk *retval; |
1158 | 1158 | ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 28c070e187c2..c9ae3404b1bb 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -153,10 +153,7 @@ static enum sctp_disposition sctp_sf_violation_chunk( | |||
153 | struct sctp_cmd_seq *commands); | 153 | struct sctp_cmd_seq *commands); |
154 | 154 | ||
155 | static enum sctp_ierror sctp_sf_authenticate( | 155 | static enum sctp_ierror sctp_sf_authenticate( |
156 | struct net *net, | ||
157 | const struct sctp_endpoint *ep, | ||
158 | const struct sctp_association *asoc, | 156 | const struct sctp_association *asoc, |
159 | const union sctp_subtype type, | ||
160 | struct sctp_chunk *chunk); | 157 | struct sctp_chunk *chunk); |
161 | 158 | ||
162 | static enum sctp_disposition __sctp_sf_do_9_1_abort( | 159 | static enum sctp_disposition __sctp_sf_do_9_1_abort( |
@@ -626,6 +623,38 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net, | |||
626 | return SCTP_DISPOSITION_CONSUME; | 623 | return SCTP_DISPOSITION_CONSUME; |
627 | } | 624 | } |
628 | 625 | ||
626 | static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk, | ||
627 | const struct sctp_association *asoc) | ||
628 | { | ||
629 | struct sctp_chunk auth; | ||
630 | |||
631 | if (!chunk->auth_chunk) | ||
632 | return true; | ||
633 | |||
634 | /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo | ||
635 | * is supposed to be authenticated and we have to do delayed | ||
636 | * authentication. We've just recreated the association using | ||
637 | * the information in the cookie and now it's much easier to | ||
638 | * do the authentication. | ||
639 | */ | ||
640 | |||
641 | /* Make sure that we and the peer are AUTH capable */ | ||
642 | if (!net->sctp.auth_enable || !asoc->peer.auth_capable) | ||
643 | return false; | ||
644 | |||
645 | /* set-up our fake chunk so that we can process it */ | ||
646 | auth.skb = chunk->auth_chunk; | ||
647 | auth.asoc = chunk->asoc; | ||
648 | auth.sctp_hdr = chunk->sctp_hdr; | ||
649 | auth.chunk_hdr = (struct sctp_chunkhdr *) | ||
650 | skb_push(chunk->auth_chunk, | ||
651 | sizeof(struct sctp_chunkhdr)); | ||
652 | skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr)); | ||
653 | auth.transport = chunk->transport; | ||
654 | |||
655 | return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR; | ||
656 | } | ||
657 | |||
629 | /* | 658 | /* |
630 | * Respond to a normal COOKIE ECHO chunk. | 659 | * Respond to a normal COOKIE ECHO chunk. |
631 | * We are the side that is being asked for an association. | 660 | * We are the side that is being asked for an association. |
@@ -763,37 +792,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net, | |||
763 | if (error) | 792 | if (error) |
764 | goto nomem_init; | 793 | goto nomem_init; |
765 | 794 | ||
766 | /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo | 795 | if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) { |
767 | * is supposed to be authenticated and we have to do delayed | 796 | sctp_association_free(new_asoc); |
768 | * authentication. We've just recreated the association using | 797 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); |
769 | * the information in the cookie and now it's much easier to | ||
770 | * do the authentication. | ||
771 | */ | ||
772 | if (chunk->auth_chunk) { | ||
773 | struct sctp_chunk auth; | ||
774 | enum sctp_ierror ret; | ||
775 | |||
776 | /* Make sure that we and the peer are AUTH capable */ | ||
777 | if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { | ||
778 | sctp_association_free(new_asoc); | ||
779 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); | ||
780 | } | ||
781 | |||
782 | /* set-up our fake chunk so that we can process it */ | ||
783 | auth.skb = chunk->auth_chunk; | ||
784 | auth.asoc = chunk->asoc; | ||
785 | auth.sctp_hdr = chunk->sctp_hdr; | ||
786 | auth.chunk_hdr = (struct sctp_chunkhdr *) | ||
787 | skb_push(chunk->auth_chunk, | ||
788 | sizeof(struct sctp_chunkhdr)); | ||
789 | skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr)); | ||
790 | auth.transport = chunk->transport; | ||
791 | |||
792 | ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); | ||
793 | if (ret != SCTP_IERROR_NO_ERROR) { | ||
794 | sctp_association_free(new_asoc); | ||
795 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); | ||
796 | } | ||
797 | } | 798 | } |
798 | 799 | ||
799 | repl = sctp_make_cookie_ack(new_asoc, chunk); | 800 | repl = sctp_make_cookie_ack(new_asoc, chunk); |
@@ -1797,13 +1798,15 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( | |||
1797 | if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) | 1798 | if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) |
1798 | goto nomem; | 1799 | goto nomem; |
1799 | 1800 | ||
1801 | if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) | ||
1802 | return SCTP_DISPOSITION_DISCARD; | ||
1803 | |||
1800 | /* Make sure no new addresses are being added during the | 1804 | /* Make sure no new addresses are being added during the |
1801 | * restart. Though this is a pretty complicated attack | 1805 | * restart. Though this is a pretty complicated attack |
1802 | * since you'd have to get inside the cookie. | 1806 | * since you'd have to get inside the cookie. |
1803 | */ | 1807 | */ |
1804 | if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) { | 1808 | if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) |
1805 | return SCTP_DISPOSITION_CONSUME; | 1809 | return SCTP_DISPOSITION_CONSUME; |
1806 | } | ||
1807 | 1810 | ||
1808 | /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes | 1811 | /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes |
1809 | * the peer has restarted (Action A), it MUST NOT setup a new | 1812 | * the peer has restarted (Action A), it MUST NOT setup a new |
@@ -1912,6 +1915,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b( | |||
1912 | if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) | 1915 | if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) |
1913 | goto nomem; | 1916 | goto nomem; |
1914 | 1917 | ||
1918 | if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) | ||
1919 | return SCTP_DISPOSITION_DISCARD; | ||
1920 | |||
1915 | /* Update the content of current association. */ | 1921 | /* Update the content of current association. */ |
1916 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); | 1922 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); |
1917 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | 1923 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
@@ -2009,6 +2015,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_d( | |||
2009 | * a COOKIE ACK. | 2015 | * a COOKIE ACK. |
2010 | */ | 2016 | */ |
2011 | 2017 | ||
2018 | if (!sctp_auth_chunk_verify(net, chunk, asoc)) | ||
2019 | return SCTP_DISPOSITION_DISCARD; | ||
2020 | |||
2012 | /* Don't accidentally move back into established state. */ | 2021 | /* Don't accidentally move back into established state. */ |
2013 | if (asoc->state < SCTP_STATE_ESTABLISHED) { | 2022 | if (asoc->state < SCTP_STATE_ESTABLISHED) { |
2014 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 2023 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
@@ -4171,10 +4180,7 @@ gen_shutdown: | |||
4171 | * The return value is the disposition of the chunk. | 4180 | * The return value is the disposition of the chunk. |
4172 | */ | 4181 | */ |
4173 | static enum sctp_ierror sctp_sf_authenticate( | 4182 | static enum sctp_ierror sctp_sf_authenticate( |
4174 | struct net *net, | ||
4175 | const struct sctp_endpoint *ep, | ||
4176 | const struct sctp_association *asoc, | 4183 | const struct sctp_association *asoc, |
4177 | const union sctp_subtype type, | ||
4178 | struct sctp_chunk *chunk) | 4184 | struct sctp_chunk *chunk) |
4179 | { | 4185 | { |
4180 | struct sctp_shared_key *sh_key = NULL; | 4186 | struct sctp_shared_key *sh_key = NULL; |
@@ -4275,7 +4281,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net, | |||
4275 | commands); | 4281 | commands); |
4276 | 4282 | ||
4277 | auth_hdr = (struct sctp_authhdr *)chunk->skb->data; | 4283 | auth_hdr = (struct sctp_authhdr *)chunk->skb->data; |
4278 | error = sctp_sf_authenticate(net, ep, asoc, type, chunk); | 4284 | error = sctp_sf_authenticate(asoc, chunk); |
4279 | switch (error) { | 4285 | switch (error) { |
4280 | case SCTP_IERROR_AUTH_BAD_HMAC: | 4286 | case SCTP_IERROR_AUTH_BAD_HMAC: |
4281 | /* Generate the ERROR chunk and discard the rest | 4287 | /* Generate the ERROR chunk and discard the rest |
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 84207ad33e8e..8cb7d9858270 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -715,7 +715,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, | |||
715 | return event; | 715 | return event; |
716 | 716 | ||
717 | fail_mark: | 717 | fail_mark: |
718 | sctp_chunk_put(chunk); | ||
719 | kfree_skb(skb); | 718 | kfree_skb(skb); |
720 | fail: | 719 | fail: |
721 | return NULL; | 720 | return NULL; |
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index 5cc68a824f45..f2f63959fddd 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c | |||
@@ -72,6 +72,7 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) | |||
72 | if (IS_ERR(mr->fmr.fm_mr)) | 72 | if (IS_ERR(mr->fmr.fm_mr)) |
73 | goto out_fmr_err; | 73 | goto out_fmr_err; |
74 | 74 | ||
75 | INIT_LIST_HEAD(&mr->mr_list); | ||
75 | return 0; | 76 | return 0; |
76 | 77 | ||
77 | out_fmr_err: | 78 | out_fmr_err: |
@@ -102,10 +103,6 @@ fmr_op_release_mr(struct rpcrdma_mr *mr) | |||
102 | LIST_HEAD(unmap_list); | 103 | LIST_HEAD(unmap_list); |
103 | int rc; | 104 | int rc; |
104 | 105 | ||
105 | /* Ensure MW is not on any rl_registered list */ | ||
106 | if (!list_empty(&mr->mr_list)) | ||
107 | list_del(&mr->mr_list); | ||
108 | |||
109 | kfree(mr->fmr.fm_physaddrs); | 106 | kfree(mr->fmr.fm_physaddrs); |
110 | kfree(mr->mr_sg); | 107 | kfree(mr->mr_sg); |
111 | 108 | ||
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index c5743a0960be..c59c5c788db0 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -110,6 +110,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) | |||
110 | if (!mr->mr_sg) | 110 | if (!mr->mr_sg) |
111 | goto out_list_err; | 111 | goto out_list_err; |
112 | 112 | ||
113 | INIT_LIST_HEAD(&mr->mr_list); | ||
113 | sg_init_table(mr->mr_sg, depth); | 114 | sg_init_table(mr->mr_sg, depth); |
114 | init_completion(&frwr->fr_linv_done); | 115 | init_completion(&frwr->fr_linv_done); |
115 | return 0; | 116 | return 0; |
@@ -133,10 +134,6 @@ frwr_op_release_mr(struct rpcrdma_mr *mr) | |||
133 | { | 134 | { |
134 | int rc; | 135 | int rc; |
135 | 136 | ||
136 | /* Ensure MR is not on any rl_registered list */ | ||
137 | if (!list_empty(&mr->mr_list)) | ||
138 | list_del(&mr->mr_list); | ||
139 | |||
140 | rc = ib_dereg_mr(mr->frwr.fr_mr); | 137 | rc = ib_dereg_mr(mr->frwr.fr_mr); |
141 | if (rc) | 138 | if (rc) |
142 | pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n", | 139 | pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n", |
@@ -195,7 +192,7 @@ frwr_op_recover_mr(struct rpcrdma_mr *mr) | |||
195 | return; | 192 | return; |
196 | 193 | ||
197 | out_release: | 194 | out_release: |
198 | pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr); | 195 | pr_err("rpcrdma: FRWR reset failed %d, %p released\n", rc, mr); |
199 | r_xprt->rx_stats.mrs_orphaned++; | 196 | r_xprt->rx_stats.mrs_orphaned++; |
200 | 197 | ||
201 | spin_lock(&r_xprt->rx_buf.rb_mrlock); | 198 | spin_lock(&r_xprt->rx_buf.rb_mrlock); |
@@ -476,7 +473,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs) | |||
476 | 473 | ||
477 | list_for_each_entry(mr, mrs, mr_list) | 474 | list_for_each_entry(mr, mrs, mr_list) |
478 | if (mr->mr_handle == rep->rr_inv_rkey) { | 475 | if (mr->mr_handle == rep->rr_inv_rkey) { |
479 | list_del(&mr->mr_list); | 476 | list_del_init(&mr->mr_list); |
480 | trace_xprtrdma_remoteinv(mr); | 477 | trace_xprtrdma_remoteinv(mr); |
481 | mr->frwr.fr_state = FRWR_IS_INVALID; | 478 | mr->frwr.fr_state = FRWR_IS_INVALID; |
482 | rpcrdma_mr_unmap_and_put(mr); | 479 | rpcrdma_mr_unmap_and_put(mr); |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index fe5eaca2d197..c345d365af88 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -1254,6 +1254,11 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) | |||
1254 | list_del(&mr->mr_all); | 1254 | list_del(&mr->mr_all); |
1255 | 1255 | ||
1256 | spin_unlock(&buf->rb_mrlock); | 1256 | spin_unlock(&buf->rb_mrlock); |
1257 | |||
1258 | /* Ensure MW is not on any rl_registered list */ | ||
1259 | if (!list_empty(&mr->mr_list)) | ||
1260 | list_del(&mr->mr_list); | ||
1261 | |||
1257 | ia->ri_ops->ro_release_mr(mr); | 1262 | ia->ri_ops->ro_release_mr(mr); |
1258 | count++; | 1263 | count++; |
1259 | spin_lock(&buf->rb_mrlock); | 1264 | spin_lock(&buf->rb_mrlock); |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 3d3b423fa9c1..cb41b12a3bf8 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -380,7 +380,7 @@ rpcrdma_mr_pop(struct list_head *list) | |||
380 | struct rpcrdma_mr *mr; | 380 | struct rpcrdma_mr *mr; |
381 | 381 | ||
382 | mr = list_first_entry(list, struct rpcrdma_mr, mr_list); | 382 | mr = list_first_entry(list, struct rpcrdma_mr, mr_list); |
383 | list_del(&mr->mr_list); | 383 | list_del_init(&mr->mr_list); |
384 | return mr; | 384 | return mr; |
385 | } | 385 | } |
386 | 386 | ||
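The xprtrdma hunks above initialise mr_list up front and switch list_del() to list_del_init(), so the later "if (!list_empty()) list_del()" cleanup in rpcrdma_mrs_destroy() stays safe. A userspace sketch of why that matters, using a minimal re-implementation of the kernel list primitives (not the kernel headers):

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);	/* node now reports "not on any list" */
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head registered, mr_list;

	INIT_LIST_HEAD(&registered);
	INIT_LIST_HEAD(&mr_list);	/* as the *_op_init_mr() hunks now do */

	list_add(&mr_list, &registered);
	list_del_init(&mr_list);	/* e.g. rpcrdma_mr_pop() */

	/* release path: only unlink if the node is still linked somewhere;
	 * with a plain list_del() above, this check would be meaningless */
	if (!list_empty(&mr_list))
		list_del_init(&mr_list);
	printf("mr_list empty: %d\n", list_empty(&mr_list));
	return 0;
}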
diff --git a/net/tipc/node.c b/net/tipc/node.c index baaf93f12cbd..f29549de9245 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -1950,6 +1950,7 @@ out: | |||
1950 | int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) | 1950 | int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) |
1951 | { | 1951 | { |
1952 | struct net *net = genl_info_net(info); | 1952 | struct net *net = genl_info_net(info); |
1953 | struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; | ||
1953 | struct tipc_nl_msg msg; | 1954 | struct tipc_nl_msg msg; |
1954 | char *name; | 1955 | char *name; |
1955 | int err; | 1956 | int err; |
@@ -1957,9 +1958,19 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) | |||
1957 | msg.portid = info->snd_portid; | 1958 | msg.portid = info->snd_portid; |
1958 | msg.seq = info->snd_seq; | 1959 | msg.seq = info->snd_seq; |
1959 | 1960 | ||
1960 | if (!info->attrs[TIPC_NLA_LINK_NAME]) | 1961 | if (!info->attrs[TIPC_NLA_LINK]) |
1961 | return -EINVAL; | 1962 | return -EINVAL; |
1962 | name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); | 1963 | |
1964 | err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, | ||
1965 | info->attrs[TIPC_NLA_LINK], | ||
1966 | tipc_nl_link_policy, info->extack); | ||
1967 | if (err) | ||
1968 | return err; | ||
1969 | |||
1970 | if (!attrs[TIPC_NLA_LINK_NAME]) | ||
1971 | return -EINVAL; | ||
1972 | |||
1973 | name = nla_data(attrs[TIPC_NLA_LINK_NAME]); | ||
1963 | 1974 | ||
1964 | msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 1975 | msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); |
1965 | if (!msg.skb) | 1976 | if (!msg.skb) |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 252a52ae0893..6be21575503a 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1516,10 +1516,10 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) | |||
1516 | 1516 | ||
1517 | srcaddr->sock.family = AF_TIPC; | 1517 | srcaddr->sock.family = AF_TIPC; |
1518 | srcaddr->sock.addrtype = TIPC_ADDR_ID; | 1518 | srcaddr->sock.addrtype = TIPC_ADDR_ID; |
1519 | srcaddr->sock.scope = 0; | ||
1519 | srcaddr->sock.addr.id.ref = msg_origport(hdr); | 1520 | srcaddr->sock.addr.id.ref = msg_origport(hdr); |
1520 | srcaddr->sock.addr.id.node = msg_orignode(hdr); | 1521 | srcaddr->sock.addr.id.node = msg_orignode(hdr); |
1521 | srcaddr->sock.addr.name.domain = 0; | 1522 | srcaddr->sock.addr.name.domain = 0; |
1522 | srcaddr->sock.scope = 0; | ||
1523 | m->msg_namelen = sizeof(struct sockaddr_tipc); | 1523 | m->msg_namelen = sizeof(struct sockaddr_tipc); |
1524 | 1524 | ||
1525 | if (!msg_in_group(hdr)) | 1525 | if (!msg_in_group(hdr)) |
@@ -1528,6 +1528,7 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) | |||
1528 | /* Group message users may also want to know sending member's id */ | 1528 | /* Group message users may also want to know sending member's id */ |
1529 | srcaddr->member.family = AF_TIPC; | 1529 | srcaddr->member.family = AF_TIPC; |
1530 | srcaddr->member.addrtype = TIPC_ADDR_NAME; | 1530 | srcaddr->member.addrtype = TIPC_ADDR_NAME; |
1531 | srcaddr->member.scope = 0; | ||
1531 | srcaddr->member.addr.name.name.type = msg_nametype(hdr); | 1532 | srcaddr->member.addr.name.name.type = msg_nametype(hdr); |
1532 | srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member; | 1533 | srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member; |
1533 | srcaddr->member.addr.name.domain = 0; | 1534 | srcaddr->member.addr.name.domain = 0; |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index cc03e00785c7..20cd93be6236 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -135,6 +135,7 @@ retry: | |||
135 | offset -= sg->offset; | 135 | offset -= sg->offset; |
136 | ctx->partially_sent_offset = offset; | 136 | ctx->partially_sent_offset = offset; |
137 | ctx->partially_sent_record = (void *)sg; | 137 | ctx->partially_sent_record = (void *)sg; |
138 | ctx->in_tcp_sendpages = false; | ||
138 | return ret; | 139 | return ret; |
139 | } | 140 | } |
140 | 141 | ||
@@ -248,16 +249,13 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) | |||
248 | struct tls_context *ctx = tls_get_ctx(sk); | 249 | struct tls_context *ctx = tls_get_ctx(sk); |
249 | long timeo = sock_sndtimeo(sk, 0); | 250 | long timeo = sock_sndtimeo(sk, 0); |
250 | void (*sk_proto_close)(struct sock *sk, long timeout); | 251 | void (*sk_proto_close)(struct sock *sk, long timeout); |
252 | bool free_ctx = false; | ||
251 | 253 | ||
252 | lock_sock(sk); | 254 | lock_sock(sk); |
253 | sk_proto_close = ctx->sk_proto_close; | 255 | sk_proto_close = ctx->sk_proto_close; |
254 | 256 | ||
255 | if (ctx->conf == TLS_HW_RECORD) | 257 | if (ctx->conf == TLS_BASE || ctx->conf == TLS_HW_RECORD) { |
256 | goto skip_tx_cleanup; | 258 | free_ctx = true; |
257 | |||
258 | if (ctx->conf == TLS_BASE) { | ||
259 | kfree(ctx); | ||
260 | ctx = NULL; | ||
261 | goto skip_tx_cleanup; | 259 | goto skip_tx_cleanup; |
262 | } | 260 | } |
263 | 261 | ||
@@ -294,7 +292,7 @@ skip_tx_cleanup: | |||
294 | /* free ctx for TLS_HW_RECORD, used by tcp_set_state | 292 | /* free ctx for TLS_HW_RECORD, used by tcp_set_state |
295 | * for sk->sk_prot->unhash [tls_hw_unhash] | 293 | * for sk->sk_prot->unhash [tls_hw_unhash] |
296 | */ | 294 | */ |
297 | if (ctx && ctx->conf == TLS_HW_RECORD) | 295 | if (free_ctx) |
298 | kfree(ctx); | 296 | kfree(ctx); |
299 | } | 297 | } |
300 | 298 | ||
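The tls_main.c change above folds the two "nothing to tear down, just free the context" cases (TLS_BASE and TLS_HW_RECORD) behind a single free_ctx flag with one kfree after the common label. A simplified userspace sketch of that cleanup consolidation; the enum values and function name are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum conf { TLS_BASE, TLS_SW, TLS_HW_RECORD };

static void proto_close(enum conf conf)
{
	int *ctx = malloc(sizeof(*ctx));
	bool free_ctx = false;

	if (!ctx)
		return;

	if (conf == TLS_BASE || conf == TLS_HW_RECORD) {
		free_ctx = true;	/* nothing TX-side to tear down */
		goto skip_tx_cleanup;
	}

	/* TLS_SW path: the TX teardown owns the context and frees it here */
	printf("tearing down TX state\n");
	free(ctx);

skip_tx_cleanup:
	if (free_ctx)
		free(ctx);	/* one place frees the "bare" configurations */
}

int main(void)
{
	proto_close(TLS_BASE);
	proto_close(TLS_SW);
	return 0;
}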
diff --git a/net/wireless/core.c b/net/wireless/core.c index a6f3cac8c640..c0fd8a85e7f7 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -95,6 +95,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev, | |||
95 | 95 | ||
96 | ASSERT_RTNL(); | 96 | ASSERT_RTNL(); |
97 | 97 | ||
98 | if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN) | ||
99 | return -EINVAL; | ||
100 | |||
98 | /* prohibit calling the thing phy%d when %d is not its number */ | 101 | /* prohibit calling the thing phy%d when %d is not its number */ |
99 | sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); | 102 | sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); |
100 | if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { | 103 | if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ff28f8feeb09..a052693c2e85 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -9214,6 +9214,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) | |||
9214 | 9214 | ||
9215 | if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) { | 9215 | if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) { |
9216 | if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { | 9216 | if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { |
9217 | kzfree(connkeys); | ||
9217 | GENL_SET_ERR_MSG(info, | 9218 | GENL_SET_ERR_MSG(info, |
9218 | "external auth requires connection ownership"); | 9219 | "external auth requires connection ownership"); |
9219 | return -EINVAL; | 9220 | return -EINVAL; |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 16c7e4ef5820..ac3e12c32aa3 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1026,6 +1026,7 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
1026 | 1026 | ||
1027 | if (!tmp_rd) { | 1027 | if (!tmp_rd) { |
1028 | kfree(regdom); | 1028 | kfree(regdom); |
1029 | kfree(wmm_ptrs); | ||
1029 | return -ENOMEM; | 1030 | return -ENOMEM; |
1030 | } | 1031 | } |
1031 | regdom = tmp_rd; | 1032 | regdom = tmp_rd; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f9d2f2233f09..6c177ae7a6d9 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -2175,6 +2175,12 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) | |||
2175 | return afinfo; | 2175 | return afinfo; |
2176 | } | 2176 | } |
2177 | 2177 | ||
2178 | void xfrm_flush_gc(void) | ||
2179 | { | ||
2180 | flush_work(&xfrm_state_gc_work); | ||
2181 | } | ||
2182 | EXPORT_SYMBOL(xfrm_flush_gc); | ||
2183 | |||
2178 | /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */ | 2184 | /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */ |
2179 | void xfrm_state_delete_tunnel(struct xfrm_state *x) | 2185 | void xfrm_state_delete_tunnel(struct xfrm_state *x) |
2180 | { | 2186 | { |
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c index c07ba4da9e36..815eaf140ab5 100644 --- a/scripts/dtc/checks.c +++ b/scripts/dtc/checks.c | |||
@@ -787,10 +787,9 @@ static void check_pci_bridge(struct check *c, struct dt_info *dti, struct node * | |||
787 | FAIL(c, dti, node, "incorrect #size-cells for PCI bridge"); | 787 | FAIL(c, dti, node, "incorrect #size-cells for PCI bridge"); |
788 | 788 | ||
789 | prop = get_property(node, "bus-range"); | 789 | prop = get_property(node, "bus-range"); |
790 | if (!prop) { | 790 | if (!prop) |
791 | FAIL(c, dti, node, "missing bus-range for PCI bridge"); | ||
792 | return; | 791 | return; |
793 | } | 792 | |
794 | if (prop->val.len != (sizeof(cell_t) * 2)) { | 793 | if (prop->val.len != (sizeof(cell_t) * 2)) { |
795 | FAIL_PROP(c, dti, node, prop, "value must be 2 cells"); | 794 | FAIL_PROP(c, dti, node, prop, "value must be 2 cells"); |
796 | return; | 795 | return; |
diff --git a/scripts/faddr2line b/scripts/faddr2line index 9e5735a4d3a5..1876a741087c 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line | |||
@@ -170,7 +170,10 @@ __faddr2line() { | |||
170 | echo "$file_lines" | while read -r line | 170 | echo "$file_lines" | while read -r line |
171 | do | 171 | do |
172 | echo $line | 172 | echo $line |
173 | eval $(echo $line | awk -F "[ :]" '{printf("n1=%d;n2=%d;f=%s",$NF-5, $NF+5, $(NF-1))}') | 173 | n=$(echo $line | sed 's/.*:\([0-9]\+\).*/\1/g') |
174 | n1=$[$n-5] | ||
175 | n2=$[$n+5] | ||
176 | f=$(echo $line | sed 's/.*at \(.\+\):.*/\1/g') | ||
174 | awk 'NR>=strtonum("'$n1'") && NR<=strtonum("'$n2'") {printf("%d\t%s\n", NR, $0)}' $f | 177 | awk 'NR>=strtonum("'$n1'") && NR<=strtonum("'$n2'") {printf("%d\t%s\n", NR, $0)}' $f |
175 | done | 178 | done |
176 | 179 | ||
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4cafe6a19167..be5817df0a9d 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -4576,6 +4576,7 @@ static int selinux_socket_post_create(struct socket *sock, int family, | |||
4576 | static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) | 4576 | static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) |
4577 | { | 4577 | { |
4578 | struct sock *sk = sock->sk; | 4578 | struct sock *sk = sock->sk; |
4579 | struct sk_security_struct *sksec = sk->sk_security; | ||
4579 | u16 family; | 4580 | u16 family; |
4580 | int err; | 4581 | int err; |
4581 | 4582 | ||
@@ -4587,11 +4588,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
4587 | family = sk->sk_family; | 4588 | family = sk->sk_family; |
4588 | if (family == PF_INET || family == PF_INET6) { | 4589 | if (family == PF_INET || family == PF_INET6) { |
4589 | char *addrp; | 4590 | char *addrp; |
4590 | struct sk_security_struct *sksec = sk->sk_security; | ||
4591 | struct common_audit_data ad; | 4591 | struct common_audit_data ad; |
4592 | struct lsm_network_audit net = {0,}; | 4592 | struct lsm_network_audit net = {0,}; |
4593 | struct sockaddr_in *addr4 = NULL; | 4593 | struct sockaddr_in *addr4 = NULL; |
4594 | struct sockaddr_in6 *addr6 = NULL; | 4594 | struct sockaddr_in6 *addr6 = NULL; |
4595 | u16 family_sa = address->sa_family; | ||
4595 | unsigned short snum; | 4596 | unsigned short snum; |
4596 | u32 sid, node_perm; | 4597 | u32 sid, node_perm; |
4597 | 4598 | ||
@@ -4601,11 +4602,20 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
4601 | * need to check address->sa_family as it is possible to have | 4602 | * need to check address->sa_family as it is possible to have |
4602 | * sk->sk_family = PF_INET6 with addr->sa_family = AF_INET. | 4603 | * sk->sk_family = PF_INET6 with addr->sa_family = AF_INET. |
4603 | */ | 4604 | */ |
4604 | switch (address->sa_family) { | 4605 | switch (family_sa) { |
4606 | case AF_UNSPEC: | ||
4605 | case AF_INET: | 4607 | case AF_INET: |
4606 | if (addrlen < sizeof(struct sockaddr_in)) | 4608 | if (addrlen < sizeof(struct sockaddr_in)) |
4607 | return -EINVAL; | 4609 | return -EINVAL; |
4608 | addr4 = (struct sockaddr_in *)address; | 4610 | addr4 = (struct sockaddr_in *)address; |
4611 | if (family_sa == AF_UNSPEC) { | ||
4612 | /* see __inet_bind(), we only want to allow | ||
4613 | * AF_UNSPEC if the address is INADDR_ANY | ||
4614 | */ | ||
4615 | if (addr4->sin_addr.s_addr != htonl(INADDR_ANY)) | ||
4616 | goto err_af; | ||
4617 | family_sa = AF_INET; | ||
4618 | } | ||
4609 | snum = ntohs(addr4->sin_port); | 4619 | snum = ntohs(addr4->sin_port); |
4610 | addrp = (char *)&addr4->sin_addr.s_addr; | 4620 | addrp = (char *)&addr4->sin_addr.s_addr; |
4611 | break; | 4621 | break; |
@@ -4617,15 +4627,14 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
4617 | addrp = (char *)&addr6->sin6_addr.s6_addr; | 4627 | addrp = (char *)&addr6->sin6_addr.s6_addr; |
4618 | break; | 4628 | break; |
4619 | default: | 4629 | default: |
4620 | /* Note that SCTP services expect -EINVAL, whereas | 4630 | goto err_af; |
4621 | * others expect -EAFNOSUPPORT. | ||
4622 | */ | ||
4623 | if (sksec->sclass == SECCLASS_SCTP_SOCKET) | ||
4624 | return -EINVAL; | ||
4625 | else | ||
4626 | return -EAFNOSUPPORT; | ||
4627 | } | 4631 | } |
4628 | 4632 | ||
4633 | ad.type = LSM_AUDIT_DATA_NET; | ||
4634 | ad.u.net = &net; | ||
4635 | ad.u.net->sport = htons(snum); | ||
4636 | ad.u.net->family = family_sa; | ||
4637 | |||
4629 | if (snum) { | 4638 | if (snum) { |
4630 | int low, high; | 4639 | int low, high; |
4631 | 4640 | ||
@@ -4637,10 +4646,6 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
4637 | snum, &sid); | 4646 | snum, &sid); |
4638 | if (err) | 4647 | if (err) |
4639 | goto out; | 4648 | goto out; |
4640 | ad.type = LSM_AUDIT_DATA_NET; | ||
4641 | ad.u.net = &net; | ||
4642 | ad.u.net->sport = htons(snum); | ||
4643 | ad.u.net->family = family; | ||
4644 | err = avc_has_perm(&selinux_state, | 4649 | err = avc_has_perm(&selinux_state, |
4645 | sksec->sid, sid, | 4650 | sksec->sid, sid, |
4646 | sksec->sclass, | 4651 | sksec->sclass, |
@@ -4672,16 +4677,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
4672 | break; | 4677 | break; |
4673 | } | 4678 | } |
4674 | 4679 | ||
4675 | err = sel_netnode_sid(addrp, family, &sid); | 4680 | err = sel_netnode_sid(addrp, family_sa, &sid); |
4676 | if (err) | 4681 | if (err) |
4677 | goto out; | 4682 | goto out; |
4678 | 4683 | ||
4679 | ad.type = LSM_AUDIT_DATA_NET; | 4684 | if (family_sa == AF_INET) |
4680 | ad.u.net = &net; | ||
4681 | ad.u.net->sport = htons(snum); | ||
4682 | ad.u.net->family = family; | ||
4683 | |||
4684 | if (address->sa_family == AF_INET) | ||
4685 | ad.u.net->v4info.saddr = addr4->sin_addr.s_addr; | 4685 | ad.u.net->v4info.saddr = addr4->sin_addr.s_addr; |
4686 | else | 4686 | else |
4687 | ad.u.net->v6info.saddr = addr6->sin6_addr; | 4687 | ad.u.net->v6info.saddr = addr6->sin6_addr; |
@@ -4694,6 +4694,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
4694 | } | 4694 | } |
4695 | out: | 4695 | out: |
4696 | return err; | 4696 | return err; |
4697 | err_af: | ||
4698 | /* Note that SCTP services expect -EINVAL, others -EAFNOSUPPORT. */ | ||
4699 | if (sksec->sclass == SECCLASS_SCTP_SOCKET) | ||
4700 | return -EINVAL; | ||
4701 | return -EAFNOSUPPORT; | ||
4697 | } | 4702 | } |
4698 | 4703 | ||
4699 | /* This supports connect(2) and SCTP connect services such as sctp_connectx(3) | 4704 | /* This supports connect(2) and SCTP connect services such as sctp_connectx(3) |
@@ -4771,7 +4776,7 @@ static int selinux_socket_connect_helper(struct socket *sock, | |||
4771 | ad.type = LSM_AUDIT_DATA_NET; | 4776 | ad.type = LSM_AUDIT_DATA_NET; |
4772 | ad.u.net = &net; | 4777 | ad.u.net = &net; |
4773 | ad.u.net->dport = htons(snum); | 4778 | ad.u.net->dport = htons(snum); |
4774 | ad.u.net->family = sk->sk_family; | 4779 | ad.u.net->family = address->sa_family; |
4775 | err = avc_has_perm(&selinux_state, | 4780 | err = avc_has_perm(&selinux_state, |
4776 | sksec->sid, sid, sksec->sclass, perm, &ad); | 4781 | sksec->sid, sid, sksec->sclass, perm, &ad); |
4777 | if (err) | 4782 | if (err) |
@@ -5272,6 +5277,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname, | |||
5272 | while (walk_size < addrlen) { | 5277 | while (walk_size < addrlen) { |
5273 | addr = addr_buf; | 5278 | addr = addr_buf; |
5274 | switch (addr->sa_family) { | 5279 | switch (addr->sa_family) { |
5280 | case AF_UNSPEC: | ||
5275 | case AF_INET: | 5281 | case AF_INET: |
5276 | len = sizeof(struct sockaddr_in); | 5282 | len = sizeof(struct sockaddr_in); |
5277 | break; | 5283 | break; |
@@ -5279,7 +5285,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname, | |||
5279 | len = sizeof(struct sockaddr_in6); | 5285 | len = sizeof(struct sockaddr_in6); |
5280 | break; | 5286 | break; |
5281 | default: | 5287 | default: |
5282 | return -EAFNOSUPPORT; | 5288 | return -EINVAL; |
5283 | } | 5289 | } |
5284 | 5290 | ||
5285 | err = -EINVAL; | 5291 | err = -EINVAL; |
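The selinux_socket_bind() hunks above add AF_UNSPEC handling that mirrors __inet_bind(): AF_UNSPEC is accepted only when the IPv4 address is INADDR_ANY, and the -EINVAL vs -EAFNOSUPPORT choice is centralised behind the err_af label. A standalone sketch of that family check (userspace, illustrative only, not the LSM hook):

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static int check_bind_family(const struct sockaddr *addr, socklen_t len, int sctp)
{
	switch (addr->sa_family) {
	case AF_UNSPEC:
	case AF_INET: {
		const struct sockaddr_in *a4 = (const struct sockaddr_in *)addr;

		if (len < sizeof(*a4))
			return -EINVAL;
		/* AF_UNSPEC is only tolerated for the wildcard address */
		if (addr->sa_family == AF_UNSPEC &&
		    a4->sin_addr.s_addr != htonl(INADDR_ANY))
			goto err_af;
		return 0;
	}
	case AF_INET6:
		return 0;
	default:
		goto err_af;
	}
err_af:
	/* SCTP callers expect -EINVAL, everything else -EAFNOSUPPORT */
	return sctp ? -EINVAL : -EAFNOSUPPORT;
}

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_UNSPEC,
				 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };

	printf("%d\n", check_bind_family((struct sockaddr *)&a, sizeof(a), 0));
	return 0;
}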
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index a848836a5de0..507fd5210c1c 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c | |||
@@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, | |||
396 | if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) || | 396 | if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) || |
397 | copy_from_user(&data->type, &data32->type, 3 * sizeof(u32))) | 397 | copy_from_user(&data->type, &data32->type, 3 * sizeof(u32))) |
398 | goto error; | 398 | goto error; |
399 | if (get_user(data->owner, &data32->owner) || | 399 | if (get_user(data->owner, &data32->owner)) |
400 | get_user(data->type, &data32->type)) | ||
401 | goto error; | 400 | goto error; |
402 | switch (data->type) { | 401 | switch (data->type) { |
403 | case SNDRV_CTL_ELEM_TYPE_BOOLEAN: | 402 | case SNDRV_CTL_ELEM_TYPE_BOOLEAN: |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index b0c8c79848a9..a0c93b9c9a28 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2210,6 +2210,8 @@ static struct snd_pci_quirk power_save_blacklist[] = { | |||
2210 | SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), | 2210 | SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), |
2211 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ | 2211 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ |
2212 | SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), | 2212 | SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), |
2213 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */ | ||
2214 | SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0), | ||
2213 | /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ | 2215 | /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ |
2214 | SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), | 2216 | SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), |
2215 | {} | 2217 | {} |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2dd34dd77447..01a6643fc7d4 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -2363,6 +2363,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
2363 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), | 2363 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), |
2364 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), | 2364 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), |
2365 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), | 2365 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), |
2366 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), | ||
2366 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2367 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
2367 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2368 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
2368 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), | 2369 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 344d7b069d59..bb5ab7a7dfa5 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -967,6 +967,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, | |||
967 | } | 967 | } |
968 | break; | 968 | break; |
969 | 969 | ||
970 | case USB_ID(0x0d8c, 0x0103): | ||
971 | if (!strcmp(kctl->id.name, "PCM Playback Volume")) { | ||
972 | usb_audio_info(chip, | ||
973 | "set volume quirk for CM102-A+/102S+\n"); | ||
974 | cval->min = -256; | ||
975 | } | ||
976 | break; | ||
977 | |||
970 | case USB_ID(0x0471, 0x0101): | 978 | case USB_ID(0x0471, 0x0101): |
971 | case USB_ID(0x0471, 0x0104): | 979 | case USB_ID(0x0471, 0x0104): |
972 | case USB_ID(0x0471, 0x0105): | 980 | case USB_ID(0x0471, 0x0105): |
diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 956be9f7c72a..5ed334575fc7 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c | |||
@@ -576,7 +576,7 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, | |||
576 | 576 | ||
577 | if (protocol == UAC_VERSION_1) { | 577 | if (protocol == UAC_VERSION_1) { |
578 | attributes = csep->bmAttributes; | 578 | attributes = csep->bmAttributes; |
579 | } else { | 579 | } else if (protocol == UAC_VERSION_2) { |
580 | struct uac2_iso_endpoint_descriptor *csep2 = | 580 | struct uac2_iso_endpoint_descriptor *csep2 = |
581 | (struct uac2_iso_endpoint_descriptor *) csep; | 581 | (struct uac2_iso_endpoint_descriptor *) csep; |
582 | 582 | ||
@@ -585,6 +585,13 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, | |||
585 | /* emulate the endpoint attributes of a v1 device */ | 585 | /* emulate the endpoint attributes of a v1 device */ |
586 | if (csep2->bmControls & UAC2_CONTROL_PITCH) | 586 | if (csep2->bmControls & UAC2_CONTROL_PITCH) |
587 | attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; | 587 | attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; |
588 | } else { /* UAC_VERSION_3 */ | ||
589 | struct uac3_iso_endpoint_descriptor *csep3 = | ||
590 | (struct uac3_iso_endpoint_descriptor *) csep; | ||
591 | |||
592 | /* emulate the endpoint attributes of a v1 device */ | ||
593 | if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH) | ||
594 | attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; | ||
588 | } | 595 | } |
589 | 596 | ||
590 | return attributes; | 597 | return attributes; |
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 2ba95d6fe852..caae4843cb70 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h | |||
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot { | |||
195 | #define KVM_REG_ARM_VFP_FPINST 0x1009 | 195 | #define KVM_REG_ARM_VFP_FPINST 0x1009 |
196 | #define KVM_REG_ARM_VFP_FPINST2 0x100A | 196 | #define KVM_REG_ARM_VFP_FPINST2 0x100A |
197 | 197 | ||
198 | /* KVM-as-firmware specific pseudo-registers */ | ||
199 | #define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) | ||
200 | #define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ | ||
201 | KVM_REG_ARM_FW | ((r) & 0xffff)) | ||
202 | #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) | ||
203 | |||
198 | /* Device Control API: ARM VGIC */ | 204 | /* Device Control API: ARM VGIC */ |
199 | #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 | 205 | #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 |
200 | #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 | 206 | #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 |
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 9abbf3044654..04b3256f8e6d 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -206,6 +206,12 @@ struct kvm_arch_memory_slot { | |||
206 | #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) | 206 | #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) |
207 | #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) | 207 | #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) |
208 | 208 | ||
209 | /* KVM-as-firmware specific pseudo-registers */ | ||
210 | #define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) | ||
211 | #define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ | ||
212 | KVM_REG_ARM_FW | ((r) & 0xffff)) | ||
213 | #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) | ||
214 | |||
209 | /* Device Control API: ARM VGIC */ | 215 | /* Device Control API: ARM VGIC */ |
210 | #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 | 216 | #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 |
211 | #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 | 217 | #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 |
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index d554c11e01ff..578793e97431 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
@@ -320,6 +320,7 @@ | |||
320 | #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ | 320 | #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ |
321 | #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ | 321 | #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ |
322 | #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ | 322 | #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ |
323 | #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ | ||
323 | 324 | ||
324 | /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ | 325 | /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ |
325 | #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ | 326 | #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ |
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h index b21b586b9854..1738c0391da4 100644 --- a/tools/include/linux/spinlock.h +++ b/tools/include/linux/spinlock.h | |||
@@ -6,8 +6,9 @@ | |||
6 | #include <stdbool.h> | 6 | #include <stdbool.h> |
7 | 7 | ||
8 | #define spinlock_t pthread_mutex_t | 8 | #define spinlock_t pthread_mutex_t |
9 | #define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER; | 9 | #define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER |
10 | #define __SPIN_LOCK_UNLOCKED(x) (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER | 10 | #define __SPIN_LOCK_UNLOCKED(x) (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER |
11 | #define spin_lock_init(x) pthread_mutex_init(x, NULL) | ||
11 | 12 | ||
12 | #define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x) | 13 | #define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x) |
13 | #define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x) | 14 | #define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x) |
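The tools/include/linux/spinlock.h hunk above adds a spin_lock_init() shim so that dynamically initialised spinlocks in code copied into the userspace test harness map to pthread_mutex_init(). A small demo of using those pthread-backed shims (the spin_lock/spin_unlock macros are redefined locally here for a self-contained example; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

#define spinlock_t		pthread_mutex_t
#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER
#define spin_lock_init(x)	pthread_mutex_init(x, NULL)
#define spin_lock(x)		pthread_mutex_lock(x)
#define spin_unlock(x)		pthread_mutex_unlock(x)

static DEFINE_SPINLOCK(static_lock);	/* static initialisation */

int main(void)
{
	spinlock_t dynamic_lock;	/* e.g. embedded in a runtime struct */

	spin_lock_init(&dynamic_lock);	/* maps to pthread_mutex_init() */

	spin_lock(&static_lock);
	spin_lock(&dynamic_lock);
	printf("both locks taken\n");
	spin_unlock(&dynamic_lock);
	spin_unlock(&static_lock);
	return 0;
}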
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 1065006c9bf5..b02c41e53d56 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h | |||
@@ -676,6 +676,13 @@ struct kvm_ioeventfd { | |||
676 | __u8 pad[36]; | 676 | __u8 pad[36]; |
677 | }; | 677 | }; |
678 | 678 | ||
679 | #define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0) | ||
680 | #define KVM_X86_DISABLE_EXITS_HTL (1 << 1) | ||
681 | #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) | ||
682 | #define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ | ||
683 | KVM_X86_DISABLE_EXITS_HTL | \ | ||
684 | KVM_X86_DISABLE_EXITS_PAUSE) | ||
685 | |||
679 | /* for KVM_ENABLE_CAP */ | 686 | /* for KVM_ENABLE_CAP */ |
680 | struct kvm_enable_cap { | 687 | struct kvm_enable_cap { |
681 | /* in */ | 688 | /* in */ |
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 944070e98a2c..63eb49082774 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c | |||
@@ -175,7 +175,7 @@ static const struct option options[] = { | |||
175 | OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run (default: 5 secs)"), | 175 | OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run (default: 5 secs)"), |
176 | OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"), | 176 | OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"), |
177 | 177 | ||
178 | OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via writes (can be mixed with -W)"), | 178 | OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via reads (can be mixed with -W)"), |
179 | OPT_BOOLEAN('W', "data_writes" , &p0.data_writes, "access the data via writes (can be mixed with -R)"), | 179 | OPT_BOOLEAN('W', "data_writes" , &p0.data_writes, "access the data via writes (can be mixed with -R)"), |
180 | OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards, "access the data backwards as well"), | 180 | OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards, "access the data backwards as well"), |
181 | OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"), | 181 | OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"), |
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv index 93656f2fd53a..7e3cce3bcf3b 100644 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv | |||
@@ -29,7 +29,6 @@ GenuineIntel-6-4D,v13,silvermont,core | |||
29 | GenuineIntel-6-4C,v13,silvermont,core | 29 | GenuineIntel-6-4C,v13,silvermont,core |
30 | GenuineIntel-6-2A,v15,sandybridge,core | 30 | GenuineIntel-6-2A,v15,sandybridge,core |
31 | GenuineIntel-6-2C,v2,westmereep-dp,core | 31 | GenuineIntel-6-2C,v2,westmereep-dp,core |
32 | GenuineIntel-6-2C,v2,westmereep-dp,core | ||
33 | GenuineIntel-6-25,v2,westmereep-sp,core | 32 | GenuineIntel-6-25,v2,westmereep-sp,core |
34 | GenuineIntel-6-2F,v2,westmereex,core | 33 | GenuineIntel-6-2F,v2,westmereex,core |
35 | GenuineIntel-6-55,v1,skylakex,core | 34 | GenuineIntel-6-55,v1,skylakex,core |
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index d14464c42714..7afeb80cc39e 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y | |||
@@ -224,15 +224,15 @@ event_def: event_pmu | | |||
224 | event_bpf_file | 224 | event_bpf_file |
225 | 225 | ||
226 | event_pmu: | 226 | event_pmu: |
227 | PE_NAME '/' event_config '/' | 227 | PE_NAME opt_event_config |
228 | { | 228 | { |
229 | struct list_head *list, *orig_terms, *terms; | 229 | struct list_head *list, *orig_terms, *terms; |
230 | 230 | ||
231 | if (parse_events_copy_term_list($3, &orig_terms)) | 231 | if (parse_events_copy_term_list($2, &orig_terms)) |
232 | YYABORT; | 232 | YYABORT; |
233 | 233 | ||
234 | ALLOC_LIST(list); | 234 | ALLOC_LIST(list); |
235 | if (parse_events_add_pmu(_parse_state, list, $1, $3, false)) { | 235 | if (parse_events_add_pmu(_parse_state, list, $1, $2, false)) { |
236 | struct perf_pmu *pmu = NULL; | 236 | struct perf_pmu *pmu = NULL; |
237 | int ok = 0; | 237 | int ok = 0; |
238 | char *pattern; | 238 | char *pattern; |
@@ -262,7 +262,7 @@ PE_NAME '/' event_config '/' | |||
262 | if (!ok) | 262 | if (!ok) |
263 | YYABORT; | 263 | YYABORT; |
264 | } | 264 | } |
265 | parse_events_terms__delete($3); | 265 | parse_events_terms__delete($2); |
266 | parse_events_terms__delete(orig_terms); | 266 | parse_events_terms__delete(orig_terms); |
267 | $$ = list; | 267 | $$ = list; |
268 | } | 268 | } |
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index fa7ee369b3c9..db66f8a0d4be 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile | |||
@@ -17,7 +17,7 @@ ifeq ($(BUILD), 32) | |||
17 | LDFLAGS += -m32 | 17 | LDFLAGS += -m32 |
18 | endif | 18 | endif |
19 | 19 | ||
20 | targets: mapshift $(TARGETS) | 20 | targets: generated/map-shift.h $(TARGETS) |
21 | 21 | ||
22 | main: $(OFILES) | 22 | main: $(OFILES) |
23 | 23 | ||
@@ -42,9 +42,7 @@ radix-tree.c: ../../../lib/radix-tree.c | |||
42 | idr.c: ../../../lib/idr.c | 42 | idr.c: ../../../lib/idr.c |
43 | sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ | 43 | sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ |
44 | 44 | ||
45 | .PHONY: mapshift | 45 | generated/map-shift.h: |
46 | |||
47 | mapshift: | ||
48 | @if ! grep -qws $(SHIFT) generated/map-shift.h; then \ | 46 | @if ! grep -qws $(SHIFT) generated/map-shift.h; then \ |
49 | echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ | 47 | echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ |
50 | generated/map-shift.h; \ | 48 | generated/map-shift.h; \ |
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 59245b3d587c..7bf405638b0b 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/radix-tree.h> | 16 | #include <linux/radix-tree.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <pthread.h> | ||
19 | 20 | ||
20 | #include "test.h" | 21 | #include "test.h" |
21 | 22 | ||
@@ -624,6 +625,67 @@ static void multiorder_account(void) | |||
624 | item_kill_tree(&tree); | 625 | item_kill_tree(&tree); |
625 | } | 626 | } |
626 | 627 | ||
628 | bool stop_iteration = false; | ||
629 | |||
630 | static void *creator_func(void *ptr) | ||
631 | { | ||
632 | /* 'order' is set up to ensure we have sibling entries */ | ||
633 | unsigned int order = RADIX_TREE_MAP_SHIFT - 1; | ||
634 | struct radix_tree_root *tree = ptr; | ||
635 | int i; | ||
636 | |||
637 | for (i = 0; i < 10000; i++) { | ||
638 | item_insert_order(tree, 0, order); | ||
639 | item_delete_rcu(tree, 0); | ||
640 | } | ||
641 | |||
642 | stop_iteration = true; | ||
643 | return NULL; | ||
644 | } | ||
645 | |||
646 | static void *iterator_func(void *ptr) | ||
647 | { | ||
648 | struct radix_tree_root *tree = ptr; | ||
649 | struct radix_tree_iter iter; | ||
650 | struct item *item; | ||
651 | void **slot; | ||
652 | |||
653 | while (!stop_iteration) { | ||
654 | rcu_read_lock(); | ||
655 | radix_tree_for_each_slot(slot, tree, &iter, 0) { | ||
656 | item = radix_tree_deref_slot(slot); | ||
657 | |||
658 | if (!item) | ||
659 | continue; | ||
660 | if (radix_tree_deref_retry(item)) { | ||
661 | slot = radix_tree_iter_retry(&iter); | ||
662 | continue; | ||
663 | } | ||
664 | |||
665 | item_sanity(item, iter.index); | ||
666 | } | ||
667 | rcu_read_unlock(); | ||
668 | } | ||
669 | return NULL; | ||
670 | } | ||
671 | |||
672 | static void multiorder_iteration_race(void) | ||
673 | { | ||
674 | const int num_threads = sysconf(_SC_NPROCESSORS_ONLN); | ||
675 | pthread_t worker_thread[num_threads]; | ||
676 | RADIX_TREE(tree, GFP_KERNEL); | ||
677 | int i; | ||
678 | |||
679 | pthread_create(&worker_thread[0], NULL, &creator_func, &tree); | ||
680 | for (i = 1; i < num_threads; i++) | ||
681 | pthread_create(&worker_thread[i], NULL, &iterator_func, &tree); | ||
682 | |||
683 | for (i = 0; i < num_threads; i++) | ||
684 | pthread_join(worker_thread[i], NULL); | ||
685 | |||
686 | item_kill_tree(&tree); | ||
687 | } | ||
688 | |||
627 | void multiorder_checks(void) | 689 | void multiorder_checks(void) |
628 | { | 690 | { |
629 | int i; | 691 | int i; |
@@ -644,6 +706,7 @@ void multiorder_checks(void) | |||
644 | multiorder_join(); | 706 | multiorder_join(); |
645 | multiorder_split(); | 707 | multiorder_split(); |
646 | multiorder_account(); | 708 | multiorder_account(); |
709 | multiorder_iteration_race(); | ||
647 | 710 | ||
648 | radix_tree_cpu_dead(0); | 711 | radix_tree_cpu_dead(0); |
649 | } | 712 | } |
diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index 5978ab1f403d..def6015570b2 100644 --- a/tools/testing/radix-tree/test.c +++ b/tools/testing/radix-tree/test.c | |||
@@ -75,6 +75,25 @@ int item_delete(struct radix_tree_root *root, unsigned long index) | |||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | static void item_free_rcu(struct rcu_head *head) | ||
79 | { | ||
80 | struct item *item = container_of(head, struct item, rcu_head); | ||
81 | |||
82 | free(item); | ||
83 | } | ||
84 | |||
85 | int item_delete_rcu(struct radix_tree_root *root, unsigned long index) | ||
86 | { | ||
87 | struct item *item = radix_tree_delete(root, index); | ||
88 | |||
89 | if (item) { | ||
90 | item_sanity(item, index); | ||
91 | call_rcu(&item->rcu_head, item_free_rcu); | ||
92 | return 1; | ||
93 | } | ||
94 | return 0; | ||
95 | } | ||
96 | |||
78 | void item_check_present(struct radix_tree_root *root, unsigned long index) | 97 | void item_check_present(struct radix_tree_root *root, unsigned long index) |
79 | { | 98 | { |
80 | struct item *item; | 99 | struct item *item; |
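The new item_delete_rcu() helper above unpublishes the item from the tree and then defers the actual free with call_rcu(), so iterators that picked up the pointer under rcu_read_lock() never touch freed memory. Below is a minimal userspace analogue of the same deferred-free pattern; it is illustrative only and not part of the patch, and it assumes liburcu's default flavour provides rcu_read_lock()/call_rcu() (the radix-tree test harness supplies its own shims instead).

    /* Sketch only: deferred free via an embedded rcu_head, assuming liburcu. */
    #include <stddef.h>
    #include <stdlib.h>
    #include <urcu.h>        /* rcu_read_lock(), call_rcu(), struct rcu_head */

    struct item {
            struct rcu_head rcu_head;   /* lives inside the object being freed */
            unsigned long index;
    };

    static void item_free_rcu(struct rcu_head *head)
    {
            /* Runs only after all pre-existing RCU readers have finished. */
            struct item *item = (struct item *)
                    ((char *)head - offsetof(struct item, rcu_head));

            free(item);
    }

    static void item_delete_deferred(struct item *item)
    {
            /* Caller has already removed the pointer from the shared structure
             * (e.g. via radix_tree_delete()); only the free is deferred here. */
            call_rcu(&item->rcu_head, item_free_rcu);
    }

The important ordering is the same as in the patch: remove the pointer from the tree first, then hand the object to call_rcu(); freeing immediately would race with the concurrent iterator added in multiorder.c.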
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index d9c031dbeb1a..31f1d9b6f506 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/rcupdate.h> | 5 | #include <linux/rcupdate.h> |
6 | 6 | ||
7 | struct item { | 7 | struct item { |
8 | struct rcu_head rcu_head; | ||
8 | unsigned long index; | 9 | unsigned long index; |
9 | unsigned int order; | 10 | unsigned int order; |
10 | }; | 11 | }; |
@@ -12,9 +13,11 @@ struct item { | |||
12 | struct item *item_create(unsigned long index, unsigned int order); | 13 | struct item *item_create(unsigned long index, unsigned int order); |
13 | int __item_insert(struct radix_tree_root *root, struct item *item); | 14 | int __item_insert(struct radix_tree_root *root, struct item *item); |
14 | int item_insert(struct radix_tree_root *root, unsigned long index); | 15 | int item_insert(struct radix_tree_root *root, unsigned long index); |
16 | void item_sanity(struct item *item, unsigned long index); | ||
15 | int item_insert_order(struct radix_tree_root *root, unsigned long index, | 17 | int item_insert_order(struct radix_tree_root *root, unsigned long index, |
16 | unsigned order); | 18 | unsigned order); |
17 | int item_delete(struct radix_tree_root *root, unsigned long index); | 19 | int item_delete(struct radix_tree_root *root, unsigned long index); |
20 | int item_delete_rcu(struct radix_tree_root *root, unsigned long index); | ||
18 | struct item *item_lookup(struct radix_tree_root *root, unsigned long index); | 21 | struct item *item_lookup(struct radix_tree_root *root, unsigned long index); |
19 | 22 | ||
20 | void item_check_present(struct radix_tree_root *root, unsigned long index); | 23 | void item_check_present(struct radix_tree_root *root, unsigned long index); |
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index daf5effec3f0..3ff81a478dbe 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile | |||
@@ -6,7 +6,7 @@ CFLAGS += -I../../../../usr/include/ | |||
6 | 6 | ||
7 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh | 7 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh |
8 | TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh | 8 | TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh |
9 | TEST_GEN_PROGS_EXTENDED := in_netns.sh | 9 | TEST_PROGS_EXTENDED := in_netns.sh |
10 | TEST_GEN_FILES = socket | 10 | TEST_GEN_FILES = socket |
11 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy | 11 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy |
12 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa | 12 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json index 5b012f4981d4..6f289a49e5ec 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json | |||
@@ -66,7 +66,7 @@ | |||
66 | "cmdUnderTest": "$TC action add action bpf object-file _b.o index 667", | 66 | "cmdUnderTest": "$TC action add action bpf object-file _b.o index 667", |
67 | "expExitCode": "0", | 67 | "expExitCode": "0", |
68 | "verifyCmd": "$TC action get action bpf index 667", | 68 | "verifyCmd": "$TC action get action bpf index 667", |
69 | "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c default-action pipe.*index 667 ref", | 69 | "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c( jited)? default-action pipe.*index 667 ref", |
70 | "matchCount": "1", | 70 | "matchCount": "1", |
71 | "teardown": [ | 71 | "teardown": [ |
72 | "$TC action flush action bpf", | 72 | "$TC action flush action bpf", |
@@ -92,10 +92,15 @@ | |||
92 | "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667", | 92 | "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667", |
93 | "expExitCode": "255", | 93 | "expExitCode": "255", |
94 | "verifyCmd": "$TC action get action bpf index 667", | 94 | "verifyCmd": "$TC action get action bpf index 667", |
95 | "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9].*index 667 ref", | 95 | "matchPattern": "action order [0-9]*: bpf _c.o:\\[action\\] id [0-9].*index 667 ref", |
96 | "matchCount": "0", | 96 | "matchCount": "0", |
97 | "teardown": [ | 97 | "teardown": [ |
98 | "$TC action flush action bpf", | 98 | [ |
99 | "$TC action flush action bpf", | ||
100 | 0, | ||
101 | 1, | ||
102 | 255 | ||
103 | ], | ||
99 | "rm -f _c.o" | 104 | "rm -f _c.o" |
100 | ] | 105 | ] |
101 | }, | 106 | }, |
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c index 10b38178cff2..4ffc0b5e6105 100644 --- a/virt/kvm/arm/vgic/vgic-debug.c +++ b/virt/kvm/arm/vgic/vgic-debug.c | |||
@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v) | |||
211 | struct vgic_state_iter *iter = (struct vgic_state_iter *)v; | 211 | struct vgic_state_iter *iter = (struct vgic_state_iter *)v; |
212 | struct vgic_irq *irq; | 212 | struct vgic_irq *irq; |
213 | struct kvm_vcpu *vcpu = NULL; | 213 | struct kvm_vcpu *vcpu = NULL; |
214 | unsigned long flags; | ||
214 | 215 | ||
215 | if (iter->dist_id == 0) { | 216 | if (iter->dist_id == 0) { |
216 | print_dist_state(s, &kvm->arch.vgic); | 217 | print_dist_state(s, &kvm->arch.vgic); |
@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v) | |||
227 | irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS]; | 228 | irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS]; |
228 | } | 229 | } |
229 | 230 | ||
230 | spin_lock(&irq->irq_lock); | 231 | spin_lock_irqsave(&irq->irq_lock, flags); |
231 | print_irq_state(s, irq, vcpu); | 232 | print_irq_state(s, irq, vcpu); |
232 | spin_unlock(&irq->irq_lock); | 233 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
233 | 234 | ||
234 | return 0; | 235 | return 0; |
235 | } | 236 | } |
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index a8f07243aa9f..4ed79c939fb4 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c | |||
@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
52 | { | 52 | { |
53 | struct vgic_dist *dist = &kvm->arch.vgic; | 53 | struct vgic_dist *dist = &kvm->arch.vgic; |
54 | struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq; | 54 | struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq; |
55 | unsigned long flags; | ||
55 | int ret; | 56 | int ret; |
56 | 57 | ||
57 | /* In this case there is no put, since we keep the reference. */ | 58 | /* In this case there is no put, since we keep the reference. */ |
@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
71 | irq->intid = intid; | 72 | irq->intid = intid; |
72 | irq->target_vcpu = vcpu; | 73 | irq->target_vcpu = vcpu; |
73 | 74 | ||
74 | spin_lock(&dist->lpi_list_lock); | 75 | spin_lock_irqsave(&dist->lpi_list_lock, flags); |
75 | 76 | ||
76 | /* | 77 | /* |
77 | * There could be a race with another vgic_add_lpi(), so we need to | 78 | * There could be a race with another vgic_add_lpi(), so we need to |
@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
99 | dist->lpi_list_count++; | 100 | dist->lpi_list_count++; |
100 | 101 | ||
101 | out_unlock: | 102 | out_unlock: |
102 | spin_unlock(&dist->lpi_list_lock); | 103 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
103 | 104 | ||
104 | /* | 105 | /* |
105 | * We "cache" the configuration table entries in our struct vgic_irq's. | 106 | * We "cache" the configuration table entries in our struct vgic_irq's. |
@@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
280 | int ret; | 281 | int ret; |
281 | unsigned long flags; | 282 | unsigned long flags; |
282 | 283 | ||
283 | ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET, | 284 | ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET, |
284 | &prop, 1); | 285 | &prop, 1); |
285 | 286 | ||
286 | if (ret) | 287 | if (ret) |
287 | return ret; | 288 | return ret; |
@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
315 | { | 316 | { |
316 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 317 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
317 | struct vgic_irq *irq; | 318 | struct vgic_irq *irq; |
319 | unsigned long flags; | ||
318 | u32 *intids; | 320 | u32 *intids; |
319 | int irq_count, i = 0; | 321 | int irq_count, i = 0; |
320 | 322 | ||
@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
330 | if (!intids) | 332 | if (!intids) |
331 | return -ENOMEM; | 333 | return -ENOMEM; |
332 | 334 | ||
333 | spin_lock(&dist->lpi_list_lock); | 335 | spin_lock_irqsave(&dist->lpi_list_lock, flags); |
334 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 336 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
335 | if (i == irq_count) | 337 | if (i == irq_count) |
336 | break; | 338 | break; |
@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
339 | continue; | 341 | continue; |
340 | intids[i++] = irq->intid; | 342 | intids[i++] = irq->intid; |
341 | } | 343 | } |
342 | spin_unlock(&dist->lpi_list_lock); | 344 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
343 | 345 | ||
344 | *intid_ptr = intids; | 346 | *intid_ptr = intids; |
345 | return i; | 347 | return i; |
@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
348 | static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) | 350 | static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) |
349 | { | 351 | { |
350 | int ret = 0; | 352 | int ret = 0; |
353 | unsigned long flags; | ||
351 | 354 | ||
352 | spin_lock(&irq->irq_lock); | 355 | spin_lock_irqsave(&irq->irq_lock, flags); |
353 | irq->target_vcpu = vcpu; | 356 | irq->target_vcpu = vcpu; |
354 | spin_unlock(&irq->irq_lock); | 357 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
355 | 358 | ||
356 | if (irq->hw) { | 359 | if (irq->hw) { |
357 | struct its_vlpi_map map; | 360 | struct its_vlpi_map map; |
@@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) | |||
441 | * this very same byte in the last iteration. Reuse that. | 444 | * this very same byte in the last iteration. Reuse that. |
442 | */ | 445 | */ |
443 | if (byte_offset != last_byte_offset) { | 446 | if (byte_offset != last_byte_offset) { |
444 | ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset, | 447 | ret = kvm_read_guest_lock(vcpu->kvm, |
445 | &pendmask, 1); | 448 | pendbase + byte_offset, |
449 | &pendmask, 1); | ||
446 | if (ret) { | 450 | if (ret) { |
447 | kfree(intids); | 451 | kfree(intids); |
448 | return ret; | 452 | return ret; |
@@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, | |||
786 | return false; | 790 | return false; |
787 | 791 | ||
788 | /* Each 1st level entry is represented by a 64-bit value. */ | 792 | /* Each 1st level entry is represented by a 64-bit value. */ |
789 | if (kvm_read_guest(its->dev->kvm, | 793 | if (kvm_read_guest_lock(its->dev->kvm, |
790 | BASER_ADDRESS(baser) + index * sizeof(indirect_ptr), | 794 | BASER_ADDRESS(baser) + index * sizeof(indirect_ptr), |
791 | &indirect_ptr, sizeof(indirect_ptr))) | 795 | &indirect_ptr, sizeof(indirect_ptr))) |
792 | return false; | 796 | return false; |
@@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its) | |||
1367 | cbaser = CBASER_ADDRESS(its->cbaser); | 1371 | cbaser = CBASER_ADDRESS(its->cbaser); |
1368 | 1372 | ||
1369 | while (its->cwriter != its->creadr) { | 1373 | while (its->cwriter != its->creadr) { |
1370 | int ret = kvm_read_guest(kvm, cbaser + its->creadr, | 1374 | int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr, |
1371 | cmd_buf, ITS_CMD_SIZE); | 1375 | cmd_buf, ITS_CMD_SIZE); |
1372 | /* | 1376 | /* |
1373 | * If kvm_read_guest() fails, this could be due to the guest | 1377 | * If kvm_read_guest() fails, this could be due to the guest |
1374 | * programming a bogus value in CBASER or something else going | 1378 | * programming a bogus value in CBASER or something else going |
@@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz, | |||
1893 | int next_offset; | 1897 | int next_offset; |
1894 | size_t byte_offset; | 1898 | size_t byte_offset; |
1895 | 1899 | ||
1896 | ret = kvm_read_guest(kvm, gpa, entry, esz); | 1900 | ret = kvm_read_guest_lock(kvm, gpa, entry, esz); |
1897 | if (ret) | 1901 | if (ret) |
1898 | return ret; | 1902 | return ret; |
1899 | 1903 | ||
@@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) | |||
2263 | int ret; | 2267 | int ret; |
2264 | 2268 | ||
2265 | BUG_ON(esz > sizeof(val)); | 2269 | BUG_ON(esz > sizeof(val)); |
2266 | ret = kvm_read_guest(kvm, gpa, &val, esz); | 2270 | ret = kvm_read_guest_lock(kvm, gpa, &val, esz); |
2267 | if (ret) | 2271 | if (ret) |
2268 | return ret; | 2272 | return ret; |
2269 | val = le64_to_cpu(val); | 2273 | val = le64_to_cpu(val); |
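The ITS table walks above switch from kvm_read_guest() to kvm_read_guest_lock() because reading guest memory dereferences the memslot array, which must be done under the kvm->srcu read lock, and these paths are not guaranteed to hold it. The helper is introduced elsewhere in this merge; as a rough sketch (the exact definition lives in the arm/arm64 KVM headers and may differ in detail), it amounts to wrapping the read in an SRCU read-side critical section:

    /* Sketch only: approximately what kvm_read_guest_lock() does. */
    static inline int kvm_read_guest_lock(struct kvm *kvm, gpa_t gpa,
                                          void *data, unsigned long len)
    {
            int srcu_idx = srcu_read_lock(&kvm->srcu);
            int ret = kvm_read_guest(kvm, gpa, data, len);

            srcu_read_unlock(&kvm->srcu, srcu_idx);
            return ret;
    }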
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index c7423f3768e5..bdcf8e7a6161 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -344,7 +344,7 @@ retry: | |||
344 | bit_nr = irq->intid % BITS_PER_BYTE; | 344 | bit_nr = irq->intid % BITS_PER_BYTE; |
345 | ptr = pendbase + byte_offset; | 345 | ptr = pendbase + byte_offset; |
346 | 346 | ||
347 | ret = kvm_read_guest(kvm, ptr, &val, 1); | 347 | ret = kvm_read_guest_lock(kvm, ptr, &val, 1); |
348 | if (ret) | 348 | if (ret) |
349 | return ret; | 349 | return ret; |
350 | 350 | ||
@@ -397,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) | |||
397 | ptr = pendbase + byte_offset; | 397 | ptr = pendbase + byte_offset; |
398 | 398 | ||
399 | if (byte_offset != last_byte_offset) { | 399 | if (byte_offset != last_byte_offset) { |
400 | ret = kvm_read_guest(kvm, ptr, &val, 1); | 400 | ret = kvm_read_guest_lock(kvm, ptr, &val, 1); |
401 | if (ret) | 401 | if (ret) |
402 | return ret; | 402 | return ret; |
403 | last_byte_offset = byte_offset; | 403 | last_byte_offset = byte_offset; |
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 97bfba8d9a59..33c8325c8f35 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -43,9 +43,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { | |||
43 | * kvm->lock (mutex) | 43 | * kvm->lock (mutex) |
44 | * its->cmd_lock (mutex) | 44 | * its->cmd_lock (mutex) |
45 | * its->its_lock (mutex) | 45 | * its->its_lock (mutex) |
46 | * vgic_cpu->ap_list_lock | 46 | * vgic_cpu->ap_list_lock must be taken with IRQs disabled |
47 | * kvm->lpi_list_lock | 47 | * kvm->lpi_list_lock must be taken with IRQs disabled |
48 | * vgic_irq->irq_lock | 48 | * vgic_irq->irq_lock must be taken with IRQs disabled |
49 | * | ||
50 | * As the ap_list_lock might be taken from the timer interrupt handler, | ||
51 | * we have to disable IRQs before taking this lock and everything lower | ||
52 | * than it. | ||
49 | * | 53 | * |
50 | * If you need to take multiple locks, always take the upper lock first, | 54 | * If you need to take multiple locks, always take the upper lock first, |
51 | * then the lower ones, e.g. first take the its_lock, then the irq_lock. | 55 | * then the lower ones, e.g. first take the its_lock, then the irq_lock. |
@@ -72,8 +76,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
72 | { | 76 | { |
73 | struct vgic_dist *dist = &kvm->arch.vgic; | 77 | struct vgic_dist *dist = &kvm->arch.vgic; |
74 | struct vgic_irq *irq = NULL; | 78 | struct vgic_irq *irq = NULL; |
79 | unsigned long flags; | ||
75 | 80 | ||
76 | spin_lock(&dist->lpi_list_lock); | 81 | spin_lock_irqsave(&dist->lpi_list_lock, flags); |
77 | 82 | ||
78 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 83 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
79 | if (irq->intid != intid) | 84 | if (irq->intid != intid) |
@@ -89,7 +94,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
89 | irq = NULL; | 94 | irq = NULL; |
90 | 95 | ||
91 | out_unlock: | 96 | out_unlock: |
92 | spin_unlock(&dist->lpi_list_lock); | 97 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
93 | 98 | ||
94 | return irq; | 99 | return irq; |
95 | } | 100 | } |
@@ -134,19 +139,20 @@ static void vgic_irq_release(struct kref *ref) | |||
134 | void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) | 139 | void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) |
135 | { | 140 | { |
136 | struct vgic_dist *dist = &kvm->arch.vgic; | 141 | struct vgic_dist *dist = &kvm->arch.vgic; |
142 | unsigned long flags; | ||
137 | 143 | ||
138 | if (irq->intid < VGIC_MIN_LPI) | 144 | if (irq->intid < VGIC_MIN_LPI) |
139 | return; | 145 | return; |
140 | 146 | ||
141 | spin_lock(&dist->lpi_list_lock); | 147 | spin_lock_irqsave(&dist->lpi_list_lock, flags); |
142 | if (!kref_put(&irq->refcount, vgic_irq_release)) { | 148 | if (!kref_put(&irq->refcount, vgic_irq_release)) { |
143 | spin_unlock(&dist->lpi_list_lock); | 149 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
144 | return; | 150 | return; |
145 | }; | 151 | }; |
146 | 152 | ||
147 | list_del(&irq->lpi_list); | 153 | list_del(&irq->lpi_list); |
148 | dist->lpi_list_count--; | 154 | dist->lpi_list_count--; |
149 | spin_unlock(&dist->lpi_list_lock); | 155 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
150 | 156 | ||
151 | kfree(irq); | 157 | kfree(irq); |
152 | } | 158 | } |
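The spin_lock() to spin_lock_irqsave() conversions in this file follow from the updated lock-order comment: ap_list_lock, and every lock ordered below it, may be taken from the timer interrupt handler, so taking one of these locks with interrupts enabled can deadlock if that interrupt fires on the same CPU while the lock is held. A generic sketch of the pattern, illustrative only and not taken from the patch:

    /* Illustrative only: a spinlock that an interrupt handler may also take. */
    unsigned long flags;

    /* Unsafe if the IRQ can fire on this CPU while the lock is held:
     *     spin_lock(&irq->irq_lock);
     *     ... update shared state ...
     *     spin_unlock(&irq->irq_lock);
     */

    /* Safe: disable local interrupts for the critical section, then restore
     * whatever interrupt state the caller had. */
    spin_lock_irqsave(&irq->irq_lock, flags);
    /* ... update state that the interrupt path also reads or writes ... */
    spin_unlock_irqrestore(&irq->irq_lock, flags);

Using the irqsave/irqrestore variants (rather than unconditionally enabling interrupts on unlock) lets the same helpers be called both from contexts where interrupts are still enabled and from ones where they were already disabled further up the documented lock chain.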