diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2017-02-16 13:51:27 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2017-02-16 13:51:27 -0500 |
commit | 5b1ad68f9b1951ef78312d2935906cc8a8bd2e12 (patch) | |
tree | e81b71c961e885cef0d3992e083acec845166870 | |
parent | 1013fe32a63d1139b1b32049ea46c0c462738d8b (diff) | |
parent | 7089db84e356562f8ba737c29e472cc42d530dbc (diff) |
Merge branch 'linus' into x86/mm
Make sure to get the latest fixes before applying the ptdump enhancements.
389 files changed, 3340 insertions, 1994 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt index 0dcb7c7d3e40..944657684d73 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | |||
@@ -15,6 +15,9 @@ Properties: | |||
15 | Second cell specifies the irq distribution mode to cores | 15 | Second cell specifies the irq distribution mode to cores |
16 | 0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3 | 16 | 0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3 |
17 | 17 | ||
18 | The second cell in interrupts property is deprecated and may be ignored by | ||
19 | the kernel. | ||
20 | |||
18 | intc accessed via the special ARC AUX register interface, hence "reg" property | 21 | intc accessed via the special ARC AUX register interface, hence "reg" property |
19 | is not specified. | 22 | is not specified. |
20 | 23 | ||
diff --git a/Documentation/media/uapi/cec/cec-func-close.rst b/Documentation/media/uapi/cec/cec-func-close.rst index 8267c31b317d..895d9c2d1c04 100644 --- a/Documentation/media/uapi/cec/cec-func-close.rst +++ b/Documentation/media/uapi/cec/cec-func-close.rst | |||
@@ -33,11 +33,6 @@ Arguments | |||
33 | Description | 33 | Description |
34 | =========== | 34 | =========== |
35 | 35 | ||
36 | .. note:: | ||
37 | |||
38 | This documents the proposed CEC API. This API is not yet finalized | ||
39 | and is currently only available as a staging kernel module. | ||
40 | |||
41 | Closes the cec device. Resources associated with the file descriptor are | 36 | Closes the cec device. Resources associated with the file descriptor are |
42 | freed. The device configuration remain unchanged. | 37 | freed. The device configuration remain unchanged. |
43 | 38 | ||
diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst index 9e8dbb118d6a..7dcfd178fb24 100644 --- a/Documentation/media/uapi/cec/cec-func-ioctl.rst +++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst | |||
@@ -39,11 +39,6 @@ Arguments | |||
39 | Description | 39 | Description |
40 | =========== | 40 | =========== |
41 | 41 | ||
42 | .. note:: | ||
43 | |||
44 | This documents the proposed CEC API. This API is not yet finalized | ||
45 | and is currently only available as a staging kernel module. | ||
46 | |||
47 | The :c:func:`ioctl()` function manipulates cec device parameters. The | 42 | The :c:func:`ioctl()` function manipulates cec device parameters. The |
48 | argument ``fd`` must be an open file descriptor. | 43 | argument ``fd`` must be an open file descriptor. |
49 | 44 | ||
diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst index af3f5b5c24c6..0304388cd159 100644 --- a/Documentation/media/uapi/cec/cec-func-open.rst +++ b/Documentation/media/uapi/cec/cec-func-open.rst | |||
@@ -46,11 +46,6 @@ Arguments | |||
46 | Description | 46 | Description |
47 | =========== | 47 | =========== |
48 | 48 | ||
49 | .. note:: | ||
50 | |||
51 | This documents the proposed CEC API. This API is not yet finalized | ||
52 | and is currently only available as a staging kernel module. | ||
53 | |||
54 | To open a cec device applications call :c:func:`open()` with the | 49 | To open a cec device applications call :c:func:`open()` with the |
55 | desired device name. The function has no side effects; the device | 50 | desired device name. The function has no side effects; the device |
56 | configuration remain unchanged. | 51 | configuration remain unchanged. |
diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst index cfb73e6027a5..6a863cfda6e0 100644 --- a/Documentation/media/uapi/cec/cec-func-poll.rst +++ b/Documentation/media/uapi/cec/cec-func-poll.rst | |||
@@ -39,11 +39,6 @@ Arguments | |||
39 | Description | 39 | Description |
40 | =========== | 40 | =========== |
41 | 41 | ||
42 | .. note:: | ||
43 | |||
44 | This documents the proposed CEC API. This API is not yet finalized | ||
45 | and is currently only available as a staging kernel module. | ||
46 | |||
47 | With the :c:func:`poll()` function applications can wait for CEC | 42 | With the :c:func:`poll()` function applications can wait for CEC |
48 | events. | 43 | events. |
49 | 44 | ||
diff --git a/Documentation/media/uapi/cec/cec-intro.rst b/Documentation/media/uapi/cec/cec-intro.rst index 4a19ea5323a9..07ee2b8f89d6 100644 --- a/Documentation/media/uapi/cec/cec-intro.rst +++ b/Documentation/media/uapi/cec/cec-intro.rst | |||
@@ -3,11 +3,6 @@ | |||
3 | Introduction | 3 | Introduction |
4 | ============ | 4 | ============ |
5 | 5 | ||
6 | .. note:: | ||
7 | |||
8 | This documents the proposed CEC API. This API is not yet finalized | ||
9 | and is currently only available as a staging kernel module. | ||
10 | |||
11 | HDMI connectors provide a single pin for use by the Consumer Electronics | 6 | HDMI connectors provide a single pin for use by the Consumer Electronics |
12 | Control protocol. This protocol allows different devices connected by an | 7 | Control protocol. This protocol allows different devices connected by an |
13 | HDMI cable to communicate. The protocol for CEC version 1.4 is defined | 8 | HDMI cable to communicate. The protocol for CEC version 1.4 is defined |
@@ -31,3 +26,15 @@ control just the CEC pin. | |||
31 | Drivers that support CEC will create a CEC device node (/dev/cecX) to | 26 | Drivers that support CEC will create a CEC device node (/dev/cecX) to |
32 | give userspace access to the CEC adapter. The | 27 | give userspace access to the CEC adapter. The |
33 | :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do. | 28 | :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do. |
29 | |||
30 | In order to check the support and test it, it is suggested to download | ||
31 | the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It | ||
32 | provides three tools to handle CEC: | ||
33 | |||
34 | - cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit | ||
35 | and monitor CEC messages. | ||
36 | |||
37 | - cec-compliance: does a CEC compliance test of a remote CEC device to | ||
38 | determine how compliant the CEC implementation is. | ||
39 | |||
40 | - cec-follower: emulates a CEC follower. | ||
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst index 2b0ddb14b280..a0e961f11017 100644 --- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst +++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst | |||
@@ -29,11 +29,6 @@ Arguments | |||
29 | Description | 29 | Description |
30 | =========== | 30 | =========== |
31 | 31 | ||
32 | .. note:: | ||
33 | |||
34 | This documents the proposed CEC API. This API is not yet finalized | ||
35 | and is currently only available as a staging kernel module. | ||
36 | |||
37 | All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query | 32 | All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query |
38 | device information, applications call the ioctl with a pointer to a | 33 | device information, applications call the ioctl with a pointer to a |
39 | struct :c:type:`cec_caps`. The driver fills the structure and | 34 | struct :c:type:`cec_caps`. The driver fills the structure and |
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst index b878637e91b3..09f09bbe28d4 100644 --- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst +++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst | |||
@@ -35,11 +35,6 @@ Arguments | |||
35 | Description | 35 | Description |
36 | =========== | 36 | =========== |
37 | 37 | ||
38 | .. note:: | ||
39 | |||
40 | This documents the proposed CEC API. This API is not yet finalized | ||
41 | and is currently only available as a staging kernel module. | ||
42 | |||
43 | To query the current CEC logical addresses, applications call | 38 | To query the current CEC logical addresses, applications call |
44 | :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a | 39 | :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a |
45 | struct :c:type:`cec_log_addrs` where the driver stores the logical addresses. | 40 | struct :c:type:`cec_log_addrs` where the driver stores the logical addresses. |
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst index 3357deb43c85..a3cdc75cec3e 100644 --- a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst +++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst | |||
@@ -35,11 +35,6 @@ Arguments | |||
35 | Description | 35 | Description |
36 | =========== | 36 | =========== |
37 | 37 | ||
38 | .. note:: | ||
39 | |||
40 | This documents the proposed CEC API. This API is not yet finalized | ||
41 | and is currently only available as a staging kernel module. | ||
42 | |||
43 | To query the current physical address applications call | 38 | To query the current physical address applications call |
44 | :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the | 39 | :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the |
45 | driver stores the physical address. | 40 | driver stores the physical address. |
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst index e256c6605de7..6e589a1fae17 100644 --- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst +++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst | |||
@@ -30,11 +30,6 @@ Arguments | |||
30 | Description | 30 | Description |
31 | =========== | 31 | =========== |
32 | 32 | ||
33 | .. note:: | ||
34 | |||
35 | This documents the proposed CEC API. This API is not yet finalized | ||
36 | and is currently only available as a staging kernel module. | ||
37 | |||
38 | CEC devices can send asynchronous events. These can be retrieved by | 33 | CEC devices can send asynchronous events. These can be retrieved by |
39 | calling :c:func:`CEC_DQEVENT`. If the file descriptor is in | 34 | calling :c:func:`CEC_DQEVENT`. If the file descriptor is in |
40 | non-blocking mode and no event is pending, then it will return -1 and | 35 | non-blocking mode and no event is pending, then it will return -1 and |
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst index 4f5818b9d277..e4ded9df0a84 100644 --- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst +++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst | |||
@@ -31,11 +31,6 @@ Arguments | |||
31 | Description | 31 | Description |
32 | =========== | 32 | =========== |
33 | 33 | ||
34 | .. note:: | ||
35 | |||
36 | This documents the proposed CEC API. This API is not yet finalized | ||
37 | and is currently only available as a staging kernel module. | ||
38 | |||
39 | By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent | 34 | By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent |
40 | applications from stepping on each others toes it must be possible to | 35 | applications from stepping on each others toes it must be possible to |
41 | obtain exclusive access to the CEC adapter. This ioctl sets the | 36 | obtain exclusive access to the CEC adapter. This ioctl sets the |
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst index bdf015b1d1dc..dc2adb391c0a 100644 --- a/Documentation/media/uapi/cec/cec-ioc-receive.rst +++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst | |||
@@ -34,11 +34,6 @@ Arguments | |||
34 | Description | 34 | Description |
35 | =========== | 35 | =========== |
36 | 36 | ||
37 | .. note:: | ||
38 | |||
39 | This documents the proposed CEC API. This API is not yet finalized | ||
40 | and is currently only available as a staging kernel module. | ||
41 | |||
42 | To receive a CEC message the application has to fill in the | 37 | To receive a CEC message the application has to fill in the |
43 | ``timeout`` field of struct :c:type:`cec_msg` and pass it to | 38 | ``timeout`` field of struct :c:type:`cec_msg` and pass it to |
44 | :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`. | 39 | :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`. |
diff --git a/MAINTAINERS b/MAINTAINERS index 5f10c28b2e15..107c10e8f2d2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1091,7 +1091,7 @@ F: arch/arm/boot/dts/aspeed-* | |||
1091 | F: drivers/*/*aspeed* | 1091 | F: drivers/*/*aspeed* |
1092 | 1092 | ||
1093 | ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT | 1093 | ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT |
1094 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 1094 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
1095 | M: Alexandre Belloni <alexandre.belloni@free-electrons.com> | 1095 | M: Alexandre Belloni <alexandre.belloni@free-electrons.com> |
1096 | M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com> | 1096 | M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com> |
1097 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1097 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -1773,7 +1773,7 @@ F: drivers/soc/renesas/ | |||
1773 | F: include/linux/soc/renesas/ | 1773 | F: include/linux/soc/renesas/ |
1774 | 1774 | ||
1775 | ARM/SOCFPGA ARCHITECTURE | 1775 | ARM/SOCFPGA ARCHITECTURE |
1776 | M: Dinh Nguyen <dinguyen@opensource.altera.com> | 1776 | M: Dinh Nguyen <dinguyen@kernel.org> |
1777 | S: Maintained | 1777 | S: Maintained |
1778 | F: arch/arm/mach-socfpga/ | 1778 | F: arch/arm/mach-socfpga/ |
1779 | F: arch/arm/boot/dts/socfpga* | 1779 | F: arch/arm/boot/dts/socfpga* |
@@ -1783,7 +1783,7 @@ W: http://www.rocketboards.org | |||
1783 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git | 1783 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git |
1784 | 1784 | ||
1785 | ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT | 1785 | ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT |
1786 | M: Dinh Nguyen <dinguyen@opensource.altera.com> | 1786 | M: Dinh Nguyen <dinguyen@kernel.org> |
1787 | S: Maintained | 1787 | S: Maintained |
1788 | F: drivers/clk/socfpga/ | 1788 | F: drivers/clk/socfpga/ |
1789 | 1789 | ||
@@ -2175,56 +2175,56 @@ F: include/linux/atm* | |||
2175 | F: include/uapi/linux/atm* | 2175 | F: include/uapi/linux/atm* |
2176 | 2176 | ||
2177 | ATMEL AT91 / AT32 MCI DRIVER | 2177 | ATMEL AT91 / AT32 MCI DRIVER |
2178 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | 2178 | M: Ludovic Desroches <ludovic.desroches@microchip.com> |
2179 | S: Maintained | 2179 | S: Maintained |
2180 | F: drivers/mmc/host/atmel-mci.c | 2180 | F: drivers/mmc/host/atmel-mci.c |
2181 | 2181 | ||
2182 | ATMEL AT91 SAMA5D2-Compatible Shutdown Controller | 2182 | ATMEL AT91 SAMA5D2-Compatible Shutdown Controller |
2183 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 2183 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
2184 | S: Supported | 2184 | S: Supported |
2185 | F: drivers/power/reset/at91-sama5d2_shdwc.c | 2185 | F: drivers/power/reset/at91-sama5d2_shdwc.c |
2186 | 2186 | ||
2187 | ATMEL SAMA5D2 ADC DRIVER | 2187 | ATMEL SAMA5D2 ADC DRIVER |
2188 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | 2188 | M: Ludovic Desroches <ludovic.desroches@microchip.com> |
2189 | L: linux-iio@vger.kernel.org | 2189 | L: linux-iio@vger.kernel.org |
2190 | S: Supported | 2190 | S: Supported |
2191 | F: drivers/iio/adc/at91-sama5d2_adc.c | 2191 | F: drivers/iio/adc/at91-sama5d2_adc.c |
2192 | 2192 | ||
2193 | ATMEL Audio ALSA driver | 2193 | ATMEL Audio ALSA driver |
2194 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 2194 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
2195 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 2195 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
2196 | S: Supported | 2196 | S: Supported |
2197 | F: sound/soc/atmel | 2197 | F: sound/soc/atmel |
2198 | 2198 | ||
2199 | ATMEL XDMA DRIVER | 2199 | ATMEL XDMA DRIVER |
2200 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | 2200 | M: Ludovic Desroches <ludovic.desroches@microchip.com> |
2201 | L: linux-arm-kernel@lists.infradead.org | 2201 | L: linux-arm-kernel@lists.infradead.org |
2202 | L: dmaengine@vger.kernel.org | 2202 | L: dmaengine@vger.kernel.org |
2203 | S: Supported | 2203 | S: Supported |
2204 | F: drivers/dma/at_xdmac.c | 2204 | F: drivers/dma/at_xdmac.c |
2205 | 2205 | ||
2206 | ATMEL I2C DRIVER | 2206 | ATMEL I2C DRIVER |
2207 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | 2207 | M: Ludovic Desroches <ludovic.desroches@microchip.com> |
2208 | L: linux-i2c@vger.kernel.org | 2208 | L: linux-i2c@vger.kernel.org |
2209 | S: Supported | 2209 | S: Supported |
2210 | F: drivers/i2c/busses/i2c-at91.c | 2210 | F: drivers/i2c/busses/i2c-at91.c |
2211 | 2211 | ||
2212 | ATMEL ISI DRIVER | 2212 | ATMEL ISI DRIVER |
2213 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | 2213 | M: Ludovic Desroches <ludovic.desroches@microchip.com> |
2214 | L: linux-media@vger.kernel.org | 2214 | L: linux-media@vger.kernel.org |
2215 | S: Supported | 2215 | S: Supported |
2216 | F: drivers/media/platform/soc_camera/atmel-isi.c | 2216 | F: drivers/media/platform/soc_camera/atmel-isi.c |
2217 | F: include/media/atmel-isi.h | 2217 | F: include/media/atmel-isi.h |
2218 | 2218 | ||
2219 | ATMEL LCDFB DRIVER | 2219 | ATMEL LCDFB DRIVER |
2220 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 2220 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
2221 | L: linux-fbdev@vger.kernel.org | 2221 | L: linux-fbdev@vger.kernel.org |
2222 | S: Maintained | 2222 | S: Maintained |
2223 | F: drivers/video/fbdev/atmel_lcdfb.c | 2223 | F: drivers/video/fbdev/atmel_lcdfb.c |
2224 | F: include/video/atmel_lcdc.h | 2224 | F: include/video/atmel_lcdc.h |
2225 | 2225 | ||
2226 | ATMEL MACB ETHERNET DRIVER | 2226 | ATMEL MACB ETHERNET DRIVER |
2227 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 2227 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
2228 | S: Supported | 2228 | S: Supported |
2229 | F: drivers/net/ethernet/cadence/ | 2229 | F: drivers/net/ethernet/cadence/ |
2230 | 2230 | ||
@@ -2236,32 +2236,32 @@ S: Supported | |||
2236 | F: drivers/mtd/nand/atmel_nand* | 2236 | F: drivers/mtd/nand/atmel_nand* |
2237 | 2237 | ||
2238 | ATMEL SDMMC DRIVER | 2238 | ATMEL SDMMC DRIVER |
2239 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | 2239 | M: Ludovic Desroches <ludovic.desroches@microchip.com> |
2240 | L: linux-mmc@vger.kernel.org | 2240 | L: linux-mmc@vger.kernel.org |
2241 | S: Supported | 2241 | S: Supported |
2242 | F: drivers/mmc/host/sdhci-of-at91.c | 2242 | F: drivers/mmc/host/sdhci-of-at91.c |
2243 | 2243 | ||
2244 | ATMEL SPI DRIVER | 2244 | ATMEL SPI DRIVER |
2245 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 2245 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
2246 | S: Supported | 2246 | S: Supported |
2247 | F: drivers/spi/spi-atmel.* | 2247 | F: drivers/spi/spi-atmel.* |
2248 | 2248 | ||
2249 | ATMEL SSC DRIVER | 2249 | ATMEL SSC DRIVER |
2250 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 2250 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
2251 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 2251 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
2252 | S: Supported | 2252 | S: Supported |
2253 | F: drivers/misc/atmel-ssc.c | 2253 | F: drivers/misc/atmel-ssc.c |
2254 | F: include/linux/atmel-ssc.h | 2254 | F: include/linux/atmel-ssc.h |
2255 | 2255 | ||
2256 | ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS | 2256 | ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS |
2257 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 2257 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
2258 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 2258 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
2259 | S: Supported | 2259 | S: Supported |
2260 | F: drivers/misc/atmel_tclib.c | 2260 | F: drivers/misc/atmel_tclib.c |
2261 | F: drivers/clocksource/tcb_clksrc.c | 2261 | F: drivers/clocksource/tcb_clksrc.c |
2262 | 2262 | ||
2263 | ATMEL USBA UDC DRIVER | 2263 | ATMEL USBA UDC DRIVER |
2264 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 2264 | M: Nicolas Ferre <nicolas.ferre@microchip.com> |
2265 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 2265 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
2266 | S: Supported | 2266 | S: Supported |
2267 | F: drivers/usb/gadget/udc/atmel_usba_udc.* | 2267 | F: drivers/usb/gadget/udc/atmel_usba_udc.* |
@@ -9736,7 +9736,7 @@ S: Maintained | |||
9736 | F: drivers/pinctrl/pinctrl-at91.* | 9736 | F: drivers/pinctrl/pinctrl-at91.* |
9737 | 9737 | ||
9738 | PIN CONTROLLER - ATMEL AT91 PIO4 | 9738 | PIN CONTROLLER - ATMEL AT91 PIO4 |
9739 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | 9739 | M: Ludovic Desroches <ludovic.desroches@microchip.com> |
9740 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 9740 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
9741 | L: linux-gpio@vger.kernel.org | 9741 | L: linux-gpio@vger.kernel.org |
9742 | S: Supported | 9742 | S: Supported |
@@ -10195,7 +10195,6 @@ F: drivers/media/tuners/qt1010* | |||
10195 | QUALCOMM ATHEROS ATH9K WIRELESS DRIVER | 10195 | QUALCOMM ATHEROS ATH9K WIRELESS DRIVER |
10196 | M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com> | 10196 | M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com> |
10197 | L: linux-wireless@vger.kernel.org | 10197 | L: linux-wireless@vger.kernel.org |
10198 | L: ath9k-devel@lists.ath9k.org | ||
10199 | W: http://wireless.kernel.org/en/users/Drivers/ath9k | 10198 | W: http://wireless.kernel.org/en/users/Drivers/ath9k |
10200 | S: Supported | 10199 | S: Supported |
10201 | F: drivers/net/wireless/ath/ath9k/ | 10200 | F: drivers/net/wireless/ath/ath9k/ |
@@ -13066,7 +13065,7 @@ F: drivers/input/serio/userio.c | |||
13066 | F: include/uapi/linux/userio.h | 13065 | F: include/uapi/linux/userio.h |
13067 | 13066 | ||
13068 | VIRTIO CONSOLE DRIVER | 13067 | VIRTIO CONSOLE DRIVER |
13069 | M: Amit Shah <amit.shah@redhat.com> | 13068 | M: Amit Shah <amit@kernel.org> |
13070 | L: virtualization@lists.linux-foundation.org | 13069 | L: virtualization@lists.linux-foundation.org |
13071 | S: Maintained | 13070 | S: Maintained |
13072 | F: drivers/char/virtio_console.c | 13071 | F: drivers/char/virtio_console.c |
@@ -1,8 +1,8 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 10 | 2 | PATCHLEVEL = 10 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc8 |
5 | NAME = Anniversary Edition | 5 | NAME = Fearless Coyote |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
8 | # To see a list of typical targets execute "make help" | 8 | # To see a list of typical targets execute "make help" |
@@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) | |||
797 | KBUILD_ARFLAGS := $(call ar-option,D) | 797 | KBUILD_ARFLAGS := $(call ar-option,D) |
798 | 798 | ||
799 | # check for 'asm goto' | 799 | # check for 'asm goto' |
800 | ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y) | 800 | ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) |
801 | KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO | 801 | KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO |
802 | KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO | 802 | KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO |
803 | endif | 803 | endif |
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h index a36e8601114d..d5da2115d78a 100644 --- a/arch/arc/include/asm/delay.h +++ b/arch/arc/include/asm/delay.h | |||
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops) | |||
26 | " lp 1f \n" | 26 | " lp 1f \n" |
27 | " nop \n" | 27 | " nop \n" |
28 | "1: \n" | 28 | "1: \n" |
29 | : : "r"(loops)); | 29 | : |
30 | : "r"(loops) | ||
31 | : "lp_count"); | ||
30 | } | 32 | } |
31 | 33 | ||
32 | extern void __bad_udelay(void); | 34 | extern void __bad_udelay(void); |
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 689dd867fdff..8b90d25a15cc 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S | |||
@@ -71,14 +71,14 @@ ENTRY(stext) | |||
71 | GET_CPU_ID r5 | 71 | GET_CPU_ID r5 |
72 | cmp r5, 0 | 72 | cmp r5, 0 |
73 | mov.nz r0, r5 | 73 | mov.nz r0, r5 |
74 | #ifdef CONFIG_ARC_SMP_HALT_ON_RESET | 74 | bz .Lmaster_proceed |
75 | ; Non-Master can proceed as system would be booted sufficiently | 75 | |
76 | jnz first_lines_of_secondary | ||
77 | #else | ||
78 | ; Non-Masters wait for Master to boot enough and bring them up | 76 | ; Non-Masters wait for Master to boot enough and bring them up |
79 | jnz arc_platform_smp_wait_to_boot | 77 | ; when they resume, tail-call to entry point |
80 | #endif | 78 | mov blink, @first_lines_of_secondary |
81 | ; Master falls thru | 79 | j arc_platform_smp_wait_to_boot |
80 | |||
81 | .Lmaster_proceed: | ||
82 | #endif | 82 | #endif |
83 | 83 | ||
84 | ; Clear BSS before updating any globals | 84 | ; Clear BSS before updating any globals |
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 9274f8ade8c7..9f6b68fd4f3b 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c | |||
@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void) | |||
93 | READ_BCR(ARC_REG_MCIP_BCR, mp); | 93 | READ_BCR(ARC_REG_MCIP_BCR, mp); |
94 | 94 | ||
95 | sprintf(smp_cpuinfo_buf, | 95 | sprintf(smp_cpuinfo_buf, |
96 | "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n", | 96 | "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n", |
97 | mp.ver, mp.num_cores, | 97 | mp.ver, mp.num_cores, |
98 | IS_AVAIL1(mp.ipi, "IPI "), | 98 | IS_AVAIL1(mp.ipi, "IPI "), |
99 | IS_AVAIL1(mp.idu, "IDU "), | 99 | IS_AVAIL1(mp.idu, "IDU "), |
100 | IS_AVAIL1(mp.llm, "LLM "), | ||
101 | IS_AVAIL1(mp.dbg, "DEBUG "), | 100 | IS_AVAIL1(mp.dbg, "DEBUG "), |
102 | IS_AVAIL1(mp.gfrc, "GFRC")); | 101 | IS_AVAIL1(mp.gfrc, "GFRC")); |
103 | 102 | ||
@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data) | |||
175 | raw_spin_unlock_irqrestore(&mcip_lock, flags); | 174 | raw_spin_unlock_irqrestore(&mcip_lock, flags); |
176 | } | 175 | } |
177 | 176 | ||
178 | #ifdef CONFIG_SMP | ||
179 | static int | 177 | static int |
180 | idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, | 178 | idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, |
181 | bool force) | 179 | bool force) |
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, | |||
205 | 203 | ||
206 | return IRQ_SET_MASK_OK; | 204 | return IRQ_SET_MASK_OK; |
207 | } | 205 | } |
208 | #endif | 206 | |
207 | static void idu_irq_enable(struct irq_data *data) | ||
208 | { | ||
209 | /* | ||
210 | * By default send all common interrupts to all available online CPUs. | ||
211 | * The affinity of common interrupts in IDU must be set manually since | ||
212 | * in some cases the kernel will not call irq_set_affinity() by itself: | ||
213 | * 1. When the kernel is not configured with support of SMP. | ||
214 | * 2. When the kernel is configured with support of SMP but upper | ||
215 | * interrupt controllers does not support setting of the affinity | ||
216 | * and cannot propagate it to IDU. | ||
217 | */ | ||
218 | idu_irq_set_affinity(data, cpu_online_mask, false); | ||
219 | idu_irq_unmask(data); | ||
220 | } | ||
209 | 221 | ||
210 | static struct irq_chip idu_irq_chip = { | 222 | static struct irq_chip idu_irq_chip = { |
211 | .name = "MCIP IDU Intc", | 223 | .name = "MCIP IDU Intc", |
212 | .irq_mask = idu_irq_mask, | 224 | .irq_mask = idu_irq_mask, |
213 | .irq_unmask = idu_irq_unmask, | 225 | .irq_unmask = idu_irq_unmask, |
226 | .irq_enable = idu_irq_enable, | ||
214 | #ifdef CONFIG_SMP | 227 | #ifdef CONFIG_SMP |
215 | .irq_set_affinity = idu_irq_set_affinity, | 228 | .irq_set_affinity = idu_irq_set_affinity, |
216 | #endif | 229 | #endif |
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n, | |||
243 | const u32 *intspec, unsigned int intsize, | 256 | const u32 *intspec, unsigned int intsize, |
244 | irq_hw_number_t *out_hwirq, unsigned int *out_type) | 257 | irq_hw_number_t *out_hwirq, unsigned int *out_type) |
245 | { | 258 | { |
246 | irq_hw_number_t hwirq = *out_hwirq = intspec[0]; | 259 | /* |
247 | int distri = intspec[1]; | 260 | * Ignore value of interrupt distribution mode for common interrupts in |
248 | unsigned long flags; | 261 | * IDU which resides in intspec[1] since setting an affinity using value |
249 | 262 | * from Device Tree is deprecated in ARC. | |
263 | */ | ||
264 | *out_hwirq = intspec[0]; | ||
250 | *out_type = IRQ_TYPE_NONE; | 265 | *out_type = IRQ_TYPE_NONE; |
251 | 266 | ||
252 | /* XXX: validate distribution scheme again online cpu mask */ | ||
253 | if (distri == 0) { | ||
254 | /* 0 - Round Robin to all cpus, otherwise 1 bit per core */ | ||
255 | raw_spin_lock_irqsave(&mcip_lock, flags); | ||
256 | idu_set_dest(hwirq, BIT(num_online_cpus()) - 1); | ||
257 | idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR); | ||
258 | raw_spin_unlock_irqrestore(&mcip_lock, flags); | ||
259 | } else { | ||
260 | /* | ||
261 | * DEST based distribution for Level Triggered intr can only | ||
262 | * have 1 CPU, so generalize it to always contain 1 cpu | ||
263 | */ | ||
264 | int cpu = ffs(distri); | ||
265 | |||
266 | if (cpu != fls(distri)) | ||
267 | pr_warn("IDU irq %lx distri mode set to cpu %x\n", | ||
268 | hwirq, cpu); | ||
269 | |||
270 | raw_spin_lock_irqsave(&mcip_lock, flags); | ||
271 | idu_set_dest(hwirq, cpu); | ||
272 | idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST); | ||
273 | raw_spin_unlock_irqrestore(&mcip_lock, flags); | ||
274 | } | ||
275 | |||
276 | return 0; | 267 | return 0; |
277 | } | 268 | } |
278 | 269 | ||
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 88674d972c9d..2afbafadb6ab 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
90 | */ | 90 | */ |
91 | static volatile int wake_flag; | 91 | static volatile int wake_flag; |
92 | 92 | ||
93 | #ifdef CONFIG_ISA_ARCOMPACT | ||
94 | |||
95 | #define __boot_read(f) f | ||
96 | #define __boot_write(f, v) f = v | ||
97 | |||
98 | #else | ||
99 | |||
100 | #define __boot_read(f) arc_read_uncached_32(&f) | ||
101 | #define __boot_write(f, v) arc_write_uncached_32(&f, v) | ||
102 | |||
103 | #endif | ||
104 | |||
93 | static void arc_default_smp_cpu_kick(int cpu, unsigned long pc) | 105 | static void arc_default_smp_cpu_kick(int cpu, unsigned long pc) |
94 | { | 106 | { |
95 | BUG_ON(cpu == 0); | 107 | BUG_ON(cpu == 0); |
96 | wake_flag = cpu; | 108 | |
109 | __boot_write(wake_flag, cpu); | ||
97 | } | 110 | } |
98 | 111 | ||
99 | void arc_platform_smp_wait_to_boot(int cpu) | 112 | void arc_platform_smp_wait_to_boot(int cpu) |
100 | { | 113 | { |
101 | while (wake_flag != cpu) | 114 | /* for halt-on-reset, we've waited already */ |
115 | if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET)) | ||
116 | return; | ||
117 | |||
118 | while (__boot_read(wake_flag) != cpu) | ||
102 | ; | 119 | ; |
103 | 120 | ||
104 | wake_flag = 0; | 121 | __boot_write(wake_flag, 0); |
105 | __asm__ __volatile__("j @first_lines_of_secondary \n"); | ||
106 | } | 122 | } |
107 | 123 | ||
108 | |||
109 | const char *arc_platform_smp_cpuinfo(void) | 124 | const char *arc_platform_smp_cpuinfo(void) |
110 | { | 125 | { |
111 | return plat_smp_ops.info ? : ""; | 126 | return plat_smp_ops.info ? : ""; |
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index abd961f3e763..5f69c3bd59bb 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c | |||
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, | |||
241 | if (state.fault) | 241 | if (state.fault) |
242 | goto fault; | 242 | goto fault; |
243 | 243 | ||
244 | /* clear any remanants of delay slot */ | ||
244 | if (delay_mode(regs)) { | 245 | if (delay_mode(regs)) { |
245 | regs->ret = regs->bta; | 246 | regs->ret = regs->bta & ~1U; |
246 | regs->status32 &= ~STATUS_DE_MASK; | 247 | regs->status32 &= ~STATUS_DE_MASK; |
247 | } else { | 248 | } else { |
248 | regs->ret += state.instr_len; | 249 | regs->ret += state.instr_len; |
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index f10fe8526239..01d178a2009f 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile | |||
@@ -617,7 +617,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \ | |||
617 | orion5x-lacie-ethernet-disk-mini-v2.dtb \ | 617 | orion5x-lacie-ethernet-disk-mini-v2.dtb \ |
618 | orion5x-linkstation-lsgl.dtb \ | 618 | orion5x-linkstation-lsgl.dtb \ |
619 | orion5x-linkstation-lswtgl.dtb \ | 619 | orion5x-linkstation-lswtgl.dtb \ |
620 | orion5x-lschl.dtb \ | 620 | orion5x-linkstation-lschl.dtb \ |
621 | orion5x-lswsgl.dtb \ | 621 | orion5x-lswsgl.dtb \ |
622 | orion5x-maxtor-shared-storage-2.dtb \ | 622 | orion5x-maxtor-shared-storage-2.dtb \ |
623 | orion5x-netgear-wnr854t.dtb \ | 623 | orion5x-netgear-wnr854t.dtb \ |
diff --git a/arch/arm/boot/dts/imx1.dtsi b/arch/arm/boot/dts/imx1.dtsi index b792eee3899b..2ee40bc9ec21 100644 --- a/arch/arm/boot/dts/imx1.dtsi +++ b/arch/arm/boot/dts/imx1.dtsi | |||
@@ -18,6 +18,14 @@ | |||
18 | / { | 18 | / { |
19 | #address-cells = <1>; | 19 | #address-cells = <1>; |
20 | #size-cells = <1>; | 20 | #size-cells = <1>; |
21 | /* | ||
22 | * The decompressor and also some bootloaders rely on a | ||
23 | * pre-existing /chosen node to be available to insert the | ||
24 | * command line and merge other ATAGS info. | ||
25 | * Also for U-Boot there must be a pre-existing /memory node. | ||
26 | */ | ||
27 | chosen {}; | ||
28 | memory { device_type = "memory"; reg = <0 0>; }; | ||
21 | 29 | ||
22 | aliases { | 30 | aliases { |
23 | gpio0 = &gpio1; | 31 | gpio0 = &gpio1; |
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi index ac2a9da62b6c..43ccbbf754a3 100644 --- a/arch/arm/boot/dts/imx23.dtsi +++ b/arch/arm/boot/dts/imx23.dtsi | |||
@@ -16,6 +16,14 @@ | |||
16 | #size-cells = <1>; | 16 | #size-cells = <1>; |
17 | 17 | ||
18 | interrupt-parent = <&icoll>; | 18 | interrupt-parent = <&icoll>; |
19 | /* | ||
20 | * The decompressor and also some bootloaders rely on a | ||
21 | * pre-existing /chosen node to be available to insert the | ||
22 | * command line and merge other ATAGS info. | ||
23 | * Also for U-Boot there must be a pre-existing /memory node. | ||
24 | */ | ||
25 | chosen {}; | ||
26 | memory { device_type = "memory"; reg = <0 0>; }; | ||
19 | 27 | ||
20 | aliases { | 28 | aliases { |
21 | gpio0 = &gpio0; | 29 | gpio0 = &gpio0; |
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi index 831d09a28155..acd475659156 100644 --- a/arch/arm/boot/dts/imx25.dtsi +++ b/arch/arm/boot/dts/imx25.dtsi | |||
@@ -14,6 +14,14 @@ | |||
14 | / { | 14 | / { |
15 | #address-cells = <1>; | 15 | #address-cells = <1>; |
16 | #size-cells = <1>; | 16 | #size-cells = <1>; |
17 | /* | ||
18 | * The decompressor and also some bootloaders rely on a | ||
19 | * pre-existing /chosen node to be available to insert the | ||
20 | * command line and merge other ATAGS info. | ||
21 | * Also for U-Boot there must be a pre-existing /memory node. | ||
22 | */ | ||
23 | chosen {}; | ||
24 | memory { device_type = "memory"; reg = <0 0>; }; | ||
17 | 25 | ||
18 | aliases { | 26 | aliases { |
19 | ethernet0 = &fec; | 27 | ethernet0 = &fec; |
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index 9d8b5969ee3b..b397384248f4 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi | |||
@@ -19,6 +19,14 @@ | |||
19 | / { | 19 | / { |
20 | #address-cells = <1>; | 20 | #address-cells = <1>; |
21 | #size-cells = <1>; | 21 | #size-cells = <1>; |
22 | /* | ||
23 | * The decompressor and also some bootloaders rely on a | ||
24 | * pre-existing /chosen node to be available to insert the | ||
25 | * command line and merge other ATAGS info. | ||
26 | * Also for U-Boot there must be a pre-existing /memory node. | ||
27 | */ | ||
28 | chosen {}; | ||
29 | memory { device_type = "memory"; reg = <0 0>; }; | ||
22 | 30 | ||
23 | aliases { | 31 | aliases { |
24 | ethernet0 = &fec; | 32 | ethernet0 = &fec; |
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index 3aabf65a6a52..d6a2190b60ef 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi | |||
@@ -17,6 +17,14 @@ | |||
17 | #size-cells = <1>; | 17 | #size-cells = <1>; |
18 | 18 | ||
19 | interrupt-parent = <&icoll>; | 19 | interrupt-parent = <&icoll>; |
20 | /* | ||
21 | * The decompressor and also some bootloaders rely on a | ||
22 | * pre-existing /chosen node to be available to insert the | ||
23 | * command line and merge other ATAGS info. | ||
24 | * Also for U-Boot there must be a pre-existing /memory node. | ||
25 | */ | ||
26 | chosen {}; | ||
27 | memory { device_type = "memory"; reg = <0 0>; }; | ||
20 | 28 | ||
21 | aliases { | 29 | aliases { |
22 | ethernet0 = &mac0; | 30 | ethernet0 = &mac0; |
diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi index 85cd8be22f71..23b0d2cf9acd 100644 --- a/arch/arm/boot/dts/imx31.dtsi +++ b/arch/arm/boot/dts/imx31.dtsi | |||
@@ -12,6 +12,14 @@ | |||
12 | / { | 12 | / { |
13 | #address-cells = <1>; | 13 | #address-cells = <1>; |
14 | #size-cells = <1>; | 14 | #size-cells = <1>; |
15 | /* | ||
16 | * The decompressor and also some bootloaders rely on a | ||
17 | * pre-existing /chosen node to be available to insert the | ||
18 | * command line and merge other ATAGS info. | ||
19 | * Also for U-Boot there must be a pre-existing /memory node. | ||
20 | */ | ||
21 | chosen {}; | ||
22 | memory { device_type = "memory"; reg = <0 0>; }; | ||
15 | 23 | ||
16 | aliases { | 24 | aliases { |
17 | serial0 = &uart1; | 25 | serial0 = &uart1; |
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi index 9f40e6229189..d0496c65cea2 100644 --- a/arch/arm/boot/dts/imx35.dtsi +++ b/arch/arm/boot/dts/imx35.dtsi | |||
@@ -13,6 +13,14 @@ | |||
13 | / { | 13 | / { |
14 | #address-cells = <1>; | 14 | #address-cells = <1>; |
15 | #size-cells = <1>; | 15 | #size-cells = <1>; |
16 | /* | ||
17 | * The decompressor and also some bootloaders rely on a | ||
18 | * pre-existing /chosen node to be available to insert the | ||
19 | * command line and merge other ATAGS info. | ||
20 | * Also for U-Boot there must be a pre-existing /memory node. | ||
21 | */ | ||
22 | chosen {}; | ||
23 | memory { device_type = "memory"; reg = <0 0>; }; | ||
16 | 24 | ||
17 | aliases { | 25 | aliases { |
18 | ethernet0 = &fec; | 26 | ethernet0 = &fec; |
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi index fe0221e4cbf7..ceae909e2201 100644 --- a/arch/arm/boot/dts/imx50.dtsi +++ b/arch/arm/boot/dts/imx50.dtsi | |||
@@ -17,6 +17,14 @@ | |||
17 | / { | 17 | / { |
18 | #address-cells = <1>; | 18 | #address-cells = <1>; |
19 | #size-cells = <1>; | 19 | #size-cells = <1>; |
20 | /* | ||
21 | * The decompressor and also some bootloaders rely on a | ||
22 | * pre-existing /chosen node to be available to insert the | ||
23 | * command line and merge other ATAGS info. | ||
24 | * Also for U-Boot there must be a pre-existing /memory node. | ||
25 | */ | ||
26 | chosen {}; | ||
27 | memory { device_type = "memory"; reg = <0 0>; }; | ||
20 | 28 | ||
21 | aliases { | 29 | aliases { |
22 | ethernet0 = &fec; | 30 | ethernet0 = &fec; |
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 33526cade735..1ee1d542d9ad 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi | |||
@@ -19,6 +19,14 @@ | |||
19 | / { | 19 | / { |
20 | #address-cells = <1>; | 20 | #address-cells = <1>; |
21 | #size-cells = <1>; | 21 | #size-cells = <1>; |
22 | /* | ||
23 | * The decompressor and also some bootloaders rely on a | ||
24 | * pre-existing /chosen node to be available to insert the | ||
25 | * command line and merge other ATAGS info. | ||
26 | * Also for U-Boot there must be a pre-existing /memory node. | ||
27 | */ | ||
28 | chosen {}; | ||
29 | memory { device_type = "memory"; reg = <0 0>; }; | ||
22 | 30 | ||
23 | aliases { | 31 | aliases { |
24 | ethernet0 = &fec; | 32 | ethernet0 = &fec; |
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index ca51dc03e327..2e516f4985e4 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi | |||
@@ -19,6 +19,14 @@ | |||
19 | / { | 19 | / { |
20 | #address-cells = <1>; | 20 | #address-cells = <1>; |
21 | #size-cells = <1>; | 21 | #size-cells = <1>; |
22 | /* | ||
23 | * The decompressor and also some bootloaders rely on a | ||
24 | * pre-existing /chosen node to be available to insert the | ||
25 | * command line and merge other ATAGS info. | ||
26 | * Also for U-Boot there must be a pre-existing /memory node. | ||
27 | */ | ||
28 | chosen {}; | ||
29 | memory { device_type = "memory"; reg = <0 0>; }; | ||
22 | 30 | ||
23 | aliases { | 31 | aliases { |
24 | ethernet0 = &fec; | 32 | ethernet0 = &fec; |
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi index 1ade1951e620..7aa120fbdc71 100644 --- a/arch/arm/boot/dts/imx6dl.dtsi +++ b/arch/arm/boot/dts/imx6dl.dtsi | |||
@@ -137,7 +137,7 @@ | |||
137 | &gpio4 { | 137 | &gpio4 { |
138 | gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>, | 138 | gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>, |
139 | <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>, | 139 | <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>, |
140 | <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>, | 140 | <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>, |
141 | <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>, | 141 | <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>, |
142 | <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>; | 142 | <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>; |
143 | }; | 143 | }; |
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 89b834f3fa17..e7d30f45b161 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi | |||
@@ -16,6 +16,14 @@ | |||
16 | / { | 16 | / { |
17 | #address-cells = <1>; | 17 | #address-cells = <1>; |
18 | #size-cells = <1>; | 18 | #size-cells = <1>; |
19 | /* | ||
20 | * The decompressor and also some bootloaders rely on a | ||
21 | * pre-existing /chosen node to be available to insert the | ||
22 | * command line and merge other ATAGS info. | ||
23 | * Also for U-Boot there must be a pre-existing /memory node. | ||
24 | */ | ||
25 | chosen {}; | ||
26 | memory { device_type = "memory"; reg = <0 0>; }; | ||
19 | 27 | ||
20 | aliases { | 28 | aliases { |
21 | ethernet0 = &fec; | 29 | ethernet0 = &fec; |
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 19cbd879c448..cc9572ea2860 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi | |||
@@ -14,6 +14,14 @@ | |||
14 | / { | 14 | / { |
15 | #address-cells = <1>; | 15 | #address-cells = <1>; |
16 | #size-cells = <1>; | 16 | #size-cells = <1>; |
17 | /* | ||
18 | * The decompressor and also some bootloaders rely on a | ||
19 | * pre-existing /chosen node to be available to insert the | ||
20 | * command line and merge other ATAGS info. | ||
21 | * Also for U-Boot there must be a pre-existing /memory node. | ||
22 | */ | ||
23 | chosen {}; | ||
24 | memory { device_type = "memory"; reg = <0 0>; }; | ||
17 | 25 | ||
18 | aliases { | 26 | aliases { |
19 | ethernet0 = &fec; | 27 | ethernet0 = &fec; |
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 10f333016197..dd4ec85ecbaa 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi | |||
@@ -15,6 +15,14 @@ | |||
15 | / { | 15 | / { |
16 | #address-cells = <1>; | 16 | #address-cells = <1>; |
17 | #size-cells = <1>; | 17 | #size-cells = <1>; |
18 | /* | ||
19 | * The decompressor and also some bootloaders rely on a | ||
20 | * pre-existing /chosen node to be available to insert the | ||
21 | * command line and merge other ATAGS info. | ||
22 | * Also for U-Boot there must be a pre-existing /memory node. | ||
23 | */ | ||
24 | chosen {}; | ||
25 | memory { device_type = "memory"; reg = <0 0>; }; | ||
18 | 26 | ||
19 | aliases { | 27 | aliases { |
20 | can0 = &flexcan1; | 28 | can0 = &flexcan1; |
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi index 39845a7e0463..53d3f8e41e9b 100644 --- a/arch/arm/boot/dts/imx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul.dtsi | |||
@@ -15,6 +15,14 @@ | |||
15 | / { | 15 | / { |
16 | #address-cells = <1>; | 16 | #address-cells = <1>; |
17 | #size-cells = <1>; | 17 | #size-cells = <1>; |
18 | /* | ||
19 | * The decompressor and also some bootloaders rely on a | ||
20 | * pre-existing /chosen node to be available to insert the | ||
21 | * command line and merge other ATAGS info. | ||
22 | * Also for U-Boot there must be a pre-existing /memory node. | ||
23 | */ | ||
24 | chosen {}; | ||
25 | memory { device_type = "memory"; reg = <0 0>; }; | ||
18 | 26 | ||
19 | aliases { | 27 | aliases { |
20 | ethernet0 = &fec1; | 28 | ethernet0 = &fec1; |
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index 8ff2cbdd8f0d..be33dfc86838 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi | |||
@@ -50,6 +50,14 @@ | |||
50 | / { | 50 | / { |
51 | #address-cells = <1>; | 51 | #address-cells = <1>; |
52 | #size-cells = <1>; | 52 | #size-cells = <1>; |
53 | /* | ||
54 | * The decompressor and also some bootloaders rely on a | ||
55 | * pre-existing /chosen node to be available to insert the | ||
56 | * command line and merge other ATAGS info. | ||
57 | * Also for U-Boot there must be a pre-existing /memory node. | ||
58 | */ | ||
59 | chosen {}; | ||
60 | memory { device_type = "memory"; reg = <0 0>; }; | ||
53 | 61 | ||
54 | aliases { | 62 | aliases { |
55 | gpio0 = &gpio1; | 63 | gpio0 = &gpio1; |
diff --git a/arch/arm/boot/dts/orion5x-lschl.dts b/arch/arm/boot/dts/orion5x-linkstation-lschl.dts index 947409252845..ea6c881634b9 100644 --- a/arch/arm/boot/dts/orion5x-lschl.dts +++ b/arch/arm/boot/dts/orion5x-linkstation-lschl.dts | |||
@@ -2,7 +2,7 @@ | |||
2 | * Device Tree file for Buffalo Linkstation LS-CHLv3 | 2 | * Device Tree file for Buffalo Linkstation LS-CHLv3 |
3 | * | 3 | * |
4 | * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk> | 4 | * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk> |
5 | * Copyright (C) 2015, 2016 | 5 | * Copyright (C) 2015-2017 |
6 | * Roger Shimizu <rogershimizu@gmail.com> | 6 | * Roger Shimizu <rogershimizu@gmail.com> |
7 | * | 7 | * |
8 | * This file is dual-licensed: you can use it either under the terms | 8 | * This file is dual-licensed: you can use it either under the terms |
@@ -52,7 +52,7 @@ | |||
52 | #include <dt-bindings/gpio/gpio.h> | 52 | #include <dt-bindings/gpio/gpio.h> |
53 | 53 | ||
54 | / { | 54 | / { |
55 | model = "Buffalo Linkstation Live v3 (LS-CHL)"; | 55 | model = "Buffalo Linkstation LiveV3 (LS-CHL)"; |
56 | compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x"; | 56 | compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x"; |
57 | 57 | ||
58 | memory { /* 128 MB */ | 58 | memory { /* 128 MB */ |
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi index c8b2944e304a..ace97e8576db 100644 --- a/arch/arm/boot/dts/stih407-family.dtsi +++ b/arch/arm/boot/dts/stih407-family.dtsi | |||
@@ -680,6 +680,7 @@ | |||
680 | phy-names = "usb2-phy", "usb3-phy"; | 680 | phy-names = "usb2-phy", "usb3-phy"; |
681 | phys = <&usb2_picophy0>, | 681 | phys = <&usb2_picophy0>, |
682 | <&phy_port2 PHY_TYPE_USB3>; | 682 | <&phy_port2 PHY_TYPE_USB3>; |
683 | snps,dis_u3_susphy_quirk; | ||
683 | }; | 684 | }; |
684 | }; | 685 | }; |
685 | 686 | ||
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig index ea316c4b890e..d3f1768840e2 100644 --- a/arch/arm/configs/ezx_defconfig +++ b/arch/arm/configs/ezx_defconfig | |||
@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y | |||
64 | CONFIG_NETFILTER_NETLINK_QUEUE=m | 64 | CONFIG_NETFILTER_NETLINK_QUEUE=m |
65 | CONFIG_NF_CONNTRACK=m | 65 | CONFIG_NF_CONNTRACK=m |
66 | CONFIG_NF_CONNTRACK_EVENTS=y | 66 | CONFIG_NF_CONNTRACK_EVENTS=y |
67 | CONFIG_NF_CT_PROTO_SCTP=m | 67 | CONFIG_NF_CT_PROTO_SCTP=y |
68 | CONFIG_NF_CT_PROTO_UDPLITE=m | 68 | CONFIG_NF_CT_PROTO_UDPLITE=y |
69 | CONFIG_NF_CONNTRACK_AMANDA=m | 69 | CONFIG_NF_CONNTRACK_AMANDA=m |
70 | CONFIG_NF_CONNTRACK_FTP=m | 70 | CONFIG_NF_CONNTRACK_FTP=m |
71 | CONFIG_NF_CONNTRACK_H323=m | 71 | CONFIG_NF_CONNTRACK_H323=m |
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig index 18e59feaa307..7f479cdb3479 100644 --- a/arch/arm/configs/imote2_defconfig +++ b/arch/arm/configs/imote2_defconfig | |||
@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y | |||
56 | CONFIG_NETFILTER_NETLINK_QUEUE=m | 56 | CONFIG_NETFILTER_NETLINK_QUEUE=m |
57 | CONFIG_NF_CONNTRACK=m | 57 | CONFIG_NF_CONNTRACK=m |
58 | CONFIG_NF_CONNTRACK_EVENTS=y | 58 | CONFIG_NF_CONNTRACK_EVENTS=y |
59 | CONFIG_NF_CT_PROTO_SCTP=m | 59 | CONFIG_NF_CT_PROTO_SCTP=y |
60 | CONFIG_NF_CT_PROTO_UDPLITE=m | 60 | CONFIG_NF_CT_PROTO_UDPLITE=y |
61 | CONFIG_NF_CONNTRACK_AMANDA=m | 61 | CONFIG_NF_CONNTRACK_AMANDA=m |
62 | CONFIG_NF_CONNTRACK_FTP=m | 62 | CONFIG_NF_CONNTRACK_FTP=m |
63 | CONFIG_NF_CONNTRACK_H323=m | 63 | CONFIG_NF_CONNTRACK_H323=m |
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ce131ed5939d..ae738a6319f6 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target, | |||
600 | const void *kbuf, const void __user *ubuf) | 600 | const void *kbuf, const void __user *ubuf) |
601 | { | 601 | { |
602 | int ret; | 602 | int ret; |
603 | struct pt_regs newregs; | 603 | struct pt_regs newregs = *task_pt_regs(target); |
604 | 604 | ||
605 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 605 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
606 | &newregs, | 606 | &newregs, |
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index 699157759120..c03bf28d8bbc 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c | |||
@@ -60,7 +60,6 @@ | |||
60 | 60 | ||
61 | #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu) | 61 | #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu) |
62 | 62 | ||
63 | static enum cpuhp_state cpuhp_mmdc_state; | ||
64 | static int ddr_type; | 63 | static int ddr_type; |
65 | 64 | ||
66 | struct fsl_mmdc_devtype_data { | 65 | struct fsl_mmdc_devtype_data { |
@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = { | |||
82 | 81 | ||
83 | #ifdef CONFIG_PERF_EVENTS | 82 | #ifdef CONFIG_PERF_EVENTS |
84 | 83 | ||
84 | static enum cpuhp_state cpuhp_mmdc_state; | ||
85 | static DEFINE_IDA(mmdc_ida); | 85 | static DEFINE_IDA(mmdc_ida); |
86 | 86 | ||
87 | PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00") | 87 | PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00") |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 3a2e678b8d30..0122ad1a6027 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr, | |||
610 | 610 | ||
611 | void __init early_abt_enable(void) | 611 | void __init early_abt_enable(void) |
612 | { | 612 | { |
613 | fsr_info[22].fn = early_abort_handler; | 613 | fsr_info[FSR_FS_AEA].fn = early_abort_handler; |
614 | local_abt_enable(); | 614 | local_abt_enable(); |
615 | fsr_info[22].fn = do_bad; | 615 | fsr_info[FSR_FS_AEA].fn = do_bad; |
616 | } | 616 | } |
617 | 617 | ||
618 | #ifndef CONFIG_ARM_LPAE | 618 | #ifndef CONFIG_ARM_LPAE |
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index 67532f242271..afc1f84e763b 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h | |||
@@ -11,11 +11,15 @@ | |||
11 | #define FSR_FS5_0 (0x3f) | 11 | #define FSR_FS5_0 (0x3f) |
12 | 12 | ||
13 | #ifdef CONFIG_ARM_LPAE | 13 | #ifdef CONFIG_ARM_LPAE |
14 | #define FSR_FS_AEA 17 | ||
15 | |||
14 | static inline int fsr_fs(unsigned int fsr) | 16 | static inline int fsr_fs(unsigned int fsr) |
15 | { | 17 | { |
16 | return fsr & FSR_FS5_0; | 18 | return fsr & FSR_FS5_0; |
17 | } | 19 | } |
18 | #else | 20 | #else |
21 | #define FSR_FS_AEA 22 | ||
22 | |||
19 | static inline int fsr_fs(unsigned int fsr) | 23 | static inline int fsr_fs(unsigned int fsr) |
20 | { | 24 | { |
21 | return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; | 25 | return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index eada0b58ba1c..0cbe24b49710 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi | |||
@@ -55,6 +55,24 @@ | |||
55 | #address-cells = <2>; | 55 | #address-cells = <2>; |
56 | #size-cells = <2>; | 56 | #size-cells = <2>; |
57 | 57 | ||
58 | reserved-memory { | ||
59 | #address-cells = <2>; | ||
60 | #size-cells = <2>; | ||
61 | ranges; | ||
62 | |||
63 | /* 16 MiB reserved for Hardware ROM Firmware */ | ||
64 | hwrom_reserved: hwrom@0 { | ||
65 | reg = <0x0 0x0 0x0 0x1000000>; | ||
66 | no-map; | ||
67 | }; | ||
68 | |||
69 | /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ | ||
70 | secmon_reserved: secmon@10000000 { | ||
71 | reg = <0x0 0x10000000 0x0 0x200000>; | ||
72 | no-map; | ||
73 | }; | ||
74 | }; | ||
75 | |||
58 | cpus { | 76 | cpus { |
59 | #address-cells = <0x2>; | 77 | #address-cells = <0x2>; |
60 | #size-cells = <0x0>; | 78 | #size-cells = <0x0>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 5d28e1cdc998..c59403adb387 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | |||
@@ -151,6 +151,18 @@ | |||
151 | status = "okay"; | 151 | status = "okay"; |
152 | pinctrl-0 = <ð_rgmii_pins>; | 152 | pinctrl-0 = <ð_rgmii_pins>; |
153 | pinctrl-names = "default"; | 153 | pinctrl-names = "default"; |
154 | phy-handle = <ð_phy0>; | ||
155 | |||
156 | mdio { | ||
157 | compatible = "snps,dwmac-mdio"; | ||
158 | #address-cells = <1>; | ||
159 | #size-cells = <0>; | ||
160 | |||
161 | eth_phy0: ethernet-phy@0 { | ||
162 | reg = <0>; | ||
163 | eee-broken-1000t; | ||
164 | }; | ||
165 | }; | ||
154 | }; | 166 | }; |
155 | 167 | ||
156 | &ir { | 168 | &ir { |
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index c53dbeae79f2..838dad5c209f 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S | |||
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt) | |||
193 | cbz w6, .Lcbcencloop | 193 | cbz w6, .Lcbcencloop |
194 | 194 | ||
195 | ld1 {v0.16b}, [x5] /* get iv */ | 195 | ld1 {v0.16b}, [x5] /* get iv */ |
196 | enc_prepare w3, x2, x5 | 196 | enc_prepare w3, x2, x6 |
197 | 197 | ||
198 | .Lcbcencloop: | 198 | .Lcbcencloop: |
199 | ld1 {v1.16b}, [x1], #16 /* get next pt block */ | 199 | ld1 {v1.16b}, [x1], #16 /* get next pt block */ |
200 | eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */ | 200 | eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */ |
201 | encrypt_block v0, w3, x2, x5, w6 | 201 | encrypt_block v0, w3, x2, x6, w7 |
202 | st1 {v0.16b}, [x0], #16 | 202 | st1 {v0.16b}, [x0], #16 |
203 | subs w4, w4, #1 | 203 | subs w4, w4, #1 |
204 | bne .Lcbcencloop | 204 | bne .Lcbcencloop |
205 | st1 {v0.16b}, [x5] /* return iv */ | ||
205 | ret | 206 | ret |
206 | AES_ENDPROC(aes_cbc_encrypt) | 207 | AES_ENDPROC(aes_cbc_encrypt) |
207 | 208 | ||
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt) | |||
211 | cbz w6, .LcbcdecloopNx | 212 | cbz w6, .LcbcdecloopNx |
212 | 213 | ||
213 | ld1 {v7.16b}, [x5] /* get iv */ | 214 | ld1 {v7.16b}, [x5] /* get iv */ |
214 | dec_prepare w3, x2, x5 | 215 | dec_prepare w3, x2, x6 |
215 | 216 | ||
216 | .LcbcdecloopNx: | 217 | .LcbcdecloopNx: |
217 | #if INTERLEAVE >= 2 | 218 | #if INTERLEAVE >= 2 |
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt) | |||
248 | .Lcbcdecloop: | 249 | .Lcbcdecloop: |
249 | ld1 {v1.16b}, [x1], #16 /* get next ct block */ | 250 | ld1 {v1.16b}, [x1], #16 /* get next ct block */ |
250 | mov v0.16b, v1.16b /* ...and copy to v0 */ | 251 | mov v0.16b, v1.16b /* ...and copy to v0 */ |
251 | decrypt_block v0, w3, x2, x5, w6 | 252 | decrypt_block v0, w3, x2, x6, w7 |
252 | eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ | 253 | eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ |
253 | mov v7.16b, v1.16b /* ct is next iv */ | 254 | mov v7.16b, v1.16b /* ct is next iv */ |
254 | st1 {v0.16b}, [x0], #16 | 255 | st1 {v0.16b}, [x0], #16 |
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt) | |||
256 | bne .Lcbcdecloop | 257 | bne .Lcbcdecloop |
257 | .Lcbcdecout: | 258 | .Lcbcdecout: |
258 | FRAME_POP | 259 | FRAME_POP |
260 | st1 {v7.16b}, [x5] /* return iv */ | ||
259 | ret | 261 | ret |
260 | AES_ENDPROC(aes_cbc_decrypt) | 262 | AES_ENDPROC(aes_cbc_decrypt) |
261 | 263 | ||
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt) | |||
267 | 269 | ||
268 | AES_ENTRY(aes_ctr_encrypt) | 270 | AES_ENTRY(aes_ctr_encrypt) |
269 | FRAME_PUSH | 271 | FRAME_PUSH |
270 | cbnz w6, .Lctrfirst /* 1st time around? */ | 272 | cbz w6, .Lctrnotfirst /* 1st time around? */ |
271 | umov x5, v4.d[1] /* keep swabbed ctr in reg */ | ||
272 | rev x5, x5 | ||
273 | #if INTERLEAVE >= 2 | ||
274 | cmn w5, w4 /* 32 bit overflow? */ | ||
275 | bcs .Lctrinc | ||
276 | add x5, x5, #1 /* increment BE ctr */ | ||
277 | b .LctrincNx | ||
278 | #else | ||
279 | b .Lctrinc | ||
280 | #endif | ||
281 | .Lctrfirst: | ||
282 | enc_prepare w3, x2, x6 | 273 | enc_prepare w3, x2, x6 |
283 | ld1 {v4.16b}, [x5] | 274 | ld1 {v4.16b}, [x5] |
284 | umov x5, v4.d[1] /* keep swabbed ctr in reg */ | 275 | |
285 | rev x5, x5 | 276 | .Lctrnotfirst: |
277 | umov x8, v4.d[1] /* keep swabbed ctr in reg */ | ||
278 | rev x8, x8 | ||
286 | #if INTERLEAVE >= 2 | 279 | #if INTERLEAVE >= 2 |
287 | cmn w5, w4 /* 32 bit overflow? */ | 280 | cmn w8, w4 /* 32 bit overflow? */ |
288 | bcs .Lctrloop | 281 | bcs .Lctrloop |
289 | .LctrloopNx: | 282 | .LctrloopNx: |
290 | subs w4, w4, #INTERLEAVE | 283 | subs w4, w4, #INTERLEAVE |
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt) | |||
292 | #if INTERLEAVE == 2 | 285 | #if INTERLEAVE == 2 |
293 | mov v0.8b, v4.8b | 286 | mov v0.8b, v4.8b |
294 | mov v1.8b, v4.8b | 287 | mov v1.8b, v4.8b |
295 | rev x7, x5 | 288 | rev x7, x8 |
296 | add x5, x5, #1 | 289 | add x8, x8, #1 |
297 | ins v0.d[1], x7 | 290 | ins v0.d[1], x7 |
298 | rev x7, x5 | 291 | rev x7, x8 |
299 | add x5, x5, #1 | 292 | add x8, x8, #1 |
300 | ins v1.d[1], x7 | 293 | ins v1.d[1], x7 |
301 | ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */ | 294 | ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */ |
302 | do_encrypt_block2x | 295 | do_encrypt_block2x |
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt) | |||
305 | st1 {v0.16b-v1.16b}, [x0], #32 | 298 | st1 {v0.16b-v1.16b}, [x0], #32 |
306 | #else | 299 | #else |
307 | ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ | 300 | ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ |
308 | dup v7.4s, w5 | 301 | dup v7.4s, w8 |
309 | mov v0.16b, v4.16b | 302 | mov v0.16b, v4.16b |
310 | add v7.4s, v7.4s, v8.4s | 303 | add v7.4s, v7.4s, v8.4s |
311 | mov v1.16b, v4.16b | 304 | mov v1.16b, v4.16b |
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt) | |||
323 | eor v2.16b, v7.16b, v2.16b | 316 | eor v2.16b, v7.16b, v2.16b |
324 | eor v3.16b, v5.16b, v3.16b | 317 | eor v3.16b, v5.16b, v3.16b |
325 | st1 {v0.16b-v3.16b}, [x0], #64 | 318 | st1 {v0.16b-v3.16b}, [x0], #64 |
326 | add x5, x5, #INTERLEAVE | 319 | add x8, x8, #INTERLEAVE |
327 | #endif | 320 | #endif |
328 | cbz w4, .LctroutNx | 321 | rev x7, x8 |
329 | .LctrincNx: | ||
330 | rev x7, x5 | ||
331 | ins v4.d[1], x7 | 322 | ins v4.d[1], x7 |
323 | cbz w4, .Lctrout | ||
332 | b .LctrloopNx | 324 | b .LctrloopNx |
333 | .LctroutNx: | ||
334 | sub x5, x5, #1 | ||
335 | rev x7, x5 | ||
336 | ins v4.d[1], x7 | ||
337 | b .Lctrout | ||
338 | .Lctr1x: | 325 | .Lctr1x: |
339 | adds w4, w4, #INTERLEAVE | 326 | adds w4, w4, #INTERLEAVE |
340 | beq .Lctrout | 327 | beq .Lctrout |
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt) | |||
342 | .Lctrloop: | 329 | .Lctrloop: |
343 | mov v0.16b, v4.16b | 330 | mov v0.16b, v4.16b |
344 | encrypt_block v0, w3, x2, x6, w7 | 331 | encrypt_block v0, w3, x2, x6, w7 |
332 | |||
333 | adds x8, x8, #1 /* increment BE ctr */ | ||
334 | rev x7, x8 | ||
335 | ins v4.d[1], x7 | ||
336 | bcs .Lctrcarry /* overflow? */ | ||
337 | |||
338 | .Lctrcarrydone: | ||
345 | subs w4, w4, #1 | 339 | subs w4, w4, #1 |
346 | bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */ | 340 | bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */ |
347 | ld1 {v3.16b}, [x1], #16 | 341 | ld1 {v3.16b}, [x1], #16 |
348 | eor v3.16b, v0.16b, v3.16b | 342 | eor v3.16b, v0.16b, v3.16b |
349 | st1 {v3.16b}, [x0], #16 | 343 | st1 {v3.16b}, [x0], #16 |
350 | beq .Lctrout | 344 | bne .Lctrloop |
351 | .Lctrinc: | 345 | |
352 | adds x5, x5, #1 /* increment BE ctr */ | 346 | .Lctrout: |
353 | rev x7, x5 | 347 | st1 {v4.16b}, [x5] /* return next CTR value */ |
354 | ins v4.d[1], x7 | 348 | FRAME_POP |
355 | bcc .Lctrloop /* no overflow? */ | 349 | ret |
356 | umov x7, v4.d[0] /* load upper word of ctr */ | 350 | |
357 | rev x7, x7 /* ... to handle the carry */ | ||
358 | add x7, x7, #1 | ||
359 | rev x7, x7 | ||
360 | ins v4.d[0], x7 | ||
361 | b .Lctrloop | ||
362 | .Lctrhalfblock: | 351 | .Lctrhalfblock: |
363 | ld1 {v3.8b}, [x1] | 352 | ld1 {v3.8b}, [x1] |
364 | eor v3.8b, v0.8b, v3.8b | 353 | eor v3.8b, v0.8b, v3.8b |
365 | st1 {v3.8b}, [x0] | 354 | st1 {v3.8b}, [x0] |
366 | .Lctrout: | ||
367 | FRAME_POP | 355 | FRAME_POP |
368 | ret | 356 | ret |
357 | |||
358 | .Lctrcarry: | ||
359 | umov x7, v4.d[0] /* load upper word of ctr */ | ||
360 | rev x7, x7 /* ... to handle the carry */ | ||
361 | add x7, x7, #1 | ||
362 | rev x7, x7 | ||
363 | ins v4.d[0], x7 | ||
364 | b .Lctrcarrydone | ||
369 | AES_ENDPROC(aes_ctr_encrypt) | 365 | AES_ENDPROC(aes_ctr_encrypt) |
370 | .ltorg | 366 | .ltorg |
371 | 367 | ||
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 23e9e13bd2aa..655e65f38f31 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * for more details. | 11 | * for more details. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/acpi.h> | ||
14 | #include <linux/cpu.h> | 15 | #include <linux/cpu.h> |
15 | #include <linux/cpumask.h> | 16 | #include <linux/cpumask.h> |
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = { | |||
209 | 210 | ||
210 | static int __init register_cpufreq_notifier(void) | 211 | static int __init register_cpufreq_notifier(void) |
211 | { | 212 | { |
212 | if (cap_parsing_failed) | 213 | /* |
214 | * on ACPI-based systems we need to use the default cpu capacity | ||
215 | * until we have the necessary code to parse the cpu capacity, so | ||
216 | * skip registering cpufreq notifier. | ||
217 | */ | ||
218 | if (!acpi_disabled || cap_parsing_failed) | ||
213 | return -EINVAL; | 219 | return -EINVAL; |
214 | 220 | ||
215 | if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) { | 221 | if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) { |
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h index 3f9406d9b9d6..da87943328a5 100644 --- a/arch/parisc/include/asm/bitops.h +++ b/arch/parisc/include/asm/bitops.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | #include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ | 9 | #include <asm/types.h> |
10 | #include <asm/byteorder.h> | 10 | #include <asm/byteorder.h> |
11 | #include <asm/barrier.h> | 11 | #include <asm/barrier.h> |
12 | #include <linux/atomic.h> | 12 | #include <linux/atomic.h> |
@@ -17,6 +17,12 @@ | |||
17 | * to include/asm-i386/bitops.h or kerneldoc | 17 | * to include/asm-i386/bitops.h or kerneldoc |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #if __BITS_PER_LONG == 64 | ||
21 | #define SHIFT_PER_LONG 6 | ||
22 | #else | ||
23 | #define SHIFT_PER_LONG 5 | ||
24 | #endif | ||
25 | |||
20 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) | 26 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) |
21 | 27 | ||
22 | 28 | ||
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h index e0a23c7bdd43..07fa7e50bdc0 100644 --- a/arch/parisc/include/uapi/asm/bitsperlong.h +++ b/arch/parisc/include/uapi/asm/bitsperlong.h | |||
@@ -3,10 +3,8 @@ | |||
3 | 3 | ||
4 | #if defined(__LP64__) | 4 | #if defined(__LP64__) |
5 | #define __BITS_PER_LONG 64 | 5 | #define __BITS_PER_LONG 64 |
6 | #define SHIFT_PER_LONG 6 | ||
7 | #else | 6 | #else |
8 | #define __BITS_PER_LONG 32 | 7 | #define __BITS_PER_LONG 32 |
9 | #define SHIFT_PER_LONG 5 | ||
10 | #endif | 8 | #endif |
11 | 9 | ||
12 | #include <asm-generic/bitsperlong.h> | 10 | #include <asm-generic/bitsperlong.h> |
diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h index e78403b129ef..928e1bbac98f 100644 --- a/arch/parisc/include/uapi/asm/swab.h +++ b/arch/parisc/include/uapi/asm/swab.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _PARISC_SWAB_H | 1 | #ifndef _PARISC_SWAB_H |
2 | #define _PARISC_SWAB_H | 2 | #define _PARISC_SWAB_H |
3 | 3 | ||
4 | #include <asm/bitsperlong.h> | ||
4 | #include <linux/types.h> | 5 | #include <linux/types.h> |
5 | #include <linux/compiler.h> | 6 | #include <linux/compiler.h> |
6 | 7 | ||
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) | |||
38 | } | 39 | } |
39 | #define __arch_swab32 __arch_swab32 | 40 | #define __arch_swab32 __arch_swab32 |
40 | 41 | ||
41 | #if BITS_PER_LONG > 32 | 42 | #if __BITS_PER_LONG > 32 |
42 | /* | 43 | /* |
43 | ** From "PA-RISC 2.0 Architecture", HP Professional Books. | 44 | ** From "PA-RISC 2.0 Architecture", HP Professional Books. |
44 | ** See Appendix I page 8 , "Endian Byte Swapping". | 45 | ** See Appendix I page 8 , "Endian Byte Swapping". |
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x) | |||
61 | return x; | 62 | return x; |
62 | } | 63 | } |
63 | #define __arch_swab64 __arch_swab64 | 64 | #define __arch_swab64 __arch_swab64 |
64 | #endif /* BITS_PER_LONG > 32 */ | 65 | #endif /* __BITS_PER_LONG > 32 */ |
65 | 66 | ||
66 | #endif /* _PARISC_SWAB_H */ | 67 | #endif /* _PARISC_SWAB_H */ |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a8ee573fe610..281f4f1fcd1f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -164,7 +164,6 @@ config PPC | |||
164 | select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE | 164 | select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE |
165 | select HAVE_ARCH_HARDENED_USERCOPY | 165 | select HAVE_ARCH_HARDENED_USERCOPY |
166 | select HAVE_KERNEL_GZIP | 166 | select HAVE_KERNEL_GZIP |
167 | select HAVE_CC_STACKPROTECTOR | ||
168 | 167 | ||
169 | config GENERIC_CSUM | 168 | config GENERIC_CSUM |
170 | def_bool CPU_LITTLE_ENDIAN | 169 | def_bool CPU_LITTLE_ENDIAN |
@@ -484,6 +483,7 @@ config RELOCATABLE | |||
484 | bool "Build a relocatable kernel" | 483 | bool "Build a relocatable kernel" |
485 | depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE)) | 484 | depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE)) |
486 | select NONSTATIC_KERNEL | 485 | select NONSTATIC_KERNEL |
486 | select MODULE_REL_CRCS if MODVERSIONS | ||
487 | help | 487 | help |
488 | This builds a kernel image that is capable of running at the | 488 | This builds a kernel image that is capable of running at the |
489 | location the kernel is loaded at. For ppc32, there is no any | 489 | location the kernel is loaded at. For ppc32, there is no any |
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index b312b152461b..6e834caa3720 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h | |||
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature) | |||
23 | { | 23 | { |
24 | int i; | 24 | int i; |
25 | 25 | ||
26 | #ifndef __clang__ /* clang can't cope with this */ | ||
26 | BUILD_BUG_ON(!__builtin_constant_p(feature)); | 27 | BUILD_BUG_ON(!__builtin_constant_p(feature)); |
28 | #endif | ||
27 | 29 | ||
28 | #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG | 30 | #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG |
29 | if (!static_key_initialized) { | 31 | if (!static_key_initialized) { |
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index a34c764ca8dd..233a7e8cc8e3 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h | |||
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature) | |||
160 | { | 160 | { |
161 | int i; | 161 | int i; |
162 | 162 | ||
163 | #ifndef __clang__ /* clang can't cope with this */ | ||
163 | BUILD_BUG_ON(!__builtin_constant_p(feature)); | 164 | BUILD_BUG_ON(!__builtin_constant_p(feature)); |
165 | #endif | ||
164 | 166 | ||
165 | #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG | 167 | #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG |
166 | if (!static_key_initialized) { | 168 | if (!static_key_initialized) { |
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index cc12c61ef315..53885512b8d3 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h | |||
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec | |||
90 | } | 90 | } |
91 | #endif | 91 | #endif |
92 | 92 | ||
93 | #if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64) | ||
94 | #define ARCH_RELOCATES_KCRCTAB | ||
95 | #define reloc_start PHYSICAL_START | ||
96 | #endif | ||
97 | #endif /* __KERNEL__ */ | 93 | #endif /* __KERNEL__ */ |
98 | #endif /* _ASM_POWERPC_MODULE_H */ | 94 | #endif /* _ASM_POWERPC_MODULE_H */ |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 0d4531aa2052..dff79798903d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -649,9 +649,10 @@ | |||
649 | #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ | 649 | #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ |
650 | #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ | 650 | #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ |
651 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ | 651 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ |
652 | #define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */ | 652 | #define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */ |
653 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ | 653 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ |
654 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ | 654 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ |
655 | #define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */ | ||
655 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ | 656 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ |
656 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ | 657 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ |
657 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ | 658 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ |
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h deleted file mode 100644 index 6720190eabec..000000000000 --- a/arch/powerpc/include/asm/stackprotector.h +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | /* | ||
2 | * GCC stack protector support. | ||
3 | * | ||
4 | * Stack protector works by putting predefined pattern at the start of | ||
5 | * the stack frame and verifying that it hasn't been overwritten when | ||
6 | * returning from the function. The pattern is called stack canary | ||
7 | * and gcc expects it to be defined by a global variable called | ||
8 | * "__stack_chk_guard" on PPC. This unfortunately means that on SMP | ||
9 | * we cannot have a different canary value per task. | ||
10 | */ | ||
11 | |||
12 | #ifndef _ASM_STACKPROTECTOR_H | ||
13 | #define _ASM_STACKPROTECTOR_H | ||
14 | |||
15 | #include <linux/random.h> | ||
16 | #include <linux/version.h> | ||
17 | #include <asm/reg.h> | ||
18 | |||
19 | extern unsigned long __stack_chk_guard; | ||
20 | |||
21 | /* | ||
22 | * Initialize the stackprotector canary value. | ||
23 | * | ||
24 | * NOTE: this must only be called from functions that never return, | ||
25 | * and it must always be inlined. | ||
26 | */ | ||
27 | static __always_inline void boot_init_stack_canary(void) | ||
28 | { | ||
29 | unsigned long canary; | ||
30 | |||
31 | /* Try to get a semi random initial value. */ | ||
32 | get_random_bytes(&canary, sizeof(canary)); | ||
33 | canary ^= mftb(); | ||
34 | canary ^= LINUX_VERSION_CODE; | ||
35 | |||
36 | current->stack_canary = canary; | ||
37 | __stack_chk_guard = current->stack_canary; | ||
38 | } | ||
39 | |||
40 | #endif /* _ASM_STACKPROTECTOR_H */ | ||
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index f0b238516e9b..e0b9e576905a 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h | |||
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; } | |||
44 | 44 | ||
45 | #ifdef CONFIG_PPC_POWERNV | 45 | #ifdef CONFIG_PPC_POWERNV |
46 | extern int icp_opal_init(void); | 46 | extern int icp_opal_init(void); |
47 | extern void icp_opal_flush_interrupt(void); | ||
47 | #else | 48 | #else |
48 | static inline int icp_opal_init(void) { return -ENODEV; } | 49 | static inline int icp_opal_init(void) { return -ENODEV; } |
49 | #endif | 50 | #endif |
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 23f8082d7bfa..f4c2b52e58b3 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) | |||
19 | CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) | 19 | CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) |
20 | CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) | 20 | CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) |
21 | 21 | ||
22 | # -fstack-protector triggers protection checks in this code, | ||
23 | # but it is being used too early to link to meaningful stack_chk logic. | ||
24 | CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector) | ||
25 | |||
26 | ifdef CONFIG_FUNCTION_TRACER | 22 | ifdef CONFIG_FUNCTION_TRACER |
27 | # Do not trace early boot code | 23 | # Do not trace early boot code |
28 | CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) | 24 | CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 0601e6a7297c..195a9fc8f81c 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -91,9 +91,6 @@ int main(void) | |||
91 | DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); | 91 | DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
95 | DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); | ||
96 | #endif | ||
97 | DEFINE(KSP, offsetof(struct thread_struct, ksp)); | 94 | DEFINE(KSP, offsetof(struct thread_struct, ksp)); |
98 | DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); | 95 | DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); |
99 | #ifdef CONFIG_BOOKE | 96 | #ifdef CONFIG_BOOKE |
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index d88573bdd090..b94887165a10 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata) | |||
545 | static void *__eeh_clear_pe_frozen_state(void *data, void *flag) | 545 | static void *__eeh_clear_pe_frozen_state(void *data, void *flag) |
546 | { | 546 | { |
547 | struct eeh_pe *pe = (struct eeh_pe *)data; | 547 | struct eeh_pe *pe = (struct eeh_pe *)data; |
548 | bool *clear_sw_state = flag; | 548 | bool clear_sw_state = *(bool *)flag; |
549 | int i, rc = 1; | 549 | int i, rc = 1; |
550 | 550 | ||
551 | for (i = 0; rc && i < 3; i++) | 551 | for (i = 0; rc && i < 3; i++) |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 5742dbdbee46..3841d749a430 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION | |||
674 | mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ | 674 | mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ |
675 | END_FTR_SECTION_IFSET(CPU_FTR_SPE) | 675 | END_FTR_SECTION_IFSET(CPU_FTR_SPE) |
676 | #endif /* CONFIG_SPE */ | 676 | #endif /* CONFIG_SPE */ |
677 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | 677 | |
678 | lwz r0,TSK_STACK_CANARY(r2) | ||
679 | lis r4,__stack_chk_guard@ha | ||
680 | stw r0,__stack_chk_guard@l(r4) | ||
681 | #endif | ||
682 | lwz r0,_CCR(r1) | 678 | lwz r0,_CCR(r1) |
683 | mtcrf 0xFF,r0 | 679 | mtcrf 0xFF,r0 |
684 | /* r3-r12 are destroyed -- Cort */ | 680 | /* r3-r12 are destroyed -- Cort */ |
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index bb1807184bad..0b0f89685b67 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c | |||
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers, | |||
286 | for (end = (void *)vers + size; vers < end; vers++) | 286 | for (end = (void *)vers + size; vers < end; vers++) |
287 | if (vers->name[0] == '.') { | 287 | if (vers->name[0] == '.') { |
288 | memmove(vers->name, vers->name+1, strlen(vers->name)); | 288 | memmove(vers->name, vers->name+1, strlen(vers->name)); |
289 | #ifdef ARCH_RELOCATES_KCRCTAB | ||
290 | /* The TOC symbol has no CRC computed. To avoid CRC | ||
291 | * check failing, we must force it to the expected | ||
292 | * value (see CRC check in module.c). | ||
293 | */ | ||
294 | if (!strcmp(vers->name, "TOC.")) | ||
295 | vers->crc = -(unsigned long)reloc_start; | ||
296 | #endif | ||
297 | } | 289 | } |
298 | } | 290 | } |
299 | 291 | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 04885cec24df..5dd056df0baa 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -64,12 +64,6 @@ | |||
64 | #include <linux/kprobes.h> | 64 | #include <linux/kprobes.h> |
65 | #include <linux/kdebug.h> | 65 | #include <linux/kdebug.h> |
66 | 66 | ||
67 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
68 | #include <linux/stackprotector.h> | ||
69 | unsigned long __stack_chk_guard __read_mostly; | ||
70 | EXPORT_SYMBOL(__stack_chk_guard); | ||
71 | #endif | ||
72 | |||
73 | /* Transactional Memory debug */ | 67 | /* Transactional Memory debug */ |
74 | #ifdef TM_DEBUG_SW | 68 | #ifdef TM_DEBUG_SW |
75 | #define TM_DEBUG(x...) printk(KERN_INFO x) | 69 | #define TM_DEBUG(x...) printk(KERN_INFO x) |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index ec47a939cbdd..ac83eb04a8b8 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void) | |||
2834 | 2834 | ||
2835 | cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); | 2835 | cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); |
2836 | 2836 | ||
2837 | if (!PHANDLE_VALID(cpu_pkg)) | ||
2838 | return; | ||
2839 | |||
2837 | prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); | 2840 | prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); |
2838 | prom.cpu = be32_to_cpu(rval); | 2841 | prom.cpu = be32_to_cpu(rval); |
2839 | 2842 | ||
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 6fd30ac7d14a..62a50d6d1053 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, | |||
253 | if (unlikely(debugger_fault_handler(regs))) | 253 | if (unlikely(debugger_fault_handler(regs))) |
254 | goto bail; | 254 | goto bail; |
255 | 255 | ||
256 | /* On a kernel SLB miss we can only check for a valid exception entry */ | 256 | /* |
257 | if (!user_mode(regs) && (address >= TASK_SIZE)) { | 257 | * The kernel should never take an execute fault nor should it |
258 | * take a page fault to a kernel address. | ||
259 | */ | ||
260 | if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) { | ||
258 | rc = SIGSEGV; | 261 | rc = SIGSEGV; |
259 | goto bail; | 262 | goto bail; |
260 | } | 263 | } |
@@ -391,20 +394,6 @@ good_area: | |||
391 | 394 | ||
392 | if (is_exec) { | 395 | if (is_exec) { |
393 | /* | 396 | /* |
394 | * An execution fault + no execute ? | ||
395 | * | ||
396 | * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we | ||
397 | * deliberately create NX mappings, and use the fault to do the | ||
398 | * cache flush. This is usually handled in hash_page_do_lazy_icache() | ||
399 | * but we could end up here if that races with a concurrent PTE | ||
400 | * update. In that case we need to fall through here to the VMA | ||
401 | * check below. | ||
402 | */ | ||
403 | if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && | ||
404 | (regs->msr & SRR1_ISI_N_OR_G)) | ||
405 | goto bad_area; | ||
406 | |||
407 | /* | ||
408 | * Allow execution from readable areas if the MMU does not | 397 | * Allow execution from readable areas if the MMU does not |
409 | * provide separate controls over reading and executing. | 398 | * provide separate controls over reading and executing. |
410 | * | 399 | * |
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index cfa53ccc8baf..34f1a0dbc898 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c | |||
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa, | |||
65 | if (!pmdp) | 65 | if (!pmdp) |
66 | return -ENOMEM; | 66 | return -ENOMEM; |
67 | if (map_page_size == PMD_SIZE) { | 67 | if (map_page_size == PMD_SIZE) { |
68 | ptep = (pte_t *)pudp; | 68 | ptep = pmdp_ptep(pmdp); |
69 | goto set_the_pte; | 69 | goto set_the_pte; |
70 | } | 70 | } |
71 | ptep = pte_alloc_kernel(pmdp, ea); | 71 | ptep = pte_alloc_kernel(pmdp, ea); |
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa, | |||
90 | } | 90 | } |
91 | pmdp = pmd_offset(pudp, ea); | 91 | pmdp = pmd_offset(pudp, ea); |
92 | if (map_page_size == PMD_SIZE) { | 92 | if (map_page_size == PMD_SIZE) { |
93 | ptep = (pte_t *)pudp; | 93 | ptep = pmdp_ptep(pmdp); |
94 | goto set_the_pte; | 94 | goto set_the_pte; |
95 | } | 95 | } |
96 | if (!pmd_present(*pmdp)) { | 96 | if (!pmd_present(*pmdp)) { |
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 61b79119065f..952713d6cf04 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c | |||
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric) | |||
50 | for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { | 50 | for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { |
51 | __tlbiel_pid(pid, set, ric); | 51 | __tlbiel_pid(pid, set, ric); |
52 | } | 52 | } |
53 | if (cpu_has_feature(CPU_FTR_POWER9_DD1)) | 53 | asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); |
54 | asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); | ||
55 | return; | ||
56 | } | 54 | } |
57 | 55 | ||
58 | static inline void _tlbie_pid(unsigned long pid, unsigned long ric) | 56 | static inline void _tlbie_pid(unsigned long pid, unsigned long ric) |
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid, | |||
85 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) | 83 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) |
86 | : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); | 84 | : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); |
87 | asm volatile("ptesync": : :"memory"); | 85 | asm volatile("ptesync": : :"memory"); |
88 | if (cpu_has_feature(CPU_FTR_POWER9_DD1)) | ||
89 | asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); | ||
90 | } | 86 | } |
91 | 87 | ||
92 | static inline void _tlbie_va(unsigned long va, unsigned long pid, | 88 | static inline void _tlbie_va(unsigned long va, unsigned long pid, |
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index c789258ae1e1..eec0e8d0454d 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void) | |||
155 | wmask = SRR1_WAKEMASK_P8; | 155 | wmask = SRR1_WAKEMASK_P8; |
156 | 156 | ||
157 | idle_states = pnv_get_supported_cpuidle_states(); | 157 | idle_states = pnv_get_supported_cpuidle_states(); |
158 | |||
158 | /* We don't want to take decrementer interrupts while we are offline, | 159 | /* We don't want to take decrementer interrupts while we are offline, |
159 | * so clear LPCR:PECE1. We keep PECE2 enabled. | 160 | * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9) |
161 | * enabled as to let IPIs in. | ||
160 | */ | 162 | */ |
161 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); | 163 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); |
162 | 164 | ||
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void) | |||
206 | * contains 0. | 208 | * contains 0. |
207 | */ | 209 | */ |
208 | if (((srr1 & wmask) == SRR1_WAKEEE) || | 210 | if (((srr1 & wmask) == SRR1_WAKEEE) || |
211 | ((srr1 & wmask) == SRR1_WAKEHVI) || | ||
209 | (local_paca->irq_happened & PACA_IRQ_EE)) { | 212 | (local_paca->irq_happened & PACA_IRQ_EE)) { |
210 | icp_native_flush_interrupt(); | 213 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
214 | icp_opal_flush_interrupt(); | ||
215 | else | ||
216 | icp_native_flush_interrupt(); | ||
211 | } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { | 217 | } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { |
212 | unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); | 218 | unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); |
213 | asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); | 219 | asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); |
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void) | |||
221 | if (srr1 && !generic_check_cpu_restart(cpu)) | 227 | if (srr1 && !generic_check_cpu_restart(cpu)) |
222 | DBG("CPU%d Unexpected exit while offline !\n", cpu); | 228 | DBG("CPU%d Unexpected exit while offline !\n", cpu); |
223 | } | 229 | } |
230 | |||
231 | /* Re-enable decrementer interrupts */ | ||
224 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1); | 232 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1); |
225 | DBG("CPU%d coming online...\n", cpu); | 233 | DBG("CPU%d coming online...\n", cpu); |
226 | } | 234 | } |
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index 60c57657c772..f9670eabfcfa 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c | |||
@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data) | |||
120 | { | 120 | { |
121 | int hw_cpu = get_hard_smp_processor_id(cpu); | 121 | int hw_cpu = get_hard_smp_processor_id(cpu); |
122 | 122 | ||
123 | kvmppc_set_host_ipi(cpu, 1); | ||
123 | opal_int_set_mfrr(hw_cpu, IPI_PRIORITY); | 124 | opal_int_set_mfrr(hw_cpu, IPI_PRIORITY); |
124 | } | 125 | } |
125 | 126 | ||
126 | static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id) | 127 | static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id) |
127 | { | 128 | { |
128 | int hw_cpu = hard_smp_processor_id(); | 129 | int cpu = smp_processor_id(); |
129 | 130 | ||
130 | opal_int_set_mfrr(hw_cpu, 0xff); | 131 | kvmppc_set_host_ipi(cpu, 0); |
132 | opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff); | ||
131 | 133 | ||
132 | return smp_ipi_demux(); | 134 | return smp_ipi_demux(); |
133 | } | 135 | } |
134 | 136 | ||
137 | /* | ||
138 | * Called when an interrupt is received on an off-line CPU to | ||
139 | * clear the interrupt, so that the CPU can go back to nap mode. | ||
140 | */ | ||
141 | void icp_opal_flush_interrupt(void) | ||
142 | { | ||
143 | unsigned int xirr; | ||
144 | unsigned int vec; | ||
145 | |||
146 | do { | ||
147 | xirr = icp_opal_get_xirr(); | ||
148 | vec = xirr & 0x00ffffff; | ||
149 | if (vec == XICS_IRQ_SPURIOUS) | ||
150 | break; | ||
151 | if (vec == XICS_IPI) { | ||
152 | /* Clear pending IPI */ | ||
153 | int cpu = smp_processor_id(); | ||
154 | kvmppc_set_host_ipi(cpu, 0); | ||
155 | opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff); | ||
156 | } else { | ||
157 | pr_err("XICS: hw interrupt 0x%x to offline cpu, " | ||
158 | "disabling\n", vec); | ||
159 | xics_mask_unknown_vec(vec); | ||
160 | } | ||
161 | |||
162 | /* EOI the interrupt */ | ||
163 | } while (opal_int_eoi(xirr) > 0); | ||
164 | } | ||
165 | |||
135 | #endif /* CONFIG_SMP */ | 166 | #endif /* CONFIG_SMP */ |
136 | 167 | ||
137 | static const struct icp_ops icp_opal_ops = { | 168 | static const struct icp_ops icp_opal_ops = { |
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index b84be675e507..d0317993e947 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h | |||
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa, | |||
35 | static inline void tsb_context_switch(struct mm_struct *mm) | 35 | static inline void tsb_context_switch(struct mm_struct *mm) |
36 | { | 36 | { |
37 | __tsb_context_switch(__pa(mm->pgd), | 37 | __tsb_context_switch(__pa(mm->pgd), |
38 | &mm->context.tsb_block[0], | 38 | &mm->context.tsb_block[MM_TSB_BASE], |
39 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) | 39 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
40 | (mm->context.tsb_block[1].tsb ? | 40 | (mm->context.tsb_block[MM_TSB_HUGE].tsb ? |
41 | &mm->context.tsb_block[1] : | 41 | &mm->context.tsb_block[MM_TSB_HUGE] : |
42 | NULL) | 42 | NULL) |
43 | #else | 43 | #else |
44 | NULL | 44 | NULL |
45 | #endif | 45 | #endif |
46 | , __pa(&mm->context.tsb_descr[0])); | 46 | , __pa(&mm->context.tsb_descr[MM_TSB_BASE])); |
47 | } | 47 | } |
48 | 48 | ||
49 | void tsb_grow(struct mm_struct *mm, | 49 | void tsb_grow(struct mm_struct *mm, |
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 3bebf395252c..4d0248aa0928 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask) | |||
1021 | unsigned long order = get_order(size); | 1021 | unsigned long order = get_order(size); |
1022 | unsigned long p; | 1022 | unsigned long p; |
1023 | 1023 | ||
1024 | p = __get_free_pages(GFP_KERNEL, order); | 1024 | p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
1025 | if (!p) { | 1025 | if (!p) { |
1026 | prom_printf("SUN4V: Error, cannot allocate queue.\n"); | 1026 | prom_printf("SUN4V: Error, cannot allocate queue.\n"); |
1027 | prom_halt(); | 1027 | prom_halt(); |
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c index c59af546f522..3caed4023589 100644 --- a/arch/sparc/kernel/sstate.c +++ b/arch/sparc/kernel/sstate.c | |||
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) = | |||
43 | "Linux powering off"; | 43 | "Linux powering off"; |
44 | static const char rebooting_msg[32] __attribute__((aligned(32))) = | 44 | static const char rebooting_msg[32] __attribute__((aligned(32))) = |
45 | "Linux rebooting"; | 45 | "Linux rebooting"; |
46 | static const char panicing_msg[32] __attribute__((aligned(32))) = | 46 | static const char panicking_msg[32] __attribute__((aligned(32))) = |
47 | "Linux panicing"; | 47 | "Linux panicking"; |
48 | 48 | ||
49 | static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused) | 49 | static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused) |
50 | { | 50 | { |
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = { | |||
76 | 76 | ||
77 | static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) | 77 | static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) |
78 | { | 78 | { |
79 | do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg); | 79 | do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg); |
80 | 80 | ||
81 | return NOTIFY_DONE; | 81 | return NOTIFY_DONE; |
82 | } | 82 | } |
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 4bc10e44d1ca..dfc97a47c9a0 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c | |||
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs) | |||
2051 | atomic_inc(&sun4v_resum_oflow_cnt); | 2051 | atomic_inc(&sun4v_resum_oflow_cnt); |
2052 | } | 2052 | } |
2053 | 2053 | ||
2054 | /* Given a set of registers, get the virtual addressi that was being accessed | ||
2055 | * by the faulting instructions at tpc. | ||
2056 | */ | ||
2057 | static unsigned long sun4v_get_vaddr(struct pt_regs *regs) | ||
2058 | { | ||
2059 | unsigned int insn; | ||
2060 | |||
2061 | if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) { | ||
2062 | return compute_effective_address(regs, insn, | ||
2063 | (insn >> 25) & 0x1f); | ||
2064 | } | ||
2065 | return 0; | ||
2066 | } | ||
2067 | |||
2068 | /* Attempt to handle non-resumable errors generated from userspace. | ||
2069 | * Returns true if the signal was handled, false otherwise. | ||
2070 | */ | ||
2071 | bool sun4v_nonresum_error_user_handled(struct pt_regs *regs, | ||
2072 | struct sun4v_error_entry *ent) { | ||
2073 | |||
2074 | unsigned int attrs = ent->err_attrs; | ||
2075 | |||
2076 | if (attrs & SUN4V_ERR_ATTRS_MEMORY) { | ||
2077 | unsigned long addr = ent->err_raddr; | ||
2078 | siginfo_t info; | ||
2079 | |||
2080 | if (addr == ~(u64)0) { | ||
2081 | /* This seems highly unlikely to ever occur */ | ||
2082 | pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n"); | ||
2083 | } else { | ||
2084 | unsigned long page_cnt = DIV_ROUND_UP(ent->err_size, | ||
2085 | PAGE_SIZE); | ||
2086 | |||
2087 | /* Break the unfortunate news. */ | ||
2088 | pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n", | ||
2089 | addr); | ||
2090 | pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n", | ||
2091 | page_cnt); | ||
2092 | |||
2093 | while (page_cnt-- > 0) { | ||
2094 | if (pfn_valid(addr >> PAGE_SHIFT)) | ||
2095 | get_page(pfn_to_page(addr >> PAGE_SHIFT)); | ||
2096 | addr += PAGE_SIZE; | ||
2097 | } | ||
2098 | } | ||
2099 | info.si_signo = SIGKILL; | ||
2100 | info.si_errno = 0; | ||
2101 | info.si_trapno = 0; | ||
2102 | force_sig_info(info.si_signo, &info, current); | ||
2103 | |||
2104 | return true; | ||
2105 | } | ||
2106 | if (attrs & SUN4V_ERR_ATTRS_PIO) { | ||
2107 | siginfo_t info; | ||
2108 | |||
2109 | info.si_signo = SIGBUS; | ||
2110 | info.si_code = BUS_ADRERR; | ||
2111 | info.si_addr = (void __user *)sun4v_get_vaddr(regs); | ||
2112 | force_sig_info(info.si_signo, &info, current); | ||
2113 | |||
2114 | return true; | ||
2115 | } | ||
2116 | |||
2117 | /* Default to doing nothing */ | ||
2118 | return false; | ||
2119 | } | ||
2120 | |||
2054 | /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. | 2121 | /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. |
2055 | * Log the event, clear the first word of the entry, and die. | 2122 | * Log the event, clear the first word of the entry, and die. |
2056 | */ | 2123 | */ |
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) | |||
2075 | 2142 | ||
2076 | put_cpu(); | 2143 | put_cpu(); |
2077 | 2144 | ||
2145 | if (!(regs->tstate & TSTATE_PRIV) && | ||
2146 | sun4v_nonresum_error_user_handled(regs, &local_copy)) { | ||
2147 | /* DON'T PANIC: This userspace error was handled. */ | ||
2148 | return; | ||
2149 | } | ||
2150 | |||
2078 | #ifdef CONFIG_PCI | 2151 | #ifdef CONFIG_PCI |
2079 | /* Check for the special PCI poke sequence. */ | 2152 | /* Check for the special PCI poke sequence. */ |
2080 | if (pci_poke_in_progress && pci_poke_cpu == cpu) { | 2153 | if (pci_poke_in_progress && pci_poke_cpu == cpu) { |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 6ef688a1ef3e..7ff1b0c86a8e 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void) | |||
1085 | aesni_simd_skciphers[i]; i++) | 1085 | aesni_simd_skciphers[i]; i++) |
1086 | simd_skcipher_free(aesni_simd_skciphers[i]); | 1086 | simd_skcipher_free(aesni_simd_skciphers[i]); |
1087 | 1087 | ||
1088 | for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) && | 1088 | for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) |
1089 | aesni_simd_skciphers2[i].simd; i++) | 1089 | if (aesni_simd_skciphers2[i].simd) |
1090 | simd_skcipher_free(aesni_simd_skciphers2[i].simd); | 1090 | simd_skcipher_free(aesni_simd_skciphers2[i].simd); |
1091 | } | 1091 | } |
1092 | 1092 | ||
1093 | static int __init aesni_init(void) | 1093 | static int __init aesni_init(void) |
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void) | |||
1168 | simd = simd_skcipher_create_compat(algname, drvname, basename); | 1168 | simd = simd_skcipher_create_compat(algname, drvname, basename); |
1169 | err = PTR_ERR(simd); | 1169 | err = PTR_ERR(simd); |
1170 | if (IS_ERR(simd)) | 1170 | if (IS_ERR(simd)) |
1171 | goto unregister_simds; | 1171 | continue; |
1172 | 1172 | ||
1173 | aesni_simd_skciphers2[i].simd = simd; | 1173 | aesni_simd_skciphers2[i].simd = simd; |
1174 | } | 1174 | } |
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 17c3564d087a..22ef4f72cf32 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c | |||
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms; | |||
161 | 161 | ||
162 | static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) | 162 | static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) |
163 | { | 163 | { |
164 | return rapl_pmus->pmus[topology_logical_package_id(cpu)]; | 164 | unsigned int pkgid = topology_logical_package_id(cpu); |
165 | |||
166 | /* | ||
167 | * The unsigned check also catches the '-1' return value for non | ||
168 | * existent mappings in the topology map. | ||
169 | */ | ||
170 | return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL; | ||
165 | } | 171 | } |
166 | 172 | ||
167 | static inline u64 rapl_read_counter(struct perf_event *event) | 173 | static inline u64 rapl_read_counter(struct perf_event *event) |
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event) | |||
402 | 408 | ||
403 | /* must be done before validate_group */ | 409 | /* must be done before validate_group */ |
404 | pmu = cpu_to_rapl_pmu(event->cpu); | 410 | pmu = cpu_to_rapl_pmu(event->cpu); |
411 | if (!pmu) | ||
412 | return -EINVAL; | ||
405 | event->cpu = pmu->cpu; | 413 | event->cpu = pmu->cpu; |
406 | event->pmu_private = pmu; | 414 | event->pmu_private = pmu; |
407 | event->hw.event_base = msr; | 415 | event->hw.event_base = msr; |
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu) | |||
585 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); | 593 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); |
586 | int target; | 594 | int target; |
587 | 595 | ||
596 | if (!pmu) { | ||
597 | pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); | ||
598 | if (!pmu) | ||
599 | return -ENOMEM; | ||
600 | |||
601 | raw_spin_lock_init(&pmu->lock); | ||
602 | INIT_LIST_HEAD(&pmu->active_list); | ||
603 | pmu->pmu = &rapl_pmus->pmu; | ||
604 | pmu->timer_interval = ms_to_ktime(rapl_timer_ms); | ||
605 | rapl_hrtimer_init(pmu); | ||
606 | |||
607 | rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu; | ||
608 | } | ||
609 | |||
588 | /* | 610 | /* |
589 | * Check if there is an online cpu in the package which collects rapl | 611 | * Check if there is an online cpu in the package which collects rapl |
590 | * events already. | 612 | * events already. |
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu) | |||
598 | return 0; | 620 | return 0; |
599 | } | 621 | } |
600 | 622 | ||
601 | static int rapl_cpu_prepare(unsigned int cpu) | ||
602 | { | ||
603 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); | ||
604 | |||
605 | if (pmu) | ||
606 | return 0; | ||
607 | |||
608 | pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); | ||
609 | if (!pmu) | ||
610 | return -ENOMEM; | ||
611 | |||
612 | raw_spin_lock_init(&pmu->lock); | ||
613 | INIT_LIST_HEAD(&pmu->active_list); | ||
614 | pmu->pmu = &rapl_pmus->pmu; | ||
615 | pmu->timer_interval = ms_to_ktime(rapl_timer_ms); | ||
616 | pmu->cpu = -1; | ||
617 | rapl_hrtimer_init(pmu); | ||
618 | rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu; | ||
619 | return 0; | ||
620 | } | ||
621 | |||
622 | static int rapl_check_hw_unit(bool apply_quirk) | 623 | static int rapl_check_hw_unit(bool apply_quirk) |
623 | { | 624 | { |
624 | u64 msr_rapl_power_unit_bits; | 625 | u64 msr_rapl_power_unit_bits; |
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void) | |||
803 | /* | 804 | /* |
804 | * Install callbacks. Core will call them for each online cpu. | 805 | * Install callbacks. Core will call them for each online cpu. |
805 | */ | 806 | */ |
806 | |||
807 | ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare", | ||
808 | rapl_cpu_prepare, NULL); | ||
809 | if (ret) | ||
810 | goto out; | ||
811 | |||
812 | ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, | 807 | ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, |
813 | "perf/x86/rapl:online", | 808 | "perf/x86/rapl:online", |
814 | rapl_cpu_online, rapl_cpu_offline); | 809 | rapl_cpu_online, rapl_cpu_offline); |
815 | if (ret) | 810 | if (ret) |
816 | goto out1; | 811 | goto out; |
817 | 812 | ||
818 | ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); | 813 | ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); |
819 | if (ret) | 814 | if (ret) |
820 | goto out2; | 815 | goto out1; |
821 | 816 | ||
822 | rapl_advertise(); | 817 | rapl_advertise(); |
823 | return 0; | 818 | return 0; |
824 | 819 | ||
825 | out2: | ||
826 | cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); | ||
827 | out1: | 820 | out1: |
828 | cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP); | 821 | cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); |
829 | out: | 822 | out: |
830 | pr_warn("Initialization failed (%d), disabled\n", ret); | 823 | pr_warn("Initialization failed (%d), disabled\n", ret); |
831 | cleanup_rapl_pmus(); | 824 | cleanup_rapl_pmus(); |
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init); | |||
836 | static void __exit intel_rapl_exit(void) | 829 | static void __exit intel_rapl_exit(void) |
837 | { | 830 | { |
838 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); | 831 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); |
839 | cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP); | ||
840 | perf_pmu_unregister(&rapl_pmus->pmu); | 832 | perf_pmu_unregister(&rapl_pmus->pmu); |
841 | cleanup_rapl_pmus(); | 833 | cleanup_rapl_pmus(); |
842 | } | 834 | } |
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 8c4ccdc3a3f3..1ab45976474d 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c | |||
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj, | |||
100 | 100 | ||
101 | struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) | 101 | struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) |
102 | { | 102 | { |
103 | return pmu->boxes[topology_logical_package_id(cpu)]; | 103 | unsigned int pkgid = topology_logical_package_id(cpu); |
104 | |||
105 | /* | ||
106 | * The unsigned check also catches the '-1' return value for non | ||
107 | * existent mappings in the topology map. | ||
108 | */ | ||
109 | return pkgid < max_packages ? pmu->boxes[pkgid] : NULL; | ||
104 | } | 110 | } |
105 | 111 | ||
106 | u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) | 112 | u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) |
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu) | |||
764 | pmu->registered = false; | 770 | pmu->registered = false; |
765 | } | 771 | } |
766 | 772 | ||
767 | static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu) | ||
768 | { | ||
769 | struct intel_uncore_pmu *pmu = type->pmus; | ||
770 | struct intel_uncore_box *box; | ||
771 | int i, pkg; | ||
772 | |||
773 | if (pmu) { | ||
774 | pkg = topology_physical_package_id(cpu); | ||
775 | for (i = 0; i < type->num_boxes; i++, pmu++) { | ||
776 | box = pmu->boxes[pkg]; | ||
777 | if (box) | ||
778 | uncore_box_exit(box); | ||
779 | } | ||
780 | } | ||
781 | } | ||
782 | |||
783 | static void uncore_exit_boxes(void *dummy) | ||
784 | { | ||
785 | struct intel_uncore_type **types; | ||
786 | |||
787 | for (types = uncore_msr_uncores; *types; types++) | ||
788 | __uncore_exit_boxes(*types++, smp_processor_id()); | ||
789 | } | ||
790 | |||
791 | static void uncore_free_boxes(struct intel_uncore_pmu *pmu) | 773 | static void uncore_free_boxes(struct intel_uncore_pmu *pmu) |
792 | { | 774 | { |
793 | int pkg; | 775 | int pkg; |
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void) | |||
1058 | } | 1040 | } |
1059 | } | 1041 | } |
1060 | 1042 | ||
1061 | static int uncore_cpu_dying(unsigned int cpu) | ||
1062 | { | ||
1063 | struct intel_uncore_type *type, **types = uncore_msr_uncores; | ||
1064 | struct intel_uncore_pmu *pmu; | ||
1065 | struct intel_uncore_box *box; | ||
1066 | int i, pkg; | ||
1067 | |||
1068 | pkg = topology_logical_package_id(cpu); | ||
1069 | for (; *types; types++) { | ||
1070 | type = *types; | ||
1071 | pmu = type->pmus; | ||
1072 | for (i = 0; i < type->num_boxes; i++, pmu++) { | ||
1073 | box = pmu->boxes[pkg]; | ||
1074 | if (box && atomic_dec_return(&box->refcnt) == 0) | ||
1075 | uncore_box_exit(box); | ||
1076 | } | ||
1077 | } | ||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
1081 | static int first_init; | ||
1082 | |||
1083 | static int uncore_cpu_starting(unsigned int cpu) | ||
1084 | { | ||
1085 | struct intel_uncore_type *type, **types = uncore_msr_uncores; | ||
1086 | struct intel_uncore_pmu *pmu; | ||
1087 | struct intel_uncore_box *box; | ||
1088 | int i, pkg, ncpus = 1; | ||
1089 | |||
1090 | if (first_init) { | ||
1091 | /* | ||
1092 | * On init we get the number of online cpus in the package | ||
1093 | * and set refcount for all of them. | ||
1094 | */ | ||
1095 | ncpus = cpumask_weight(topology_core_cpumask(cpu)); | ||
1096 | } | ||
1097 | |||
1098 | pkg = topology_logical_package_id(cpu); | ||
1099 | for (; *types; types++) { | ||
1100 | type = *types; | ||
1101 | pmu = type->pmus; | ||
1102 | for (i = 0; i < type->num_boxes; i++, pmu++) { | ||
1103 | box = pmu->boxes[pkg]; | ||
1104 | if (!box) | ||
1105 | continue; | ||
1106 | /* The first cpu on a package activates the box */ | ||
1107 | if (atomic_add_return(ncpus, &box->refcnt) == ncpus) | ||
1108 | uncore_box_init(box); | ||
1109 | } | ||
1110 | } | ||
1111 | |||
1112 | return 0; | ||
1113 | } | ||
1114 | |||
1115 | static int uncore_cpu_prepare(unsigned int cpu) | ||
1116 | { | ||
1117 | struct intel_uncore_type *type, **types = uncore_msr_uncores; | ||
1118 | struct intel_uncore_pmu *pmu; | ||
1119 | struct intel_uncore_box *box; | ||
1120 | int i, pkg; | ||
1121 | |||
1122 | pkg = topology_logical_package_id(cpu); | ||
1123 | for (; *types; types++) { | ||
1124 | type = *types; | ||
1125 | pmu = type->pmus; | ||
1126 | for (i = 0; i < type->num_boxes; i++, pmu++) { | ||
1127 | if (pmu->boxes[pkg]) | ||
1128 | continue; | ||
1129 | /* First cpu of a package allocates the box */ | ||
1130 | box = uncore_alloc_box(type, cpu_to_node(cpu)); | ||
1131 | if (!box) | ||
1132 | return -ENOMEM; | ||
1133 | box->pmu = pmu; | ||
1134 | box->pkgid = pkg; | ||
1135 | pmu->boxes[pkg] = box; | ||
1136 | } | ||
1137 | } | ||
1138 | return 0; | ||
1139 | } | ||
1140 | |||
1141 | static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, | 1043 | static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, |
1142 | int new_cpu) | 1044 | int new_cpu) |
1143 | { | 1045 | { |
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores, | |||
1177 | 1079 | ||
1178 | static int uncore_event_cpu_offline(unsigned int cpu) | 1080 | static int uncore_event_cpu_offline(unsigned int cpu) |
1179 | { | 1081 | { |
1180 | int target; | 1082 | struct intel_uncore_type *type, **types = uncore_msr_uncores; |
1083 | struct intel_uncore_pmu *pmu; | ||
1084 | struct intel_uncore_box *box; | ||
1085 | int i, pkg, target; | ||
1181 | 1086 | ||
1182 | /* Check if exiting cpu is used for collecting uncore events */ | 1087 | /* Check if exiting cpu is used for collecting uncore events */ |
1183 | if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) | 1088 | if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) |
1184 | return 0; | 1089 | goto unref; |
1185 | |||
1186 | /* Find a new cpu to collect uncore events */ | 1090 | /* Find a new cpu to collect uncore events */ |
1187 | target = cpumask_any_but(topology_core_cpumask(cpu), cpu); | 1091 | target = cpumask_any_but(topology_core_cpumask(cpu), cpu); |
1188 | 1092 | ||
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu) | |||
1194 | 1098 | ||
1195 | uncore_change_context(uncore_msr_uncores, cpu, target); | 1099 | uncore_change_context(uncore_msr_uncores, cpu, target); |
1196 | uncore_change_context(uncore_pci_uncores, cpu, target); | 1100 | uncore_change_context(uncore_pci_uncores, cpu, target); |
1101 | |||
1102 | unref: | ||
1103 | /* Clear the references */ | ||
1104 | pkg = topology_logical_package_id(cpu); | ||
1105 | for (; *types; types++) { | ||
1106 | type = *types; | ||
1107 | pmu = type->pmus; | ||
1108 | for (i = 0; i < type->num_boxes; i++, pmu++) { | ||
1109 | box = pmu->boxes[pkg]; | ||
1110 | if (box && atomic_dec_return(&box->refcnt) == 0) | ||
1111 | uncore_box_exit(box); | ||
1112 | } | ||
1113 | } | ||
1197 | return 0; | 1114 | return 0; |
1198 | } | 1115 | } |
1199 | 1116 | ||
1117 | static int allocate_boxes(struct intel_uncore_type **types, | ||
1118 | unsigned int pkg, unsigned int cpu) | ||
1119 | { | ||
1120 | struct intel_uncore_box *box, *tmp; | ||
1121 | struct intel_uncore_type *type; | ||
1122 | struct intel_uncore_pmu *pmu; | ||
1123 | LIST_HEAD(allocated); | ||
1124 | int i; | ||
1125 | |||
1126 | /* Try to allocate all required boxes */ | ||
1127 | for (; *types; types++) { | ||
1128 | type = *types; | ||
1129 | pmu = type->pmus; | ||
1130 | for (i = 0; i < type->num_boxes; i++, pmu++) { | ||
1131 | if (pmu->boxes[pkg]) | ||
1132 | continue; | ||
1133 | box = uncore_alloc_box(type, cpu_to_node(cpu)); | ||
1134 | if (!box) | ||
1135 | goto cleanup; | ||
1136 | box->pmu = pmu; | ||
1137 | box->pkgid = pkg; | ||
1138 | list_add(&box->active_list, &allocated); | ||
1139 | } | ||
1140 | } | ||
1141 | /* Install them in the pmus */ | ||
1142 | list_for_each_entry_safe(box, tmp, &allocated, active_list) { | ||
1143 | list_del_init(&box->active_list); | ||
1144 | box->pmu->boxes[pkg] = box; | ||
1145 | } | ||
1146 | return 0; | ||
1147 | |||
1148 | cleanup: | ||
1149 | list_for_each_entry_safe(box, tmp, &allocated, active_list) { | ||
1150 | list_del_init(&box->active_list); | ||
1151 | kfree(box); | ||
1152 | } | ||
1153 | return -ENOMEM; | ||
1154 | } | ||
1155 | |||
1200 | static int uncore_event_cpu_online(unsigned int cpu) | 1156 | static int uncore_event_cpu_online(unsigned int cpu) |
1201 | { | 1157 | { |
1202 | int target; | 1158 | struct intel_uncore_type *type, **types = uncore_msr_uncores; |
1159 | struct intel_uncore_pmu *pmu; | ||
1160 | struct intel_uncore_box *box; | ||
1161 | int i, ret, pkg, target; | ||
1162 | |||
1163 | pkg = topology_logical_package_id(cpu); | ||
1164 | ret = allocate_boxes(types, pkg, cpu); | ||
1165 | if (ret) | ||
1166 | return ret; | ||
1167 | |||
1168 | for (; *types; types++) { | ||
1169 | type = *types; | ||
1170 | pmu = type->pmus; | ||
1171 | for (i = 0; i < type->num_boxes; i++, pmu++) { | ||
1172 | box = pmu->boxes[pkg]; | ||
1173 | if (!box && atomic_inc_return(&box->refcnt) == 1) | ||
1174 | uncore_box_init(box); | ||
1175 | } | ||
1176 | } | ||
1203 | 1177 | ||
1204 | /* | 1178 | /* |
1205 | * Check if there is an online cpu in the package | 1179 | * Check if there is an online cpu in the package |
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void) | |||
1389 | if (cret && pret) | 1363 | if (cret && pret) |
1390 | return -ENODEV; | 1364 | return -ENODEV; |
1391 | 1365 | ||
1392 | /* | 1366 | /* Install hotplug callbacks to setup the targets for each package */ |
1393 | * Install callbacks. Core will call them for each online cpu. | 1367 | ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, |
1394 | * | 1368 | "perf/x86/intel/uncore:online", |
1395 | * The first online cpu of each package allocates and takes | 1369 | uncore_event_cpu_online, |
1396 | * the refcounts for all other online cpus in that package. | 1370 | uncore_event_cpu_offline); |
1397 | * If msrs are not enabled no allocation is required and | 1371 | if (ret) |
1398 | * uncore_cpu_prepare() is not called for each online cpu. | 1372 | goto err; |
1399 | */ | ||
1400 | if (!cret) { | ||
1401 | ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP, | ||
1402 | "perf/x86/intel/uncore:prepare", | ||
1403 | uncore_cpu_prepare, NULL); | ||
1404 | if (ret) | ||
1405 | goto err; | ||
1406 | } else { | ||
1407 | cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP, | ||
1408 | "perf/x86/intel/uncore:prepare", | ||
1409 | uncore_cpu_prepare, NULL); | ||
1410 | } | ||
1411 | first_init = 1; | ||
1412 | cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING, | ||
1413 | "perf/x86/uncore:starting", | ||
1414 | uncore_cpu_starting, uncore_cpu_dying); | ||
1415 | first_init = 0; | ||
1416 | cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, | ||
1417 | "perf/x86/uncore:online", | ||
1418 | uncore_event_cpu_online, uncore_event_cpu_offline); | ||
1419 | return 0; | 1373 | return 0; |
1420 | 1374 | ||
1421 | err: | 1375 | err: |
1422 | /* Undo box->init_box() */ | ||
1423 | on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1); | ||
1424 | uncore_types_exit(uncore_msr_uncores); | 1376 | uncore_types_exit(uncore_msr_uncores); |
1425 | uncore_pci_exit(); | 1377 | uncore_pci_exit(); |
1426 | return ret; | 1378 | return ret; |
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init); | |||
1429 | 1381 | ||
1430 | static void __exit intel_uncore_exit(void) | 1382 | static void __exit intel_uncore_exit(void) |
1431 | { | 1383 | { |
1432 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); | 1384 | cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); |
1433 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING); | ||
1434 | cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP); | ||
1435 | uncore_types_exit(uncore_msr_uncores); | 1385 | uncore_types_exit(uncore_msr_uncores); |
1436 | uncore_pci_exit(); | 1386 | uncore_pci_exit(); |
1437 | } | 1387 | } |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 38711df3bcb5..2266f864b747 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void); | |||
140 | extern void load_ucode_ap(void); | 140 | extern void load_ucode_ap(void); |
141 | void reload_early_microcode(void); | 141 | void reload_early_microcode(void); |
142 | extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); | 142 | extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); |
143 | extern bool initrd_gone; | ||
143 | #else | 144 | #else |
144 | static inline int __init microcode_init(void) { return 0; }; | 145 | static inline int __init microcode_init(void) { return 0; }; |
145 | static inline void __init load_ucode_bsp(void) { } | 146 | static inline void __init load_ucode_bsp(void) { } |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1be64da0384e..e6cfe7ba2d65 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -104,6 +104,7 @@ struct cpuinfo_x86 { | |||
104 | __u8 x86_phys_bits; | 104 | __u8 x86_phys_bits; |
105 | /* CPUID returned core id bits: */ | 105 | /* CPUID returned core id bits: */ |
106 | __u8 x86_coreid_bits; | 106 | __u8 x86_coreid_bits; |
107 | __u8 cu_id; | ||
107 | /* Max extended CPUID function supported: */ | 108 | /* Max extended CPUID function supported: */ |
108 | __u32 extended_cpuid_level; | 109 | __u32 extended_cpuid_level; |
109 | /* Maximum supported CPUID level, -1=no CPUID: */ | 110 | /* Maximum supported CPUID level, -1=no CPUID: */ |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 1e35dd06b090..bd6b8c270c24 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = { | |||
1875 | .irq_ack = irq_chip_ack_parent, | 1875 | .irq_ack = irq_chip_ack_parent, |
1876 | .irq_eoi = ioapic_ack_level, | 1876 | .irq_eoi = ioapic_ack_level, |
1877 | .irq_set_affinity = ioapic_set_affinity, | 1877 | .irq_set_affinity = ioapic_set_affinity, |
1878 | .irq_retrigger = irq_chip_retrigger_hierarchy, | ||
1879 | .flags = IRQCHIP_SKIP_SET_WAKE, | 1878 | .flags = IRQCHIP_SKIP_SET_WAKE, |
1880 | }; | 1879 | }; |
1881 | 1880 | ||
@@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { | |||
1887 | .irq_ack = irq_chip_ack_parent, | 1886 | .irq_ack = irq_chip_ack_parent, |
1888 | .irq_eoi = ioapic_ir_ack_level, | 1887 | .irq_eoi = ioapic_ir_ack_level, |
1889 | .irq_set_affinity = ioapic_set_affinity, | 1888 | .irq_set_affinity = ioapic_set_affinity, |
1890 | .irq_retrigger = irq_chip_retrigger_hierarchy, | ||
1891 | .flags = IRQCHIP_SKIP_SET_WAKE, | 1889 | .flags = IRQCHIP_SKIP_SET_WAKE, |
1892 | }; | 1890 | }; |
1893 | 1891 | ||
@@ -2117,6 +2115,7 @@ static inline void __init check_timer(void) | |||
2117 | if (idx != -1 && irq_trigger(idx)) | 2115 | if (idx != -1 && irq_trigger(idx)) |
2118 | unmask_ioapic_irq(irq_get_chip_data(0)); | 2116 | unmask_ioapic_irq(irq_get_chip_data(0)); |
2119 | } | 2117 | } |
2118 | irq_domain_deactivate_irq(irq_data); | ||
2120 | irq_domain_activate_irq(irq_data); | 2119 | irq_domain_activate_irq(irq_data); |
2121 | if (timer_irq_works()) { | 2120 | if (timer_irq_works()) { |
2122 | if (disable_timer_pin_1 > 0) | 2121 | if (disable_timer_pin_1 > 0) |
@@ -2138,6 +2137,7 @@ static inline void __init check_timer(void) | |||
2138 | * legacy devices should be connected to IO APIC #0 | 2137 | * legacy devices should be connected to IO APIC #0 |
2139 | */ | 2138 | */ |
2140 | replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2); | 2139 | replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2); |
2140 | irq_domain_deactivate_irq(irq_data); | ||
2141 | irq_domain_activate_irq(irq_data); | 2141 | irq_domain_activate_irq(irq_data); |
2142 | legacy_pic->unmask(0); | 2142 | legacy_pic->unmask(0); |
2143 | if (timer_irq_works()) { | 2143 | if (timer_irq_works()) { |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1d3167269a67..2b4cf04239b6 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c) | |||
309 | 309 | ||
310 | /* get information required for multi-node processors */ | 310 | /* get information required for multi-node processors */ |
311 | if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { | 311 | if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { |
312 | u32 eax, ebx, ecx, edx; | ||
312 | 313 | ||
313 | node_id = cpuid_ecx(0x8000001e) & 7; | 314 | cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); |
315 | |||
316 | node_id = ecx & 0xff; | ||
317 | smp_num_siblings = ((ebx >> 8) & 0xff) + 1; | ||
318 | |||
319 | if (c->x86 == 0x15) | ||
320 | c->cu_id = ebx & 0xff; | ||
321 | |||
322 | if (c->x86 >= 0x17) { | ||
323 | c->cpu_core_id = ebx & 0xff; | ||
324 | |||
325 | if (smp_num_siblings > 1) | ||
326 | c->x86_max_cores /= smp_num_siblings; | ||
327 | } | ||
314 | 328 | ||
315 | /* | 329 | /* |
316 | * We may have multiple LLCs if L3 caches exist, so check if we | 330 | * We may have multiple LLCs if L3 caches exist, so check if we |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9bab7a8a4293..ede03e849a8b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
1015 | c->x86_model_id[0] = '\0'; /* Unset */ | 1015 | c->x86_model_id[0] = '\0'; /* Unset */ |
1016 | c->x86_max_cores = 1; | 1016 | c->x86_max_cores = 1; |
1017 | c->x86_coreid_bits = 0; | 1017 | c->x86_coreid_bits = 0; |
1018 | c->cu_id = 0xff; | ||
1018 | #ifdef CONFIG_X86_64 | 1019 | #ifdef CONFIG_X86_64 |
1019 | c->x86_clflush_size = 64; | 1020 | c->x86_clflush_size = 64; |
1020 | c->x86_phys_bits = 36; | 1021 | c->x86_phys_bits = 36; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 00ef43233e03..537c6647d84c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval) | |||
1373 | 1373 | ||
1374 | static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; | 1374 | static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; |
1375 | 1375 | ||
1376 | static void __restart_timer(struct timer_list *t, unsigned long interval) | 1376 | static void __start_timer(struct timer_list *t, unsigned long interval) |
1377 | { | 1377 | { |
1378 | unsigned long when = jiffies + interval; | 1378 | unsigned long when = jiffies + interval; |
1379 | unsigned long flags; | 1379 | unsigned long flags; |
1380 | 1380 | ||
1381 | local_irq_save(flags); | 1381 | local_irq_save(flags); |
1382 | 1382 | ||
1383 | if (timer_pending(t)) { | 1383 | if (!timer_pending(t) || time_before(when, t->expires)) |
1384 | if (time_before(when, t->expires)) | 1384 | mod_timer(t, round_jiffies(when)); |
1385 | mod_timer(t, when); | ||
1386 | } else { | ||
1387 | t->expires = round_jiffies(when); | ||
1388 | add_timer_on(t, smp_processor_id()); | ||
1389 | } | ||
1390 | 1385 | ||
1391 | local_irq_restore(flags); | 1386 | local_irq_restore(flags); |
1392 | } | 1387 | } |
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data) | |||
1421 | 1416 | ||
1422 | done: | 1417 | done: |
1423 | __this_cpu_write(mce_next_interval, iv); | 1418 | __this_cpu_write(mce_next_interval, iv); |
1424 | __restart_timer(t, iv); | 1419 | __start_timer(t, iv); |
1425 | } | 1420 | } |
1426 | 1421 | ||
1427 | /* | 1422 | /* |
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval) | |||
1432 | struct timer_list *t = this_cpu_ptr(&mce_timer); | 1427 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1433 | unsigned long iv = __this_cpu_read(mce_next_interval); | 1428 | unsigned long iv = __this_cpu_read(mce_next_interval); |
1434 | 1429 | ||
1435 | __restart_timer(t, interval); | 1430 | __start_timer(t, interval); |
1436 | 1431 | ||
1437 | if (interval < iv) | 1432 | if (interval < iv) |
1438 | __this_cpu_write(mce_next_interval, interval); | 1433 | __this_cpu_write(mce_next_interval, interval); |
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) | |||
1779 | } | 1774 | } |
1780 | } | 1775 | } |
1781 | 1776 | ||
1782 | static void mce_start_timer(unsigned int cpu, struct timer_list *t) | 1777 | static void mce_start_timer(struct timer_list *t) |
1783 | { | 1778 | { |
1784 | unsigned long iv = check_interval * HZ; | 1779 | unsigned long iv = check_interval * HZ; |
1785 | 1780 | ||
1786 | if (mca_cfg.ignore_ce || !iv) | 1781 | if (mca_cfg.ignore_ce || !iv) |
1787 | return; | 1782 | return; |
1788 | 1783 | ||
1789 | per_cpu(mce_next_interval, cpu) = iv; | 1784 | this_cpu_write(mce_next_interval, iv); |
1790 | 1785 | __start_timer(t, iv); | |
1791 | t->expires = round_jiffies(jiffies + iv); | ||
1792 | add_timer_on(t, cpu); | ||
1793 | } | 1786 | } |
1794 | 1787 | ||
1795 | static void __mcheck_cpu_setup_timer(void) | 1788 | static void __mcheck_cpu_setup_timer(void) |
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void) | |||
1806 | unsigned int cpu = smp_processor_id(); | 1799 | unsigned int cpu = smp_processor_id(); |
1807 | 1800 | ||
1808 | setup_pinned_timer(t, mce_timer_fn, cpu); | 1801 | setup_pinned_timer(t, mce_timer_fn, cpu); |
1809 | mce_start_timer(cpu, t); | 1802 | mce_start_timer(t); |
1810 | } | 1803 | } |
1811 | 1804 | ||
1812 | /* Handle unconfigured int18 (should never happen) */ | 1805 | /* Handle unconfigured int18 (should never happen) */ |
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu) | |||
2566 | 2559 | ||
2567 | static int mce_cpu_online(unsigned int cpu) | 2560 | static int mce_cpu_online(unsigned int cpu) |
2568 | { | 2561 | { |
2569 | struct timer_list *t = &per_cpu(mce_timer, cpu); | 2562 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
2570 | int ret; | 2563 | int ret; |
2571 | 2564 | ||
2572 | mce_device_create(cpu); | 2565 | mce_device_create(cpu); |
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu) | |||
2577 | return ret; | 2570 | return ret; |
2578 | } | 2571 | } |
2579 | mce_reenable_cpu(); | 2572 | mce_reenable_cpu(); |
2580 | mce_start_timer(cpu, t); | 2573 | mce_start_timer(t); |
2581 | return 0; | 2574 | return 0; |
2582 | } | 2575 | } |
2583 | 2576 | ||
2584 | static int mce_cpu_pre_down(unsigned int cpu) | 2577 | static int mce_cpu_pre_down(unsigned int cpu) |
2585 | { | 2578 | { |
2586 | struct timer_list *t = &per_cpu(mce_timer, cpu); | 2579 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
2587 | 2580 | ||
2588 | mce_disable_cpu(); | 2581 | mce_disable_cpu(); |
2589 | del_timer_sync(t); | 2582 | del_timer_sync(t); |
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 6a31e2691f3a..079e81733a58 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family) | |||
384 | reget: | 384 | reget: |
385 | if (!get_builtin_microcode(&cp, family)) { | 385 | if (!get_builtin_microcode(&cp, family)) { |
386 | #ifdef CONFIG_BLK_DEV_INITRD | 386 | #ifdef CONFIG_BLK_DEV_INITRD |
387 | cp = find_cpio_data(ucode_path, (void *)initrd_start, | 387 | if (!initrd_gone) |
388 | initrd_end - initrd_start, NULL); | 388 | cp = find_cpio_data(ucode_path, (void *)initrd_start, |
389 | initrd_end - initrd_start, NULL); | ||
389 | #endif | 390 | #endif |
390 | if (!(cp.data && cp.size)) { | 391 | if (!(cp.data && cp.size)) { |
391 | /* | 392 | /* |
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 2af69d27da62..73102d932760 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -46,6 +46,8 @@ | |||
46 | static struct microcode_ops *microcode_ops; | 46 | static struct microcode_ops *microcode_ops; |
47 | static bool dis_ucode_ldr = true; | 47 | static bool dis_ucode_ldr = true; |
48 | 48 | ||
49 | bool initrd_gone; | ||
50 | |||
49 | LIST_HEAD(microcode_cache); | 51 | LIST_HEAD(microcode_cache); |
50 | 52 | ||
51 | /* | 53 | /* |
@@ -190,21 +192,24 @@ void load_ucode_ap(void) | |||
190 | static int __init save_microcode_in_initrd(void) | 192 | static int __init save_microcode_in_initrd(void) |
191 | { | 193 | { |
192 | struct cpuinfo_x86 *c = &boot_cpu_data; | 194 | struct cpuinfo_x86 *c = &boot_cpu_data; |
195 | int ret = -EINVAL; | ||
193 | 196 | ||
194 | switch (c->x86_vendor) { | 197 | switch (c->x86_vendor) { |
195 | case X86_VENDOR_INTEL: | 198 | case X86_VENDOR_INTEL: |
196 | if (c->x86 >= 6) | 199 | if (c->x86 >= 6) |
197 | return save_microcode_in_initrd_intel(); | 200 | ret = save_microcode_in_initrd_intel(); |
198 | break; | 201 | break; |
199 | case X86_VENDOR_AMD: | 202 | case X86_VENDOR_AMD: |
200 | if (c->x86 >= 0x10) | 203 | if (c->x86 >= 0x10) |
201 | return save_microcode_in_initrd_amd(c->x86); | 204 | ret = save_microcode_in_initrd_amd(c->x86); |
202 | break; | 205 | break; |
203 | default: | 206 | default: |
204 | break; | 207 | break; |
205 | } | 208 | } |
206 | 209 | ||
207 | return -EINVAL; | 210 | initrd_gone = true; |
211 | |||
212 | return ret; | ||
208 | } | 213 | } |
209 | 214 | ||
210 | struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) | 215 | struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) |
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) | |||
247 | * has the virtual address of the beginning of the initrd. It also | 252 | * has the virtual address of the beginning of the initrd. It also |
248 | * possibly relocates the ramdisk. In either case, initrd_start contains | 253 | * possibly relocates the ramdisk. In either case, initrd_start contains |
249 | * the updated address so use that instead. | 254 | * the updated address so use that instead. |
255 | * | ||
256 | * initrd_gone is for the hotplug case where we've thrown out initrd | ||
257 | * already. | ||
250 | */ | 258 | */ |
251 | if (!use_pa && initrd_start) | 259 | if (!use_pa) { |
252 | start = initrd_start; | 260 | if (initrd_gone) |
261 | return (struct cpio_data){ NULL, 0, "" }; | ||
262 | if (initrd_start) | ||
263 | start = initrd_start; | ||
264 | } | ||
253 | 265 | ||
254 | return find_cpio_data(path, (void *)start, size, NULL); | 266 | return find_cpio_data(path, (void *)start, size, NULL); |
255 | #else /* !CONFIG_BLK_DEV_INITRD */ | 267 | #else /* !CONFIG_BLK_DEV_INITRD */ |
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 3f329b74e040..8325d8a09ab0 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -41,7 +41,7 @@ | |||
41 | 41 | ||
42 | static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; | 42 | static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; |
43 | 43 | ||
44 | /* Current microcode patch used in early patching */ | 44 | /* Current microcode patch used in early patching on the APs. */ |
45 | struct microcode_intel *intel_ucode_patch; | 45 | struct microcode_intel *intel_ucode_patch; |
46 | 46 | ||
47 | static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, | 47 | static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, |
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void) | |||
607 | struct ucode_cpu_info uci; | 607 | struct ucode_cpu_info uci; |
608 | struct cpio_data cp; | 608 | struct cpio_data cp; |
609 | 609 | ||
610 | /* | ||
611 | * AP loading didn't find any microcode patch, no need to save anything. | ||
612 | */ | ||
613 | if (!intel_ucode_patch || IS_ERR(intel_ucode_patch)) | ||
614 | return 0; | ||
615 | |||
616 | if (!load_builtin_intel_microcode(&cp)) | 610 | if (!load_builtin_intel_microcode(&cp)) |
617 | cp = find_microcode_in_initrd(ucode_path, false); | 611 | cp = find_microcode_in_initrd(ucode_path, false); |
618 | 612 | ||
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void) | |||
628 | return 0; | 622 | return 0; |
629 | } | 623 | } |
630 | 624 | ||
631 | |||
632 | /* | 625 | /* |
633 | * @res_patch, output: a pointer to the patch we found. | 626 | * @res_patch, output: a pointer to the patch we found. |
634 | */ | 627 | */ |
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index e4e97a5355ce..de7234401275 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/fpu/regset.h> | 9 | #include <asm/fpu/regset.h> |
10 | #include <asm/fpu/signal.h> | 10 | #include <asm/fpu/signal.h> |
11 | #include <asm/fpu/types.h> | 11 | #include <asm/fpu/types.h> |
12 | #include <asm/fpu/xstate.h> | ||
12 | #include <asm/traps.h> | 13 | #include <asm/traps.h> |
13 | 14 | ||
14 | #include <linux/hardirq.h> | 15 | #include <linux/hardirq.h> |
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state) | |||
183 | * it will #GP. Make sure it is replaced after the memset(). | 184 | * it will #GP. Make sure it is replaced after the memset(). |
184 | */ | 185 | */ |
185 | if (static_cpu_has(X86_FEATURE_XSAVES)) | 186 | if (static_cpu_has(X86_FEATURE_XSAVES)) |
186 | state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT; | 187 | state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | |
188 | xfeatures_mask; | ||
187 | 189 | ||
188 | if (static_cpu_has(X86_FEATURE_FXSR)) | 190 | if (static_cpu_has(X86_FEATURE_FXSR)) |
189 | fpstate_init_fxstate(&state->fxsave); | 191 | fpstate_init_fxstate(&state->fxsave); |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 85e87b46c318..dc6ba5bda9fc 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer) | |||
352 | } else { | 352 | } else { |
353 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | 353 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); |
354 | 354 | ||
355 | irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq)); | ||
355 | irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); | 356 | irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); |
356 | disable_irq(hdev->irq); | 357 | disable_irq(hdev->irq); |
357 | irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); | 358 | irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 46732dc3b73c..99b920d0e516 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
433 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | 433 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
434 | 434 | ||
435 | if (c->phys_proc_id == o->phys_proc_id && | 435 | if (c->phys_proc_id == o->phys_proc_id && |
436 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && | 436 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) { |
437 | c->cpu_core_id == o->cpu_core_id) | 437 | if (c->cpu_core_id == o->cpu_core_id) |
438 | return topology_sane(c, o, "smt"); | 438 | return topology_sane(c, o, "smt"); |
439 | |||
440 | if ((c->cu_id != 0xff) && | ||
441 | (o->cu_id != 0xff) && | ||
442 | (c->cu_id == o->cu_id)) | ||
443 | return topology_sane(c, o, "smt"); | ||
444 | } | ||
439 | 445 | ||
440 | } else if (c->phys_proc_id == o->phys_proc_id && | 446 | } else if (c->phys_proc_id == o->phys_proc_id && |
441 | c->cpu_core_id == o->cpu_core_id) { | 447 | c->cpu_core_id == o->cpu_core_id) { |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index e41af597aed8..37e7cf544e51 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -1356,6 +1356,9 @@ void __init tsc_init(void) | |||
1356 | (unsigned long)cpu_khz / 1000, | 1356 | (unsigned long)cpu_khz / 1000, |
1357 | (unsigned long)cpu_khz % 1000); | 1357 | (unsigned long)cpu_khz % 1000); |
1358 | 1358 | ||
1359 | /* Sanitize TSC ADJUST before cyc2ns gets initialized */ | ||
1360 | tsc_store_and_check_tsc_adjust(true); | ||
1361 | |||
1359 | /* | 1362 | /* |
1360 | * Secondary CPUs do not run through tsc_init(), so set up | 1363 | * Secondary CPUs do not run through tsc_init(), so set up |
1361 | * all the scale factors for all CPUs, assuming the same | 1364 | * all the scale factors for all CPUs, assuming the same |
@@ -1386,8 +1389,6 @@ void __init tsc_init(void) | |||
1386 | 1389 | ||
1387 | if (unsynchronized_tsc()) | 1390 | if (unsynchronized_tsc()) |
1388 | mark_tsc_unstable("TSCs unsynchronized"); | 1391 | mark_tsc_unstable("TSCs unsynchronized"); |
1389 | else | ||
1390 | tsc_store_and_check_tsc_adjust(true); | ||
1391 | 1392 | ||
1392 | check_system_tsc_reliable(); | 1393 | check_system_tsc_reliable(); |
1393 | 1394 | ||
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index d0db011051a5..728f75378475 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu) | |||
286 | if (unsynchronized_tsc()) | 286 | if (unsynchronized_tsc()) |
287 | return; | 287 | return; |
288 | 288 | ||
289 | if (tsc_clocksource_reliable) { | ||
290 | if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) | ||
291 | pr_info( | ||
292 | "Skipped synchronization checks as TSC is reliable.\n"); | ||
293 | return; | ||
294 | } | ||
295 | |||
296 | /* | 289 | /* |
297 | * Set the maximum number of test runs to | 290 | * Set the maximum number of test runs to |
298 | * 1 if the CPU does not provide the TSC_ADJUST MSR | 291 | * 1 if the CPU does not provide the TSC_ADJUST MSR |
@@ -380,14 +373,19 @@ void check_tsc_sync_target(void) | |||
380 | int cpus = 2; | 373 | int cpus = 2; |
381 | 374 | ||
382 | /* Also aborts if there is no TSC. */ | 375 | /* Also aborts if there is no TSC. */ |
383 | if (unsynchronized_tsc() || tsc_clocksource_reliable) | 376 | if (unsynchronized_tsc()) |
384 | return; | 377 | return; |
385 | 378 | ||
386 | /* | 379 | /* |
387 | * Store, verify and sanitize the TSC adjust register. If | 380 | * Store, verify and sanitize the TSC adjust register. If |
388 | * successful skip the test. | 381 | * successful skip the test. |
382 | * | ||
383 | * The test is also skipped when the TSC is marked reliable. This | ||
384 | * is true for SoCs which have no fallback clocksource. On these | ||
385 | * SoCs the TSC is frequency synchronized, but still the TSC ADJUST | ||
386 | * register might have been wreckaged by the BIOS.. | ||
389 | */ | 387 | */ |
390 | if (tsc_store_and_check_tsc_adjust(false)) { | 388 | if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) { |
391 | atomic_inc(&skip_test); | 389 | atomic_inc(&skip_test); |
392 | return; | 390 | return; |
393 | } | 391 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d153be8929a6..e52c9088660f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) | |||
3182 | memcpy(dest, xsave, XSAVE_HDR_OFFSET); | 3182 | memcpy(dest, xsave, XSAVE_HDR_OFFSET); |
3183 | 3183 | ||
3184 | /* Set XSTATE_BV */ | 3184 | /* Set XSTATE_BV */ |
3185 | xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; | ||
3185 | *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; | 3186 | *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; |
3186 | 3187 | ||
3187 | /* | 3188 | /* |
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index ea9c49adaa1f..8aa6bea1cd6c 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/debugfs.h> | 15 | #include <linux/debugfs.h> |
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/sched.h> | ||
18 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
19 | 20 | ||
20 | #include <asm/pgtable.h> | 21 | #include <asm/pgtable.h> |
@@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, | |||
406 | } else | 407 | } else |
407 | note_page(m, &st, __pgprot(0), 1); | 408 | note_page(m, &st, __pgprot(0), 1); |
408 | 409 | ||
410 | cond_resched(); | ||
409 | start++; | 411 | start++; |
410 | } | 412 | } |
411 | 413 | ||
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 319148bd4b05..2f25a363068c 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) | |||
269 | efi_scratch.use_pgd = true; | 269 | efi_scratch.use_pgd = true; |
270 | 270 | ||
271 | /* | 271 | /* |
272 | * Certain firmware versions are way too sentimential and still believe | ||
273 | * they are exclusive and unquestionable owners of the first physical page, | ||
274 | * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY | ||
275 | * (but then write-access it later during SetVirtualAddressMap()). | ||
276 | * | ||
277 | * Create a 1:1 mapping for this page, to avoid triple faults during early | ||
278 | * boot with such firmware. We are free to hand this page to the BIOS, | ||
279 | * as trim_bios_range() will reserve the first page and isolate it away | ||
280 | * from memory allocators anyway. | ||
281 | */ | ||
282 | if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) { | ||
283 | pr_err("Failed to create 1:1 mapping for the first page!\n"); | ||
284 | return 1; | ||
285 | } | ||
286 | |||
287 | /* | ||
272 | * When making calls to the firmware everything needs to be 1:1 | 288 | * When making calls to the firmware everything needs to be 1:1 |
273 | * mapped and addressable with 32-bit pointers. Map the kernel | 289 | * mapped and addressable with 32-bit pointers. Map the kernel |
274 | * text and allocate a new stack because we can't rely on the | 290 | * text and allocate a new stack because we can't rely on the |
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 848e8568fb3c..8fd4be610607 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c | |||
@@ -419,7 +419,7 @@ subsys_initcall(topology_init); | |||
419 | 419 | ||
420 | void cpu_reset(void) | 420 | void cpu_reset(void) |
421 | { | 421 | { |
422 | #if XCHAL_HAVE_PTP_MMU | 422 | #if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU) |
423 | local_irq_disable(); | 423 | local_irq_disable(); |
424 | /* | 424 | /* |
425 | * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must | 425 | * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must |
diff --git a/block/blk-lib.c b/block/blk-lib.c index f8c82a9b4012..ed1e78e24db0 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c | |||
@@ -306,11 +306,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | |||
306 | if (ret == 0 || (ret && ret != -EOPNOTSUPP)) | 306 | if (ret == 0 || (ret && ret != -EOPNOTSUPP)) |
307 | goto out; | 307 | goto out; |
308 | 308 | ||
309 | ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, | ||
310 | ZERO_PAGE(0), biop); | ||
311 | if (ret == 0 || (ret && ret != -EOPNOTSUPP)) | ||
312 | goto out; | ||
313 | |||
314 | ret = 0; | 309 | ret = 0; |
315 | while (nr_sects != 0) { | 310 | while (nr_sects != 0) { |
316 | bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES), | 311 | bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES), |
@@ -369,6 +364,10 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | |||
369 | return 0; | 364 | return 0; |
370 | } | 365 | } |
371 | 366 | ||
367 | if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, | ||
368 | ZERO_PAGE(0))) | ||
369 | return 0; | ||
370 | |||
372 | blk_start_plug(&plug); | 371 | blk_start_plug(&plug); |
373 | ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, | 372 | ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, |
374 | &bio, discard); | 373 | &bio, discard); |
diff --git a/crypto/algapi.c b/crypto/algapi.c index df939b54b09f..1fad2a6b3bbb 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg) | |||
356 | struct crypto_larval *larval; | 356 | struct crypto_larval *larval; |
357 | int err; | 357 | int err; |
358 | 358 | ||
359 | alg->cra_flags &= ~CRYPTO_ALG_DEAD; | ||
359 | err = crypto_check_alg(alg); | 360 | err = crypto_check_alg(alg); |
360 | if (err) | 361 | if (err) |
361 | return err; | 362 | return err; |
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index f849311e9fd4..533265f110e0 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c | |||
@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) | |||
661 | unlock: | 661 | unlock: |
662 | list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) { | 662 | list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) { |
663 | af_alg_free_sg(&rsgl->sgl); | 663 | af_alg_free_sg(&rsgl->sgl); |
664 | list_del(&rsgl->list); | ||
664 | if (rsgl != &ctx->first_rsgl) | 665 | if (rsgl != &ctx->first_rsgl) |
665 | sock_kfree_s(sk, rsgl, sizeof(*rsgl)); | 666 | sock_kfree_s(sk, rsgl, sizeof(*rsgl)); |
666 | list_del(&rsgl->list); | ||
667 | } | 667 | } |
668 | INIT_LIST_HEAD(&ctx->list); | 668 | INIT_LIST_HEAD(&ctx->list); |
669 | aead_wmem_wakeup(sk); | 669 | aead_wmem_wakeup(sk); |
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 2f82b8eba360..7361d00818e2 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | |||
2704 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 2704 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); |
2705 | struct device *dev = acpi_desc->dev; | 2705 | struct device *dev = acpi_desc->dev; |
2706 | struct acpi_nfit_flush_work flush; | 2706 | struct acpi_nfit_flush_work flush; |
2707 | int rc; | ||
2707 | 2708 | ||
2708 | /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ | 2709 | /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ |
2709 | device_lock(dev); | 2710 | device_lock(dev); |
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | |||
2716 | INIT_WORK_ONSTACK(&flush.work, flush_probe); | 2717 | INIT_WORK_ONSTACK(&flush.work, flush_probe); |
2717 | COMPLETION_INITIALIZER_ONSTACK(flush.cmp); | 2718 | COMPLETION_INITIALIZER_ONSTACK(flush.cmp); |
2718 | queue_work(nfit_wq, &flush.work); | 2719 | queue_work(nfit_wq, &flush.work); |
2719 | return wait_for_completion_interruptible(&flush.cmp); | 2720 | |
2721 | rc = wait_for_completion_interruptible(&flush.cmp); | ||
2722 | cancel_work_sync(&flush.work); | ||
2723 | return rc; | ||
2720 | } | 2724 | } |
2721 | 2725 | ||
2722 | static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, | 2726 | static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 9cd0a2d41816..c2d3785ec227 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
1702 | 1702 | ||
1703 | if (qc->err_mask & ~AC_ERR_OTHER) | 1703 | if (qc->err_mask & ~AC_ERR_OTHER) |
1704 | qc->err_mask &= ~AC_ERR_OTHER; | 1704 | qc->err_mask &= ~AC_ERR_OTHER; |
1705 | } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) { | ||
1706 | qc->result_tf.command |= ATA_SENSE; | ||
1705 | } | 1707 | } |
1706 | 1708 | ||
1707 | /* finish up */ | 1709 | /* finish up */ |
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4356 | { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, | 4358 | { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, |
4357 | 4359 | ||
4358 | /* | 4360 | /* |
4359 | * Device times out with higher max sects. | 4361 | * These devices time out with higher max sects. |
4360 | * https://bugzilla.kernel.org/show_bug.cgi?id=121671 | 4362 | * https://bugzilla.kernel.org/show_bug.cgi?id=121671 |
4361 | */ | 4363 | */ |
4362 | { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, | 4364 | { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, |
4363 | 4365 | ||
4364 | /* Devices we expect to fail diagnostics */ | 4366 | /* Devices we expect to fail diagnostics */ |
4365 | 4367 | ||
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 823e938c9a78..2f32782cea6d 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev) | |||
4132 | host->iomap = NULL; | 4132 | host->iomap = NULL; |
4133 | hpriv->base = devm_ioremap(&pdev->dev, res->start, | 4133 | hpriv->base = devm_ioremap(&pdev->dev, res->start, |
4134 | resource_size(res)); | 4134 | resource_size(res)); |
4135 | if (!hpriv->base) | ||
4136 | return -ENOMEM; | ||
4137 | |||
4135 | hpriv->base -= SATAHC0_REG_BASE; | 4138 | hpriv->base -= SATAHC0_REG_BASE; |
4136 | 4139 | ||
4137 | hpriv->clk = clk_get(&pdev->dev, NULL); | 4140 | hpriv->clk = clk_get(&pdev->dev, NULL); |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 4497d263209f..ac350c518e0c 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv) | |||
558 | struct firmware_buf *buf = fw_priv->buf; | 558 | struct firmware_buf *buf = fw_priv->buf; |
559 | 559 | ||
560 | __fw_load_abort(buf); | 560 | __fw_load_abort(buf); |
561 | |||
562 | /* avoid user action after loading abort */ | ||
563 | fw_priv->buf = NULL; | ||
564 | } | 561 | } |
565 | 562 | ||
566 | static LIST_HEAD(pending_fw_head); | 563 | static LIST_HEAD(pending_fw_head); |
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev, | |||
713 | 710 | ||
714 | mutex_lock(&fw_lock); | 711 | mutex_lock(&fw_lock); |
715 | fw_buf = fw_priv->buf; | 712 | fw_buf = fw_priv->buf; |
716 | if (!fw_buf) | 713 | if (fw_state_is_aborted(&fw_buf->fw_st)) |
717 | goto out; | 714 | goto out; |
718 | 715 | ||
719 | switch (loading) { | 716 | switch (loading) { |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index dacb6a8418aa..fa26ffd25fa6 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev, | |||
389 | { | 389 | { |
390 | struct memory_block *mem = to_memory_block(dev); | 390 | struct memory_block *mem = to_memory_block(dev); |
391 | unsigned long start_pfn, end_pfn; | 391 | unsigned long start_pfn, end_pfn; |
392 | unsigned long valid_start, valid_end, valid_pages; | ||
392 | unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; | 393 | unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; |
393 | struct page *first_page; | ||
394 | struct zone *zone; | 394 | struct zone *zone; |
395 | int zone_shift = 0; | 395 | int zone_shift = 0; |
396 | 396 | ||
397 | start_pfn = section_nr_to_pfn(mem->start_section_nr); | 397 | start_pfn = section_nr_to_pfn(mem->start_section_nr); |
398 | end_pfn = start_pfn + nr_pages; | 398 | end_pfn = start_pfn + nr_pages; |
399 | first_page = pfn_to_page(start_pfn); | ||
400 | 399 | ||
401 | /* The block contains more than one zone can not be offlined. */ | 400 | /* The block contains more than one zone can not be offlined. */ |
402 | if (!test_pages_in_a_zone(start_pfn, end_pfn)) | 401 | if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end)) |
403 | return sprintf(buf, "none\n"); | 402 | return sprintf(buf, "none\n"); |
404 | 403 | ||
405 | zone = page_zone(first_page); | 404 | zone = page_zone(pfn_to_page(valid_start)); |
405 | valid_pages = valid_end - valid_start; | ||
406 | 406 | ||
407 | /* MMOP_ONLINE_KEEP */ | 407 | /* MMOP_ONLINE_KEEP */ |
408 | sprintf(buf, "%s", zone->name); | 408 | sprintf(buf, "%s", zone->name); |
409 | 409 | ||
410 | /* MMOP_ONLINE_KERNEL */ | 410 | /* MMOP_ONLINE_KERNEL */ |
411 | zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift); | 411 | zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift); |
412 | if (zone_shift) { | 412 | if (zone_shift) { |
413 | strcat(buf, " "); | 413 | strcat(buf, " "); |
414 | strcat(buf, (zone + zone_shift)->name); | 414 | strcat(buf, (zone + zone_shift)->name); |
415 | } | 415 | } |
416 | 416 | ||
417 | /* MMOP_ONLINE_MOVABLE */ | 417 | /* MMOP_ONLINE_MOVABLE */ |
418 | zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift); | 418 | zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift); |
419 | if (zone_shift) { | 419 | if (zone_shift) { |
420 | strcat(buf, " "); | 420 | strcat(buf, " "); |
421 | strcat(buf, (zone + zone_shift)->name); | 421 | strcat(buf, (zone + zone_shift)->name); |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 872eac4cb1df..a14fac6a01d3 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) | |||
966 | unsigned long flags; | 966 | unsigned long flags; |
967 | int retval; | 967 | int retval; |
968 | 968 | ||
969 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
970 | |||
971 | if (rpmflags & RPM_GET_PUT) { | 969 | if (rpmflags & RPM_GET_PUT) { |
972 | if (!atomic_dec_and_test(&dev->power.usage_count)) | 970 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
973 | return 0; | 971 | return 0; |
974 | } | 972 | } |
975 | 973 | ||
974 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
975 | |||
976 | spin_lock_irqsave(&dev->power.lock, flags); | 976 | spin_lock_irqsave(&dev->power.lock, flags); |
977 | retval = rpm_idle(dev, rpmflags); | 977 | retval = rpm_idle(dev, rpmflags); |
978 | spin_unlock_irqrestore(&dev->power.lock, flags); | 978 | spin_unlock_irqrestore(&dev->power.lock, flags); |
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) | |||
998 | unsigned long flags; | 998 | unsigned long flags; |
999 | int retval; | 999 | int retval; |
1000 | 1000 | ||
1001 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
1002 | |||
1003 | if (rpmflags & RPM_GET_PUT) { | 1001 | if (rpmflags & RPM_GET_PUT) { |
1004 | if (!atomic_dec_and_test(&dev->power.usage_count)) | 1002 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
1005 | return 0; | 1003 | return 0; |
1006 | } | 1004 | } |
1007 | 1005 | ||
1006 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
1007 | |||
1008 | spin_lock_irqsave(&dev->power.lock, flags); | 1008 | spin_lock_irqsave(&dev->power.lock, flags); |
1009 | retval = rpm_suspend(dev, rpmflags); | 1009 | retval = rpm_suspend(dev, rpmflags); |
1010 | spin_unlock_irqrestore(&dev->power.lock, flags); | 1010 | spin_unlock_irqrestore(&dev->power.lock, flags); |
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) | |||
1029 | unsigned long flags; | 1029 | unsigned long flags; |
1030 | int retval; | 1030 | int retval; |
1031 | 1031 | ||
1032 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | 1032 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && |
1033 | dev->power.runtime_status != RPM_ACTIVE); | ||
1033 | 1034 | ||
1034 | if (rpmflags & RPM_GET_PUT) | 1035 | if (rpmflags & RPM_GET_PUT) |
1035 | atomic_inc(&dev->power.usage_count); | 1036 | atomic_inc(&dev->power.usage_count); |
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index f642c4264c27..168fa175d65a 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h | |||
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus); | |||
45 | void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); | 45 | void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); |
46 | void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); | 46 | void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); |
47 | void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); | 47 | void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); |
48 | #ifdef CONFIG_BCMA_DRIVER_MIPS | ||
49 | void bcma_chipco_serial_init(struct bcma_drv_cc *cc); | ||
50 | #endif /* CONFIG_BCMA_DRIVER_MIPS */ | ||
48 | 51 | ||
49 | /* driver_chipcommon_b.c */ | 52 | /* driver_chipcommon_b.c */ |
50 | int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb); | 53 | int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb); |
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c index b4f6520e74f0..62f5bfa5065d 100644 --- a/drivers/bcma/driver_chipcommon.c +++ b/drivers/bcma/driver_chipcommon.c | |||
@@ -15,8 +15,6 @@ | |||
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/bcma/bcma.h> | 16 | #include <linux/bcma/bcma.h> |
17 | 17 | ||
18 | static void bcma_chipco_serial_init(struct bcma_drv_cc *cc); | ||
19 | |||
20 | static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset, | 18 | static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset, |
21 | u32 mask, u32 value) | 19 | u32 mask, u32 value) |
22 | { | 20 | { |
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc) | |||
186 | if (cc->capabilities & BCMA_CC_CAP_PMU) | 184 | if (cc->capabilities & BCMA_CC_CAP_PMU) |
187 | bcma_pmu_early_init(cc); | 185 | bcma_pmu_early_init(cc); |
188 | 186 | ||
189 | if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC) | ||
190 | bcma_chipco_serial_init(cc); | ||
191 | |||
192 | if (bus->hosttype == BCMA_HOSTTYPE_SOC) | 187 | if (bus->hosttype == BCMA_HOSTTYPE_SOC) |
193 | bcma_core_chipcommon_flash_detect(cc); | 188 | bcma_core_chipcommon_flash_detect(cc); |
194 | 189 | ||
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value) | |||
378 | return res; | 373 | return res; |
379 | } | 374 | } |
380 | 375 | ||
381 | static void bcma_chipco_serial_init(struct bcma_drv_cc *cc) | 376 | #ifdef CONFIG_BCMA_DRIVER_MIPS |
377 | void bcma_chipco_serial_init(struct bcma_drv_cc *cc) | ||
382 | { | 378 | { |
383 | #if IS_BUILTIN(CONFIG_BCM47XX) | ||
384 | unsigned int irq; | 379 | unsigned int irq; |
385 | u32 baud_base; | 380 | u32 baud_base; |
386 | u32 i; | 381 | u32 i; |
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc) | |||
422 | ports[i].baud_base = baud_base; | 417 | ports[i].baud_base = baud_base; |
423 | ports[i].reg_shift = 0; | 418 | ports[i].reg_shift = 0; |
424 | } | 419 | } |
425 | #endif /* CONFIG_BCM47XX */ | ||
426 | } | 420 | } |
421 | #endif /* CONFIG_BCMA_DRIVER_MIPS */ | ||
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 96f171328200..89af807cf29c 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c | |||
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore) | |||
278 | 278 | ||
279 | void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) | 279 | void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) |
280 | { | 280 | { |
281 | struct bcma_bus *bus = mcore->core->bus; | ||
282 | |||
281 | if (mcore->early_setup_done) | 283 | if (mcore->early_setup_done) |
282 | return; | 284 | return; |
283 | 285 | ||
286 | bcma_chipco_serial_init(&bus->drv_cc); | ||
284 | bcma_core_mips_nvram_init(mcore); | 287 | bcma_core_mips_nvram_init(mcore); |
285 | 288 | ||
286 | mcore->early_setup_done = true; | 289 | mcore->early_setup_done = true; |
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 6ce5ce8be2f2..87fba424817e 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -92,7 +92,6 @@ static void add_early_randomness(struct hwrng *rng) | |||
92 | mutex_unlock(&reading_mutex); | 92 | mutex_unlock(&reading_mutex); |
93 | if (bytes_read > 0) | 93 | if (bytes_read > 0) |
94 | add_device_randomness(rng_buffer, bytes_read); | 94 | add_device_randomness(rng_buffer, bytes_read); |
95 | memset(rng_buffer, 0, size); | ||
96 | } | 95 | } |
97 | 96 | ||
98 | static inline void cleanup_rng(struct kref *kref) | 97 | static inline void cleanup_rng(struct kref *kref) |
@@ -288,7 +287,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, | |||
288 | } | 287 | } |
289 | } | 288 | } |
290 | out: | 289 | out: |
291 | memset(rng_buffer, 0, rng_buffer_size()); | ||
292 | return ret ? : err; | 290 | return ret ? : err; |
293 | 291 | ||
294 | out_unlock_reading: | 292 | out_unlock_reading: |
@@ -427,7 +425,6 @@ static int hwrng_fillfn(void *unused) | |||
427 | /* Outside lock, sure, but y'know: randomness. */ | 425 | /* Outside lock, sure, but y'know: randomness. */ |
428 | add_hwgenerator_randomness((void *)rng_fillbuf, rc, | 426 | add_hwgenerator_randomness((void *)rng_fillbuf, rc, |
429 | rc * current_quality * 8 >> 10); | 427 | rc * current_quality * 8 >> 10); |
430 | memset(rng_fillbuf, 0, rng_buffer_size()); | ||
431 | } | 428 | } |
432 | hwrng_fill = NULL; | 429 | hwrng_fill = NULL; |
433 | return 0; | 430 | return 0; |
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 4fda623e55bb..c94360671f41 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c | |||
@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy, | |||
784 | static int brcm_avs_suspend(struct cpufreq_policy *policy) | 784 | static int brcm_avs_suspend(struct cpufreq_policy *policy) |
785 | { | 785 | { |
786 | struct private_data *priv = policy->driver_data; | 786 | struct private_data *priv = policy->driver_data; |
787 | int ret; | ||
788 | |||
789 | ret = brcm_avs_get_pmap(priv, &priv->pmap); | ||
790 | if (ret) | ||
791 | return ret; | ||
787 | 792 | ||
788 | return brcm_avs_get_pmap(priv, &priv->pmap); | 793 | /* |
794 | * We can't use the P-state returned by brcm_avs_get_pmap(), since | ||
795 | * that's the initial P-state from when the P-map was downloaded to the | ||
796 | * AVS co-processor, not necessarily the P-state we are running at now. | ||
797 | * So, we get the current P-state explicitly. | ||
798 | */ | ||
799 | return brcm_avs_get_pstate(priv, &priv->pmap.state); | ||
789 | } | 800 | } |
790 | 801 | ||
791 | static int brcm_avs_resume(struct cpufreq_policy *policy) | 802 | static int brcm_avs_resume(struct cpufreq_policy *policy) |
@@ -954,9 +965,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf) | |||
954 | brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv); | 965 | brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv); |
955 | brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4); | 966 | brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4); |
956 | 967 | ||
957 | return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n", | 968 | return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n", |
958 | pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2, | 969 | pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2, |
959 | mdiv_p3, mdiv_p4); | 970 | mdiv_p3, mdiv_p4, pmap.mode, pmap.state); |
960 | } | 971 | } |
961 | 972 | ||
962 | static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf) | 973 | static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf) |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index a54d65aa776d..50bd6d987fc3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -1235,6 +1235,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) | |||
1235 | cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); | 1235 | cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); |
1236 | } | 1236 | } |
1237 | 1237 | ||
1238 | #define MSR_IA32_POWER_CTL_BIT_EE 19 | ||
1239 | |||
1240 | /* Disable energy efficiency optimization */ | ||
1241 | static void intel_pstate_disable_ee(int cpu) | ||
1242 | { | ||
1243 | u64 power_ctl; | ||
1244 | int ret; | ||
1245 | |||
1246 | ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl); | ||
1247 | if (ret) | ||
1248 | return; | ||
1249 | |||
1250 | if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) { | ||
1251 | pr_info("Disabling energy efficiency optimization\n"); | ||
1252 | power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); | ||
1253 | wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl); | ||
1254 | } | ||
1255 | } | ||
1256 | |||
1238 | static int atom_get_min_pstate(void) | 1257 | static int atom_get_min_pstate(void) |
1239 | { | 1258 | { |
1240 | u64 value; | 1259 | u64 value; |
@@ -1845,6 +1864,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { | |||
1845 | {} | 1864 | {} |
1846 | }; | 1865 | }; |
1847 | 1866 | ||
1867 | static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { | ||
1868 | ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params), | ||
1869 | {} | ||
1870 | }; | ||
1871 | |||
1848 | static int intel_pstate_init_cpu(unsigned int cpunum) | 1872 | static int intel_pstate_init_cpu(unsigned int cpunum) |
1849 | { | 1873 | { |
1850 | struct cpudata *cpu; | 1874 | struct cpudata *cpu; |
@@ -1875,6 +1899,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
1875 | cpu->cpu = cpunum; | 1899 | cpu->cpu = cpunum; |
1876 | 1900 | ||
1877 | if (hwp_active) { | 1901 | if (hwp_active) { |
1902 | const struct x86_cpu_id *id; | ||
1903 | |||
1904 | id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); | ||
1905 | if (id) | ||
1906 | intel_pstate_disable_ee(cpunum); | ||
1907 | |||
1878 | intel_pstate_hwp_enable(cpu); | 1908 | intel_pstate_hwp_enable(cpu); |
1879 | pid_params.sample_rate_ms = 50; | 1909 | pid_params.sample_rate_ms = 50; |
1880 | pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC; | 1910 | pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC; |
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index e2ce8190ecc9..612898b4aaad 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c | |||
@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data) | |||
959 | static void ccp5_config(struct ccp_device *ccp) | 959 | static void ccp5_config(struct ccp_device *ccp) |
960 | { | 960 | { |
961 | /* Public side */ | 961 | /* Public side */ |
962 | iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); | 962 | iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); |
963 | } | 963 | } |
964 | 964 | ||
965 | static void ccp5other_config(struct ccp_device *ccp) | 965 | static void ccp5other_config(struct ccp_device *ccp) |
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 830f35e6005f..649e5610a5ce 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h | |||
@@ -238,6 +238,7 @@ struct ccp_dma_chan { | |||
238 | struct ccp_device *ccp; | 238 | struct ccp_device *ccp; |
239 | 239 | ||
240 | spinlock_t lock; | 240 | spinlock_t lock; |
241 | struct list_head created; | ||
241 | struct list_head pending; | 242 | struct list_head pending; |
242 | struct list_head active; | 243 | struct list_head active; |
243 | struct list_head complete; | 244 | struct list_head complete; |
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 6553912804f7..e5d9278f4019 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c | |||
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan) | |||
63 | ccp_free_desc_resources(chan->ccp, &chan->complete); | 63 | ccp_free_desc_resources(chan->ccp, &chan->complete); |
64 | ccp_free_desc_resources(chan->ccp, &chan->active); | 64 | ccp_free_desc_resources(chan->ccp, &chan->active); |
65 | ccp_free_desc_resources(chan->ccp, &chan->pending); | 65 | ccp_free_desc_resources(chan->ccp, &chan->pending); |
66 | ccp_free_desc_resources(chan->ccp, &chan->created); | ||
66 | 67 | ||
67 | spin_unlock_irqrestore(&chan->lock, flags); | 68 | spin_unlock_irqrestore(&chan->lock, flags); |
68 | } | 69 | } |
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc) | |||
273 | spin_lock_irqsave(&chan->lock, flags); | 274 | spin_lock_irqsave(&chan->lock, flags); |
274 | 275 | ||
275 | cookie = dma_cookie_assign(tx_desc); | 276 | cookie = dma_cookie_assign(tx_desc); |
277 | list_del(&desc->entry); | ||
276 | list_add_tail(&desc->entry, &chan->pending); | 278 | list_add_tail(&desc->entry, &chan->pending); |
277 | 279 | ||
278 | spin_unlock_irqrestore(&chan->lock, flags); | 280 | spin_unlock_irqrestore(&chan->lock, flags); |
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan, | |||
426 | 428 | ||
427 | spin_lock_irqsave(&chan->lock, sflags); | 429 | spin_lock_irqsave(&chan->lock, sflags); |
428 | 430 | ||
429 | list_add_tail(&desc->entry, &chan->pending); | 431 | list_add_tail(&desc->entry, &chan->created); |
430 | 432 | ||
431 | spin_unlock_irqrestore(&chan->lock, sflags); | 433 | spin_unlock_irqrestore(&chan->lock, sflags); |
432 | 434 | ||
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan) | |||
610 | /*TODO: Purge the complete list? */ | 612 | /*TODO: Purge the complete list? */ |
611 | ccp_free_desc_resources(chan->ccp, &chan->active); | 613 | ccp_free_desc_resources(chan->ccp, &chan->active); |
612 | ccp_free_desc_resources(chan->ccp, &chan->pending); | 614 | ccp_free_desc_resources(chan->ccp, &chan->pending); |
615 | ccp_free_desc_resources(chan->ccp, &chan->created); | ||
613 | 616 | ||
614 | spin_unlock_irqrestore(&chan->lock, flags); | 617 | spin_unlock_irqrestore(&chan->lock, flags); |
615 | 618 | ||
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) | |||
679 | chan->ccp = ccp; | 682 | chan->ccp = ccp; |
680 | 683 | ||
681 | spin_lock_init(&chan->lock); | 684 | spin_lock_init(&chan->lock); |
685 | INIT_LIST_HEAD(&chan->created); | ||
682 | INIT_LIST_HEAD(&chan->pending); | 686 | INIT_LIST_HEAD(&chan->pending); |
683 | INIT_LIST_HEAD(&chan->active); | 687 | INIT_LIST_HEAD(&chan->active); |
684 | INIT_LIST_HEAD(&chan->complete); | 688 | INIT_LIST_HEAD(&chan->complete); |
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 2ed1e24b44a8..b4b78b37f8a6 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c | |||
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
158 | case CRYPTO_ALG_TYPE_AEAD: | 158 | case CRYPTO_ALG_TYPE_AEAD: |
159 | ctx_req.req.aead_req = (struct aead_request *)req; | 159 | ctx_req.req.aead_req = (struct aead_request *)req; |
160 | ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); | 160 | ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); |
161 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst, | 161 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst, |
162 | ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); | 162 | ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); |
163 | if (ctx_req.ctx.reqctx->skb) { | 163 | if (ctx_req.ctx.reqctx->skb) { |
164 | kfree_skb(ctx_req.ctx.reqctx->skb); | 164 | kfree_skb(ctx_req.ctx.reqctx->skb); |
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1362 | struct chcr_wr *chcr_req; | 1362 | struct chcr_wr *chcr_req; |
1363 | struct cpl_rx_phys_dsgl *phys_cpl; | 1363 | struct cpl_rx_phys_dsgl *phys_cpl; |
1364 | struct phys_sge_parm sg_param; | 1364 | struct phys_sge_parm sg_param; |
1365 | struct scatterlist *src, *dst; | 1365 | struct scatterlist *src; |
1366 | struct scatterlist src_sg[2], dst_sg[2]; | ||
1367 | unsigned int frags = 0, transhdr_len; | 1366 | unsigned int frags = 0, transhdr_len; |
1368 | unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; | 1367 | unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; |
1369 | unsigned int kctx_len = 0; | 1368 | unsigned int kctx_len = 0; |
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1383 | 1382 | ||
1384 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) | 1383 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) |
1385 | goto err; | 1384 | goto err; |
1386 | src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); | 1385 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); |
1387 | dst = src; | 1386 | reqctx->dst = src; |
1387 | |||
1388 | if (req->src != req->dst) { | 1388 | if (req->src != req->dst) { |
1389 | err = chcr_copy_assoc(req, aeadctx); | 1389 | err = chcr_copy_assoc(req, aeadctx); |
1390 | if (err) | 1390 | if (err) |
1391 | return ERR_PTR(err); | 1391 | return ERR_PTR(err); |
1392 | dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); | 1392 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, |
1393 | req->assoclen); | ||
1393 | } | 1394 | } |
1394 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { | 1395 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { |
1395 | null = 1; | 1396 | null = 1; |
1396 | assoclen = 0; | 1397 | assoclen = 0; |
1397 | } | 1398 | } |
1398 | reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + | 1399 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + |
1399 | (op_type ? -authsize : authsize)); | 1400 | (op_type ? -authsize : authsize)); |
1400 | if (reqctx->dst_nents <= 0) { | 1401 | if (reqctx->dst_nents <= 0) { |
1401 | pr_err("AUTHENC:Invalid Destination sg entries\n"); | 1402 | pr_err("AUTHENC:Invalid Destination sg entries\n"); |
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1460 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | 1461 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); |
1461 | sg_param.qid = qid; | 1462 | sg_param.qid = qid; |
1462 | sg_param.align = 0; | 1463 | sg_param.align = 0; |
1463 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, | 1464 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, |
1464 | &sg_param)) | 1465 | &sg_param)) |
1465 | goto dstmap_fail; | 1466 | goto dstmap_fail; |
1466 | 1467 | ||
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
1711 | struct chcr_wr *chcr_req; | 1712 | struct chcr_wr *chcr_req; |
1712 | struct cpl_rx_phys_dsgl *phys_cpl; | 1713 | struct cpl_rx_phys_dsgl *phys_cpl; |
1713 | struct phys_sge_parm sg_param; | 1714 | struct phys_sge_parm sg_param; |
1714 | struct scatterlist *src, *dst; | 1715 | struct scatterlist *src; |
1715 | struct scatterlist src_sg[2], dst_sg[2]; | ||
1716 | unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; | 1716 | unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; |
1717 | unsigned int dst_size = 0, kctx_len; | 1717 | unsigned int dst_size = 0, kctx_len; |
1718 | unsigned int sub_type; | 1718 | unsigned int sub_type; |
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
1728 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) | 1728 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) |
1729 | goto err; | 1729 | goto err; |
1730 | sub_type = get_aead_subtype(tfm); | 1730 | sub_type = get_aead_subtype(tfm); |
1731 | src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); | 1731 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); |
1732 | dst = src; | 1732 | reqctx->dst = src; |
1733 | |||
1733 | if (req->src != req->dst) { | 1734 | if (req->src != req->dst) { |
1734 | err = chcr_copy_assoc(req, aeadctx); | 1735 | err = chcr_copy_assoc(req, aeadctx); |
1735 | if (err) { | 1736 | if (err) { |
1736 | pr_err("AAD copy to destination buffer fails\n"); | 1737 | pr_err("AAD copy to destination buffer fails\n"); |
1737 | return ERR_PTR(err); | 1738 | return ERR_PTR(err); |
1738 | } | 1739 | } |
1739 | dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); | 1740 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, |
1741 | req->assoclen); | ||
1740 | } | 1742 | } |
1741 | reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + | 1743 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + |
1742 | (op_type ? -authsize : authsize)); | 1744 | (op_type ? -authsize : authsize)); |
1743 | if (reqctx->dst_nents <= 0) { | 1745 | if (reqctx->dst_nents <= 0) { |
1744 | pr_err("CCM:Invalid Destination sg entries\n"); | 1746 | pr_err("CCM:Invalid Destination sg entries\n"); |
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
1777 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | 1779 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); |
1778 | sg_param.qid = qid; | 1780 | sg_param.qid = qid; |
1779 | sg_param.align = 0; | 1781 | sg_param.align = 0; |
1780 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, | 1782 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, |
1781 | &sg_param)) | 1783 | &sg_param)) |
1782 | goto dstmap_fail; | 1784 | goto dstmap_fail; |
1783 | 1785 | ||
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1809 | struct chcr_wr *chcr_req; | 1811 | struct chcr_wr *chcr_req; |
1810 | struct cpl_rx_phys_dsgl *phys_cpl; | 1812 | struct cpl_rx_phys_dsgl *phys_cpl; |
1811 | struct phys_sge_parm sg_param; | 1813 | struct phys_sge_parm sg_param; |
1812 | struct scatterlist *src, *dst; | 1814 | struct scatterlist *src; |
1813 | struct scatterlist src_sg[2], dst_sg[2]; | ||
1814 | unsigned int frags = 0, transhdr_len; | 1815 | unsigned int frags = 0, transhdr_len; |
1815 | unsigned int ivsize = AES_BLOCK_SIZE; | 1816 | unsigned int ivsize = AES_BLOCK_SIZE; |
1816 | unsigned int dst_size = 0, kctx_len; | 1817 | unsigned int dst_size = 0, kctx_len; |
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1832 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) | 1833 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) |
1833 | goto err; | 1834 | goto err; |
1834 | 1835 | ||
1835 | src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); | 1836 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); |
1836 | dst = src; | 1837 | reqctx->dst = src; |
1837 | if (req->src != req->dst) { | 1838 | if (req->src != req->dst) { |
1838 | err = chcr_copy_assoc(req, aeadctx); | 1839 | err = chcr_copy_assoc(req, aeadctx); |
1839 | if (err) | 1840 | if (err) |
1840 | return ERR_PTR(err); | 1841 | return ERR_PTR(err); |
1841 | dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); | 1842 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, |
1843 | req->assoclen); | ||
1842 | } | 1844 | } |
1843 | 1845 | ||
1844 | if (!req->cryptlen) | 1846 | if (!req->cryptlen) |
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1848 | crypt_len = AES_BLOCK_SIZE; | 1850 | crypt_len = AES_BLOCK_SIZE; |
1849 | else | 1851 | else |
1850 | crypt_len = req->cryptlen; | 1852 | crypt_len = req->cryptlen; |
1851 | reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + | 1853 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + |
1852 | (op_type ? -authsize : authsize)); | 1854 | (op_type ? -authsize : authsize)); |
1853 | if (reqctx->dst_nents <= 0) { | 1855 | if (reqctx->dst_nents <= 0) { |
1854 | pr_err("GCM:Invalid Destination sg entries\n"); | 1856 | pr_err("GCM:Invalid Destination sg entries\n"); |
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1923 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | 1925 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); |
1924 | sg_param.qid = qid; | 1926 | sg_param.qid = qid; |
1925 | sg_param.align = 0; | 1927 | sg_param.align = 0; |
1926 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, | 1928 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, |
1927 | &sg_param)) | 1929 | &sg_param)) |
1928 | goto dstmap_fail; | 1930 | goto dstmap_fail; |
1929 | 1931 | ||
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1937 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | 1939 | write_sg_to_skb(skb, &frags, src, req->cryptlen); |
1938 | } else { | 1940 | } else { |
1939 | aes_gcm_empty_pld_pad(req->dst, authsize - 1); | 1941 | aes_gcm_empty_pld_pad(req->dst, authsize - 1); |
1940 | write_sg_to_skb(skb, &frags, dst, crypt_len); | 1942 | write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len); |
1943 | |||
1941 | } | 1944 | } |
1942 | 1945 | ||
1943 | create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, | 1946 | create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, |
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
2189 | unsigned int ck_size; | 2192 | unsigned int ck_size; |
2190 | int ret = 0, key_ctx_size = 0; | 2193 | int ret = 0, key_ctx_size = 0; |
2191 | 2194 | ||
2192 | if (get_aead_subtype(aead) == | 2195 | if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && |
2193 | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { | 2196 | keylen > 3) { |
2194 | keylen -= 4; /* nonce/salt is present in the last 4 bytes */ | 2197 | keylen -= 4; /* nonce/salt is present in the last 4 bytes */ |
2195 | memcpy(aeadctx->salt, key + keylen, 4); | 2198 | memcpy(aeadctx->salt, key + keylen, 4); |
2196 | } | 2199 | } |
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index 918da8e6e2d8..1c65f07e1cc9 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c | |||
@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = { | |||
52 | int assign_chcr_device(struct chcr_dev **dev) | 52 | int assign_chcr_device(struct chcr_dev **dev) |
53 | { | 53 | { |
54 | struct uld_ctx *u_ctx; | 54 | struct uld_ctx *u_ctx; |
55 | int ret = -ENXIO; | ||
55 | 56 | ||
56 | /* | 57 | /* |
57 | * Which device to use if multiple devices are available TODO | 58 | * Which device to use if multiple devices are available TODO |
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev) | |||
59 | * must go to the same device to maintain the ordering. | 60 | * must go to the same device to maintain the ordering. |
60 | */ | 61 | */ |
61 | mutex_lock(&dev_mutex); /* TODO ? */ | 62 | mutex_lock(&dev_mutex); /* TODO ? */ |
62 | u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry); | 63 | list_for_each_entry(u_ctx, &uld_ctx_list, entry) |
63 | if (!u_ctx) { | 64 | if (u_ctx && u_ctx->dev) { |
64 | mutex_unlock(&dev_mutex); | 65 | *dev = u_ctx->dev; |
65 | return -ENXIO; | 66 | ret = 0; |
67 | break; | ||
66 | } | 68 | } |
67 | |||
68 | *dev = u_ctx->dev; | ||
69 | mutex_unlock(&dev_mutex); | 69 | mutex_unlock(&dev_mutex); |
70 | return 0; | 70 | return ret; |
71 | } | 71 | } |
72 | 72 | ||
73 | static int chcr_dev_add(struct uld_ctx *u_ctx) | 73 | static int chcr_dev_add(struct uld_ctx *u_ctx) |
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state) | |||
202 | 202 | ||
203 | static int __init chcr_crypto_init(void) | 203 | static int __init chcr_crypto_init(void) |
204 | { | 204 | { |
205 | if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) { | 205 | if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) |
206 | pr_err("ULD register fail: No chcr crypto support in cxgb4"); | 206 | pr_err("ULD register fail: No chcr crypto support in cxgb4"); |
207 | return -1; | ||
208 | } | ||
209 | 207 | ||
210 | return 0; | 208 | return 0; |
211 | } | 209 | } |
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index d5af7d64a763..7ec0a8f12475 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h | |||
@@ -158,6 +158,9 @@ struct ablk_ctx { | |||
158 | }; | 158 | }; |
159 | struct chcr_aead_reqctx { | 159 | struct chcr_aead_reqctx { |
160 | struct sk_buff *skb; | 160 | struct sk_buff *skb; |
161 | struct scatterlist *dst; | ||
162 | struct scatterlist srcffwd[2]; | ||
163 | struct scatterlist dstffwd[2]; | ||
161 | short int dst_nents; | 164 | short int dst_nents; |
162 | u16 verify; | 165 | u16 verify; |
163 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; | 166 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; |
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c index bc5cbc193aae..5b2d78a5b5aa 100644 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c | |||
@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
233 | &hw_data->accel_capabilities_mask); | 233 | &hw_data->accel_capabilities_mask); |
234 | 234 | ||
235 | /* Find and map all the device's BARS */ | 235 | /* Find and map all the device's BARS */ |
236 | i = 0; | 236 | i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0; |
237 | bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); | 237 | bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); |
238 | for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, | 238 | for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, |
239 | ADF_PCI_MAX_BARS * 2) { | 239 | ADF_PCI_MAX_BARS * 2) { |
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index e8822536530b..33f0a6251e38 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h | |||
@@ -69,6 +69,7 @@ | |||
69 | #define ADF_ERRSOU5 (0x3A000 + 0xD8) | 69 | #define ADF_ERRSOU5 (0x3A000 + 0xD8) |
70 | #define ADF_DEVICE_FUSECTL_OFFSET 0x40 | 70 | #define ADF_DEVICE_FUSECTL_OFFSET 0x40 |
71 | #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C | 71 | #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C |
72 | #define ADF_DEVICE_FUSECTL_MASK 0x80000000 | ||
72 | #define ADF_PCI_MAX_BARS 3 | 73 | #define ADF_PCI_MAX_BARS 3 |
73 | #define ADF_DEVICE_NAME_LENGTH 32 | 74 | #define ADF_DEVICE_NAME_LENGTH 32 |
74 | #define ADF_ETR_MAX_RINGS_PER_BANK 16 | 75 | #define ADF_ETR_MAX_RINGS_PER_BANK 16 |
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 1e480f140663..8c4fd255a601 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c | |||
@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) | |||
456 | unsigned int csr_val; | 456 | unsigned int csr_val; |
457 | int times = 30; | 457 | int times = 30; |
458 | 458 | ||
459 | if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) | 459 | if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID) |
460 | return 0; | 460 | return 0; |
461 | 461 | ||
462 | csr_val = ADF_CSR_RD(csr_addr, 0); | 462 | csr_val = ADF_CSR_RD(csr_addr, 0); |
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) | |||
716 | (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + | 716 | (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + |
717 | LOCAL_TO_XFER_REG_OFFSET); | 717 | LOCAL_TO_XFER_REG_OFFSET); |
718 | handle->pci_dev = pci_info->pci_dev; | 718 | handle->pci_dev = pci_info->pci_dev; |
719 | if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) { | 719 | if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) { |
720 | sram_bar = | 720 | sram_bar = |
721 | &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)]; | 721 | &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)]; |
722 | handle->hal_sram_addr_v = sram_bar->virt_addr; | 722 | handle->hal_sram_addr_v = sram_bar->virt_addr; |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index d5ba43a87a68..200828c60db9 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c | |||
@@ -153,6 +153,8 @@ struct cppi41_dd { | |||
153 | 153 | ||
154 | /* context for suspend/resume */ | 154 | /* context for suspend/resume */ |
155 | unsigned int dma_tdfdq; | 155 | unsigned int dma_tdfdq; |
156 | |||
157 | bool is_suspended; | ||
156 | }; | 158 | }; |
157 | 159 | ||
158 | #define FIST_COMPLETION_QUEUE 93 | 160 | #define FIST_COMPLETION_QUEUE 93 |
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc) | |||
257 | BUG_ON(desc_num >= ALLOC_DECS_NUM); | 259 | BUG_ON(desc_num >= ALLOC_DECS_NUM); |
258 | c = cdd->chan_busy[desc_num]; | 260 | c = cdd->chan_busy[desc_num]; |
259 | cdd->chan_busy[desc_num] = NULL; | 261 | cdd->chan_busy[desc_num] = NULL; |
262 | |||
263 | /* Usecount for chan_busy[], paired with push_desc_queue() */ | ||
264 | pm_runtime_put(cdd->ddev.dev); | ||
265 | |||
260 | return c; | 266 | return c; |
261 | } | 267 | } |
262 | 268 | ||
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data) | |||
317 | 323 | ||
318 | while (val) { | 324 | while (val) { |
319 | u32 desc, len; | 325 | u32 desc, len; |
320 | int error; | ||
321 | 326 | ||
322 | error = pm_runtime_get(cdd->ddev.dev); | 327 | /* |
323 | if (error < 0) | 328 | * This should never trigger, see the comments in |
324 | dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", | 329 | * push_desc_queue() |
325 | __func__, error); | 330 | */ |
331 | WARN_ON(cdd->is_suspended); | ||
326 | 332 | ||
327 | q_num = __fls(val); | 333 | q_num = __fls(val); |
328 | val &= ~(1 << q_num); | 334 | val &= ~(1 << q_num); |
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data) | |||
343 | c->residue = pd_trans_len(c->desc->pd6) - len; | 349 | c->residue = pd_trans_len(c->desc->pd6) - len; |
344 | dma_cookie_complete(&c->txd); | 350 | dma_cookie_complete(&c->txd); |
345 | dmaengine_desc_get_callback_invoke(&c->txd, NULL); | 351 | dmaengine_desc_get_callback_invoke(&c->txd, NULL); |
346 | |||
347 | pm_runtime_mark_last_busy(cdd->ddev.dev); | ||
348 | pm_runtime_put_autosuspend(cdd->ddev.dev); | ||
349 | } | 352 | } |
350 | } | 353 | } |
351 | return IRQ_HANDLED; | 354 | return IRQ_HANDLED; |
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c) | |||
447 | */ | 450 | */ |
448 | __iowmb(); | 451 | __iowmb(); |
449 | 452 | ||
453 | /* | ||
454 | * DMA transfers can take at least 200ms to complete with USB mass | ||
455 | * storage connected. To prevent autosuspend timeouts, we must use | ||
456 | * pm_runtime_get/put() when chan_busy[] is modified. This will get | ||
457 | * cleared in desc_to_chan() or cppi41_stop_chan() depending on the | ||
458 | * outcome of the transfer. | ||
459 | */ | ||
460 | pm_runtime_get(cdd->ddev.dev); | ||
461 | |||
450 | desc_phys = lower_32_bits(c->desc_phys); | 462 | desc_phys = lower_32_bits(c->desc_phys); |
451 | desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); | 463 | desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); |
452 | WARN_ON(cdd->chan_busy[desc_num]); | 464 | WARN_ON(cdd->chan_busy[desc_num]); |
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c) | |||
457 | cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); | 469 | cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); |
458 | } | 470 | } |
459 | 471 | ||
460 | static void pending_desc(struct cppi41_channel *c) | 472 | /* |
473 | * Caller must hold cdd->lock to prevent push_desc_queue() | ||
474 | * getting called out of order. We have both cppi41_dma_issue_pending() | ||
475 | * and cppi41_runtime_resume() call this function. | ||
476 | */ | ||
477 | static void cppi41_run_queue(struct cppi41_dd *cdd) | ||
461 | { | 478 | { |
462 | struct cppi41_dd *cdd = c->cdd; | 479 | struct cppi41_channel *c, *_c; |
463 | unsigned long flags; | ||
464 | 480 | ||
465 | spin_lock_irqsave(&cdd->lock, flags); | 481 | list_for_each_entry_safe(c, _c, &cdd->pending, node) { |
466 | list_add_tail(&c->node, &cdd->pending); | 482 | push_desc_queue(c); |
467 | spin_unlock_irqrestore(&cdd->lock, flags); | 483 | list_del(&c->node); |
484 | } | ||
468 | } | 485 | } |
469 | 486 | ||
470 | static void cppi41_dma_issue_pending(struct dma_chan *chan) | 487 | static void cppi41_dma_issue_pending(struct dma_chan *chan) |
471 | { | 488 | { |
472 | struct cppi41_channel *c = to_cpp41_chan(chan); | 489 | struct cppi41_channel *c = to_cpp41_chan(chan); |
473 | struct cppi41_dd *cdd = c->cdd; | 490 | struct cppi41_dd *cdd = c->cdd; |
491 | unsigned long flags; | ||
474 | int error; | 492 | int error; |
475 | 493 | ||
476 | error = pm_runtime_get(cdd->ddev.dev); | 494 | error = pm_runtime_get(cdd->ddev.dev); |
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan) | |||
482 | return; | 500 | return; |
483 | } | 501 | } |
484 | 502 | ||
485 | if (likely(pm_runtime_active(cdd->ddev.dev))) | 503 | spin_lock_irqsave(&cdd->lock, flags); |
486 | push_desc_queue(c); | 504 | list_add_tail(&c->node, &cdd->pending); |
487 | else | 505 | if (!cdd->is_suspended) |
488 | pending_desc(c); | 506 | cppi41_run_queue(cdd); |
507 | spin_unlock_irqrestore(&cdd->lock, flags); | ||
489 | 508 | ||
490 | pm_runtime_mark_last_busy(cdd->ddev.dev); | 509 | pm_runtime_mark_last_busy(cdd->ddev.dev); |
491 | pm_runtime_put_autosuspend(cdd->ddev.dev); | 510 | pm_runtime_put_autosuspend(cdd->ddev.dev); |
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan) | |||
705 | WARN_ON(!cdd->chan_busy[desc_num]); | 724 | WARN_ON(!cdd->chan_busy[desc_num]); |
706 | cdd->chan_busy[desc_num] = NULL; | 725 | cdd->chan_busy[desc_num] = NULL; |
707 | 726 | ||
727 | /* Usecount for chan_busy[], paired with push_desc_queue() */ | ||
728 | pm_runtime_put(cdd->ddev.dev); | ||
729 | |||
708 | return 0; | 730 | return 0; |
709 | } | 731 | } |
710 | 732 | ||
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev) | |||
1150 | static int __maybe_unused cppi41_runtime_suspend(struct device *dev) | 1172 | static int __maybe_unused cppi41_runtime_suspend(struct device *dev) |
1151 | { | 1173 | { |
1152 | struct cppi41_dd *cdd = dev_get_drvdata(dev); | 1174 | struct cppi41_dd *cdd = dev_get_drvdata(dev); |
1175 | unsigned long flags; | ||
1153 | 1176 | ||
1177 | spin_lock_irqsave(&cdd->lock, flags); | ||
1178 | cdd->is_suspended = true; | ||
1154 | WARN_ON(!list_empty(&cdd->pending)); | 1179 | WARN_ON(!list_empty(&cdd->pending)); |
1180 | spin_unlock_irqrestore(&cdd->lock, flags); | ||
1155 | 1181 | ||
1156 | return 0; | 1182 | return 0; |
1157 | } | 1183 | } |
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev) | |||
1159 | static int __maybe_unused cppi41_runtime_resume(struct device *dev) | 1185 | static int __maybe_unused cppi41_runtime_resume(struct device *dev) |
1160 | { | 1186 | { |
1161 | struct cppi41_dd *cdd = dev_get_drvdata(dev); | 1187 | struct cppi41_dd *cdd = dev_get_drvdata(dev); |
1162 | struct cppi41_channel *c, *_c; | ||
1163 | unsigned long flags; | 1188 | unsigned long flags; |
1164 | 1189 | ||
1165 | spin_lock_irqsave(&cdd->lock, flags); | 1190 | spin_lock_irqsave(&cdd->lock, flags); |
1166 | list_for_each_entry_safe(c, _c, &cdd->pending, node) { | 1191 | cdd->is_suspended = false; |
1167 | push_desc_queue(c); | 1192 | cppi41_run_queue(cdd); |
1168 | list_del(&c->node); | ||
1169 | } | ||
1170 | spin_unlock_irqrestore(&cdd->lock, flags); | 1193 | spin_unlock_irqrestore(&cdd->lock, flags); |
1171 | 1194 | ||
1172 | return 0; | 1195 | return 0; |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 740bbb942594..7539f73df9e0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i) | |||
1699 | static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) | 1699 | static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) |
1700 | { | 1700 | { |
1701 | struct pl330_thread *thrd = NULL; | 1701 | struct pl330_thread *thrd = NULL; |
1702 | unsigned long flags; | ||
1703 | int chans, i; | 1702 | int chans, i; |
1704 | 1703 | ||
1705 | if (pl330->state == DYING) | 1704 | if (pl330->state == DYING) |
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) | |||
1707 | 1706 | ||
1708 | chans = pl330->pcfg.num_chan; | 1707 | chans = pl330->pcfg.num_chan; |
1709 | 1708 | ||
1710 | spin_lock_irqsave(&pl330->lock, flags); | ||
1711 | |||
1712 | for (i = 0; i < chans; i++) { | 1709 | for (i = 0; i < chans; i++) { |
1713 | thrd = &pl330->channels[i]; | 1710 | thrd = &pl330->channels[i]; |
1714 | if ((thrd->free) && (!_manager_ns(thrd) || | 1711 | if ((thrd->free) && (!_manager_ns(thrd) || |
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) | |||
1726 | thrd = NULL; | 1723 | thrd = NULL; |
1727 | } | 1724 | } |
1728 | 1725 | ||
1729 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1730 | |||
1731 | return thrd; | 1726 | return thrd; |
1732 | } | 1727 | } |
1733 | 1728 | ||
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev) | |||
1745 | static void pl330_release_channel(struct pl330_thread *thrd) | 1740 | static void pl330_release_channel(struct pl330_thread *thrd) |
1746 | { | 1741 | { |
1747 | struct pl330_dmac *pl330; | 1742 | struct pl330_dmac *pl330; |
1748 | unsigned long flags; | ||
1749 | 1743 | ||
1750 | if (!thrd || thrd->free) | 1744 | if (!thrd || thrd->free) |
1751 | return; | 1745 | return; |
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd) | |||
1757 | 1751 | ||
1758 | pl330 = thrd->dmac; | 1752 | pl330 = thrd->dmac; |
1759 | 1753 | ||
1760 | spin_lock_irqsave(&pl330->lock, flags); | ||
1761 | _free_event(thrd, thrd->ev); | 1754 | _free_event(thrd, thrd->ev); |
1762 | thrd->free = true; | 1755 | thrd->free = true; |
1763 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1764 | } | 1756 | } |
1765 | 1757 | ||
1766 | /* Initialize the structure for PL330 configuration, that can be used | 1758 | /* Initialize the structure for PL330 configuration, that can be used |
@@ -2122,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
2122 | struct pl330_dmac *pl330 = pch->dmac; | 2114 | struct pl330_dmac *pl330 = pch->dmac; |
2123 | unsigned long flags; | 2115 | unsigned long flags; |
2124 | 2116 | ||
2125 | spin_lock_irqsave(&pch->lock, flags); | 2117 | spin_lock_irqsave(&pl330->lock, flags); |
2126 | 2118 | ||
2127 | dma_cookie_init(chan); | 2119 | dma_cookie_init(chan); |
2128 | pch->cyclic = false; | 2120 | pch->cyclic = false; |
2129 | 2121 | ||
2130 | pch->thread = pl330_request_channel(pl330); | 2122 | pch->thread = pl330_request_channel(pl330); |
2131 | if (!pch->thread) { | 2123 | if (!pch->thread) { |
2132 | spin_unlock_irqrestore(&pch->lock, flags); | 2124 | spin_unlock_irqrestore(&pl330->lock, flags); |
2133 | return -ENOMEM; | 2125 | return -ENOMEM; |
2134 | } | 2126 | } |
2135 | 2127 | ||
2136 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); | 2128 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); |
2137 | 2129 | ||
2138 | spin_unlock_irqrestore(&pch->lock, flags); | 2130 | spin_unlock_irqrestore(&pl330->lock, flags); |
2139 | 2131 | ||
2140 | return 1; | 2132 | return 1; |
2141 | } | 2133 | } |
@@ -2238,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan) | |||
2238 | static void pl330_free_chan_resources(struct dma_chan *chan) | 2230 | static void pl330_free_chan_resources(struct dma_chan *chan) |
2239 | { | 2231 | { |
2240 | struct dma_pl330_chan *pch = to_pchan(chan); | 2232 | struct dma_pl330_chan *pch = to_pchan(chan); |
2233 | struct pl330_dmac *pl330 = pch->dmac; | ||
2241 | unsigned long flags; | 2234 | unsigned long flags; |
2242 | 2235 | ||
2243 | tasklet_kill(&pch->task); | 2236 | tasklet_kill(&pch->task); |
2244 | 2237 | ||
2245 | pm_runtime_get_sync(pch->dmac->ddma.dev); | 2238 | pm_runtime_get_sync(pch->dmac->ddma.dev); |
2246 | spin_lock_irqsave(&pch->lock, flags); | 2239 | spin_lock_irqsave(&pl330->lock, flags); |
2247 | 2240 | ||
2248 | pl330_release_channel(pch->thread); | 2241 | pl330_release_channel(pch->thread); |
2249 | pch->thread = NULL; | 2242 | pch->thread = NULL; |
@@ -2251,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan) | |||
2251 | if (pch->cyclic) | 2244 | if (pch->cyclic) |
2252 | list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); | 2245 | list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); |
2253 | 2246 | ||
2254 | spin_unlock_irqrestore(&pch->lock, flags); | 2247 | spin_unlock_irqrestore(&pl330->lock, flags); |
2255 | pm_runtime_mark_last_busy(pch->dmac->ddma.dev); | 2248 | pm_runtime_mark_last_busy(pch->dmac->ddma.dev); |
2256 | pm_runtime_put_autosuspend(pch->dmac->ddma.dev); | 2249 | pm_runtime_put_autosuspend(pch->dmac->ddma.dev); |
2257 | } | 2250 | } |
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 921dfa047202..260c4b4b492e 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c | |||
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) | |||
187 | struct exit_boot_struct { | 187 | struct exit_boot_struct { |
188 | efi_memory_desc_t *runtime_map; | 188 | efi_memory_desc_t *runtime_map; |
189 | int *runtime_entry_count; | 189 | int *runtime_entry_count; |
190 | void *new_fdt_addr; | ||
190 | }; | 191 | }; |
191 | 192 | ||
192 | static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, | 193 | static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, |
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, | |||
202 | efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, | 203 | efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, |
203 | p->runtime_map, p->runtime_entry_count); | 204 | p->runtime_map, p->runtime_entry_count); |
204 | 205 | ||
205 | return EFI_SUCCESS; | 206 | return update_fdt_memmap(p->new_fdt_addr, map); |
206 | } | 207 | } |
207 | 208 | ||
208 | /* | 209 | /* |
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, | |||
300 | 301 | ||
301 | priv.runtime_map = runtime_map; | 302 | priv.runtime_map = runtime_map; |
302 | priv.runtime_entry_count = &runtime_entry_count; | 303 | priv.runtime_entry_count = &runtime_entry_count; |
304 | priv.new_fdt_addr = (void *)*new_fdt_addr; | ||
303 | status = efi_exit_boot_services(sys_table, handle, &map, &priv, | 305 | status = efi_exit_boot_services(sys_table, handle, &map, &priv, |
304 | exit_boot_func); | 306 | exit_boot_func); |
305 | 307 | ||
306 | if (status == EFI_SUCCESS) { | 308 | if (status == EFI_SUCCESS) { |
307 | efi_set_virtual_address_map_t *svam; | 309 | efi_set_virtual_address_map_t *svam; |
308 | 310 | ||
309 | status = update_fdt_memmap((void *)*new_fdt_addr, &map); | ||
310 | if (status != EFI_SUCCESS) { | ||
311 | /* | ||
312 | * The kernel won't get far without the memory map, but | ||
313 | * may still be able to print something meaningful so | ||
314 | * return success here. | ||
315 | */ | ||
316 | return EFI_SUCCESS; | ||
317 | } | ||
318 | |||
319 | /* Install the new virtual address map */ | 311 | /* Install the new virtual address map */ |
320 | svam = sys_table->runtime->set_virtual_address_map; | 312 | svam = sys_table->runtime->set_virtual_address_map; |
321 | status = svam(runtime_entry_count * desc_size, desc_size, | 313 | status = svam(runtime_entry_count * desc_size, desc_size, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e2b0b1646f99..0635829b18cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) | |||
254 | } | 254 | } |
255 | WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); | 255 | WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); |
256 | 256 | ||
257 | if (adev->mode_info.num_crtc) | ||
258 | amdgpu_display_set_vga_render_state(adev, false); | ||
259 | |||
257 | gmc_v6_0_mc_stop(adev, &save); | 260 | gmc_v6_0_mc_stop(adev, &save); |
258 | 261 | ||
259 | if (gmc_v6_0_wait_for_idle((void *)adev)) { | 262 | if (gmc_v6_0_wait_for_idle((void *)adev)) { |
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) | |||
283 | dev_warn(adev->dev, "Wait for MC idle timedout !\n"); | 286 | dev_warn(adev->dev, "Wait for MC idle timedout !\n"); |
284 | } | 287 | } |
285 | gmc_v6_0_mc_resume(adev, &save); | 288 | gmc_v6_0_mc_resume(adev, &save); |
286 | amdgpu_display_set_vga_render_state(adev, false); | ||
287 | } | 289 | } |
288 | 290 | ||
289 | static int gmc_v6_0_mc_init(struct amdgpu_device *adev) | 291 | static int gmc_v6_0_mc_init(struct amdgpu_device *adev) |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 50f5cf7b69d1..fdfb1ec17e66 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev, | |||
2032 | } | 2032 | } |
2033 | 2033 | ||
2034 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 2034 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
2035 | struct drm_pending_vblank_event *event = crtc_state->event; | ||
2035 | /* | 2036 | /* |
2036 | * TEST_ONLY and PAGE_FLIP_EVENT are mutually | 2037 | * Free the allocated event. drm_atomic_helper_setup_commit |
2037 | * exclusive, if they weren't, this code should be | 2038 | * can allocate an event too, so only free it if it's ours |
2038 | * called on success for TEST_ONLY too. | 2039 | * to prevent a double free in drm_atomic_state_clear. |
2039 | */ | 2040 | */ |
2040 | if (crtc_state->event) | 2041 | if (event && (event->base.fence || event->base.file_priv)) { |
2041 | drm_event_cancel_free(dev, &crtc_state->event->base); | 2042 | drm_event_cancel_free(dev, &event->base); |
2043 | crtc_state->event = NULL; | ||
2044 | } | ||
2042 | } | 2045 | } |
2043 | 2046 | ||
2044 | if (!fence_state) | 2047 | if (!fence_state) |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 34f757bcabae..4594477dee00 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, | |||
1666 | 1666 | ||
1667 | funcs = plane->helper_private; | 1667 | funcs = plane->helper_private; |
1668 | 1668 | ||
1669 | if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) | ||
1670 | continue; | ||
1671 | |||
1672 | if (funcs->prepare_fb) { | 1669 | if (funcs->prepare_fb) { |
1673 | ret = funcs->prepare_fb(plane, plane_state); | 1670 | ret = funcs->prepare_fb(plane, plane_state); |
1674 | if (ret) | 1671 | if (ret) |
@@ -1685,9 +1682,6 @@ fail: | |||
1685 | if (j >= i) | 1682 | if (j >= i) |
1686 | continue; | 1683 | continue; |
1687 | 1684 | ||
1688 | if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) | ||
1689 | continue; | ||
1690 | |||
1691 | funcs = plane->helper_private; | 1685 | funcs = plane->helper_private; |
1692 | 1686 | ||
1693 | if (funcs->cleanup_fb) | 1687 | if (funcs->cleanup_fb) |
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev, | |||
1954 | for_each_plane_in_state(old_state, plane, plane_state, i) { | 1948 | for_each_plane_in_state(old_state, plane, plane_state, i) { |
1955 | const struct drm_plane_helper_funcs *funcs; | 1949 | const struct drm_plane_helper_funcs *funcs; |
1956 | 1950 | ||
1957 | if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc)) | ||
1958 | continue; | ||
1959 | |||
1960 | funcs = plane->helper_private; | 1951 | funcs = plane->helper_private; |
1961 | 1952 | ||
1962 | if (funcs->cleanup_fb) | 1953 | if (funcs->cleanup_fb) |
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 5a4526289392..7a7019ac9388 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c | |||
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev, | |||
225 | 225 | ||
226 | INIT_LIST_HEAD(&connector->probed_modes); | 226 | INIT_LIST_HEAD(&connector->probed_modes); |
227 | INIT_LIST_HEAD(&connector->modes); | 227 | INIT_LIST_HEAD(&connector->modes); |
228 | mutex_init(&connector->mutex); | ||
228 | connector->edid_blob_ptr = NULL; | 229 | connector->edid_blob_ptr = NULL; |
229 | connector->status = connector_status_unknown; | 230 | connector->status = connector_status_unknown; |
230 | 231 | ||
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector) | |||
359 | connector->funcs->atomic_destroy_state(connector, | 360 | connector->funcs->atomic_destroy_state(connector, |
360 | connector->state); | 361 | connector->state); |
361 | 362 | ||
363 | mutex_destroy(&connector->mutex); | ||
364 | |||
362 | memset(connector, 0, sizeof(*connector)); | 365 | memset(connector, 0, sizeof(*connector)); |
363 | } | 366 | } |
364 | EXPORT_SYMBOL(drm_connector_cleanup); | 367 | EXPORT_SYMBOL(drm_connector_cleanup); |
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup); | |||
374 | */ | 377 | */ |
375 | int drm_connector_register(struct drm_connector *connector) | 378 | int drm_connector_register(struct drm_connector *connector) |
376 | { | 379 | { |
377 | int ret; | 380 | int ret = 0; |
378 | 381 | ||
379 | if (connector->registered) | 382 | if (!connector->dev->registered) |
380 | return 0; | 383 | return 0; |
381 | 384 | ||
385 | mutex_lock(&connector->mutex); | ||
386 | if (connector->registered) | ||
387 | goto unlock; | ||
388 | |||
382 | ret = drm_sysfs_connector_add(connector); | 389 | ret = drm_sysfs_connector_add(connector); |
383 | if (ret) | 390 | if (ret) |
384 | return ret; | 391 | goto unlock; |
385 | 392 | ||
386 | ret = drm_debugfs_connector_add(connector); | 393 | ret = drm_debugfs_connector_add(connector); |
387 | if (ret) { | 394 | if (ret) { |
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector) | |||
397 | drm_mode_object_register(connector->dev, &connector->base); | 404 | drm_mode_object_register(connector->dev, &connector->base); |
398 | 405 | ||
399 | connector->registered = true; | 406 | connector->registered = true; |
400 | return 0; | 407 | goto unlock; |
401 | 408 | ||
402 | err_debugfs: | 409 | err_debugfs: |
403 | drm_debugfs_connector_remove(connector); | 410 | drm_debugfs_connector_remove(connector); |
404 | err_sysfs: | 411 | err_sysfs: |
405 | drm_sysfs_connector_remove(connector); | 412 | drm_sysfs_connector_remove(connector); |
413 | unlock: | ||
414 | mutex_unlock(&connector->mutex); | ||
406 | return ret; | 415 | return ret; |
407 | } | 416 | } |
408 | EXPORT_SYMBOL(drm_connector_register); | 417 | EXPORT_SYMBOL(drm_connector_register); |
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register); | |||
415 | */ | 424 | */ |
416 | void drm_connector_unregister(struct drm_connector *connector) | 425 | void drm_connector_unregister(struct drm_connector *connector) |
417 | { | 426 | { |
418 | if (!connector->registered) | 427 | mutex_lock(&connector->mutex); |
428 | if (!connector->registered) { | ||
429 | mutex_unlock(&connector->mutex); | ||
419 | return; | 430 | return; |
431 | } | ||
420 | 432 | ||
421 | if (connector->funcs->early_unregister) | 433 | if (connector->funcs->early_unregister) |
422 | connector->funcs->early_unregister(connector); | 434 | connector->funcs->early_unregister(connector); |
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector) | |||
425 | drm_debugfs_connector_remove(connector); | 437 | drm_debugfs_connector_remove(connector); |
426 | 438 | ||
427 | connector->registered = false; | 439 | connector->registered = false; |
440 | mutex_unlock(&connector->mutex); | ||
428 | } | 441 | } |
429 | EXPORT_SYMBOL(drm_connector_unregister); | 442 | EXPORT_SYMBOL(drm_connector_unregister); |
430 | 443 | ||
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a525751b4559..6594b4088f11 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) | |||
745 | if (ret) | 745 | if (ret) |
746 | goto err_minors; | 746 | goto err_minors; |
747 | 747 | ||
748 | dev->registered = true; | ||
749 | |||
748 | if (dev->driver->load) { | 750 | if (dev->driver->load) { |
749 | ret = dev->driver->load(dev, flags); | 751 | ret = dev->driver->load(dev, flags); |
750 | if (ret) | 752 | if (ret) |
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev) | |||
785 | 787 | ||
786 | drm_lastclose(dev); | 788 | drm_lastclose(dev); |
787 | 789 | ||
790 | dev->registered = false; | ||
791 | |||
788 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 792 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
789 | drm_modeset_unregister_all(dev); | 793 | drm_modeset_unregister_all(dev); |
790 | 794 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b2c4a0b8a627..728ca3ea74d2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_device *dev) | |||
213 | } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { | 213 | } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { |
214 | dev_priv->pch_type = PCH_KBP; | 214 | dev_priv->pch_type = PCH_KBP; |
215 | DRM_DEBUG_KMS("Found KabyPoint PCH\n"); | 215 | DRM_DEBUG_KMS("Found KabyPoint PCH\n"); |
216 | WARN_ON(!IS_KABYLAKE(dev_priv)); | 216 | WARN_ON(!IS_SKYLAKE(dev_priv) && |
217 | !IS_KABYLAKE(dev_priv)); | ||
217 | } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || | 218 | } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || |
218 | (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || | 219 | (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || |
219 | ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && | 220 | ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && |
@@ -2427,6 +2428,7 @@ static int intel_runtime_resume(struct device *kdev) | |||
2427 | * we can do is to hope that things will still work (and disable RPM). | 2428 | * we can do is to hope that things will still work (and disable RPM). |
2428 | */ | 2429 | */ |
2429 | i915_gem_init_swizzling(dev_priv); | 2430 | i915_gem_init_swizzling(dev_priv); |
2431 | i915_gem_restore_fences(dev_priv); | ||
2430 | 2432 | ||
2431 | intel_runtime_pm_enable_interrupts(dev_priv); | 2433 | intel_runtime_pm_enable_interrupts(dev_priv); |
2432 | 2434 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 69bc3b0c4390..8493e19b563a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1012,6 +1012,8 @@ struct intel_fbc { | |||
1012 | struct work_struct underrun_work; | 1012 | struct work_struct underrun_work; |
1013 | 1013 | ||
1014 | struct intel_fbc_state_cache { | 1014 | struct intel_fbc_state_cache { |
1015 | struct i915_vma *vma; | ||
1016 | |||
1015 | struct { | 1017 | struct { |
1016 | unsigned int mode_flags; | 1018 | unsigned int mode_flags; |
1017 | uint32_t hsw_bdw_pixel_rate; | 1019 | uint32_t hsw_bdw_pixel_rate; |
@@ -1025,15 +1027,14 @@ struct intel_fbc { | |||
1025 | } plane; | 1027 | } plane; |
1026 | 1028 | ||
1027 | struct { | 1029 | struct { |
1028 | u64 ilk_ggtt_offset; | ||
1029 | uint32_t pixel_format; | 1030 | uint32_t pixel_format; |
1030 | unsigned int stride; | 1031 | unsigned int stride; |
1031 | int fence_reg; | ||
1032 | unsigned int tiling_mode; | ||
1033 | } fb; | 1032 | } fb; |
1034 | } state_cache; | 1033 | } state_cache; |
1035 | 1034 | ||
1036 | struct intel_fbc_reg_params { | 1035 | struct intel_fbc_reg_params { |
1036 | struct i915_vma *vma; | ||
1037 | |||
1037 | struct { | 1038 | struct { |
1038 | enum pipe pipe; | 1039 | enum pipe pipe; |
1039 | enum plane plane; | 1040 | enum plane plane; |
@@ -1041,10 +1042,8 @@ struct intel_fbc { | |||
1041 | } crtc; | 1042 | } crtc; |
1042 | 1043 | ||
1043 | struct { | 1044 | struct { |
1044 | u64 ggtt_offset; | ||
1045 | uint32_t pixel_format; | 1045 | uint32_t pixel_format; |
1046 | unsigned int stride; | 1046 | unsigned int stride; |
1047 | int fence_reg; | ||
1048 | } fb; | 1047 | } fb; |
1049 | 1048 | ||
1050 | int cfb_size; | 1049 | int cfb_size; |
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, | |||
3168 | return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); | 3167 | return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); |
3169 | } | 3168 | } |
3170 | 3169 | ||
3171 | static inline unsigned long | ||
3172 | i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, | ||
3173 | const struct i915_ggtt_view *view) | ||
3174 | { | ||
3175 | return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view)); | ||
3176 | } | ||
3177 | |||
3178 | /* i915_gem_fence_reg.c */ | 3170 | /* i915_gem_fence_reg.c */ |
3179 | int __must_check i915_vma_get_fence(struct i915_vma *vma); | 3171 | int __must_check i915_vma_get_fence(struct i915_vma *vma); |
3180 | int __must_check i915_vma_put_fence(struct i915_vma *vma); | 3172 | int __must_check i915_vma_put_fence(struct i915_vma *vma); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4b23a7814713..24b5b046754b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2010,8 +2010,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) | |||
2010 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 2010 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
2011 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | 2011 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
2012 | 2012 | ||
2013 | if (WARN_ON(reg->pin_count)) | 2013 | /* Ideally we want to assert that the fence register is not |
2014 | continue; | 2014 | * live at this point (i.e. that no piece of code will be |
2015 | * trying to write through fence + GTT, as that both violates | ||
2016 | * our tracking of activity and associated locking/barriers, | ||
2017 | * but also is illegal given that the hw is powered down). | ||
2018 | * | ||
2019 | * Previously we used reg->pin_count as a "liveness" indicator. | ||
2020 | * That is not sufficient, and we need a more fine-grained | ||
2021 | * tool if we want to have a sanity check here. | ||
2022 | */ | ||
2015 | 2023 | ||
2016 | if (!reg->vma) | 2024 | if (!reg->vma) |
2017 | continue; | 2025 | continue; |
@@ -3478,7 +3486,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
3478 | vma->display_alignment = max_t(u64, vma->display_alignment, alignment); | 3486 | vma->display_alignment = max_t(u64, vma->display_alignment, alignment); |
3479 | 3487 | ||
3480 | /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ | 3488 | /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ |
3481 | if (obj->cache_dirty) { | 3489 | if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) { |
3482 | i915_gem_clflush_object(obj, true); | 3490 | i915_gem_clflush_object(obj, true); |
3483 | intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); | 3491 | intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); |
3484 | } | 3492 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 097d9d8c2315..b8b877c91b0a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -1181,14 +1181,14 @@ validate_exec_list(struct drm_device *dev, | |||
1181 | if (exec[i].offset != | 1181 | if (exec[i].offset != |
1182 | gen8_canonical_addr(exec[i].offset & PAGE_MASK)) | 1182 | gen8_canonical_addr(exec[i].offset & PAGE_MASK)) |
1183 | return -EINVAL; | 1183 | return -EINVAL; |
1184 | |||
1185 | /* From drm_mm perspective address space is continuous, | ||
1186 | * so from this point we're always using non-canonical | ||
1187 | * form internally. | ||
1188 | */ | ||
1189 | exec[i].offset = gen8_noncanonical_addr(exec[i].offset); | ||
1190 | } | 1184 | } |
1191 | 1185 | ||
1186 | /* From drm_mm perspective address space is continuous, | ||
1187 | * so from this point we're always using non-canonical | ||
1188 | * form internally. | ||
1189 | */ | ||
1190 | exec[i].offset = gen8_noncanonical_addr(exec[i].offset); | ||
1191 | |||
1192 | if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) | 1192 | if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) |
1193 | return -EINVAL; | 1193 | return -EINVAL; |
1194 | 1194 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index 4b3ff3e5b911..d09c74973cb3 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c | |||
@@ -66,8 +66,16 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | |||
66 | 66 | ||
67 | max_order = MAX_ORDER; | 67 | max_order = MAX_ORDER; |
68 | #ifdef CONFIG_SWIOTLB | 68 | #ifdef CONFIG_SWIOTLB |
69 | if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */ | 69 | if (swiotlb_nr_tbl()) { |
70 | max_order = min(max_order, ilog2(IO_TLB_SEGPAGES)); | 70 | unsigned int max_segment; |
71 | |||
72 | max_segment = swiotlb_max_segment(); | ||
73 | if (max_segment) { | ||
74 | max_segment = max_t(unsigned int, max_segment, | ||
75 | PAGE_SIZE) >> PAGE_SHIFT; | ||
76 | max_order = min(max_order, ilog2(max_segment)); | ||
77 | } | ||
78 | } | ||
71 | #endif | 79 | #endif |
72 | 80 | ||
73 | gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; | 81 | gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; |
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index dbe9fb41ae53..8d3e515f27ba 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c | |||
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane) | |||
85 | 85 | ||
86 | __drm_atomic_helper_plane_duplicate_state(plane, state); | 86 | __drm_atomic_helper_plane_duplicate_state(plane, state); |
87 | 87 | ||
88 | intel_state->vma = NULL; | ||
89 | |||
88 | return state; | 90 | return state; |
89 | } | 91 | } |
90 | 92 | ||
@@ -100,6 +102,24 @@ void | |||
100 | intel_plane_destroy_state(struct drm_plane *plane, | 102 | intel_plane_destroy_state(struct drm_plane *plane, |
101 | struct drm_plane_state *state) | 103 | struct drm_plane_state *state) |
102 | { | 104 | { |
105 | struct i915_vma *vma; | ||
106 | |||
107 | vma = fetch_and_zero(&to_intel_plane_state(state)->vma); | ||
108 | |||
109 | /* | ||
110 | * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma. | ||
111 | * We currently don't clear all planes during driver unload, so we have | ||
112 | * to be able to unpin vma here for now. | ||
113 | * | ||
114 | * Normally this can only happen during unload when kmscon is disabled | ||
115 | * and userspace doesn't attempt to set a framebuffer at all. | ||
116 | */ | ||
117 | if (vma) { | ||
118 | mutex_lock(&plane->dev->struct_mutex); | ||
119 | intel_unpin_fb_vma(vma); | ||
120 | mutex_unlock(&plane->dev->struct_mutex); | ||
121 | } | ||
122 | |||
103 | drm_atomic_helper_plane_destroy_state(plane, state); | 123 | drm_atomic_helper_plane_destroy_state(plane, state); |
104 | } | 124 | } |
105 | 125 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 77f7b1d849a4..891c86aef99d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2235,24 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) | |||
2235 | i915_vma_pin_fence(vma); | 2235 | i915_vma_pin_fence(vma); |
2236 | } | 2236 | } |
2237 | 2237 | ||
2238 | i915_vma_get(vma); | ||
2238 | err: | 2239 | err: |
2239 | intel_runtime_pm_put(dev_priv); | 2240 | intel_runtime_pm_put(dev_priv); |
2240 | return vma; | 2241 | return vma; |
2241 | } | 2242 | } |
2242 | 2243 | ||
2243 | void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) | 2244 | void intel_unpin_fb_vma(struct i915_vma *vma) |
2244 | { | 2245 | { |
2245 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 2246 | lockdep_assert_held(&vma->vm->dev->struct_mutex); |
2246 | struct i915_ggtt_view view; | ||
2247 | struct i915_vma *vma; | ||
2248 | |||
2249 | WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); | ||
2250 | 2247 | ||
2251 | intel_fill_fb_ggtt_view(&view, fb, rotation); | 2248 | if (WARN_ON_ONCE(!vma)) |
2252 | vma = i915_gem_object_to_ggtt(obj, &view); | 2249 | return; |
2253 | 2250 | ||
2254 | i915_vma_unpin_fence(vma); | 2251 | i915_vma_unpin_fence(vma); |
2255 | i915_gem_object_unpin_from_display_plane(vma); | 2252 | i915_gem_object_unpin_from_display_plane(vma); |
2253 | i915_vma_put(vma); | ||
2256 | } | 2254 | } |
2257 | 2255 | ||
2258 | static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, | 2256 | static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, |
@@ -2747,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
2747 | struct drm_device *dev = intel_crtc->base.dev; | 2745 | struct drm_device *dev = intel_crtc->base.dev; |
2748 | struct drm_i915_private *dev_priv = to_i915(dev); | 2746 | struct drm_i915_private *dev_priv = to_i915(dev); |
2749 | struct drm_crtc *c; | 2747 | struct drm_crtc *c; |
2750 | struct intel_crtc *i; | ||
2751 | struct drm_i915_gem_object *obj; | 2748 | struct drm_i915_gem_object *obj; |
2752 | struct drm_plane *primary = intel_crtc->base.primary; | 2749 | struct drm_plane *primary = intel_crtc->base.primary; |
2753 | struct drm_plane_state *plane_state = primary->state; | 2750 | struct drm_plane_state *plane_state = primary->state; |
@@ -2772,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
2772 | * an fb with another CRTC instead | 2769 | * an fb with another CRTC instead |
2773 | */ | 2770 | */ |
2774 | for_each_crtc(dev, c) { | 2771 | for_each_crtc(dev, c) { |
2775 | i = to_intel_crtc(c); | 2772 | struct intel_plane_state *state; |
2776 | 2773 | ||
2777 | if (c == &intel_crtc->base) | 2774 | if (c == &intel_crtc->base) |
2778 | continue; | 2775 | continue; |
2779 | 2776 | ||
2780 | if (!i->active) | 2777 | if (!to_intel_crtc(c)->active) |
2781 | continue; | 2778 | continue; |
2782 | 2779 | ||
2783 | fb = c->primary->fb; | 2780 | state = to_intel_plane_state(c->primary->state); |
2784 | if (!fb) | 2781 | if (!state->vma) |
2785 | continue; | 2782 | continue; |
2786 | 2783 | ||
2787 | obj = intel_fb_obj(fb); | 2784 | if (intel_plane_ggtt_offset(state) == plane_config->base) { |
2788 | if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { | 2785 | fb = c->primary->fb; |
2789 | drm_framebuffer_reference(fb); | 2786 | drm_framebuffer_reference(fb); |
2790 | goto valid_fb; | 2787 | goto valid_fb; |
2791 | } | 2788 | } |
@@ -2806,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
2806 | return; | 2803 | return; |
2807 | 2804 | ||
2808 | valid_fb: | 2805 | valid_fb: |
2806 | mutex_lock(&dev->struct_mutex); | ||
2807 | intel_state->vma = | ||
2808 | intel_pin_and_fence_fb_obj(fb, primary->state->rotation); | ||
2809 | mutex_unlock(&dev->struct_mutex); | ||
2810 | if (IS_ERR(intel_state->vma)) { | ||
2811 | DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", | ||
2812 | intel_crtc->pipe, PTR_ERR(intel_state->vma)); | ||
2813 | |||
2814 | intel_state->vma = NULL; | ||
2815 | drm_framebuffer_unreference(fb); | ||
2816 | return; | ||
2817 | } | ||
2818 | |||
2809 | plane_state->src_x = 0; | 2819 | plane_state->src_x = 0; |
2810 | plane_state->src_y = 0; | 2820 | plane_state->src_y = 0; |
2811 | plane_state->src_w = fb->width << 16; | 2821 | plane_state->src_w = fb->width << 16; |
@@ -3101,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, | |||
3101 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | 3111 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
3102 | if (INTEL_GEN(dev_priv) >= 4) { | 3112 | if (INTEL_GEN(dev_priv) >= 4) { |
3103 | I915_WRITE(DSPSURF(plane), | 3113 | I915_WRITE(DSPSURF(plane), |
3104 | intel_fb_gtt_offset(fb, rotation) + | 3114 | intel_plane_ggtt_offset(plane_state) + |
3105 | intel_crtc->dspaddr_offset); | 3115 | intel_crtc->dspaddr_offset); |
3106 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | 3116 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
3107 | I915_WRITE(DSPLINOFF(plane), linear_offset); | 3117 | I915_WRITE(DSPLINOFF(plane), linear_offset); |
3108 | } else { | 3118 | } else { |
3109 | I915_WRITE(DSPADDR(plane), | 3119 | I915_WRITE(DSPADDR(plane), |
3110 | intel_fb_gtt_offset(fb, rotation) + | 3120 | intel_plane_ggtt_offset(plane_state) + |
3111 | intel_crtc->dspaddr_offset); | 3121 | intel_crtc->dspaddr_offset); |
3112 | } | 3122 | } |
3113 | POSTING_READ(reg); | 3123 | POSTING_READ(reg); |
@@ -3204,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, | |||
3204 | 3214 | ||
3205 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | 3215 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
3206 | I915_WRITE(DSPSURF(plane), | 3216 | I915_WRITE(DSPSURF(plane), |
3207 | intel_fb_gtt_offset(fb, rotation) + | 3217 | intel_plane_ggtt_offset(plane_state) + |
3208 | intel_crtc->dspaddr_offset); | 3218 | intel_crtc->dspaddr_offset); |
3209 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | 3219 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
3210 | I915_WRITE(DSPOFFSET(plane), (y << 16) | x); | 3220 | I915_WRITE(DSPOFFSET(plane), (y << 16) | x); |
@@ -3227,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, | |||
3227 | } | 3237 | } |
3228 | } | 3238 | } |
3229 | 3239 | ||
3230 | u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, | ||
3231 | unsigned int rotation) | ||
3232 | { | ||
3233 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
3234 | struct i915_ggtt_view view; | ||
3235 | struct i915_vma *vma; | ||
3236 | |||
3237 | intel_fill_fb_ggtt_view(&view, fb, rotation); | ||
3238 | |||
3239 | vma = i915_gem_object_to_ggtt(obj, &view); | ||
3240 | if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", | ||
3241 | view.type)) | ||
3242 | return -1; | ||
3243 | |||
3244 | return i915_ggtt_offset(vma); | ||
3245 | } | ||
3246 | |||
3247 | static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) | 3240 | static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) |
3248 | { | 3241 | { |
3249 | struct drm_device *dev = intel_crtc->base.dev; | 3242 | struct drm_device *dev = intel_crtc->base.dev; |
@@ -3438,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, | |||
3438 | } | 3431 | } |
3439 | 3432 | ||
3440 | I915_WRITE(PLANE_SURF(pipe, 0), | 3433 | I915_WRITE(PLANE_SURF(pipe, 0), |
3441 | intel_fb_gtt_offset(fb, rotation) + surf_addr); | 3434 | intel_plane_ggtt_offset(plane_state) + surf_addr); |
3442 | 3435 | ||
3443 | POSTING_READ(PLANE_SURF(pipe, 0)); | 3436 | POSTING_READ(PLANE_SURF(pipe, 0)); |
3444 | } | 3437 | } |
@@ -4269,10 +4262,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) | |||
4269 | drm_crtc_vblank_put(&intel_crtc->base); | 4262 | drm_crtc_vblank_put(&intel_crtc->base); |
4270 | 4263 | ||
4271 | wake_up_all(&dev_priv->pending_flip_queue); | 4264 | wake_up_all(&dev_priv->pending_flip_queue); |
4272 | queue_work(dev_priv->wq, &work->unpin_work); | ||
4273 | |||
4274 | trace_i915_flip_complete(intel_crtc->plane, | 4265 | trace_i915_flip_complete(intel_crtc->plane, |
4275 | work->pending_flip_obj); | 4266 | work->pending_flip_obj); |
4267 | |||
4268 | queue_work(dev_priv->wq, &work->unpin_work); | ||
4276 | } | 4269 | } |
4277 | 4270 | ||
4278 | static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | 4271 | static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
@@ -11533,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
11533 | flush_work(&work->mmio_work); | 11526 | flush_work(&work->mmio_work); |
11534 | 11527 | ||
11535 | mutex_lock(&dev->struct_mutex); | 11528 | mutex_lock(&dev->struct_mutex); |
11536 | intel_unpin_fb_obj(work->old_fb, primary->state->rotation); | 11529 | intel_unpin_fb_vma(work->old_vma); |
11537 | i915_gem_object_put(work->pending_flip_obj); | 11530 | i915_gem_object_put(work->pending_flip_obj); |
11538 | mutex_unlock(&dev->struct_mutex); | 11531 | mutex_unlock(&dev->struct_mutex); |
11539 | 11532 | ||
@@ -12243,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
12243 | goto cleanup_pending; | 12236 | goto cleanup_pending; |
12244 | } | 12237 | } |
12245 | 12238 | ||
12246 | work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); | 12239 | work->old_vma = to_intel_plane_state(primary->state)->vma; |
12247 | work->gtt_offset += intel_crtc->dspaddr_offset; | 12240 | to_intel_plane_state(primary->state)->vma = vma; |
12241 | |||
12242 | work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset; | ||
12248 | work->rotation = crtc->primary->state->rotation; | 12243 | work->rotation = crtc->primary->state->rotation; |
12249 | 12244 | ||
12250 | /* | 12245 | /* |
@@ -12298,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
12298 | cleanup_request: | 12293 | cleanup_request: |
12299 | i915_add_request_no_flush(request); | 12294 | i915_add_request_no_flush(request); |
12300 | cleanup_unpin: | 12295 | cleanup_unpin: |
12301 | intel_unpin_fb_obj(fb, crtc->primary->state->rotation); | 12296 | to_intel_plane_state(primary->state)->vma = work->old_vma; |
12297 | intel_unpin_fb_vma(vma); | ||
12302 | cleanup_pending: | 12298 | cleanup_pending: |
12303 | atomic_dec(&intel_crtc->unpin_work_count); | 12299 | atomic_dec(&intel_crtc->unpin_work_count); |
12304 | unlock: | 12300 | unlock: |
@@ -14791,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
14791 | DRM_DEBUG_KMS("failed to pin object\n"); | 14787 | DRM_DEBUG_KMS("failed to pin object\n"); |
14792 | return PTR_ERR(vma); | 14788 | return PTR_ERR(vma); |
14793 | } | 14789 | } |
14790 | |||
14791 | to_intel_plane_state(new_state)->vma = vma; | ||
14794 | } | 14792 | } |
14795 | 14793 | ||
14796 | return 0; | 14794 | return 0; |
@@ -14809,19 +14807,12 @@ void | |||
14809 | intel_cleanup_plane_fb(struct drm_plane *plane, | 14807 | intel_cleanup_plane_fb(struct drm_plane *plane, |
14810 | struct drm_plane_state *old_state) | 14808 | struct drm_plane_state *old_state) |
14811 | { | 14809 | { |
14812 | struct drm_i915_private *dev_priv = to_i915(plane->dev); | 14810 | struct i915_vma *vma; |
14813 | struct intel_plane_state *old_intel_state; | ||
14814 | struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); | ||
14815 | struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); | ||
14816 | |||
14817 | old_intel_state = to_intel_plane_state(old_state); | ||
14818 | |||
14819 | if (!obj && !old_obj) | ||
14820 | return; | ||
14821 | 14811 | ||
14822 | if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || | 14812 | /* Should only be called after a successful intel_prepare_plane_fb()! */ |
14823 | !INTEL_INFO(dev_priv)->cursor_needs_physical)) | 14813 | vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma); |
14824 | intel_unpin_fb_obj(old_state->fb, old_state->rotation); | 14814 | if (vma) |
14815 | intel_unpin_fb_vma(vma); | ||
14825 | } | 14816 | } |
14826 | 14817 | ||
14827 | int | 14818 | int |
@@ -15163,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane, | |||
15163 | if (!obj) | 15154 | if (!obj) |
15164 | addr = 0; | 15155 | addr = 0; |
15165 | else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) | 15156 | else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) |
15166 | addr = i915_gem_object_ggtt_offset(obj, NULL); | 15157 | addr = intel_plane_ggtt_offset(state); |
15167 | else | 15158 | else |
15168 | addr = obj->phys_handle->busaddr; | 15159 | addr = obj->phys_handle->busaddr; |
15169 | 15160 | ||
@@ -17063,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev) | |||
17063 | void intel_modeset_gem_init(struct drm_device *dev) | 17054 | void intel_modeset_gem_init(struct drm_device *dev) |
17064 | { | 17055 | { |
17065 | struct drm_i915_private *dev_priv = to_i915(dev); | 17056 | struct drm_i915_private *dev_priv = to_i915(dev); |
17066 | struct drm_crtc *c; | ||
17067 | struct drm_i915_gem_object *obj; | ||
17068 | 17057 | ||
17069 | intel_init_gt_powersave(dev_priv); | 17058 | intel_init_gt_powersave(dev_priv); |
17070 | 17059 | ||
17071 | intel_modeset_init_hw(dev); | 17060 | intel_modeset_init_hw(dev); |
17072 | 17061 | ||
17073 | intel_setup_overlay(dev_priv); | 17062 | intel_setup_overlay(dev_priv); |
17074 | |||
17075 | /* | ||
17076 | * Make sure any fbs we allocated at startup are properly | ||
17077 | * pinned & fenced. When we do the allocation it's too early | ||
17078 | * for this. | ||
17079 | */ | ||
17080 | for_each_crtc(dev, c) { | ||
17081 | struct i915_vma *vma; | ||
17082 | |||
17083 | obj = intel_fb_obj(c->primary->fb); | ||
17084 | if (obj == NULL) | ||
17085 | continue; | ||
17086 | |||
17087 | mutex_lock(&dev->struct_mutex); | ||
17088 | vma = intel_pin_and_fence_fb_obj(c->primary->fb, | ||
17089 | c->primary->state->rotation); | ||
17090 | mutex_unlock(&dev->struct_mutex); | ||
17091 | if (IS_ERR(vma)) { | ||
17092 | DRM_ERROR("failed to pin boot fb on pipe %d\n", | ||
17093 | to_intel_crtc(c)->pipe); | ||
17094 | drm_framebuffer_unreference(c->primary->fb); | ||
17095 | c->primary->fb = NULL; | ||
17096 | c->primary->crtc = c->primary->state->crtc = NULL; | ||
17097 | update_state_fb(c->primary); | ||
17098 | c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); | ||
17099 | } | ||
17100 | } | ||
17101 | } | 17063 | } |
17102 | 17064 | ||
17103 | int intel_connector_register(struct drm_connector *connector) | 17065 | int intel_connector_register(struct drm_connector *connector) |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 58a756f2f224..a2f0e070d38d 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -1730,7 +1730,8 @@ bxt_get_dpll(struct intel_crtc *crtc, | |||
1730 | return NULL; | 1730 | return NULL; |
1731 | 1731 | ||
1732 | if ((encoder->type == INTEL_OUTPUT_DP || | 1732 | if ((encoder->type == INTEL_OUTPUT_DP || |
1733 | encoder->type == INTEL_OUTPUT_EDP) && | 1733 | encoder->type == INTEL_OUTPUT_EDP || |
1734 | encoder->type == INTEL_OUTPUT_DP_MST) && | ||
1734 | !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) | 1735 | !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) |
1735 | return NULL; | 1736 | return NULL; |
1736 | 1737 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd72ae171eeb..03a2112004f9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -377,6 +377,7 @@ struct intel_atomic_state { | |||
377 | struct intel_plane_state { | 377 | struct intel_plane_state { |
378 | struct drm_plane_state base; | 378 | struct drm_plane_state base; |
379 | struct drm_rect clip; | 379 | struct drm_rect clip; |
380 | struct i915_vma *vma; | ||
380 | 381 | ||
381 | struct { | 382 | struct { |
382 | u32 offset; | 383 | u32 offset; |
@@ -1046,6 +1047,7 @@ struct intel_flip_work { | |||
1046 | struct work_struct mmio_work; | 1047 | struct work_struct mmio_work; |
1047 | 1048 | ||
1048 | struct drm_crtc *crtc; | 1049 | struct drm_crtc *crtc; |
1050 | struct i915_vma *old_vma; | ||
1049 | struct drm_framebuffer *old_fb; | 1051 | struct drm_framebuffer *old_fb; |
1050 | struct drm_i915_gem_object *pending_flip_obj; | 1052 | struct drm_i915_gem_object *pending_flip_obj; |
1051 | struct drm_pending_vblank_event *event; | 1053 | struct drm_pending_vblank_event *event; |
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, | |||
1273 | struct drm_modeset_acquire_ctx *ctx); | 1275 | struct drm_modeset_acquire_ctx *ctx); |
1274 | struct i915_vma * | 1276 | struct i915_vma * |
1275 | intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); | 1277 | intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); |
1276 | void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); | 1278 | void intel_unpin_fb_vma(struct i915_vma *vma); |
1277 | struct drm_framebuffer * | 1279 | struct drm_framebuffer * |
1278 | __intel_framebuffer_create(struct drm_device *dev, | 1280 | __intel_framebuffer_create(struct drm_device *dev, |
1279 | struct drm_mode_fb_cmd2 *mode_cmd, | 1281 | struct drm_mode_fb_cmd2 *mode_cmd, |
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, | |||
1362 | int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); | 1364 | int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); |
1363 | int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); | 1365 | int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); |
1364 | 1366 | ||
1365 | u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation); | 1367 | static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) |
1368 | { | ||
1369 | return i915_ggtt_offset(state->vma); | ||
1370 | } | ||
1366 | 1371 | ||
1367 | u32 skl_plane_ctl_format(uint32_t pixel_format); | 1372 | u32 skl_plane_ctl_format(uint32_t pixel_format); |
1368 | u32 skl_plane_ctl_tiling(uint64_t fb_modifier); | 1373 | u32 skl_plane_ctl_tiling(uint64_t fb_modifier); |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 62f215b12eb5..f3a1d6a5cabe 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) | |||
173 | if (IS_I945GM(dev_priv)) | 173 | if (IS_I945GM(dev_priv)) |
174 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 174 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
175 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 175 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
176 | fbc_ctl |= params->fb.fence_reg; | 176 | fbc_ctl |= params->vma->fence->id; |
177 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 177 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
178 | } | 178 | } |
179 | 179 | ||
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) | |||
193 | else | 193 | else |
194 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | 194 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; |
195 | 195 | ||
196 | if (params->fb.fence_reg != I915_FENCE_REG_NONE) { | 196 | if (params->vma->fence) { |
197 | dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; | 197 | dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id; |
198 | I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); | 198 | I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); |
199 | } else { | 199 | } else { |
200 | I915_WRITE(DPFC_FENCE_YOFF, 0); | 200 | I915_WRITE(DPFC_FENCE_YOFF, 0); |
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) | |||
251 | break; | 251 | break; |
252 | } | 252 | } |
253 | 253 | ||
254 | if (params->fb.fence_reg != I915_FENCE_REG_NONE) { | 254 | if (params->vma->fence) { |
255 | dpfc_ctl |= DPFC_CTL_FENCE_EN; | 255 | dpfc_ctl |= DPFC_CTL_FENCE_EN; |
256 | if (IS_GEN5(dev_priv)) | 256 | if (IS_GEN5(dev_priv)) |
257 | dpfc_ctl |= params->fb.fence_reg; | 257 | dpfc_ctl |= params->vma->fence->id; |
258 | if (IS_GEN6(dev_priv)) { | 258 | if (IS_GEN6(dev_priv)) { |
259 | I915_WRITE(SNB_DPFC_CTL_SA, | 259 | I915_WRITE(SNB_DPFC_CTL_SA, |
260 | SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); | 260 | SNB_CPU_FENCE_ENABLE | |
261 | params->vma->fence->id); | ||
261 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, | 262 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, |
262 | params->crtc.fence_y_offset); | 263 | params->crtc.fence_y_offset); |
263 | } | 264 | } |
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) | |||
269 | } | 270 | } |
270 | 271 | ||
271 | I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); | 272 | I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); |
272 | I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); | 273 | I915_WRITE(ILK_FBC_RT_BASE, |
274 | i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID); | ||
273 | /* enable it... */ | 275 | /* enable it... */ |
274 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | 276 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
275 | 277 | ||
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) | |||
319 | break; | 321 | break; |
320 | } | 322 | } |
321 | 323 | ||
322 | if (params->fb.fence_reg != I915_FENCE_REG_NONE) { | 324 | if (params->vma->fence) { |
323 | dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; | 325 | dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; |
324 | I915_WRITE(SNB_DPFC_CTL_SA, | 326 | I915_WRITE(SNB_DPFC_CTL_SA, |
325 | SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); | 327 | SNB_CPU_FENCE_ENABLE | |
328 | params->vma->fence->id); | ||
326 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); | 329 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); |
327 | } else { | 330 | } else { |
328 | I915_WRITE(SNB_DPFC_CTL_SA,0); | 331 | I915_WRITE(SNB_DPFC_CTL_SA,0); |
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) | |||
727 | return effective_w <= max_w && effective_h <= max_h; | 730 | return effective_w <= max_w && effective_h <= max_h; |
728 | } | 731 | } |
729 | 732 | ||
730 | /* XXX replace me when we have VMA tracking for intel_plane_state */ | ||
731 | static int get_fence_id(struct drm_framebuffer *fb) | ||
732 | { | ||
733 | struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL); | ||
734 | |||
735 | return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE; | ||
736 | } | ||
737 | |||
738 | static void intel_fbc_update_state_cache(struct intel_crtc *crtc, | 733 | static void intel_fbc_update_state_cache(struct intel_crtc *crtc, |
739 | struct intel_crtc_state *crtc_state, | 734 | struct intel_crtc_state *crtc_state, |
740 | struct intel_plane_state *plane_state) | 735 | struct intel_plane_state *plane_state) |
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, | |||
743 | struct intel_fbc *fbc = &dev_priv->fbc; | 738 | struct intel_fbc *fbc = &dev_priv->fbc; |
744 | struct intel_fbc_state_cache *cache = &fbc->state_cache; | 739 | struct intel_fbc_state_cache *cache = &fbc->state_cache; |
745 | struct drm_framebuffer *fb = plane_state->base.fb; | 740 | struct drm_framebuffer *fb = plane_state->base.fb; |
746 | struct drm_i915_gem_object *obj; | 741 | |
742 | cache->vma = NULL; | ||
747 | 743 | ||
748 | cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; | 744 | cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; |
749 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 745 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, | |||
758 | if (!cache->plane.visible) | 754 | if (!cache->plane.visible) |
759 | return; | 755 | return; |
760 | 756 | ||
761 | obj = intel_fb_obj(fb); | ||
762 | |||
763 | /* FIXME: We lack the proper locking here, so only run this on the | ||
764 | * platforms that need. */ | ||
765 | if (IS_GEN(dev_priv, 5, 6)) | ||
766 | cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL); | ||
767 | cache->fb.pixel_format = fb->pixel_format; | 757 | cache->fb.pixel_format = fb->pixel_format; |
768 | cache->fb.stride = fb->pitches[0]; | 758 | cache->fb.stride = fb->pitches[0]; |
769 | cache->fb.fence_reg = get_fence_id(fb); | 759 | |
770 | cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); | 760 | cache->vma = plane_state->vma; |
771 | } | 761 | } |
772 | 762 | ||
773 | static bool intel_fbc_can_activate(struct intel_crtc *crtc) | 763 | static bool intel_fbc_can_activate(struct intel_crtc *crtc) |
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) | |||
784 | return false; | 774 | return false; |
785 | } | 775 | } |
786 | 776 | ||
787 | if (!cache->plane.visible) { | 777 | if (!cache->vma) { |
788 | fbc->no_fbc_reason = "primary plane not visible"; | 778 | fbc->no_fbc_reason = "primary plane not visible"; |
789 | return false; | 779 | return false; |
790 | } | 780 | } |
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) | |||
807 | * so have no fence associated with it) due to aperture constaints | 797 | * so have no fence associated with it) due to aperture constaints |
808 | * at the time of pinning. | 798 | * at the time of pinning. |
809 | */ | 799 | */ |
810 | if (cache->fb.tiling_mode != I915_TILING_X || | 800 | if (!cache->vma->fence) { |
811 | cache->fb.fence_reg == I915_FENCE_REG_NONE) { | ||
812 | fbc->no_fbc_reason = "framebuffer not tiled or fenced"; | 801 | fbc->no_fbc_reason = "framebuffer not tiled or fenced"; |
813 | return false; | 802 | return false; |
814 | } | 803 | } |
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, | |||
888 | * zero. */ | 877 | * zero. */ |
889 | memset(params, 0, sizeof(*params)); | 878 | memset(params, 0, sizeof(*params)); |
890 | 879 | ||
880 | params->vma = cache->vma; | ||
881 | |||
891 | params->crtc.pipe = crtc->pipe; | 882 | params->crtc.pipe = crtc->pipe; |
892 | params->crtc.plane = crtc->plane; | 883 | params->crtc.plane = crtc->plane; |
893 | params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); | 884 | params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); |
894 | 885 | ||
895 | params->fb.pixel_format = cache->fb.pixel_format; | 886 | params->fb.pixel_format = cache->fb.pixel_format; |
896 | params->fb.stride = cache->fb.stride; | 887 | params->fb.stride = cache->fb.stride; |
897 | params->fb.fence_reg = cache->fb.fence_reg; | ||
898 | 888 | ||
899 | params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); | 889 | params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); |
900 | |||
901 | params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset; | ||
902 | } | 890 | } |
903 | 891 | ||
904 | static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, | 892 | static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 8cf2d80f2254..f4a8c4fc57c4 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
284 | out_destroy_fbi: | 284 | out_destroy_fbi: |
285 | drm_fb_helper_release_fbi(helper); | 285 | drm_fb_helper_release_fbi(helper); |
286 | out_unpin: | 286 | out_unpin: |
287 | intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); | 287 | intel_unpin_fb_vma(vma); |
288 | out_unlock: | 288 | out_unlock: |
289 | mutex_unlock(&dev->struct_mutex); | 289 | mutex_unlock(&dev->struct_mutex); |
290 | return ret; | 290 | return ret; |
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) | |||
549 | 549 | ||
550 | if (ifbdev->fb) { | 550 | if (ifbdev->fb) { |
551 | mutex_lock(&ifbdev->helper.dev->struct_mutex); | 551 | mutex_lock(&ifbdev->helper.dev->struct_mutex); |
552 | intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); | 552 | intel_unpin_fb_vma(ifbdev->vma); |
553 | mutex_unlock(&ifbdev->helper.dev->struct_mutex); | 553 | mutex_unlock(&ifbdev->helper.dev->struct_mutex); |
554 | 554 | ||
555 | drm_framebuffer_remove(&ifbdev->fb->base); | 555 | drm_framebuffer_remove(&ifbdev->fb->base); |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 8f131a08d440..242a73e66d82 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane, | |||
273 | 273 | ||
274 | I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); | 274 | I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); |
275 | I915_WRITE(PLANE_SURF(pipe, plane), | 275 | I915_WRITE(PLANE_SURF(pipe, plane), |
276 | intel_fb_gtt_offset(fb, rotation) + surf_addr); | 276 | intel_plane_ggtt_offset(plane_state) + surf_addr); |
277 | POSTING_READ(PLANE_SURF(pipe, plane)); | 277 | POSTING_READ(PLANE_SURF(pipe, plane)); |
278 | } | 278 | } |
279 | 279 | ||
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane, | |||
458 | I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); | 458 | I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); |
459 | I915_WRITE(SPCNTR(pipe, plane), sprctl); | 459 | I915_WRITE(SPCNTR(pipe, plane), sprctl); |
460 | I915_WRITE(SPSURF(pipe, plane), | 460 | I915_WRITE(SPSURF(pipe, plane), |
461 | intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); | 461 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); |
462 | POSTING_READ(SPSURF(pipe, plane)); | 462 | POSTING_READ(SPSURF(pipe, plane)); |
463 | } | 463 | } |
464 | 464 | ||
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane, | |||
594 | I915_WRITE(SPRSCALE(pipe), sprscale); | 594 | I915_WRITE(SPRSCALE(pipe), sprscale); |
595 | I915_WRITE(SPRCTL(pipe), sprctl); | 595 | I915_WRITE(SPRCTL(pipe), sprctl); |
596 | I915_WRITE(SPRSURF(pipe), | 596 | I915_WRITE(SPRSURF(pipe), |
597 | intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); | 597 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); |
598 | POSTING_READ(SPRSURF(pipe)); | 598 | POSTING_READ(SPRSURF(pipe)); |
599 | } | 599 | } |
600 | 600 | ||
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane, | |||
721 | I915_WRITE(DVSSCALE(pipe), dvsscale); | 721 | I915_WRITE(DVSSCALE(pipe), dvsscale); |
722 | I915_WRITE(DVSCNTR(pipe), dvscntr); | 722 | I915_WRITE(DVSCNTR(pipe), dvscntr); |
723 | I915_WRITE(DVSSURF(pipe), | 723 | I915_WRITE(DVSSURF(pipe), |
724 | intel_fb_gtt_offset(fb, rotation) + dvssurf_offset); | 724 | intel_plane_ggtt_offset(plane_state) + dvssurf_offset); |
725 | POSTING_READ(DVSSURF(pipe)); | 725 | POSTING_READ(DVSSURF(pipe)); |
726 | } | 726 | } |
727 | 727 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 74856a8b8f35..e64f52464ecf 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c | |||
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) | |||
222 | uint32_t mpllP; | 222 | uint32_t mpllP; |
223 | 223 | ||
224 | pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); | 224 | pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); |
225 | mpllP = (mpllP >> 8) & 0xf; | ||
225 | if (!mpllP) | 226 | if (!mpllP) |
226 | mpllP = 4; | 227 | mpllP = 4; |
227 | 228 | ||
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) | |||
232 | uint32_t clock; | 233 | uint32_t clock; |
233 | 234 | ||
234 | pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); | 235 | pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); |
235 | return clock; | 236 | return clock / 1000; |
236 | } | 237 | } |
237 | 238 | ||
238 | ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); | 239 | ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index ccdce1b4eec4..d5e58a38f160 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h | |||
@@ -99,6 +99,7 @@ struct nv84_fence_priv { | |||
99 | struct nouveau_bo *bo; | 99 | struct nouveau_bo *bo; |
100 | struct nouveau_bo *bo_gart; | 100 | struct nouveau_bo *bo_gart; |
101 | u32 *suspend; | 101 | u32 *suspend; |
102 | struct mutex mutex; | ||
102 | }; | 103 | }; |
103 | 104 | ||
104 | int nv84_fence_context_new(struct nouveau_channel *); | 105 | int nv84_fence_context_new(struct nouveau_channel *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h index 187ecdb82002..21a5775028cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_led.h +++ b/drivers/gpu/drm/nouveau/nouveau_led.h | |||
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev) | |||
42 | } | 42 | } |
43 | 43 | ||
44 | /* nouveau_led.c */ | 44 | /* nouveau_led.c */ |
45 | #if IS_ENABLED(CONFIG_LEDS_CLASS) | 45 | #if IS_REACHABLE(CONFIG_LEDS_CLASS) |
46 | int nouveau_led_init(struct drm_device *dev); | 46 | int nouveau_led_init(struct drm_device *dev); |
47 | void nouveau_led_suspend(struct drm_device *dev); | 47 | void nouveau_led_suspend(struct drm_device *dev); |
48 | void nouveau_led_resume(struct drm_device *dev); | 48 | void nouveau_led_resume(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 08f9c6fa0f7f..1fba38622744 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c | |||
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) | |||
313 | if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { | 313 | if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { |
314 | /* block access to objects not created via this interface */ | 314 | /* block access to objects not created via this interface */ |
315 | owner = argv->v0.owner; | 315 | owner = argv->v0.owner; |
316 | if (argv->v0.object == 0ULL) | 316 | if (argv->v0.object == 0ULL && |
317 | argv->v0.type != NVIF_IOCTL_V0_DEL) | ||
317 | argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ | 318 | argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ |
318 | else | 319 | else |
319 | argv->v0.owner = NVDRM_OBJECT_USIF; | 320 | argv->v0.owner = NVDRM_OBJECT_USIF; |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2c2c64507661..32097fd615fd 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
4052 | } | 4052 | } |
4053 | } | 4053 | } |
4054 | 4054 | ||
4055 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
4056 | if (crtc->state->event) | ||
4057 | drm_crtc_vblank_get(crtc); | ||
4058 | } | ||
4059 | |||
4055 | /* Update plane(s). */ | 4060 | /* Update plane(s). */ |
4056 | for_each_plane_in_state(state, plane, plane_state, i) { | 4061 | for_each_plane_in_state(state, plane, plane_state, i) { |
4057 | struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); | 4062 | struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); |
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
4101 | drm_crtc_send_vblank_event(crtc, crtc->state->event); | 4106 | drm_crtc_send_vblank_event(crtc, crtc->state->event); |
4102 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | 4107 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
4103 | crtc->state->event = NULL; | 4108 | crtc->state->event = NULL; |
4109 | drm_crtc_vblank_put(crtc); | ||
4104 | } | 4110 | } |
4105 | } | 4111 | } |
4106 | 4112 | ||
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 52b87ae83e7b..f0b322bec7df 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c | |||
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan) | |||
107 | struct nv84_fence_chan *fctx = chan->fence; | 107 | struct nv84_fence_chan *fctx = chan->fence; |
108 | 108 | ||
109 | nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); | 109 | nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); |
110 | mutex_lock(&priv->mutex); | ||
110 | nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); | 111 | nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); |
111 | nouveau_bo_vma_del(priv->bo, &fctx->vma); | 112 | nouveau_bo_vma_del(priv->bo, &fctx->vma); |
113 | mutex_unlock(&priv->mutex); | ||
112 | nouveau_fence_context_del(&fctx->base); | 114 | nouveau_fence_context_del(&fctx->base); |
113 | chan->fence = NULL; | 115 | chan->fence = NULL; |
114 | nouveau_fence_context_free(&fctx->base); | 116 | nouveau_fence_context_free(&fctx->base); |
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan) | |||
134 | fctx->base.sync32 = nv84_fence_sync32; | 136 | fctx->base.sync32 = nv84_fence_sync32; |
135 | fctx->base.sequence = nv84_fence_read(chan); | 137 | fctx->base.sequence = nv84_fence_read(chan); |
136 | 138 | ||
139 | mutex_lock(&priv->mutex); | ||
137 | ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); | 140 | ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); |
138 | if (ret == 0) { | 141 | if (ret == 0) { |
139 | ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, | 142 | ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, |
140 | &fctx->vma_gart); | 143 | &fctx->vma_gart); |
141 | } | 144 | } |
145 | mutex_unlock(&priv->mutex); | ||
142 | 146 | ||
143 | if (ret) | 147 | if (ret) |
144 | nv84_fence_context_del(chan); | 148 | nv84_fence_context_del(chan); |
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm) | |||
212 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); | 216 | priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); |
213 | priv->base.uevent = true; | 217 | priv->base.uevent = true; |
214 | 218 | ||
219 | mutex_init(&priv->mutex); | ||
220 | |||
215 | /* Use VRAM if there is any ; otherwise fallback to system memory */ | 221 | /* Use VRAM if there is any ; otherwise fallback to system memory */ |
216 | domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : | 222 | domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : |
217 | /* | 223 | /* |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c index 6f0436df0219..f8f2f16c22a2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | |||
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1) | |||
59 | ); | 59 | ); |
60 | } | 60 | } |
61 | for (i = 0; i < size; i++) | 61 | for (i = 0; i < size; i++) |
62 | nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]); | 62 | nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]); |
63 | for (; i < 0x60; i++) | 63 | for (; i < 0x60; i++) |
64 | nvkm_wr32(device, 0x61c440 + soff, (i << 8)); | 64 | nvkm_wr32(device, 0x61c440 + soff, (i << 8)); |
65 | nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); | 65 | nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 567466f93cd5..0db8efbf1c2e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | |||
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device) | |||
433 | case 0x94: | 433 | case 0x94: |
434 | case 0x96: | 434 | case 0x96: |
435 | case 0x98: | 435 | case 0x98: |
436 | case 0xaa: | ||
437 | case 0xac: | ||
438 | return true; | 436 | return true; |
439 | default: | 437 | default: |
440 | break; | 438 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e0c143b865f3..30bd4a6a9d46 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -97,9 +97,10 @@ | |||
97 | * 2.46.0 - Add PFP_SYNC_ME support on evergreen | 97 | * 2.46.0 - Add PFP_SYNC_ME support on evergreen |
98 | * 2.47.0 - Add UVD_NO_OP register support | 98 | * 2.47.0 - Add UVD_NO_OP register support |
99 | * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI | 99 | * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI |
100 | * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values | ||
100 | */ | 101 | */ |
101 | #define KMS_DRIVER_MAJOR 2 | 102 | #define KMS_DRIVER_MAJOR 2 |
102 | #define KMS_DRIVER_MINOR 48 | 103 | #define KMS_DRIVER_MINOR 49 |
103 | #define KMS_DRIVER_PATCHLEVEL 0 | 104 | #define KMS_DRIVER_PATCHLEVEL 0 |
104 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 105 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
105 | int radeon_driver_unload_kms(struct drm_device *dev); | 106 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0bcffd8a7bd3..96683f5b2b1b 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
220 | 220 | ||
221 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | 221 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; |
222 | 222 | ||
223 | args->vram_size = rdev->mc.real_vram_size; | 223 | args->vram_size = (u64)man->size << PAGE_SHIFT; |
224 | args->vram_visible = (u64)man->size << PAGE_SHIFT; | 224 | args->vram_visible = rdev->mc.visible_vram_size; |
225 | args->vram_visible -= rdev->vram_pin_size; | 225 | args->vram_visible -= rdev->vram_pin_size; |
226 | args->gart_size = rdev->mc.gtt_size; | 226 | args->gart_size = rdev->mc.gtt_size; |
227 | args->gart_size -= rdev->gart_pin_size; | 227 | args->gart_size -= rdev->gart_pin_size; |
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 881bf489478b..686cdd3c86f2 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
@@ -858,7 +858,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, | |||
858 | } | 858 | } |
859 | } | 859 | } |
860 | plane = &vc4_plane->base; | 860 | plane = &vc4_plane->base; |
861 | ret = drm_universal_plane_init(dev, plane, 0xff, | 861 | ret = drm_universal_plane_init(dev, plane, 0, |
862 | &vc4_plane_funcs, | 862 | &vc4_plane_funcs, |
863 | formats, num_formats, | 863 | formats, num_formats, |
864 | type, NULL); | 864 | type, NULL); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 723fd763da8e..7a96798b9c0a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -481,8 +481,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info) | |||
481 | mode_cmd.height = var->yres; | 481 | mode_cmd.height = var->yres; |
482 | mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width; | 482 | mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width; |
483 | mode_cmd.pixel_format = | 483 | mode_cmd.pixel_format = |
484 | drm_mode_legacy_fb_format(var->bits_per_pixel, | 484 | drm_mode_legacy_fb_format(var->bits_per_pixel, depth); |
485 | ((var->bits_per_pixel + 7) / 8) * mode_cmd.width); | ||
486 | 485 | ||
487 | cur_fb = par->set_fb; | 486 | cur_fb = par->set_fb; |
488 | if (cur_fb && cur_fb->width == mode_cmd.width && | 487 | if (cur_fb && cur_fb->width == mode_cmd.width && |
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index f31a778b0851..b22d0f83f8e3 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c | |||
@@ -168,7 +168,7 @@ struct cp2112_device { | |||
168 | atomic_t xfer_avail; | 168 | atomic_t xfer_avail; |
169 | struct gpio_chip gc; | 169 | struct gpio_chip gc; |
170 | u8 *in_out_buffer; | 170 | u8 *in_out_buffer; |
171 | spinlock_t lock; | 171 | struct mutex lock; |
172 | 172 | ||
173 | struct gpio_desc *desc[8]; | 173 | struct gpio_desc *desc[8]; |
174 | bool gpio_poll; | 174 | bool gpio_poll; |
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
186 | struct cp2112_device *dev = gpiochip_get_data(chip); | 186 | struct cp2112_device *dev = gpiochip_get_data(chip); |
187 | struct hid_device *hdev = dev->hdev; | 187 | struct hid_device *hdev = dev->hdev; |
188 | u8 *buf = dev->in_out_buffer; | 188 | u8 *buf = dev->in_out_buffer; |
189 | unsigned long flags; | ||
190 | int ret; | 189 | int ret; |
191 | 190 | ||
192 | spin_lock_irqsave(&dev->lock, flags); | 191 | mutex_lock(&dev->lock); |
193 | 192 | ||
194 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, | 193 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, |
195 | CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, | 194 | CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, |
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
213 | ret = 0; | 212 | ret = 0; |
214 | 213 | ||
215 | exit: | 214 | exit: |
216 | spin_unlock_irqrestore(&dev->lock, flags); | 215 | mutex_unlock(&dev->lock); |
217 | return ret <= 0 ? ret : -EIO; | 216 | return ret < 0 ? ret : -EIO; |
218 | } | 217 | } |
219 | 218 | ||
220 | static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | 219 | static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) |
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
222 | struct cp2112_device *dev = gpiochip_get_data(chip); | 221 | struct cp2112_device *dev = gpiochip_get_data(chip); |
223 | struct hid_device *hdev = dev->hdev; | 222 | struct hid_device *hdev = dev->hdev; |
224 | u8 *buf = dev->in_out_buffer; | 223 | u8 *buf = dev->in_out_buffer; |
225 | unsigned long flags; | ||
226 | int ret; | 224 | int ret; |
227 | 225 | ||
228 | spin_lock_irqsave(&dev->lock, flags); | 226 | mutex_lock(&dev->lock); |
229 | 227 | ||
230 | buf[0] = CP2112_GPIO_SET; | 228 | buf[0] = CP2112_GPIO_SET; |
231 | buf[1] = value ? 0xff : 0; | 229 | buf[1] = value ? 0xff : 0; |
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
237 | if (ret < 0) | 235 | if (ret < 0) |
238 | hid_err(hdev, "error setting GPIO values: %d\n", ret); | 236 | hid_err(hdev, "error setting GPIO values: %d\n", ret); |
239 | 237 | ||
240 | spin_unlock_irqrestore(&dev->lock, flags); | 238 | mutex_unlock(&dev->lock); |
241 | } | 239 | } |
242 | 240 | ||
243 | static int cp2112_gpio_get_all(struct gpio_chip *chip) | 241 | static int cp2112_gpio_get_all(struct gpio_chip *chip) |
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip) | |||
245 | struct cp2112_device *dev = gpiochip_get_data(chip); | 243 | struct cp2112_device *dev = gpiochip_get_data(chip); |
246 | struct hid_device *hdev = dev->hdev; | 244 | struct hid_device *hdev = dev->hdev; |
247 | u8 *buf = dev->in_out_buffer; | 245 | u8 *buf = dev->in_out_buffer; |
248 | unsigned long flags; | ||
249 | int ret; | 246 | int ret; |
250 | 247 | ||
251 | spin_lock_irqsave(&dev->lock, flags); | 248 | mutex_lock(&dev->lock); |
252 | 249 | ||
253 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, | 250 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, |
254 | CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, | 251 | CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, |
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip) | |||
262 | ret = buf[1]; | 259 | ret = buf[1]; |
263 | 260 | ||
264 | exit: | 261 | exit: |
265 | spin_unlock_irqrestore(&dev->lock, flags); | 262 | mutex_unlock(&dev->lock); |
266 | 263 | ||
267 | return ret; | 264 | return ret; |
268 | } | 265 | } |
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip, | |||
284 | struct cp2112_device *dev = gpiochip_get_data(chip); | 281 | struct cp2112_device *dev = gpiochip_get_data(chip); |
285 | struct hid_device *hdev = dev->hdev; | 282 | struct hid_device *hdev = dev->hdev; |
286 | u8 *buf = dev->in_out_buffer; | 283 | u8 *buf = dev->in_out_buffer; |
287 | unsigned long flags; | ||
288 | int ret; | 284 | int ret; |
289 | 285 | ||
290 | spin_lock_irqsave(&dev->lock, flags); | 286 | mutex_lock(&dev->lock); |
291 | 287 | ||
292 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, | 288 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, |
293 | CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, | 289 | CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, |
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip, | |||
308 | goto fail; | 304 | goto fail; |
309 | } | 305 | } |
310 | 306 | ||
311 | spin_unlock_irqrestore(&dev->lock, flags); | 307 | mutex_unlock(&dev->lock); |
312 | 308 | ||
313 | /* | 309 | /* |
314 | * Set gpio value when output direction is already set, | 310 | * Set gpio value when output direction is already set, |
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip, | |||
319 | return 0; | 315 | return 0; |
320 | 316 | ||
321 | fail: | 317 | fail: |
322 | spin_unlock_irqrestore(&dev->lock, flags); | 318 | mutex_unlock(&dev->lock); |
323 | return ret < 0 ? ret : -EIO; | 319 | return ret < 0 ? ret : -EIO; |
324 | } | 320 | } |
325 | 321 | ||
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
1235 | if (!dev->in_out_buffer) | 1231 | if (!dev->in_out_buffer) |
1236 | return -ENOMEM; | 1232 | return -ENOMEM; |
1237 | 1233 | ||
1238 | spin_lock_init(&dev->lock); | 1234 | mutex_init(&dev->lock); |
1239 | 1235 | ||
1240 | ret = hid_parse(hdev); | 1236 | ret = hid_parse(hdev); |
1241 | if (ret) { | 1237 | if (ret) { |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index f46f2c5117fa..350accfee8e8 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -76,6 +76,9 @@ | |||
76 | #define USB_VENDOR_ID_ALPS_JP 0x044E | 76 | #define USB_VENDOR_ID_ALPS_JP 0x044E |
77 | #define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B | 77 | #define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B |
78 | 78 | ||
79 | #define USB_VENDOR_ID_AMI 0x046b | ||
80 | #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10 | ||
81 | |||
79 | #define USB_VENDOR_ID_ANTON 0x1130 | 82 | #define USB_VENDOR_ID_ANTON 0x1130 |
80 | #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 | 83 | #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 |
81 | 84 | ||
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index c5c5fbe9d605..52026dc94d5c 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c | |||
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = { | |||
872 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), | 872 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), |
873 | .driver_data = LG_NOGET | LG_FF4 }, | 873 | .driver_data = LG_NOGET | LG_FF4 }, |
874 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), | 874 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), |
875 | .driver_data = LG_FF2 }, | 875 | .driver_data = LG_NOGET | LG_FF2 }, |
876 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), | 876 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), |
877 | .driver_data = LG_FF3 }, | 877 | .driver_data = LG_FF3 }, |
878 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), | 878 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index e9d6cc7cdfc5..30a2977e2645 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -57,6 +57,7 @@ static const struct hid_blacklist { | |||
57 | { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, | 57 | { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, |
58 | { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, | 58 | { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, |
59 | { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS }, | 59 | { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS }, |
60 | { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL }, | ||
60 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, | 61 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, |
61 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, | 62 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, |
62 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, | 63 | { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 0884dc9554fd..672145b0d8f5 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom) | |||
166 | wacom->id[0] = STYLUS_DEVICE_ID; | 166 | wacom->id[0] = STYLUS_DEVICE_ID; |
167 | } | 167 | } |
168 | 168 | ||
169 | pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1)); | 169 | if (prox) { |
170 | if (features->pressure_max > 255) | 170 | pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1)); |
171 | pressure = (pressure << 1) | ((data[4] >> 6) & 1); | 171 | if (features->pressure_max > 255) |
172 | pressure += (features->pressure_max + 1) / 2; | 172 | pressure = (pressure << 1) | ((data[4] >> 6) & 1); |
173 | 173 | pressure += (features->pressure_max + 1) / 2; | |
174 | input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14)); | 174 | |
175 | input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14)); | 175 | input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14)); |
176 | input_report_abs(input, ABS_PRESSURE, pressure); | 176 | input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14)); |
177 | 177 | input_report_abs(input, ABS_PRESSURE, pressure); | |
178 | input_report_key(input, BTN_TOUCH, data[4] & 0x08); | 178 | |
179 | input_report_key(input, BTN_STYLUS, data[4] & 0x10); | 179 | input_report_key(input, BTN_TOUCH, data[4] & 0x08); |
180 | /* Only allow the stylus2 button to be reported for the pen tool. */ | 180 | input_report_key(input, BTN_STYLUS, data[4] & 0x10); |
181 | input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20)); | 181 | /* Only allow the stylus2 button to be reported for the pen tool. */ |
182 | input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20)); | ||
183 | } | ||
182 | 184 | ||
183 | if (!prox) | 185 | if (!prox) |
184 | wacom->id[0] = 0; | 186 | wacom->id[0] = 0; |
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index cd49cb17eb7f..308dbda700eb 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c | |||
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, | |||
383 | return ret; | 383 | return ret; |
384 | } | 384 | } |
385 | 385 | ||
386 | init_cached_read_index(channel); | ||
386 | next_read_location = hv_get_next_read_location(inring_info); | 387 | next_read_location = hv_get_next_read_location(inring_info); |
387 | next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, | 388 | next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, |
388 | sizeof(desc), | 389 | sizeof(desc), |
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 686971263bef..45d6771fac8c 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c | |||
@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev) | |||
962 | goto err_clk_dis; | 962 | goto err_clk_dis; |
963 | } | 963 | } |
964 | 964 | ||
965 | ret = i2c_add_adapter(&id->adap); | ||
966 | if (ret < 0) | ||
967 | goto err_clk_dis; | ||
968 | |||
969 | /* | 965 | /* |
970 | * Cadence I2C controller has a bug wherein it generates | 966 | * Cadence I2C controller has a bug wherein it generates |
971 | * invalid read transaction after HW timeout in master receiver mode. | 967 | * invalid read transaction after HW timeout in master receiver mode. |
@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev) | |||
975 | */ | 971 | */ |
976 | cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); | 972 | cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); |
977 | 973 | ||
974 | ret = i2c_add_adapter(&id->adap); | ||
975 | if (ret < 0) | ||
976 | goto err_clk_dis; | ||
977 | |||
978 | dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", | 978 | dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", |
979 | id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); | 979 | id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); |
980 | 980 | ||
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index c62b7cd475f8..3310f2e0dbd3 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/of.h> | 29 | #include <linux/of.h> |
30 | #include <linux/of_device.h> | 30 | #include <linux/of_device.h> |
31 | #include <linux/pinctrl/consumer.h> | ||
31 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
32 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
33 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
@@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev) | |||
636 | return 0; | 637 | return 0; |
637 | } | 638 | } |
638 | 639 | ||
640 | #ifdef CONFIG_PM_SLEEP | ||
641 | static int lpi2c_imx_suspend(struct device *dev) | ||
642 | { | ||
643 | pinctrl_pm_select_sleep_state(dev); | ||
644 | |||
645 | return 0; | ||
646 | } | ||
647 | |||
648 | static int lpi2c_imx_resume(struct device *dev) | ||
649 | { | ||
650 | pinctrl_pm_select_default_state(dev); | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | #endif | ||
655 | |||
656 | static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume); | ||
657 | |||
639 | static struct platform_driver lpi2c_imx_driver = { | 658 | static struct platform_driver lpi2c_imx_driver = { |
640 | .probe = lpi2c_imx_probe, | 659 | .probe = lpi2c_imx_probe, |
641 | .remove = lpi2c_imx_remove, | 660 | .remove = lpi2c_imx_remove, |
642 | .driver = { | 661 | .driver = { |
643 | .name = DRIVER_NAME, | 662 | .name = DRIVER_NAME, |
644 | .of_match_table = lpi2c_imx_of_match, | 663 | .of_match_table = lpi2c_imx_of_match, |
664 | .pm = &imx_lpi2c_pm, | ||
645 | }, | 665 | }, |
646 | }; | 666 | }; |
647 | 667 | ||
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index e34d82e79b98..c21ca7bf2efe 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c | |||
@@ -58,7 +58,7 @@ | |||
58 | #define SMBSLVDAT (0xC + piix4_smba) | 58 | #define SMBSLVDAT (0xC + piix4_smba) |
59 | 59 | ||
60 | /* count for request_region */ | 60 | /* count for request_region */ |
61 | #define SMBIOSIZE 8 | 61 | #define SMBIOSIZE 9 |
62 | 62 | ||
63 | /* PCI Address Constants */ | 63 | /* PCI Address Constants */ |
64 | #define SMBBA 0x090 | 64 | #define SMBBA 0x090 |
@@ -592,6 +592,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | |||
592 | u8 port; | 592 | u8 port; |
593 | int retval; | 593 | int retval; |
594 | 594 | ||
595 | mutex_lock(&piix4_mutex_sb800); | ||
596 | |||
595 | /* Request the SMBUS semaphore, avoid conflicts with the IMC */ | 597 | /* Request the SMBUS semaphore, avoid conflicts with the IMC */ |
596 | smbslvcnt = inb_p(SMBSLVCNT); | 598 | smbslvcnt = inb_p(SMBSLVCNT); |
597 | do { | 599 | do { |
@@ -605,10 +607,10 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | |||
605 | usleep_range(1000, 2000); | 607 | usleep_range(1000, 2000); |
606 | } while (--retries); | 608 | } while (--retries); |
607 | /* SMBus is still owned by the IMC, we give up */ | 609 | /* SMBus is still owned by the IMC, we give up */ |
608 | if (!retries) | 610 | if (!retries) { |
611 | mutex_unlock(&piix4_mutex_sb800); | ||
609 | return -EBUSY; | 612 | return -EBUSY; |
610 | 613 | } | |
611 | mutex_lock(&piix4_mutex_sb800); | ||
612 | 614 | ||
613 | outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); | 615 | outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); |
614 | smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); | 616 | smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); |
@@ -623,11 +625,11 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | |||
623 | 625 | ||
624 | outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1); | 626 | outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1); |
625 | 627 | ||
626 | mutex_unlock(&piix4_mutex_sb800); | ||
627 | |||
628 | /* Release the semaphore */ | 628 | /* Release the semaphore */ |
629 | outb_p(smbslvcnt | 0x20, SMBSLVCNT); | 629 | outb_p(smbslvcnt | 0x20, SMBSLVCNT); |
630 | 630 | ||
631 | mutex_unlock(&piix4_mutex_sb800); | ||
632 | |||
631 | return retval; | 633 | return retval; |
632 | } | 634 | } |
633 | 635 | ||
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c index 2bbf0c521beb..7d61b566e148 100644 --- a/drivers/iio/adc/palmas_gpadc.c +++ b/drivers/iio/adc/palmas_gpadc.c | |||
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc) | |||
775 | 775 | ||
776 | static int palmas_gpadc_suspend(struct device *dev) | 776 | static int palmas_gpadc_suspend(struct device *dev) |
777 | { | 777 | { |
778 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | 778 | struct iio_dev *indio_dev = dev_get_drvdata(dev); |
779 | struct palmas_gpadc *adc = iio_priv(indio_dev); | 779 | struct palmas_gpadc *adc = iio_priv(indio_dev); |
780 | int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; | 780 | int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; |
781 | int ret; | 781 | int ret; |
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev) | |||
798 | 798 | ||
799 | static int palmas_gpadc_resume(struct device *dev) | 799 | static int palmas_gpadc_resume(struct device *dev) |
800 | { | 800 | { |
801 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | 801 | struct iio_dev *indio_dev = dev_get_drvdata(dev); |
802 | struct palmas_gpadc *adc = iio_priv(indio_dev); | 802 | struct palmas_gpadc *adc = iio_priv(indio_dev); |
803 | int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; | 803 | int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; |
804 | int ret; | 804 | int ret; |
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 9a081465c42f..6bb23a49e81e 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c | |||
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match); | |||
422 | 422 | ||
423 | static int __maybe_unused afe4403_suspend(struct device *dev) | 423 | static int __maybe_unused afe4403_suspend(struct device *dev) |
424 | { | 424 | { |
425 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | 425 | struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev)); |
426 | struct afe4403_data *afe = iio_priv(indio_dev); | 426 | struct afe4403_data *afe = iio_priv(indio_dev); |
427 | int ret; | 427 | int ret; |
428 | 428 | ||
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev) | |||
443 | 443 | ||
444 | static int __maybe_unused afe4403_resume(struct device *dev) | 444 | static int __maybe_unused afe4403_resume(struct device *dev) |
445 | { | 445 | { |
446 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | 446 | struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev)); |
447 | struct afe4403_data *afe = iio_priv(indio_dev); | 447 | struct afe4403_data *afe = iio_priv(indio_dev); |
448 | int ret; | 448 | int ret; |
449 | 449 | ||
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 45266404f7e3..964f5231a831 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c | |||
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match); | |||
428 | 428 | ||
429 | static int __maybe_unused afe4404_suspend(struct device *dev) | 429 | static int __maybe_unused afe4404_suspend(struct device *dev) |
430 | { | 430 | { |
431 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | 431 | struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); |
432 | struct afe4404_data *afe = iio_priv(indio_dev); | 432 | struct afe4404_data *afe = iio_priv(indio_dev); |
433 | int ret; | 433 | int ret; |
434 | 434 | ||
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev) | |||
449 | 449 | ||
450 | static int __maybe_unused afe4404_resume(struct device *dev) | 450 | static int __maybe_unused afe4404_resume(struct device *dev) |
451 | { | 451 | { |
452 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | 452 | struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); |
453 | struct afe4404_data *afe = iio_priv(indio_dev); | 453 | struct afe4404_data *afe = iio_priv(indio_dev); |
454 | int ret; | 454 | int ret; |
455 | 455 | ||
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c index 90ab8a2d2846..183c14329d6e 100644 --- a/drivers/iio/health/max30100.c +++ b/drivers/iio/health/max30100.c | |||
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private) | |||
238 | 238 | ||
239 | mutex_lock(&data->lock); | 239 | mutex_lock(&data->lock); |
240 | 240 | ||
241 | while (cnt || (cnt = max30100_fifo_count(data) > 0)) { | 241 | while (cnt || (cnt = max30100_fifo_count(data)) > 0) { |
242 | ret = max30100_read_measurement(data); | 242 | ret = max30100_read_measurement(data); |
243 | if (ret) | 243 | if (ret) |
244 | break; | 244 | break; |
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c index 9c47bc98f3ac..2a22ad920333 100644 --- a/drivers/iio/humidity/dht11.c +++ b/drivers/iio/humidity/dht11.c | |||
@@ -71,7 +71,8 @@ | |||
71 | * a) select an implementation using busy loop polling on those systems | 71 | * a) select an implementation using busy loop polling on those systems |
72 | * b) use the checksum to do some probabilistic decoding | 72 | * b) use the checksum to do some probabilistic decoding |
73 | */ | 73 | */ |
74 | #define DHT11_START_TRANSMISSION 18 /* ms */ | 74 | #define DHT11_START_TRANSMISSION_MIN 18000 /* us */ |
75 | #define DHT11_START_TRANSMISSION_MAX 20000 /* us */ | ||
75 | #define DHT11_MIN_TIMERES 34000 /* ns */ | 76 | #define DHT11_MIN_TIMERES 34000 /* ns */ |
76 | #define DHT11_THRESHOLD 49000 /* ns */ | 77 | #define DHT11_THRESHOLD 49000 /* ns */ |
77 | #define DHT11_AMBIG_LOW 23000 /* ns */ | 78 | #define DHT11_AMBIG_LOW 23000 /* ns */ |
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev, | |||
228 | ret = gpio_direction_output(dht11->gpio, 0); | 229 | ret = gpio_direction_output(dht11->gpio, 0); |
229 | if (ret) | 230 | if (ret) |
230 | goto err; | 231 | goto err; |
231 | msleep(DHT11_START_TRANSMISSION); | 232 | usleep_range(DHT11_START_TRANSMISSION_MIN, |
233 | DHT11_START_TRANSMISSION_MAX); | ||
232 | ret = gpio_direction_input(dht11->gpio); | 234 | ret = gpio_direction_input(dht11->gpio); |
233 | if (ret) | 235 | if (ret) |
234 | goto err; | 236 | goto err; |
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index d0faca294006..86a6585b847d 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c | |||
@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length) | |||
59 | 59 | ||
60 | case RXE_MEM_TYPE_MR: | 60 | case RXE_MEM_TYPE_MR: |
61 | case RXE_MEM_TYPE_FMR: | 61 | case RXE_MEM_TYPE_FMR: |
62 | return ((iova < mem->iova) || | 62 | if (iova < mem->iova || |
63 | ((iova + length) > (mem->iova + mem->length))) ? | 63 | length > mem->length || |
64 | -EFAULT : 0; | 64 | iova > mem->iova + mem->length - length) |
65 | return -EFAULT; | ||
66 | return 0; | ||
65 | 67 | ||
66 | default: | 68 | default: |
67 | return -EFAULT; | 69 | return -EFAULT; |
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 3435efff8799..5bcf07328972 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c | |||
@@ -479,7 +479,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp, | |||
479 | goto err2; | 479 | goto err2; |
480 | } | 480 | } |
481 | 481 | ||
482 | resid = mtu; | 482 | qp->resp.resid = mtu; |
483 | } else { | 483 | } else { |
484 | if (pktlen != resid) { | 484 | if (pktlen != resid) { |
485 | state = RESPST_ERR_LENGTH; | 485 | state = RESPST_ERR_LENGTH; |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 92595b98e7ed..022be0e22eba 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
@@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev) | |||
263 | return -EINVAL; | 263 | return -EINVAL; |
264 | } | 264 | } |
265 | 265 | ||
266 | if (test_bit(ABS_MT_SLOT, dev->absbit)) { | 266 | if (test_bit(EV_ABS, dev->evbit)) { |
267 | nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; | 267 | input_alloc_absinfo(dev); |
268 | error = input_mt_init_slots(dev, nslot, 0); | 268 | if (!dev->absinfo) { |
269 | if (error) | 269 | error = -EINVAL; |
270 | goto fail1; | 270 | goto fail1; |
271 | } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { | 271 | } |
272 | input_set_events_per_packet(dev, 60); | 272 | |
273 | if (test_bit(ABS_MT_SLOT, dev->absbit)) { | ||
274 | nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; | ||
275 | error = input_mt_init_slots(dev, nslot, 0); | ||
276 | if (error) | ||
277 | goto fail1; | ||
278 | } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { | ||
279 | input_set_events_per_packet(dev, 60); | ||
280 | } | ||
273 | } | 281 | } |
274 | 282 | ||
275 | if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) { | 283 | if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) { |
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig index 8993983e3fe4..bb7762bf2879 100644 --- a/drivers/input/rmi4/Kconfig +++ b/drivers/input/rmi4/Kconfig | |||
@@ -42,13 +42,19 @@ config RMI4_SMB | |||
42 | config RMI4_F03 | 42 | config RMI4_F03 |
43 | bool "RMI4 Function 03 (PS2 Guest)" | 43 | bool "RMI4 Function 03 (PS2 Guest)" |
44 | depends on RMI4_CORE | 44 | depends on RMI4_CORE |
45 | depends on SERIO=y || RMI4_CORE=SERIO | ||
46 | help | 45 | help |
47 | Say Y here if you want to add support for RMI4 function 03. | 46 | Say Y here if you want to add support for RMI4 function 03. |
48 | 47 | ||
49 | Function 03 provides PS2 guest support for RMI4 devices. This | 48 | Function 03 provides PS2 guest support for RMI4 devices. This |
50 | includes support for TrackPoints on TouchPads. | 49 | includes support for TrackPoints on TouchPads. |
51 | 50 | ||
51 | config RMI4_F03_SERIO | ||
52 | tristate | ||
53 | depends on RMI4_CORE | ||
54 | depends on RMI4_F03 | ||
55 | default RMI4_CORE | ||
56 | select SERIO | ||
57 | |||
52 | config RMI4_2D_SENSOR | 58 | config RMI4_2D_SENSOR |
53 | bool | 59 | bool |
54 | depends on RMI4_CORE | 60 | depends on RMI4_CORE |
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 11447ab1055c..bf5c36e229ba 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c | |||
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake) | |||
901 | data->enabled = true; | 901 | data->enabled = true; |
902 | if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) { | 902 | if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) { |
903 | retval = disable_irq_wake(irq); | 903 | retval = disable_irq_wake(irq); |
904 | if (!retval) | 904 | if (retval) |
905 | dev_warn(&rmi_dev->dev, | 905 | dev_warn(&rmi_dev->dev, |
906 | "Failed to disable irq for wake: %d\n", | 906 | "Failed to disable irq for wake: %d\n", |
907 | retval); | 907 | retval); |
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake) | |||
936 | disable_irq(irq); | 936 | disable_irq(irq); |
937 | if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) { | 937 | if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) { |
938 | retval = enable_irq_wake(irq); | 938 | retval = enable_irq_wake(irq); |
939 | if (!retval) | 939 | if (retval) |
940 | dev_warn(&rmi_dev->dev, | 940 | dev_warn(&rmi_dev->dev, |
941 | "Failed to enable irq for wake: %d\n", | 941 | "Failed to enable irq for wake: %d\n", |
942 | retval); | 942 | retval); |
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index 83cf11312fd9..c9d1c91e1887 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c | |||
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev) | |||
682 | } | 682 | } |
683 | platform_set_drvdata(wm->battery_dev, wm); | 683 | platform_set_drvdata(wm->battery_dev, wm); |
684 | wm->battery_dev->dev.parent = dev; | 684 | wm->battery_dev->dev.parent = dev; |
685 | wm->battery_dev->dev.platform_data = pdata->batt_pdata; | 685 | wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL; |
686 | ret = platform_device_add(wm->battery_dev); | 686 | ret = platform_device_add(wm->battery_dev); |
687 | if (ret < 0) | 687 | if (ret < 0) |
688 | goto batt_reg_err; | 688 | goto batt_reg_err; |
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index 54a5e870a8f5..efbcf8435185 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c | |||
@@ -19,9 +19,9 @@ | |||
19 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
22 | #include <linux/interrupt.h> | ||
22 | #include <linux/irqdomain.h> | 23 | #include <linux/irqdomain.h> |
23 | #include <linux/irqchip.h> | 24 | #include <linux/irqchip.h> |
24 | #include <linux/irqchip/chained_irq.h> | ||
25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
26 | #include <linux/of_platform.h> | 26 | #include <linux/of_platform.h> |
27 | #include <linux/mfd/syscon.h> | 27 | #include <linux/mfd/syscon.h> |
@@ -39,6 +39,7 @@ struct keystone_irq_device { | |||
39 | struct irq_domain *irqd; | 39 | struct irq_domain *irqd; |
40 | struct regmap *devctrl_regs; | 40 | struct regmap *devctrl_regs; |
41 | u32 devctrl_offset; | 41 | u32 devctrl_offset; |
42 | raw_spinlock_t wa_lock; | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq) | 45 | static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq) |
@@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d) | |||
83 | /* nothing to do here */ | 84 | /* nothing to do here */ |
84 | } | 85 | } |
85 | 86 | ||
86 | static void keystone_irq_handler(struct irq_desc *desc) | 87 | static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq) |
87 | { | 88 | { |
88 | unsigned int irq = irq_desc_get_irq(desc); | 89 | struct keystone_irq_device *kirq = keystone_irq; |
89 | struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); | 90 | unsigned long wa_lock_flags; |
90 | unsigned long pending; | 91 | unsigned long pending; |
91 | int src, virq; | 92 | int src, virq; |
92 | 93 | ||
93 | dev_dbg(kirq->dev, "start irq %d\n", irq); | 94 | dev_dbg(kirq->dev, "start irq %d\n", irq); |
94 | 95 | ||
95 | chained_irq_enter(irq_desc_get_chip(desc), desc); | ||
96 | |||
97 | pending = keystone_irq_readl(kirq); | 96 | pending = keystone_irq_readl(kirq); |
98 | keystone_irq_writel(kirq, pending); | 97 | keystone_irq_writel(kirq, pending); |
99 | 98 | ||
@@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc) | |||
111 | if (!virq) | 110 | if (!virq) |
112 | dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n", | 111 | dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n", |
113 | src, virq); | 112 | src, virq); |
113 | raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags); | ||
114 | generic_handle_irq(virq); | 114 | generic_handle_irq(virq); |
115 | raw_spin_unlock_irqrestore(&kirq->wa_lock, | ||
116 | wa_lock_flags); | ||
115 | } | 117 | } |
116 | } | 118 | } |
117 | 119 | ||
118 | chained_irq_exit(irq_desc_get_chip(desc), desc); | ||
119 | |||
120 | dev_dbg(kirq->dev, "end irq %d\n", irq); | 120 | dev_dbg(kirq->dev, "end irq %d\n", irq); |
121 | return IRQ_HANDLED; | ||
121 | } | 122 | } |
122 | 123 | ||
123 | static int keystone_irq_map(struct irq_domain *h, unsigned int virq, | 124 | static int keystone_irq_map(struct irq_domain *h, unsigned int virq, |
@@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev) | |||
182 | return -ENODEV; | 183 | return -ENODEV; |
183 | } | 184 | } |
184 | 185 | ||
186 | raw_spin_lock_init(&kirq->wa_lock); | ||
187 | |||
185 | platform_set_drvdata(pdev, kirq); | 188 | platform_set_drvdata(pdev, kirq); |
186 | 189 | ||
187 | irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq); | 190 | ret = request_irq(kirq->irq, keystone_irq_handler, |
191 | 0, dev_name(dev), kirq); | ||
192 | if (ret) { | ||
193 | irq_domain_remove(kirq->irqd); | ||
194 | return ret; | ||
195 | } | ||
188 | 196 | ||
189 | /* clear all source bits */ | 197 | /* clear all source bits */ |
190 | keystone_irq_writel(kirq, ~0x0); | 198 | keystone_irq_writel(kirq, ~0x0); |
@@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev) | |||
199 | struct keystone_irq_device *kirq = platform_get_drvdata(pdev); | 207 | struct keystone_irq_device *kirq = platform_get_drvdata(pdev); |
200 | int hwirq; | 208 | int hwirq; |
201 | 209 | ||
210 | free_irq(kirq->irq, kirq); | ||
211 | |||
202 | for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++) | 212 | for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++) |
203 | irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq)); | 213 | irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq)); |
204 | 214 | ||
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index 17304705f2cf..05fa9f7af53c 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c | |||
@@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = { | |||
131 | .irq_ack = icoll_ack_irq, | 131 | .irq_ack = icoll_ack_irq, |
132 | .irq_mask = icoll_mask_irq, | 132 | .irq_mask = icoll_mask_irq, |
133 | .irq_unmask = icoll_unmask_irq, | 133 | .irq_unmask = icoll_unmask_irq, |
134 | .flags = IRQCHIP_MASK_ON_SUSPEND | | ||
135 | IRQCHIP_SKIP_SET_WAKE, | ||
134 | }; | 136 | }; |
135 | 137 | ||
136 | static struct irq_chip asm9260_icoll_chip = { | 138 | static struct irq_chip asm9260_icoll_chip = { |
137 | .irq_ack = icoll_ack_irq, | 139 | .irq_ack = icoll_ack_irq, |
138 | .irq_mask = asm9260_mask_irq, | 140 | .irq_mask = asm9260_mask_irq, |
139 | .irq_unmask = asm9260_unmask_irq, | 141 | .irq_unmask = asm9260_unmask_irq, |
142 | .flags = IRQCHIP_MASK_ON_SUSPEND | | ||
143 | IRQCHIP_SKIP_SET_WAKE, | ||
140 | }; | 144 | }; |
141 | 145 | ||
142 | asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) | 146 | asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 7c6c57216bf2..8a9f742d8ed7 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string | |||
1534 | return PTR_ERR(key); | 1534 | return PTR_ERR(key); |
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | rcu_read_lock(); | 1537 | down_read(&key->sem); |
1538 | 1538 | ||
1539 | ukp = user_key_payload(key); | 1539 | ukp = user_key_payload(key); |
1540 | if (!ukp) { | 1540 | if (!ukp) { |
1541 | rcu_read_unlock(); | 1541 | up_read(&key->sem); |
1542 | key_put(key); | 1542 | key_put(key); |
1543 | kzfree(new_key_string); | 1543 | kzfree(new_key_string); |
1544 | return -EKEYREVOKED; | 1544 | return -EKEYREVOKED; |
1545 | } | 1545 | } |
1546 | 1546 | ||
1547 | if (cc->key_size != ukp->datalen) { | 1547 | if (cc->key_size != ukp->datalen) { |
1548 | rcu_read_unlock(); | 1548 | up_read(&key->sem); |
1549 | key_put(key); | 1549 | key_put(key); |
1550 | kzfree(new_key_string); | 1550 | kzfree(new_key_string); |
1551 | return -EINVAL; | 1551 | return -EINVAL; |
@@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string | |||
1553 | 1553 | ||
1554 | memcpy(cc->key, ukp->data, cc->key_size); | 1554 | memcpy(cc->key, ukp->data, cc->key_size); |
1555 | 1555 | ||
1556 | rcu_read_unlock(); | 1556 | up_read(&key->sem); |
1557 | key_put(key); | 1557 | key_put(key); |
1558 | 1558 | ||
1559 | /* clear the flag since following operations may invalidate previously valid key */ | 1559 | /* clear the flag since following operations may invalidate previously valid key */ |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 6400cffb986d..3570bcb7a4a4 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -427,7 +427,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) | |||
427 | unsigned long flags; | 427 | unsigned long flags; |
428 | struct priority_group *pg; | 428 | struct priority_group *pg; |
429 | struct pgpath *pgpath; | 429 | struct pgpath *pgpath; |
430 | bool bypassed = true; | 430 | unsigned bypassed = 1; |
431 | 431 | ||
432 | if (!atomic_read(&m->nr_valid_paths)) { | 432 | if (!atomic_read(&m->nr_valid_paths)) { |
433 | clear_bit(MPATHF_QUEUE_IO, &m->flags); | 433 | clear_bit(MPATHF_QUEUE_IO, &m->flags); |
@@ -466,7 +466,7 @@ check_current_pg: | |||
466 | */ | 466 | */ |
467 | do { | 467 | do { |
468 | list_for_each_entry(pg, &m->priority_groups, list) { | 468 | list_for_each_entry(pg, &m->priority_groups, list) { |
469 | if (pg->bypassed == bypassed) | 469 | if (pg->bypassed == !!bypassed) |
470 | continue; | 470 | continue; |
471 | pgpath = choose_path_in_pg(m, pg, nr_bytes); | 471 | pgpath = choose_path_in_pg(m, pg, nr_bytes); |
472 | if (!IS_ERR_OR_NULL(pgpath)) { | 472 | if (!IS_ERR_OR_NULL(pgpath)) { |
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 9d7275fb541a..6e702fc69a83 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
@@ -779,6 +779,10 @@ static void dm_old_request_fn(struct request_queue *q) | |||
779 | int srcu_idx; | 779 | int srcu_idx; |
780 | struct dm_table *map = dm_get_live_table(md, &srcu_idx); | 780 | struct dm_table *map = dm_get_live_table(md, &srcu_idx); |
781 | 781 | ||
782 | if (unlikely(!map)) { | ||
783 | dm_put_live_table(md, srcu_idx); | ||
784 | return; | ||
785 | } | ||
782 | ti = dm_table_find_target(map, pos); | 786 | ti = dm_table_find_target(map, pos); |
783 | dm_put_live_table(md, srcu_idx); | 787 | dm_put_live_table(md, srcu_idx); |
784 | } | 788 | } |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 82821ee0d57f..01175dac0db6 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev) | |||
5291 | if (start_readonly && mddev->ro == 0) | 5291 | if (start_readonly && mddev->ro == 0) |
5292 | mddev->ro = 2; /* read-only, but switch on first write */ | 5292 | mddev->ro = 2; /* read-only, but switch on first write */ |
5293 | 5293 | ||
5294 | /* | ||
5295 | * NOTE: some pers->run(), for example r5l_recovery_log(), wakes | ||
5296 | * up mddev->thread. It is important to initialize critical | ||
5297 | * resources for mddev->thread BEFORE calling pers->run(). | ||
5298 | */ | ||
5294 | err = pers->run(mddev); | 5299 | err = pers->run(mddev); |
5295 | if (err) | 5300 | if (err) |
5296 | pr_warn("md: pers->run() failed ...\n"); | 5301 | pr_warn("md: pers->run() failed ...\n"); |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 0e8ed2c327b0..302dea3296ba 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -162,6 +162,8 @@ struct r5l_log { | |||
162 | 162 | ||
163 | /* to submit async io_units, to fulfill ordering of flush */ | 163 | /* to submit async io_units, to fulfill ordering of flush */ |
164 | struct work_struct deferred_io_work; | 164 | struct work_struct deferred_io_work; |
165 | /* to disable write back during in degraded mode */ | ||
166 | struct work_struct disable_writeback_work; | ||
165 | }; | 167 | }; |
166 | 168 | ||
167 | /* | 169 | /* |
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work) | |||
611 | r5l_do_submit_io(log, io); | 613 | r5l_do_submit_io(log, io); |
612 | } | 614 | } |
613 | 615 | ||
616 | static void r5c_disable_writeback_async(struct work_struct *work) | ||
617 | { | ||
618 | struct r5l_log *log = container_of(work, struct r5l_log, | ||
619 | disable_writeback_work); | ||
620 | struct mddev *mddev = log->rdev->mddev; | ||
621 | |||
622 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) | ||
623 | return; | ||
624 | pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", | ||
625 | mdname(mddev)); | ||
626 | mddev_suspend(mddev); | ||
627 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | ||
628 | mddev_resume(mddev); | ||
629 | } | ||
630 | |||
614 | static void r5l_submit_current_io(struct r5l_log *log) | 631 | static void r5l_submit_current_io(struct r5l_log *log) |
615 | { | 632 | { |
616 | struct r5l_io_unit *io = log->current_io; | 633 | struct r5l_io_unit *io = log->current_io; |
@@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log) | |||
1393 | next_checkpoint = r5c_calculate_new_cp(conf); | 1410 | next_checkpoint = r5c_calculate_new_cp(conf); |
1394 | spin_unlock_irq(&log->io_list_lock); | 1411 | spin_unlock_irq(&log->io_list_lock); |
1395 | 1412 | ||
1396 | BUG_ON(reclaimable < 0); | ||
1397 | |||
1398 | if (reclaimable == 0 || !write_super) | 1413 | if (reclaimable == 0 || !write_super) |
1399 | return; | 1414 | return; |
1400 | 1415 | ||
@@ -2062,7 +2077,7 @@ static int | |||
2062 | r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | 2077 | r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, |
2063 | struct r5l_recovery_ctx *ctx) | 2078 | struct r5l_recovery_ctx *ctx) |
2064 | { | 2079 | { |
2065 | struct stripe_head *sh, *next; | 2080 | struct stripe_head *sh; |
2066 | struct mddev *mddev = log->rdev->mddev; | 2081 | struct mddev *mddev = log->rdev->mddev; |
2067 | struct page *page; | 2082 | struct page *page; |
2068 | sector_t next_checkpoint = MaxSector; | 2083 | sector_t next_checkpoint = MaxSector; |
@@ -2076,7 +2091,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | |||
2076 | 2091 | ||
2077 | WARN_ON(list_empty(&ctx->cached_list)); | 2092 | WARN_ON(list_empty(&ctx->cached_list)); |
2078 | 2093 | ||
2079 | list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { | 2094 | list_for_each_entry(sh, &ctx->cached_list, lru) { |
2080 | struct r5l_meta_block *mb; | 2095 | struct r5l_meta_block *mb; |
2081 | int i; | 2096 | int i; |
2082 | int offset; | 2097 | int offset; |
@@ -2126,14 +2141,39 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | |||
2126 | ctx->pos = write_pos; | 2141 | ctx->pos = write_pos; |
2127 | ctx->seq += 1; | 2142 | ctx->seq += 1; |
2128 | next_checkpoint = sh->log_start; | 2143 | next_checkpoint = sh->log_start; |
2129 | list_del_init(&sh->lru); | ||
2130 | raid5_release_stripe(sh); | ||
2131 | } | 2144 | } |
2132 | log->next_checkpoint = next_checkpoint; | 2145 | log->next_checkpoint = next_checkpoint; |
2133 | __free_page(page); | 2146 | __free_page(page); |
2134 | return 0; | 2147 | return 0; |
2135 | } | 2148 | } |
2136 | 2149 | ||
2150 | static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log, | ||
2151 | struct r5l_recovery_ctx *ctx) | ||
2152 | { | ||
2153 | struct mddev *mddev = log->rdev->mddev; | ||
2154 | struct r5conf *conf = mddev->private; | ||
2155 | struct stripe_head *sh, *next; | ||
2156 | |||
2157 | if (ctx->data_only_stripes == 0) | ||
2158 | return; | ||
2159 | |||
2160 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK; | ||
2161 | |||
2162 | list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { | ||
2163 | r5c_make_stripe_write_out(sh); | ||
2164 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2165 | list_del_init(&sh->lru); | ||
2166 | raid5_release_stripe(sh); | ||
2167 | } | ||
2168 | |||
2169 | md_wakeup_thread(conf->mddev->thread); | ||
2170 | /* reuse conf->wait_for_quiescent in recovery */ | ||
2171 | wait_event(conf->wait_for_quiescent, | ||
2172 | atomic_read(&conf->active_stripes) == 0); | ||
2173 | |||
2174 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | ||
2175 | } | ||
2176 | |||
2137 | static int r5l_recovery_log(struct r5l_log *log) | 2177 | static int r5l_recovery_log(struct r5l_log *log) |
2138 | { | 2178 | { |
2139 | struct mddev *mddev = log->rdev->mddev; | 2179 | struct mddev *mddev = log->rdev->mddev; |
@@ -2160,32 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log) | |||
2160 | pos = ctx.pos; | 2200 | pos = ctx.pos; |
2161 | ctx.seq += 10000; | 2201 | ctx.seq += 10000; |
2162 | 2202 | ||
2163 | if (ctx.data_only_stripes == 0) { | ||
2164 | log->next_checkpoint = ctx.pos; | ||
2165 | r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++); | ||
2166 | ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); | ||
2167 | } | ||
2168 | 2203 | ||
2169 | if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) | 2204 | if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) |
2170 | pr_debug("md/raid:%s: starting from clean shutdown\n", | 2205 | pr_debug("md/raid:%s: starting from clean shutdown\n", |
2171 | mdname(mddev)); | 2206 | mdname(mddev)); |
2172 | else { | 2207 | else |
2173 | pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", | 2208 | pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", |
2174 | mdname(mddev), ctx.data_only_stripes, | 2209 | mdname(mddev), ctx.data_only_stripes, |
2175 | ctx.data_parity_stripes); | 2210 | ctx.data_parity_stripes); |
2176 | 2211 | ||
2177 | if (ctx.data_only_stripes > 0) | 2212 | if (ctx.data_only_stripes == 0) { |
2178 | if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) { | 2213 | log->next_checkpoint = ctx.pos; |
2179 | pr_err("md/raid:%s: failed to rewrite stripes to journal\n", | 2214 | r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++); |
2180 | mdname(mddev)); | 2215 | ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); |
2181 | return -EIO; | 2216 | } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) { |
2182 | } | 2217 | pr_err("md/raid:%s: failed to rewrite stripes to journal\n", |
2218 | mdname(mddev)); | ||
2219 | return -EIO; | ||
2183 | } | 2220 | } |
2184 | 2221 | ||
2185 | log->log_start = ctx.pos; | 2222 | log->log_start = ctx.pos; |
2186 | log->seq = ctx.seq; | 2223 | log->seq = ctx.seq; |
2187 | log->last_checkpoint = pos; | 2224 | log->last_checkpoint = pos; |
2188 | r5l_write_super(log, pos); | 2225 | r5l_write_super(log, pos); |
2226 | |||
2227 | r5c_recovery_flush_data_only_stripes(log, &ctx); | ||
2189 | return 0; | 2228 | return 0; |
2190 | } | 2229 | } |
2191 | 2230 | ||
@@ -2247,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, | |||
2247 | val > R5C_JOURNAL_MODE_WRITE_BACK) | 2286 | val > R5C_JOURNAL_MODE_WRITE_BACK) |
2248 | return -EINVAL; | 2287 | return -EINVAL; |
2249 | 2288 | ||
2289 | if (raid5_calc_degraded(conf) > 0 && | ||
2290 | val == R5C_JOURNAL_MODE_WRITE_BACK) | ||
2291 | return -EINVAL; | ||
2292 | |||
2250 | mddev_suspend(mddev); | 2293 | mddev_suspend(mddev); |
2251 | conf->log->r5c_journal_mode = val; | 2294 | conf->log->r5c_journal_mode = val; |
2252 | mddev_resume(mddev); | 2295 | mddev_resume(mddev); |
@@ -2301,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf, | |||
2301 | set_bit(STRIPE_R5C_CACHING, &sh->state); | 2344 | set_bit(STRIPE_R5C_CACHING, &sh->state); |
2302 | } | 2345 | } |
2303 | 2346 | ||
2347 | /* | ||
2348 | * When run in degraded mode, array is set to write-through mode. | ||
2349 | * This check helps drain pending write safely in the transition to | ||
2350 | * write-through mode. | ||
2351 | */ | ||
2352 | if (s->failed) { | ||
2353 | r5c_make_stripe_write_out(sh); | ||
2354 | return -EAGAIN; | ||
2355 | } | ||
2356 | |||
2304 | for (i = disks; i--; ) { | 2357 | for (i = disks; i--; ) { |
2305 | dev = &sh->dev[i]; | 2358 | dev = &sh->dev[i]; |
2306 | /* if non-overwrite, use writing-out phase */ | 2359 | /* if non-overwrite, use writing-out phase */ |
@@ -2351,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh) | |||
2351 | struct page *p = sh->dev[i].orig_page; | 2404 | struct page *p = sh->dev[i].orig_page; |
2352 | 2405 | ||
2353 | sh->dev[i].orig_page = sh->dev[i].page; | 2406 | sh->dev[i].orig_page = sh->dev[i].page; |
2407 | clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); | ||
2408 | |||
2354 | if (!using_disk_info_extra_page) | 2409 | if (!using_disk_info_extra_page) |
2355 | put_page(p); | 2410 | put_page(p); |
2356 | } | 2411 | } |
@@ -2555,6 +2610,19 @@ ioerr: | |||
2555 | return ret; | 2610 | return ret; |
2556 | } | 2611 | } |
2557 | 2612 | ||
2613 | void r5c_update_on_rdev_error(struct mddev *mddev) | ||
2614 | { | ||
2615 | struct r5conf *conf = mddev->private; | ||
2616 | struct r5l_log *log = conf->log; | ||
2617 | |||
2618 | if (!log) | ||
2619 | return; | ||
2620 | |||
2621 | if (raid5_calc_degraded(conf) > 0 && | ||
2622 | conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) | ||
2623 | schedule_work(&log->disable_writeback_work); | ||
2624 | } | ||
2625 | |||
2558 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) | 2626 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) |
2559 | { | 2627 | { |
2560 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 2628 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
@@ -2627,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) | |||
2627 | spin_lock_init(&log->no_space_stripes_lock); | 2695 | spin_lock_init(&log->no_space_stripes_lock); |
2628 | 2696 | ||
2629 | INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); | 2697 | INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); |
2698 | INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async); | ||
2630 | 2699 | ||
2631 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | 2700 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; |
2632 | INIT_LIST_HEAD(&log->stripe_in_journal_list); | 2701 | INIT_LIST_HEAD(&log->stripe_in_journal_list); |
@@ -2659,6 +2728,7 @@ io_kc: | |||
2659 | 2728 | ||
2660 | void r5l_exit_log(struct r5l_log *log) | 2729 | void r5l_exit_log(struct r5l_log *log) |
2661 | { | 2730 | { |
2731 | flush_work(&log->disable_writeback_work); | ||
2662 | md_unregister_thread(&log->reclaim_thread); | 2732 | md_unregister_thread(&log->reclaim_thread); |
2663 | mempool_destroy(log->meta_pool); | 2733 | mempool_destroy(log->meta_pool); |
2664 | bioset_free(log->bs); | 2734 | bioset_free(log->bs); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 36c13e4be9c9..3c7e106c12a2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, | |||
556 | * of the two sections, and some non-in_sync devices may | 556 | * of the two sections, and some non-in_sync devices may |
557 | * be insync in the section most affected by failed devices. | 557 | * be insync in the section most affected by failed devices. |
558 | */ | 558 | */ |
559 | static int calc_degraded(struct r5conf *conf) | 559 | int raid5_calc_degraded(struct r5conf *conf) |
560 | { | 560 | { |
561 | int degraded, degraded2; | 561 | int degraded, degraded2; |
562 | int i; | 562 | int i; |
@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf) | |||
619 | if (conf->mddev->reshape_position == MaxSector) | 619 | if (conf->mddev->reshape_position == MaxSector) |
620 | return conf->mddev->degraded > conf->max_degraded; | 620 | return conf->mddev->degraded > conf->max_degraded; |
621 | 621 | ||
622 | degraded = calc_degraded(conf); | 622 | degraded = raid5_calc_degraded(conf); |
623 | if (degraded > conf->max_degraded) | 623 | if (degraded > conf->max_degraded) |
624 | return 1; | 624 | return 1; |
625 | return 0; | 625 | return 0; |
@@ -1015,7 +1015,17 @@ again: | |||
1015 | 1015 | ||
1016 | if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) | 1016 | if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) |
1017 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); | 1017 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); |
1018 | sh->dev[i].vec.bv_page = sh->dev[i].page; | 1018 | |
1019 | if (!op_is_write(op) && | ||
1020 | test_bit(R5_InJournal, &sh->dev[i].flags)) | ||
1021 | /* | ||
1022 | * issuing read for a page in journal, this | ||
1023 | * must be preparing for prexor in rmw; read | ||
1024 | * the data into orig_page | ||
1025 | */ | ||
1026 | sh->dev[i].vec.bv_page = sh->dev[i].orig_page; | ||
1027 | else | ||
1028 | sh->dev[i].vec.bv_page = sh->dev[i].page; | ||
1019 | bi->bi_vcnt = 1; | 1029 | bi->bi_vcnt = 1; |
1020 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; | 1030 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; |
1021 | bi->bi_io_vec[0].bv_offset = 0; | 1031 | bi->bi_io_vec[0].bv_offset = 0; |
@@ -2380,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi) | |||
2380 | } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) | 2390 | } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) |
2381 | clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); | 2391 | clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); |
2382 | 2392 | ||
2393 | if (test_bit(R5_InJournal, &sh->dev[i].flags)) | ||
2394 | /* | ||
2395 | * end read for a page in journal, this | ||
2396 | * must be preparing for prexor in rmw | ||
2397 | */ | ||
2398 | set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); | ||
2399 | |||
2383 | if (atomic_read(&rdev->read_errors)) | 2400 | if (atomic_read(&rdev->read_errors)) |
2384 | atomic_set(&rdev->read_errors, 0); | 2401 | atomic_set(&rdev->read_errors, 0); |
2385 | } else { | 2402 | } else { |
@@ -2538,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) | |||
2538 | 2555 | ||
2539 | spin_lock_irqsave(&conf->device_lock, flags); | 2556 | spin_lock_irqsave(&conf->device_lock, flags); |
2540 | clear_bit(In_sync, &rdev->flags); | 2557 | clear_bit(In_sync, &rdev->flags); |
2541 | mddev->degraded = calc_degraded(conf); | 2558 | mddev->degraded = raid5_calc_degraded(conf); |
2542 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2559 | spin_unlock_irqrestore(&conf->device_lock, flags); |
2543 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | 2560 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
2544 | 2561 | ||
@@ -2552,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) | |||
2552 | bdevname(rdev->bdev, b), | 2569 | bdevname(rdev->bdev, b), |
2553 | mdname(mddev), | 2570 | mdname(mddev), |
2554 | conf->raid_disks - mddev->degraded); | 2571 | conf->raid_disks - mddev->degraded); |
2572 | r5c_update_on_rdev_error(mddev); | ||
2555 | } | 2573 | } |
2556 | 2574 | ||
2557 | /* | 2575 | /* |
@@ -2880,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
2880 | return r_sector; | 2898 | return r_sector; |
2881 | } | 2899 | } |
2882 | 2900 | ||
2901 | /* | ||
2902 | * There are cases where we want handle_stripe_dirtying() and | ||
2903 | * schedule_reconstruction() to delay towrite to some dev of a stripe. | ||
2904 | * | ||
2905 | * This function checks whether we want to delay the towrite. Specifically, | ||
2906 | * we delay the towrite when: | ||
2907 | * | ||
2908 | * 1. degraded stripe has a non-overwrite to the missing dev, AND this | ||
2909 | * stripe has data in journal (for other devices). | ||
2910 | * | ||
2911 | * In this case, when reading data for the non-overwrite dev, it is | ||
2912 | * necessary to handle complex rmw of write back cache (prexor with | ||
2913 | * orig_page, and xor with page). To keep read path simple, we would | ||
2914 | * like to flush data in journal to RAID disks first, so complex rmw | ||
2915 | * is handled in the write patch (handle_stripe_dirtying). | ||
2916 | * | ||
2917 | */ | ||
2918 | static inline bool delay_towrite(struct r5dev *dev, | ||
2919 | struct stripe_head_state *s) | ||
2920 | { | ||
2921 | return !test_bit(R5_OVERWRITE, &dev->flags) && | ||
2922 | !test_bit(R5_Insync, &dev->flags) && s->injournal; | ||
2923 | } | ||
2924 | |||
2883 | static void | 2925 | static void |
2884 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | 2926 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, |
2885 | int rcw, int expand) | 2927 | int rcw, int expand) |
@@ -2900,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2900 | for (i = disks; i--; ) { | 2942 | for (i = disks; i--; ) { |
2901 | struct r5dev *dev = &sh->dev[i]; | 2943 | struct r5dev *dev = &sh->dev[i]; |
2902 | 2944 | ||
2903 | if (dev->towrite) { | 2945 | if (dev->towrite && !delay_towrite(dev, s)) { |
2904 | set_bit(R5_LOCKED, &dev->flags); | 2946 | set_bit(R5_LOCKED, &dev->flags); |
2905 | set_bit(R5_Wantdrain, &dev->flags); | 2947 | set_bit(R5_Wantdrain, &dev->flags); |
2906 | if (!expand) | 2948 | if (!expand) |
@@ -3295,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx) | |||
3295 | return rv; | 3337 | return rv; |
3296 | } | 3338 | } |
3297 | 3339 | ||
3298 | /* fetch_block - checks the given member device to see if its data needs | ||
3299 | * to be read or computed to satisfy a request. | ||
3300 | * | ||
3301 | * Returns 1 when no more member devices need to be checked, otherwise returns | ||
3302 | * 0 to tell the loop in handle_stripe_fill to continue | ||
3303 | */ | ||
3304 | |||
3305 | static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | 3340 | static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, |
3306 | int disk_idx, int disks) | 3341 | int disk_idx, int disks) |
3307 | { | 3342 | { |
@@ -3392,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | |||
3392 | return 0; | 3427 | return 0; |
3393 | } | 3428 | } |
3394 | 3429 | ||
3430 | /* fetch_block - checks the given member device to see if its data needs | ||
3431 | * to be read or computed to satisfy a request. | ||
3432 | * | ||
3433 | * Returns 1 when no more member devices need to be checked, otherwise returns | ||
3434 | * 0 to tell the loop in handle_stripe_fill to continue | ||
3435 | */ | ||
3395 | static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, | 3436 | static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, |
3396 | int disk_idx, int disks) | 3437 | int disk_idx, int disks) |
3397 | { | 3438 | { |
@@ -3478,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh, | |||
3478 | * midst of changing due to a write | 3519 | * midst of changing due to a write |
3479 | */ | 3520 | */ |
3480 | if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && | 3521 | if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && |
3481 | !sh->reconstruct_state) | 3522 | !sh->reconstruct_state) { |
3523 | |||
3524 | /* | ||
3525 | * For degraded stripe with data in journal, do not handle | ||
3526 | * read requests yet, instead, flush the stripe to raid | ||
3527 | * disks first, this avoids handling complex rmw of write | ||
3528 | * back cache (prexor with orig_page, and then xor with | ||
3529 | * page) in the read path | ||
3530 | */ | ||
3531 | if (s->injournal && s->failed) { | ||
3532 | if (test_bit(STRIPE_R5C_CACHING, &sh->state)) | ||
3533 | r5c_make_stripe_write_out(sh); | ||
3534 | goto out; | ||
3535 | } | ||
3536 | |||
3482 | for (i = disks; i--; ) | 3537 | for (i = disks; i--; ) |
3483 | if (fetch_block(sh, s, i, disks)) | 3538 | if (fetch_block(sh, s, i, disks)) |
3484 | break; | 3539 | break; |
3540 | } | ||
3541 | out: | ||
3485 | set_bit(STRIPE_HANDLE, &sh->state); | 3542 | set_bit(STRIPE_HANDLE, &sh->state); |
3486 | } | 3543 | } |
3487 | 3544 | ||
@@ -3594,6 +3651,21 @@ unhash: | |||
3594 | break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); | 3651 | break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); |
3595 | } | 3652 | } |
3596 | 3653 | ||
3654 | /* | ||
3655 | * For RMW in write back cache, we need extra page in prexor to store the | ||
3656 | * old data. This page is stored in dev->orig_page. | ||
3657 | * | ||
3658 | * This function checks whether we have data for prexor. The exact logic | ||
3659 | * is: | ||
3660 | * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE) | ||
3661 | */ | ||
3662 | static inline bool uptodate_for_rmw(struct r5dev *dev) | ||
3663 | { | ||
3664 | return (test_bit(R5_UPTODATE, &dev->flags)) && | ||
3665 | (!test_bit(R5_InJournal, &dev->flags) || | ||
3666 | test_bit(R5_OrigPageUPTDODATE, &dev->flags)); | ||
3667 | } | ||
3668 | |||
3597 | static int handle_stripe_dirtying(struct r5conf *conf, | 3669 | static int handle_stripe_dirtying(struct r5conf *conf, |
3598 | struct stripe_head *sh, | 3670 | struct stripe_head *sh, |
3599 | struct stripe_head_state *s, | 3671 | struct stripe_head_state *s, |
@@ -3622,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf, | |||
3622 | } else for (i = disks; i--; ) { | 3694 | } else for (i = disks; i--; ) { |
3623 | /* would I have to read this buffer for read_modify_write */ | 3695 | /* would I have to read this buffer for read_modify_write */ |
3624 | struct r5dev *dev = &sh->dev[i]; | 3696 | struct r5dev *dev = &sh->dev[i]; |
3625 | if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx || | 3697 | if (((dev->towrite && !delay_towrite(dev, s)) || |
3698 | i == sh->pd_idx || i == sh->qd_idx || | ||
3626 | test_bit(R5_InJournal, &dev->flags)) && | 3699 | test_bit(R5_InJournal, &dev->flags)) && |
3627 | !test_bit(R5_LOCKED, &dev->flags) && | 3700 | !test_bit(R5_LOCKED, &dev->flags) && |
3628 | !((test_bit(R5_UPTODATE, &dev->flags) && | 3701 | !(uptodate_for_rmw(dev) || |
3629 | (!test_bit(R5_InJournal, &dev->flags) || | ||
3630 | dev->page != dev->orig_page)) || | ||
3631 | test_bit(R5_Wantcompute, &dev->flags))) { | 3702 | test_bit(R5_Wantcompute, &dev->flags))) { |
3632 | if (test_bit(R5_Insync, &dev->flags)) | 3703 | if (test_bit(R5_Insync, &dev->flags)) |
3633 | rmw++; | 3704 | rmw++; |
@@ -3639,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf, | |||
3639 | i != sh->pd_idx && i != sh->qd_idx && | 3710 | i != sh->pd_idx && i != sh->qd_idx && |
3640 | !test_bit(R5_LOCKED, &dev->flags) && | 3711 | !test_bit(R5_LOCKED, &dev->flags) && |
3641 | !(test_bit(R5_UPTODATE, &dev->flags) || | 3712 | !(test_bit(R5_UPTODATE, &dev->flags) || |
3642 | test_bit(R5_InJournal, &dev->flags) || | ||
3643 | test_bit(R5_Wantcompute, &dev->flags))) { | 3713 | test_bit(R5_Wantcompute, &dev->flags))) { |
3644 | if (test_bit(R5_Insync, &dev->flags)) | 3714 | if (test_bit(R5_Insync, &dev->flags)) |
3645 | rcw++; | 3715 | rcw++; |
@@ -3689,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf, | |||
3689 | 3759 | ||
3690 | for (i = disks; i--; ) { | 3760 | for (i = disks; i--; ) { |
3691 | struct r5dev *dev = &sh->dev[i]; | 3761 | struct r5dev *dev = &sh->dev[i]; |
3692 | if ((dev->towrite || | 3762 | if (((dev->towrite && !delay_towrite(dev, s)) || |
3693 | i == sh->pd_idx || i == sh->qd_idx || | 3763 | i == sh->pd_idx || i == sh->qd_idx || |
3694 | test_bit(R5_InJournal, &dev->flags)) && | 3764 | test_bit(R5_InJournal, &dev->flags)) && |
3695 | !test_bit(R5_LOCKED, &dev->flags) && | 3765 | !test_bit(R5_LOCKED, &dev->flags) && |
3696 | !((test_bit(R5_UPTODATE, &dev->flags) && | 3766 | !(uptodate_for_rmw(dev) || |
3697 | (!test_bit(R5_InJournal, &dev->flags) || | ||
3698 | dev->page != dev->orig_page)) || | ||
3699 | test_bit(R5_Wantcompute, &dev->flags)) && | 3767 | test_bit(R5_Wantcompute, &dev->flags)) && |
3700 | test_bit(R5_Insync, &dev->flags)) { | 3768 | test_bit(R5_Insync, &dev->flags)) { |
3701 | if (test_bit(STRIPE_PREREAD_ACTIVE, | 3769 | if (test_bit(STRIPE_PREREAD_ACTIVE, |
@@ -3722,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf, | |||
3722 | i != sh->pd_idx && i != sh->qd_idx && | 3790 | i != sh->pd_idx && i != sh->qd_idx && |
3723 | !test_bit(R5_LOCKED, &dev->flags) && | 3791 | !test_bit(R5_LOCKED, &dev->flags) && |
3724 | !(test_bit(R5_UPTODATE, &dev->flags) || | 3792 | !(test_bit(R5_UPTODATE, &dev->flags) || |
3725 | test_bit(R5_InJournal, &dev->flags) || | ||
3726 | test_bit(R5_Wantcompute, &dev->flags))) { | 3793 | test_bit(R5_Wantcompute, &dev->flags))) { |
3727 | rcw++; | 3794 | rcw++; |
3728 | if (test_bit(R5_Insync, &dev->flags) && | 3795 | if (test_bit(R5_Insync, &dev->flags) && |
@@ -7025,7 +7092,7 @@ static int raid5_run(struct mddev *mddev) | |||
7025 | /* | 7092 | /* |
7026 | * 0 for a fully functional array, 1 or 2 for a degraded array. | 7093 | * 0 for a fully functional array, 1 or 2 for a degraded array. |
7027 | */ | 7094 | */ |
7028 | mddev->degraded = calc_degraded(conf); | 7095 | mddev->degraded = raid5_calc_degraded(conf); |
7029 | 7096 | ||
7030 | if (has_failed(conf)) { | 7097 | if (has_failed(conf)) { |
7031 | pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", | 7098 | pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", |
@@ -7272,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev) | |||
7272 | } | 7339 | } |
7273 | } | 7340 | } |
7274 | spin_lock_irqsave(&conf->device_lock, flags); | 7341 | spin_lock_irqsave(&conf->device_lock, flags); |
7275 | mddev->degraded = calc_degraded(conf); | 7342 | mddev->degraded = raid5_calc_degraded(conf); |
7276 | spin_unlock_irqrestore(&conf->device_lock, flags); | 7343 | spin_unlock_irqrestore(&conf->device_lock, flags); |
7277 | print_raid5_conf(conf); | 7344 | print_raid5_conf(conf); |
7278 | return count; | 7345 | return count; |
@@ -7632,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev) | |||
7632 | * pre and post number of devices. | 7699 | * pre and post number of devices. |
7633 | */ | 7700 | */ |
7634 | spin_lock_irqsave(&conf->device_lock, flags); | 7701 | spin_lock_irqsave(&conf->device_lock, flags); |
7635 | mddev->degraded = calc_degraded(conf); | 7702 | mddev->degraded = raid5_calc_degraded(conf); |
7636 | spin_unlock_irqrestore(&conf->device_lock, flags); | 7703 | spin_unlock_irqrestore(&conf->device_lock, flags); |
7637 | } | 7704 | } |
7638 | mddev->raid_disks = conf->raid_disks; | 7705 | mddev->raid_disks = conf->raid_disks; |
@@ -7720,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev) | |||
7720 | } else { | 7787 | } else { |
7721 | int d; | 7788 | int d; |
7722 | spin_lock_irq(&conf->device_lock); | 7789 | spin_lock_irq(&conf->device_lock); |
7723 | mddev->degraded = calc_degraded(conf); | 7790 | mddev->degraded = raid5_calc_degraded(conf); |
7724 | spin_unlock_irq(&conf->device_lock); | 7791 | spin_unlock_irq(&conf->device_lock); |
7725 | for (d = conf->raid_disks ; | 7792 | for (d = conf->raid_disks ; |
7726 | d < conf->raid_disks - mddev->delta_disks; | 7793 | d < conf->raid_disks - mddev->delta_disks; |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index ed8e1362ab36..1440fa26e296 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -322,6 +322,11 @@ enum r5dev_flags { | |||
322 | * data and parity being written are in the journal | 322 | * data and parity being written are in the journal |
323 | * device | 323 | * device |
324 | */ | 324 | */ |
325 | R5_OrigPageUPTDODATE, /* with write back cache, we read old data into | ||
326 | * dev->orig_page for prexor. When this flag is | ||
327 | * set, orig_page contains latest data in the | ||
328 | * raid disk. | ||
329 | */ | ||
325 | }; | 330 | }; |
326 | 331 | ||
327 | /* | 332 | /* |
@@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, | |||
753 | extern struct stripe_head * | 758 | extern struct stripe_head * |
754 | raid5_get_active_stripe(struct r5conf *conf, sector_t sector, | 759 | raid5_get_active_stripe(struct r5conf *conf, sector_t sector, |
755 | int previous, int noblock, int noquiesce); | 760 | int previous, int noblock, int noquiesce); |
761 | extern int raid5_calc_degraded(struct r5conf *conf); | ||
756 | extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); | 762 | extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); |
757 | extern void r5l_exit_log(struct r5l_log *log); | 763 | extern void r5l_exit_log(struct r5l_log *log); |
758 | extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); | 764 | extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); |
@@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num); | |||
781 | extern void r5c_check_stripe_cache_usage(struct r5conf *conf); | 787 | extern void r5c_check_stripe_cache_usage(struct r5conf *conf); |
782 | extern void r5c_check_cached_full_stripe(struct r5conf *conf); | 788 | extern void r5c_check_cached_full_stripe(struct r5conf *conf); |
783 | extern struct md_sysfs_entry r5c_journal_mode; | 789 | extern struct md_sysfs_entry r5c_journal_mode; |
790 | extern void r5c_update_on_rdev_error(struct mddev *mddev); | ||
784 | #endif | 791 | #endif |
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index ebb5e391b800..87a6b65ed3af 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c | |||
@@ -1206,7 +1206,7 @@ static int cec_config_thread_func(void *arg) | |||
1206 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; | 1206 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; |
1207 | if (last_la == CEC_LOG_ADDR_INVALID || | 1207 | if (last_la == CEC_LOG_ADDR_INVALID || |
1208 | last_la == CEC_LOG_ADDR_UNREGISTERED || | 1208 | last_la == CEC_LOG_ADDR_UNREGISTERED || |
1209 | !(last_la & type2mask[type])) | 1209 | !((1 << last_la) & type2mask[type])) |
1210 | last_la = la_list[0]; | 1210 | last_la = la_list[0]; |
1211 | 1211 | ||
1212 | err = cec_config_log_addr(adap, i, last_la); | 1212 | err = cec_config_log_addr(adap, i, last_la); |
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 01a804792f30..b5972440c1bf 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -1023,7 +1023,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, | |||
1023 | if (!host->busy_status && busy_resp && | 1023 | if (!host->busy_status && busy_resp && |
1024 | !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) && | 1024 | !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) && |
1025 | (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) { | 1025 | (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) { |
1026 | /* Unmask the busy IRQ */ | 1026 | |
1027 | /* Clear the busy start IRQ */ | ||
1028 | writel(host->variant->busy_detect_mask, | ||
1029 | host->base + MMCICLEAR); | ||
1030 | |||
1031 | /* Unmask the busy end IRQ */ | ||
1027 | writel(readl(base + MMCIMASK0) | | 1032 | writel(readl(base + MMCIMASK0) | |
1028 | host->variant->busy_detect_mask, | 1033 | host->variant->busy_detect_mask, |
1029 | base + MMCIMASK0); | 1034 | base + MMCIMASK0); |
@@ -1038,10 +1043,14 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, | |||
1038 | 1043 | ||
1039 | /* | 1044 | /* |
1040 | * At this point we are not busy with a command, we have | 1045 | * At this point we are not busy with a command, we have |
1041 | * not received a new busy request, mask the busy IRQ and | 1046 | * not received a new busy request, clear and mask the busy |
1042 | * fall through to process the IRQ. | 1047 | * end IRQ and fall through to process the IRQ. |
1043 | */ | 1048 | */ |
1044 | if (host->busy_status) { | 1049 | if (host->busy_status) { |
1050 | |||
1051 | writel(host->variant->busy_detect_mask, | ||
1052 | host->base + MMCICLEAR); | ||
1053 | |||
1045 | writel(readl(base + MMCIMASK0) & | 1054 | writel(readl(base + MMCIMASK0) & |
1046 | ~host->variant->busy_detect_mask, | 1055 | ~host->variant->busy_detect_mask, |
1047 | base + MMCIMASK0); | 1056 | base + MMCIMASK0); |
@@ -1283,12 +1292,21 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) | |||
1283 | } | 1292 | } |
1284 | 1293 | ||
1285 | /* | 1294 | /* |
1286 | * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's | 1295 | * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's |
1287 | * enabled) since the HW seems to be triggering the IRQ on both | 1296 | * enabled) in mmci_cmd_irq() function where ST Micro busy |
1288 | * edges while monitoring DAT0 for busy completion. | 1297 | * detection variant is handled. Considering the HW seems to be |
1298 | * triggering the IRQ on both edges while monitoring DAT0 for | ||
1299 | * busy completion and that same status bit is used to monitor | ||
1300 | * start and end of busy detection, special care must be taken | ||
1301 | * to make sure that both start and end interrupts are always | ||
1302 | * cleared one after the other. | ||
1289 | */ | 1303 | */ |
1290 | status &= readl(host->base + MMCIMASK0); | 1304 | status &= readl(host->base + MMCIMASK0); |
1291 | writel(status, host->base + MMCICLEAR); | 1305 | if (host->variant->busy_detect) |
1306 | writel(status & ~host->variant->busy_detect_mask, | ||
1307 | host->base + MMCICLEAR); | ||
1308 | else | ||
1309 | writel(status, host->base + MMCICLEAR); | ||
1292 | 1310 | ||
1293 | dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); | 1311 | dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); |
1294 | 1312 | ||
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 23909804ffb8..0def99590d16 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) | |||
2733 | if (intmask & SDHCI_INT_RETUNE) | 2733 | if (intmask & SDHCI_INT_RETUNE) |
2734 | mmc_retune_needed(host->mmc); | 2734 | mmc_retune_needed(host->mmc); |
2735 | 2735 | ||
2736 | if (intmask & SDHCI_INT_CARD_INT) { | 2736 | if ((intmask & SDHCI_INT_CARD_INT) && |
2737 | (host->ier & SDHCI_INT_CARD_INT)) { | ||
2737 | sdhci_enable_sdio_irq_nolock(host, false); | 2738 | sdhci_enable_sdio_irq_nolock(host, false); |
2738 | host->thread_isr |= SDHCI_INT_CARD_INT; | 2739 | host->thread_isr |= SDHCI_INT_CARD_INT; |
2739 | result = IRQ_WAKE_THREAD; | 2740 | result = IRQ_WAKE_THREAD; |
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index c12d2618eebf..3872ab96b80a 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c | |||
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev) | |||
1152 | if (skb == NULL) | 1152 | if (skb == NULL) |
1153 | break; | 1153 | break; |
1154 | np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1154 | np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1155 | if (pci_dma_mapping_error(np->pci_dev, | ||
1156 | np->rx_info[i].mapping)) { | ||
1157 | dev_kfree_skb(skb); | ||
1158 | np->rx_info[i].skb = NULL; | ||
1159 | break; | ||
1160 | } | ||
1155 | /* Grrr, we cannot offset to correctly align the IP header. */ | 1161 | /* Grrr, we cannot offset to correctly align the IP header. */ |
1156 | np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); | 1162 | np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); |
1157 | } | 1163 | } |
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1182 | { | 1188 | { |
1183 | struct netdev_private *np = netdev_priv(dev); | 1189 | struct netdev_private *np = netdev_priv(dev); |
1184 | unsigned int entry; | 1190 | unsigned int entry; |
1191 | unsigned int prev_tx; | ||
1185 | u32 status; | 1192 | u32 status; |
1186 | int i; | 1193 | int i, j; |
1187 | 1194 | ||
1188 | /* | 1195 | /* |
1189 | * be cautious here, wrapping the queue has weird semantics | 1196 | * be cautious here, wrapping the queue has weird semantics |
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1201 | } | 1208 | } |
1202 | #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ | 1209 | #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ |
1203 | 1210 | ||
1211 | prev_tx = np->cur_tx; | ||
1204 | entry = np->cur_tx % TX_RING_SIZE; | 1212 | entry = np->cur_tx % TX_RING_SIZE; |
1205 | for (i = 0; i < skb_num_frags(skb); i++) { | 1213 | for (i = 0; i < skb_num_frags(skb); i++) { |
1206 | int wrap_ring = 0; | 1214 | int wrap_ring = 0; |
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1234 | skb_frag_size(this_frag), | 1242 | skb_frag_size(this_frag), |
1235 | PCI_DMA_TODEVICE); | 1243 | PCI_DMA_TODEVICE); |
1236 | } | 1244 | } |
1245 | if (pci_dma_mapping_error(np->pci_dev, | ||
1246 | np->tx_info[entry].mapping)) { | ||
1247 | dev->stats.tx_dropped++; | ||
1248 | goto err_out; | ||
1249 | } | ||
1237 | 1250 | ||
1238 | np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); | 1251 | np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); |
1239 | np->tx_ring[entry].status = cpu_to_le32(status); | 1252 | np->tx_ring[entry].status = cpu_to_le32(status); |
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1268 | netif_stop_queue(dev); | 1281 | netif_stop_queue(dev); |
1269 | 1282 | ||
1270 | return NETDEV_TX_OK; | 1283 | return NETDEV_TX_OK; |
1271 | } | ||
1272 | 1284 | ||
1285 | err_out: | ||
1286 | entry = prev_tx % TX_RING_SIZE; | ||
1287 | np->tx_info[entry].skb = NULL; | ||
1288 | if (i > 0) { | ||
1289 | pci_unmap_single(np->pci_dev, | ||
1290 | np->tx_info[entry].mapping, | ||
1291 | skb_first_frag_len(skb), | ||
1292 | PCI_DMA_TODEVICE); | ||
1293 | np->tx_info[entry].mapping = 0; | ||
1294 | entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; | ||
1295 | for (j = 1; j < i; j++) { | ||
1296 | pci_unmap_single(np->pci_dev, | ||
1297 | np->tx_info[entry].mapping, | ||
1298 | skb_frag_size( | ||
1299 | &skb_shinfo(skb)->frags[j-1]), | ||
1300 | PCI_DMA_TODEVICE); | ||
1301 | entry++; | ||
1302 | } | ||
1303 | } | ||
1304 | dev_kfree_skb_any(skb); | ||
1305 | np->cur_tx = prev_tx; | ||
1306 | return NETDEV_TX_OK; | ||
1307 | } | ||
1273 | 1308 | ||
1274 | /* The interrupt handler does all of the Rx thread work and cleans up | 1309 | /* The interrupt handler does all of the Rx thread work and cleans up |
1275 | after the Tx thread. */ | 1310 | after the Tx thread. */ |
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev) | |||
1569 | break; /* Better luck next round. */ | 1604 | break; /* Better luck next round. */ |
1570 | np->rx_info[entry].mapping = | 1605 | np->rx_info[entry].mapping = |
1571 | pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1606 | pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1607 | if (pci_dma_mapping_error(np->pci_dev, | ||
1608 | np->rx_info[entry].mapping)) { | ||
1609 | dev_kfree_skb(skb); | ||
1610 | np->rx_info[entry].skb = NULL; | ||
1611 | break; | ||
1612 | } | ||
1572 | np->rx_ring[entry].rxaddr = | 1613 | np->rx_ring[entry].rxaddr = |
1573 | cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); | 1614 | cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); |
1574 | } | 1615 | } |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index c0fb80acc2da..baba2db9d9c2 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -43,13 +43,13 @@ | |||
43 | #define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ | 43 | #define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ |
44 | #define MIN_RX_RING_SIZE 64 | 44 | #define MIN_RX_RING_SIZE 64 |
45 | #define MAX_RX_RING_SIZE 8192 | 45 | #define MAX_RX_RING_SIZE 8192 |
46 | #define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ | 46 | #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ |
47 | * (bp)->rx_ring_size) | 47 | * (bp)->rx_ring_size) |
48 | 48 | ||
49 | #define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ | 49 | #define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ |
50 | #define MIN_TX_RING_SIZE 64 | 50 | #define MIN_TX_RING_SIZE 64 |
51 | #define MAX_TX_RING_SIZE 4096 | 51 | #define MAX_TX_RING_SIZE 4096 |
52 | #define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ | 52 | #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ |
53 | * (bp)->tx_ring_size) | 53 | * (bp)->tx_ring_size) |
54 | 54 | ||
55 | /* level of occupied TX descriptors under which we wake up TX process */ | 55 | /* level of occupied TX descriptors under which we wake up TX process */ |
@@ -78,6 +78,37 @@ | |||
78 | */ | 78 | */ |
79 | #define MACB_HALT_TIMEOUT 1230 | 79 | #define MACB_HALT_TIMEOUT 1230 |
80 | 80 | ||
81 | /* DMA buffer descriptor might be different size | ||
82 | * depends on hardware configuration. | ||
83 | */ | ||
84 | static unsigned int macb_dma_desc_get_size(struct macb *bp) | ||
85 | { | ||
86 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
87 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) | ||
88 | return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64); | ||
89 | #endif | ||
90 | return sizeof(struct macb_dma_desc); | ||
91 | } | ||
92 | |||
93 | static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx) | ||
94 | { | ||
95 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
96 | /* Dma buffer descriptor is 4 words length (instead of 2 words) | ||
97 | * for 64b GEM. | ||
98 | */ | ||
99 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) | ||
100 | idx <<= 1; | ||
101 | #endif | ||
102 | return idx; | ||
103 | } | ||
104 | |||
105 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
106 | static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) | ||
107 | { | ||
108 | return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc)); | ||
109 | } | ||
110 | #endif | ||
111 | |||
81 | /* Ring buffer accessors */ | 112 | /* Ring buffer accessors */ |
82 | static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) | 113 | static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) |
83 | { | 114 | { |
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) | |||
87 | static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, | 118 | static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, |
88 | unsigned int index) | 119 | unsigned int index) |
89 | { | 120 | { |
90 | return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)]; | 121 | index = macb_tx_ring_wrap(queue->bp, index); |
122 | index = macb_adj_dma_desc_idx(queue->bp, index); | ||
123 | return &queue->tx_ring[index]; | ||
91 | } | 124 | } |
92 | 125 | ||
93 | static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, | 126 | static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, |
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) | |||
101 | dma_addr_t offset; | 134 | dma_addr_t offset; |
102 | 135 | ||
103 | offset = macb_tx_ring_wrap(queue->bp, index) * | 136 | offset = macb_tx_ring_wrap(queue->bp, index) * |
104 | sizeof(struct macb_dma_desc); | 137 | macb_dma_desc_get_size(queue->bp); |
105 | 138 | ||
106 | return queue->tx_ring_dma + offset; | 139 | return queue->tx_ring_dma + offset; |
107 | } | 140 | } |
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) | |||
113 | 146 | ||
114 | static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) | 147 | static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) |
115 | { | 148 | { |
116 | return &bp->rx_ring[macb_rx_ring_wrap(bp, index)]; | 149 | index = macb_rx_ring_wrap(bp, index); |
150 | index = macb_adj_dma_desc_idx(bp, index); | ||
151 | return &bp->rx_ring[index]; | ||
117 | } | 152 | } |
118 | 153 | ||
119 | static void *macb_rx_buffer(struct macb *bp, unsigned int index) | 154 | static void *macb_rx_buffer(struct macb *bp, unsigned int index) |
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) | |||
560 | } | 595 | } |
561 | } | 596 | } |
562 | 597 | ||
563 | static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr) | 598 | static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) |
564 | { | 599 | { |
565 | desc->addr = (u32)addr; | ||
566 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 600 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
567 | desc->addrh = (u32)(addr >> 32); | 601 | struct macb_dma_desc_64 *desc_64; |
602 | |||
603 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) { | ||
604 | desc_64 = macb_64b_desc(bp, desc); | ||
605 | desc_64->addrh = upper_32_bits(addr); | ||
606 | } | ||
568 | #endif | 607 | #endif |
608 | desc->addr = lower_32_bits(addr); | ||
609 | } | ||
610 | |||
611 | static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) | ||
612 | { | ||
613 | dma_addr_t addr = 0; | ||
614 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
615 | struct macb_dma_desc_64 *desc_64; | ||
616 | |||
617 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) { | ||
618 | desc_64 = macb_64b_desc(bp, desc); | ||
619 | addr = ((u64)(desc_64->addrh) << 32); | ||
620 | } | ||
621 | #endif | ||
622 | addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); | ||
623 | return addr; | ||
569 | } | 624 | } |
570 | 625 | ||
571 | static void macb_tx_error_task(struct work_struct *work) | 626 | static void macb_tx_error_task(struct work_struct *work) |
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work) | |||
649 | 704 | ||
650 | /* Set end of TX queue */ | 705 | /* Set end of TX queue */ |
651 | desc = macb_tx_desc(queue, 0); | 706 | desc = macb_tx_desc(queue, 0); |
652 | macb_set_addr(desc, 0); | 707 | macb_set_addr(bp, desc, 0); |
653 | desc->ctrl = MACB_BIT(TX_USED); | 708 | desc->ctrl = MACB_BIT(TX_USED); |
654 | 709 | ||
655 | /* Make descriptor updates visible to hardware */ | 710 | /* Make descriptor updates visible to hardware */ |
656 | wmb(); | 711 | wmb(); |
657 | 712 | ||
658 | /* Reinitialize the TX desc queue */ | 713 | /* Reinitialize the TX desc queue */ |
659 | queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); | 714 | queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); |
660 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 715 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
661 | queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); | 716 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) |
717 | queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); | ||
662 | #endif | 718 | #endif |
663 | /* Make TX ring reflect state of hardware */ | 719 | /* Make TX ring reflect state of hardware */ |
664 | queue->tx_head = 0; | 720 | queue->tx_head = 0; |
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp) | |||
750 | unsigned int entry; | 806 | unsigned int entry; |
751 | struct sk_buff *skb; | 807 | struct sk_buff *skb; |
752 | dma_addr_t paddr; | 808 | dma_addr_t paddr; |
809 | struct macb_dma_desc *desc; | ||
753 | 810 | ||
754 | while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, | 811 | while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, |
755 | bp->rx_ring_size) > 0) { | 812 | bp->rx_ring_size) > 0) { |
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp) | |||
759 | rmb(); | 816 | rmb(); |
760 | 817 | ||
761 | bp->rx_prepared_head++; | 818 | bp->rx_prepared_head++; |
819 | desc = macb_rx_desc(bp, entry); | ||
762 | 820 | ||
763 | if (!bp->rx_skbuff[entry]) { | 821 | if (!bp->rx_skbuff[entry]) { |
764 | /* allocate sk_buff for this free entry in ring */ | 822 | /* allocate sk_buff for this free entry in ring */ |
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp) | |||
782 | 840 | ||
783 | if (entry == bp->rx_ring_size - 1) | 841 | if (entry == bp->rx_ring_size - 1) |
784 | paddr |= MACB_BIT(RX_WRAP); | 842 | paddr |= MACB_BIT(RX_WRAP); |
785 | macb_set_addr(&(bp->rx_ring[entry]), paddr); | 843 | macb_set_addr(bp, desc, paddr); |
786 | bp->rx_ring[entry].ctrl = 0; | 844 | desc->ctrl = 0; |
787 | 845 | ||
788 | /* properly align Ethernet header */ | 846 | /* properly align Ethernet header */ |
789 | skb_reserve(skb, NET_IP_ALIGN); | 847 | skb_reserve(skb, NET_IP_ALIGN); |
790 | } else { | 848 | } else { |
791 | bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); | 849 | desc->addr &= ~MACB_BIT(RX_USED); |
792 | bp->rx_ring[entry].ctrl = 0; | 850 | desc->ctrl = 0; |
793 | } | 851 | } |
794 | } | 852 | } |
795 | 853 | ||
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget) | |||
835 | bool rxused; | 893 | bool rxused; |
836 | 894 | ||
837 | entry = macb_rx_ring_wrap(bp, bp->rx_tail); | 895 | entry = macb_rx_ring_wrap(bp, bp->rx_tail); |
838 | desc = &bp->rx_ring[entry]; | 896 | desc = macb_rx_desc(bp, entry); |
839 | 897 | ||
840 | /* Make hw descriptor updates visible to CPU */ | 898 | /* Make hw descriptor updates visible to CPU */ |
841 | rmb(); | 899 | rmb(); |
842 | 900 | ||
843 | rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; | 901 | rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; |
844 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); | 902 | addr = macb_get_addr(bp, desc); |
845 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
846 | addr |= ((u64)(desc->addrh) << 32); | ||
847 | #endif | ||
848 | ctrl = desc->ctrl; | 903 | ctrl = desc->ctrl; |
849 | 904 | ||
850 | if (!rxused) | 905 | if (!rxused) |
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
987 | static inline void macb_init_rx_ring(struct macb *bp) | 1042 | static inline void macb_init_rx_ring(struct macb *bp) |
988 | { | 1043 | { |
989 | dma_addr_t addr; | 1044 | dma_addr_t addr; |
1045 | struct macb_dma_desc *desc = NULL; | ||
990 | int i; | 1046 | int i; |
991 | 1047 | ||
992 | addr = bp->rx_buffers_dma; | 1048 | addr = bp->rx_buffers_dma; |
993 | for (i = 0; i < bp->rx_ring_size; i++) { | 1049 | for (i = 0; i < bp->rx_ring_size; i++) { |
994 | bp->rx_ring[i].addr = addr; | 1050 | desc = macb_rx_desc(bp, i); |
995 | bp->rx_ring[i].ctrl = 0; | 1051 | macb_set_addr(bp, desc, addr); |
1052 | desc->ctrl = 0; | ||
996 | addr += bp->rx_buffer_size; | 1053 | addr += bp->rx_buffer_size; |
997 | } | 1054 | } |
998 | bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP); | 1055 | desc->addr |= MACB_BIT(RX_WRAP); |
999 | bp->rx_tail = 0; | 1056 | bp->rx_tail = 0; |
1000 | } | 1057 | } |
1001 | 1058 | ||
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget) | |||
1008 | 1065 | ||
1009 | for (tail = bp->rx_tail; budget > 0; tail++) { | 1066 | for (tail = bp->rx_tail; budget > 0; tail++) { |
1010 | struct macb_dma_desc *desc = macb_rx_desc(bp, tail); | 1067 | struct macb_dma_desc *desc = macb_rx_desc(bp, tail); |
1011 | u32 addr, ctrl; | 1068 | u32 ctrl; |
1012 | 1069 | ||
1013 | /* Make hw descriptor updates visible to CPU */ | 1070 | /* Make hw descriptor updates visible to CPU */ |
1014 | rmb(); | 1071 | rmb(); |
1015 | 1072 | ||
1016 | addr = desc->addr; | ||
1017 | ctrl = desc->ctrl; | 1073 | ctrl = desc->ctrl; |
1018 | 1074 | ||
1019 | if (!(addr & MACB_BIT(RX_USED))) | 1075 | if (!(desc->addr & MACB_BIT(RX_USED))) |
1020 | break; | 1076 | break; |
1021 | 1077 | ||
1022 | if (ctrl & MACB_BIT(RX_SOF)) { | 1078 | if (ctrl & MACB_BIT(RX_SOF)) { |
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp, | |||
1336 | i = tx_head; | 1392 | i = tx_head; |
1337 | entry = macb_tx_ring_wrap(bp, i); | 1393 | entry = macb_tx_ring_wrap(bp, i); |
1338 | ctrl = MACB_BIT(TX_USED); | 1394 | ctrl = MACB_BIT(TX_USED); |
1339 | desc = &queue->tx_ring[entry]; | 1395 | desc = macb_tx_desc(queue, entry); |
1340 | desc->ctrl = ctrl; | 1396 | desc->ctrl = ctrl; |
1341 | 1397 | ||
1342 | if (lso_ctrl) { | 1398 | if (lso_ctrl) { |
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp, | |||
1358 | i--; | 1414 | i--; |
1359 | entry = macb_tx_ring_wrap(bp, i); | 1415 | entry = macb_tx_ring_wrap(bp, i); |
1360 | tx_skb = &queue->tx_skb[entry]; | 1416 | tx_skb = &queue->tx_skb[entry]; |
1361 | desc = &queue->tx_ring[entry]; | 1417 | desc = macb_tx_desc(queue, entry); |
1362 | 1418 | ||
1363 | ctrl = (u32)tx_skb->size; | 1419 | ctrl = (u32)tx_skb->size; |
1364 | if (eof) { | 1420 | if (eof) { |
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp, | |||
1379 | ctrl |= MACB_BF(MSS_MFS, mss_mfs); | 1435 | ctrl |= MACB_BF(MSS_MFS, mss_mfs); |
1380 | 1436 | ||
1381 | /* Set TX buffer descriptor */ | 1437 | /* Set TX buffer descriptor */ |
1382 | macb_set_addr(desc, tx_skb->mapping); | 1438 | macb_set_addr(bp, desc, tx_skb->mapping); |
1383 | /* desc->addr must be visible to hardware before clearing | 1439 | /* desc->addr must be visible to hardware before clearing |
1384 | * 'TX_USED' bit in desc->ctrl. | 1440 | * 'TX_USED' bit in desc->ctrl. |
1385 | */ | 1441 | */ |
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp) | |||
1586 | if (!skb) | 1642 | if (!skb) |
1587 | continue; | 1643 | continue; |
1588 | 1644 | ||
1589 | desc = &bp->rx_ring[i]; | 1645 | desc = macb_rx_desc(bp, i); |
1590 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); | 1646 | addr = macb_get_addr(bp, desc); |
1591 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 1647 | |
1592 | addr |= ((u64)(desc->addrh) << 32); | ||
1593 | #endif | ||
1594 | dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, | 1648 | dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, |
1595 | DMA_FROM_DEVICE); | 1649 | DMA_FROM_DEVICE); |
1596 | dev_kfree_skb_any(skb); | 1650 | dev_kfree_skb_any(skb); |
@@ -1711,15 +1765,17 @@ out_err: | |||
1711 | static void gem_init_rings(struct macb *bp) | 1765 | static void gem_init_rings(struct macb *bp) |
1712 | { | 1766 | { |
1713 | struct macb_queue *queue; | 1767 | struct macb_queue *queue; |
1768 | struct macb_dma_desc *desc = NULL; | ||
1714 | unsigned int q; | 1769 | unsigned int q; |
1715 | int i; | 1770 | int i; |
1716 | 1771 | ||
1717 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | 1772 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
1718 | for (i = 0; i < bp->tx_ring_size; i++) { | 1773 | for (i = 0; i < bp->tx_ring_size; i++) { |
1719 | queue->tx_ring[i].addr = 0; | 1774 | desc = macb_tx_desc(queue, i); |
1720 | queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); | 1775 | macb_set_addr(bp, desc, 0); |
1776 | desc->ctrl = MACB_BIT(TX_USED); | ||
1721 | } | 1777 | } |
1722 | queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); | 1778 | desc->ctrl |= MACB_BIT(TX_WRAP); |
1723 | queue->tx_head = 0; | 1779 | queue->tx_head = 0; |
1724 | queue->tx_tail = 0; | 1780 | queue->tx_tail = 0; |
1725 | } | 1781 | } |
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp) | |||
1733 | static void macb_init_rings(struct macb *bp) | 1789 | static void macb_init_rings(struct macb *bp) |
1734 | { | 1790 | { |
1735 | int i; | 1791 | int i; |
1792 | struct macb_dma_desc *desc = NULL; | ||
1736 | 1793 | ||
1737 | macb_init_rx_ring(bp); | 1794 | macb_init_rx_ring(bp); |
1738 | 1795 | ||
1739 | for (i = 0; i < bp->tx_ring_size; i++) { | 1796 | for (i = 0; i < bp->tx_ring_size; i++) { |
1740 | bp->queues[0].tx_ring[i].addr = 0; | 1797 | desc = macb_tx_desc(&bp->queues[0], i); |
1741 | bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); | 1798 | macb_set_addr(bp, desc, 0); |
1799 | desc->ctrl = MACB_BIT(TX_USED); | ||
1742 | } | 1800 | } |
1743 | bp->queues[0].tx_head = 0; | 1801 | bp->queues[0].tx_head = 0; |
1744 | bp->queues[0].tx_tail = 0; | 1802 | bp->queues[0].tx_tail = 0; |
1745 | bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); | 1803 | desc->ctrl |= MACB_BIT(TX_WRAP); |
1746 | } | 1804 | } |
1747 | 1805 | ||
1748 | static void macb_reset_hw(struct macb *bp) | 1806 | static void macb_reset_hw(struct macb *bp) |
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp) | |||
1863 | dmacfg &= ~GEM_BIT(TXCOEN); | 1921 | dmacfg &= ~GEM_BIT(TXCOEN); |
1864 | 1922 | ||
1865 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 1923 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
1866 | dmacfg |= GEM_BIT(ADDR64); | 1924 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) |
1925 | dmacfg |= GEM_BIT(ADDR64); | ||
1867 | #endif | 1926 | #endif |
1868 | netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", | 1927 | netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", |
1869 | dmacfg); | 1928 | dmacfg); |
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp) | |||
1910 | macb_configure_dma(bp); | 1969 | macb_configure_dma(bp); |
1911 | 1970 | ||
1912 | /* Initialize TX and RX buffers */ | 1971 | /* Initialize TX and RX buffers */ |
1913 | macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma)); | 1972 | macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma)); |
1914 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 1973 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
1915 | macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32)); | 1974 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) |
1975 | macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma)); | ||
1916 | #endif | 1976 | #endif |
1917 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | 1977 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
1918 | queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); | 1978 | queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); |
1919 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 1979 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
1920 | queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); | 1980 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) |
1981 | queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); | ||
1921 | #endif | 1982 | #endif |
1922 | 1983 | ||
1923 | /* Enable interrupts */ | 1984 | /* Enable interrupts */ |
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev) | |||
2627 | queue->IMR = GEM_IMR(hw_q - 1); | 2688 | queue->IMR = GEM_IMR(hw_q - 1); |
2628 | queue->TBQP = GEM_TBQP(hw_q - 1); | 2689 | queue->TBQP = GEM_TBQP(hw_q - 1); |
2629 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 2690 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
2630 | queue->TBQPH = GEM_TBQPH(hw_q -1); | 2691 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) |
2692 | queue->TBQPH = GEM_TBQPH(hw_q - 1); | ||
2631 | #endif | 2693 | #endif |
2632 | } else { | 2694 | } else { |
2633 | /* queue0 uses legacy registers */ | 2695 | /* queue0 uses legacy registers */ |
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev) | |||
2637 | queue->IMR = MACB_IMR; | 2699 | queue->IMR = MACB_IMR; |
2638 | queue->TBQP = MACB_TBQP; | 2700 | queue->TBQP = MACB_TBQP; |
2639 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 2701 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
2640 | queue->TBQPH = MACB_TBQPH; | 2702 | if (bp->hw_dma_cap == HW_DMA_CAP_64B) |
2703 | queue->TBQPH = MACB_TBQPH; | ||
2641 | #endif | 2704 | #endif |
2642 | } | 2705 | } |
2643 | 2706 | ||
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev) | |||
2730 | static int at91ether_start(struct net_device *dev) | 2793 | static int at91ether_start(struct net_device *dev) |
2731 | { | 2794 | { |
2732 | struct macb *lp = netdev_priv(dev); | 2795 | struct macb *lp = netdev_priv(dev); |
2796 | struct macb_dma_desc *desc; | ||
2733 | dma_addr_t addr; | 2797 | dma_addr_t addr; |
2734 | u32 ctl; | 2798 | u32 ctl; |
2735 | int i; | 2799 | int i; |
2736 | 2800 | ||
2737 | lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, | 2801 | lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, |
2738 | (AT91ETHER_MAX_RX_DESCR * | 2802 | (AT91ETHER_MAX_RX_DESCR * |
2739 | sizeof(struct macb_dma_desc)), | 2803 | macb_dma_desc_get_size(lp)), |
2740 | &lp->rx_ring_dma, GFP_KERNEL); | 2804 | &lp->rx_ring_dma, GFP_KERNEL); |
2741 | if (!lp->rx_ring) | 2805 | if (!lp->rx_ring) |
2742 | return -ENOMEM; | 2806 | return -ENOMEM; |
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev) | |||
2748 | if (!lp->rx_buffers) { | 2812 | if (!lp->rx_buffers) { |
2749 | dma_free_coherent(&lp->pdev->dev, | 2813 | dma_free_coherent(&lp->pdev->dev, |
2750 | AT91ETHER_MAX_RX_DESCR * | 2814 | AT91ETHER_MAX_RX_DESCR * |
2751 | sizeof(struct macb_dma_desc), | 2815 | macb_dma_desc_get_size(lp), |
2752 | lp->rx_ring, lp->rx_ring_dma); | 2816 | lp->rx_ring, lp->rx_ring_dma); |
2753 | lp->rx_ring = NULL; | 2817 | lp->rx_ring = NULL; |
2754 | return -ENOMEM; | 2818 | return -ENOMEM; |
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev) | |||
2756 | 2820 | ||
2757 | addr = lp->rx_buffers_dma; | 2821 | addr = lp->rx_buffers_dma; |
2758 | for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { | 2822 | for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { |
2759 | lp->rx_ring[i].addr = addr; | 2823 | desc = macb_rx_desc(lp, i); |
2760 | lp->rx_ring[i].ctrl = 0; | 2824 | macb_set_addr(lp, desc, addr); |
2825 | desc->ctrl = 0; | ||
2761 | addr += AT91ETHER_MAX_RBUFF_SZ; | 2826 | addr += AT91ETHER_MAX_RBUFF_SZ; |
2762 | } | 2827 | } |
2763 | 2828 | ||
2764 | /* Set the Wrap bit on the last descriptor */ | 2829 | /* Set the Wrap bit on the last descriptor */ |
2765 | lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); | 2830 | desc->addr |= MACB_BIT(RX_WRAP); |
2766 | 2831 | ||
2767 | /* Reset buffer index */ | 2832 | /* Reset buffer index */ |
2768 | lp->rx_tail = 0; | 2833 | lp->rx_tail = 0; |
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev) | |||
2834 | 2899 | ||
2835 | dma_free_coherent(&lp->pdev->dev, | 2900 | dma_free_coherent(&lp->pdev->dev, |
2836 | AT91ETHER_MAX_RX_DESCR * | 2901 | AT91ETHER_MAX_RX_DESCR * |
2837 | sizeof(struct macb_dma_desc), | 2902 | macb_dma_desc_get_size(lp), |
2838 | lp->rx_ring, lp->rx_ring_dma); | 2903 | lp->rx_ring, lp->rx_ring_dma); |
2839 | lp->rx_ring = NULL; | 2904 | lp->rx_ring = NULL; |
2840 | 2905 | ||
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2885 | static void at91ether_rx(struct net_device *dev) | 2950 | static void at91ether_rx(struct net_device *dev) |
2886 | { | 2951 | { |
2887 | struct macb *lp = netdev_priv(dev); | 2952 | struct macb *lp = netdev_priv(dev); |
2953 | struct macb_dma_desc *desc; | ||
2888 | unsigned char *p_recv; | 2954 | unsigned char *p_recv; |
2889 | struct sk_buff *skb; | 2955 | struct sk_buff *skb; |
2890 | unsigned int pktlen; | 2956 | unsigned int pktlen; |
2891 | 2957 | ||
2892 | while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { | 2958 | desc = macb_rx_desc(lp, lp->rx_tail); |
2959 | while (desc->addr & MACB_BIT(RX_USED)) { | ||
2893 | p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; | 2960 | p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; |
2894 | pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); | 2961 | pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); |
2895 | skb = netdev_alloc_skb(dev, pktlen + 2); | 2962 | skb = netdev_alloc_skb(dev, pktlen + 2); |
2896 | if (skb) { | 2963 | if (skb) { |
2897 | skb_reserve(skb, 2); | 2964 | skb_reserve(skb, 2); |
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev) | |||
2905 | lp->stats.rx_dropped++; | 2972 | lp->stats.rx_dropped++; |
2906 | } | 2973 | } |
2907 | 2974 | ||
2908 | if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) | 2975 | if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) |
2909 | lp->stats.multicast++; | 2976 | lp->stats.multicast++; |
2910 | 2977 | ||
2911 | /* reset ownership bit */ | 2978 | /* reset ownership bit */ |
2912 | lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); | 2979 | desc->addr &= ~MACB_BIT(RX_USED); |
2913 | 2980 | ||
2914 | /* wrap after last buffer */ | 2981 | /* wrap after last buffer */ |
2915 | if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) | 2982 | if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) |
2916 | lp->rx_tail = 0; | 2983 | lp->rx_tail = 0; |
2917 | else | 2984 | else |
2918 | lp->rx_tail++; | 2985 | lp->rx_tail++; |
2986 | |||
2987 | desc = macb_rx_desc(lp, lp->rx_tail); | ||
2919 | } | 2988 | } |
2920 | } | 2989 | } |
2921 | 2990 | ||
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev) | |||
3211 | device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); | 3280 | device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); |
3212 | 3281 | ||
3213 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 3282 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
3214 | if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32) | 3283 | if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { |
3215 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); | 3284 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); |
3285 | bp->hw_dma_cap = HW_DMA_CAP_64B; | ||
3286 | } else | ||
3287 | bp->hw_dma_cap = HW_DMA_CAP_32B; | ||
3216 | #endif | 3288 | #endif |
3217 | 3289 | ||
3218 | spin_lock_init(&bp->lock); | 3290 | spin_lock_init(&bp->lock); |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d67adad67be1..fc8550a5d47f 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -385,6 +385,8 @@ | |||
385 | /* Bitfields in DCFG6. */ | 385 | /* Bitfields in DCFG6. */ |
386 | #define GEM_PBUF_LSO_OFFSET 27 | 386 | #define GEM_PBUF_LSO_OFFSET 27 |
387 | #define GEM_PBUF_LSO_SIZE 1 | 387 | #define GEM_PBUF_LSO_SIZE 1 |
388 | #define GEM_DAW64_OFFSET 23 | ||
389 | #define GEM_DAW64_SIZE 1 | ||
388 | 390 | ||
389 | /* Constants for CLK */ | 391 | /* Constants for CLK */ |
390 | #define MACB_CLK_DIV8 0 | 392 | #define MACB_CLK_DIV8 0 |
@@ -487,12 +489,20 @@ | |||
487 | struct macb_dma_desc { | 489 | struct macb_dma_desc { |
488 | u32 addr; | 490 | u32 addr; |
489 | u32 ctrl; | 491 | u32 ctrl; |
492 | }; | ||
493 | |||
490 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 494 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
491 | u32 addrh; | 495 | enum macb_hw_dma_cap { |
492 | u32 resvd; | 496 | HW_DMA_CAP_32B, |
493 | #endif | 497 | HW_DMA_CAP_64B, |
494 | }; | 498 | }; |
495 | 499 | ||
500 | struct macb_dma_desc_64 { | ||
501 | u32 addrh; | ||
502 | u32 resvd; | ||
503 | }; | ||
504 | #endif | ||
505 | |||
496 | /* DMA descriptor bitfields */ | 506 | /* DMA descriptor bitfields */ |
497 | #define MACB_RX_USED_OFFSET 0 | 507 | #define MACB_RX_USED_OFFSET 0 |
498 | #define MACB_RX_USED_SIZE 1 | 508 | #define MACB_RX_USED_SIZE 1 |
@@ -874,6 +884,10 @@ struct macb { | |||
874 | unsigned int jumbo_max_len; | 884 | unsigned int jumbo_max_len; |
875 | 885 | ||
876 | u32 wol; | 886 | u32 wol; |
887 | |||
888 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
889 | enum macb_hw_dma_cap hw_dma_cap; | ||
890 | #endif | ||
877 | }; | 891 | }; |
878 | 892 | ||
879 | static inline bool macb_is_gem(struct macb *bp) | 893 | static inline bool macb_is_gem(struct macb *bp) |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 2f85b64f01fa..1e4695270da6 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | |||
@@ -31,6 +31,7 @@ struct lmac { | |||
31 | u8 lmac_type; | 31 | u8 lmac_type; |
32 | u8 lane_to_sds; | 32 | u8 lane_to_sds; |
33 | bool use_training; | 33 | bool use_training; |
34 | bool autoneg; | ||
34 | bool link_up; | 35 | bool link_up; |
35 | int lmacid; /* ID within BGX */ | 36 | int lmacid; /* ID within BGX */ |
36 | int lmacid_bd; /* ID on board */ | 37 | int lmacid_bd; /* ID on board */ |
@@ -461,7 +462,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) | |||
461 | /* power down, reset autoneg, autoneg enable */ | 462 | /* power down, reset autoneg, autoneg enable */ |
462 | cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); | 463 | cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); |
463 | cfg &= ~PCS_MRX_CTL_PWR_DN; | 464 | cfg &= ~PCS_MRX_CTL_PWR_DN; |
464 | cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); | 465 | cfg |= PCS_MRX_CTL_RST_AN; |
466 | if (lmac->phydev) { | ||
467 | cfg |= PCS_MRX_CTL_AN_EN; | ||
468 | } else { | ||
469 | /* In scenarios where PHY driver is not present or it's a | ||
470 | * non-standard PHY, FW sets AN_EN to inform Linux driver | ||
471 | * to do auto-neg and link polling or not. | ||
472 | */ | ||
473 | if (cfg & PCS_MRX_CTL_AN_EN) | ||
474 | lmac->autoneg = true; | ||
475 | } | ||
465 | bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); | 476 | bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); |
466 | 477 | ||
467 | if (lmac->lmac_type == BGX_MODE_QSGMII) { | 478 | if (lmac->lmac_type == BGX_MODE_QSGMII) { |
@@ -472,7 +483,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) | |||
472 | return 0; | 483 | return 0; |
473 | } | 484 | } |
474 | 485 | ||
475 | if (lmac->lmac_type == BGX_MODE_SGMII) { | 486 | if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) { |
476 | if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, | 487 | if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, |
477 | PCS_MRX_STATUS_AN_CPT, false)) { | 488 | PCS_MRX_STATUS_AN_CPT, false)) { |
478 | dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); | 489 | dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); |
@@ -678,12 +689,71 @@ static int bgx_xaui_check_link(struct lmac *lmac) | |||
678 | return -1; | 689 | return -1; |
679 | } | 690 | } |
680 | 691 | ||
692 | static void bgx_poll_for_sgmii_link(struct lmac *lmac) | ||
693 | { | ||
694 | u64 pcs_link, an_result; | ||
695 | u8 speed; | ||
696 | |||
697 | pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid, | ||
698 | BGX_GMP_PCS_MRX_STATUS); | ||
699 | |||
700 | /*Link state bit is sticky, read it again*/ | ||
701 | if (!(pcs_link & PCS_MRX_STATUS_LINK)) | ||
702 | pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid, | ||
703 | BGX_GMP_PCS_MRX_STATUS); | ||
704 | |||
705 | if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS, | ||
706 | PCS_MRX_STATUS_AN_CPT, false)) { | ||
707 | lmac->link_up = false; | ||
708 | lmac->last_speed = SPEED_UNKNOWN; | ||
709 | lmac->last_duplex = DUPLEX_UNKNOWN; | ||
710 | goto next_poll; | ||
711 | } | ||
712 | |||
713 | lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false; | ||
714 | an_result = bgx_reg_read(lmac->bgx, lmac->lmacid, | ||
715 | BGX_GMP_PCS_ANX_AN_RESULTS); | ||
716 | |||
717 | speed = (an_result >> 3) & 0x3; | ||
718 | lmac->last_duplex = (an_result >> 1) & 0x1; | ||
719 | switch (speed) { | ||
720 | case 0: | ||
721 | lmac->last_speed = 10; | ||
722 | break; | ||
723 | case 1: | ||
724 | lmac->last_speed = 100; | ||
725 | break; | ||
726 | case 2: | ||
727 | lmac->last_speed = 1000; | ||
728 | break; | ||
729 | default: | ||
730 | lmac->link_up = false; | ||
731 | lmac->last_speed = SPEED_UNKNOWN; | ||
732 | lmac->last_duplex = DUPLEX_UNKNOWN; | ||
733 | break; | ||
734 | } | ||
735 | |||
736 | next_poll: | ||
737 | |||
738 | if (lmac->last_link != lmac->link_up) { | ||
739 | if (lmac->link_up) | ||
740 | bgx_sgmii_change_link_state(lmac); | ||
741 | lmac->last_link = lmac->link_up; | ||
742 | } | ||
743 | |||
744 | queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3); | ||
745 | } | ||
746 | |||
681 | static void bgx_poll_for_link(struct work_struct *work) | 747 | static void bgx_poll_for_link(struct work_struct *work) |
682 | { | 748 | { |
683 | struct lmac *lmac; | 749 | struct lmac *lmac; |
684 | u64 spu_link, smu_link; | 750 | u64 spu_link, smu_link; |
685 | 751 | ||
686 | lmac = container_of(work, struct lmac, dwork.work); | 752 | lmac = container_of(work, struct lmac, dwork.work); |
753 | if (lmac->is_sgmii) { | ||
754 | bgx_poll_for_sgmii_link(lmac); | ||
755 | return; | ||
756 | } | ||
687 | 757 | ||
688 | /* Receive link is latching low. Force it high and verify it */ | 758 | /* Receive link is latching low. Force it high and verify it */ |
689 | bgx_reg_modify(lmac->bgx, lmac->lmacid, | 759 | bgx_reg_modify(lmac->bgx, lmac->lmacid, |
@@ -775,9 +845,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) | |||
775 | (lmac->lmac_type != BGX_MODE_XLAUI) && | 845 | (lmac->lmac_type != BGX_MODE_XLAUI) && |
776 | (lmac->lmac_type != BGX_MODE_40G_KR) && | 846 | (lmac->lmac_type != BGX_MODE_40G_KR) && |
777 | (lmac->lmac_type != BGX_MODE_10G_KR)) { | 847 | (lmac->lmac_type != BGX_MODE_10G_KR)) { |
778 | if (!lmac->phydev) | 848 | if (!lmac->phydev) { |
779 | return -ENODEV; | 849 | if (lmac->autoneg) { |
780 | 850 | bgx_reg_write(bgx, lmacid, | |
851 | BGX_GMP_PCS_LINKX_TIMER, | ||
852 | PCS_LINKX_TIMER_COUNT); | ||
853 | goto poll; | ||
854 | } else { | ||
855 | /* Default to below link speed and duplex */ | ||
856 | lmac->link_up = true; | ||
857 | lmac->last_speed = 1000; | ||
858 | lmac->last_duplex = 1; | ||
859 | bgx_sgmii_change_link_state(lmac); | ||
860 | return 0; | ||
861 | } | ||
862 | } | ||
781 | lmac->phydev->dev_flags = 0; | 863 | lmac->phydev->dev_flags = 0; |
782 | 864 | ||
783 | if (phy_connect_direct(&lmac->netdev, lmac->phydev, | 865 | if (phy_connect_direct(&lmac->netdev, lmac->phydev, |
@@ -786,15 +868,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) | |||
786 | return -ENODEV; | 868 | return -ENODEV; |
787 | 869 | ||
788 | phy_start_aneg(lmac->phydev); | 870 | phy_start_aneg(lmac->phydev); |
789 | } else { | 871 | return 0; |
790 | lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | | ||
791 | WQ_MEM_RECLAIM, 1); | ||
792 | if (!lmac->check_link) | ||
793 | return -ENOMEM; | ||
794 | INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); | ||
795 | queue_delayed_work(lmac->check_link, &lmac->dwork, 0); | ||
796 | } | 872 | } |
797 | 873 | ||
874 | poll: | ||
875 | lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | | ||
876 | WQ_MEM_RECLAIM, 1); | ||
877 | if (!lmac->check_link) | ||
878 | return -ENOMEM; | ||
879 | INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); | ||
880 | queue_delayed_work(lmac->check_link, &lmac->dwork, 0); | ||
881 | |||
798 | return 0; | 882 | return 0; |
799 | } | 883 | } |
800 | 884 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index c18ebfeb2039..a60f189429bb 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | |||
@@ -153,10 +153,15 @@ | |||
153 | #define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14) | 153 | #define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14) |
154 | #define PCS_MRX_CTL_RESET BIT_ULL(15) | 154 | #define PCS_MRX_CTL_RESET BIT_ULL(15) |
155 | #define BGX_GMP_PCS_MRX_STATUS 0x30008 | 155 | #define BGX_GMP_PCS_MRX_STATUS 0x30008 |
156 | #define PCS_MRX_STATUS_LINK BIT_ULL(2) | ||
156 | #define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) | 157 | #define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) |
158 | #define BGX_GMP_PCS_ANX_ADV 0x30010 | ||
157 | #define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 | 159 | #define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 |
160 | #define BGX_GMP_PCS_LINKX_TIMER 0x30040 | ||
161 | #define PCS_LINKX_TIMER_COUNT 0x1E84 | ||
158 | #define BGX_GMP_PCS_SGM_AN_ADV 0x30068 | 162 | #define BGX_GMP_PCS_SGM_AN_ADV 0x30068 |
159 | #define BGX_GMP_PCS_MISCX_CTL 0x30078 | 163 | #define BGX_GMP_PCS_MISCX_CTL 0x30078 |
164 | #define PCS_MISC_CTL_MODE BIT_ULL(8) | ||
160 | #define PCS_MISC_CTL_DISP_EN BIT_ULL(13) | 165 | #define PCS_MISC_CTL_DISP_EN BIT_ULL(13) |
161 | #define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) | 166 | #define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) |
162 | #define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full | 167 | #define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c index 67befedef709..578c7f8f11bf 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c | |||
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed) | |||
116 | int speed = 2; | 116 | int speed = 2; |
117 | 117 | ||
118 | if (!xcv) { | 118 | if (!xcv) { |
119 | dev_err(&xcv->pdev->dev, | 119 | pr_err("XCV init not done, probe may have failed\n"); |
120 | "XCV init not done, probe may have failed\n"); | ||
121 | return; | 120 | return; |
122 | } | 121 | } |
123 | 122 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 1a7f8ad7b9c6..cd49a54c538d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) | |||
362 | status = -EPERM; | 362 | status = -EPERM; |
363 | goto err; | 363 | goto err; |
364 | } | 364 | } |
365 | done: | 365 | |
366 | /* Remember currently programmed MAC */ | ||
366 | ether_addr_copy(adapter->dev_mac, addr->sa_data); | 367 | ether_addr_copy(adapter->dev_mac, addr->sa_data); |
368 | done: | ||
367 | ether_addr_copy(netdev->dev_addr, addr->sa_data); | 369 | ether_addr_copy(netdev->dev_addr, addr->sa_data); |
368 | dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); | 370 | dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); |
369 | return 0; | 371 | return 0; |
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter) | |||
3618 | { | 3620 | { |
3619 | /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ | 3621 | /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ |
3620 | if (!BEx_chip(adapter) || !be_virtfn(adapter) || | 3622 | if (!BEx_chip(adapter) || !be_virtfn(adapter) || |
3621 | check_privilege(adapter, BE_PRIV_FILTMGMT)) | 3623 | check_privilege(adapter, BE_PRIV_FILTMGMT)) { |
3622 | be_dev_mac_del(adapter, adapter->pmac_id[0]); | 3624 | be_dev_mac_del(adapter, adapter->pmac_id[0]); |
3625 | eth_zero_addr(adapter->dev_mac); | ||
3626 | } | ||
3623 | 3627 | ||
3624 | be_clear_uc_list(adapter); | 3628 | be_clear_uc_list(adapter); |
3625 | be_clear_mc_list(adapter); | 3629 | be_clear_mc_list(adapter); |
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter) | |||
3773 | if (status) | 3777 | if (status) |
3774 | return status; | 3778 | return status; |
3775 | 3779 | ||
3776 | /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ | 3780 | /* Normally this condition usually true as the ->dev_mac is zeroed. |
3777 | if (!BEx_chip(adapter) || !be_virtfn(adapter) || | 3781 | * But on BE3 VFs the initial MAC is pre-programmed by PF and |
3778 | check_privilege(adapter, BE_PRIV_FILTMGMT)) { | 3782 | * subsequent be_dev_mac_add() can fail (after fresh boot) |
3783 | */ | ||
3784 | if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) { | ||
3785 | int old_pmac_id = -1; | ||
3786 | |||
3787 | /* Remember old programmed MAC if any - can happen on BE3 VF */ | ||
3788 | if (!is_zero_ether_addr(adapter->dev_mac)) | ||
3789 | old_pmac_id = adapter->pmac_id[0]; | ||
3790 | |||
3779 | status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); | 3791 | status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); |
3780 | if (status) | 3792 | if (status) |
3781 | return status; | 3793 | return status; |
3794 | |||
3795 | /* Delete the old programmed MAC as we successfully programmed | ||
3796 | * a new MAC | ||
3797 | */ | ||
3798 | if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0]) | ||
3799 | be_dev_mac_del(adapter, old_pmac_id); | ||
3800 | |||
3782 | ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr); | 3801 | ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr); |
3783 | } | 3802 | } |
3784 | 3803 | ||
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter) | |||
4552 | 4571 | ||
4553 | memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); | 4572 | memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); |
4554 | memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); | 4573 | memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); |
4574 | |||
4575 | /* Initial MAC for BE3 VFs is already programmed by PF */ | ||
4576 | if (BEx_chip(adapter) && be_virtfn(adapter)) | ||
4577 | memcpy(adapter->dev_mac, mac, ETH_ALEN); | ||
4555 | } | 4578 | } |
4556 | 4579 | ||
4557 | return 0; | 4580 | return 0; |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index c1b671667920..957bfc220978 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) | |||
2010 | if (!rxb->page) | 2010 | if (!rxb->page) |
2011 | continue; | 2011 | continue; |
2012 | 2012 | ||
2013 | dma_unmap_single(rx_queue->dev, rxb->dma, | 2013 | dma_unmap_page(rx_queue->dev, rxb->dma, |
2014 | PAGE_SIZE, DMA_FROM_DEVICE); | 2014 | PAGE_SIZE, DMA_FROM_DEVICE); |
2015 | __free_page(rxb->page); | 2015 | __free_page(rxb->page); |
2016 | 2016 | ||
2017 | rxb->page = NULL; | 2017 | rxb->page = NULL; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 87226685f742..8fa18fc17cd2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
@@ -1014,9 +1014,7 @@ | |||
1014 | 1014 | ||
1015 | static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) | 1015 | static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) |
1016 | { | 1016 | { |
1017 | u8 __iomem *reg_addr = ACCESS_ONCE(base); | 1017 | writel(value, base + reg); |
1018 | |||
1019 | writel(value, reg_addr + reg); | ||
1020 | } | 1018 | } |
1021 | 1019 | ||
1022 | #define dsaf_write_dev(a, reg, value) \ | 1020 | #define dsaf_write_dev(a, reg, value) \ |
@@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) | |||
1024 | 1022 | ||
1025 | static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) | 1023 | static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) |
1026 | { | 1024 | { |
1027 | u8 __iomem *reg_addr = ACCESS_ONCE(base); | 1025 | return readl(base + reg); |
1028 | |||
1029 | return readl(reg_addr + reg); | ||
1030 | } | 1026 | } |
1031 | 1027 | ||
1032 | static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value) | 1028 | static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value) |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 672b64606321..8aed72860e7c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
@@ -305,8 +305,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, | |||
305 | struct hns_nic_ring_data *ring_data) | 305 | struct hns_nic_ring_data *ring_data) |
306 | { | 306 | { |
307 | struct hns_nic_priv *priv = netdev_priv(ndev); | 307 | struct hns_nic_priv *priv = netdev_priv(ndev); |
308 | struct device *dev = priv->dev; | ||
309 | struct hnae_ring *ring = ring_data->ring; | 308 | struct hnae_ring *ring = ring_data->ring; |
309 | struct device *dev = ring_to_dev(ring); | ||
310 | struct netdev_queue *dev_queue; | 310 | struct netdev_queue *dev_queue; |
311 | struct skb_frag_struct *frag; | 311 | struct skb_frag_struct *frag; |
312 | int buf_num; | 312 | int buf_num; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index c7e939945259..53daa6ca5d83 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c | |||
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev) | |||
158 | return -ETIMEDOUT; | 158 | return -ETIMEDOUT; |
159 | } | 159 | } |
160 | 160 | ||
161 | static int mlx4_comm_internal_err(u32 slave_read) | 161 | int mlx4_comm_internal_err(u32 slave_read) |
162 | { | 162 | { |
163 | return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == | 163 | return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == |
164 | (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; | 164 | (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index d5a9372ed84d..9aa422691954 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -1099,7 +1099,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev, | |||
1099 | memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); | 1099 | memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); |
1100 | new_prof.tx_ring_size = tx_size; | 1100 | new_prof.tx_ring_size = tx_size; |
1101 | new_prof.rx_ring_size = rx_size; | 1101 | new_prof.rx_ring_size = rx_size; |
1102 | err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); | 1102 | err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); |
1103 | if (err) | 1103 | if (err) |
1104 | goto out; | 1104 | goto out; |
1105 | 1105 | ||
@@ -1774,7 +1774,7 @@ static int mlx4_en_set_channels(struct net_device *dev, | |||
1774 | new_prof.tx_ring_num[TX_XDP] = xdp_count; | 1774 | new_prof.tx_ring_num[TX_XDP] = xdp_count; |
1775 | new_prof.rx_ring_num = channel->rx_count; | 1775 | new_prof.rx_ring_num = channel->rx_count; |
1776 | 1776 | ||
1777 | err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); | 1777 | err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); |
1778 | if (err) | 1778 | if (err) |
1779 | goto out; | 1779 | goto out; |
1780 | 1780 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 761f8b12399c..3b4961a8e8e4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -2042,6 +2042,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv) | |||
2042 | if (priv->tx_cq[t] && priv->tx_cq[t][i]) | 2042 | if (priv->tx_cq[t] && priv->tx_cq[t][i]) |
2043 | mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); | 2043 | mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); |
2044 | } | 2044 | } |
2045 | kfree(priv->tx_ring[t]); | ||
2046 | kfree(priv->tx_cq[t]); | ||
2045 | } | 2047 | } |
2046 | 2048 | ||
2047 | for (i = 0; i < priv->rx_ring_num; i++) { | 2049 | for (i = 0; i < priv->rx_ring_num; i++) { |
@@ -2184,9 +2186,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst, | |||
2184 | 2186 | ||
2185 | int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, | 2187 | int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, |
2186 | struct mlx4_en_priv *tmp, | 2188 | struct mlx4_en_priv *tmp, |
2187 | struct mlx4_en_port_profile *prof) | 2189 | struct mlx4_en_port_profile *prof, |
2190 | bool carry_xdp_prog) | ||
2188 | { | 2191 | { |
2189 | int t; | 2192 | struct bpf_prog *xdp_prog; |
2193 | int i, t; | ||
2190 | 2194 | ||
2191 | mlx4_en_copy_priv(tmp, priv, prof); | 2195 | mlx4_en_copy_priv(tmp, priv, prof); |
2192 | 2196 | ||
@@ -2200,6 +2204,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, | |||
2200 | } | 2204 | } |
2201 | return -ENOMEM; | 2205 | return -ENOMEM; |
2202 | } | 2206 | } |
2207 | |||
2208 | /* All rx_rings has the same xdp_prog. Pick the first one. */ | ||
2209 | xdp_prog = rcu_dereference_protected( | ||
2210 | priv->rx_ring[0]->xdp_prog, | ||
2211 | lockdep_is_held(&priv->mdev->state_lock)); | ||
2212 | |||
2213 | if (xdp_prog && carry_xdp_prog) { | ||
2214 | xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num); | ||
2215 | if (IS_ERR(xdp_prog)) { | ||
2216 | mlx4_en_free_resources(tmp); | ||
2217 | return PTR_ERR(xdp_prog); | ||
2218 | } | ||
2219 | for (i = 0; i < tmp->rx_ring_num; i++) | ||
2220 | rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog, | ||
2221 | xdp_prog); | ||
2222 | } | ||
2223 | |||
2203 | return 0; | 2224 | return 0; |
2204 | } | 2225 | } |
2205 | 2226 | ||
@@ -2214,7 +2235,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
2214 | { | 2235 | { |
2215 | struct mlx4_en_priv *priv = netdev_priv(dev); | 2236 | struct mlx4_en_priv *priv = netdev_priv(dev); |
2216 | struct mlx4_en_dev *mdev = priv->mdev; | 2237 | struct mlx4_en_dev *mdev = priv->mdev; |
2217 | int t; | ||
2218 | 2238 | ||
2219 | en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); | 2239 | en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); |
2220 | 2240 | ||
@@ -2248,11 +2268,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
2248 | mlx4_en_free_resources(priv); | 2268 | mlx4_en_free_resources(priv); |
2249 | mutex_unlock(&mdev->state_lock); | 2269 | mutex_unlock(&mdev->state_lock); |
2250 | 2270 | ||
2251 | for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { | ||
2252 | kfree(priv->tx_ring[t]); | ||
2253 | kfree(priv->tx_cq[t]); | ||
2254 | } | ||
2255 | |||
2256 | free_netdev(dev); | 2271 | free_netdev(dev); |
2257 | } | 2272 | } |
2258 | 2273 | ||
@@ -2755,7 +2770,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) | |||
2755 | en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); | 2770 | en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); |
2756 | } | 2771 | } |
2757 | 2772 | ||
2758 | err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); | 2773 | err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false); |
2759 | if (err) { | 2774 | if (err) { |
2760 | if (prog) | 2775 | if (prog) |
2761 | bpf_prog_sub(prog, priv->rx_ring_num - 1); | 2776 | bpf_prog_sub(prog, priv->rx_ring_num - 1); |
@@ -3499,7 +3514,7 @@ int mlx4_en_reset_config(struct net_device *dev, | |||
3499 | memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); | 3514 | memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); |
3500 | memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); | 3515 | memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); |
3501 | 3516 | ||
3502 | err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); | 3517 | err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); |
3503 | if (err) | 3518 | if (err) |
3504 | goto out; | 3519 | goto out; |
3505 | 3520 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index eac527e25ec9..cc003fdf0ed9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -514,8 +514,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) | |||
514 | return; | 514 | return; |
515 | 515 | ||
516 | for (ring = 0; ring < priv->rx_ring_num; ring++) { | 516 | for (ring = 0; ring < priv->rx_ring_num; ring++) { |
517 | if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) | 517 | if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) { |
518 | local_bh_disable(); | ||
518 | napi_reschedule(&priv->rx_cq[ring]->napi); | 519 | napi_reschedule(&priv->rx_cq[ring]->napi); |
520 | local_bh_enable(); | ||
521 | } | ||
519 | } | 522 | } |
520 | } | 523 | } |
521 | 524 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 0e8b7c44931f..8258d08acd8c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c | |||
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev) | |||
222 | return; | 222 | return; |
223 | 223 | ||
224 | mlx4_stop_catas_poll(dev); | 224 | mlx4_stop_catas_poll(dev); |
225 | if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION && | ||
226 | mlx4_is_slave(dev)) { | ||
227 | /* In mlx4_remove_one on a VF */ | ||
228 | u32 slave_read = | ||
229 | swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read)); | ||
230 | |||
231 | if (mlx4_comm_internal_err(slave_read)) { | ||
232 | mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n", | ||
233 | __func__); | ||
234 | mlx4_enter_error_state(dev->persist); | ||
235 | } | ||
236 | } | ||
225 | mutex_lock(&intf_mutex); | 237 | mutex_lock(&intf_mutex); |
226 | 238 | ||
227 | list_for_each_entry(intf, &intf_list, list) | 239 | list_for_each_entry(intf, &intf_list, list) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 88ee7d8a5923..086920b615af 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); | |||
1220 | void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); | 1220 | void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); |
1221 | 1221 | ||
1222 | void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); | 1222 | void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); |
1223 | int mlx4_comm_internal_err(u32 slave_read); | ||
1223 | 1224 | ||
1224 | int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, | 1225 | int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, |
1225 | enum mlx4_port_type *type); | 1226 | enum mlx4_port_type *type); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index ba1c6cd0cc79..cec59bc264c9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -679,7 +679,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, | |||
679 | 679 | ||
680 | int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, | 680 | int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, |
681 | struct mlx4_en_priv *tmp, | 681 | struct mlx4_en_priv *tmp, |
682 | struct mlx4_en_port_profile *prof); | 682 | struct mlx4_en_port_profile *prof, |
683 | bool carry_xdp_prog); | ||
683 | void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, | 684 | void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, |
684 | struct mlx4_en_priv *tmp); | 685 | struct mlx4_en_priv *tmp); |
685 | 686 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3797cc7c1288..caa837e5e2b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) | |||
1728 | if (cmd->cmdif_rev > CMD_IF_REV) { | 1728 | if (cmd->cmdif_rev > CMD_IF_REV) { |
1729 | dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", | 1729 | dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", |
1730 | CMD_IF_REV, cmd->cmdif_rev); | 1730 | CMD_IF_REV, cmd->cmdif_rev); |
1731 | err = -ENOTSUPP; | 1731 | err = -EOPNOTSUPP; |
1732 | goto err_free_page; | 1732 | goto err_free_page; |
1733 | } | 1733 | } |
1734 | 1734 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 951dbd58594d..d5ecb8f53fd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); | |||
791 | int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd); | 791 | int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd); |
792 | 792 | ||
793 | int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix); | 793 | int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix); |
794 | void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); | 794 | void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc, |
795 | enum mlx5e_traffic_types tt); | ||
795 | 796 | ||
796 | int mlx5e_open_locked(struct net_device *netdev); | 797 | int mlx5e_open_locked(struct net_device *netdev); |
797 | int mlx5e_close_locked(struct net_device *netdev); | 798 | int mlx5e_close_locked(struct net_device *netdev); |
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {} | |||
863 | 864 | ||
864 | static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) | 865 | static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) |
865 | { | 866 | { |
866 | return -ENOTSUPP; | 867 | return -EOPNOTSUPP; |
867 | } | 868 | } |
868 | 869 | ||
869 | static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) | 870 | static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) |
870 | { | 871 | { |
871 | return -ENOTSUPP; | 872 | return -EOPNOTSUPP; |
872 | } | 873 | } |
873 | #else | 874 | #else |
874 | int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); | 875 | int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index f0b460f47f29..0523ed47f597 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | |||
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, | |||
89 | int i; | 89 | int i; |
90 | 90 | ||
91 | if (!MLX5_CAP_GEN(priv->mdev, ets)) | 91 | if (!MLX5_CAP_GEN(priv->mdev, ets)) |
92 | return -ENOTSUPP; | 92 | return -EOPNOTSUPP; |
93 | 93 | ||
94 | ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; | 94 | ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; |
95 | for (i = 0; i < ets->ets_cap; i++) { | 95 | for (i = 0; i < ets->ets_cap; i++) { |
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, | |||
236 | int err; | 236 | int err; |
237 | 237 | ||
238 | if (!MLX5_CAP_GEN(priv->mdev, ets)) | 238 | if (!MLX5_CAP_GEN(priv->mdev, ets)) |
239 | return -ENOTSUPP; | 239 | return -EOPNOTSUPP; |
240 | 240 | ||
241 | err = mlx5e_dbcnl_validate_ets(netdev, ets); | 241 | err = mlx5e_dbcnl_validate_ets(netdev, ets); |
242 | if (err) | 242 | if (err) |
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev) | |||
402 | struct mlx5_core_dev *mdev = priv->mdev; | 402 | struct mlx5_core_dev *mdev = priv->mdev; |
403 | struct ieee_ets ets; | 403 | struct ieee_ets ets; |
404 | struct ieee_pfc pfc; | 404 | struct ieee_pfc pfc; |
405 | int err = -ENOTSUPP; | 405 | int err = -EOPNOTSUPP; |
406 | int i; | 406 | int i; |
407 | 407 | ||
408 | if (!MLX5_CAP_GEN(mdev, ets)) | 408 | if (!MLX5_CAP_GEN(mdev, ets)) |
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev, | |||
511 | struct mlx5e_priv *priv = netdev_priv(netdev); | 511 | struct mlx5e_priv *priv = netdev_priv(netdev); |
512 | struct mlx5_core_dev *mdev = priv->mdev; | 512 | struct mlx5_core_dev *mdev = priv->mdev; |
513 | 513 | ||
514 | if (!MLX5_CAP_GEN(priv->mdev, ets)) { | ||
515 | netdev_err(netdev, "%s, ets is not supported\n", __func__); | ||
516 | return; | ||
517 | } | ||
518 | |||
514 | if (priority >= CEE_DCBX_MAX_PRIO) { | 519 | if (priority >= CEE_DCBX_MAX_PRIO) { |
515 | netdev_err(netdev, | 520 | netdev_err(netdev, |
516 | "%s, priority is out of range\n", __func__); | 521 | "%s, priority is out of range\n", __func__); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 5197817e4b2f..bb67863aa361 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev, | |||
595 | struct mlx5e_priv *priv = netdev_priv(netdev); | 595 | struct mlx5e_priv *priv = netdev_priv(netdev); |
596 | 596 | ||
597 | if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) | 597 | if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) |
598 | return -ENOTSUPP; | 598 | return -EOPNOTSUPP; |
599 | 599 | ||
600 | coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; | 600 | coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; |
601 | coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; | 601 | coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; |
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev, | |||
620 | int i; | 620 | int i; |
621 | 621 | ||
622 | if (!MLX5_CAP_GEN(mdev, cq_moderation)) | 622 | if (!MLX5_CAP_GEN(mdev, cq_moderation)) |
623 | return -ENOTSUPP; | 623 | return -EOPNOTSUPP; |
624 | 624 | ||
625 | mutex_lock(&priv->state_lock); | 625 | mutex_lock(&priv->state_lock); |
626 | 626 | ||
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, | |||
980 | 980 | ||
981 | static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) | 981 | static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) |
982 | { | 982 | { |
983 | struct mlx5_core_dev *mdev = priv->mdev; | ||
984 | void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); | 983 | void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); |
985 | int i; | 984 | struct mlx5_core_dev *mdev = priv->mdev; |
985 | int ctxlen = MLX5_ST_SZ_BYTES(tirc); | ||
986 | int tt; | ||
986 | 987 | ||
987 | MLX5_SET(modify_tir_in, in, bitmask.hash, 1); | 988 | MLX5_SET(modify_tir_in, in, bitmask.hash, 1); |
988 | mlx5e_build_tir_ctx_hash(tirc, priv); | ||
989 | 989 | ||
990 | for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) | 990 | for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { |
991 | mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen); | 991 | memset(tirc, 0, ctxlen); |
992 | mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt); | ||
993 | mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); | ||
994 | } | ||
992 | } | 995 | } |
993 | 996 | ||
994 | static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, | 997 | static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, |
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, | |||
996 | { | 999 | { |
997 | struct mlx5e_priv *priv = netdev_priv(dev); | 1000 | struct mlx5e_priv *priv = netdev_priv(dev); |
998 | int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); | 1001 | int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); |
1002 | bool hash_changed = false; | ||
999 | void *in; | 1003 | void *in; |
1000 | 1004 | ||
1001 | if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && | 1005 | if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && |
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, | |||
1017 | mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); | 1021 | mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); |
1018 | } | 1022 | } |
1019 | 1023 | ||
1020 | if (key) | 1024 | if (hfunc != ETH_RSS_HASH_NO_CHANGE && |
1025 | hfunc != priv->params.rss_hfunc) { | ||
1026 | priv->params.rss_hfunc = hfunc; | ||
1027 | hash_changed = true; | ||
1028 | } | ||
1029 | |||
1030 | if (key) { | ||
1021 | memcpy(priv->params.toeplitz_hash_key, key, | 1031 | memcpy(priv->params.toeplitz_hash_key, key, |
1022 | sizeof(priv->params.toeplitz_hash_key)); | 1032 | sizeof(priv->params.toeplitz_hash_key)); |
1033 | hash_changed = hash_changed || | ||
1034 | priv->params.rss_hfunc == ETH_RSS_HASH_TOP; | ||
1035 | } | ||
1023 | 1036 | ||
1024 | if (hfunc != ETH_RSS_HASH_NO_CHANGE) | 1037 | if (hash_changed) |
1025 | priv->params.rss_hfunc = hfunc; | 1038 | mlx5e_modify_tirs_hash(priv, in, inlen); |
1026 | |||
1027 | mlx5e_modify_tirs_hash(priv, in, inlen); | ||
1028 | 1039 | ||
1029 | mutex_unlock(&priv->state_lock); | 1040 | mutex_unlock(&priv->state_lock); |
1030 | 1041 | ||
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1296 | u32 mlx5_wol_mode; | 1307 | u32 mlx5_wol_mode; |
1297 | 1308 | ||
1298 | if (!wol_supported) | 1309 | if (!wol_supported) |
1299 | return -ENOTSUPP; | 1310 | return -EOPNOTSUPP; |
1300 | 1311 | ||
1301 | if (wol->wolopts & ~wol_supported) | 1312 | if (wol->wolopts & ~wol_supported) |
1302 | return -EINVAL; | 1313 | return -EINVAL; |
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) | |||
1426 | 1437 | ||
1427 | if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && | 1438 | if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && |
1428 | !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) | 1439 | !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) |
1429 | return -ENOTSUPP; | 1440 | return -EOPNOTSUPP; |
1430 | 1441 | ||
1431 | if (!rx_mode_changed) | 1442 | if (!rx_mode_changed) |
1432 | return 0; | 1443 | return 0; |
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, | |||
1452 | bool reset; | 1463 | bool reset; |
1453 | 1464 | ||
1454 | if (!MLX5_CAP_GEN(mdev, cqe_compression)) | 1465 | if (!MLX5_CAP_GEN(mdev, cqe_compression)) |
1455 | return -ENOTSUPP; | 1466 | return -EOPNOTSUPP; |
1456 | 1467 | ||
1457 | if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { | 1468 | if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { |
1458 | netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n"); | 1469 | netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 1fe80de5d68f..a0e5a69402b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | |||
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) | |||
1089 | MLX5_FLOW_NAMESPACE_KERNEL); | 1089 | MLX5_FLOW_NAMESPACE_KERNEL); |
1090 | 1090 | ||
1091 | if (!priv->fs.ns) | 1091 | if (!priv->fs.ns) |
1092 | return -EINVAL; | 1092 | return -EOPNOTSUPP; |
1093 | 1093 | ||
1094 | err = mlx5e_arfs_create_tables(priv); | 1094 | err = mlx5e_arfs_create_tables(priv); |
1095 | if (err) { | 1095 | if (err) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index d088effd7160..f33f72d0237c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | |||
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, | |||
92 | ns = mlx5_get_flow_namespace(priv->mdev, | 92 | ns = mlx5_get_flow_namespace(priv->mdev, |
93 | MLX5_FLOW_NAMESPACE_ETHTOOL); | 93 | MLX5_FLOW_NAMESPACE_ETHTOOL); |
94 | if (!ns) | 94 | if (!ns) |
95 | return ERR_PTR(-ENOTSUPP); | 95 | return ERR_PTR(-EOPNOTSUPP); |
96 | 96 | ||
97 | table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev, | 97 | table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev, |
98 | flow_table_properties_nic_receive.log_max_ft_size)), | 98 | flow_table_properties_nic_receive.log_max_ft_size)), |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2b7dd315020c..f14ca3385fdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) | |||
2022 | MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout); | 2022 | MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout); |
2023 | } | 2023 | } |
2024 | 2024 | ||
2025 | void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) | 2025 | void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc, |
2026 | enum mlx5e_traffic_types tt) | ||
2026 | { | 2027 | { |
2028 | void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); | ||
2029 | |||
2030 | #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ | ||
2031 | MLX5_HASH_FIELD_SEL_DST_IP) | ||
2032 | |||
2033 | #define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\ | ||
2034 | MLX5_HASH_FIELD_SEL_DST_IP |\ | ||
2035 | MLX5_HASH_FIELD_SEL_L4_SPORT |\ | ||
2036 | MLX5_HASH_FIELD_SEL_L4_DPORT) | ||
2037 | |||
2038 | #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ | ||
2039 | MLX5_HASH_FIELD_SEL_DST_IP |\ | ||
2040 | MLX5_HASH_FIELD_SEL_IPSEC_SPI) | ||
2041 | |||
2027 | MLX5_SET(tirc, tirc, rx_hash_fn, | 2042 | MLX5_SET(tirc, tirc, rx_hash_fn, |
2028 | mlx5e_rx_hash_fn(priv->params.rss_hfunc)); | 2043 | mlx5e_rx_hash_fn(priv->params.rss_hfunc)); |
2029 | if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { | 2044 | if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { |
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) | |||
2035 | MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); | 2050 | MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); |
2036 | memcpy(rss_key, priv->params.toeplitz_hash_key, len); | 2051 | memcpy(rss_key, priv->params.toeplitz_hash_key, len); |
2037 | } | 2052 | } |
2053 | |||
2054 | switch (tt) { | ||
2055 | case MLX5E_TT_IPV4_TCP: | ||
2056 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2057 | MLX5_L3_PROT_TYPE_IPV4); | ||
2058 | MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, | ||
2059 | MLX5_L4_PROT_TYPE_TCP); | ||
2060 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2061 | MLX5_HASH_IP_L4PORTS); | ||
2062 | break; | ||
2063 | |||
2064 | case MLX5E_TT_IPV6_TCP: | ||
2065 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2066 | MLX5_L3_PROT_TYPE_IPV6); | ||
2067 | MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, | ||
2068 | MLX5_L4_PROT_TYPE_TCP); | ||
2069 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2070 | MLX5_HASH_IP_L4PORTS); | ||
2071 | break; | ||
2072 | |||
2073 | case MLX5E_TT_IPV4_UDP: | ||
2074 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2075 | MLX5_L3_PROT_TYPE_IPV4); | ||
2076 | MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, | ||
2077 | MLX5_L4_PROT_TYPE_UDP); | ||
2078 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2079 | MLX5_HASH_IP_L4PORTS); | ||
2080 | break; | ||
2081 | |||
2082 | case MLX5E_TT_IPV6_UDP: | ||
2083 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2084 | MLX5_L3_PROT_TYPE_IPV6); | ||
2085 | MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, | ||
2086 | MLX5_L4_PROT_TYPE_UDP); | ||
2087 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2088 | MLX5_HASH_IP_L4PORTS); | ||
2089 | break; | ||
2090 | |||
2091 | case MLX5E_TT_IPV4_IPSEC_AH: | ||
2092 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2093 | MLX5_L3_PROT_TYPE_IPV4); | ||
2094 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2095 | MLX5_HASH_IP_IPSEC_SPI); | ||
2096 | break; | ||
2097 | |||
2098 | case MLX5E_TT_IPV6_IPSEC_AH: | ||
2099 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2100 | MLX5_L3_PROT_TYPE_IPV6); | ||
2101 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2102 | MLX5_HASH_IP_IPSEC_SPI); | ||
2103 | break; | ||
2104 | |||
2105 | case MLX5E_TT_IPV4_IPSEC_ESP: | ||
2106 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2107 | MLX5_L3_PROT_TYPE_IPV4); | ||
2108 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2109 | MLX5_HASH_IP_IPSEC_SPI); | ||
2110 | break; | ||
2111 | |||
2112 | case MLX5E_TT_IPV6_IPSEC_ESP: | ||
2113 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2114 | MLX5_L3_PROT_TYPE_IPV6); | ||
2115 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2116 | MLX5_HASH_IP_IPSEC_SPI); | ||
2117 | break; | ||
2118 | |||
2119 | case MLX5E_TT_IPV4: | ||
2120 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2121 | MLX5_L3_PROT_TYPE_IPV4); | ||
2122 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2123 | MLX5_HASH_IP); | ||
2124 | break; | ||
2125 | |||
2126 | case MLX5E_TT_IPV6: | ||
2127 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2128 | MLX5_L3_PROT_TYPE_IPV6); | ||
2129 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2130 | MLX5_HASH_IP); | ||
2131 | break; | ||
2132 | default: | ||
2133 | WARN_ONCE(true, "%s: bad traffic type!\n", __func__); | ||
2134 | } | ||
2038 | } | 2135 | } |
2039 | 2136 | ||
2040 | static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) | 2137 | static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) |
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) | |||
2404 | static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, | 2501 | static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, |
2405 | enum mlx5e_traffic_types tt) | 2502 | enum mlx5e_traffic_types tt) |
2406 | { | 2503 | { |
2407 | void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); | ||
2408 | |||
2409 | MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); | 2504 | MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); |
2410 | 2505 | ||
2411 | #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ | ||
2412 | MLX5_HASH_FIELD_SEL_DST_IP) | ||
2413 | |||
2414 | #define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\ | ||
2415 | MLX5_HASH_FIELD_SEL_DST_IP |\ | ||
2416 | MLX5_HASH_FIELD_SEL_L4_SPORT |\ | ||
2417 | MLX5_HASH_FIELD_SEL_L4_DPORT) | ||
2418 | |||
2419 | #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ | ||
2420 | MLX5_HASH_FIELD_SEL_DST_IP |\ | ||
2421 | MLX5_HASH_FIELD_SEL_IPSEC_SPI) | ||
2422 | |||
2423 | mlx5e_build_tir_ctx_lro(tirc, priv); | 2506 | mlx5e_build_tir_ctx_lro(tirc, priv); |
2424 | 2507 | ||
2425 | MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); | 2508 | MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); |
2426 | MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); | 2509 | MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); |
2427 | mlx5e_build_tir_ctx_hash(tirc, priv); | 2510 | mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt); |
2428 | |||
2429 | switch (tt) { | ||
2430 | case MLX5E_TT_IPV4_TCP: | ||
2431 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2432 | MLX5_L3_PROT_TYPE_IPV4); | ||
2433 | MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, | ||
2434 | MLX5_L4_PROT_TYPE_TCP); | ||
2435 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2436 | MLX5_HASH_IP_L4PORTS); | ||
2437 | break; | ||
2438 | |||
2439 | case MLX5E_TT_IPV6_TCP: | ||
2440 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2441 | MLX5_L3_PROT_TYPE_IPV6); | ||
2442 | MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, | ||
2443 | MLX5_L4_PROT_TYPE_TCP); | ||
2444 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2445 | MLX5_HASH_IP_L4PORTS); | ||
2446 | break; | ||
2447 | |||
2448 | case MLX5E_TT_IPV4_UDP: | ||
2449 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2450 | MLX5_L3_PROT_TYPE_IPV4); | ||
2451 | MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, | ||
2452 | MLX5_L4_PROT_TYPE_UDP); | ||
2453 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2454 | MLX5_HASH_IP_L4PORTS); | ||
2455 | break; | ||
2456 | |||
2457 | case MLX5E_TT_IPV6_UDP: | ||
2458 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2459 | MLX5_L3_PROT_TYPE_IPV6); | ||
2460 | MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, | ||
2461 | MLX5_L4_PROT_TYPE_UDP); | ||
2462 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2463 | MLX5_HASH_IP_L4PORTS); | ||
2464 | break; | ||
2465 | |||
2466 | case MLX5E_TT_IPV4_IPSEC_AH: | ||
2467 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2468 | MLX5_L3_PROT_TYPE_IPV4); | ||
2469 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2470 | MLX5_HASH_IP_IPSEC_SPI); | ||
2471 | break; | ||
2472 | |||
2473 | case MLX5E_TT_IPV6_IPSEC_AH: | ||
2474 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2475 | MLX5_L3_PROT_TYPE_IPV6); | ||
2476 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2477 | MLX5_HASH_IP_IPSEC_SPI); | ||
2478 | break; | ||
2479 | |||
2480 | case MLX5E_TT_IPV4_IPSEC_ESP: | ||
2481 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2482 | MLX5_L3_PROT_TYPE_IPV4); | ||
2483 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2484 | MLX5_HASH_IP_IPSEC_SPI); | ||
2485 | break; | ||
2486 | |||
2487 | case MLX5E_TT_IPV6_IPSEC_ESP: | ||
2488 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2489 | MLX5_L3_PROT_TYPE_IPV6); | ||
2490 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2491 | MLX5_HASH_IP_IPSEC_SPI); | ||
2492 | break; | ||
2493 | |||
2494 | case MLX5E_TT_IPV4: | ||
2495 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2496 | MLX5_L3_PROT_TYPE_IPV4); | ||
2497 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2498 | MLX5_HASH_IP); | ||
2499 | break; | ||
2500 | |||
2501 | case MLX5E_TT_IPV6: | ||
2502 | MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, | ||
2503 | MLX5_L3_PROT_TYPE_IPV6); | ||
2504 | MLX5_SET(rx_hash_field_select, hfso, selected_fields, | ||
2505 | MLX5_HASH_IP); | ||
2506 | break; | ||
2507 | default: | ||
2508 | WARN_ONCE(true, | ||
2509 | "mlx5e_build_indir_tir_ctx: bad traffic type!\n"); | ||
2510 | } | ||
2511 | } | 2511 | } |
2512 | 2512 | ||
2513 | static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, | 2513 | static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, |
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { | |||
3331 | static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) | 3331 | static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) |
3332 | { | 3332 | { |
3333 | if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | 3333 | if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) |
3334 | return -ENOTSUPP; | 3334 | return -EOPNOTSUPP; |
3335 | if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || | 3335 | if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || |
3336 | !MLX5_CAP_GEN(mdev, nic_flow_table) || | 3336 | !MLX5_CAP_GEN(mdev, nic_flow_table) || |
3337 | !MLX5_CAP_ETH(mdev, csum_cap) || | 3337 | !MLX5_CAP_ETH(mdev, csum_cap) || |
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) | |||
3343 | < 3) { | 3343 | < 3) { |
3344 | mlx5_core_warn(mdev, | 3344 | mlx5_core_warn(mdev, |
3345 | "Not creating net device, some required device capabilities are missing\n"); | 3345 | "Not creating net device, some required device capabilities are missing\n"); |
3346 | return -ENOTSUPP; | 3346 | return -EOPNOTSUPP; |
3347 | } | 3347 | } |
3348 | if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) | 3348 | if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) |
3349 | mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); | 3349 | mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 46bef6a26a8c..c5282b6aba8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, | |||
663 | __be32 *saddr, | 663 | __be32 *saddr, |
664 | int *out_ttl) | 664 | int *out_ttl) |
665 | { | 665 | { |
666 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | ||
666 | struct rtable *rt; | 667 | struct rtable *rt; |
667 | struct neighbour *n = NULL; | 668 | struct neighbour *n = NULL; |
668 | int ttl; | 669 | int ttl; |
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, | |||
677 | #else | 678 | #else |
678 | return -EOPNOTSUPP; | 679 | return -EOPNOTSUPP; |
679 | #endif | 680 | #endif |
680 | 681 | /* if the egress device isn't on the same HW e-switch, we use the uplink */ | |
681 | if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) { | 682 | if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) |
682 | pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__); | 683 | *out_dev = mlx5_eswitch_get_uplink_netdev(esw); |
683 | ip_rt_put(rt); | 684 | else |
684 | return -EOPNOTSUPP; | 685 | *out_dev = rt->dst.dev; |
685 | } | ||
686 | 686 | ||
687 | ttl = ip4_dst_hoplimit(&rt->dst); | 687 | ttl = ip4_dst_hoplimit(&rt->dst); |
688 | n = dst_neigh_lookup(&rt->dst, &fl4->daddr); | 688 | n = dst_neigh_lookup(&rt->dst, &fl4->daddr); |
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, | |||
693 | *out_n = n; | 693 | *out_n = n; |
694 | *saddr = fl4->saddr; | 694 | *saddr = fl4->saddr; |
695 | *out_ttl = ttl; | 695 | *out_ttl = ttl; |
696 | *out_dev = rt->dst.dev; | ||
697 | 696 | ||
698 | return 0; | 697 | return 0; |
699 | } | 698 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f14d9c9ba773..d0c8bf014453 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, | |||
133 | 133 | ||
134 | if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || | 134 | if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || |
135 | !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) | 135 | !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) |
136 | return -ENOTSUPP; | 136 | return -EOPNOTSUPP; |
137 | 137 | ||
138 | esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n", | 138 | esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n", |
139 | vport, vlan, qos, set_flags); | 139 | vport, vlan, qos, set_flags); |
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
353 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); | 353 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); |
354 | if (!root_ns) { | 354 | if (!root_ns) { |
355 | esw_warn(dev, "Failed to get FDB flow namespace\n"); | 355 | esw_warn(dev, "Failed to get FDB flow namespace\n"); |
356 | return -ENOMEM; | 356 | return -EOPNOTSUPP; |
357 | } | 357 | } |
358 | 358 | ||
359 | flow_group_in = mlx5_vzalloc(inlen); | 359 | flow_group_in = mlx5_vzalloc(inlen); |
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
962 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); | 962 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); |
963 | if (!root_ns) { | 963 | if (!root_ns) { |
964 | esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); | 964 | esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); |
965 | return -EIO; | 965 | return -EOPNOTSUPP; |
966 | } | 966 | } |
967 | 967 | ||
968 | flow_group_in = mlx5_vzalloc(inlen); | 968 | flow_group_in = mlx5_vzalloc(inlen); |
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
1079 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); | 1079 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); |
1080 | if (!root_ns) { | 1080 | if (!root_ns) { |
1081 | esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); | 1081 | esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); |
1082 | return -EIO; | 1082 | return -EOPNOTSUPP; |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | flow_group_in = mlx5_vzalloc(inlen); | 1085 | flow_group_in = mlx5_vzalloc(inlen); |
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) | |||
1630 | if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || | 1630 | if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || |
1631 | !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { | 1631 | !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { |
1632 | esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); | 1632 | esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); |
1633 | return -ENOTSUPP; | 1633 | return -EOPNOTSUPP; |
1634 | } | 1634 | } |
1635 | 1635 | ||
1636 | if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) | 1636 | if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 03293ed1cc22..595f7c7383b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, | |||
166 | return 0; | 166 | return 0; |
167 | 167 | ||
168 | out_notsupp: | 168 | out_notsupp: |
169 | return -ENOTSUPP; | 169 | return -EOPNOTSUPP; |
170 | } | 170 | } |
171 | 171 | ||
172 | int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, | 172 | int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, |
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
424 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); | 424 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); |
425 | if (!root_ns) { | 425 | if (!root_ns) { |
426 | esw_warn(dev, "Failed to get FDB flow namespace\n"); | 426 | esw_warn(dev, "Failed to get FDB flow namespace\n"); |
427 | err = -EOPNOTSUPP; | ||
427 | goto ns_err; | 428 | goto ns_err; |
428 | } | 429 | } |
429 | 430 | ||
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) | |||
535 | ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); | 536 | ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); |
536 | if (!ns) { | 537 | if (!ns) { |
537 | esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); | 538 | esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); |
538 | return -ENOMEM; | 539 | return -EOPNOTSUPP; |
539 | } | 540 | } |
540 | 541 | ||
541 | ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0); | 542 | ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0); |
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw) | |||
655 | esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); | 656 | esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); |
656 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); | 657 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); |
657 | if (err1) | 658 | if (err1) |
658 | esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); | 659 | esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1); |
659 | } | 660 | } |
660 | if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { | 661 | if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { |
661 | if (mlx5_eswitch_inline_mode_get(esw, | 662 | if (mlx5_eswitch_inline_mode_get(esw, |
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) | |||
674 | int vport; | 675 | int vport; |
675 | int err; | 676 | int err; |
676 | 677 | ||
678 | /* disable PF RoCE so missed packets don't go through RoCE steering */ | ||
679 | mlx5_dev_list_lock(); | ||
680 | mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); | ||
681 | mlx5_dev_list_unlock(); | ||
682 | |||
677 | err = esw_create_offloads_fdb_table(esw, nvports); | 683 | err = esw_create_offloads_fdb_table(esw, nvports); |
678 | if (err) | 684 | if (err) |
679 | return err; | 685 | goto create_fdb_err; |
680 | 686 | ||
681 | err = esw_create_offloads_table(esw); | 687 | err = esw_create_offloads_table(esw); |
682 | if (err) | 688 | if (err) |
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) | |||
696 | goto err_reps; | 702 | goto err_reps; |
697 | } | 703 | } |
698 | 704 | ||
699 | /* disable PF RoCE so missed packets don't go through RoCE steering */ | ||
700 | mlx5_dev_list_lock(); | ||
701 | mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); | ||
702 | mlx5_dev_list_unlock(); | ||
703 | |||
704 | return 0; | 705 | return 0; |
705 | 706 | ||
706 | err_reps: | 707 | err_reps: |
@@ -717,6 +718,13 @@ create_fg_err: | |||
717 | 718 | ||
718 | create_ft_err: | 719 | create_ft_err: |
719 | esw_destroy_offloads_fdb_table(esw); | 720 | esw_destroy_offloads_fdb_table(esw); |
721 | |||
722 | create_fdb_err: | ||
723 | /* enable back PF RoCE */ | ||
724 | mlx5_dev_list_lock(); | ||
725 | mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); | ||
726 | mlx5_dev_list_unlock(); | ||
727 | |||
720 | return err; | 728 | return err; |
721 | } | 729 | } |
722 | 730 | ||
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw) | |||
724 | { | 732 | { |
725 | int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; | 733 | int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
726 | 734 | ||
727 | /* enable back PF RoCE */ | ||
728 | mlx5_dev_list_lock(); | ||
729 | mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); | ||
730 | mlx5_dev_list_unlock(); | ||
731 | |||
732 | mlx5_eswitch_disable_sriov(esw); | 735 | mlx5_eswitch_disable_sriov(esw); |
733 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); | 736 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); |
734 | if (err) { | 737 | if (err) { |
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw) | |||
738 | esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); | 741 | esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); |
739 | } | 742 | } |
740 | 743 | ||
744 | /* enable back PF RoCE */ | ||
745 | mlx5_dev_list_lock(); | ||
746 | mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); | ||
747 | mlx5_dev_list_unlock(); | ||
748 | |||
741 | return err; | 749 | return err; |
742 | } | 750 | } |
743 | 751 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index c4478ecd8056..b53fc85a2375 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | |||
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, | |||
322 | flow_table_properties_nic_receive. | 322 | flow_table_properties_nic_receive. |
323 | flow_modify_en); | 323 | flow_modify_en); |
324 | if (!atomic_mod_cap) | 324 | if (!atomic_mod_cap) |
325 | return -ENOTSUPP; | 325 | return -EOPNOTSUPP; |
326 | opmod = 1; | 326 | opmod = 1; |
327 | 327 | ||
328 | return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); | 328 | return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0ac7a2fc916c..6346a8f5883b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) | |||
1822 | struct mlx5_flow_table *ft; | 1822 | struct mlx5_flow_table *ft; |
1823 | 1823 | ||
1824 | ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); | 1824 | ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); |
1825 | if (!ns) | 1825 | if (WARN_ON(!ns)) |
1826 | return -EINVAL; | 1826 | return -EINVAL; |
1827 | ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0); | 1827 | ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0); |
1828 | if (IS_ERR(ft)) { | 1828 | if (IS_ERR(ft)) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d01e9f21d469..3c315eb8d270 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) | |||
807 | return 0; | 807 | return 0; |
808 | } | 808 | } |
809 | 809 | ||
810 | return -ENOTSUPP; | 810 | return -EOPNOTSUPP; |
811 | } | 811 | } |
812 | 812 | ||
813 | 813 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index d2ec9d232a70..fd12e0a377a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c | |||
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, | |||
620 | u32 out[MLX5_ST_SZ_DW(qtct_reg)]; | 620 | u32 out[MLX5_ST_SZ_DW(qtct_reg)]; |
621 | 621 | ||
622 | if (!MLX5_CAP_GEN(mdev, ets)) | 622 | if (!MLX5_CAP_GEN(mdev, ets)) |
623 | return -ENOTSUPP; | 623 | return -EOPNOTSUPP; |
624 | 624 | ||
625 | return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out), | 625 | return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out), |
626 | MLX5_REG_QETCR, 0, 1); | 626 | MLX5_REG_QETCR, 0, 1); |
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, | |||
632 | u32 in[MLX5_ST_SZ_DW(qtct_reg)]; | 632 | u32 in[MLX5_ST_SZ_DW(qtct_reg)]; |
633 | 633 | ||
634 | if (!MLX5_CAP_GEN(mdev, ets)) | 634 | if (!MLX5_CAP_GEN(mdev, ets)) |
635 | return -ENOTSUPP; | 635 | return -EOPNOTSUPP; |
636 | 636 | ||
637 | memset(in, 0, sizeof(in)); | 637 | memset(in, 0, sizeof(in)); |
638 | return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, | 638 | return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 269e4401c342..7129c30a2ab4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c | |||
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | |||
532 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) | 532 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) |
533 | return -EACCES; | 533 | return -EACCES; |
534 | if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) | 534 | if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) |
535 | return -ENOTSUPP; | 535 | return -EOPNOTSUPP; |
536 | 536 | ||
537 | in = mlx5_vzalloc(inlen); | 537 | in = mlx5_vzalloc(inlen); |
538 | if (!in) | 538 | if (!in) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index be3c91c7f211..5484fd726d5a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | |||
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, | |||
305 | { | 305 | { |
306 | void __iomem *ioaddr = hw->pcsr; | 306 | void __iomem *ioaddr = hw->pcsr; |
307 | u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); | 307 | u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); |
308 | u32 intr_mask = readl(ioaddr + GMAC_INT_MASK); | ||
308 | int ret = 0; | 309 | int ret = 0; |
309 | 310 | ||
311 | /* Discard masked bits */ | ||
312 | intr_status &= ~intr_mask; | ||
313 | |||
310 | /* Not used events (e.g. MMC interrupts) are not handled. */ | 314 | /* Not used events (e.g. MMC interrupts) are not handled. */ |
311 | if ((intr_status & GMAC_INT_STATUS_MMCTIS)) | 315 | if ((intr_status & GMAC_INT_STATUS_MMCTIS)) |
312 | x->mmc_tx_irq_n++; | 316 | x->mmc_tx_irq_n++; |
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index ece59c54a653..4a40a3d825b4 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c | |||
@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev) | |||
648 | { | 648 | { |
649 | /* Finish setting up the DEVICE info. */ | 649 | /* Finish setting up the DEVICE info. */ |
650 | dev->mtu = AX_MTU; | 650 | dev->mtu = AX_MTU; |
651 | dev->hard_header_len = 0; | 651 | dev->hard_header_len = AX25_MAX_HEADER_LEN; |
652 | dev->addr_len = 0; | 652 | dev->addr_len = AX25_ADDR_LEN; |
653 | dev->type = ARPHRD_AX25; | 653 | dev->type = ARPHRD_AX25; |
654 | dev->tx_queue_len = 10; | 654 | dev->tx_queue_len = 10; |
655 | dev->header_ops = &ax25_header_ops; | 655 | dev->header_ops = &ax25_header_ops; |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5a1cc089acb7..86e5749226ef 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -1295,6 +1295,9 @@ void netvsc_channel_cb(void *context) | |||
1295 | ndev = hv_get_drvdata(device); | 1295 | ndev = hv_get_drvdata(device); |
1296 | buffer = get_per_channel_state(channel); | 1296 | buffer = get_per_channel_state(channel); |
1297 | 1297 | ||
1298 | /* commit_rd_index() -> hv_signal_on_read() needs this. */ | ||
1299 | init_cached_read_index(channel); | ||
1300 | |||
1298 | do { | 1301 | do { |
1299 | desc = get_next_pkt_raw(channel); | 1302 | desc = get_next_pkt_raw(channel); |
1300 | if (desc != NULL) { | 1303 | if (desc != NULL) { |
@@ -1347,6 +1350,9 @@ void netvsc_channel_cb(void *context) | |||
1347 | 1350 | ||
1348 | bufferlen = bytes_recvd; | 1351 | bufferlen = bytes_recvd; |
1349 | } | 1352 | } |
1353 | |||
1354 | init_cached_read_index(channel); | ||
1355 | |||
1350 | } while (1); | 1356 | } while (1); |
1351 | 1357 | ||
1352 | if (bufferlen > NETVSC_PACKET_SIZE) | 1358 | if (bufferlen > NETVSC_PACKET_SIZE) |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 1e05b7c2d157..0844f8496413 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev) | |||
164 | { | 164 | { |
165 | dev->mtu = 64 * 1024; | 165 | dev->mtu = 64 * 1024; |
166 | dev->hard_header_len = ETH_HLEN; /* 14 */ | 166 | dev->hard_header_len = ETH_HLEN; /* 14 */ |
167 | dev->min_header_len = ETH_HLEN; /* 14 */ | ||
167 | dev->addr_len = ETH_ALEN; /* 6 */ | 168 | dev->addr_len = ETH_ALEN; /* 6 */ |
168 | dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ | 169 | dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ |
169 | dev->flags = IFF_LOOPBACK; | 170 | dev->flags = IFF_LOOPBACK; |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 402618565838..c27011bbe30c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -681,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
681 | size_t linear; | 681 | size_t linear; |
682 | 682 | ||
683 | if (q->flags & IFF_VNET_HDR) { | 683 | if (q->flags & IFF_VNET_HDR) { |
684 | vnet_hdr_len = q->vnet_hdr_sz; | 684 | vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); |
685 | 685 | ||
686 | err = -EINVAL; | 686 | err = -EINVAL; |
687 | if (len < vnet_hdr_len) | 687 | if (len < vnet_hdr_len) |
@@ -820,7 +820,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
820 | 820 | ||
821 | if (q->flags & IFF_VNET_HDR) { | 821 | if (q->flags & IFF_VNET_HDR) { |
822 | struct virtio_net_hdr vnet_hdr; | 822 | struct virtio_net_hdr vnet_hdr; |
823 | vnet_hdr_len = q->vnet_hdr_sz; | 823 | vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); |
824 | if (iov_iter_count(iter) < vnet_hdr_len) | 824 | if (iov_iter_count(iter) < vnet_hdr_len) |
825 | return -EINVAL; | 825 | return -EINVAL; |
826 | 826 | ||
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c index c0b4e65267af..46fe1ae919a3 100644 --- a/drivers/net/phy/mdio-bcm-iproc.c +++ b/drivers/net/phy/mdio-bcm-iproc.c | |||
@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg) | |||
81 | if (rc) | 81 | if (rc) |
82 | return rc; | 82 | return rc; |
83 | 83 | ||
84 | iproc_mdio_config_clk(priv->base); | ||
85 | |||
86 | /* Prepare the read operation */ | 84 | /* Prepare the read operation */ |
87 | cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | | 85 | cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | |
88 | (reg << MII_DATA_RA_SHIFT) | | 86 | (reg << MII_DATA_RA_SHIFT) | |
@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id, | |||
112 | if (rc) | 110 | if (rc) |
113 | return rc; | 111 | return rc; |
114 | 112 | ||
115 | iproc_mdio_config_clk(priv->base); | ||
116 | |||
117 | /* Prepare the write operation */ | 113 | /* Prepare the write operation */ |
118 | cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | | 114 | cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | |
119 | (reg << MII_DATA_RA_SHIFT) | | 115 | (reg << MII_DATA_RA_SHIFT) | |
@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev) | |||
163 | bus->read = iproc_mdio_read; | 159 | bus->read = iproc_mdio_read; |
164 | bus->write = iproc_mdio_write; | 160 | bus->write = iproc_mdio_write; |
165 | 161 | ||
162 | iproc_mdio_config_clk(priv->base); | ||
163 | |||
166 | rc = of_mdiobus_register(bus, pdev->dev.of_node); | 164 | rc = of_mdiobus_register(bus, pdev->dev.of_node); |
167 | if (rc) { | 165 | if (rc) { |
168 | dev_err(&pdev->dev, "MDIO bus registration failed\n"); | 166 | dev_err(&pdev->dev, "MDIO bus registration failed\n"); |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index e55809c5beb7..6742070ca676 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = { | |||
1012 | .phy_id = PHY_ID_KSZ8795, | 1012 | .phy_id = PHY_ID_KSZ8795, |
1013 | .phy_id_mask = MICREL_PHY_ID_MASK, | 1013 | .phy_id_mask = MICREL_PHY_ID_MASK, |
1014 | .name = "Micrel KSZ8795", | 1014 | .name = "Micrel KSZ8795", |
1015 | .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), | 1015 | .features = PHY_BASIC_FEATURES, |
1016 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | 1016 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, |
1017 | .config_init = kszphy_config_init, | 1017 | .config_init = kszphy_config_init, |
1018 | .config_aneg = ksz8873mll_config_aneg, | 1018 | .config_aneg = ksz8873mll_config_aneg, |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 92b08383cafa..8c8e15b8739d 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -908,6 +908,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
908 | struct module *ndev_owner = dev->dev.parent->driver->owner; | 908 | struct module *ndev_owner = dev->dev.parent->driver->owner; |
909 | struct mii_bus *bus = phydev->mdio.bus; | 909 | struct mii_bus *bus = phydev->mdio.bus; |
910 | struct device *d = &phydev->mdio.dev; | 910 | struct device *d = &phydev->mdio.dev; |
911 | bool using_genphy = false; | ||
911 | int err; | 912 | int err; |
912 | 913 | ||
913 | /* For Ethernet device drivers that register their own MDIO bus, we | 914 | /* For Ethernet device drivers that register their own MDIO bus, we |
@@ -933,12 +934,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
933 | d->driver = | 934 | d->driver = |
934 | &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver; | 935 | &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver; |
935 | 936 | ||
937 | using_genphy = true; | ||
938 | } | ||
939 | |||
940 | if (!try_module_get(d->driver->owner)) { | ||
941 | dev_err(&dev->dev, "failed to get the device driver module\n"); | ||
942 | err = -EIO; | ||
943 | goto error_put_device; | ||
944 | } | ||
945 | |||
946 | if (using_genphy) { | ||
936 | err = d->driver->probe(d); | 947 | err = d->driver->probe(d); |
937 | if (err >= 0) | 948 | if (err >= 0) |
938 | err = device_bind_driver(d); | 949 | err = device_bind_driver(d); |
939 | 950 | ||
940 | if (err) | 951 | if (err) |
941 | goto error; | 952 | goto error_module_put; |
942 | } | 953 | } |
943 | 954 | ||
944 | if (phydev->attached_dev) { | 955 | if (phydev->attached_dev) { |
@@ -975,7 +986,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
975 | return err; | 986 | return err; |
976 | 987 | ||
977 | error: | 988 | error: |
989 | /* phy_detach() does all of the cleanup below */ | ||
978 | phy_detach(phydev); | 990 | phy_detach(phydev); |
991 | return err; | ||
992 | |||
993 | error_module_put: | ||
994 | module_put(d->driver->owner); | ||
995 | error_put_device: | ||
979 | put_device(d); | 996 | put_device(d); |
980 | if (ndev_owner != bus->owner) | 997 | if (ndev_owner != bus->owner) |
981 | module_put(bus->owner); | 998 | module_put(bus->owner); |
@@ -1039,6 +1056,8 @@ void phy_detach(struct phy_device *phydev) | |||
1039 | 1056 | ||
1040 | phy_led_triggers_unregister(phydev); | 1057 | phy_led_triggers_unregister(phydev); |
1041 | 1058 | ||
1059 | module_put(phydev->mdio.dev.driver->owner); | ||
1060 | |||
1042 | /* If the device had no specific driver before (i.e. - it | 1061 | /* If the device had no specific driver before (i.e. - it |
1043 | * was using the generic driver), we unbind the device | 1062 | * was using the generic driver), we unbind the device |
1044 | * from the generic driver so that there's a chance a | 1063 | * from the generic driver so that there's a chance a |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2cd10b26b650..bfabe180053e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1170,9 +1170,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1170 | } | 1170 | } |
1171 | 1171 | ||
1172 | if (tun->flags & IFF_VNET_HDR) { | 1172 | if (tun->flags & IFF_VNET_HDR) { |
1173 | if (len < tun->vnet_hdr_sz) | 1173 | int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); |
1174 | |||
1175 | if (len < vnet_hdr_sz) | ||
1174 | return -EINVAL; | 1176 | return -EINVAL; |
1175 | len -= tun->vnet_hdr_sz; | 1177 | len -= vnet_hdr_sz; |
1176 | 1178 | ||
1177 | if (!copy_from_iter_full(&gso, sizeof(gso), from)) | 1179 | if (!copy_from_iter_full(&gso, sizeof(gso), from)) |
1178 | return -EFAULT; | 1180 | return -EFAULT; |
@@ -1183,7 +1185,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1183 | 1185 | ||
1184 | if (tun16_to_cpu(tun, gso.hdr_len) > len) | 1186 | if (tun16_to_cpu(tun, gso.hdr_len) > len) |
1185 | return -EINVAL; | 1187 | return -EINVAL; |
1186 | iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso)); | 1188 | iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); |
1187 | } | 1189 | } |
1188 | 1190 | ||
1189 | if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { | 1191 | if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { |
@@ -1335,7 +1337,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, | |||
1335 | vlan_hlen = VLAN_HLEN; | 1337 | vlan_hlen = VLAN_HLEN; |
1336 | 1338 | ||
1337 | if (tun->flags & IFF_VNET_HDR) | 1339 | if (tun->flags & IFF_VNET_HDR) |
1338 | vnet_hdr_sz = tun->vnet_hdr_sz; | 1340 | vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); |
1339 | 1341 | ||
1340 | total = skb->len + vlan_hlen + vnet_hdr_sz; | 1342 | total = skb->len + vlan_hlen + vnet_hdr_sz; |
1341 | 1343 | ||
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 3daa41bdd4ea..0acc9b640419 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c | |||
@@ -776,7 +776,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
776 | struct net_device *netdev; | 776 | struct net_device *netdev; |
777 | struct catc *catc; | 777 | struct catc *catc; |
778 | u8 broadcast[ETH_ALEN]; | 778 | u8 broadcast[ETH_ALEN]; |
779 | int i, pktsz; | 779 | int pktsz, ret; |
780 | 780 | ||
781 | if (usb_set_interface(usbdev, | 781 | if (usb_set_interface(usbdev, |
782 | intf->altsetting->desc.bInterfaceNumber, 1)) { | 782 | intf->altsetting->desc.bInterfaceNumber, 1)) { |
@@ -811,12 +811,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
811 | if ((!catc->ctrl_urb) || (!catc->tx_urb) || | 811 | if ((!catc->ctrl_urb) || (!catc->tx_urb) || |
812 | (!catc->rx_urb) || (!catc->irq_urb)) { | 812 | (!catc->rx_urb) || (!catc->irq_urb)) { |
813 | dev_err(&intf->dev, "No free urbs available.\n"); | 813 | dev_err(&intf->dev, "No free urbs available.\n"); |
814 | usb_free_urb(catc->ctrl_urb); | 814 | ret = -ENOMEM; |
815 | usb_free_urb(catc->tx_urb); | 815 | goto fail_free; |
816 | usb_free_urb(catc->rx_urb); | ||
817 | usb_free_urb(catc->irq_urb); | ||
818 | free_netdev(netdev); | ||
819 | return -ENOMEM; | ||
820 | } | 816 | } |
821 | 817 | ||
822 | /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ | 818 | /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ |
@@ -844,15 +840,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
844 | catc->irq_buf, 2, catc_irq_done, catc, 1); | 840 | catc->irq_buf, 2, catc_irq_done, catc, 1); |
845 | 841 | ||
846 | if (!catc->is_f5u011) { | 842 | if (!catc->is_f5u011) { |
843 | u32 *buf; | ||
844 | int i; | ||
845 | |||
847 | dev_dbg(dev, "Checking memory size\n"); | 846 | dev_dbg(dev, "Checking memory size\n"); |
848 | 847 | ||
849 | i = 0x12345678; | 848 | buf = kmalloc(4, GFP_KERNEL); |
850 | catc_write_mem(catc, 0x7a80, &i, 4); | 849 | if (!buf) { |
851 | i = 0x87654321; | 850 | ret = -ENOMEM; |
852 | catc_write_mem(catc, 0xfa80, &i, 4); | 851 | goto fail_free; |
853 | catc_read_mem(catc, 0x7a80, &i, 4); | 852 | } |
853 | |||
854 | *buf = 0x12345678; | ||
855 | catc_write_mem(catc, 0x7a80, buf, 4); | ||
856 | *buf = 0x87654321; | ||
857 | catc_write_mem(catc, 0xfa80, buf, 4); | ||
858 | catc_read_mem(catc, 0x7a80, buf, 4); | ||
854 | 859 | ||
855 | switch (i) { | 860 | switch (*buf) { |
856 | case 0x12345678: | 861 | case 0x12345678: |
857 | catc_set_reg(catc, TxBufCount, 8); | 862 | catc_set_reg(catc, TxBufCount, 8); |
858 | catc_set_reg(catc, RxBufCount, 32); | 863 | catc_set_reg(catc, RxBufCount, 32); |
@@ -867,6 +872,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
867 | dev_dbg(dev, "32k Memory\n"); | 872 | dev_dbg(dev, "32k Memory\n"); |
868 | break; | 873 | break; |
869 | } | 874 | } |
875 | |||
876 | kfree(buf); | ||
870 | 877 | ||
871 | dev_dbg(dev, "Getting MAC from SEEROM.\n"); | 878 | dev_dbg(dev, "Getting MAC from SEEROM.\n"); |
872 | 879 | ||
@@ -913,16 +920,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
913 | usb_set_intfdata(intf, catc); | 920 | usb_set_intfdata(intf, catc); |
914 | 921 | ||
915 | SET_NETDEV_DEV(netdev, &intf->dev); | 922 | SET_NETDEV_DEV(netdev, &intf->dev); |
916 | if (register_netdev(netdev) != 0) { | 923 | ret = register_netdev(netdev); |
917 | usb_set_intfdata(intf, NULL); | 924 | if (ret) |
918 | usb_free_urb(catc->ctrl_urb); | 925 | goto fail_clear_intfdata; |
919 | usb_free_urb(catc->tx_urb); | 926 | |
920 | usb_free_urb(catc->rx_urb); | ||
921 | usb_free_urb(catc->irq_urb); | ||
922 | free_netdev(netdev); | ||
923 | return -EIO; | ||
924 | } | ||
925 | return 0; | 927 | return 0; |
928 | |||
929 | fail_clear_intfdata: | ||
930 | usb_set_intfdata(intf, NULL); | ||
931 | fail_free: | ||
932 | usb_free_urb(catc->ctrl_urb); | ||
933 | usb_free_urb(catc->tx_urb); | ||
934 | usb_free_urb(catc->rx_urb); | ||
935 | usb_free_urb(catc->irq_urb); | ||
936 | free_netdev(netdev); | ||
937 | return ret; | ||
926 | } | 938 | } |
927 | 939 | ||
928 | static void catc_disconnect(struct usb_interface *intf) | 940 | static void catc_disconnect(struct usb_interface *intf) |
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 24e803fe9a53..36674484c6fb 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb) | |||
126 | 126 | ||
127 | static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) | 127 | static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) |
128 | { | 128 | { |
129 | u8 *buf; | ||
129 | int ret; | 130 | int ret; |
130 | 131 | ||
132 | buf = kmalloc(size, GFP_NOIO); | ||
133 | if (!buf) | ||
134 | return -ENOMEM; | ||
135 | |||
131 | ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), | 136 | ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), |
132 | PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, | 137 | PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, |
133 | indx, data, size, 1000); | 138 | indx, buf, size, 1000); |
134 | if (ret < 0) | 139 | if (ret < 0) |
135 | netif_dbg(pegasus, drv, pegasus->net, | 140 | netif_dbg(pegasus, drv, pegasus->net, |
136 | "%s returned %d\n", __func__, ret); | 141 | "%s returned %d\n", __func__, ret); |
142 | else if (ret <= size) | ||
143 | memcpy(data, buf, ret); | ||
144 | kfree(buf); | ||
137 | return ret; | 145 | return ret; |
138 | } | 146 | } |
139 | 147 | ||
140 | static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) | 148 | static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, |
149 | const void *data) | ||
141 | { | 150 | { |
151 | u8 *buf; | ||
142 | int ret; | 152 | int ret; |
143 | 153 | ||
154 | buf = kmemdup(data, size, GFP_NOIO); | ||
155 | if (!buf) | ||
156 | return -ENOMEM; | ||
157 | |||
144 | ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), | 158 | ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), |
145 | PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, | 159 | PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, |
146 | indx, data, size, 100); | 160 | indx, buf, size, 100); |
147 | if (ret < 0) | 161 | if (ret < 0) |
148 | netif_dbg(pegasus, drv, pegasus->net, | 162 | netif_dbg(pegasus, drv, pegasus->net, |
149 | "%s returned %d\n", __func__, ret); | 163 | "%s returned %d\n", __func__, ret); |
164 | kfree(buf); | ||
150 | return ret; | 165 | return ret; |
151 | } | 166 | } |
152 | 167 | ||
153 | static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) | 168 | static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) |
154 | { | 169 | { |
170 | u8 *buf; | ||
155 | int ret; | 171 | int ret; |
156 | 172 | ||
173 | buf = kmemdup(&data, 1, GFP_NOIO); | ||
174 | if (!buf) | ||
175 | return -ENOMEM; | ||
176 | |||
157 | ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), | 177 | ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), |
158 | PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, | 178 | PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, |
159 | indx, &data, 1, 1000); | 179 | indx, buf, 1, 1000); |
160 | if (ret < 0) | 180 | if (ret < 0) |
161 | netif_dbg(pegasus, drv, pegasus->net, | 181 | netif_dbg(pegasus, drv, pegasus->net, |
162 | "%s returned %d\n", __func__, ret); | 182 | "%s returned %d\n", __func__, ret); |
183 | kfree(buf); | ||
163 | return ret; | 184 | return ret; |
164 | } | 185 | } |
165 | 186 | ||
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 95b7bd0d7abc..c81c79110cef 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c | |||
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150"; | |||
155 | */ | 155 | */ |
156 | static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) | 156 | static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) |
157 | { | 157 | { |
158 | return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), | 158 | void *buf; |
159 | RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, | 159 | int ret; |
160 | indx, 0, data, size, 500); | 160 | |
161 | buf = kmalloc(size, GFP_NOIO); | ||
162 | if (!buf) | ||
163 | return -ENOMEM; | ||
164 | |||
165 | ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), | ||
166 | RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, | ||
167 | indx, 0, buf, size, 500); | ||
168 | if (ret > 0 && ret <= size) | ||
169 | memcpy(data, buf, ret); | ||
170 | kfree(buf); | ||
171 | return ret; | ||
161 | } | 172 | } |
162 | 173 | ||
163 | static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) | 174 | static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) |
164 | { | 175 | { |
165 | return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | 176 | void *buf; |
166 | RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, | 177 | int ret; |
167 | indx, 0, data, size, 500); | 178 | |
179 | buf = kmemdup(data, size, GFP_NOIO); | ||
180 | if (!buf) | ||
181 | return -ENOMEM; | ||
182 | |||
183 | ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | ||
184 | RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, | ||
185 | indx, 0, buf, size, 500); | ||
186 | kfree(buf); | ||
187 | return ret; | ||
168 | } | 188 | } |
169 | 189 | ||
170 | static void async_set_reg_cb(struct urb *urb) | 190 | static void async_set_reg_cb(struct urb *urb) |
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 12071f1582df..d9440bc022f2 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c | |||
@@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0); | |||
73 | /* Private data structure */ | 73 | /* Private data structure */ |
74 | struct sierra_net_data { | 74 | struct sierra_net_data { |
75 | 75 | ||
76 | u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ | ||
77 | |||
78 | u16 link_up; /* air link up or down */ | 76 | u16 link_up; /* air link up or down */ |
79 | u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ | 77 | u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ |
80 | 78 | ||
@@ -122,6 +120,7 @@ struct param { | |||
122 | 120 | ||
123 | /* LSI Protocol types */ | 121 | /* LSI Protocol types */ |
124 | #define SIERRA_NET_PROTOCOL_UMTS 0x01 | 122 | #define SIERRA_NET_PROTOCOL_UMTS 0x01 |
123 | #define SIERRA_NET_PROTOCOL_UMTS_DS 0x04 | ||
125 | /* LSI Coverage */ | 124 | /* LSI Coverage */ |
126 | #define SIERRA_NET_COVERAGE_NONE 0x00 | 125 | #define SIERRA_NET_COVERAGE_NONE 0x00 |
127 | #define SIERRA_NET_COVERAGE_NOPACKET 0x01 | 126 | #define SIERRA_NET_COVERAGE_NOPACKET 0x01 |
@@ -129,7 +128,8 @@ struct param { | |||
129 | /* LSI Session */ | 128 | /* LSI Session */ |
130 | #define SIERRA_NET_SESSION_IDLE 0x00 | 129 | #define SIERRA_NET_SESSION_IDLE 0x00 |
131 | /* LSI Link types */ | 130 | /* LSI Link types */ |
132 | #define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 | 131 | #define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00 |
132 | #define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02 | ||
133 | 133 | ||
134 | struct lsi_umts { | 134 | struct lsi_umts { |
135 | u8 protocol; | 135 | u8 protocol; |
@@ -137,9 +137,14 @@ struct lsi_umts { | |||
137 | __be16 length; | 137 | __be16 length; |
138 | /* eventually use a union for the rest - assume umts for now */ | 138 | /* eventually use a union for the rest - assume umts for now */ |
139 | u8 coverage; | 139 | u8 coverage; |
140 | u8 unused2[41]; | 140 | u8 network_len; /* network name len */ |
141 | u8 network[40]; /* network name (UCS2, bigendian) */ | ||
141 | u8 session_state; | 142 | u8 session_state; |
142 | u8 unused3[33]; | 143 | u8 unused3[33]; |
144 | } __packed; | ||
145 | |||
146 | struct lsi_umts_single { | ||
147 | struct lsi_umts lsi; | ||
143 | u8 link_type; | 148 | u8 link_type; |
144 | u8 pdp_addr_len; /* NW-supplied PDP address len */ | 149 | u8 pdp_addr_len; /* NW-supplied PDP address len */ |
145 | u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ | 150 | u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ |
@@ -158,10 +163,31 @@ struct lsi_umts { | |||
158 | u8 reserved[8]; | 163 | u8 reserved[8]; |
159 | } __packed; | 164 | } __packed; |
160 | 165 | ||
166 | struct lsi_umts_dual { | ||
167 | struct lsi_umts lsi; | ||
168 | u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */ | ||
169 | u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */ | ||
170 | u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */ | ||
171 | u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */ | ||
172 | u8 unused4[23]; | ||
173 | u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */ | ||
174 | u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */ | ||
175 | u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */ | ||
176 | u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/ | ||
177 | u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */ | ||
178 | u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */ | ||
179 | u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */ | ||
180 | u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/ | ||
181 | u8 unused5[68]; | ||
182 | } __packed; | ||
183 | |||
161 | #define SIERRA_NET_LSI_COMMON_LEN 4 | 184 | #define SIERRA_NET_LSI_COMMON_LEN 4 |
162 | #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) | 185 | #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single)) |
163 | #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ | 186 | #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ |
164 | (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) | 187 | (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) |
188 | #define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual)) | ||
189 | #define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \ | ||
190 | (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN) | ||
165 | 191 | ||
166 | /* Forward definitions */ | 192 | /* Forward definitions */ |
167 | static void sierra_sync_timer(unsigned long syncdata); | 193 | static void sierra_sync_timer(unsigned long syncdata); |
@@ -190,10 +216,11 @@ static inline void sierra_net_set_private(struct usbnet *dev, | |||
190 | dev->data[0] = (unsigned long)priv; | 216 | dev->data[0] = (unsigned long)priv; |
191 | } | 217 | } |
192 | 218 | ||
193 | /* is packet IPv4 */ | 219 | /* is packet IPv4/IPv6 */ |
194 | static inline int is_ip(struct sk_buff *skb) | 220 | static inline int is_ip(struct sk_buff *skb) |
195 | { | 221 | { |
196 | return skb->protocol == cpu_to_be16(ETH_P_IP); | 222 | return skb->protocol == cpu_to_be16(ETH_P_IP) || |
223 | skb->protocol == cpu_to_be16(ETH_P_IPV6); | ||
197 | } | 224 | } |
198 | 225 | ||
199 | /* | 226 | /* |
@@ -349,49 +376,54 @@ static inline int sierra_net_is_valid_addrlen(u8 len) | |||
349 | static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) | 376 | static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) |
350 | { | 377 | { |
351 | struct lsi_umts *lsi = (struct lsi_umts *)data; | 378 | struct lsi_umts *lsi = (struct lsi_umts *)data; |
379 | u32 expected_length; | ||
352 | 380 | ||
353 | if (datalen < sizeof(struct lsi_umts)) { | 381 | if (datalen < sizeof(struct lsi_umts_single)) { |
354 | netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", | 382 | netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n", |
355 | __func__, datalen, | 383 | __func__, datalen, sizeof(struct lsi_umts_single)); |
356 | sizeof(struct lsi_umts)); | ||
357 | return -1; | 384 | return -1; |
358 | } | 385 | } |
359 | 386 | ||
360 | if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { | 387 | /* Validate the session state */ |
361 | netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", | 388 | if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { |
362 | __func__, be16_to_cpu(lsi->length), | 389 | netdev_err(dev->net, "Session idle, 0x%02x\n", |
363 | (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); | 390 | lsi->session_state); |
364 | return -1; | 391 | return 0; |
365 | } | 392 | } |
366 | 393 | ||
367 | /* Validate the protocol - only support UMTS for now */ | 394 | /* Validate the protocol - only support UMTS for now */ |
368 | if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { | 395 | if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) { |
396 | struct lsi_umts_single *single = (struct lsi_umts_single *)lsi; | ||
397 | |||
398 | /* Validate the link type */ | ||
399 | if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 && | ||
400 | single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) { | ||
401 | netdev_err(dev->net, "Link type unsupported: 0x%02x\n", | ||
402 | single->link_type); | ||
403 | return -1; | ||
404 | } | ||
405 | expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN; | ||
406 | } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) { | ||
407 | expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN; | ||
408 | } else { | ||
369 | netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", | 409 | netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", |
370 | lsi->protocol); | 410 | lsi->protocol); |
371 | return -1; | 411 | return -1; |
372 | } | 412 | } |
373 | 413 | ||
374 | /* Validate the link type */ | 414 | if (be16_to_cpu(lsi->length) != expected_length) { |
375 | if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { | 415 | netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", |
376 | netdev_err(dev->net, "Link type unsupported: 0x%02x\n", | 416 | __func__, be16_to_cpu(lsi->length), expected_length); |
377 | lsi->link_type); | ||
378 | return -1; | 417 | return -1; |
379 | } | 418 | } |
380 | 419 | ||
381 | /* Validate the coverage */ | 420 | /* Validate the coverage */ |
382 | if (lsi->coverage == SIERRA_NET_COVERAGE_NONE | 421 | if (lsi->coverage == SIERRA_NET_COVERAGE_NONE || |
383 | || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { | 422 | lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { |
384 | netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); | 423 | netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); |
385 | return 0; | 424 | return 0; |
386 | } | 425 | } |
387 | 426 | ||
388 | /* Validate the session state */ | ||
389 | if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { | ||
390 | netdev_err(dev->net, "Session idle, 0x%02x\n", | ||
391 | lsi->session_state); | ||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | /* Set link_sense true */ | 427 | /* Set link_sense true */ |
396 | return 1; | 428 | return 1; |
397 | } | 429 | } |
@@ -652,7 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) | |||
652 | u8 numendpoints; | 684 | u8 numendpoints; |
653 | u16 fwattr = 0; | 685 | u16 fwattr = 0; |
654 | int status; | 686 | int status; |
655 | struct ethhdr *eth; | ||
656 | struct sierra_net_data *priv; | 687 | struct sierra_net_data *priv; |
657 | static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { | 688 | static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { |
658 | 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; | 689 | 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; |
@@ -690,11 +721,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) | |||
690 | dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); | 721 | dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); |
691 | dev->net->dev_addr[ETH_ALEN-1] = ifacenum; | 722 | dev->net->dev_addr[ETH_ALEN-1] = ifacenum; |
692 | 723 | ||
693 | /* we will have to manufacture ethernet headers, prepare template */ | ||
694 | eth = (struct ethhdr *)priv->ethr_hdr_tmpl; | ||
695 | memcpy(ð->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
696 | eth->h_proto = cpu_to_be16(ETH_P_IP); | ||
697 | |||
698 | /* prepare shutdown message template */ | 724 | /* prepare shutdown message template */ |
699 | memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); | 725 | memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); |
700 | /* set context index initially to 0 - prepares tx hdr template */ | 726 | /* set context index initially to 0 - prepares tx hdr template */ |
@@ -824,9 +850,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
824 | 850 | ||
825 | skb_pull(skb, hh.hdrlen); | 851 | skb_pull(skb, hh.hdrlen); |
826 | 852 | ||
827 | /* We are going to accept this packet, prepare it */ | 853 | /* We are going to accept this packet, prepare it. |
828 | memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, | 854 | * In case protocol is IPv6, keep it, otherwise force IPv4. |
829 | ETH_HLEN); | 855 | */ |
856 | skb_reset_mac_header(skb); | ||
857 | if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6)) | ||
858 | eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP); | ||
859 | eth_zero_addr(eth_hdr(skb)->h_source); | ||
860 | memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
830 | 861 | ||
831 | /* Last packet in batch handled by usbnet */ | 862 | /* Last packet in batch handled by usbnet */ |
832 | if (hh.payload_len.word == skb->len) | 863 | if (hh.payload_len.word == skb->len) |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index d02ca1491d16..8d3e53fac1da 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c | |||
@@ -91,7 +91,7 @@ | |||
91 | 91 | ||
92 | #define IWL8000_FW_PRE "iwlwifi-8000C-" | 92 | #define IWL8000_FW_PRE "iwlwifi-8000C-" |
93 | #define IWL8000_MODULE_FIRMWARE(api) \ | 93 | #define IWL8000_MODULE_FIRMWARE(api) \ |
94 | IWL8000_FW_PRE "-" __stringify(api) ".ucode" | 94 | IWL8000_FW_PRE __stringify(api) ".ucode" |
95 | 95 | ||
96 | #define IWL8265_FW_PRE "iwlwifi-8265-" | 96 | #define IWL8265_FW_PRE "iwlwifi-8265-" |
97 | #define IWL8265_MODULE_FIRMWARE(api) \ | 97 | #define IWL8265_MODULE_FIRMWARE(api) \ |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 636c8b03e318..09e9e2e3ed04 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, | |||
1164 | .frame_limit = IWL_FRAME_LIMIT, | 1164 | .frame_limit = IWL_FRAME_LIMIT, |
1165 | }; | 1165 | }; |
1166 | 1166 | ||
1167 | /* Make sure reserved queue is still marked as such (or allocated) */ | 1167 | /* Make sure reserved queue is still marked as such (if allocated) */ |
1168 | mvm->queue_info[mvm_sta->reserved_queue].status = | 1168 | if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) |
1169 | IWL_MVM_QUEUE_RESERVED; | 1169 | mvm->queue_info[mvm_sta->reserved_queue].status = |
1170 | IWL_MVM_QUEUE_RESERVED; | ||
1170 | 1171 | ||
1171 | for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { | 1172 | for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { |
1172 | struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; | 1173 | struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 63a051be832e..bec7d9c46087 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c | |||
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) | |||
843 | return; | 843 | return; |
844 | 844 | ||
845 | IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); | 845 | IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); |
846 | thermal_zone_device_unregister(mvm->tz_device.tzone); | 846 | if (mvm->tz_device.tzone) { |
847 | mvm->tz_device.tzone = NULL; | 847 | thermal_zone_device_unregister(mvm->tz_device.tzone); |
848 | mvm->tz_device.tzone = NULL; | ||
849 | } | ||
848 | } | 850 | } |
849 | 851 | ||
850 | static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) | 852 | static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) |
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) | |||
853 | return; | 855 | return; |
854 | 856 | ||
855 | IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); | 857 | IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); |
856 | thermal_cooling_device_unregister(mvm->cooling_dev.cdev); | 858 | if (mvm->cooling_dev.cdev) { |
857 | mvm->cooling_dev.cdev = NULL; | 859 | thermal_cooling_device_unregister(mvm->cooling_dev.cdev); |
860 | mvm->cooling_dev.cdev = NULL; | ||
861 | } | ||
858 | } | 862 | } |
859 | #endif /* CONFIG_THERMAL */ | 863 | #endif /* CONFIG_THERMAL */ |
860 | 864 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index 691ddef1ae28..a33a06d58a9a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | |||
@@ -92,7 +92,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) | |||
92 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 92 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
93 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 93 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
94 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); | 94 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); |
95 | char *fw_name = "rtlwifi/rtl8192cfwU.bin"; | 95 | char *fw_name; |
96 | 96 | ||
97 | rtl8192ce_bt_reg_init(hw); | 97 | rtl8192ce_bt_reg_init(hw); |
98 | 98 | ||
@@ -164,8 +164,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /* request fw */ | 166 | /* request fw */ |
167 | if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) | 167 | if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && |
168 | !IS_92C_SERIAL(rtlhal->version)) | ||
169 | fw_name = "rtlwifi/rtl8192cfwU.bin"; | ||
170 | else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) | ||
168 | fw_name = "rtlwifi/rtl8192cfwU_B.bin"; | 171 | fw_name = "rtlwifi/rtl8192cfwU_B.bin"; |
172 | else | ||
173 | fw_name = "rtlwifi/rtl8192cfw.bin"; | ||
169 | 174 | ||
170 | rtlpriv->max_fw_size = 0x4000; | 175 | rtlpriv->max_fw_size = 0x4000; |
171 | pr_info("Using firmware %s\n", fw_name); | 176 | pr_info("Using firmware %s\n", fw_name); |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8315fe73ecd0..1e4125a98291 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) | |||
281 | { | 281 | { |
282 | RING_IDX req_prod = queue->rx.req_prod_pvt; | 282 | RING_IDX req_prod = queue->rx.req_prod_pvt; |
283 | int notify; | 283 | int notify; |
284 | int err = 0; | ||
284 | 285 | ||
285 | if (unlikely(!netif_carrier_ok(queue->info->netdev))) | 286 | if (unlikely(!netif_carrier_ok(queue->info->netdev))) |
286 | return; | 287 | return; |
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) | |||
295 | struct xen_netif_rx_request *req; | 296 | struct xen_netif_rx_request *req; |
296 | 297 | ||
297 | skb = xennet_alloc_one_rx_buffer(queue); | 298 | skb = xennet_alloc_one_rx_buffer(queue); |
298 | if (!skb) | 299 | if (!skb) { |
300 | err = -ENOMEM; | ||
299 | break; | 301 | break; |
302 | } | ||
300 | 303 | ||
301 | id = xennet_rxidx(req_prod); | 304 | id = xennet_rxidx(req_prod); |
302 | 305 | ||
@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) | |||
320 | 323 | ||
321 | queue->rx.req_prod_pvt = req_prod; | 324 | queue->rx.req_prod_pvt = req_prod; |
322 | 325 | ||
323 | /* Not enough requests? Try again later. */ | 326 | /* Try again later if there are not enough requests or skb allocation |
324 | if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) { | 327 | * failed. |
328 | * Enough requests is quantified as the sum of newly created slots and | ||
329 | * the unconsumed slots at the backend. | ||
330 | */ | ||
331 | if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN || | ||
332 | unlikely(err)) { | ||
325 | mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); | 333 | mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); |
326 | return; | 334 | return; |
327 | } | 335 | } |
@@ -1379,6 +1387,8 @@ static void xennet_disconnect_backend(struct netfront_info *info) | |||
1379 | for (i = 0; i < num_queues && info->queues; ++i) { | 1387 | for (i = 0; i < num_queues && info->queues; ++i) { |
1380 | struct netfront_queue *queue = &info->queues[i]; | 1388 | struct netfront_queue *queue = &info->queues[i]; |
1381 | 1389 | ||
1390 | del_timer_sync(&queue->rx_refill_timer); | ||
1391 | |||
1382 | if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) | 1392 | if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) |
1383 | unbind_from_irqhandler(queue->tx_irq, queue); | 1393 | unbind_from_irqhandler(queue->tx_irq, queue); |
1384 | if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { | 1394 | if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { |
@@ -1733,7 +1743,6 @@ static void xennet_destroy_queues(struct netfront_info *info) | |||
1733 | 1743 | ||
1734 | if (netif_running(info->netdev)) | 1744 | if (netif_running(info->netdev)) |
1735 | napi_disable(&queue->napi); | 1745 | napi_disable(&queue->napi); |
1736 | del_timer_sync(&queue->rx_refill_timer); | ||
1737 | netif_napi_del(&queue->napi); | 1746 | netif_napi_del(&queue->napi); |
1738 | } | 1747 | } |
1739 | 1748 | ||
@@ -1822,27 +1831,19 @@ static int talk_to_netback(struct xenbus_device *dev, | |||
1822 | xennet_destroy_queues(info); | 1831 | xennet_destroy_queues(info); |
1823 | 1832 | ||
1824 | err = xennet_create_queues(info, &num_queues); | 1833 | err = xennet_create_queues(info, &num_queues); |
1825 | if (err < 0) | 1834 | if (err < 0) { |
1826 | goto destroy_ring; | 1835 | xenbus_dev_fatal(dev, err, "creating queues"); |
1836 | kfree(info->queues); | ||
1837 | info->queues = NULL; | ||
1838 | goto out; | ||
1839 | } | ||
1827 | 1840 | ||
1828 | /* Create shared ring, alloc event channel -- for each queue */ | 1841 | /* Create shared ring, alloc event channel -- for each queue */ |
1829 | for (i = 0; i < num_queues; ++i) { | 1842 | for (i = 0; i < num_queues; ++i) { |
1830 | queue = &info->queues[i]; | 1843 | queue = &info->queues[i]; |
1831 | err = setup_netfront(dev, queue, feature_split_evtchn); | 1844 | err = setup_netfront(dev, queue, feature_split_evtchn); |
1832 | if (err) { | 1845 | if (err) |
1833 | /* setup_netfront() will tidy up the current | 1846 | goto destroy_ring; |
1834 | * queue on error, but we need to clean up | ||
1835 | * those already allocated. | ||
1836 | */ | ||
1837 | if (i > 0) { | ||
1838 | rtnl_lock(); | ||
1839 | netif_set_real_num_tx_queues(info->netdev, i); | ||
1840 | rtnl_unlock(); | ||
1841 | goto destroy_ring; | ||
1842 | } else { | ||
1843 | goto out; | ||
1844 | } | ||
1845 | } | ||
1846 | } | 1847 | } |
1847 | 1848 | ||
1848 | again: | 1849 | again: |
@@ -1932,9 +1933,10 @@ abort_transaction_no_dev_fatal: | |||
1932 | xenbus_transaction_end(xbt, 1); | 1933 | xenbus_transaction_end(xbt, 1); |
1933 | destroy_ring: | 1934 | destroy_ring: |
1934 | xennet_disconnect_backend(info); | 1935 | xennet_disconnect_backend(info); |
1935 | kfree(info->queues); | 1936 | xennet_destroy_queues(info); |
1936 | info->queues = NULL; | ||
1937 | out: | 1937 | out: |
1938 | unregister_netdev(info->netdev); | ||
1939 | xennet_free_netdev(info->netdev); | ||
1938 | return err; | 1940 | return err; |
1939 | } | 1941 | } |
1940 | 1942 | ||
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index a518cb1b59d4..ce3e8dfa10ad 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c | |||
@@ -52,17 +52,17 @@ static void namespace_blk_release(struct device *dev) | |||
52 | kfree(nsblk); | 52 | kfree(nsblk); |
53 | } | 53 | } |
54 | 54 | ||
55 | static struct device_type namespace_io_device_type = { | 55 | static const struct device_type namespace_io_device_type = { |
56 | .name = "nd_namespace_io", | 56 | .name = "nd_namespace_io", |
57 | .release = namespace_io_release, | 57 | .release = namespace_io_release, |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static struct device_type namespace_pmem_device_type = { | 60 | static const struct device_type namespace_pmem_device_type = { |
61 | .name = "nd_namespace_pmem", | 61 | .name = "nd_namespace_pmem", |
62 | .release = namespace_pmem_release, | 62 | .release = namespace_pmem_release, |
63 | }; | 63 | }; |
64 | 64 | ||
65 | static struct device_type namespace_blk_device_type = { | 65 | static const struct device_type namespace_blk_device_type = { |
66 | .name = "nd_namespace_blk", | 66 | .name = "nd_namespace_blk", |
67 | .release = namespace_blk_release, | 67 | .release = namespace_blk_release, |
68 | }; | 68 | }; |
@@ -962,8 +962,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) | |||
962 | struct nvdimm_drvdata *ndd; | 962 | struct nvdimm_drvdata *ndd; |
963 | struct nd_label_id label_id; | 963 | struct nd_label_id label_id; |
964 | u32 flags = 0, remainder; | 964 | u32 flags = 0, remainder; |
965 | int rc, i, id = -1; | ||
965 | u8 *uuid = NULL; | 966 | u8 *uuid = NULL; |
966 | int rc, i; | ||
967 | 967 | ||
968 | if (dev->driver || ndns->claim) | 968 | if (dev->driver || ndns->claim) |
969 | return -EBUSY; | 969 | return -EBUSY; |
@@ -972,11 +972,13 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) | |||
972 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); | 972 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); |
973 | 973 | ||
974 | uuid = nspm->uuid; | 974 | uuid = nspm->uuid; |
975 | id = nspm->id; | ||
975 | } else if (is_namespace_blk(dev)) { | 976 | } else if (is_namespace_blk(dev)) { |
976 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); | 977 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
977 | 978 | ||
978 | uuid = nsblk->uuid; | 979 | uuid = nsblk->uuid; |
979 | flags = NSLABEL_FLAG_LOCAL; | 980 | flags = NSLABEL_FLAG_LOCAL; |
981 | id = nsblk->id; | ||
980 | } | 982 | } |
981 | 983 | ||
982 | /* | 984 | /* |
@@ -1039,10 +1041,11 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) | |||
1039 | 1041 | ||
1040 | /* | 1042 | /* |
1041 | * Try to delete the namespace if we deleted all of its | 1043 | * Try to delete the namespace if we deleted all of its |
1042 | * allocation, this is not the seed device for the region, and | 1044 | * allocation, this is not the seed or 0th device for the |
1043 | * it is not actively claimed by a btt instance. | 1045 | * region, and it is not actively claimed by a btt, pfn, or dax |
1046 | * instance. | ||
1044 | */ | 1047 | */ |
1045 | if (val == 0 && nd_region->ns_seed != dev && !ndns->claim) | 1048 | if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim) |
1046 | nd_device_unregister(dev, ND_ASYNC); | 1049 | nd_device_unregister(dev, ND_ASYNC); |
1047 | 1050 | ||
1048 | return rc; | 1051 | return rc; |
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index a2ac9e641aa9..6c033c9a2f06 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c | |||
@@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) | |||
627 | size = resource_size(&nsio->res); | 627 | size = resource_size(&nsio->res); |
628 | npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K; | 628 | npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K; |
629 | if (nd_pfn->mode == PFN_MODE_PMEM) { | 629 | if (nd_pfn->mode == PFN_MODE_PMEM) { |
630 | unsigned long memmap_size; | ||
631 | |||
632 | /* | 630 | /* |
633 | * vmemmap_populate_hugepages() allocates the memmap array in | 631 | * vmemmap_populate_hugepages() allocates the memmap array in |
634 | * HPAGE_SIZE chunks. | 632 | * HPAGE_SIZE chunks. |
635 | */ | 633 | */ |
636 | memmap_size = ALIGN(64 * npfns, HPAGE_SIZE); | 634 | offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve, |
637 | offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve, | 635 | max(nd_pfn->align, HPAGE_SIZE)) - start; |
638 | nd_pfn->align) - start; | ||
639 | } else if (nd_pfn->mode == PFN_MODE_RAM) | 636 | } else if (nd_pfn->mode == PFN_MODE_RAM) |
640 | offset = ALIGN(start + SZ_8K + dax_label_reserve, | 637 | offset = ALIGN(start + SZ_8K + dax_label_reserve, |
641 | nd_pfn->align) - start; | 638 | nd_pfn->align) - start; |
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index dd6d4ccb41e4..3858b87fd0bb 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c | |||
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, | |||
293 | p->irq = PARPORT_IRQ_NONE; | 293 | p->irq = PARPORT_IRQ_NONE; |
294 | } | 294 | } |
295 | if (p->irq != PARPORT_IRQ_NONE) { | 295 | if (p->irq != PARPORT_IRQ_NONE) { |
296 | printk(", irq %d", p->irq); | 296 | pr_cont(", irq %d", p->irq); |
297 | 297 | ||
298 | if (p->dma == PARPORT_DMA_AUTO) { | 298 | if (p->dma == PARPORT_DMA_AUTO) { |
299 | p->dma = PARPORT_DMA_NONE; | 299 | p->dma = PARPORT_DMA_NONE; |
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base, | |||
303 | is mandatory (see above) */ | 303 | is mandatory (see above) */ |
304 | p->dma = PARPORT_DMA_NONE; | 304 | p->dma = PARPORT_DMA_NONE; |
305 | 305 | ||
306 | printk(" ["); | 306 | pr_cont(" ["); |
307 | #define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} | 307 | #define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}} |
308 | { | 308 | { |
309 | int f = 0; | 309 | int f = 0; |
310 | printmode(PCSPP); | 310 | printmode(PCSPP); |
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, | |||
315 | // printmode(DMA); | 315 | // printmode(DMA); |
316 | } | 316 | } |
317 | #undef printmode | 317 | #undef printmode |
318 | printk("]\n"); | 318 | pr_cont("]\n"); |
319 | 319 | ||
320 | if (p->irq != PARPORT_IRQ_NONE) { | 320 | if (p->irq != PARPORT_IRQ_NONE) { |
321 | if (request_irq (p->irq, parport_irq_handler, | 321 | if (request_irq (p->irq, parport_irq_handler, |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 10c9c0ba8ff2..ec0b4c11ccd9 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/pm_runtime.h> | ||
35 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
36 | #include "../pci.h" | 35 | #include "../pci.h" |
37 | #include "pciehp.h" | 36 | #include "pciehp.h" |
@@ -99,7 +98,6 @@ static int board_added(struct slot *p_slot) | |||
99 | pciehp_green_led_blink(p_slot); | 98 | pciehp_green_led_blink(p_slot); |
100 | 99 | ||
101 | /* Check link training status */ | 100 | /* Check link training status */ |
102 | pm_runtime_get_sync(&ctrl->pcie->port->dev); | ||
103 | retval = pciehp_check_link_status(ctrl); | 101 | retval = pciehp_check_link_status(ctrl); |
104 | if (retval) { | 102 | if (retval) { |
105 | ctrl_err(ctrl, "Failed to check link status\n"); | 103 | ctrl_err(ctrl, "Failed to check link status\n"); |
@@ -120,14 +118,12 @@ static int board_added(struct slot *p_slot) | |||
120 | if (retval != -EEXIST) | 118 | if (retval != -EEXIST) |
121 | goto err_exit; | 119 | goto err_exit; |
122 | } | 120 | } |
123 | pm_runtime_put(&ctrl->pcie->port->dev); | ||
124 | 121 | ||
125 | pciehp_green_led_on(p_slot); | 122 | pciehp_green_led_on(p_slot); |
126 | pciehp_set_attention_status(p_slot, 0); | 123 | pciehp_set_attention_status(p_slot, 0); |
127 | return 0; | 124 | return 0; |
128 | 125 | ||
129 | err_exit: | 126 | err_exit: |
130 | pm_runtime_put(&ctrl->pcie->port->dev); | ||
131 | set_slot_off(ctrl, p_slot); | 127 | set_slot_off(ctrl, p_slot); |
132 | return retval; | 128 | return retval; |
133 | } | 129 | } |
@@ -141,9 +137,7 @@ static int remove_board(struct slot *p_slot) | |||
141 | int retval; | 137 | int retval; |
142 | struct controller *ctrl = p_slot->ctrl; | 138 | struct controller *ctrl = p_slot->ctrl; |
143 | 139 | ||
144 | pm_runtime_get_sync(&ctrl->pcie->port->dev); | ||
145 | retval = pciehp_unconfigure_device(p_slot); | 140 | retval = pciehp_unconfigure_device(p_slot); |
146 | pm_runtime_put(&ctrl->pcie->port->dev); | ||
147 | if (retval) | 141 | if (retval) |
148 | return retval; | 142 | return retval; |
149 | 143 | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 50c5003295ca..7f73bacf13ed 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -1206,6 +1206,16 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
1206 | if (flags & PCI_IRQ_AFFINITY) { | 1206 | if (flags & PCI_IRQ_AFFINITY) { |
1207 | if (!affd) | 1207 | if (!affd) |
1208 | affd = &msi_default_affd; | 1208 | affd = &msi_default_affd; |
1209 | |||
1210 | if (affd->pre_vectors + affd->post_vectors > min_vecs) | ||
1211 | return -EINVAL; | ||
1212 | |||
1213 | /* | ||
1214 | * If there aren't any vectors left after applying the pre/post | ||
1215 | * vectors don't bother with assigning affinity. | ||
1216 | */ | ||
1217 | if (affd->pre_vectors + affd->post_vectors == min_vecs) | ||
1218 | affd = NULL; | ||
1209 | } else { | 1219 | } else { |
1210 | if (WARN_ON(affd)) | 1220 | if (WARN_ON(affd)) |
1211 | affd = NULL; | 1221 | affd = NULL; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a881c0d3d2e8..7904d02ffdb9 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -2241,10 +2241,13 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) | |||
2241 | return false; | 2241 | return false; |
2242 | 2242 | ||
2243 | /* | 2243 | /* |
2244 | * Hotplug ports handled by firmware in System Management Mode | 2244 | * Hotplug interrupts cannot be delivered if the link is down, |
2245 | * so parents of a hotplug port must stay awake. In addition, | ||
2246 | * hotplug ports handled by firmware in System Management Mode | ||
2245 | * may not be put into D3 by the OS (Thunderbolt on non-Macs). | 2247 | * may not be put into D3 by the OS (Thunderbolt on non-Macs). |
2248 | * For simplicity, disallow in general for now. | ||
2246 | */ | 2249 | */ |
2247 | if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) | 2250 | if (bridge->is_hotplug_bridge) |
2248 | return false; | 2251 | return false; |
2249 | 2252 | ||
2250 | if (pci_bridge_d3_force) | 2253 | if (pci_bridge_d3_force) |
@@ -2276,10 +2279,7 @@ static int pci_dev_check_d3cold(struct pci_dev *dev, void *data) | |||
2276 | !pci_pme_capable(dev, PCI_D3cold)) || | 2279 | !pci_pme_capable(dev, PCI_D3cold)) || |
2277 | 2280 | ||
2278 | /* If it is a bridge it must be allowed to go to D3. */ | 2281 | /* If it is a bridge it must be allowed to go to D3. */ |
2279 | !pci_power_manageable(dev) || | 2282 | !pci_power_manageable(dev)) |
2280 | |||
2281 | /* Hotplug interrupts cannot be delivered if the link is down. */ | ||
2282 | dev->is_hotplug_bridge) | ||
2283 | 2283 | ||
2284 | *d3cold_ok = false; | 2284 | *d3cold_ok = false; |
2285 | 2285 | ||
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 17ac1dce3286..3dd8bcbb3011 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) | |||
532 | link = kzalloc(sizeof(*link), GFP_KERNEL); | 532 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
533 | if (!link) | 533 | if (!link) |
534 | return NULL; | 534 | return NULL; |
535 | |||
535 | INIT_LIST_HEAD(&link->sibling); | 536 | INIT_LIST_HEAD(&link->sibling); |
536 | INIT_LIST_HEAD(&link->children); | 537 | INIT_LIST_HEAD(&link->children); |
537 | INIT_LIST_HEAD(&link->link); | 538 | INIT_LIST_HEAD(&link->link); |
538 | link->pdev = pdev; | 539 | link->pdev = pdev; |
539 | if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) { | 540 | |
541 | /* | ||
542 | * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe | ||
543 | * hierarchies. | ||
544 | */ | ||
545 | if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || | ||
546 | pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) { | ||
547 | link->root = link; | ||
548 | } else { | ||
540 | struct pcie_link_state *parent; | 549 | struct pcie_link_state *parent; |
550 | |||
541 | parent = pdev->bus->parent->self->link_state; | 551 | parent = pdev->bus->parent->self->link_state; |
542 | if (!parent) { | 552 | if (!parent) { |
543 | kfree(link); | 553 | kfree(link); |
544 | return NULL; | 554 | return NULL; |
545 | } | 555 | } |
556 | |||
546 | link->parent = parent; | 557 | link->parent = parent; |
558 | link->root = link->parent->root; | ||
547 | list_add(&link->link, &parent->children); | 559 | list_add(&link->link, &parent->children); |
548 | } | 560 | } |
549 | /* Setup a pointer to the root port link */ | ||
550 | if (!link->parent) | ||
551 | link->root = link; | ||
552 | else | ||
553 | link->root = link->parent->root; | ||
554 | 561 | ||
555 | list_add(&link->sibling, &link_list); | 562 | list_add(&link->sibling, &link_list); |
556 | pdev->link_state = link; | 563 | pdev->link_state = link; |
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c index 09172043d589..c617ec49e9ed 100644 --- a/drivers/pinctrl/berlin/berlin-bg4ct.c +++ b/drivers/pinctrl/berlin/berlin-bg4ct.c | |||
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = { | |||
217 | BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, | 217 | BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, |
218 | BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ | 218 | BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ |
219 | BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ | 219 | BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ |
220 | BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ | 220 | BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */ |
221 | BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, | 221 | BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, |
222 | BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ | 222 | BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ |
223 | BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ | 223 | BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ |
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index c123488266ce..d94aef17348b 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c | |||
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset, | |||
731 | int reg) | 731 | int reg) |
732 | { | 732 | { |
733 | struct byt_community *comm = byt_get_community(vg, offset); | 733 | struct byt_community *comm = byt_get_community(vg, offset); |
734 | u32 reg_offset = 0; | 734 | u32 reg_offset; |
735 | 735 | ||
736 | if (!comm) | 736 | if (!comm) |
737 | return NULL; | 737 | return NULL; |
738 | 738 | ||
739 | offset -= comm->pin_base; | 739 | offset -= comm->pin_base; |
740 | if (reg == BYT_INT_STAT_REG) | 740 | switch (reg) { |
741 | case BYT_INT_STAT_REG: | ||
741 | reg_offset = (offset / 32) * 4; | 742 | reg_offset = (offset / 32) * 4; |
742 | else | 743 | break; |
744 | case BYT_DEBOUNCE_REG: | ||
745 | reg_offset = 0; | ||
746 | break; | ||
747 | default: | ||
743 | reg_offset = comm->pad_map[offset] * 16; | 748 | reg_offset = comm->pad_map[offset] * 16; |
749 | break; | ||
750 | } | ||
744 | 751 | ||
745 | return comm->reg_base + reg_offset + reg; | 752 | return comm->reg_base + reg_offset + reg; |
746 | } | 753 | } |
@@ -1243,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, | |||
1243 | debounce = readl(db_reg); | 1250 | debounce = readl(db_reg); |
1244 | debounce &= ~BYT_DEBOUNCE_PULSE_MASK; | 1251 | debounce &= ~BYT_DEBOUNCE_PULSE_MASK; |
1245 | 1252 | ||
1253 | if (arg) | ||
1254 | conf |= BYT_DEBOUNCE_EN; | ||
1255 | else | ||
1256 | conf &= ~BYT_DEBOUNCE_EN; | ||
1257 | |||
1246 | switch (arg) { | 1258 | switch (arg) { |
1247 | case 0: | ||
1248 | conf &= BYT_DEBOUNCE_EN; | ||
1249 | break; | ||
1250 | case 375: | 1259 | case 375: |
1251 | debounce |= BYT_DEBOUNCE_PULSE_375US; | 1260 | debounce |= BYT_DEBOUNCE_PULSE_375US; |
1252 | break; | 1261 | break; |
@@ -1269,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, | |||
1269 | debounce |= BYT_DEBOUNCE_PULSE_24MS; | 1278 | debounce |= BYT_DEBOUNCE_PULSE_24MS; |
1270 | break; | 1279 | break; |
1271 | default: | 1280 | default: |
1272 | ret = -EINVAL; | 1281 | if (arg) |
1282 | ret = -EINVAL; | ||
1283 | break; | ||
1273 | } | 1284 | } |
1274 | 1285 | ||
1275 | if (!ret) | 1286 | if (!ret) |
@@ -1612,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc) | |||
1612 | continue; | 1623 | continue; |
1613 | } | 1624 | } |
1614 | 1625 | ||
1626 | raw_spin_lock(&vg->lock); | ||
1615 | pending = readl(reg); | 1627 | pending = readl(reg); |
1628 | raw_spin_unlock(&vg->lock); | ||
1616 | for_each_set_bit(pin, &pending, 32) { | 1629 | for_each_set_bit(pin, &pending, 32) { |
1617 | virq = irq_find_mapping(vg->chip.irqdomain, base + pin); | 1630 | virq = irq_find_mapping(vg->chip.irqdomain, base + pin); |
1618 | generic_handle_irq(virq); | 1631 | generic_handle_irq(virq); |
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index b21896126f76..4d4ef42a39b5 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c | |||
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin, | |||
794 | unsigned int i; | 794 | unsigned int i; |
795 | int ret; | 795 | int ret; |
796 | 796 | ||
797 | if (!mrfld_buf_available(mp, pin)) | ||
798 | return -ENOTSUPP; | ||
799 | |||
797 | for (i = 0; i < nconfigs; i++) { | 800 | for (i = 0; i < nconfigs; i++) { |
798 | switch (pinconf_to_config_param(configs[i])) { | 801 | switch (pinconf_to_config_param(configs[i])) { |
799 | case PIN_CONFIG_BIAS_DISABLE: | 802 | case PIN_CONFIG_BIAS_DISABLE: |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 0eb51e33cb1b..207a8de4e1ed 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c | |||
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, | |||
564 | val = arg / 10 - 1; | 564 | val = arg / 10 - 1; |
565 | break; | 565 | break; |
566 | case PIN_CONFIG_BIAS_DISABLE: | 566 | case PIN_CONFIG_BIAS_DISABLE: |
567 | val = 0; | 567 | continue; |
568 | break; | ||
569 | case PIN_CONFIG_BIAS_PULL_UP: | 568 | case PIN_CONFIG_BIAS_PULL_UP: |
570 | if (arg == 0) | 569 | if (arg == 0) |
571 | return -EINVAL; | 570 | return -EINVAL; |
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index e6a512ebeae2..a3ade9e4ef47 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c | |||
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = { | |||
272 | 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1, | 272 | 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1, |
273 | BIT(3)), | 273 | BIT(3)), |
274 | AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100, | 274 | AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100, |
275 | AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)), | 275 | AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)), |
276 | AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100, | 276 | AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100, |
277 | AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)), | 277 | AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)), |
278 | AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100, | 278 | AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100, |
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index a43b0e8a438d..988a7472c2ab 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c | |||
@@ -30,9 +30,6 @@ | |||
30 | #include <linux/of_gpio.h> | 30 | #include <linux/of_gpio.h> |
31 | #include <linux/regulator/of_regulator.h> | 31 | #include <linux/regulator/of_regulator.h> |
32 | #include <linux/regulator/machine.h> | 32 | #include <linux/regulator/machine.h> |
33 | #include <linux/acpi.h> | ||
34 | #include <linux/property.h> | ||
35 | #include <linux/gpio/consumer.h> | ||
36 | 33 | ||
37 | struct fixed_voltage_data { | 34 | struct fixed_voltage_data { |
38 | struct regulator_desc desc; | 35 | struct regulator_desc desc; |
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev, | |||
97 | return config; | 94 | return config; |
98 | } | 95 | } |
99 | 96 | ||
100 | /** | ||
101 | * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info | ||
102 | * @dev: device requesting for fixed_voltage_config | ||
103 | * @desc: regulator description | ||
104 | * | ||
105 | * Populates fixed_voltage_config structure by extracting data through ACPI | ||
106 | * interface, returns a pointer to the populated structure of NULL if memory | ||
107 | * alloc fails. | ||
108 | */ | ||
109 | static struct fixed_voltage_config * | ||
110 | acpi_get_fixed_voltage_config(struct device *dev, | ||
111 | const struct regulator_desc *desc) | ||
112 | { | ||
113 | struct fixed_voltage_config *config; | ||
114 | const char *supply_name; | ||
115 | struct gpio_desc *gpiod; | ||
116 | int ret; | ||
117 | |||
118 | config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL); | ||
119 | if (!config) | ||
120 | return ERR_PTR(-ENOMEM); | ||
121 | |||
122 | ret = device_property_read_string(dev, "supply-name", &supply_name); | ||
123 | if (!ret) | ||
124 | config->supply_name = supply_name; | ||
125 | |||
126 | gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS); | ||
127 | if (IS_ERR(gpiod)) | ||
128 | return ERR_PTR(-ENODEV); | ||
129 | |||
130 | config->gpio = desc_to_gpio(gpiod); | ||
131 | config->enable_high = device_property_read_bool(dev, | ||
132 | "enable-active-high"); | ||
133 | gpiod_put(gpiod); | ||
134 | |||
135 | return config; | ||
136 | } | ||
137 | |||
138 | static struct regulator_ops fixed_voltage_ops = { | 97 | static struct regulator_ops fixed_voltage_ops = { |
139 | }; | 98 | }; |
140 | 99 | ||
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) | |||
155 | &drvdata->desc); | 114 | &drvdata->desc); |
156 | if (IS_ERR(config)) | 115 | if (IS_ERR(config)) |
157 | return PTR_ERR(config); | 116 | return PTR_ERR(config); |
158 | } else if (ACPI_HANDLE(&pdev->dev)) { | ||
159 | config = acpi_get_fixed_voltage_config(&pdev->dev, | ||
160 | &drvdata->desc); | ||
161 | if (IS_ERR(config)) | ||
162 | return PTR_ERR(config); | ||
163 | } else { | 117 | } else { |
164 | config = dev_get_platdata(&pdev->dev); | 118 | config = dev_get_platdata(&pdev->dev); |
165 | } | 119 | } |
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index 4864b9d742c0..716191046a70 100644 --- a/drivers/regulator/twl6030-regulator.c +++ b/drivers/regulator/twl6030-regulator.c | |||
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV, | |||
452 | vsel = 62; | 452 | vsel = 62; |
453 | else if ((min_uV > 1800000) && (min_uV <= 1900000)) | 453 | else if ((min_uV > 1800000) && (min_uV <= 1900000)) |
454 | vsel = 61; | 454 | vsel = 61; |
455 | else if ((min_uV > 1350000) && (min_uV <= 1800000)) | 455 | else if ((min_uV > 1500000) && (min_uV <= 1800000)) |
456 | vsel = 60; | 456 | vsel = 60; |
457 | else if ((min_uV > 1350000) && (min_uV <= 1500000)) | 457 | else if ((min_uV > 1350000) && (min_uV <= 1500000)) |
458 | vsel = 59; | 458 | vsel = 59; |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index c93c5a8fba32..5dc673dc9487 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121 | |||
1551 | will be called rtc-mpc5121. | 1551 | will be called rtc-mpc5121. |
1552 | 1552 | ||
1553 | config RTC_DRV_JZ4740 | 1553 | config RTC_DRV_JZ4740 |
1554 | bool "Ingenic JZ4740 SoC" | 1554 | tristate "Ingenic JZ4740 SoC" |
1555 | depends on MACH_INGENIC || COMPILE_TEST | 1555 | depends on MACH_INGENIC || COMPILE_TEST |
1556 | help | 1556 | help |
1557 | If you say yes here you get support for the Ingenic JZ47xx SoCs RTC | 1557 | If you say yes here you get support for the Ingenic JZ47xx SoCs RTC |
1558 | controllers. | 1558 | controllers. |
1559 | 1559 | ||
1560 | This driver can also be built as a module. If so, the module | ||
1561 | will be called rtc-jz4740. | ||
1562 | |||
1560 | config RTC_DRV_LPC24XX | 1563 | config RTC_DRV_LPC24XX |
1561 | tristate "NXP RTC for LPC178x/18xx/408x/43xx" | 1564 | tristate "NXP RTC for LPC178x/18xx/408x/43xx" |
1562 | depends on ARCH_LPC18XX || COMPILE_TEST | 1565 | depends on ARCH_LPC18XX || COMPILE_TEST |
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index 72918c1ba092..64989afffa3d 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | ||
20 | #include <linux/of_device.h> | 21 | #include <linux/of_device.h> |
21 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
22 | #include <linux/reboot.h> | 23 | #include <linux/reboot.h> |
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void) | |||
294 | JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks); | 295 | JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks); |
295 | 296 | ||
296 | jz4740_rtc_poweroff(dev_for_power_off); | 297 | jz4740_rtc_poweroff(dev_for_power_off); |
297 | machine_halt(); | 298 | kernel_halt(); |
298 | } | 299 | } |
299 | 300 | ||
300 | static const struct of_device_id jz4740_rtc_of_match[] = { | 301 | static const struct of_device_id jz4740_rtc_of_match[] = { |
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = { | |||
302 | { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 }, | 303 | { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 }, |
303 | {}, | 304 | {}, |
304 | }; | 305 | }; |
306 | MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match); | ||
305 | 307 | ||
306 | static int jz4740_rtc_probe(struct platform_device *pdev) | 308 | static int jz4740_rtc_probe(struct platform_device *pdev) |
307 | { | 309 | { |
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = { | |||
429 | { "jz4780-rtc", ID_JZ4780 }, | 431 | { "jz4780-rtc", ID_JZ4780 }, |
430 | {} | 432 | {} |
431 | }; | 433 | }; |
434 | MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids); | ||
432 | 435 | ||
433 | static struct platform_driver jz4740_rtc_driver = { | 436 | static struct platform_driver jz4740_rtc_driver = { |
434 | .probe = jz4740_rtc_probe, | 437 | .probe = jz4740_rtc_probe, |
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = { | |||
440 | .id_table = jz4740_rtc_ids, | 443 | .id_table = jz4740_rtc_ids, |
441 | }; | 444 | }; |
442 | 445 | ||
443 | builtin_platform_driver(jz4740_rtc_driver); | 446 | module_platform_driver(jz4740_rtc_driver); |
447 | |||
448 | MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); | ||
449 | MODULE_LICENSE("GPL"); | ||
450 | MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n"); | ||
451 | MODULE_ALIAS("platform:jz4740-rtc"); | ||
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 75f820ca17b7..27ff38f839fc 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -1583,7 +1583,7 @@ out: | |||
1583 | int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) | 1583 | int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) |
1584 | { | 1584 | { |
1585 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; | 1585 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
1586 | struct zfcp_fsf_req *req = NULL; | 1586 | struct zfcp_fsf_req *req; |
1587 | int retval = -EIO; | 1587 | int retval = -EIO; |
1588 | 1588 | ||
1589 | spin_lock_irq(&qdio->req_q_lock); | 1589 | spin_lock_irq(&qdio->req_q_lock); |
@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) | |||
1612 | zfcp_fsf_req_free(req); | 1612 | zfcp_fsf_req_free(req); |
1613 | out: | 1613 | out: |
1614 | spin_unlock_irq(&qdio->req_q_lock); | 1614 | spin_unlock_irq(&qdio->req_q_lock); |
1615 | if (req && !IS_ERR(req)) | 1615 | if (!retval) |
1616 | zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); | 1616 | zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); |
1617 | return retval; | 1617 | return retval; |
1618 | } | 1618 | } |
@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) | |||
1638 | int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) | 1638 | int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) |
1639 | { | 1639 | { |
1640 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; | 1640 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
1641 | struct zfcp_fsf_req *req = NULL; | 1641 | struct zfcp_fsf_req *req; |
1642 | int retval = -EIO; | 1642 | int retval = -EIO; |
1643 | 1643 | ||
1644 | spin_lock_irq(&qdio->req_q_lock); | 1644 | spin_lock_irq(&qdio->req_q_lock); |
@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) | |||
1667 | zfcp_fsf_req_free(req); | 1667 | zfcp_fsf_req_free(req); |
1668 | out: | 1668 | out: |
1669 | spin_unlock_irq(&qdio->req_q_lock); | 1669 | spin_unlock_irq(&qdio->req_q_lock); |
1670 | if (req && !IS_ERR(req)) | 1670 | if (!retval) |
1671 | zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); | 1671 | zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); |
1672 | return retval; | 1672 | return retval; |
1673 | } | 1673 | } |
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 4f56b1003cc7..5b48bedd7c38 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
@@ -50,9 +50,13 @@ struct aac_common aac_config = { | |||
50 | 50 | ||
51 | static inline int aac_is_msix_mode(struct aac_dev *dev) | 51 | static inline int aac_is_msix_mode(struct aac_dev *dev) |
52 | { | 52 | { |
53 | u32 status; | 53 | u32 status = 0; |
54 | 54 | ||
55 | status = src_readl(dev, MUnit.OMR); | 55 | if (dev->pdev->device == PMC_DEVICE_S6 || |
56 | dev->pdev->device == PMC_DEVICE_S7 || | ||
57 | dev->pdev->device == PMC_DEVICE_S8) { | ||
58 | status = src_readl(dev, MUnit.OMR); | ||
59 | } | ||
56 | return (status & AAC_INT_MODE_MSIX); | 60 | return (status & AAC_INT_MODE_MSIX); |
57 | } | 61 | } |
58 | 62 | ||
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 99b747cedbeb..0f807798c624 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
@@ -3816,6 +3816,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = { | |||
3816 | static const struct target_core_fabric_ops ibmvscsis_ops = { | 3816 | static const struct target_core_fabric_ops ibmvscsis_ops = { |
3817 | .module = THIS_MODULE, | 3817 | .module = THIS_MODULE, |
3818 | .name = "ibmvscsis", | 3818 | .name = "ibmvscsis", |
3819 | .max_data_sg_nents = MAX_TXU / PAGE_SIZE, | ||
3819 | .get_fabric_name = ibmvscsis_get_fabric_name, | 3820 | .get_fabric_name = ibmvscsis_get_fabric_name, |
3820 | .tpg_get_wwn = ibmvscsis_get_fabric_wwn, | 3821 | .tpg_get_wwn = ibmvscsis_get_fabric_wwn, |
3821 | .tpg_get_tag = ibmvscsis_get_tag, | 3822 | .tpg_get_tag = ibmvscsis_get_tag, |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 75f3fce1c867..0b5b423b1db0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/workqueue.h> | 51 | #include <linux/workqueue.h> |
52 | #include <linux/delay.h> | 52 | #include <linux/delay.h> |
53 | #include <linux/pci.h> | 53 | #include <linux/pci.h> |
54 | #include <linux/pci-aspm.h> | ||
54 | #include <linux/interrupt.h> | 55 | #include <linux/interrupt.h> |
55 | #include <linux/aer.h> | 56 | #include <linux/aer.h> |
56 | #include <linux/raid_class.h> | 57 | #include <linux/raid_class.h> |
@@ -4657,6 +4658,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
4657 | struct MPT3SAS_DEVICE *sas_device_priv_data; | 4658 | struct MPT3SAS_DEVICE *sas_device_priv_data; |
4658 | u32 response_code = 0; | 4659 | u32 response_code = 0; |
4659 | unsigned long flags; | 4660 | unsigned long flags; |
4661 | unsigned int sector_sz; | ||
4660 | 4662 | ||
4661 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); | 4663 | mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); |
4662 | scmd = _scsih_scsi_lookup_get_clear(ioc, smid); | 4664 | scmd = _scsih_scsi_lookup_get_clear(ioc, smid); |
@@ -4715,6 +4717,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
4715 | } | 4717 | } |
4716 | 4718 | ||
4717 | xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); | 4719 | xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); |
4720 | |||
4721 | /* In case of bogus fw or device, we could end up having | ||
4722 | * unaligned partial completion. We can force alignment here, | ||
4723 | * then scsi-ml does not need to handle this misbehavior. | ||
4724 | */ | ||
4725 | sector_sz = scmd->device->sector_size; | ||
4726 | if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz && | ||
4727 | xfer_cnt % sector_sz)) { | ||
4728 | sdev_printk(KERN_INFO, scmd->device, | ||
4729 | "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n", | ||
4730 | xfer_cnt, sector_sz); | ||
4731 | xfer_cnt = round_down(xfer_cnt, sector_sz); | ||
4732 | } | ||
4733 | |||
4718 | scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); | 4734 | scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); |
4719 | if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) | 4735 | if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) |
4720 | log_info = le32_to_cpu(mpi_reply->IOCLogInfo); | 4736 | log_info = le32_to_cpu(mpi_reply->IOCLogInfo); |
@@ -8746,6 +8762,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
8746 | 8762 | ||
8747 | switch (hba_mpi_version) { | 8763 | switch (hba_mpi_version) { |
8748 | case MPI2_VERSION: | 8764 | case MPI2_VERSION: |
8765 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | | ||
8766 | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); | ||
8749 | /* Use mpt2sas driver host template for SAS 2.0 HBA's */ | 8767 | /* Use mpt2sas driver host template for SAS 2.0 HBA's */ |
8750 | shost = scsi_host_alloc(&mpt2sas_driver_template, | 8768 | shost = scsi_host_alloc(&mpt2sas_driver_template, |
8751 | sizeof(struct MPT3SAS_ADAPTER)); | 8769 | sizeof(struct MPT3SAS_ADAPTER)); |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index dc88a09f9043..a94b0b6bd030 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -3242,7 +3242,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) | |||
3242 | * from a probe failure context. | 3242 | * from a probe failure context. |
3243 | */ | 3243 | */ |
3244 | if (!ha->rsp_q_map || !ha->rsp_q_map[0]) | 3244 | if (!ha->rsp_q_map || !ha->rsp_q_map[0]) |
3245 | return; | 3245 | goto free_irqs; |
3246 | rsp = ha->rsp_q_map[0]; | 3246 | rsp = ha->rsp_q_map[0]; |
3247 | 3247 | ||
3248 | if (ha->flags.msix_enabled) { | 3248 | if (ha->flags.msix_enabled) { |
@@ -3262,6 +3262,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) | |||
3262 | free_irq(pci_irq_vector(ha->pdev, 0), rsp); | 3262 | free_irq(pci_irq_vector(ha->pdev, 0), rsp); |
3263 | } | 3263 | } |
3264 | 3264 | ||
3265 | free_irqs: | ||
3265 | pci_free_irq_vectors(ha->pdev); | 3266 | pci_free_irq_vectors(ha->pdev); |
3266 | } | 3267 | } |
3267 | 3268 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 0a000ecf0881..40660461a4b5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1616,7 +1616,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1616 | /* Don't abort commands in adapter during EEH | 1616 | /* Don't abort commands in adapter during EEH |
1617 | * recovery as it's not accessible/responding. | 1617 | * recovery as it's not accessible/responding. |
1618 | */ | 1618 | */ |
1619 | if (!ha->flags.eeh_busy) { | 1619 | if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) { |
1620 | /* Get a reference to the sp and drop the lock. | 1620 | /* Get a reference to the sp and drop the lock. |
1621 | * The reference ensures this sp->done() call | 1621 | * The reference ensures this sp->done() call |
1622 | * - and not the call in qla2xxx_eh_abort() - | 1622 | * - and not the call in qla2xxx_eh_abort() - |
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index ec91bd07f00a..c680d7641311 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, | |||
534 | { | 534 | { |
535 | struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); | 535 | struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); |
536 | struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); | 536 | struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); |
537 | unsigned long flags; | ||
537 | int req_size; | 538 | int req_size; |
539 | int ret; | ||
538 | 540 | ||
539 | BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); | 541 | BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); |
540 | 542 | ||
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, | |||
562 | req_size = sizeof(cmd->req.cmd); | 564 | req_size = sizeof(cmd->req.cmd); |
563 | } | 565 | } |
564 | 566 | ||
565 | if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) | 567 | ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); |
568 | if (ret == -EIO) { | ||
569 | cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; | ||
570 | spin_lock_irqsave(&req_vq->vq_lock, flags); | ||
571 | virtscsi_complete_cmd(vscsi, cmd); | ||
572 | spin_unlock_irqrestore(&req_vq->vq_lock, flags); | ||
573 | } else if (ret != 0) { | ||
566 | return SCSI_MLQUEUE_HOST_BUSY; | 574 | return SCSI_MLQUEUE_HOST_BUSY; |
575 | } | ||
567 | return 0; | 576 | return 0; |
568 | } | 577 | } |
569 | 578 | ||
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c index 113f3d6c4b3a..27f75b17679b 100644 --- a/drivers/staging/greybus/timesync_platform.c +++ b/drivers/staging/greybus/timesync_platform.c | |||
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void) | |||
45 | 45 | ||
46 | int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata) | 46 | int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata) |
47 | { | 47 | { |
48 | if (!arche_platform_change_state_cb) | ||
49 | return 0; | ||
50 | |||
48 | return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC, | 51 | return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC, |
49 | pdata); | 52 | pdata); |
50 | } | 53 | } |
51 | 54 | ||
52 | void gb_timesync_platform_unlock_bus(void) | 55 | void gb_timesync_platform_unlock_bus(void) |
53 | { | 56 | { |
57 | if (!arche_platform_change_state_cb) | ||
58 | return; | ||
59 | |||
54 | arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL); | 60 | arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL); |
55 | } | 61 | } |
56 | 62 | ||
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index ee01f20d8b11..9afa6bec3e6f 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c | |||
@@ -390,15 +390,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
390 | result = VM_FAULT_LOCKED; | 390 | result = VM_FAULT_LOCKED; |
391 | break; | 391 | break; |
392 | case -ENODATA: | 392 | case -ENODATA: |
393 | case -EAGAIN: | ||
393 | case -EFAULT: | 394 | case -EFAULT: |
394 | result = VM_FAULT_NOPAGE; | 395 | result = VM_FAULT_NOPAGE; |
395 | break; | 396 | break; |
396 | case -ENOMEM: | 397 | case -ENOMEM: |
397 | result = VM_FAULT_OOM; | 398 | result = VM_FAULT_OOM; |
398 | break; | 399 | break; |
399 | case -EAGAIN: | ||
400 | result = VM_FAULT_RETRY; | ||
401 | break; | ||
402 | default: | 400 | default: |
403 | result = VM_FAULT_SIGBUS; | 401 | result = VM_FAULT_SIGBUS; |
404 | break; | 402 | break; |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 1ebd13ef7bd3..26929c44d703 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -352,7 +352,15 @@ int core_enable_device_list_for_node( | |||
352 | kfree(new); | 352 | kfree(new); |
353 | return -EINVAL; | 353 | return -EINVAL; |
354 | } | 354 | } |
355 | BUG_ON(orig->se_lun_acl != NULL); | 355 | if (orig->se_lun_acl != NULL) { |
356 | pr_warn_ratelimited("Detected existing explicit" | ||
357 | " se_lun_acl->se_lun_group reference for %s" | ||
358 | " mapped_lun: %llu, failing\n", | ||
359 | nacl->initiatorname, mapped_lun); | ||
360 | mutex_unlock(&nacl->lun_entry_mutex); | ||
361 | kfree(new); | ||
362 | return -EINVAL; | ||
363 | } | ||
356 | 364 | ||
357 | rcu_assign_pointer(new->se_lun, lun); | 365 | rcu_assign_pointer(new->se_lun, lun); |
358 | rcu_assign_pointer(new->se_lun_acl, lun_acl); | 366 | rcu_assign_pointer(new->se_lun_acl, lun_acl); |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 4879e70e2eef..df7b6e95c019 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -451,6 +451,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, | |||
451 | int *post_ret) | 451 | int *post_ret) |
452 | { | 452 | { |
453 | struct se_device *dev = cmd->se_dev; | 453 | struct se_device *dev = cmd->se_dev; |
454 | sense_reason_t ret = TCM_NO_SENSE; | ||
454 | 455 | ||
455 | /* | 456 | /* |
456 | * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through | 457 | * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through |
@@ -458,9 +459,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, | |||
458 | * sent to the backend driver. | 459 | * sent to the backend driver. |
459 | */ | 460 | */ |
460 | spin_lock_irq(&cmd->t_state_lock); | 461 | spin_lock_irq(&cmd->t_state_lock); |
461 | if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { | 462 | if (cmd->transport_state & CMD_T_SENT) { |
462 | cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; | 463 | cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; |
463 | *post_ret = 1; | 464 | *post_ret = 1; |
465 | |||
466 | if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION) | ||
467 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
464 | } | 468 | } |
465 | spin_unlock_irq(&cmd->t_state_lock); | 469 | spin_unlock_irq(&cmd->t_state_lock); |
466 | 470 | ||
@@ -470,7 +474,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, | |||
470 | */ | 474 | */ |
471 | up(&dev->caw_sem); | 475 | up(&dev->caw_sem); |
472 | 476 | ||
473 | return TCM_NO_SENSE; | 477 | return ret; |
474 | } | 478 | } |
475 | 479 | ||
476 | static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, | 480 | static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 1cadc9eefa21..437591bc7c08 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref) | |||
457 | { | 457 | { |
458 | struct se_node_acl *nacl = container_of(kref, | 458 | struct se_node_acl *nacl = container_of(kref, |
459 | struct se_node_acl, acl_kref); | 459 | struct se_node_acl, acl_kref); |
460 | struct se_portal_group *se_tpg = nacl->se_tpg; | ||
460 | 461 | ||
461 | complete(&nacl->acl_free_comp); | 462 | if (!nacl->dynamic_stop) { |
463 | complete(&nacl->acl_free_comp); | ||
464 | return; | ||
465 | } | ||
466 | |||
467 | mutex_lock(&se_tpg->acl_node_mutex); | ||
468 | list_del(&nacl->acl_list); | ||
469 | mutex_unlock(&se_tpg->acl_node_mutex); | ||
470 | |||
471 | core_tpg_wait_for_nacl_pr_ref(nacl); | ||
472 | core_free_device_list_for_node(nacl, se_tpg); | ||
473 | kfree(nacl); | ||
462 | } | 474 | } |
463 | 475 | ||
464 | void target_put_nacl(struct se_node_acl *nacl) | 476 | void target_put_nacl(struct se_node_acl *nacl) |
@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs); | |||
499 | void transport_free_session(struct se_session *se_sess) | 511 | void transport_free_session(struct se_session *se_sess) |
500 | { | 512 | { |
501 | struct se_node_acl *se_nacl = se_sess->se_node_acl; | 513 | struct se_node_acl *se_nacl = se_sess->se_node_acl; |
514 | |||
502 | /* | 515 | /* |
503 | * Drop the se_node_acl->nacl_kref obtained from within | 516 | * Drop the se_node_acl->nacl_kref obtained from within |
504 | * core_tpg_get_initiator_node_acl(). | 517 | * core_tpg_get_initiator_node_acl(). |
505 | */ | 518 | */ |
506 | if (se_nacl) { | 519 | if (se_nacl) { |
520 | struct se_portal_group *se_tpg = se_nacl->se_tpg; | ||
521 | const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; | ||
522 | unsigned long flags; | ||
523 | |||
507 | se_sess->se_node_acl = NULL; | 524 | se_sess->se_node_acl = NULL; |
525 | |||
526 | /* | ||
527 | * Also determine if we need to drop the extra ->cmd_kref if | ||
528 | * it had been previously dynamically generated, and | ||
529 | * the endpoint is not caching dynamic ACLs. | ||
530 | */ | ||
531 | mutex_lock(&se_tpg->acl_node_mutex); | ||
532 | if (se_nacl->dynamic_node_acl && | ||
533 | !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { | ||
534 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); | ||
535 | if (list_empty(&se_nacl->acl_sess_list)) | ||
536 | se_nacl->dynamic_stop = true; | ||
537 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); | ||
538 | |||
539 | if (se_nacl->dynamic_stop) | ||
540 | list_del(&se_nacl->acl_list); | ||
541 | } | ||
542 | mutex_unlock(&se_tpg->acl_node_mutex); | ||
543 | |||
544 | if (se_nacl->dynamic_stop) | ||
545 | target_put_nacl(se_nacl); | ||
546 | |||
508 | target_put_nacl(se_nacl); | 547 | target_put_nacl(se_nacl); |
509 | } | 548 | } |
510 | if (se_sess->sess_cmd_map) { | 549 | if (se_sess->sess_cmd_map) { |
@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session); | |||
518 | void transport_deregister_session(struct se_session *se_sess) | 557 | void transport_deregister_session(struct se_session *se_sess) |
519 | { | 558 | { |
520 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 559 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
521 | const struct target_core_fabric_ops *se_tfo; | ||
522 | struct se_node_acl *se_nacl; | ||
523 | unsigned long flags; | 560 | unsigned long flags; |
524 | bool drop_nacl = false; | ||
525 | 561 | ||
526 | if (!se_tpg) { | 562 | if (!se_tpg) { |
527 | transport_free_session(se_sess); | 563 | transport_free_session(se_sess); |
528 | return; | 564 | return; |
529 | } | 565 | } |
530 | se_tfo = se_tpg->se_tpg_tfo; | ||
531 | 566 | ||
532 | spin_lock_irqsave(&se_tpg->session_lock, flags); | 567 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
533 | list_del(&se_sess->sess_list); | 568 | list_del(&se_sess->sess_list); |
@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess) | |||
535 | se_sess->fabric_sess_ptr = NULL; | 570 | se_sess->fabric_sess_ptr = NULL; |
536 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); | 571 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
537 | 572 | ||
538 | /* | ||
539 | * Determine if we need to do extra work for this initiator node's | ||
540 | * struct se_node_acl if it had been previously dynamically generated. | ||
541 | */ | ||
542 | se_nacl = se_sess->se_node_acl; | ||
543 | |||
544 | mutex_lock(&se_tpg->acl_node_mutex); | ||
545 | if (se_nacl && se_nacl->dynamic_node_acl) { | ||
546 | if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { | ||
547 | list_del(&se_nacl->acl_list); | ||
548 | drop_nacl = true; | ||
549 | } | ||
550 | } | ||
551 | mutex_unlock(&se_tpg->acl_node_mutex); | ||
552 | |||
553 | if (drop_nacl) { | ||
554 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | ||
555 | core_free_device_list_for_node(se_nacl, se_tpg); | ||
556 | se_sess->se_node_acl = NULL; | ||
557 | kfree(se_nacl); | ||
558 | } | ||
559 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", | 573 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
560 | se_tpg->se_tpg_tfo->get_fabric_name()); | 574 | se_tpg->se_tpg_tfo->get_fabric_name()); |
561 | /* | 575 | /* |
562 | * If last kref is dropping now for an explicit NodeACL, awake sleeping | 576 | * If last kref is dropping now for an explicit NodeACL, awake sleeping |
563 | * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group | 577 | * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group |
564 | * removal context from within transport_free_session() code. | 578 | * removal context from within transport_free_session() code. |
579 | * | ||
580 | * For dynamic ACL, target_put_nacl() uses target_complete_nacl() | ||
581 | * to release all remaining generate_node_acl=1 created ACL resources. | ||
565 | */ | 582 | */ |
566 | 583 | ||
567 | transport_free_session(se_sess); | 584 | transport_free_session(se_sess); |
@@ -3110,7 +3127,6 @@ static void target_tmr_work(struct work_struct *work) | |||
3110 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3127 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3111 | goto check_stop; | 3128 | goto check_stop; |
3112 | } | 3129 | } |
3113 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | ||
3114 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3130 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3115 | 3131 | ||
3116 | cmd->se_tfo->queue_tm_rsp(cmd); | 3132 | cmd->se_tfo->queue_tm_rsp(cmd); |
@@ -3123,11 +3139,25 @@ int transport_generic_handle_tmr( | |||
3123 | struct se_cmd *cmd) | 3139 | struct se_cmd *cmd) |
3124 | { | 3140 | { |
3125 | unsigned long flags; | 3141 | unsigned long flags; |
3142 | bool aborted = false; | ||
3126 | 3143 | ||
3127 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 3144 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3128 | cmd->transport_state |= CMD_T_ACTIVE; | 3145 | if (cmd->transport_state & CMD_T_ABORTED) { |
3146 | aborted = true; | ||
3147 | } else { | ||
3148 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | ||
3149 | cmd->transport_state |= CMD_T_ACTIVE; | ||
3150 | } | ||
3129 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3151 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3130 | 3152 | ||
3153 | if (aborted) { | ||
3154 | pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" | ||
3155 | "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, | ||
3156 | cmd->se_tmr_req->ref_task_tag, cmd->tag); | ||
3157 | transport_cmd_check_stop_to_fabric(cmd); | ||
3158 | return 0; | ||
3159 | } | ||
3160 | |||
3131 | INIT_WORK(&cmd->work, target_tmr_work); | 3161 | INIT_WORK(&cmd->work, target_tmr_work); |
3132 | queue_work(cmd->se_dev->tmr_wq, &cmd->work); | 3162 | queue_work(cmd->se_dev->tmr_wq, &cmd->work); |
3133 | return 0; | 3163 | return 0; |
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index d828b3b5000b..cac5a20a4de0 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c | |||
@@ -864,7 +864,7 @@ out: | |||
864 | " CHECK_CONDITION -> sending response\n", rc); | 864 | " CHECK_CONDITION -> sending response\n", rc); |
865 | ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | 865 | ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; |
866 | } | 866 | } |
867 | target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); | 867 | target_complete_cmd(ec_cmd, ec_cmd->scsi_status); |
868 | } | 868 | } |
869 | 869 | ||
870 | sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) | 870 | sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d2e50a27140c..24f9f98968a5 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
37 | /* CBM - Flash disk */ | 37 | /* CBM - Flash disk */ |
38 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, | 38 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, |
39 | 39 | ||
40 | /* WORLDE easy key (easykey.25) MIDI controller */ | ||
41 | { USB_DEVICE(0x0218, 0x0401), .driver_info = | ||
42 | USB_QUIRK_CONFIG_INTF_STRINGS }, | ||
43 | |||
40 | /* HP 5300/5370C scanner */ | 44 | /* HP 5300/5370C scanner */ |
41 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = | 45 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = |
42 | USB_QUIRK_STRING_FETCH_255 }, | 46 | USB_QUIRK_STRING_FETCH_255 }, |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 5490fc51638e..fd80c1b9c823 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, | |||
2269 | if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) | 2269 | if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) |
2270 | return -EINVAL; | 2270 | return -EINVAL; |
2271 | length = le32_to_cpu(d->dwSize); | 2271 | length = le32_to_cpu(d->dwSize); |
2272 | if (len < length) | ||
2273 | return -EINVAL; | ||
2272 | type = le32_to_cpu(d->dwPropertyDataType); | 2274 | type = le32_to_cpu(d->dwPropertyDataType); |
2273 | if (type < USB_EXT_PROP_UNICODE || | 2275 | if (type < USB_EXT_PROP_UNICODE || |
2274 | type > USB_EXT_PROP_UNICODE_MULTI) { | 2276 | type > USB_EXT_PROP_UNICODE_MULTI) { |
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, | |||
2277 | return -EINVAL; | 2279 | return -EINVAL; |
2278 | } | 2280 | } |
2279 | pnl = le16_to_cpu(d->wPropertyNameLength); | 2281 | pnl = le16_to_cpu(d->wPropertyNameLength); |
2282 | if (length < 14 + pnl) { | ||
2283 | pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n", | ||
2284 | length, pnl, type); | ||
2285 | return -EINVAL; | ||
2286 | } | ||
2280 | pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl)); | 2287 | pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl)); |
2281 | if (length != 14 + pnl + pdl) { | 2288 | if (length != 14 + pnl + pdl) { |
2282 | pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", | 2289 | pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", |
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs, | |||
2363 | } | 2370 | } |
2364 | } | 2371 | } |
2365 | if (flags & (1 << i)) { | 2372 | if (flags & (1 << i)) { |
2373 | if (len < 4) { | ||
2374 | goto error; | ||
2375 | } | ||
2366 | os_descs_count = get_unaligned_le32(data); | 2376 | os_descs_count = get_unaligned_le32(data); |
2367 | data += 4; | 2377 | data += 4; |
2368 | len -= 4; | 2378 | len -= 4; |
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, | |||
2435 | 2445 | ||
2436 | ENTER(); | 2446 | ENTER(); |
2437 | 2447 | ||
2438 | if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || | 2448 | if (unlikely(len < 16 || |
2449 | get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || | ||
2439 | get_unaligned_le32(data + 4) != len)) | 2450 | get_unaligned_le32(data + 4) != len)) |
2440 | goto error; | 2451 | goto error; |
2441 | str_count = get_unaligned_le32(data + 8); | 2452 | str_count = get_unaligned_le32(data + 8); |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index fca288bbc800..772f15821242 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
594 | | MUSB_PORT_STAT_RESUME; | 594 | | MUSB_PORT_STAT_RESUME; |
595 | musb->rh_timer = jiffies | 595 | musb->rh_timer = jiffies |
596 | + msecs_to_jiffies(USB_RESUME_TIMEOUT); | 596 | + msecs_to_jiffies(USB_RESUME_TIMEOUT); |
597 | musb->need_finish_resume = 1; | ||
598 | |||
599 | musb->xceiv->otg->state = OTG_STATE_A_HOST; | 597 | musb->xceiv->otg->state = OTG_STATE_A_HOST; |
600 | musb->is_active = 1; | 598 | musb->is_active = 1; |
601 | musb_host_resume_root_hub(musb); | 599 | musb_host_resume_root_hub(musb); |
600 | schedule_delayed_work(&musb->finish_resume_work, | ||
601 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); | ||
602 | break; | 602 | break; |
603 | case OTG_STATE_B_WAIT_ACON: | 603 | case OTG_STATE_B_WAIT_ACON: |
604 | musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; | 604 | musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; |
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb) | |||
1925 | static void musb_irq_work(struct work_struct *data) | 1925 | static void musb_irq_work(struct work_struct *data) |
1926 | { | 1926 | { |
1927 | struct musb *musb = container_of(data, struct musb, irq_work.work); | 1927 | struct musb *musb = container_of(data, struct musb, irq_work.work); |
1928 | int error; | ||
1929 | |||
1930 | error = pm_runtime_get_sync(musb->controller); | ||
1931 | if (error < 0) { | ||
1932 | dev_err(musb->controller, "Could not enable: %i\n", error); | ||
1933 | |||
1934 | return; | ||
1935 | } | ||
1928 | 1936 | ||
1929 | musb_pm_runtime_check_session(musb); | 1937 | musb_pm_runtime_check_session(musb); |
1930 | 1938 | ||
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data) | |||
1932 | musb->xceiv_old_state = musb->xceiv->otg->state; | 1940 | musb->xceiv_old_state = musb->xceiv->otg->state; |
1933 | sysfs_notify(&musb->controller->kobj, NULL, "mode"); | 1941 | sysfs_notify(&musb->controller->kobj, NULL, "mode"); |
1934 | } | 1942 | } |
1943 | |||
1944 | pm_runtime_mark_last_busy(musb->controller); | ||
1945 | pm_runtime_put_autosuspend(musb->controller); | ||
1935 | } | 1946 | } |
1936 | 1947 | ||
1937 | static void musb_recover_from_babble(struct musb *musb) | 1948 | static void musb_recover_from_babble(struct musb *musb) |
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev) | |||
2710 | mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV; | 2721 | mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV; |
2711 | if ((devctl & mask) != (musb->context.devctl & mask)) | 2722 | if ((devctl & mask) != (musb->context.devctl & mask)) |
2712 | musb->port1_status = 0; | 2723 | musb->port1_status = 0; |
2713 | if (musb->need_finish_resume) { | ||
2714 | musb->need_finish_resume = 0; | ||
2715 | schedule_delayed_work(&musb->finish_resume_work, | ||
2716 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); | ||
2717 | } | ||
2718 | 2724 | ||
2719 | /* | 2725 | /* |
2720 | * The USB HUB code expects the device to be in RPM_ACTIVE once it came | 2726 | * The USB HUB code expects the device to be in RPM_ACTIVE once it came |
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev) | |||
2766 | 2772 | ||
2767 | musb_restore_context(musb); | 2773 | musb_restore_context(musb); |
2768 | 2774 | ||
2769 | if (musb->need_finish_resume) { | ||
2770 | musb->need_finish_resume = 0; | ||
2771 | schedule_delayed_work(&musb->finish_resume_work, | ||
2772 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); | ||
2773 | } | ||
2774 | |||
2775 | spin_lock_irqsave(&musb->lock, flags); | 2775 | spin_lock_irqsave(&musb->lock, flags); |
2776 | error = musb_run_resume_work(musb); | 2776 | error = musb_run_resume_work(musb); |
2777 | if (error) | 2777 | if (error) |
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index ade902ea1221..ce5a18c98c6d 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -410,7 +410,6 @@ struct musb { | |||
410 | 410 | ||
411 | /* is_suspended means USB B_PERIPHERAL suspend */ | 411 | /* is_suspended means USB B_PERIPHERAL suspend */ |
412 | unsigned is_suspended:1; | 412 | unsigned is_suspended:1; |
413 | unsigned need_finish_resume :1; | ||
414 | 413 | ||
415 | /* may_wakeup means remote wakeup is enabled */ | 414 | /* may_wakeup means remote wakeup is enabled */ |
416 | unsigned may_wakeup:1; | 415 | unsigned may_wakeup:1; |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 7ce31a4c7e7f..42cc72e54c05 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = { | |||
2007 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, | 2007 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, |
2008 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, | 2008 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, |
2009 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, | 2009 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, |
2010 | { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */ | ||
2010 | { } /* Terminating entry */ | 2011 | { } /* Terminating entry */ |
2011 | }; | 2012 | }; |
2012 | MODULE_DEVICE_TABLE(usb, option_ids); | 2013 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 46fca6b75846..1db4b61bdf7b 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = { | |||
49 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, | 49 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, |
50 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, | 50 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, |
51 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, | 51 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, |
52 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, | ||
52 | { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, | 53 | { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, |
53 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, | 54 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, |
54 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) }, | 55 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) }, |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index e3b7af8adfb7..09d9be88209e 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #define ATEN_VENDOR_ID 0x0557 | 27 | #define ATEN_VENDOR_ID 0x0557 |
28 | #define ATEN_VENDOR_ID2 0x0547 | 28 | #define ATEN_VENDOR_ID2 0x0547 |
29 | #define ATEN_PRODUCT_ID 0x2008 | 29 | #define ATEN_PRODUCT_ID 0x2008 |
30 | #define ATEN_PRODUCT_ID2 0x2118 | ||
30 | 31 | ||
31 | #define IODATA_VENDOR_ID 0x04bb | 32 | #define IODATA_VENDOR_ID 0x04bb |
32 | #define IODATA_PRODUCT_ID 0x0a03 | 33 | #define IODATA_PRODUCT_ID 0x0a03 |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 1bc6089b9008..696458db7e3c 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = { | |||
124 | {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ | 124 | {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ |
125 | {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ | 125 | {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ |
126 | {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ | 126 | {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ |
127 | {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */ | ||
127 | {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */ | 128 | {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */ |
128 | {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ | 129 | {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ |
129 | {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */ | 130 | {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */ |
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 128d10282d16..59b3f62a2d64 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | |||
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data, | |||
1123 | mutex_lock(&container->lock); | 1123 | mutex_lock(&container->lock); |
1124 | 1124 | ||
1125 | ret = tce_iommu_create_default_window(container); | 1125 | ret = tce_iommu_create_default_window(container); |
1126 | if (ret) | 1126 | if (!ret) |
1127 | return ret; | 1127 | ret = tce_iommu_create_window(container, |
1128 | 1128 | create.page_shift, | |
1129 | ret = tce_iommu_create_window(container, create.page_shift, | 1129 | create.window_size, create.levels, |
1130 | create.window_size, create.levels, | 1130 | &create.start_addr); |
1131 | &create.start_addr); | ||
1132 | 1131 | ||
1133 | mutex_unlock(&container->lock); | 1132 | mutex_unlock(&container->lock); |
1134 | 1133 | ||
@@ -1246,6 +1245,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, | |||
1246 | static long tce_iommu_take_ownership_ddw(struct tce_container *container, | 1245 | static long tce_iommu_take_ownership_ddw(struct tce_container *container, |
1247 | struct iommu_table_group *table_group) | 1246 | struct iommu_table_group *table_group) |
1248 | { | 1247 | { |
1248 | long i, ret = 0; | ||
1249 | |||
1249 | if (!table_group->ops->create_table || !table_group->ops->set_window || | 1250 | if (!table_group->ops->create_table || !table_group->ops->set_window || |
1250 | !table_group->ops->release_ownership) { | 1251 | !table_group->ops->release_ownership) { |
1251 | WARN_ON_ONCE(1); | 1252 | WARN_ON_ONCE(1); |
@@ -1254,7 +1255,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, | |||
1254 | 1255 | ||
1255 | table_group->ops->take_ownership(table_group); | 1256 | table_group->ops->take_ownership(table_group); |
1256 | 1257 | ||
1258 | /* Set all windows to the new group */ | ||
1259 | for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { | ||
1260 | struct iommu_table *tbl = container->tables[i]; | ||
1261 | |||
1262 | if (!tbl) | ||
1263 | continue; | ||
1264 | |||
1265 | ret = table_group->ops->set_window(table_group, i, tbl); | ||
1266 | if (ret) | ||
1267 | goto release_exit; | ||
1268 | } | ||
1269 | |||
1257 | return 0; | 1270 | return 0; |
1271 | |||
1272 | release_exit: | ||
1273 | for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) | ||
1274 | table_group->ops->unset_window(table_group, i); | ||
1275 | |||
1276 | table_group->ops->release_ownership(table_group); | ||
1277 | |||
1278 | return ret; | ||
1258 | } | 1279 | } |
1259 | 1280 | ||
1260 | static int tce_iommu_attach_group(void *iommu_data, | 1281 | static int tce_iommu_attach_group(void *iommu_data, |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index d6432603880c..8f99fe08de02 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, | |||
130 | 130 | ||
131 | static void vhost_init_is_le(struct vhost_virtqueue *vq) | 131 | static void vhost_init_is_le(struct vhost_virtqueue *vq) |
132 | { | 132 | { |
133 | if (vhost_has_feature(vq, VIRTIO_F_VERSION_1)) | 133 | vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) |
134 | vq->is_le = true; | 134 | || virtio_legacy_is_little_endian(); |
135 | } | 135 | } |
136 | #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */ | 136 | #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */ |
137 | 137 | ||
138 | static void vhost_reset_is_le(struct vhost_virtqueue *vq) | 138 | static void vhost_reset_is_le(struct vhost_virtqueue *vq) |
139 | { | 139 | { |
140 | vq->is_le = virtio_legacy_is_little_endian(); | 140 | vhost_init_is_le(vq); |
141 | } | 141 | } |
142 | 142 | ||
143 | struct vhost_flush_struct { | 143 | struct vhost_flush_struct { |
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq) | |||
1714 | int r; | 1714 | int r; |
1715 | bool is_le = vq->is_le; | 1715 | bool is_le = vq->is_le; |
1716 | 1716 | ||
1717 | if (!vq->private_data) { | 1717 | if (!vq->private_data) |
1718 | vhost_reset_is_le(vq); | ||
1719 | return 0; | 1718 | return 0; |
1720 | } | ||
1721 | 1719 | ||
1722 | vhost_init_is_le(vq); | 1720 | vhost_init_is_le(vq); |
1723 | 1721 | ||
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 7e38ed79c3fc..409aeaa49246 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev) | |||
159 | if (xen_domain()) | 159 | if (xen_domain()) |
160 | return true; | 160 | return true; |
161 | 161 | ||
162 | /* | ||
163 | * On ARM-based machines, the DMA ops will do the right thing, | ||
164 | * so always use them with legacy devices. | ||
165 | */ | ||
166 | if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64)) | ||
167 | return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1); | ||
168 | |||
169 | return false; | 162 | return false; |
170 | } | 163 | } |
171 | 164 | ||
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 7f390849343b..c4444d6f439f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -1024,6 +1024,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | |||
1024 | unsigned long buf_offset; | 1024 | unsigned long buf_offset; |
1025 | unsigned long current_buf_start; | 1025 | unsigned long current_buf_start; |
1026 | unsigned long start_byte; | 1026 | unsigned long start_byte; |
1027 | unsigned long prev_start_byte; | ||
1027 | unsigned long working_bytes = total_out - buf_start; | 1028 | unsigned long working_bytes = total_out - buf_start; |
1028 | unsigned long bytes; | 1029 | unsigned long bytes; |
1029 | char *kaddr; | 1030 | char *kaddr; |
@@ -1071,26 +1072,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | |||
1071 | if (!bio->bi_iter.bi_size) | 1072 | if (!bio->bi_iter.bi_size) |
1072 | return 0; | 1073 | return 0; |
1073 | bvec = bio_iter_iovec(bio, bio->bi_iter); | 1074 | bvec = bio_iter_iovec(bio, bio->bi_iter); |
1074 | 1075 | prev_start_byte = start_byte; | |
1075 | start_byte = page_offset(bvec.bv_page) - disk_start; | 1076 | start_byte = page_offset(bvec.bv_page) - disk_start; |
1076 | 1077 | ||
1077 | /* | 1078 | /* |
1078 | * make sure our new page is covered by this | 1079 | * We need to make sure we're only adjusting |
1079 | * working buffer | 1080 | * our offset into compression working buffer when |
1081 | * we're switching pages. Otherwise we can incorrectly | ||
1082 | * keep copying when we were actually done. | ||
1080 | */ | 1083 | */ |
1081 | if (total_out <= start_byte) | 1084 | if (start_byte != prev_start_byte) { |
1082 | return 1; | 1085 | /* |
1086 | * make sure our new page is covered by this | ||
1087 | * working buffer | ||
1088 | */ | ||
1089 | if (total_out <= start_byte) | ||
1090 | return 1; | ||
1083 | 1091 | ||
1084 | /* | 1092 | /* |
1085 | * the next page in the biovec might not be adjacent | 1093 | * the next page in the biovec might not be adjacent |
1086 | * to the last page, but it might still be found | 1094 | * to the last page, but it might still be found |
1087 | * inside this working buffer. bump our offset pointer | 1095 | * inside this working buffer. bump our offset pointer |
1088 | */ | 1096 | */ |
1089 | if (total_out > start_byte && | 1097 | if (total_out > start_byte && |
1090 | current_buf_start < start_byte) { | 1098 | current_buf_start < start_byte) { |
1091 | buf_offset = start_byte - buf_start; | 1099 | buf_offset = start_byte - buf_start; |
1092 | working_bytes = total_out - start_byte; | 1100 | working_bytes = total_out - start_byte; |
1093 | current_buf_start = buf_start + buf_offset; | 1101 | current_buf_start = buf_start + buf_offset; |
1102 | } | ||
1094 | } | 1103 | } |
1095 | } | 1104 | } |
1096 | 1105 | ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 33f967d30b2a..21e51b0ba188 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -5653,6 +5653,10 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
5653 | #ifdef CONFIG_COMPAT | 5653 | #ifdef CONFIG_COMPAT |
5654 | long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 5654 | long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
5655 | { | 5655 | { |
5656 | /* | ||
5657 | * These all access 32-bit values anyway so no further | ||
5658 | * handling is necessary. | ||
5659 | */ | ||
5656 | switch (cmd) { | 5660 | switch (cmd) { |
5657 | case FS_IOC32_GETFLAGS: | 5661 | case FS_IOC32_GETFLAGS: |
5658 | cmd = FS_IOC_GETFLAGS; | 5662 | cmd = FS_IOC_GETFLAGS; |
@@ -5663,8 +5667,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
5663 | case FS_IOC32_GETVERSION: | 5667 | case FS_IOC32_GETVERSION: |
5664 | cmd = FS_IOC_GETVERSION; | 5668 | cmd = FS_IOC_GETVERSION; |
5665 | break; | 5669 | break; |
5666 | default: | ||
5667 | return -ENOIOCTLCMD; | ||
5668 | } | 5670 | } |
5669 | 5671 | ||
5670 | return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); | 5672 | return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); |
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 8f6a2a5863b9..a27fc8791551 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file) | |||
285 | rc = -ENOMEM; | 285 | rc = -ENOMEM; |
286 | goto error_exit; | 286 | goto error_exit; |
287 | } | 287 | } |
288 | spin_lock_init(&cifsFile->file_info_lock); | ||
288 | file->private_data = cifsFile; | 289 | file->private_data = cifsFile; |
289 | cifsFile->tlink = cifs_get_tlink(tlink); | 290 | cifsFile->tlink = cifs_get_tlink(tlink); |
290 | tcon = tlink_tcon(tlink); | 291 | tcon = tlink_tcon(tlink); |
@@ -1031,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |||
1031 | struct blk_dax_ctl dax = { 0 }; | 1031 | struct blk_dax_ctl dax = { 0 }; |
1032 | ssize_t map_len; | 1032 | ssize_t map_len; |
1033 | 1033 | ||
1034 | if (fatal_signal_pending(current)) { | ||
1035 | ret = -EINTR; | ||
1036 | break; | ||
1037 | } | ||
1038 | |||
1034 | dax.sector = dax_iomap_sector(iomap, pos); | 1039 | dax.sector = dax_iomap_sector(iomap, pos); |
1035 | dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; | 1040 | dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; |
1036 | map_len = dax_map_atomic(iomap->bdev, &dax); | 1041 | map_len = dax_map_atomic(iomap->bdev, &dax); |
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 4304072161aa..40d61077bead 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c | |||
@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) | |||
542 | hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { | 542 | hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { |
543 | if (invalidate) | 543 | if (invalidate) |
544 | set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); | 544 | set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); |
545 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | ||
545 | fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); | 546 | fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); |
546 | } | 547 | } |
547 | } else { | 548 | } else { |
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) | |||
560 | wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, | 561 | wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, |
561 | TASK_UNINTERRUPTIBLE); | 562 | TASK_UNINTERRUPTIBLE); |
562 | 563 | ||
564 | /* Make sure any pending writes are cancelled. */ | ||
565 | if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) | ||
566 | fscache_invalidate_writes(cookie); | ||
567 | |||
563 | /* Reset the cookie state if it wasn't relinquished */ | 568 | /* Reset the cookie state if it wasn't relinquished */ |
564 | if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { | 569 | if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { |
565 | atomic_inc(&cookie->n_active); | 570 | atomic_inc(&cookie->n_active); |
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c index 9b28649df3a1..a8aa00be4444 100644 --- a/fs/fscache/netfs.c +++ b/fs/fscache/netfs.c | |||
@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) | |||
48 | cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; | 48 | cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; |
49 | 49 | ||
50 | spin_lock_init(&cookie->lock); | 50 | spin_lock_init(&cookie->lock); |
51 | spin_lock_init(&cookie->stores_lock); | ||
51 | INIT_HLIST_HEAD(&cookie->backing_objects); | 52 | INIT_HLIST_HEAD(&cookie->backing_objects); |
52 | 53 | ||
53 | /* check the netfs type is not already present */ | 54 | /* check the netfs type is not already present */ |
diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 9e792e30f4db..7a182c87f378 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c | |||
@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object | |||
30 | static const struct fscache_state *fscache_object_available(struct fscache_object *, int); | 30 | static const struct fscache_state *fscache_object_available(struct fscache_object *, int); |
31 | static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); | 31 | static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); |
32 | static const struct fscache_state *fscache_update_object(struct fscache_object *, int); | 32 | static const struct fscache_state *fscache_update_object(struct fscache_object *, int); |
33 | static const struct fscache_state *fscache_object_dead(struct fscache_object *, int); | ||
33 | 34 | ||
34 | #define __STATE_NAME(n) fscache_osm_##n | 35 | #define __STATE_NAME(n) fscache_osm_##n |
35 | #define STATE(n) (&__STATE_NAME(n)) | 36 | #define STATE(n) (&__STATE_NAME(n)) |
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure); | |||
91 | static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); | 92 | static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); |
92 | static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); | 93 | static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); |
93 | static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); | 94 | static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); |
94 | static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL); | 95 | static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead); |
95 | 96 | ||
96 | static WAIT_STATE(WAIT_FOR_INIT, "?INI", | 97 | static WAIT_STATE(WAIT_FOR_INIT, "?INI", |
97 | TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); | 98 | TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); |
@@ -229,6 +230,10 @@ execute_work_state: | |||
229 | event = -1; | 230 | event = -1; |
230 | if (new_state == NO_TRANSIT) { | 231 | if (new_state == NO_TRANSIT) { |
231 | _debug("{OBJ%x} %s notrans", object->debug_id, state->name); | 232 | _debug("{OBJ%x} %s notrans", object->debug_id, state->name); |
233 | if (unlikely(state == STATE(OBJECT_DEAD))) { | ||
234 | _leave(" [dead]"); | ||
235 | return; | ||
236 | } | ||
232 | fscache_enqueue_object(object); | 237 | fscache_enqueue_object(object); |
233 | event_mask = object->oob_event_mask; | 238 | event_mask = object->oob_event_mask; |
234 | goto unmask_events; | 239 | goto unmask_events; |
@@ -239,7 +244,7 @@ execute_work_state: | |||
239 | object->state = state = new_state; | 244 | object->state = state = new_state; |
240 | 245 | ||
241 | if (state->work) { | 246 | if (state->work) { |
242 | if (unlikely(state->work == ((void *)2UL))) { | 247 | if (unlikely(state == STATE(OBJECT_DEAD))) { |
243 | _leave(" [dead]"); | 248 | _leave(" [dead]"); |
244 | return; | 249 | return; |
245 | } | 250 | } |
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob | |||
645 | fscache_mark_object_dead(object); | 650 | fscache_mark_object_dead(object); |
646 | object->oob_event_mask = 0; | 651 | object->oob_event_mask = 0; |
647 | 652 | ||
653 | if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) { | ||
654 | /* Reject any new read/write ops and abort any that are pending. */ | ||
655 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | ||
656 | fscache_cancel_all_ops(object); | ||
657 | } | ||
658 | |||
648 | if (list_empty(&object->dependents) && | 659 | if (list_empty(&object->dependents) && |
649 | object->n_ops == 0 && | 660 | object->n_ops == 0 && |
650 | object->n_children == 0) | 661 | object->n_children == 0) |
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object, | |||
1077 | } | 1088 | } |
1078 | } | 1089 | } |
1079 | EXPORT_SYMBOL(fscache_object_mark_killed); | 1090 | EXPORT_SYMBOL(fscache_object_mark_killed); |
1091 | |||
1092 | /* | ||
1093 | * The object is dead. We can get here if an object gets queued by an event | ||
1094 | * that would lead to its death (such as EV_KILL) when the dispatcher is | ||
1095 | * already running (and so can be requeued) but hasn't yet cleared the event | ||
1096 | * mask. | ||
1097 | */ | ||
1098 | static const struct fscache_state *fscache_object_dead(struct fscache_object *object, | ||
1099 | int event) | ||
1100 | { | ||
1101 | if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD, | ||
1102 | &object->flags)) | ||
1103 | return NO_TRANSIT; | ||
1104 | |||
1105 | WARN(true, "FS-Cache object redispatched after death"); | ||
1106 | return NO_TRANSIT; | ||
1107 | } | ||
diff --git a/fs/iomap.c b/fs/iomap.c index 354a123f170e..a51cb4c07d4d 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, | |||
114 | 114 | ||
115 | BUG_ON(pos + len > iomap->offset + iomap->length); | 115 | BUG_ON(pos + len > iomap->offset + iomap->length); |
116 | 116 | ||
117 | if (fatal_signal_pending(current)) | ||
118 | return -EINTR; | ||
119 | |||
117 | page = grab_cache_page_write_begin(inode->i_mapping, index, flags); | 120 | page = grab_cache_page_write_begin(inode->i_mapping, index, flags); |
118 | if (!page) | 121 | if (!page) |
119 | return -ENOMEM; | 122 | return -ENOMEM; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ecc151697fd4..0a0eaecf9676 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2700,7 +2700,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, | |||
2700 | sattr->ia_valid |= ATTR_MTIME; | 2700 | sattr->ia_valid |= ATTR_MTIME; |
2701 | 2701 | ||
2702 | /* Except MODE, it seems harmless of setting twice. */ | 2702 | /* Except MODE, it seems harmless of setting twice. */ |
2703 | if ((attrset[1] & FATTR4_WORD1_MODE)) | 2703 | if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE && |
2704 | attrset[1] & FATTR4_WORD1_MODE) | ||
2704 | sattr->ia_valid &= ~ATTR_MODE; | 2705 | sattr->ia_valid &= ~ATTR_MODE; |
2705 | 2706 | ||
2706 | if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) | 2707 | if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) |
@@ -8490,6 +8491,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, | |||
8490 | goto out; | 8491 | goto out; |
8491 | } | 8492 | } |
8492 | 8493 | ||
8494 | nfs4_sequence_free_slot(&lgp->res.seq_res); | ||
8493 | err = nfs4_handle_exception(server, nfs4err, exception); | 8495 | err = nfs4_handle_exception(server, nfs4err, exception); |
8494 | if (!status) { | 8496 | if (!status) { |
8495 | if (exception->retry) | 8497 | if (exception->retry) |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 90e6193ce6be..daeb94e3acd4 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1091,6 +1091,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) | |||
1091 | case -NFS4ERR_BADXDR: | 1091 | case -NFS4ERR_BADXDR: |
1092 | case -NFS4ERR_RESOURCE: | 1092 | case -NFS4ERR_RESOURCE: |
1093 | case -NFS4ERR_NOFILEHANDLE: | 1093 | case -NFS4ERR_NOFILEHANDLE: |
1094 | case -NFS4ERR_MOVED: | ||
1094 | /* Non-seqid mutating errors */ | 1095 | /* Non-seqid mutating errors */ |
1095 | return; | 1096 | return; |
1096 | }; | 1097 | }; |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 59554f3adf29..dd042498ce7c 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -1200,10 +1200,10 @@ _pnfs_return_layout(struct inode *ino) | |||
1200 | 1200 | ||
1201 | send = pnfs_prepare_layoutreturn(lo, &stateid, NULL); | 1201 | send = pnfs_prepare_layoutreturn(lo, &stateid, NULL); |
1202 | spin_unlock(&ino->i_lock); | 1202 | spin_unlock(&ino->i_lock); |
1203 | pnfs_free_lseg_list(&tmp_list); | ||
1204 | if (send) | 1203 | if (send) |
1205 | status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true); | 1204 | status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true); |
1206 | out_put_layout_hdr: | 1205 | out_put_layout_hdr: |
1206 | pnfs_free_lseg_list(&tmp_list); | ||
1207 | pnfs_put_layout_hdr(lo); | 1207 | pnfs_put_layout_hdr(lo); |
1208 | out: | 1208 | out: |
1209 | dprintk("<-- %s status: %d\n", __func__, status); | 1209 | dprintk("<-- %s status: %d\n", __func__, status); |
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 596205d939a1..1fc07a9c70e9 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c | |||
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, | |||
223 | struct nfs4_layout_stateid *ls; | 223 | struct nfs4_layout_stateid *ls; |
224 | struct nfs4_stid *stp; | 224 | struct nfs4_stid *stp; |
225 | 225 | ||
226 | stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache); | 226 | stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache, |
227 | nfsd4_free_layout_stateid); | ||
227 | if (!stp) | 228 | if (!stp) |
228 | return NULL; | 229 | return NULL; |
229 | stp->sc_free = nfsd4_free_layout_stateid; | 230 | |
230 | get_nfs4_file(fp); | 231 | get_nfs4_file(fp); |
231 | stp->sc_file = fp; | 232 | stp->sc_file = fp; |
232 | 233 | ||
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 4b4beaaa4eaa..a0dee8ae9f97 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -633,8 +633,8 @@ out: | |||
633 | return co; | 633 | return co; |
634 | } | 634 | } |
635 | 635 | ||
636 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, | 636 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, |
637 | struct kmem_cache *slab) | 637 | void (*sc_free)(struct nfs4_stid *)) |
638 | { | 638 | { |
639 | struct nfs4_stid *stid; | 639 | struct nfs4_stid *stid; |
640 | int new_id; | 640 | int new_id; |
@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, | |||
650 | idr_preload_end(); | 650 | idr_preload_end(); |
651 | if (new_id < 0) | 651 | if (new_id < 0) |
652 | goto out_free; | 652 | goto out_free; |
653 | |||
654 | stid->sc_free = sc_free; | ||
653 | stid->sc_client = cl; | 655 | stid->sc_client = cl; |
654 | stid->sc_stateid.si_opaque.so_id = new_id; | 656 | stid->sc_stateid.si_opaque.so_id = new_id; |
655 | stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; | 657 | stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; |
@@ -675,15 +677,12 @@ out_free: | |||
675 | static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) | 677 | static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) |
676 | { | 678 | { |
677 | struct nfs4_stid *stid; | 679 | struct nfs4_stid *stid; |
678 | struct nfs4_ol_stateid *stp; | ||
679 | 680 | ||
680 | stid = nfs4_alloc_stid(clp, stateid_slab); | 681 | stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid); |
681 | if (!stid) | 682 | if (!stid) |
682 | return NULL; | 683 | return NULL; |
683 | 684 | ||
684 | stp = openlockstateid(stid); | 685 | return openlockstateid(stid); |
685 | stp->st_stid.sc_free = nfs4_free_ol_stateid; | ||
686 | return stp; | ||
687 | } | 686 | } |
688 | 687 | ||
689 | static void nfs4_free_deleg(struct nfs4_stid *stid) | 688 | static void nfs4_free_deleg(struct nfs4_stid *stid) |
@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh, | |||
781 | goto out_dec; | 780 | goto out_dec; |
782 | if (delegation_blocked(¤t_fh->fh_handle)) | 781 | if (delegation_blocked(¤t_fh->fh_handle)) |
783 | goto out_dec; | 782 | goto out_dec; |
784 | dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); | 783 | dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg)); |
785 | if (dp == NULL) | 784 | if (dp == NULL) |
786 | goto out_dec; | 785 | goto out_dec; |
787 | 786 | ||
788 | dp->dl_stid.sc_free = nfs4_free_deleg; | ||
789 | /* | 787 | /* |
790 | * delegation seqid's are never incremented. The 4.1 special | 788 | * delegation seqid's are never incremented. The 4.1 special |
791 | * meaning of seqid 0 isn't meaningful, really, but let's avoid | 789 | * meaning of seqid 0 isn't meaningful, really, but let's avoid |
@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, | |||
5580 | stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); | 5578 | stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); |
5581 | get_nfs4_file(fp); | 5579 | get_nfs4_file(fp); |
5582 | stp->st_stid.sc_file = fp; | 5580 | stp->st_stid.sc_file = fp; |
5583 | stp->st_stid.sc_free = nfs4_free_lock_stateid; | ||
5584 | stp->st_access_bmap = 0; | 5581 | stp->st_access_bmap = 0; |
5585 | stp->st_deny_bmap = open_stp->st_deny_bmap; | 5582 | stp->st_deny_bmap = open_stp->st_deny_bmap; |
5586 | stp->st_openstp = open_stp; | 5583 | stp->st_openstp = open_stp; |
@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, | |||
5623 | lst = find_lock_stateid(lo, fi); | 5620 | lst = find_lock_stateid(lo, fi); |
5624 | if (lst == NULL) { | 5621 | if (lst == NULL) { |
5625 | spin_unlock(&clp->cl_lock); | 5622 | spin_unlock(&clp->cl_lock); |
5626 | ns = nfs4_alloc_stid(clp, stateid_slab); | 5623 | ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); |
5627 | if (ns == NULL) | 5624 | if (ns == NULL) |
5628 | return NULL; | 5625 | return NULL; |
5629 | 5626 | ||
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index c9399366f9df..4516e8b7d776 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, | |||
603 | __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, | 603 | __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, |
604 | stateid_t *stateid, unsigned char typemask, | 604 | stateid_t *stateid, unsigned char typemask, |
605 | struct nfs4_stid **s, struct nfsd_net *nn); | 605 | struct nfs4_stid **s, struct nfsd_net *nn); |
606 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, | 606 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, |
607 | struct kmem_cache *slab); | 607 | void (*sc_free)(struct nfs4_stid *)); |
608 | void nfs4_unhash_stid(struct nfs4_stid *s); | 608 | void nfs4_unhash_stid(struct nfs4_stid *s); |
609 | void nfs4_put_stid(struct nfs4_stid *s); | 609 | void nfs4_put_stid(struct nfs4_stid *s); |
610 | void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid); | 610 | void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid); |
diff --git a/fs/proc/page.c b/fs/proc/page.c index a2066e6dee90..2726536489b1 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
@@ -173,7 +173,8 @@ u64 stable_page_flags(struct page *page) | |||
173 | u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active); | 173 | u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active); |
174 | u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim); | 174 | u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim); |
175 | 175 | ||
176 | u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache); | 176 | if (PageSwapCache(page)) |
177 | u |= 1 << KPF_SWAPCACHE; | ||
177 | u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); | 178 | u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); |
178 | 179 | ||
179 | u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); | 180 | u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); |
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 27c059e1760a..1d887efaaf71 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c | |||
@@ -280,7 +280,7 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type, | |||
280 | 1, id, type, PSTORE_TYPE_PMSG, 0); | 280 | 1, id, type, PSTORE_TYPE_PMSG, 0); |
281 | 281 | ||
282 | /* ftrace is last since it may want to dynamically allocate memory. */ | 282 | /* ftrace is last since it may want to dynamically allocate memory. */ |
283 | if (!prz_ok(prz)) { | 283 | if (!prz_ok(prz) && cxt->fprzs) { |
284 | if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) { | 284 | if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) { |
285 | prz = ramoops_get_next_prz(cxt->fprzs, | 285 | prz = ramoops_get_next_prz(cxt->fprzs, |
286 | &cxt->ftrace_read_cnt, 1, id, type, | 286 | &cxt->ftrace_read_cnt, 1, id, type, |
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 63554e9f6e0c..719db1968d81 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h | |||
@@ -9,18 +9,15 @@ | |||
9 | #ifndef KSYM_ALIGN | 9 | #ifndef KSYM_ALIGN |
10 | #define KSYM_ALIGN 8 | 10 | #define KSYM_ALIGN 8 |
11 | #endif | 11 | #endif |
12 | #ifndef KCRC_ALIGN | ||
13 | #define KCRC_ALIGN 8 | ||
14 | #endif | ||
15 | #else | 12 | #else |
16 | #define __put .long | 13 | #define __put .long |
17 | #ifndef KSYM_ALIGN | 14 | #ifndef KSYM_ALIGN |
18 | #define KSYM_ALIGN 4 | 15 | #define KSYM_ALIGN 4 |
19 | #endif | 16 | #endif |
17 | #endif | ||
20 | #ifndef KCRC_ALIGN | 18 | #ifndef KCRC_ALIGN |
21 | #define KCRC_ALIGN 4 | 19 | #define KCRC_ALIGN 4 |
22 | #endif | 20 | #endif |
23 | #endif | ||
24 | 21 | ||
25 | #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX | 22 | #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX |
26 | #define KSYM(name) _##name | 23 | #define KSYM(name) _##name |
@@ -52,7 +49,11 @@ KSYM(__kstrtab_\name): | |||
52 | .section ___kcrctab\sec+\name,"a" | 49 | .section ___kcrctab\sec+\name,"a" |
53 | .balign KCRC_ALIGN | 50 | .balign KCRC_ALIGN |
54 | KSYM(__kcrctab_\name): | 51 | KSYM(__kcrctab_\name): |
55 | __put KSYM(__crc_\name) | 52 | #if defined(CONFIG_MODULE_REL_CRCS) |
53 | .long KSYM(__crc_\name) - . | ||
54 | #else | ||
55 | .long KSYM(__crc_\name) | ||
56 | #endif | ||
56 | .weak KSYM(__crc_\name) | 57 | .weak KSYM(__crc_\name) |
57 | .previous | 58 | .previous |
58 | #endif | 59 | #endif |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 192016e2b518..9c4ee144b5f6 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -517,6 +517,7 @@ struct drm_device { | |||
517 | struct drm_minor *control; /**< Control node */ | 517 | struct drm_minor *control; /**< Control node */ |
518 | struct drm_minor *primary; /**< Primary node */ | 518 | struct drm_minor *primary; /**< Primary node */ |
519 | struct drm_minor *render; /**< Render node */ | 519 | struct drm_minor *render; /**< Render node */ |
520 | bool registered; | ||
520 | 521 | ||
521 | /* currently active master for this device. Protected by master_mutex */ | 522 | /* currently active master for this device. Protected by master_mutex */ |
522 | struct drm_master *master; | 523 | struct drm_master *master; |
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index a9b95246e26e..045a97cbeba2 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h | |||
@@ -381,6 +381,8 @@ struct drm_connector_funcs { | |||
381 | * core drm connector interfaces. Everything added from this callback | 381 | * core drm connector interfaces. Everything added from this callback |
382 | * should be unregistered in the early_unregister callback. | 382 | * should be unregistered in the early_unregister callback. |
383 | * | 383 | * |
384 | * This is called while holding drm_connector->mutex. | ||
385 | * | ||
384 | * Returns: | 386 | * Returns: |
385 | * | 387 | * |
386 | * 0 on success, or a negative error code on failure. | 388 | * 0 on success, or a negative error code on failure. |
@@ -395,6 +397,8 @@ struct drm_connector_funcs { | |||
395 | * late_register(). It is called from drm_connector_unregister(), | 397 | * late_register(). It is called from drm_connector_unregister(), |
396 | * early in the driver unload sequence to disable userspace access | 398 | * early in the driver unload sequence to disable userspace access |
397 | * before data structures are torndown. | 399 | * before data structures are torndown. |
400 | * | ||
401 | * This is called while holding drm_connector->mutex. | ||
398 | */ | 402 | */ |
399 | void (*early_unregister)(struct drm_connector *connector); | 403 | void (*early_unregister)(struct drm_connector *connector); |
400 | 404 | ||
@@ -559,7 +563,6 @@ struct drm_cmdline_mode { | |||
559 | * @interlace_allowed: can this connector handle interlaced modes? | 563 | * @interlace_allowed: can this connector handle interlaced modes? |
560 | * @doublescan_allowed: can this connector handle doublescan? | 564 | * @doublescan_allowed: can this connector handle doublescan? |
561 | * @stereo_allowed: can this connector handle stereo modes? | 565 | * @stereo_allowed: can this connector handle stereo modes? |
562 | * @registered: is this connector exposed (registered) with userspace? | ||
563 | * @modes: modes available on this connector (from fill_modes() + user) | 566 | * @modes: modes available on this connector (from fill_modes() + user) |
564 | * @status: one of the drm_connector_status enums (connected, not, or unknown) | 567 | * @status: one of the drm_connector_status enums (connected, not, or unknown) |
565 | * @probed_modes: list of modes derived directly from the display | 568 | * @probed_modes: list of modes derived directly from the display |
@@ -608,6 +611,13 @@ struct drm_connector { | |||
608 | char *name; | 611 | char *name; |
609 | 612 | ||
610 | /** | 613 | /** |
614 | * @mutex: Lock for general connector state, but currently only protects | ||
615 | * @registered. Most of the connector state is still protected by the | ||
616 | * mutex in &drm_mode_config. | ||
617 | */ | ||
618 | struct mutex mutex; | ||
619 | |||
620 | /** | ||
611 | * @index: Compacted connector index, which matches the position inside | 621 | * @index: Compacted connector index, which matches the position inside |
612 | * the mode_config.list for drivers not supporting hot-add/removing. Can | 622 | * the mode_config.list for drivers not supporting hot-add/removing. Can |
613 | * be used as an array index. It is invariant over the lifetime of the | 623 | * be used as an array index. It is invariant over the lifetime of the |
@@ -620,6 +630,10 @@ struct drm_connector { | |||
620 | bool interlace_allowed; | 630 | bool interlace_allowed; |
621 | bool doublescan_allowed; | 631 | bool doublescan_allowed; |
622 | bool stereo_allowed; | 632 | bool stereo_allowed; |
633 | /** | ||
634 | * @registered: Is this connector exposed (registered) with userspace? | ||
635 | * Protected by @mutex. | ||
636 | */ | ||
623 | bool registered; | 637 | bool registered; |
624 | struct list_head modes; /* list of modes on this connector */ | 638 | struct list_head modes; /* list of modes on this connector */ |
625 | 639 | ||
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index d67ab83823ad..79591c3660cc 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -243,12 +243,10 @@ static inline int block_page_mkwrite_return(int err) | |||
243 | { | 243 | { |
244 | if (err == 0) | 244 | if (err == 0) |
245 | return VM_FAULT_LOCKED; | 245 | return VM_FAULT_LOCKED; |
246 | if (err == -EFAULT) | 246 | if (err == -EFAULT || err == -EAGAIN) |
247 | return VM_FAULT_NOPAGE; | 247 | return VM_FAULT_NOPAGE; |
248 | if (err == -ENOMEM) | 248 | if (err == -ENOMEM) |
249 | return VM_FAULT_OOM; | 249 | return VM_FAULT_OOM; |
250 | if (err == -EAGAIN) | ||
251 | return VM_FAULT_RETRY; | ||
252 | /* -ENOSPC, -EDQUOT, -EIO ... */ | 250 | /* -ENOSPC, -EDQUOT, -EIO ... */ |
253 | return VM_FAULT_SIGBUS; | 251 | return VM_FAULT_SIGBUS; |
254 | } | 252 | } |
diff --git a/include/linux/can/core.h b/include/linux/can/core.h index a0875001b13c..df08a41d5be5 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h | |||
@@ -45,10 +45,9 @@ struct can_proto { | |||
45 | extern int can_proto_register(const struct can_proto *cp); | 45 | extern int can_proto_register(const struct can_proto *cp); |
46 | extern void can_proto_unregister(const struct can_proto *cp); | 46 | extern void can_proto_unregister(const struct can_proto *cp); |
47 | 47 | ||
48 | extern int can_rx_register(struct net_device *dev, canid_t can_id, | 48 | int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, |
49 | canid_t mask, | 49 | void (*func)(struct sk_buff *, void *), |
50 | void (*func)(struct sk_buff *, void *), | 50 | void *data, char *ident, struct sock *sk); |
51 | void *data, char *ident); | ||
52 | 51 | ||
53 | extern void can_rx_unregister(struct net_device *dev, canid_t can_id, | 52 | extern void can_rx_unregister(struct net_device *dev, canid_t can_id, |
54 | canid_t mask, | 53 | canid_t mask, |
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d936a0021839..921acaaa1601 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h | |||
@@ -8,9 +8,7 @@ enum cpuhp_state { | |||
8 | CPUHP_CREATE_THREADS, | 8 | CPUHP_CREATE_THREADS, |
9 | CPUHP_PERF_PREPARE, | 9 | CPUHP_PERF_PREPARE, |
10 | CPUHP_PERF_X86_PREPARE, | 10 | CPUHP_PERF_X86_PREPARE, |
11 | CPUHP_PERF_X86_UNCORE_PREP, | ||
12 | CPUHP_PERF_X86_AMD_UNCORE_PREP, | 11 | CPUHP_PERF_X86_AMD_UNCORE_PREP, |
13 | CPUHP_PERF_X86_RAPL_PREP, | ||
14 | CPUHP_PERF_BFIN, | 12 | CPUHP_PERF_BFIN, |
15 | CPUHP_PERF_POWER, | 13 | CPUHP_PERF_POWER, |
16 | CPUHP_PERF_SUPERH, | 14 | CPUHP_PERF_SUPERH, |
@@ -86,7 +84,6 @@ enum cpuhp_state { | |||
86 | CPUHP_AP_IRQ_ARMADA_XP_STARTING, | 84 | CPUHP_AP_IRQ_ARMADA_XP_STARTING, |
87 | CPUHP_AP_IRQ_BCM2836_STARTING, | 85 | CPUHP_AP_IRQ_BCM2836_STARTING, |
88 | CPUHP_AP_ARM_MVEBU_COHERENCY, | 86 | CPUHP_AP_ARM_MVEBU_COHERENCY, |
89 | CPUHP_AP_PERF_X86_UNCORE_STARTING, | ||
90 | CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, | 87 | CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, |
91 | CPUHP_AP_PERF_X86_STARTING, | 88 | CPUHP_AP_PERF_X86_STARTING, |
92 | CPUHP_AP_PERF_X86_AMD_IBS_STARTING, | 89 | CPUHP_AP_PERF_X86_AMD_IBS_STARTING, |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index c717f5ea88cb..b3d2c1a89ac4 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp, | |||
560 | static inline int cpumask_parse_user(const char __user *buf, int len, | 560 | static inline int cpumask_parse_user(const char __user *buf, int len, |
561 | struct cpumask *dstp) | 561 | struct cpumask *dstp) |
562 | { | 562 | { |
563 | return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); | 563 | return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
564 | } | 564 | } |
565 | 565 | ||
566 | /** | 566 | /** |
@@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, | |||
575 | struct cpumask *dstp) | 575 | struct cpumask *dstp) |
576 | { | 576 | { |
577 | return bitmap_parselist_user(buf, len, cpumask_bits(dstp), | 577 | return bitmap_parselist_user(buf, len, cpumask_bits(dstp), |
578 | nr_cpu_ids); | 578 | nr_cpumask_bits); |
579 | } | 579 | } |
580 | 580 | ||
581 | /** | 581 | /** |
@@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) | |||
590 | char *nl = strchr(buf, '\n'); | 590 | char *nl = strchr(buf, '\n'); |
591 | unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); | 591 | unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); |
592 | 592 | ||
593 | return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); | 593 | return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
594 | } | 594 | } |
595 | 595 | ||
596 | /** | 596 | /** |
@@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) | |||
602 | */ | 602 | */ |
603 | static inline int cpulist_parse(const char *buf, struct cpumask *dstp) | 603 | static inline int cpulist_parse(const char *buf, struct cpumask *dstp) |
604 | { | 604 | { |
605 | return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); | 605 | return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); |
606 | } | 606 | } |
607 | 607 | ||
608 | /** | 608 | /** |
diff --git a/include/linux/export.h b/include/linux/export.h index 2a0f61fbc731..1a1dfdb2a5c6 100644 --- a/include/linux/export.h +++ b/include/linux/export.h | |||
@@ -43,12 +43,19 @@ extern struct module __this_module; | |||
43 | #ifdef CONFIG_MODVERSIONS | 43 | #ifdef CONFIG_MODVERSIONS |
44 | /* Mark the CRC weak since genksyms apparently decides not to | 44 | /* Mark the CRC weak since genksyms apparently decides not to |
45 | * generate a checksums for some symbols */ | 45 | * generate a checksums for some symbols */ |
46 | #if defined(CONFIG_MODULE_REL_CRCS) | ||
46 | #define __CRC_SYMBOL(sym, sec) \ | 47 | #define __CRC_SYMBOL(sym, sec) \ |
47 | extern __visible void *__crc_##sym __attribute__((weak)); \ | 48 | asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ |
48 | static const unsigned long __kcrctab_##sym \ | 49 | " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ |
49 | __used \ | 50 | " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \ |
50 | __attribute__((section("___kcrctab" sec "+" #sym), used)) \ | 51 | " .previous \n"); |
51 | = (unsigned long) &__crc_##sym; | 52 | #else |
53 | #define __CRC_SYMBOL(sym, sec) \ | ||
54 | asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ | ||
55 | " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ | ||
56 | " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ | ||
57 | " .previous \n"); | ||
58 | #endif | ||
52 | #else | 59 | #else |
53 | #define __CRC_SYMBOL(sym, sec) | 60 | #define __CRC_SYMBOL(sym, sec) |
54 | #endif | 61 | #endif |
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 13ba552e6c09..4c467ef50159 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
@@ -360,6 +360,7 @@ struct fscache_object { | |||
360 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ | 360 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ |
361 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ | 361 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ |
362 | #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ | 362 | #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ |
363 | #define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */ | ||
363 | 364 | ||
364 | struct list_head cache_link; /* link in cache->object_list */ | 365 | struct list_head cache_link; /* link in cache->object_list */ |
365 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ | 366 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 42fe43fb0c80..183efde54269 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info { | |||
128 | u32 ring_data_startoffset; | 128 | u32 ring_data_startoffset; |
129 | u32 priv_write_index; | 129 | u32 priv_write_index; |
130 | u32 priv_read_index; | 130 | u32 priv_read_index; |
131 | u32 cached_read_index; | ||
131 | }; | 132 | }; |
132 | 133 | ||
133 | /* | 134 | /* |
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) | |||
180 | return write; | 181 | return write; |
181 | } | 182 | } |
182 | 183 | ||
184 | static inline u32 hv_get_cached_bytes_to_write( | ||
185 | const struct hv_ring_buffer_info *rbi) | ||
186 | { | ||
187 | u32 read_loc, write_loc, dsize, write; | ||
188 | |||
189 | dsize = rbi->ring_datasize; | ||
190 | read_loc = rbi->cached_read_index; | ||
191 | write_loc = rbi->ring_buffer->write_index; | ||
192 | |||
193 | write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : | ||
194 | read_loc - write_loc; | ||
195 | return write; | ||
196 | } | ||
183 | /* | 197 | /* |
184 | * VMBUS version is 32 bit entity broken up into | 198 | * VMBUS version is 32 bit entity broken up into |
185 | * two 16 bit quantities: major_number. minor_number. | 199 | * two 16 bit quantities: major_number. minor_number. |
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) | |||
1488 | 1502 | ||
1489 | static inline void hv_signal_on_read(struct vmbus_channel *channel) | 1503 | static inline void hv_signal_on_read(struct vmbus_channel *channel) |
1490 | { | 1504 | { |
1491 | u32 cur_write_sz; | 1505 | u32 cur_write_sz, cached_write_sz; |
1492 | u32 pending_sz; | 1506 | u32 pending_sz; |
1493 | struct hv_ring_buffer_info *rbi = &channel->inbound; | 1507 | struct hv_ring_buffer_info *rbi = &channel->inbound; |
1494 | 1508 | ||
@@ -1512,12 +1526,24 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel) | |||
1512 | 1526 | ||
1513 | cur_write_sz = hv_get_bytes_to_write(rbi); | 1527 | cur_write_sz = hv_get_bytes_to_write(rbi); |
1514 | 1528 | ||
1515 | if (cur_write_sz >= pending_sz) | 1529 | if (cur_write_sz < pending_sz) |
1530 | return; | ||
1531 | |||
1532 | cached_write_sz = hv_get_cached_bytes_to_write(rbi); | ||
1533 | if (cached_write_sz < pending_sz) | ||
1516 | vmbus_setevent(channel); | 1534 | vmbus_setevent(channel); |
1517 | 1535 | ||
1518 | return; | 1536 | return; |
1519 | } | 1537 | } |
1520 | 1538 | ||
1539 | static inline void | ||
1540 | init_cached_read_index(struct vmbus_channel *channel) | ||
1541 | { | ||
1542 | struct hv_ring_buffer_info *rbi = &channel->inbound; | ||
1543 | |||
1544 | rbi->cached_read_index = rbi->ring_buffer->read_index; | ||
1545 | } | ||
1546 | |||
1521 | /* | 1547 | /* |
1522 | * An API to support in-place processing of incoming VMBUS packets. | 1548 | * An API to support in-place processing of incoming VMBUS packets. |
1523 | */ | 1549 | */ |
@@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel, | |||
1569 | * This call commits the read index and potentially signals the host. | 1595 | * This call commits the read index and potentially signals the host. |
1570 | * Here is the pattern for using the "in-place" consumption APIs: | 1596 | * Here is the pattern for using the "in-place" consumption APIs: |
1571 | * | 1597 | * |
1598 | * init_cached_read_index(); | ||
1599 | * | ||
1572 | * while (get_next_pkt_raw() { | 1600 | * while (get_next_pkt_raw() { |
1573 | * process the packet "in-place"; | 1601 | * process the packet "in-place"; |
1574 | * put_pkt_raw(); | 1602 | * put_pkt_raw(); |
diff --git a/include/linux/irq.h b/include/linux/irq.h index e79875574b39..39e3254e5769 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -184,6 +184,7 @@ struct irq_data { | |||
184 | * | 184 | * |
185 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits | 185 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits |
186 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending | 186 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending |
187 | * IRQD_ACTIVATED - Interrupt has already been activated | ||
187 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ | 188 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ |
188 | * IRQD_PER_CPU - Interrupt is per cpu | 189 | * IRQD_PER_CPU - Interrupt is per cpu |
189 | * IRQD_AFFINITY_SET - Interrupt affinity was set | 190 | * IRQD_AFFINITY_SET - Interrupt affinity was set |
@@ -202,6 +203,7 @@ struct irq_data { | |||
202 | enum { | 203 | enum { |
203 | IRQD_TRIGGER_MASK = 0xf, | 204 | IRQD_TRIGGER_MASK = 0xf, |
204 | IRQD_SETAFFINITY_PENDING = (1 << 8), | 205 | IRQD_SETAFFINITY_PENDING = (1 << 8), |
206 | IRQD_ACTIVATED = (1 << 9), | ||
205 | IRQD_NO_BALANCING = (1 << 10), | 207 | IRQD_NO_BALANCING = (1 << 10), |
206 | IRQD_PER_CPU = (1 << 11), | 208 | IRQD_PER_CPU = (1 << 11), |
207 | IRQD_AFFINITY_SET = (1 << 12), | 209 | IRQD_AFFINITY_SET = (1 << 12), |
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d) | |||
312 | return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; | 314 | return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; |
313 | } | 315 | } |
314 | 316 | ||
317 | static inline bool irqd_is_activated(struct irq_data *d) | ||
318 | { | ||
319 | return __irqd_to_state(d) & IRQD_ACTIVATED; | ||
320 | } | ||
321 | |||
322 | static inline void irqd_set_activated(struct irq_data *d) | ||
323 | { | ||
324 | __irqd_to_state(d) |= IRQD_ACTIVATED; | ||
325 | } | ||
326 | |||
327 | static inline void irqd_clr_activated(struct irq_data *d) | ||
328 | { | ||
329 | __irqd_to_state(d) &= ~IRQD_ACTIVATED; | ||
330 | } | ||
331 | |||
315 | #undef __irqd_to_state | 332 | #undef __irqd_to_state |
316 | 333 | ||
317 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | 334 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) |
diff --git a/include/linux/log2.h b/include/linux/log2.h index fd7ff3d91e6a..ef3d4f67118c 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h | |||
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n) | |||
203 | * ... and so on. | 203 | * ... and so on. |
204 | */ | 204 | */ |
205 | 205 | ||
206 | #define order_base_2(n) ilog2(roundup_pow_of_two(n)) | 206 | static inline __attribute_const__ |
207 | int __order_base_2(unsigned long n) | ||
208 | { | ||
209 | return n > 1 ? ilog2(n - 1) + 1 : 0; | ||
210 | } | ||
207 | 211 | ||
212 | #define order_base_2(n) \ | ||
213 | ( \ | ||
214 | __builtin_constant_p(n) ? ( \ | ||
215 | ((n) == 0 || (n) == 1) ? 0 : \ | ||
216 | ilog2((n) - 1) + 1) : \ | ||
217 | __order_base_2(n) \ | ||
218 | ) | ||
208 | #endif /* _LINUX_LOG2_H */ | 219 | #endif /* _LINUX_LOG2_H */ |
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index c1784c0b4f35..134a2f69c21a 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); | |||
85 | extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); | 85 | extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); |
86 | /* VM interface that may be used by firmware interface */ | 86 | /* VM interface that may be used by firmware interface */ |
87 | extern int online_pages(unsigned long, unsigned long, int); | 87 | extern int online_pages(unsigned long, unsigned long, int); |
88 | extern int test_pages_in_a_zone(unsigned long, unsigned long); | 88 | extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, |
89 | unsigned long *valid_start, unsigned long *valid_end); | ||
89 | extern void __offline_isolated_pages(unsigned long, unsigned long); | 90 | extern void __offline_isolated_pages(unsigned long, unsigned long); |
90 | 91 | ||
91 | typedef void (*online_page_callback_t)(struct page *page); | 92 | typedef void (*online_page_callback_t)(struct page *page); |
diff --git a/include/linux/module.h b/include/linux/module.h index 7c84273d60b9..cc7cba219b20 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -346,7 +346,7 @@ struct module { | |||
346 | 346 | ||
347 | /* Exported symbols */ | 347 | /* Exported symbols */ |
348 | const struct kernel_symbol *syms; | 348 | const struct kernel_symbol *syms; |
349 | const unsigned long *crcs; | 349 | const s32 *crcs; |
350 | unsigned int num_syms; | 350 | unsigned int num_syms; |
351 | 351 | ||
352 | /* Kernel parameters. */ | 352 | /* Kernel parameters. */ |
@@ -359,18 +359,18 @@ struct module { | |||
359 | /* GPL-only exported symbols. */ | 359 | /* GPL-only exported symbols. */ |
360 | unsigned int num_gpl_syms; | 360 | unsigned int num_gpl_syms; |
361 | const struct kernel_symbol *gpl_syms; | 361 | const struct kernel_symbol *gpl_syms; |
362 | const unsigned long *gpl_crcs; | 362 | const s32 *gpl_crcs; |
363 | 363 | ||
364 | #ifdef CONFIG_UNUSED_SYMBOLS | 364 | #ifdef CONFIG_UNUSED_SYMBOLS |
365 | /* unused exported symbols. */ | 365 | /* unused exported symbols. */ |
366 | const struct kernel_symbol *unused_syms; | 366 | const struct kernel_symbol *unused_syms; |
367 | const unsigned long *unused_crcs; | 367 | const s32 *unused_crcs; |
368 | unsigned int num_unused_syms; | 368 | unsigned int num_unused_syms; |
369 | 369 | ||
370 | /* GPL-only, unused exported symbols. */ | 370 | /* GPL-only, unused exported symbols. */ |
371 | unsigned int num_unused_gpl_syms; | 371 | unsigned int num_unused_gpl_syms; |
372 | const struct kernel_symbol *unused_gpl_syms; | 372 | const struct kernel_symbol *unused_gpl_syms; |
373 | const unsigned long *unused_gpl_crcs; | 373 | const s32 *unused_gpl_crcs; |
374 | #endif | 374 | #endif |
375 | 375 | ||
376 | #ifdef CONFIG_MODULE_SIG | 376 | #ifdef CONFIG_MODULE_SIG |
@@ -382,7 +382,7 @@ struct module { | |||
382 | 382 | ||
383 | /* symbols that will be GPL-only in the near future. */ | 383 | /* symbols that will be GPL-only in the near future. */ |
384 | const struct kernel_symbol *gpl_future_syms; | 384 | const struct kernel_symbol *gpl_future_syms; |
385 | const unsigned long *gpl_future_crcs; | 385 | const s32 *gpl_future_crcs; |
386 | unsigned int num_gpl_future_syms; | 386 | unsigned int num_gpl_future_syms; |
387 | 387 | ||
388 | /* Exception table */ | 388 | /* Exception table */ |
@@ -523,7 +523,7 @@ struct module *find_module(const char *name); | |||
523 | 523 | ||
524 | struct symsearch { | 524 | struct symsearch { |
525 | const struct kernel_symbol *start, *stop; | 525 | const struct kernel_symbol *start, *stop; |
526 | const unsigned long *crcs; | 526 | const s32 *crcs; |
527 | enum { | 527 | enum { |
528 | NOT_GPL_ONLY, | 528 | NOT_GPL_ONLY, |
529 | GPL_ONLY, | 529 | GPL_ONLY, |
@@ -539,7 +539,7 @@ struct symsearch { | |||
539 | */ | 539 | */ |
540 | const struct kernel_symbol *find_symbol(const char *name, | 540 | const struct kernel_symbol *find_symbol(const char *name, |
541 | struct module **owner, | 541 | struct module **owner, |
542 | const unsigned long **crc, | 542 | const s32 **crc, |
543 | bool gplok, | 543 | bool gplok, |
544 | bool warn); | 544 | bool warn); |
545 | 545 | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9bde9558b596..27914672602d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -866,11 +866,15 @@ struct netdev_xdp { | |||
866 | * of useless work if you return NETDEV_TX_BUSY. | 866 | * of useless work if you return NETDEV_TX_BUSY. |
867 | * Required; cannot be NULL. | 867 | * Required; cannot be NULL. |
868 | * | 868 | * |
869 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, | 869 | * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, |
870 | * netdev_features_t features); | 870 | * struct net_device *dev |
871 | * Adjusts the requested feature flags according to device-specific | 871 | * netdev_features_t features); |
872 | * constraints, and returns the resulting flags. Must not modify | 872 | * Called by core transmit path to determine if device is capable of |
873 | * the device state. | 873 | * performing offload operations on a given packet. This is to give |
874 | * the device an opportunity to implement any restrictions that cannot | ||
875 | * be otherwise expressed by feature flags. The check is called with | ||
876 | * the set of features that the stack has calculated and it returns | ||
877 | * those the driver believes to be appropriate. | ||
874 | * | 878 | * |
875 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, | 879 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
876 | * void *accel_priv, select_queue_fallback_t fallback); | 880 | * void *accel_priv, select_queue_fallback_t fallback); |
@@ -1028,6 +1032,12 @@ struct netdev_xdp { | |||
1028 | * Called to release previously enslaved netdev. | 1032 | * Called to release previously enslaved netdev. |
1029 | * | 1033 | * |
1030 | * Feature/offload setting functions. | 1034 | * Feature/offload setting functions. |
1035 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, | ||
1036 | * netdev_features_t features); | ||
1037 | * Adjusts the requested feature flags according to device-specific | ||
1038 | * constraints, and returns the resulting flags. Must not modify | ||
1039 | * the device state. | ||
1040 | * | ||
1031 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); | 1041 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); |
1032 | * Called to update device configuration to new features. Passed | 1042 | * Called to update device configuration to new features. Passed |
1033 | * feature set might be less than what was returned by ndo_fix_features()). | 1043 | * feature set might be less than what was returned by ndo_fix_features()). |
@@ -1100,15 +1110,6 @@ struct netdev_xdp { | |||
1100 | * Callback to use for xmit over the accelerated station. This | 1110 | * Callback to use for xmit over the accelerated station. This |
1101 | * is used in place of ndo_start_xmit on accelerated net | 1111 | * is used in place of ndo_start_xmit on accelerated net |
1102 | * devices. | 1112 | * devices. |
1103 | * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, | ||
1104 | * struct net_device *dev | ||
1105 | * netdev_features_t features); | ||
1106 | * Called by core transmit path to determine if device is capable of | ||
1107 | * performing offload operations on a given packet. This is to give | ||
1108 | * the device an opportunity to implement any restrictions that cannot | ||
1109 | * be otherwise expressed by feature flags. The check is called with | ||
1110 | * the set of features that the stack has calculated and it returns | ||
1111 | * those the driver believes to be appropriate. | ||
1112 | * int (*ndo_set_tx_maxrate)(struct net_device *dev, | 1113 | * int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1113 | * int queue_index, u32 maxrate); | 1114 | * int queue_index, u32 maxrate); |
1114 | * Called when a user wants to set a max-rate limitation of specific | 1115 | * Called when a user wants to set a max-rate limitation of specific |
@@ -1510,6 +1511,7 @@ enum netdev_priv_flags { | |||
1510 | * @max_mtu: Interface Maximum MTU value | 1511 | * @max_mtu: Interface Maximum MTU value |
1511 | * @type: Interface hardware type | 1512 | * @type: Interface hardware type |
1512 | * @hard_header_len: Maximum hardware header length. | 1513 | * @hard_header_len: Maximum hardware header length. |
1514 | * @min_header_len: Minimum hardware header length | ||
1513 | * | 1515 | * |
1514 | * @needed_headroom: Extra headroom the hardware may need, but not in all | 1516 | * @needed_headroom: Extra headroom the hardware may need, but not in all |
1515 | * cases can this be guaranteed | 1517 | * cases can this be guaranteed |
@@ -1727,6 +1729,7 @@ struct net_device { | |||
1727 | unsigned int max_mtu; | 1729 | unsigned int max_mtu; |
1728 | unsigned short type; | 1730 | unsigned short type; |
1729 | unsigned short hard_header_len; | 1731 | unsigned short hard_header_len; |
1732 | unsigned short min_header_len; | ||
1730 | 1733 | ||
1731 | unsigned short needed_headroom; | 1734 | unsigned short needed_headroom; |
1732 | unsigned short needed_tailroom; | 1735 | unsigned short needed_tailroom; |
@@ -2693,6 +2696,8 @@ static inline bool dev_validate_header(const struct net_device *dev, | |||
2693 | { | 2696 | { |
2694 | if (likely(len >= dev->hard_header_len)) | 2697 | if (likely(len >= dev->hard_header_len)) |
2695 | return true; | 2698 | return true; |
2699 | if (len < dev->min_header_len) | ||
2700 | return false; | ||
2696 | 2701 | ||
2697 | if (capable(CAP_SYS_RAWIO)) { | 2702 | if (capable(CAP_SYS_RAWIO)) { |
2698 | memset(ll_header + len, 0, dev->hard_header_len - len); | 2703 | memset(ll_header + len, 0, dev->hard_header_len - len); |
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index bca536341d1a..1b1ca04820a3 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -282,7 +282,7 @@ enum nfsstat4 { | |||
282 | 282 | ||
283 | static inline bool seqid_mutating_err(u32 err) | 283 | static inline bool seqid_mutating_err(u32 err) |
284 | { | 284 | { |
285 | /* rfc 3530 section 8.1.5: */ | 285 | /* See RFC 7530, section 9.1.7 */ |
286 | switch (err) { | 286 | switch (err) { |
287 | case NFS4ERR_STALE_CLIENTID: | 287 | case NFS4ERR_STALE_CLIENTID: |
288 | case NFS4ERR_STALE_STATEID: | 288 | case NFS4ERR_STALE_STATEID: |
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err) | |||
291 | case NFS4ERR_BADXDR: | 291 | case NFS4ERR_BADXDR: |
292 | case NFS4ERR_RESOURCE: | 292 | case NFS4ERR_RESOURCE: |
293 | case NFS4ERR_NOFILEHANDLE: | 293 | case NFS4ERR_NOFILEHANDLE: |
294 | case NFS4ERR_MOVED: | ||
294 | return false; | 295 | return false; |
295 | }; | 296 | }; |
296 | return true; | 297 | return true; |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 1c7eec09e5eb..3a481a49546e 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref) | |||
204 | static inline bool percpu_ref_tryget(struct percpu_ref *ref) | 204 | static inline bool percpu_ref_tryget(struct percpu_ref *ref) |
205 | { | 205 | { |
206 | unsigned long __percpu *percpu_count; | 206 | unsigned long __percpu *percpu_count; |
207 | int ret; | 207 | bool ret; |
208 | 208 | ||
209 | rcu_read_lock_sched(); | 209 | rcu_read_lock_sched(); |
210 | 210 | ||
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref) | |||
238 | static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) | 238 | static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) |
239 | { | 239 | { |
240 | unsigned long __percpu *percpu_count; | 240 | unsigned long __percpu *percpu_count; |
241 | int ret = false; | 241 | bool ret = false; |
242 | 242 | ||
243 | rcu_read_lock_sched(); | 243 | rcu_read_lock_sched(); |
244 | 244 | ||
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 85cc819676e8..333ad11b3dd9 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *); | |||
216 | void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); | 216 | void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); |
217 | bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, | 217 | bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, |
218 | const struct sockaddr *sap); | 218 | const struct sockaddr *sap); |
219 | void rpc_cleanup_clids(void); | ||
219 | #endif /* __KERNEL__ */ | 220 | #endif /* __KERNEL__ */ |
220 | #endif /* _LINUX_SUNRPC_CLNT_H */ | 221 | #endif /* _LINUX_SUNRPC_CLNT_H */ |
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index 3ebb168b9afc..a34b141f125f 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h | |||
@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb, | |||
309 | } | 309 | } |
310 | 310 | ||
311 | for (opt_iter = 6; opt_iter < opt_len;) { | 311 | for (opt_iter = 6; opt_iter < opt_len;) { |
312 | if (opt_iter + 1 == opt_len) { | ||
313 | err_offset = opt_iter; | ||
314 | goto out; | ||
315 | } | ||
312 | tag_len = opt[opt_iter + 1]; | 316 | tag_len = opt[opt_iter + 1]; |
313 | if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) { | 317 | if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) { |
314 | err_offset = opt_iter + 1; | 318 | err_offset = opt_iter + 1; |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 7afe991e900e..dbf0abba33b8 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, | |||
776 | { | 776 | { |
777 | u32 hash; | 777 | u32 hash; |
778 | 778 | ||
779 | /* @flowlabel may include more than a flow label, eg, the traffic class. | ||
780 | * Here we want only the flow label value. | ||
781 | */ | ||
782 | flowlabel &= IPV6_FLOWLABEL_MASK; | ||
783 | |||
779 | if (flowlabel || | 784 | if (flowlabel || |
780 | net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || | 785 | net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || |
781 | (!autolabel && | 786 | (!autolabel && |
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 73dd87647460..0388b9c5f5e2 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h | |||
@@ -178,7 +178,10 @@ static inline int lwtunnel_valid_encap_type(u16 encap_type) | |||
178 | } | 178 | } |
179 | static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) | 179 | static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) |
180 | { | 180 | { |
181 | return -EOPNOTSUPP; | 181 | /* return 0 since we are not walking attr looking for |
182 | * RTA_ENCAP_TYPE attribute on nexthops. | ||
183 | */ | ||
184 | return 0; | ||
182 | } | 185 | } |
183 | 186 | ||
184 | static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type, | 187 | static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type, |
diff --git a/include/net/sock.h b/include/net/sock.h index f0e867f58722..c4f5e6fca17c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -2006,7 +2006,9 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer, | |||
2006 | void sk_stop_timer(struct sock *sk, struct timer_list *timer); | 2006 | void sk_stop_timer(struct sock *sk, struct timer_list *timer); |
2007 | 2007 | ||
2008 | int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, | 2008 | int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, |
2009 | unsigned int flags); | 2009 | unsigned int flags, |
2010 | void (*destructor)(struct sock *sk, | ||
2011 | struct sk_buff *skb)); | ||
2010 | int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | 2012 | int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
2011 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | 2013 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
2012 | 2014 | ||
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index 6902c2a8bd23..4b6b489a8d7c 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h | |||
@@ -55,17 +55,17 @@ struct mcip_cmd { | |||
55 | 55 | ||
56 | struct mcip_bcr { | 56 | struct mcip_bcr { |
57 | #ifdef CONFIG_CPU_BIG_ENDIAN | 57 | #ifdef CONFIG_CPU_BIG_ENDIAN |
58 | unsigned int pad3:8, | 58 | unsigned int pad4:6, pw_dom:1, pad3:1, |
59 | idu:1, llm:1, num_cores:6, | 59 | idu:1, pad2:1, num_cores:6, |
60 | iocoh:1, gfrc:1, dbg:1, pad2:1, | 60 | pad:1, gfrc:1, dbg:1, pw:1, |
61 | msg:1, sem:1, ipi:1, pad:1, | 61 | msg:1, sem:1, ipi:1, slv:1, |
62 | ver:8; | 62 | ver:8; |
63 | #else | 63 | #else |
64 | unsigned int ver:8, | 64 | unsigned int ver:8, |
65 | pad:1, ipi:1, sem:1, msg:1, | 65 | slv:1, ipi:1, sem:1, msg:1, |
66 | pad2:1, dbg:1, gfrc:1, iocoh:1, | 66 | pw:1, dbg:1, gfrc:1, pad:1, |
67 | num_cores:6, llm:1, idu:1, | 67 | num_cores:6, pad2:1, idu:1, |
68 | pad3:8; | 68 | pad3:1, pw_dom:1, pad4:6; |
69 | #endif | 69 | #endif |
70 | }; | 70 | }; |
71 | 71 | ||
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 43edf82e54ff..da854fb4530f 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -538,6 +538,7 @@ struct se_node_acl { | |||
538 | char initiatorname[TRANSPORT_IQN_LEN]; | 538 | char initiatorname[TRANSPORT_IQN_LEN]; |
539 | /* Used to signal demo mode created ACL, disabled by default */ | 539 | /* Used to signal demo mode created ACL, disabled by default */ |
540 | bool dynamic_node_acl; | 540 | bool dynamic_node_acl; |
541 | bool dynamic_stop; | ||
541 | u32 queue_depth; | 542 | u32 queue_depth; |
542 | u32 acl_index; | 543 | u32 acl_index; |
543 | enum target_prot_type saved_prot_type; | 544 | enum target_prot_type saved_prot_type; |
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index f0db7788f887..3dc91a46e8b8 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices { | |||
1384 | ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, | 1384 | ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, |
1385 | ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, | 1385 | ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, |
1386 | ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, | 1386 | ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, |
1387 | ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, | ||
1388 | ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, | ||
1387 | 1389 | ||
1388 | 1390 | ||
1389 | /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit | 1391 | /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit |
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices { | |||
1393 | */ | 1395 | */ |
1394 | 1396 | ||
1395 | __ETHTOOL_LINK_MODE_LAST | 1397 | __ETHTOOL_LINK_MODE_LAST |
1396 | = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, | 1398 | = ETHTOOL_LINK_MODE_5000baseT_Full_BIT, |
1397 | }; | 1399 | }; |
1398 | 1400 | ||
1399 | #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ | 1401 | #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ |
diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h index c396a8052f73..052799e4d751 100644 --- a/include/uapi/linux/seg6.h +++ b/include/uapi/linux/seg6.h | |||
@@ -23,14 +23,12 @@ struct ipv6_sr_hdr { | |||
23 | __u8 type; | 23 | __u8 type; |
24 | __u8 segments_left; | 24 | __u8 segments_left; |
25 | __u8 first_segment; | 25 | __u8 first_segment; |
26 | __u8 flag_1; | 26 | __u8 flags; |
27 | __u8 flag_2; | 27 | __u16 reserved; |
28 | __u8 reserved; | ||
29 | 28 | ||
30 | struct in6_addr segments[0]; | 29 | struct in6_addr segments[0]; |
31 | }; | 30 | }; |
32 | 31 | ||
33 | #define SR6_FLAG1_CLEANUP (1 << 7) | ||
34 | #define SR6_FLAG1_PROTECTED (1 << 6) | 32 | #define SR6_FLAG1_PROTECTED (1 << 6) |
35 | #define SR6_FLAG1_OAM (1 << 5) | 33 | #define SR6_FLAG1_OAM (1 << 5) |
36 | #define SR6_FLAG1_ALERT (1 << 4) | 34 | #define SR6_FLAG1_ALERT (1 << 4) |
@@ -42,8 +40,7 @@ struct ipv6_sr_hdr { | |||
42 | #define SR6_TLV_PADDING 4 | 40 | #define SR6_TLV_PADDING 4 |
43 | #define SR6_TLV_HMAC 5 | 41 | #define SR6_TLV_HMAC 5 |
44 | 42 | ||
45 | #define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP) | 43 | #define sr_has_hmac(srh) ((srh)->flags & SR6_FLAG1_HMAC) |
46 | #define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC) | ||
47 | 44 | ||
48 | struct sr6_tlv { | 45 | struct sr6_tlv { |
49 | __u8 type; | 46 | __u8 type; |
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index dfdfe4e92d31..f4f87cff6dc6 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h | |||
@@ -37,7 +37,6 @@ | |||
37 | #define IB_USER_VERBS_H | 37 | #define IB_USER_VERBS_H |
38 | 38 | ||
39 | #include <linux/types.h> | 39 | #include <linux/types.h> |
40 | #include <rdma/ib_verbs.h> | ||
41 | 40 | ||
42 | /* | 41 | /* |
43 | * Increment this value if any changes that break userspace ABI | 42 | * Increment this value if any changes that break userspace ABI |
@@ -548,11 +547,17 @@ enum { | |||
548 | }; | 547 | }; |
549 | 548 | ||
550 | enum { | 549 | enum { |
551 | IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN | 550 | /* |
551 | * This value is equal to IB_QP_DEST_QPN. | ||
552 | */ | ||
553 | IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20, | ||
552 | }; | 554 | }; |
553 | 555 | ||
554 | enum { | 556 | enum { |
555 | IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT | 557 | /* |
558 | * This value is equal to IB_QP_RATE_LIMIT. | ||
559 | */ | ||
560 | IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25, | ||
556 | }; | 561 | }; |
557 | 562 | ||
558 | struct ib_uverbs_ex_create_qp { | 563 | struct ib_uverbs_ex_create_qp { |
diff --git a/init/Kconfig b/init/Kconfig index e1a937348a3e..4dd8bd232a1d 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1987,6 +1987,10 @@ config MODVERSIONS | |||
1987 | make them incompatible with the kernel you are running. If | 1987 | make them incompatible with the kernel you are running. If |
1988 | unsure, say N. | 1988 | unsure, say N. |
1989 | 1989 | ||
1990 | config MODULE_REL_CRCS | ||
1991 | bool | ||
1992 | depends on MODVERSIONS | ||
1993 | |||
1990 | config MODULE_SRCVERSION_ALL | 1994 | config MODULE_SRCVERSION_ALL |
1991 | bool "Source checksum for all modules" | 1995 | bool "Source checksum for all modules" |
1992 | help | 1996 | help |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2ee9ec3051b2..688dd02af985 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -5221,6 +5221,11 @@ err_free_css: | |||
5221 | return ERR_PTR(err); | 5221 | return ERR_PTR(err); |
5222 | } | 5222 | } |
5223 | 5223 | ||
5224 | /* | ||
5225 | * The returned cgroup is fully initialized including its control mask, but | ||
5226 | * it isn't associated with its kernfs_node and doesn't have the control | ||
5227 | * mask applied. | ||
5228 | */ | ||
5224 | static struct cgroup *cgroup_create(struct cgroup *parent) | 5229 | static struct cgroup *cgroup_create(struct cgroup *parent) |
5225 | { | 5230 | { |
5226 | struct cgroup_root *root = parent->root; | 5231 | struct cgroup_root *root = parent->root; |
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent) | |||
5288 | 5293 | ||
5289 | cgroup_propagate_control(cgrp); | 5294 | cgroup_propagate_control(cgrp); |
5290 | 5295 | ||
5291 | /* @cgrp doesn't have dir yet so the following will only create csses */ | ||
5292 | ret = cgroup_apply_control_enable(cgrp); | ||
5293 | if (ret) | ||
5294 | goto out_destroy; | ||
5295 | |||
5296 | return cgrp; | 5296 | return cgrp; |
5297 | 5297 | ||
5298 | out_cancel_ref: | 5298 | out_cancel_ref: |
@@ -5300,9 +5300,6 @@ out_cancel_ref: | |||
5300 | out_free_cgrp: | 5300 | out_free_cgrp: |
5301 | kfree(cgrp); | 5301 | kfree(cgrp); |
5302 | return ERR_PTR(ret); | 5302 | return ERR_PTR(ret); |
5303 | out_destroy: | ||
5304 | cgroup_destroy_locked(cgrp); | ||
5305 | return ERR_PTR(ret); | ||
5306 | } | 5303 | } |
5307 | 5304 | ||
5308 | static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, | 5305 | static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 110b38a58493..e235bb991bdd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) | |||
1469 | static void | 1469 | static void |
1470 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) | 1470 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) |
1471 | { | 1471 | { |
1472 | |||
1473 | lockdep_assert_held(&ctx->lock); | 1472 | lockdep_assert_held(&ctx->lock); |
1474 | 1473 | ||
1475 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); | 1474 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); |
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event) | |||
1624 | { | 1623 | { |
1625 | struct perf_event *group_leader = event->group_leader, *pos; | 1624 | struct perf_event *group_leader = event->group_leader, *pos; |
1626 | 1625 | ||
1626 | lockdep_assert_held(&event->ctx->lock); | ||
1627 | |||
1627 | /* | 1628 | /* |
1628 | * We can have double attach due to group movement in perf_event_open. | 1629 | * We can have double attach due to group movement in perf_event_open. |
1629 | */ | 1630 | */ |
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event) | |||
1697 | struct perf_event *sibling, *tmp; | 1698 | struct perf_event *sibling, *tmp; |
1698 | struct list_head *list = NULL; | 1699 | struct list_head *list = NULL; |
1699 | 1700 | ||
1701 | lockdep_assert_held(&event->ctx->lock); | ||
1702 | |||
1700 | /* | 1703 | /* |
1701 | * We can have double detach due to exit/hot-unplug + close. | 1704 | * We can have double detach due to exit/hot-unplug + close. |
1702 | */ | 1705 | */ |
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event, | |||
1895 | */ | 1898 | */ |
1896 | static void perf_remove_from_context(struct perf_event *event, unsigned long flags) | 1899 | static void perf_remove_from_context(struct perf_event *event, unsigned long flags) |
1897 | { | 1900 | { |
1898 | lockdep_assert_held(&event->ctx->mutex); | 1901 | struct perf_event_context *ctx = event->ctx; |
1902 | |||
1903 | lockdep_assert_held(&ctx->mutex); | ||
1899 | 1904 | ||
1900 | event_function_call(event, __perf_remove_from_context, (void *)flags); | 1905 | event_function_call(event, __perf_remove_from_context, (void *)flags); |
1906 | |||
1907 | /* | ||
1908 | * The above event_function_call() can NO-OP when it hits | ||
1909 | * TASK_TOMBSTONE. In that case we must already have been detached | ||
1910 | * from the context (by perf_event_exit_event()) but the grouping | ||
1911 | * might still be in-tact. | ||
1912 | */ | ||
1913 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); | ||
1914 | if ((flags & DETACH_GROUP) && | ||
1915 | (event->attach_state & PERF_ATTACH_GROUP)) { | ||
1916 | /* | ||
1917 | * Since in that case we cannot possibly be scheduled, simply | ||
1918 | * detach now. | ||
1919 | */ | ||
1920 | raw_spin_lock_irq(&ctx->lock); | ||
1921 | perf_group_detach(event); | ||
1922 | raw_spin_unlock_irq(&ctx->lock); | ||
1923 | } | ||
1901 | } | 1924 | } |
1902 | 1925 | ||
1903 | /* | 1926 | /* |
@@ -3464,14 +3487,15 @@ struct perf_read_data { | |||
3464 | int ret; | 3487 | int ret; |
3465 | }; | 3488 | }; |
3466 | 3489 | ||
3467 | static int find_cpu_to_read(struct perf_event *event, int local_cpu) | 3490 | static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) |
3468 | { | 3491 | { |
3469 | int event_cpu = event->oncpu; | ||
3470 | u16 local_pkg, event_pkg; | 3492 | u16 local_pkg, event_pkg; |
3471 | 3493 | ||
3472 | if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { | 3494 | if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { |
3473 | event_pkg = topology_physical_package_id(event_cpu); | 3495 | int local_cpu = smp_processor_id(); |
3474 | local_pkg = topology_physical_package_id(local_cpu); | 3496 | |
3497 | event_pkg = topology_physical_package_id(event_cpu); | ||
3498 | local_pkg = topology_physical_package_id(local_cpu); | ||
3475 | 3499 | ||
3476 | if (event_pkg == local_pkg) | 3500 | if (event_pkg == local_pkg) |
3477 | return local_cpu; | 3501 | return local_cpu; |
@@ -3601,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event) | |||
3601 | 3625 | ||
3602 | static int perf_event_read(struct perf_event *event, bool group) | 3626 | static int perf_event_read(struct perf_event *event, bool group) |
3603 | { | 3627 | { |
3604 | int ret = 0, cpu_to_read, local_cpu; | 3628 | int event_cpu, ret = 0; |
3605 | 3629 | ||
3606 | /* | 3630 | /* |
3607 | * If event is enabled and currently active on a CPU, update the | 3631 | * If event is enabled and currently active on a CPU, update the |
@@ -3614,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group) | |||
3614 | .ret = 0, | 3638 | .ret = 0, |
3615 | }; | 3639 | }; |
3616 | 3640 | ||
3617 | local_cpu = get_cpu(); | 3641 | event_cpu = READ_ONCE(event->oncpu); |
3618 | cpu_to_read = find_cpu_to_read(event, local_cpu); | 3642 | if ((unsigned)event_cpu >= nr_cpu_ids) |
3619 | put_cpu(); | 3643 | return 0; |
3644 | |||
3645 | preempt_disable(); | ||
3646 | event_cpu = __perf_event_read_cpu(event, event_cpu); | ||
3620 | 3647 | ||
3621 | /* | 3648 | /* |
3622 | * Purposely ignore the smp_call_function_single() return | 3649 | * Purposely ignore the smp_call_function_single() return |
3623 | * value. | 3650 | * value. |
3624 | * | 3651 | * |
3625 | * If event->oncpu isn't a valid CPU it means the event got | 3652 | * If event_cpu isn't a valid CPU it means the event got |
3626 | * scheduled out and that will have updated the event count. | 3653 | * scheduled out and that will have updated the event count. |
3627 | * | 3654 | * |
3628 | * Therefore, either way, we'll have an up-to-date event count | 3655 | * Therefore, either way, we'll have an up-to-date event count |
3629 | * after this. | 3656 | * after this. |
3630 | */ | 3657 | */ |
3631 | (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1); | 3658 | (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1); |
3659 | preempt_enable(); | ||
3632 | ret = data.ret; | 3660 | ret = data.ret; |
3633 | } else if (event->state == PERF_EVENT_STATE_INACTIVE) { | 3661 | } else if (event->state == PERF_EVENT_STATE_INACTIVE) { |
3634 | struct perf_event_context *ctx = event->ctx; | 3662 | struct perf_event_context *ctx = event->ctx; |
@@ -6609,6 +6637,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | |||
6609 | char *buf = NULL; | 6637 | char *buf = NULL; |
6610 | char *name; | 6638 | char *name; |
6611 | 6639 | ||
6640 | if (vma->vm_flags & VM_READ) | ||
6641 | prot |= PROT_READ; | ||
6642 | if (vma->vm_flags & VM_WRITE) | ||
6643 | prot |= PROT_WRITE; | ||
6644 | if (vma->vm_flags & VM_EXEC) | ||
6645 | prot |= PROT_EXEC; | ||
6646 | |||
6647 | if (vma->vm_flags & VM_MAYSHARE) | ||
6648 | flags = MAP_SHARED; | ||
6649 | else | ||
6650 | flags = MAP_PRIVATE; | ||
6651 | |||
6652 | if (vma->vm_flags & VM_DENYWRITE) | ||
6653 | flags |= MAP_DENYWRITE; | ||
6654 | if (vma->vm_flags & VM_MAYEXEC) | ||
6655 | flags |= MAP_EXECUTABLE; | ||
6656 | if (vma->vm_flags & VM_LOCKED) | ||
6657 | flags |= MAP_LOCKED; | ||
6658 | if (vma->vm_flags & VM_HUGETLB) | ||
6659 | flags |= MAP_HUGETLB; | ||
6660 | |||
6612 | if (file) { | 6661 | if (file) { |
6613 | struct inode *inode; | 6662 | struct inode *inode; |
6614 | dev_t dev; | 6663 | dev_t dev; |
@@ -6635,27 +6684,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | |||
6635 | maj = MAJOR(dev); | 6684 | maj = MAJOR(dev); |
6636 | min = MINOR(dev); | 6685 | min = MINOR(dev); |
6637 | 6686 | ||
6638 | if (vma->vm_flags & VM_READ) | ||
6639 | prot |= PROT_READ; | ||
6640 | if (vma->vm_flags & VM_WRITE) | ||
6641 | prot |= PROT_WRITE; | ||
6642 | if (vma->vm_flags & VM_EXEC) | ||
6643 | prot |= PROT_EXEC; | ||
6644 | |||
6645 | if (vma->vm_flags & VM_MAYSHARE) | ||
6646 | flags = MAP_SHARED; | ||
6647 | else | ||
6648 | flags = MAP_PRIVATE; | ||
6649 | |||
6650 | if (vma->vm_flags & VM_DENYWRITE) | ||
6651 | flags |= MAP_DENYWRITE; | ||
6652 | if (vma->vm_flags & VM_MAYEXEC) | ||
6653 | flags |= MAP_EXECUTABLE; | ||
6654 | if (vma->vm_flags & VM_LOCKED) | ||
6655 | flags |= MAP_LOCKED; | ||
6656 | if (vma->vm_flags & VM_HUGETLB) | ||
6657 | flags |= MAP_HUGETLB; | ||
6658 | |||
6659 | goto got_name; | 6687 | goto got_name; |
6660 | } else { | 6688 | } else { |
6661 | if (vma->vm_ops && vma->vm_ops->name) { | 6689 | if (vma->vm_ops && vma->vm_ops->name) { |
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 8c0a0ae43521..b59e6768c5e9 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain, | |||
1346 | } | 1346 | } |
1347 | EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); | 1347 | EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); |
1348 | 1348 | ||
1349 | static void __irq_domain_activate_irq(struct irq_data *irq_data) | ||
1350 | { | ||
1351 | if (irq_data && irq_data->domain) { | ||
1352 | struct irq_domain *domain = irq_data->domain; | ||
1353 | |||
1354 | if (irq_data->parent_data) | ||
1355 | __irq_domain_activate_irq(irq_data->parent_data); | ||
1356 | if (domain->ops->activate) | ||
1357 | domain->ops->activate(domain, irq_data); | ||
1358 | } | ||
1359 | } | ||
1360 | |||
1361 | static void __irq_domain_deactivate_irq(struct irq_data *irq_data) | ||
1362 | { | ||
1363 | if (irq_data && irq_data->domain) { | ||
1364 | struct irq_domain *domain = irq_data->domain; | ||
1365 | |||
1366 | if (domain->ops->deactivate) | ||
1367 | domain->ops->deactivate(domain, irq_data); | ||
1368 | if (irq_data->parent_data) | ||
1369 | __irq_domain_deactivate_irq(irq_data->parent_data); | ||
1370 | } | ||
1371 | } | ||
1372 | |||
1349 | /** | 1373 | /** |
1350 | * irq_domain_activate_irq - Call domain_ops->activate recursively to activate | 1374 | * irq_domain_activate_irq - Call domain_ops->activate recursively to activate |
1351 | * interrupt | 1375 | * interrupt |
@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); | |||
1356 | */ | 1380 | */ |
1357 | void irq_domain_activate_irq(struct irq_data *irq_data) | 1381 | void irq_domain_activate_irq(struct irq_data *irq_data) |
1358 | { | 1382 | { |
1359 | if (irq_data && irq_data->domain) { | 1383 | if (!irqd_is_activated(irq_data)) { |
1360 | struct irq_domain *domain = irq_data->domain; | 1384 | __irq_domain_activate_irq(irq_data); |
1361 | 1385 | irqd_set_activated(irq_data); | |
1362 | if (irq_data->parent_data) | ||
1363 | irq_domain_activate_irq(irq_data->parent_data); | ||
1364 | if (domain->ops->activate) | ||
1365 | domain->ops->activate(domain, irq_data); | ||
1366 | } | 1386 | } |
1367 | } | 1387 | } |
1368 | 1388 | ||
@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data) | |||
1376 | */ | 1396 | */ |
1377 | void irq_domain_deactivate_irq(struct irq_data *irq_data) | 1397 | void irq_domain_deactivate_irq(struct irq_data *irq_data) |
1378 | { | 1398 | { |
1379 | if (irq_data && irq_data->domain) { | 1399 | if (irqd_is_activated(irq_data)) { |
1380 | struct irq_domain *domain = irq_data->domain; | 1400 | __irq_domain_deactivate_irq(irq_data); |
1381 | 1401 | irqd_clr_activated(irq_data); | |
1382 | if (domain->ops->deactivate) | ||
1383 | domain->ops->deactivate(domain, irq_data); | ||
1384 | if (irq_data->parent_data) | ||
1385 | irq_domain_deactivate_irq(irq_data->parent_data); | ||
1386 | } | 1402 | } |
1387 | } | 1403 | } |
1388 | 1404 | ||
diff --git a/kernel/module.c b/kernel/module.c index 38d4270925d4..3d8f126208e3 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[]; | |||
389 | extern const struct kernel_symbol __stop___ksymtab_gpl[]; | 389 | extern const struct kernel_symbol __stop___ksymtab_gpl[]; |
390 | extern const struct kernel_symbol __start___ksymtab_gpl_future[]; | 390 | extern const struct kernel_symbol __start___ksymtab_gpl_future[]; |
391 | extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; | 391 | extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; |
392 | extern const unsigned long __start___kcrctab[]; | 392 | extern const s32 __start___kcrctab[]; |
393 | extern const unsigned long __start___kcrctab_gpl[]; | 393 | extern const s32 __start___kcrctab_gpl[]; |
394 | extern const unsigned long __start___kcrctab_gpl_future[]; | 394 | extern const s32 __start___kcrctab_gpl_future[]; |
395 | #ifdef CONFIG_UNUSED_SYMBOLS | 395 | #ifdef CONFIG_UNUSED_SYMBOLS |
396 | extern const struct kernel_symbol __start___ksymtab_unused[]; | 396 | extern const struct kernel_symbol __start___ksymtab_unused[]; |
397 | extern const struct kernel_symbol __stop___ksymtab_unused[]; | 397 | extern const struct kernel_symbol __stop___ksymtab_unused[]; |
398 | extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; | 398 | extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; |
399 | extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; | 399 | extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; |
400 | extern const unsigned long __start___kcrctab_unused[]; | 400 | extern const s32 __start___kcrctab_unused[]; |
401 | extern const unsigned long __start___kcrctab_unused_gpl[]; | 401 | extern const s32 __start___kcrctab_unused_gpl[]; |
402 | #endif | 402 | #endif |
403 | 403 | ||
404 | #ifndef CONFIG_MODVERSIONS | 404 | #ifndef CONFIG_MODVERSIONS |
@@ -497,7 +497,7 @@ struct find_symbol_arg { | |||
497 | 497 | ||
498 | /* Output */ | 498 | /* Output */ |
499 | struct module *owner; | 499 | struct module *owner; |
500 | const unsigned long *crc; | 500 | const s32 *crc; |
501 | const struct kernel_symbol *sym; | 501 | const struct kernel_symbol *sym; |
502 | }; | 502 | }; |
503 | 503 | ||
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms, | |||
563 | * (optional) module which owns it. Needs preempt disabled or module_mutex. */ | 563 | * (optional) module which owns it. Needs preempt disabled or module_mutex. */ |
564 | const struct kernel_symbol *find_symbol(const char *name, | 564 | const struct kernel_symbol *find_symbol(const char *name, |
565 | struct module **owner, | 565 | struct module **owner, |
566 | const unsigned long **crc, | 566 | const s32 **crc, |
567 | bool gplok, | 567 | bool gplok, |
568 | bool warn) | 568 | bool warn) |
569 | { | 569 | { |
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason) | |||
1249 | } | 1249 | } |
1250 | 1250 | ||
1251 | #ifdef CONFIG_MODVERSIONS | 1251 | #ifdef CONFIG_MODVERSIONS |
1252 | /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */ | 1252 | |
1253 | static unsigned long maybe_relocated(unsigned long crc, | 1253 | static u32 resolve_rel_crc(const s32 *crc) |
1254 | const struct module *crc_owner) | ||
1255 | { | 1254 | { |
1256 | #ifdef ARCH_RELOCATES_KCRCTAB | 1255 | return *(u32 *)((void *)crc + *crc); |
1257 | if (crc_owner == NULL) | ||
1258 | return crc - (unsigned long)reloc_start; | ||
1259 | #endif | ||
1260 | return crc; | ||
1261 | } | 1256 | } |
1262 | 1257 | ||
1263 | static int check_version(Elf_Shdr *sechdrs, | 1258 | static int check_version(Elf_Shdr *sechdrs, |
1264 | unsigned int versindex, | 1259 | unsigned int versindex, |
1265 | const char *symname, | 1260 | const char *symname, |
1266 | struct module *mod, | 1261 | struct module *mod, |
1267 | const unsigned long *crc, | 1262 | const s32 *crc) |
1268 | const struct module *crc_owner) | ||
1269 | { | 1263 | { |
1270 | unsigned int i, num_versions; | 1264 | unsigned int i, num_versions; |
1271 | struct modversion_info *versions; | 1265 | struct modversion_info *versions; |
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs, | |||
1283 | / sizeof(struct modversion_info); | 1277 | / sizeof(struct modversion_info); |
1284 | 1278 | ||
1285 | for (i = 0; i < num_versions; i++) { | 1279 | for (i = 0; i < num_versions; i++) { |
1280 | u32 crcval; | ||
1281 | |||
1286 | if (strcmp(versions[i].name, symname) != 0) | 1282 | if (strcmp(versions[i].name, symname) != 0) |
1287 | continue; | 1283 | continue; |
1288 | 1284 | ||
1289 | if (versions[i].crc == maybe_relocated(*crc, crc_owner)) | 1285 | if (IS_ENABLED(CONFIG_MODULE_REL_CRCS)) |
1286 | crcval = resolve_rel_crc(crc); | ||
1287 | else | ||
1288 | crcval = *crc; | ||
1289 | if (versions[i].crc == crcval) | ||
1290 | return 1; | 1290 | return 1; |
1291 | pr_debug("Found checksum %lX vs module %lX\n", | 1291 | pr_debug("Found checksum %X vs module %lX\n", |
1292 | maybe_relocated(*crc, crc_owner), versions[i].crc); | 1292 | crcval, versions[i].crc); |
1293 | goto bad_version; | 1293 | goto bad_version; |
1294 | } | 1294 | } |
1295 | 1295 | ||
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, | |||
1307 | unsigned int versindex, | 1307 | unsigned int versindex, |
1308 | struct module *mod) | 1308 | struct module *mod) |
1309 | { | 1309 | { |
1310 | const unsigned long *crc; | 1310 | const s32 *crc; |
1311 | 1311 | ||
1312 | /* | 1312 | /* |
1313 | * Since this should be found in kernel (which can't be removed), no | 1313 | * Since this should be found in kernel (which can't be removed), no |
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, | |||
1321 | } | 1321 | } |
1322 | preempt_enable(); | 1322 | preempt_enable(); |
1323 | return check_version(sechdrs, versindex, | 1323 | return check_version(sechdrs, versindex, |
1324 | VMLINUX_SYMBOL_STR(module_layout), mod, crc, | 1324 | VMLINUX_SYMBOL_STR(module_layout), mod, crc); |
1325 | NULL); | ||
1326 | } | 1325 | } |
1327 | 1326 | ||
1328 | /* First part is kernel version, which we ignore if module has crcs. */ | 1327 | /* First part is kernel version, which we ignore if module has crcs. */ |
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs, | |||
1340 | unsigned int versindex, | 1339 | unsigned int versindex, |
1341 | const char *symname, | 1340 | const char *symname, |
1342 | struct module *mod, | 1341 | struct module *mod, |
1343 | const unsigned long *crc, | 1342 | const s32 *crc) |
1344 | const struct module *crc_owner) | ||
1345 | { | 1343 | { |
1346 | return 1; | 1344 | return 1; |
1347 | } | 1345 | } |
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, | |||
1368 | { | 1366 | { |
1369 | struct module *owner; | 1367 | struct module *owner; |
1370 | const struct kernel_symbol *sym; | 1368 | const struct kernel_symbol *sym; |
1371 | const unsigned long *crc; | 1369 | const s32 *crc; |
1372 | int err; | 1370 | int err; |
1373 | 1371 | ||
1374 | /* | 1372 | /* |
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, | |||
1383 | if (!sym) | 1381 | if (!sym) |
1384 | goto unlock; | 1382 | goto unlock; |
1385 | 1383 | ||
1386 | if (!check_version(info->sechdrs, info->index.vers, name, mod, crc, | 1384 | if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) { |
1387 | owner)) { | ||
1388 | sym = ERR_PTR(-EINVAL); | 1385 | sym = ERR_PTR(-EINVAL); |
1389 | goto getname; | 1386 | goto getname; |
1390 | } | 1387 | } |
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index b6e4c16377c7..9c15a9124e83 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c | |||
@@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces) | |||
18 | if (WARN_ON(!trace->entries)) | 18 | if (WARN_ON(!trace->entries)) |
19 | return; | 19 | return; |
20 | 20 | ||
21 | for (i = 0; i < trace->nr_entries; i++) { | 21 | for (i = 0; i < trace->nr_entries; i++) |
22 | printk("%*c", 1 + spaces, ' '); | 22 | printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]); |
23 | print_ip_sym(trace->entries[i]); | ||
24 | } | ||
25 | } | 23 | } |
26 | EXPORT_SYMBOL_GPL(print_stack_trace); | 24 | EXPORT_SYMBOL_GPL(print_stack_trace); |
27 | 25 | ||
@@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size, | |||
29 | struct stack_trace *trace, int spaces) | 27 | struct stack_trace *trace, int spaces) |
30 | { | 28 | { |
31 | int i; | 29 | int i; |
32 | unsigned long ip; | ||
33 | int generated; | 30 | int generated; |
34 | int total = 0; | 31 | int total = 0; |
35 | 32 | ||
@@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size, | |||
37 | return 0; | 34 | return 0; |
38 | 35 | ||
39 | for (i = 0; i < trace->nr_entries; i++) { | 36 | for (i = 0; i < trace->nr_entries; i++) { |
40 | ip = trace->entries[i]; | 37 | generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ', |
41 | generated = snprintf(buf, size, "%*c[<%p>] %pS\n", | 38 | (void *)trace->entries[i]); |
42 | 1 + spaces, ' ', (void *) ip, (void *) ip); | ||
43 | 39 | ||
44 | total += generated; | 40 | total += generated; |
45 | 41 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 74e0388cc88d..fc6f740d0277 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -725,6 +725,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, | |||
725 | */ | 725 | */ |
726 | if (delta == 0) { | 726 | if (delta == 0) { |
727 | tick_nohz_restart(ts, now); | 727 | tick_nohz_restart(ts, now); |
728 | /* | ||
729 | * Make sure next tick stop doesn't get fooled by past | ||
730 | * clock deadline | ||
731 | */ | ||
732 | ts->next_tick = 0; | ||
728 | goto out; | 733 | goto out; |
729 | } | 734 | } |
730 | } | 735 | } |
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 775569ec50d0..af344a1bf0d0 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c | |||
@@ -266,7 +266,7 @@ out: | |||
266 | static struct cpumask save_cpumask; | 266 | static struct cpumask save_cpumask; |
267 | static bool disable_migrate; | 267 | static bool disable_migrate; |
268 | 268 | ||
269 | static void move_to_next_cpu(void) | 269 | static void move_to_next_cpu(bool initmask) |
270 | { | 270 | { |
271 | static struct cpumask *current_mask; | 271 | static struct cpumask *current_mask; |
272 | int next_cpu; | 272 | int next_cpu; |
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void) | |||
275 | return; | 275 | return; |
276 | 276 | ||
277 | /* Just pick the first CPU on first iteration */ | 277 | /* Just pick the first CPU on first iteration */ |
278 | if (!current_mask) { | 278 | if (initmask) { |
279 | current_mask = &save_cpumask; | 279 | current_mask = &save_cpumask; |
280 | get_online_cpus(); | 280 | get_online_cpus(); |
281 | cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); | 281 | cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); |
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void) | |||
330 | static int kthread_fn(void *data) | 330 | static int kthread_fn(void *data) |
331 | { | 331 | { |
332 | u64 interval; | 332 | u64 interval; |
333 | bool initmask = true; | ||
333 | 334 | ||
334 | while (!kthread_should_stop()) { | 335 | while (!kthread_should_stop()) { |
335 | 336 | ||
336 | move_to_next_cpu(); | 337 | move_to_next_cpu(initmask); |
338 | initmask = false; | ||
337 | 339 | ||
338 | local_irq_disable(); | 340 | local_irq_disable(); |
339 | get_sample(); | 341 | get_sample(); |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index a133ecd741e4..7ad9e53ad174 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6) | |||
1372 | return a1 + a2 + a3 + a4 + a5 + a6; | 1372 | return a1 + a2 + a3 + a4 + a5 + a6; |
1373 | } | 1373 | } |
1374 | 1374 | ||
1375 | static struct __init trace_event_file * | 1375 | static __init struct trace_event_file * |
1376 | find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) | 1376 | find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) |
1377 | { | 1377 | { |
1378 | struct trace_event_file *file; | 1378 | struct trace_event_file *file; |
diff --git a/kernel/ucount.c b/kernel/ucount.c index 4bbd38ec3788..95c6336fc2b3 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c | |||
@@ -227,11 +227,10 @@ static __init int user_namespace_sysctl_init(void) | |||
227 | * properly. | 227 | * properly. |
228 | */ | 228 | */ |
229 | user_header = register_sysctl("user", empty); | 229 | user_header = register_sysctl("user", empty); |
230 | kmemleak_ignore(user_header); | ||
230 | BUG_ON(!user_header); | 231 | BUG_ON(!user_header); |
231 | BUG_ON(!setup_userns_sysctls(&init_user_ns)); | 232 | BUG_ON(!setup_userns_sysctls(&init_user_ns)); |
232 | #endif | 233 | #endif |
233 | return 0; | 234 | return 0; |
234 | } | 235 | } |
235 | subsys_initcall(user_namespace_sysctl_init); | 236 | subsys_initcall(user_namespace_sysctl_init); |
236 | |||
237 | |||
diff --git a/mm/filemap.c b/mm/filemap.c index b772a33ef640..3f9afded581b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos, | |||
1791 | 1791 | ||
1792 | cond_resched(); | 1792 | cond_resched(); |
1793 | find_page: | 1793 | find_page: |
1794 | if (fatal_signal_pending(current)) { | ||
1795 | error = -EINTR; | ||
1796 | goto out; | ||
1797 | } | ||
1798 | |||
1794 | page = find_get_page(mapping, index); | 1799 | page = find_get_page(mapping, index); |
1795 | if (!page) { | 1800 | if (!page) { |
1796 | page_cache_sync_readahead(mapping, | 1801 | page_cache_sync_readahead(mapping, |
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index b82b3e215157..f479365530b6 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c | |||
@@ -13,6 +13,7 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/ftrace.h> | ||
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
18 | #include <linux/printk.h> | 19 | #include <linux/printk.h> |
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size, | |||
300 | if (likely(!kasan_report_enabled())) | 301 | if (likely(!kasan_report_enabled())) |
301 | return; | 302 | return; |
302 | 303 | ||
304 | disable_trace_on_warning(); | ||
305 | |||
303 | info.access_addr = (void *)addr; | 306 | info.access_addr = (void *)addr; |
304 | info.access_size = size; | 307 | info.access_size = size; |
305 | info.is_write = is_write; | 308 | info.is_write = is_write; |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index ca2723d47338..b8c11e063ff0 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) | |||
1483 | } | 1483 | } |
1484 | 1484 | ||
1485 | /* | 1485 | /* |
1486 | * Confirm all pages in a range [start, end) is belongs to the same zone. | 1486 | * Confirm all pages in a range [start, end) belong to the same zone. |
1487 | * When true, return its valid [start, end). | ||
1487 | */ | 1488 | */ |
1488 | int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) | 1489 | int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, |
1490 | unsigned long *valid_start, unsigned long *valid_end) | ||
1489 | { | 1491 | { |
1490 | unsigned long pfn, sec_end_pfn; | 1492 | unsigned long pfn, sec_end_pfn; |
1493 | unsigned long start, end; | ||
1491 | struct zone *zone = NULL; | 1494 | struct zone *zone = NULL; |
1492 | struct page *page; | 1495 | struct page *page; |
1493 | int i; | 1496 | int i; |
1494 | for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); | 1497 | for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); |
1495 | pfn < end_pfn; | 1498 | pfn < end_pfn; |
1496 | pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { | 1499 | pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { |
1497 | /* Make sure the memory section is present first */ | 1500 | /* Make sure the memory section is present first */ |
1498 | if (!present_section_nr(pfn_to_section_nr(pfn))) | 1501 | if (!present_section_nr(pfn_to_section_nr(pfn))) |
1499 | continue; | 1502 | continue; |
@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) | |||
1509 | page = pfn_to_page(pfn + i); | 1512 | page = pfn_to_page(pfn + i); |
1510 | if (zone && page_zone(page) != zone) | 1513 | if (zone && page_zone(page) != zone) |
1511 | return 0; | 1514 | return 0; |
1515 | if (!zone) | ||
1516 | start = pfn + i; | ||
1512 | zone = page_zone(page); | 1517 | zone = page_zone(page); |
1518 | end = pfn + MAX_ORDER_NR_PAGES; | ||
1513 | } | 1519 | } |
1514 | } | 1520 | } |
1515 | return 1; | 1521 | |
1522 | if (zone) { | ||
1523 | *valid_start = start; | ||
1524 | *valid_end = end; | ||
1525 | return 1; | ||
1526 | } else { | ||
1527 | return 0; | ||
1528 | } | ||
1516 | } | 1529 | } |
1517 | 1530 | ||
1518 | /* | 1531 | /* |
@@ -1839,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1839 | long offlined_pages; | 1852 | long offlined_pages; |
1840 | int ret, drain, retry_max, node; | 1853 | int ret, drain, retry_max, node; |
1841 | unsigned long flags; | 1854 | unsigned long flags; |
1855 | unsigned long valid_start, valid_end; | ||
1842 | struct zone *zone; | 1856 | struct zone *zone; |
1843 | struct memory_notify arg; | 1857 | struct memory_notify arg; |
1844 | 1858 | ||
@@ -1849,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1849 | return -EINVAL; | 1863 | return -EINVAL; |
1850 | /* This makes hotplug much easier...and readable. | 1864 | /* This makes hotplug much easier...and readable. |
1851 | we assume this for now. .*/ | 1865 | we assume this for now. .*/ |
1852 | if (!test_pages_in_a_zone(start_pfn, end_pfn)) | 1866 | if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end)) |
1853 | return -EINVAL; | 1867 | return -EINVAL; |
1854 | 1868 | ||
1855 | zone = page_zone(pfn_to_page(start_pfn)); | 1869 | zone = page_zone(pfn_to_page(valid_start)); |
1856 | node = zone_to_nid(zone); | 1870 | node = zone_to_nid(zone); |
1857 | nr_pages = end_pfn - start_pfn; | 1871 | nr_pages = end_pfn - start_pfn; |
1858 | 1872 | ||
diff --git a/mm/shmem.c b/mm/shmem.c index bb53285a1d99..3a7587a0314d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, | |||
415 | struct shrink_control *sc, unsigned long nr_to_split) | 415 | struct shrink_control *sc, unsigned long nr_to_split) |
416 | { | 416 | { |
417 | LIST_HEAD(list), *pos, *next; | 417 | LIST_HEAD(list), *pos, *next; |
418 | LIST_HEAD(to_remove); | ||
418 | struct inode *inode; | 419 | struct inode *inode; |
419 | struct shmem_inode_info *info; | 420 | struct shmem_inode_info *info; |
420 | struct page *page; | 421 | struct page *page; |
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, | |||
441 | /* Check if there's anything to gain */ | 442 | /* Check if there's anything to gain */ |
442 | if (round_up(inode->i_size, PAGE_SIZE) == | 443 | if (round_up(inode->i_size, PAGE_SIZE) == |
443 | round_up(inode->i_size, HPAGE_PMD_SIZE)) { | 444 | round_up(inode->i_size, HPAGE_PMD_SIZE)) { |
444 | list_del_init(&info->shrinklist); | 445 | list_move(&info->shrinklist, &to_remove); |
445 | removed++; | 446 | removed++; |
446 | iput(inode); | ||
447 | goto next; | 447 | goto next; |
448 | } | 448 | } |
449 | 449 | ||
@@ -454,6 +454,13 @@ next: | |||
454 | } | 454 | } |
455 | spin_unlock(&sbinfo->shrinklist_lock); | 455 | spin_unlock(&sbinfo->shrinklist_lock); |
456 | 456 | ||
457 | list_for_each_safe(pos, next, &to_remove) { | ||
458 | info = list_entry(pos, struct shmem_inode_info, shrinklist); | ||
459 | inode = &info->vfs_inode; | ||
460 | list_del_init(&info->shrinklist); | ||
461 | iput(inode); | ||
462 | } | ||
463 | |||
457 | list_for_each_safe(pos, next, &list) { | 464 | list_for_each_safe(pos, next, &list) { |
458 | int ret; | 465 | int ret; |
459 | 466 | ||
@@ -1422,6 +1422,10 @@ static int init_cache_random_seq(struct kmem_cache *s) | |||
1422 | int err; | 1422 | int err; |
1423 | unsigned long i, count = oo_objects(s->oo); | 1423 | unsigned long i, count = oo_objects(s->oo); |
1424 | 1424 | ||
1425 | /* Bailout if already initialised */ | ||
1426 | if (s->random_seq) | ||
1427 | return 0; | ||
1428 | |||
1425 | err = cache_random_seq_create(s, count, GFP_KERNEL); | 1429 | err = cache_random_seq_create(s, count, GFP_KERNEL); |
1426 | if (err) { | 1430 | if (err) { |
1427 | pr_err("SLUB: Unable to initialize free list for %s\n", | 1431 | pr_err("SLUB: Unable to initialize free list for %s\n", |
diff --git a/mm/zswap.c b/mm/zswap.c index 067a0d62f318..cabf09e0128b 100644 --- a/mm/zswap.c +++ b/mm/zswap.c | |||
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry; | |||
78 | 78 | ||
79 | /* Enable/disable zswap (disabled by default) */ | 79 | /* Enable/disable zswap (disabled by default) */ |
80 | static bool zswap_enabled; | 80 | static bool zswap_enabled; |
81 | module_param_named(enabled, zswap_enabled, bool, 0644); | 81 | static int zswap_enabled_param_set(const char *, |
82 | const struct kernel_param *); | ||
83 | static struct kernel_param_ops zswap_enabled_param_ops = { | ||
84 | .set = zswap_enabled_param_set, | ||
85 | .get = param_get_bool, | ||
86 | }; | ||
87 | module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644); | ||
82 | 88 | ||
83 | /* Crypto compressor to use */ | 89 | /* Crypto compressor to use */ |
84 | #define ZSWAP_COMPRESSOR_DEFAULT "lzo" | 90 | #define ZSWAP_COMPRESSOR_DEFAULT "lzo" |
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0); | |||
176 | /* used by param callback function */ | 182 | /* used by param callback function */ |
177 | static bool zswap_init_started; | 183 | static bool zswap_init_started; |
178 | 184 | ||
185 | /* fatal error during init */ | ||
186 | static bool zswap_init_failed; | ||
187 | |||
179 | /********************************* | 188 | /********************************* |
180 | * helpers and fwd declarations | 189 | * helpers and fwd declarations |
181 | **********************************/ | 190 | **********************************/ |
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp, | |||
624 | char *s = strstrip((char *)val); | 633 | char *s = strstrip((char *)val); |
625 | int ret; | 634 | int ret; |
626 | 635 | ||
636 | if (zswap_init_failed) { | ||
637 | pr_err("can't set param, initialization failed\n"); | ||
638 | return -ENODEV; | ||
639 | } | ||
640 | |||
627 | /* no change required */ | 641 | /* no change required */ |
628 | if (!strcmp(s, *(char **)kp->arg)) | 642 | if (!strcmp(s, *(char **)kp->arg)) |
629 | return 0; | 643 | return 0; |
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val, | |||
703 | return __zswap_param_set(val, kp, NULL, zswap_compressor); | 717 | return __zswap_param_set(val, kp, NULL, zswap_compressor); |
704 | } | 718 | } |
705 | 719 | ||
720 | static int zswap_enabled_param_set(const char *val, | ||
721 | const struct kernel_param *kp) | ||
722 | { | ||
723 | if (zswap_init_failed) { | ||
724 | pr_err("can't enable, initialization failed\n"); | ||
725 | return -ENODEV; | ||
726 | } | ||
727 | |||
728 | return param_set_bool(val, kp); | ||
729 | } | ||
730 | |||
706 | /********************************* | 731 | /********************************* |
707 | * writeback code | 732 | * writeback code |
708 | **********************************/ | 733 | **********************************/ |
@@ -1201,6 +1226,9 @@ hp_fail: | |||
1201 | dstmem_fail: | 1226 | dstmem_fail: |
1202 | zswap_entry_cache_destroy(); | 1227 | zswap_entry_cache_destroy(); |
1203 | cache_fail: | 1228 | cache_fail: |
1229 | /* if built-in, we aren't unloaded on failure; don't allow use */ | ||
1230 | zswap_init_failed = true; | ||
1231 | zswap_enabled = false; | ||
1204 | return -ENOMEM; | 1232 | return -ENOMEM; |
1205 | } | 1233 | } |
1206 | /* must be late so crypto has time to come up */ | 1234 | /* must be late so crypto has time to come up */ |
diff --git a/net/can/af_can.c b/net/can/af_can.c index 1108079d934f..5488e4a6ccd0 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, | |||
445 | * @func: callback function on filter match | 445 | * @func: callback function on filter match |
446 | * @data: returned parameter for callback function | 446 | * @data: returned parameter for callback function |
447 | * @ident: string for calling module identification | 447 | * @ident: string for calling module identification |
448 | * @sk: socket pointer (might be NULL) | ||
448 | * | 449 | * |
449 | * Description: | 450 | * Description: |
450 | * Invokes the callback function with the received sk_buff and the given | 451 | * Invokes the callback function with the received sk_buff and the given |
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, | |||
468 | */ | 469 | */ |
469 | int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, | 470 | int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, |
470 | void (*func)(struct sk_buff *, void *), void *data, | 471 | void (*func)(struct sk_buff *, void *), void *data, |
471 | char *ident) | 472 | char *ident, struct sock *sk) |
472 | { | 473 | { |
473 | struct receiver *r; | 474 | struct receiver *r; |
474 | struct hlist_head *rl; | 475 | struct hlist_head *rl; |
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, | |||
496 | r->func = func; | 497 | r->func = func; |
497 | r->data = data; | 498 | r->data = data; |
498 | r->ident = ident; | 499 | r->ident = ident; |
500 | r->sk = sk; | ||
499 | 501 | ||
500 | hlist_add_head_rcu(&r->list, rl); | 502 | hlist_add_head_rcu(&r->list, rl); |
501 | d->entries++; | 503 | d->entries++; |
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register); | |||
520 | static void can_rx_delete_receiver(struct rcu_head *rp) | 522 | static void can_rx_delete_receiver(struct rcu_head *rp) |
521 | { | 523 | { |
522 | struct receiver *r = container_of(rp, struct receiver, rcu); | 524 | struct receiver *r = container_of(rp, struct receiver, rcu); |
525 | struct sock *sk = r->sk; | ||
523 | 526 | ||
524 | kmem_cache_free(rcv_cache, r); | 527 | kmem_cache_free(rcv_cache, r); |
528 | if (sk) | ||
529 | sock_put(sk); | ||
525 | } | 530 | } |
526 | 531 | ||
527 | /** | 532 | /** |
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, | |||
596 | spin_unlock(&can_rcvlists_lock); | 601 | spin_unlock(&can_rcvlists_lock); |
597 | 602 | ||
598 | /* schedule the receiver item for deletion */ | 603 | /* schedule the receiver item for deletion */ |
599 | if (r) | 604 | if (r) { |
605 | if (r->sk) | ||
606 | sock_hold(r->sk); | ||
600 | call_rcu(&r->rcu, can_rx_delete_receiver); | 607 | call_rcu(&r->rcu, can_rx_delete_receiver); |
608 | } | ||
601 | } | 609 | } |
602 | EXPORT_SYMBOL(can_rx_unregister); | 610 | EXPORT_SYMBOL(can_rx_unregister); |
603 | 611 | ||
diff --git a/net/can/af_can.h b/net/can/af_can.h index fca0fe9fc45a..b86f5129e838 100644 --- a/net/can/af_can.h +++ b/net/can/af_can.h | |||
@@ -50,13 +50,14 @@ | |||
50 | 50 | ||
51 | struct receiver { | 51 | struct receiver { |
52 | struct hlist_node list; | 52 | struct hlist_node list; |
53 | struct rcu_head rcu; | ||
54 | canid_t can_id; | 53 | canid_t can_id; |
55 | canid_t mask; | 54 | canid_t mask; |
56 | unsigned long matches; | 55 | unsigned long matches; |
57 | void (*func)(struct sk_buff *, void *); | 56 | void (*func)(struct sk_buff *, void *); |
58 | void *data; | 57 | void *data; |
59 | char *ident; | 58 | char *ident; |
59 | struct sock *sk; | ||
60 | struct rcu_head rcu; | ||
60 | }; | 61 | }; |
61 | 62 | ||
62 | #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) | 63 | #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 21ac75390e3d..95d13b233c65 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, | |||
734 | 734 | ||
735 | static void bcm_remove_op(struct bcm_op *op) | 735 | static void bcm_remove_op(struct bcm_op *op) |
736 | { | 736 | { |
737 | hrtimer_cancel(&op->timer); | 737 | if (op->tsklet.func) { |
738 | hrtimer_cancel(&op->thrtimer); | 738 | while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) || |
739 | 739 | test_bit(TASKLET_STATE_RUN, &op->tsklet.state) || | |
740 | if (op->tsklet.func) | 740 | hrtimer_active(&op->timer)) { |
741 | tasklet_kill(&op->tsklet); | 741 | hrtimer_cancel(&op->timer); |
742 | tasklet_kill(&op->tsklet); | ||
743 | } | ||
744 | } | ||
742 | 745 | ||
743 | if (op->thrtsklet.func) | 746 | if (op->thrtsklet.func) { |
744 | tasklet_kill(&op->thrtsklet); | 747 | while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) || |
748 | test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) || | ||
749 | hrtimer_active(&op->thrtimer)) { | ||
750 | hrtimer_cancel(&op->thrtimer); | ||
751 | tasklet_kill(&op->thrtsklet); | ||
752 | } | ||
753 | } | ||
745 | 754 | ||
746 | if ((op->frames) && (op->frames != &op->sframe)) | 755 | if ((op->frames) && (op->frames != &op->sframe)) |
747 | kfree(op->frames); | 756 | kfree(op->frames); |
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1216 | err = can_rx_register(dev, op->can_id, | 1225 | err = can_rx_register(dev, op->can_id, |
1217 | REGMASK(op->can_id), | 1226 | REGMASK(op->can_id), |
1218 | bcm_rx_handler, op, | 1227 | bcm_rx_handler, op, |
1219 | "bcm"); | 1228 | "bcm", sk); |
1220 | 1229 | ||
1221 | op->rx_reg_dev = dev; | 1230 | op->rx_reg_dev = dev; |
1222 | dev_put(dev); | 1231 | dev_put(dev); |
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1225 | } else | 1234 | } else |
1226 | err = can_rx_register(NULL, op->can_id, | 1235 | err = can_rx_register(NULL, op->can_id, |
1227 | REGMASK(op->can_id), | 1236 | REGMASK(op->can_id), |
1228 | bcm_rx_handler, op, "bcm"); | 1237 | bcm_rx_handler, op, "bcm", sk); |
1229 | if (err) { | 1238 | if (err) { |
1230 | /* this bcm rx op is broken -> remove it */ | 1239 | /* this bcm rx op is broken -> remove it */ |
1231 | list_del(&op->list); | 1240 | list_del(&op->list); |
diff --git a/net/can/gw.c b/net/can/gw.c index a54ab0c82104..7056a1a2bb70 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj) | |||
442 | { | 442 | { |
443 | return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, | 443 | return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, |
444 | gwj->ccgw.filter.can_mask, can_can_gw_rcv, | 444 | gwj->ccgw.filter.can_mask, can_can_gw_rcv, |
445 | gwj, "gw"); | 445 | gwj, "gw", NULL); |
446 | } | 446 | } |
447 | 447 | ||
448 | static inline void cgw_unregister_filter(struct cgw_job *gwj) | 448 | static inline void cgw_unregister_filter(struct cgw_job *gwj) |
diff --git a/net/can/raw.c b/net/can/raw.c index b075f028d7e2..6dc546a06673 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk, | |||
190 | for (i = 0; i < count; i++) { | 190 | for (i = 0; i < count; i++) { |
191 | err = can_rx_register(dev, filter[i].can_id, | 191 | err = can_rx_register(dev, filter[i].can_id, |
192 | filter[i].can_mask, | 192 | filter[i].can_mask, |
193 | raw_rcv, sk, "raw"); | 193 | raw_rcv, sk, "raw", sk); |
194 | if (err) { | 194 | if (err) { |
195 | /* clean up successfully registered filters */ | 195 | /* clean up successfully registered filters */ |
196 | while (--i >= 0) | 196 | while (--i >= 0) |
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk, | |||
211 | 211 | ||
212 | if (err_mask) | 212 | if (err_mask) |
213 | err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, | 213 | err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, |
214 | raw_rcv, sk, "raw"); | 214 | raw_rcv, sk, "raw", sk); |
215 | 215 | ||
216 | return err; | 216 | return err; |
217 | } | 217 | } |
diff --git a/net/core/datagram.c b/net/core/datagram.c index 662bea587165..ea633342ab0d 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -332,7 +332,9 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len) | |||
332 | EXPORT_SYMBOL(__skb_free_datagram_locked); | 332 | EXPORT_SYMBOL(__skb_free_datagram_locked); |
333 | 333 | ||
334 | int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, | 334 | int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, |
335 | unsigned int flags) | 335 | unsigned int flags, |
336 | void (*destructor)(struct sock *sk, | ||
337 | struct sk_buff *skb)) | ||
336 | { | 338 | { |
337 | int err = 0; | 339 | int err = 0; |
338 | 340 | ||
@@ -342,6 +344,8 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb, | |||
342 | if (skb == skb_peek(&sk->sk_receive_queue)) { | 344 | if (skb == skb_peek(&sk->sk_receive_queue)) { |
343 | __skb_unlink(skb, &sk->sk_receive_queue); | 345 | __skb_unlink(skb, &sk->sk_receive_queue); |
344 | atomic_dec(&skb->users); | 346 | atomic_dec(&skb->users); |
347 | if (destructor) | ||
348 | destructor(sk, skb); | ||
345 | err = 0; | 349 | err = 0; |
346 | } | 350 | } |
347 | spin_unlock_bh(&sk->sk_receive_queue.lock); | 351 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
@@ -375,7 +379,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb); | |||
375 | 379 | ||
376 | int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) | 380 | int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) |
377 | { | 381 | { |
378 | int err = __sk_queue_drop_skb(sk, skb, flags); | 382 | int err = __sk_queue_drop_skb(sk, skb, flags, NULL); |
379 | 383 | ||
380 | kfree_skb(skb); | 384 | kfree_skb(skb); |
381 | sk_mem_reclaim_partial(sk); | 385 | sk_mem_reclaim_partial(sk); |
diff --git a/net/core/dev.c b/net/core/dev.c index 7f218e095361..29101c98399f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1695,24 +1695,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); | |||
1695 | 1695 | ||
1696 | static struct static_key netstamp_needed __read_mostly; | 1696 | static struct static_key netstamp_needed __read_mostly; |
1697 | #ifdef HAVE_JUMP_LABEL | 1697 | #ifdef HAVE_JUMP_LABEL |
1698 | /* We are not allowed to call static_key_slow_dec() from irq context | ||
1699 | * If net_disable_timestamp() is called from irq context, defer the | ||
1700 | * static_key_slow_dec() calls. | ||
1701 | */ | ||
1702 | static atomic_t netstamp_needed_deferred; | 1698 | static atomic_t netstamp_needed_deferred; |
1703 | #endif | 1699 | static void netstamp_clear(struct work_struct *work) |
1704 | |||
1705 | void net_enable_timestamp(void) | ||
1706 | { | 1700 | { |
1707 | #ifdef HAVE_JUMP_LABEL | ||
1708 | int deferred = atomic_xchg(&netstamp_needed_deferred, 0); | 1701 | int deferred = atomic_xchg(&netstamp_needed_deferred, 0); |
1709 | 1702 | ||
1710 | if (deferred) { | 1703 | while (deferred--) |
1711 | while (--deferred) | 1704 | static_key_slow_dec(&netstamp_needed); |
1712 | static_key_slow_dec(&netstamp_needed); | 1705 | } |
1713 | return; | 1706 | static DECLARE_WORK(netstamp_work, netstamp_clear); |
1714 | } | ||
1715 | #endif | 1707 | #endif |
1708 | |||
1709 | void net_enable_timestamp(void) | ||
1710 | { | ||
1716 | static_key_slow_inc(&netstamp_needed); | 1711 | static_key_slow_inc(&netstamp_needed); |
1717 | } | 1712 | } |
1718 | EXPORT_SYMBOL(net_enable_timestamp); | 1713 | EXPORT_SYMBOL(net_enable_timestamp); |
@@ -1720,12 +1715,12 @@ EXPORT_SYMBOL(net_enable_timestamp); | |||
1720 | void net_disable_timestamp(void) | 1715 | void net_disable_timestamp(void) |
1721 | { | 1716 | { |
1722 | #ifdef HAVE_JUMP_LABEL | 1717 | #ifdef HAVE_JUMP_LABEL |
1723 | if (in_interrupt()) { | 1718 | /* net_disable_timestamp() can be called from non process context */ |
1724 | atomic_inc(&netstamp_needed_deferred); | 1719 | atomic_inc(&netstamp_needed_deferred); |
1725 | return; | 1720 | schedule_work(&netstamp_work); |
1726 | } | 1721 | #else |
1727 | #endif | ||
1728 | static_key_slow_dec(&netstamp_needed); | 1722 | static_key_slow_dec(&netstamp_needed); |
1723 | #endif | ||
1729 | } | 1724 | } |
1730 | EXPORT_SYMBOL(net_disable_timestamp); | 1725 | EXPORT_SYMBOL(net_disable_timestamp); |
1731 | 1726 | ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 236a21e3c878..d92de0a1f0a4 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -1405,9 +1405,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) | |||
1405 | if (regs.len > reglen) | 1405 | if (regs.len > reglen) |
1406 | regs.len = reglen; | 1406 | regs.len = reglen; |
1407 | 1407 | ||
1408 | regbuf = vzalloc(reglen); | 1408 | regbuf = NULL; |
1409 | if (reglen && !regbuf) | 1409 | if (reglen) { |
1410 | return -ENOMEM; | 1410 | regbuf = vzalloc(reglen); |
1411 | if (!regbuf) | ||
1412 | return -ENOMEM; | ||
1413 | } | ||
1411 | 1414 | ||
1412 | ops->get_regs(dev, ®s, regbuf); | 1415 | ops->get_regs(dev, ®s, regbuf); |
1413 | 1416 | ||
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index da3862124545..0f99297b2fb3 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c | |||
@@ -273,6 +273,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index, | |||
273 | if (err) { | 273 | if (err) { |
274 | dev_warn(ds->dev, "Failed to create slave %d: %d\n", | 274 | dev_warn(ds->dev, "Failed to create slave %d: %d\n", |
275 | index, err); | 275 | index, err); |
276 | ds->ports[index].netdev = NULL; | ||
276 | return err; | 277 | return err; |
277 | } | 278 | } |
278 | 279 | ||
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 8c5a479681ca..516c87e75de7 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -356,6 +356,7 @@ void ether_setup(struct net_device *dev) | |||
356 | dev->header_ops = ð_header_ops; | 356 | dev->header_ops = ð_header_ops; |
357 | dev->type = ARPHRD_ETHER; | 357 | dev->type = ARPHRD_ETHER; |
358 | dev->hard_header_len = ETH_HLEN; | 358 | dev->hard_header_len = ETH_HLEN; |
359 | dev->min_header_len = ETH_HLEN; | ||
359 | dev->mtu = ETH_DATA_LEN; | 360 | dev->mtu = ETH_DATA_LEN; |
360 | dev->min_mtu = ETH_MIN_MTU; | 361 | dev->min_mtu = ETH_MIN_MTU; |
361 | dev->max_mtu = ETH_DATA_LEN; | 362 | dev->max_mtu = ETH_DATA_LEN; |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 72d6f056d863..ae206163c273 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) | |||
1587 | goto validate_return_locked; | 1587 | goto validate_return_locked; |
1588 | } | 1588 | } |
1589 | 1589 | ||
1590 | if (opt_iter + 1 == opt_len) { | ||
1591 | err_offset = opt_iter; | ||
1592 | goto validate_return_locked; | ||
1593 | } | ||
1590 | tag_len = tag[1]; | 1594 | tag_len = tag[1]; |
1591 | if (tag_len > (opt_len - opt_iter)) { | 1595 | if (tag_len > (opt_len - opt_iter)) { |
1592 | err_offset = opt_iter + 1; | 1596 | err_offset = opt_iter + 1; |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 5b15459955f8..44fd86de2823 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -1172,6 +1172,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) | |||
1172 | psf->sf_crcount = im->crcount; | 1172 | psf->sf_crcount = im->crcount; |
1173 | } | 1173 | } |
1174 | in_dev_put(pmc->interface); | 1174 | in_dev_put(pmc->interface); |
1175 | kfree(pmc); | ||
1175 | } | 1176 | } |
1176 | spin_unlock_bh(&im->lock); | 1177 | spin_unlock_bh(&im->lock); |
1177 | } | 1178 | } |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 53ae0c6315ad..900011709e3b 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -1238,7 +1238,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) | |||
1238 | pktinfo->ipi_ifindex = 0; | 1238 | pktinfo->ipi_ifindex = 0; |
1239 | pktinfo->ipi_spec_dst.s_addr = 0; | 1239 | pktinfo->ipi_spec_dst.s_addr = 0; |
1240 | } | 1240 | } |
1241 | skb_dst_drop(skb); | 1241 | /* We need to keep the dst for __ip_options_echo() |
1242 | * We could restrict the test to opt.ts_needtime || opt.srr, | ||
1243 | * but the following is good enough as IP options are not often used. | ||
1244 | */ | ||
1245 | if (unlikely(IPCB(skb)->opt.optlen)) | ||
1246 | skb_dst_force(skb); | ||
1247 | else | ||
1248 | skb_dst_drop(skb); | ||
1242 | } | 1249 | } |
1243 | 1250 | ||
1244 | int ip_setsockopt(struct sock *sk, int level, | 1251 | int ip_setsockopt(struct sock *sk, int level, |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 86cca610f4c2..68d77b1f1495 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -642,6 +642,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, | |||
642 | { | 642 | { |
643 | struct sk_buff *skb = skb_peek(&sk->sk_write_queue); | 643 | struct sk_buff *skb = skb_peek(&sk->sk_write_queue); |
644 | 644 | ||
645 | if (!skb) | ||
646 | return 0; | ||
645 | pfh->wcheck = csum_partial((char *)&pfh->icmph, | 647 | pfh->wcheck = csum_partial((char *)&pfh->icmph, |
646 | sizeof(struct icmphdr), pfh->wcheck); | 648 | sizeof(struct icmphdr), pfh->wcheck); |
647 | pfh->icmph.checksum = csum_fold(pfh->wcheck); | 649 | pfh->icmph.checksum = csum_fold(pfh->wcheck); |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4a044964da66..0efb4c7f6704 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -770,6 +770,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, | |||
770 | ret = -EAGAIN; | 770 | ret = -EAGAIN; |
771 | break; | 771 | break; |
772 | } | 772 | } |
773 | /* if __tcp_splice_read() got nothing while we have | ||
774 | * an skb in receive queue, we do not want to loop. | ||
775 | * This might happen with URG data. | ||
776 | */ | ||
777 | if (!skb_queue_empty(&sk->sk_receive_queue)) | ||
778 | break; | ||
773 | sk_wait_data(sk, &timeo, NULL); | 779 | sk_wait_data(sk, &timeo, NULL); |
774 | if (signal_pending(current)) { | 780 | if (signal_pending(current)) { |
775 | ret = sock_intr_errno(timeo); | 781 | ret = sock_intr_errno(timeo); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 1d5331a1b1dc..8ce50dc3ab8c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk) | |||
2518 | int full_space = min_t(int, tp->window_clamp, allowed_space); | 2518 | int full_space = min_t(int, tp->window_clamp, allowed_space); |
2519 | int window; | 2519 | int window; |
2520 | 2520 | ||
2521 | if (mss > full_space) | 2521 | if (unlikely(mss > full_space)) { |
2522 | mss = full_space; | 2522 | mss = full_space; |
2523 | 2523 | if (mss <= 0) | |
2524 | return 0; | ||
2525 | } | ||
2524 | if (free_space < (full_space >> 1)) { | 2526 | if (free_space < (full_space >> 1)) { |
2525 | icsk->icsk_ack.quick = 0; | 2527 | icsk->icsk_ack.quick = 0; |
2526 | 2528 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1307a7c2e544..8aab7d78d25b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1501,7 +1501,7 @@ try_again: | |||
1501 | return err; | 1501 | return err; |
1502 | 1502 | ||
1503 | csum_copy_err: | 1503 | csum_copy_err: |
1504 | if (!__sk_queue_drop_skb(sk, skb, flags)) { | 1504 | if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) { |
1505 | UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); | 1505 | UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); |
1506 | UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 1506 | UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1507 | } | 1507 | } |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f60e88e56255..a7bcc0ab5e99 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3386,9 +3386,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
3386 | } | 3386 | } |
3387 | 3387 | ||
3388 | if (idev) { | 3388 | if (idev) { |
3389 | if (idev->if_flags & IF_READY) | 3389 | if (idev->if_flags & IF_READY) { |
3390 | /* device is already configured. */ | 3390 | /* device is already configured - |
3391 | * but resend MLD reports, we might | ||
3392 | * have roamed and need to update | ||
3393 | * multicast snooping switches | ||
3394 | */ | ||
3395 | ipv6_mc_up(idev); | ||
3391 | break; | 3396 | break; |
3397 | } | ||
3392 | idev->if_flags |= IF_READY; | 3398 | idev->if_flags |= IF_READY; |
3393 | } | 3399 | } |
3394 | 3400 | ||
@@ -4009,6 +4015,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) | |||
4009 | 4015 | ||
4010 | if (bump_id) | 4016 | if (bump_id) |
4011 | rt_genid_bump_ipv6(dev_net(dev)); | 4017 | rt_genid_bump_ipv6(dev_net(dev)); |
4018 | |||
4019 | /* Make sure that a new temporary address will be created | ||
4020 | * before this temporary address becomes deprecated. | ||
4021 | */ | ||
4022 | if (ifp->flags & IFA_F_TEMPORARY) | ||
4023 | addrconf_verify_rtnl(); | ||
4012 | } | 4024 | } |
4013 | 4025 | ||
4014 | static void addrconf_dad_run(struct inet6_dev *idev) | 4026 | static void addrconf_dad_run(struct inet6_dev *idev) |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index e4198502fd98..275cac628a95 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -327,7 +327,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb) | |||
327 | struct ipv6_sr_hdr *hdr; | 327 | struct ipv6_sr_hdr *hdr; |
328 | struct inet6_dev *idev; | 328 | struct inet6_dev *idev; |
329 | struct in6_addr *addr; | 329 | struct in6_addr *addr; |
330 | bool cleanup = false; | ||
331 | int accept_seg6; | 330 | int accept_seg6; |
332 | 331 | ||
333 | hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); | 332 | hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); |
@@ -351,11 +350,7 @@ static int ipv6_srh_rcv(struct sk_buff *skb) | |||
351 | #endif | 350 | #endif |
352 | 351 | ||
353 | looped_back: | 352 | looped_back: |
354 | if (hdr->segments_left > 0) { | 353 | if (hdr->segments_left == 0) { |
355 | if (hdr->nexthdr != NEXTHDR_IPV6 && hdr->segments_left == 1 && | ||
356 | sr_has_cleanup(hdr)) | ||
357 | cleanup = true; | ||
358 | } else { | ||
359 | if (hdr->nexthdr == NEXTHDR_IPV6) { | 354 | if (hdr->nexthdr == NEXTHDR_IPV6) { |
360 | int offset = (hdr->hdrlen + 1) << 3; | 355 | int offset = (hdr->hdrlen + 1) << 3; |
361 | 356 | ||
@@ -418,21 +413,6 @@ looped_back: | |||
418 | 413 | ||
419 | ipv6_hdr(skb)->daddr = *addr; | 414 | ipv6_hdr(skb)->daddr = *addr; |
420 | 415 | ||
421 | if (cleanup) { | ||
422 | int srhlen = (hdr->hdrlen + 1) << 3; | ||
423 | int nh = hdr->nexthdr; | ||
424 | |||
425 | skb_pull_rcsum(skb, sizeof(struct ipv6hdr) + srhlen); | ||
426 | memmove(skb_network_header(skb) + srhlen, | ||
427 | skb_network_header(skb), | ||
428 | (unsigned char *)hdr - skb_network_header(skb)); | ||
429 | skb->network_header += srhlen; | ||
430 | ipv6_hdr(skb)->nexthdr = nh; | ||
431 | ipv6_hdr(skb)->payload_len = htons(skb->len - | ||
432 | sizeof(struct ipv6hdr)); | ||
433 | skb_push_rcsum(skb, sizeof(struct ipv6hdr)); | ||
434 | } | ||
435 | |||
436 | skb_dst_drop(skb); | 416 | skb_dst_drop(skb); |
437 | 417 | ||
438 | ip6_route_input(skb); | 418 | ip6_route_input(skb); |
@@ -453,13 +433,8 @@ looped_back: | |||
453 | } | 433 | } |
454 | ipv6_hdr(skb)->hop_limit--; | 434 | ipv6_hdr(skb)->hop_limit--; |
455 | 435 | ||
456 | /* be sure that srh is still present before reinjecting */ | 436 | skb_pull(skb, sizeof(struct ipv6hdr)); |
457 | if (!cleanup) { | 437 | goto looped_back; |
458 | skb_pull(skb, sizeof(struct ipv6hdr)); | ||
459 | goto looped_back; | ||
460 | } | ||
461 | skb_set_transport_header(skb, sizeof(struct ipv6hdr)); | ||
462 | IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); | ||
463 | } | 438 | } |
464 | 439 | ||
465 | dst_input(skb); | 440 | dst_input(skb); |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 558631860d91..630b73be5999 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) | |||
367 | 367 | ||
368 | 368 | ||
369 | static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 369 | static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
370 | u8 type, u8 code, int offset, __be32 info) | 370 | u8 type, u8 code, int offset, __be32 info) |
371 | { | 371 | { |
372 | const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; | 372 | const struct gre_base_hdr *greh; |
373 | __be16 *p = (__be16 *)(skb->data + offset); | 373 | const struct ipv6hdr *ipv6h; |
374 | int grehlen = offset + 4; | 374 | int grehlen = sizeof(*greh); |
375 | struct ip6_tnl *t; | 375 | struct ip6_tnl *t; |
376 | int key_off = 0; | ||
376 | __be16 flags; | 377 | __be16 flags; |
378 | __be32 key; | ||
377 | 379 | ||
378 | flags = p[0]; | 380 | if (!pskb_may_pull(skb, offset + grehlen)) |
379 | if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { | 381 | return; |
380 | if (flags&(GRE_VERSION|GRE_ROUTING)) | 382 | greh = (const struct gre_base_hdr *)(skb->data + offset); |
381 | return; | 383 | flags = greh->flags; |
382 | if (flags&GRE_KEY) { | 384 | if (flags & (GRE_VERSION | GRE_ROUTING)) |
383 | grehlen += 4; | 385 | return; |
384 | if (flags&GRE_CSUM) | 386 | if (flags & GRE_CSUM) |
385 | grehlen += 4; | 387 | grehlen += 4; |
386 | } | 388 | if (flags & GRE_KEY) { |
389 | key_off = grehlen + offset; | ||
390 | grehlen += 4; | ||
387 | } | 391 | } |
388 | 392 | ||
389 | /* If only 8 bytes returned, keyed message will be dropped here */ | 393 | if (!pskb_may_pull(skb, offset + grehlen)) |
390 | if (!pskb_may_pull(skb, grehlen)) | ||
391 | return; | 394 | return; |
392 | ipv6h = (const struct ipv6hdr *)skb->data; | 395 | ipv6h = (const struct ipv6hdr *)skb->data; |
393 | p = (__be16 *)(skb->data + offset); | 396 | greh = (const struct gre_base_hdr *)(skb->data + offset); |
397 | key = key_off ? *(__be32 *)(skb->data + key_off) : 0; | ||
394 | 398 | ||
395 | t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, | 399 | t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, |
396 | flags & GRE_KEY ? | 400 | key, greh->protocol); |
397 | *(((__be32 *)p) + (grehlen / 4) - 1) : 0, | ||
398 | p[1]); | ||
399 | if (!t) | 401 | if (!t) |
400 | return; | 402 | return; |
401 | 403 | ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2c0df09e9036..b6a94ff0bbd0 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1344,7 +1344,7 @@ emsgsize: | |||
1344 | */ | 1344 | */ |
1345 | if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && | 1345 | if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && |
1346 | headersize == sizeof(struct ipv6hdr) && | 1346 | headersize == sizeof(struct ipv6hdr) && |
1347 | length < mtu - headersize && | 1347 | length <= mtu - headersize && |
1348 | !(flags & MSG_MORE) && | 1348 | !(flags & MSG_MORE) && |
1349 | rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) | 1349 | rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) |
1350 | csummode = CHECKSUM_PARTIAL; | 1350 | csummode = CHECKSUM_PARTIAL; |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index ff8ee06491c3..75fac933c209 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -441,7 +441,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) | |||
441 | if (i + sizeof(*tel) > optlen) | 441 | if (i + sizeof(*tel) > optlen) |
442 | break; | 442 | break; |
443 | 443 | ||
444 | tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i; | 444 | tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i); |
445 | /* return index of option if found and valid */ | 445 | /* return index of option if found and valid */ |
446 | if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && | 446 | if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && |
447 | tel->length == 1) | 447 | tel->length == 1) |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 7139fffd61b6..1bdc703cb966 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -779,6 +779,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) | |||
779 | psf->sf_crcount = im->mca_crcount; | 779 | psf->sf_crcount = im->mca_crcount; |
780 | } | 780 | } |
781 | in6_dev_put(pmc->idev); | 781 | in6_dev_put(pmc->idev); |
782 | kfree(pmc); | ||
782 | } | 783 | } |
783 | spin_unlock_bh(&im->mca_lock); | 784 | spin_unlock_bh(&im->mca_lock); |
784 | } | 785 | } |
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 03a064803626..6ef3dfb6e811 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c | |||
@@ -174,7 +174,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr, | |||
174 | * hash function (RadioGatun) with up to 1216 bits | 174 | * hash function (RadioGatun) with up to 1216 bits |
175 | */ | 175 | */ |
176 | 176 | ||
177 | /* saddr(16) + first_seg(1) + cleanup(1) + keyid(4) + seglist(16n) */ | 177 | /* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */ |
178 | plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16; | 178 | plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16; |
179 | 179 | ||
180 | /* this limit allows for 14 segments */ | 180 | /* this limit allows for 14 segments */ |
@@ -186,7 +186,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr, | |||
186 | * | 186 | * |
187 | * 1. Source IPv6 address (128 bits) | 187 | * 1. Source IPv6 address (128 bits) |
188 | * 2. first_segment value (8 bits) | 188 | * 2. first_segment value (8 bits) |
189 | * 3. cleanup flag (8 bits: highest bit is cleanup value, others are 0) | 189 | * 3. Flags (8 bits) |
190 | * 4. HMAC Key ID (32 bits) | 190 | * 4. HMAC Key ID (32 bits) |
191 | * 5. All segments in the segments list (n * 128 bits) | 191 | * 5. All segments in the segments list (n * 128 bits) |
192 | */ | 192 | */ |
@@ -202,8 +202,8 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr, | |||
202 | /* first_segment value */ | 202 | /* first_segment value */ |
203 | *off++ = hdr->first_segment; | 203 | *off++ = hdr->first_segment; |
204 | 204 | ||
205 | /* cleanup flag */ | 205 | /* flags */ |
206 | *off++ = !!(sr_has_cleanup(hdr)) << 7; | 206 | *off++ = hdr->flags; |
207 | 207 | ||
208 | /* HMAC Key ID */ | 208 | /* HMAC Key ID */ |
209 | memcpy(off, &hmackeyid, 4); | 209 | memcpy(off, &hmackeyid, 4); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index fad992ad4bc8..99853c6e33a8 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -1380,6 +1380,7 @@ static int ipip6_tunnel_init(struct net_device *dev) | |||
1380 | err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); | 1380 | err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); |
1381 | if (err) { | 1381 | if (err) { |
1382 | free_percpu(dev->tstats); | 1382 | free_percpu(dev->tstats); |
1383 | dev->tstats = NULL; | ||
1383 | return err; | 1384 | return err; |
1384 | } | 1385 | } |
1385 | 1386 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index cb8929681dc7..eaad72c3d746 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -991,6 +991,16 @@ drop: | |||
991 | return 0; /* don't send reset */ | 991 | return 0; /* don't send reset */ |
992 | } | 992 | } |
993 | 993 | ||
994 | static void tcp_v6_restore_cb(struct sk_buff *skb) | ||
995 | { | ||
996 | /* We need to move header back to the beginning if xfrm6_policy_check() | ||
997 | * and tcp_v6_fill_cb() are going to be called again. | ||
998 | * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. | ||
999 | */ | ||
1000 | memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, | ||
1001 | sizeof(struct inet6_skb_parm)); | ||
1002 | } | ||
1003 | |||
994 | static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, | 1004 | static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, |
995 | struct request_sock *req, | 1005 | struct request_sock *req, |
996 | struct dst_entry *dst, | 1006 | struct dst_entry *dst, |
@@ -1182,8 +1192,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1182 | sk_gfp_mask(sk, GFP_ATOMIC)); | 1192 | sk_gfp_mask(sk, GFP_ATOMIC)); |
1183 | consume_skb(ireq->pktopts); | 1193 | consume_skb(ireq->pktopts); |
1184 | ireq->pktopts = NULL; | 1194 | ireq->pktopts = NULL; |
1185 | if (newnp->pktoptions) | 1195 | if (newnp->pktoptions) { |
1196 | tcp_v6_restore_cb(newnp->pktoptions); | ||
1186 | skb_set_owner_r(newnp->pktoptions, newsk); | 1197 | skb_set_owner_r(newnp->pktoptions, newsk); |
1198 | } | ||
1187 | } | 1199 | } |
1188 | } | 1200 | } |
1189 | 1201 | ||
@@ -1198,16 +1210,6 @@ out: | |||
1198 | return NULL; | 1210 | return NULL; |
1199 | } | 1211 | } |
1200 | 1212 | ||
1201 | static void tcp_v6_restore_cb(struct sk_buff *skb) | ||
1202 | { | ||
1203 | /* We need to move header back to the beginning if xfrm6_policy_check() | ||
1204 | * and tcp_v6_fill_cb() are going to be called again. | ||
1205 | * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. | ||
1206 | */ | ||
1207 | memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, | ||
1208 | sizeof(struct inet6_skb_parm)); | ||
1209 | } | ||
1210 | |||
1211 | /* The socket must have it's spinlock held when we get | 1213 | /* The socket must have it's spinlock held when we get |
1212 | * here, unless it is a TCP_LISTEN socket. | 1214 | * here, unless it is a TCP_LISTEN socket. |
1213 | * | 1215 | * |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4d5c4eee4b3f..8990856f5101 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -441,7 +441,7 @@ try_again: | |||
441 | return err; | 441 | return err; |
442 | 442 | ||
443 | csum_copy_err: | 443 | csum_copy_err: |
444 | if (!__sk_queue_drop_skb(sk, skb, flags)) { | 444 | if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) { |
445 | if (is_udp4) { | 445 | if (is_udp4) { |
446 | UDP_INC_STATS(sock_net(sk), | 446 | UDP_INC_STATS(sock_net(sk), |
447 | UDP_MIB_CSUMERRORS, is_udplite); | 447 | UDP_MIB_CSUMERRORS, is_udplite); |
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 7e08a4d3d77d..64f0e8531af0 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c | |||
@@ -929,23 +929,25 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |||
929 | goto out_error; | 929 | goto out_error; |
930 | } | 930 | } |
931 | 931 | ||
932 | /* New message, alloc head skb */ | 932 | if (msg_data_left(msg)) { |
933 | head = alloc_skb(0, sk->sk_allocation); | 933 | /* New message, alloc head skb */ |
934 | while (!head) { | ||
935 | kcm_push(kcm); | ||
936 | err = sk_stream_wait_memory(sk, &timeo); | ||
937 | if (err) | ||
938 | goto out_error; | ||
939 | |||
940 | head = alloc_skb(0, sk->sk_allocation); | 934 | head = alloc_skb(0, sk->sk_allocation); |
941 | } | 935 | while (!head) { |
936 | kcm_push(kcm); | ||
937 | err = sk_stream_wait_memory(sk, &timeo); | ||
938 | if (err) | ||
939 | goto out_error; | ||
942 | 940 | ||
943 | skb = head; | 941 | head = alloc_skb(0, sk->sk_allocation); |
942 | } | ||
944 | 943 | ||
945 | /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling | 944 | skb = head; |
946 | * csum_and_copy_from_iter from skb_do_copy_data_nocache. | 945 | |
947 | */ | 946 | /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling |
948 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 947 | * csum_and_copy_from_iter from skb_do_copy_data_nocache. |
948 | */ | ||
949 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
950 | } | ||
949 | 951 | ||
950 | start: | 952 | start: |
951 | while (msg_data_left(msg)) { | 953 | while (msg_data_left(msg)) { |
@@ -1018,10 +1020,12 @@ wait_for_memory: | |||
1018 | if (eor) { | 1020 | if (eor) { |
1019 | bool not_busy = skb_queue_empty(&sk->sk_write_queue); | 1021 | bool not_busy = skb_queue_empty(&sk->sk_write_queue); |
1020 | 1022 | ||
1021 | /* Message complete, queue it on send buffer */ | 1023 | if (head) { |
1022 | __skb_queue_tail(&sk->sk_write_queue, head); | 1024 | /* Message complete, queue it on send buffer */ |
1023 | kcm->seq_skb = NULL; | 1025 | __skb_queue_tail(&sk->sk_write_queue, head); |
1024 | KCM_STATS_INCR(kcm->stats.tx_msgs); | 1026 | kcm->seq_skb = NULL; |
1027 | KCM_STATS_INCR(kcm->stats.tx_msgs); | ||
1028 | } | ||
1025 | 1029 | ||
1026 | if (msg->msg_flags & MSG_BATCH) { | 1030 | if (msg->msg_flags & MSG_BATCH) { |
1027 | kcm->tx_wait_more = true; | 1031 | kcm->tx_wait_more = true; |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 8f560f7140a0..aebf281d09ee 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -263,6 +263,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, | |||
263 | int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, | 263 | int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, |
264 | const struct l2tp_nl_cmd_ops *ops); | 264 | const struct l2tp_nl_cmd_ops *ops); |
265 | void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); | 265 | void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); |
266 | int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); | ||
266 | 267 | ||
267 | /* Session reference counts. Incremented when code obtains a reference | 268 | /* Session reference counts. Incremented when code obtains a reference |
268 | * to a session. | 269 | * to a session. |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 3d73278b86ca..28c21546d5b6 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | 13 | ||
14 | #include <asm/ioctls.h> | ||
14 | #include <linux/icmp.h> | 15 | #include <linux/icmp.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
@@ -553,6 +554,30 @@ out: | |||
553 | return err ? err : copied; | 554 | return err ? err : copied; |
554 | } | 555 | } |
555 | 556 | ||
557 | int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) | ||
558 | { | ||
559 | struct sk_buff *skb; | ||
560 | int amount; | ||
561 | |||
562 | switch (cmd) { | ||
563 | case SIOCOUTQ: | ||
564 | amount = sk_wmem_alloc_get(sk); | ||
565 | break; | ||
566 | case SIOCINQ: | ||
567 | spin_lock_bh(&sk->sk_receive_queue.lock); | ||
568 | skb = skb_peek(&sk->sk_receive_queue); | ||
569 | amount = skb ? skb->len : 0; | ||
570 | spin_unlock_bh(&sk->sk_receive_queue.lock); | ||
571 | break; | ||
572 | |||
573 | default: | ||
574 | return -ENOIOCTLCMD; | ||
575 | } | ||
576 | |||
577 | return put_user(amount, (int __user *)arg); | ||
578 | } | ||
579 | EXPORT_SYMBOL(l2tp_ioctl); | ||
580 | |||
556 | static struct proto l2tp_ip_prot = { | 581 | static struct proto l2tp_ip_prot = { |
557 | .name = "L2TP/IP", | 582 | .name = "L2TP/IP", |
558 | .owner = THIS_MODULE, | 583 | .owner = THIS_MODULE, |
@@ -561,7 +586,7 @@ static struct proto l2tp_ip_prot = { | |||
561 | .bind = l2tp_ip_bind, | 586 | .bind = l2tp_ip_bind, |
562 | .connect = l2tp_ip_connect, | 587 | .connect = l2tp_ip_connect, |
563 | .disconnect = l2tp_ip_disconnect, | 588 | .disconnect = l2tp_ip_disconnect, |
564 | .ioctl = udp_ioctl, | 589 | .ioctl = l2tp_ioctl, |
565 | .destroy = l2tp_ip_destroy_sock, | 590 | .destroy = l2tp_ip_destroy_sock, |
566 | .setsockopt = ip_setsockopt, | 591 | .setsockopt = ip_setsockopt, |
567 | .getsockopt = ip_getsockopt, | 592 | .getsockopt = ip_getsockopt, |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 331ccf5a7bad..f47c45250f86 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -722,7 +722,7 @@ static struct proto l2tp_ip6_prot = { | |||
722 | .bind = l2tp_ip6_bind, | 722 | .bind = l2tp_ip6_bind, |
723 | .connect = l2tp_ip6_connect, | 723 | .connect = l2tp_ip6_connect, |
724 | .disconnect = l2tp_ip6_disconnect, | 724 | .disconnect = l2tp_ip6_disconnect, |
725 | .ioctl = udp_ioctl, | 725 | .ioctl = l2tp_ioctl, |
726 | .destroy = l2tp_ip6_destroy_sock, | 726 | .destroy = l2tp_ip6_destroy_sock, |
727 | .setsockopt = ipv6_setsockopt, | 727 | .setsockopt = ipv6_setsockopt, |
728 | .getsockopt = ipv6_getsockopt, | 728 | .getsockopt = ipv6_getsockopt, |
diff --git a/net/mac80211/fils_aead.c b/net/mac80211/fils_aead.c index ecfdd97758a3..5c3af5eb4052 100644 --- a/net/mac80211/fils_aead.c +++ b/net/mac80211/fils_aead.c | |||
@@ -124,7 +124,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len, | |||
124 | 124 | ||
125 | /* CTR */ | 125 | /* CTR */ |
126 | 126 | ||
127 | tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0); | 127 | tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); |
128 | if (IS_ERR(tfm2)) { | 128 | if (IS_ERR(tfm2)) { |
129 | kfree(tmp); | 129 | kfree(tmp); |
130 | return PTR_ERR(tfm2); | 130 | return PTR_ERR(tfm2); |
@@ -183,7 +183,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len, | |||
183 | 183 | ||
184 | /* CTR */ | 184 | /* CTR */ |
185 | 185 | ||
186 | tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0); | 186 | tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); |
187 | if (IS_ERR(tfm2)) | 187 | if (IS_ERR(tfm2)) |
188 | return PTR_ERR(tfm2); | 188 | return PTR_ERR(tfm2); |
189 | /* K2 for CTR */ | 189 | /* K2 for CTR */ |
@@ -272,7 +272,7 @@ int fils_encrypt_assoc_req(struct sk_buff *skb, | |||
272 | crypt_len = skb->data + skb->len - encr; | 272 | crypt_len = skb->data + skb->len - encr; |
273 | skb_put(skb, AES_BLOCK_SIZE); | 273 | skb_put(skb, AES_BLOCK_SIZE); |
274 | return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len, | 274 | return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len, |
275 | encr, crypt_len, 1, addr, len, encr); | 275 | encr, crypt_len, 5, addr, len, encr); |
276 | } | 276 | } |
277 | 277 | ||
278 | int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, | 278 | int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 42120d965263..50e1b7f78bd4 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -339,7 +339,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata, | |||
339 | /* fast-forward to vendor IEs */ | 339 | /* fast-forward to vendor IEs */ |
340 | offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); | 340 | offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); |
341 | 341 | ||
342 | if (offset) { | 342 | if (offset < ifmsh->ie_len) { |
343 | len = ifmsh->ie_len - offset; | 343 | len = ifmsh->ie_len - offset; |
344 | data = ifmsh->ie + offset; | 344 | data = ifmsh->ie + offset; |
345 | if (skb_tailroom(skb) < len) | 345 | if (skb_tailroom(skb) < len) |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3d555c79a7b5..d56ee46b11fc 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2755,7 +2755,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2755 | struct virtio_net_hdr vnet_hdr = { 0 }; | 2755 | struct virtio_net_hdr vnet_hdr = { 0 }; |
2756 | int offset = 0; | 2756 | int offset = 0; |
2757 | struct packet_sock *po = pkt_sk(sk); | 2757 | struct packet_sock *po = pkt_sk(sk); |
2758 | int hlen, tlen; | 2758 | int hlen, tlen, linear; |
2759 | int extra_len = 0; | 2759 | int extra_len = 0; |
2760 | 2760 | ||
2761 | /* | 2761 | /* |
@@ -2816,8 +2816,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2816 | err = -ENOBUFS; | 2816 | err = -ENOBUFS; |
2817 | hlen = LL_RESERVED_SPACE(dev); | 2817 | hlen = LL_RESERVED_SPACE(dev); |
2818 | tlen = dev->needed_tailroom; | 2818 | tlen = dev->needed_tailroom; |
2819 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, | 2819 | linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); |
2820 | __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), | 2820 | linear = max(linear, min_t(int, len, dev->hard_header_len)); |
2821 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, | ||
2821 | msg->msg_flags & MSG_DONTWAIT, &err); | 2822 | msg->msg_flags & MSG_DONTWAIT, &err); |
2822 | if (skb == NULL) | 2823 | if (skb == NULL) |
2823 | goto out_unlock; | 2824 | goto out_unlock; |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 970db7a41684..5752789acc13 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -568,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb, | |||
568 | &mask->icmp.type, | 568 | &mask->icmp.type, |
569 | TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, | 569 | TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, |
570 | sizeof(key->icmp.type)); | 570 | sizeof(key->icmp.type)); |
571 | fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, | 571 | fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, |
572 | &mask->icmp.code, | 572 | &mask->icmp.code, |
573 | TCA_FLOWER_KEY_ICMPV4_CODE_MASK, | 573 | TCA_FLOWER_KEY_ICMPV6_CODE_MASK, |
574 | sizeof(key->icmp.code)); | 574 | sizeof(key->icmp.code)); |
575 | } | 575 | } |
576 | 576 | ||
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index f935429bd5ef..b12bc2abea93 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c | |||
@@ -16,16 +16,11 @@ | |||
16 | #include <net/sch_generic.h> | 16 | #include <net/sch_generic.h> |
17 | #include <net/pkt_cls.h> | 17 | #include <net/pkt_cls.h> |
18 | 18 | ||
19 | struct cls_mall_filter { | 19 | struct cls_mall_head { |
20 | struct tcf_exts exts; | 20 | struct tcf_exts exts; |
21 | struct tcf_result res; | 21 | struct tcf_result res; |
22 | u32 handle; | 22 | u32 handle; |
23 | struct rcu_head rcu; | ||
24 | u32 flags; | 23 | u32 flags; |
25 | }; | ||
26 | |||
27 | struct cls_mall_head { | ||
28 | struct cls_mall_filter *filter; | ||
29 | struct rcu_head rcu; | 24 | struct rcu_head rcu; |
30 | }; | 25 | }; |
31 | 26 | ||
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
33 | struct tcf_result *res) | 28 | struct tcf_result *res) |
34 | { | 29 | { |
35 | struct cls_mall_head *head = rcu_dereference_bh(tp->root); | 30 | struct cls_mall_head *head = rcu_dereference_bh(tp->root); |
36 | struct cls_mall_filter *f = head->filter; | ||
37 | 31 | ||
38 | if (tc_skip_sw(f->flags)) | 32 | if (tc_skip_sw(head->flags)) |
39 | return -1; | 33 | return -1; |
40 | 34 | ||
41 | return tcf_exts_exec(skb, &f->exts, res); | 35 | return tcf_exts_exec(skb, &head->exts, res); |
42 | } | 36 | } |
43 | 37 | ||
44 | static int mall_init(struct tcf_proto *tp) | 38 | static int mall_init(struct tcf_proto *tp) |
45 | { | 39 | { |
46 | struct cls_mall_head *head; | ||
47 | |||
48 | head = kzalloc(sizeof(*head), GFP_KERNEL); | ||
49 | if (!head) | ||
50 | return -ENOBUFS; | ||
51 | |||
52 | rcu_assign_pointer(tp->root, head); | ||
53 | |||
54 | return 0; | 40 | return 0; |
55 | } | 41 | } |
56 | 42 | ||
57 | static void mall_destroy_filter(struct rcu_head *head) | 43 | static void mall_destroy_rcu(struct rcu_head *rcu) |
58 | { | 44 | { |
59 | struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); | 45 | struct cls_mall_head *head = container_of(rcu, struct cls_mall_head, |
46 | rcu); | ||
60 | 47 | ||
61 | tcf_exts_destroy(&f->exts); | 48 | tcf_exts_destroy(&head->exts); |
62 | 49 | kfree(head); | |
63 | kfree(f); | ||
64 | } | 50 | } |
65 | 51 | ||
66 | static int mall_replace_hw_filter(struct tcf_proto *tp, | 52 | static int mall_replace_hw_filter(struct tcf_proto *tp, |
67 | struct cls_mall_filter *f, | 53 | struct cls_mall_head *head, |
68 | unsigned long cookie) | 54 | unsigned long cookie) |
69 | { | 55 | { |
70 | struct net_device *dev = tp->q->dev_queue->dev; | 56 | struct net_device *dev = tp->q->dev_queue->dev; |
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, | |||
74 | offload.type = TC_SETUP_MATCHALL; | 60 | offload.type = TC_SETUP_MATCHALL; |
75 | offload.cls_mall = &mall_offload; | 61 | offload.cls_mall = &mall_offload; |
76 | offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; | 62 | offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; |
77 | offload.cls_mall->exts = &f->exts; | 63 | offload.cls_mall->exts = &head->exts; |
78 | offload.cls_mall->cookie = cookie; | 64 | offload.cls_mall->cookie = cookie; |
79 | 65 | ||
80 | return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, | 66 | return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, |
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, | |||
82 | } | 68 | } |
83 | 69 | ||
84 | static void mall_destroy_hw_filter(struct tcf_proto *tp, | 70 | static void mall_destroy_hw_filter(struct tcf_proto *tp, |
85 | struct cls_mall_filter *f, | 71 | struct cls_mall_head *head, |
86 | unsigned long cookie) | 72 | unsigned long cookie) |
87 | { | 73 | { |
88 | struct net_device *dev = tp->q->dev_queue->dev; | 74 | struct net_device *dev = tp->q->dev_queue->dev; |
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force) | |||
103 | { | 89 | { |
104 | struct cls_mall_head *head = rtnl_dereference(tp->root); | 90 | struct cls_mall_head *head = rtnl_dereference(tp->root); |
105 | struct net_device *dev = tp->q->dev_queue->dev; | 91 | struct net_device *dev = tp->q->dev_queue->dev; |
106 | struct cls_mall_filter *f = head->filter; | ||
107 | 92 | ||
108 | if (!force && f) | 93 | if (!head) |
109 | return false; | 94 | return true; |
110 | 95 | ||
111 | if (f) { | 96 | if (tc_should_offload(dev, tp, head->flags)) |
112 | if (tc_should_offload(dev, tp, f->flags)) | 97 | mall_destroy_hw_filter(tp, head, (unsigned long) head); |
113 | mall_destroy_hw_filter(tp, f, (unsigned long) f); | ||
114 | 98 | ||
115 | call_rcu(&f->rcu, mall_destroy_filter); | 99 | call_rcu(&head->rcu, mall_destroy_rcu); |
116 | } | ||
117 | kfree_rcu(head, rcu); | ||
118 | return true; | 100 | return true; |
119 | } | 101 | } |
120 | 102 | ||
121 | static unsigned long mall_get(struct tcf_proto *tp, u32 handle) | 103 | static unsigned long mall_get(struct tcf_proto *tp, u32 handle) |
122 | { | 104 | { |
123 | struct cls_mall_head *head = rtnl_dereference(tp->root); | 105 | return 0UL; |
124 | struct cls_mall_filter *f = head->filter; | ||
125 | |||
126 | if (f && f->handle == handle) | ||
127 | return (unsigned long) f; | ||
128 | return 0; | ||
129 | } | 106 | } |
130 | 107 | ||
131 | static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { | 108 | static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { |
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { | |||
134 | }; | 111 | }; |
135 | 112 | ||
136 | static int mall_set_parms(struct net *net, struct tcf_proto *tp, | 113 | static int mall_set_parms(struct net *net, struct tcf_proto *tp, |
137 | struct cls_mall_filter *f, | 114 | struct cls_mall_head *head, |
138 | unsigned long base, struct nlattr **tb, | 115 | unsigned long base, struct nlattr **tb, |
139 | struct nlattr *est, bool ovr) | 116 | struct nlattr *est, bool ovr) |
140 | { | 117 | { |
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp, | |||
147 | return err; | 124 | return err; |
148 | 125 | ||
149 | if (tb[TCA_MATCHALL_CLASSID]) { | 126 | if (tb[TCA_MATCHALL_CLASSID]) { |
150 | f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); | 127 | head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); |
151 | tcf_bind_filter(tp, &f->res, base); | 128 | tcf_bind_filter(tp, &head->res, base); |
152 | } | 129 | } |
153 | 130 | ||
154 | tcf_exts_change(tp, &f->exts, &e); | 131 | tcf_exts_change(tp, &head->exts, &e); |
155 | 132 | ||
156 | return 0; | 133 | return 0; |
157 | } | 134 | } |
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, | |||
162 | unsigned long *arg, bool ovr) | 139 | unsigned long *arg, bool ovr) |
163 | { | 140 | { |
164 | struct cls_mall_head *head = rtnl_dereference(tp->root); | 141 | struct cls_mall_head *head = rtnl_dereference(tp->root); |
165 | struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg; | ||
166 | struct net_device *dev = tp->q->dev_queue->dev; | 142 | struct net_device *dev = tp->q->dev_queue->dev; |
167 | struct cls_mall_filter *f; | ||
168 | struct nlattr *tb[TCA_MATCHALL_MAX + 1]; | 143 | struct nlattr *tb[TCA_MATCHALL_MAX + 1]; |
144 | struct cls_mall_head *new; | ||
169 | u32 flags = 0; | 145 | u32 flags = 0; |
170 | int err; | 146 | int err; |
171 | 147 | ||
172 | if (!tca[TCA_OPTIONS]) | 148 | if (!tca[TCA_OPTIONS]) |
173 | return -EINVAL; | 149 | return -EINVAL; |
174 | 150 | ||
175 | if (head->filter) | 151 | if (head) |
176 | return -EBUSY; | 152 | return -EEXIST; |
177 | |||
178 | if (fold) | ||
179 | return -EINVAL; | ||
180 | 153 | ||
181 | err = nla_parse_nested(tb, TCA_MATCHALL_MAX, | 154 | err = nla_parse_nested(tb, TCA_MATCHALL_MAX, |
182 | tca[TCA_OPTIONS], mall_policy); | 155 | tca[TCA_OPTIONS], mall_policy); |
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, | |||
189 | return -EINVAL; | 162 | return -EINVAL; |
190 | } | 163 | } |
191 | 164 | ||
192 | f = kzalloc(sizeof(*f), GFP_KERNEL); | 165 | new = kzalloc(sizeof(*new), GFP_KERNEL); |
193 | if (!f) | 166 | if (!new) |
194 | return -ENOBUFS; | 167 | return -ENOBUFS; |
195 | 168 | ||
196 | tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0); | 169 | tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0); |
197 | 170 | ||
198 | if (!handle) | 171 | if (!handle) |
199 | handle = 1; | 172 | handle = 1; |
200 | f->handle = handle; | 173 | new->handle = handle; |
201 | f->flags = flags; | 174 | new->flags = flags; |
202 | 175 | ||
203 | err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); | 176 | err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr); |
204 | if (err) | 177 | if (err) |
205 | goto errout; | 178 | goto errout; |
206 | 179 | ||
207 | if (tc_should_offload(dev, tp, flags)) { | 180 | if (tc_should_offload(dev, tp, flags)) { |
208 | err = mall_replace_hw_filter(tp, f, (unsigned long) f); | 181 | err = mall_replace_hw_filter(tp, new, (unsigned long) new); |
209 | if (err) { | 182 | if (err) { |
210 | if (tc_skip_sw(flags)) | 183 | if (tc_skip_sw(flags)) |
211 | goto errout; | 184 | goto errout; |
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, | |||
214 | } | 187 | } |
215 | } | 188 | } |
216 | 189 | ||
217 | *arg = (unsigned long) f; | 190 | *arg = (unsigned long) head; |
218 | rcu_assign_pointer(head->filter, f); | 191 | rcu_assign_pointer(tp->root, new); |
219 | 192 | if (head) | |
193 | call_rcu(&head->rcu, mall_destroy_rcu); | ||
220 | return 0; | 194 | return 0; |
221 | 195 | ||
222 | errout: | 196 | errout: |
223 | kfree(f); | 197 | kfree(new); |
224 | return err; | 198 | return err; |
225 | } | 199 | } |
226 | 200 | ||
227 | static int mall_delete(struct tcf_proto *tp, unsigned long arg) | 201 | static int mall_delete(struct tcf_proto *tp, unsigned long arg) |
228 | { | 202 | { |
229 | struct cls_mall_head *head = rtnl_dereference(tp->root); | 203 | return -EOPNOTSUPP; |
230 | struct cls_mall_filter *f = (struct cls_mall_filter *) arg; | ||
231 | struct net_device *dev = tp->q->dev_queue->dev; | ||
232 | |||
233 | if (tc_should_offload(dev, tp, f->flags)) | ||
234 | mall_destroy_hw_filter(tp, f, (unsigned long) f); | ||
235 | |||
236 | RCU_INIT_POINTER(head->filter, NULL); | ||
237 | tcf_unbind_filter(tp, &f->res); | ||
238 | call_rcu(&f->rcu, mall_destroy_filter); | ||
239 | return 0; | ||
240 | } | 204 | } |
241 | 205 | ||
242 | static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg) | 206 | static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg) |
243 | { | 207 | { |
244 | struct cls_mall_head *head = rtnl_dereference(tp->root); | 208 | struct cls_mall_head *head = rtnl_dereference(tp->root); |
245 | struct cls_mall_filter *f = head->filter; | ||
246 | 209 | ||
247 | if (arg->count < arg->skip) | 210 | if (arg->count < arg->skip) |
248 | goto skip; | 211 | goto skip; |
249 | if (arg->fn(tp, (unsigned long) f, arg) < 0) | 212 | if (arg->fn(tp, (unsigned long) head, arg) < 0) |
250 | arg->stop = 1; | 213 | arg->stop = 1; |
251 | skip: | 214 | skip: |
252 | arg->count++; | 215 | arg->count++; |
@@ -255,28 +218,28 @@ skip: | |||
255 | static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, | 218 | static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, |
256 | struct sk_buff *skb, struct tcmsg *t) | 219 | struct sk_buff *skb, struct tcmsg *t) |
257 | { | 220 | { |
258 | struct cls_mall_filter *f = (struct cls_mall_filter *) fh; | 221 | struct cls_mall_head *head = (struct cls_mall_head *) fh; |
259 | struct nlattr *nest; | 222 | struct nlattr *nest; |
260 | 223 | ||
261 | if (!f) | 224 | if (!head) |
262 | return skb->len; | 225 | return skb->len; |
263 | 226 | ||
264 | t->tcm_handle = f->handle; | 227 | t->tcm_handle = head->handle; |
265 | 228 | ||
266 | nest = nla_nest_start(skb, TCA_OPTIONS); | 229 | nest = nla_nest_start(skb, TCA_OPTIONS); |
267 | if (!nest) | 230 | if (!nest) |
268 | goto nla_put_failure; | 231 | goto nla_put_failure; |
269 | 232 | ||
270 | if (f->res.classid && | 233 | if (head->res.classid && |
271 | nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid)) | 234 | nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid)) |
272 | goto nla_put_failure; | 235 | goto nla_put_failure; |
273 | 236 | ||
274 | if (tcf_exts_dump(skb, &f->exts)) | 237 | if (tcf_exts_dump(skb, &head->exts)) |
275 | goto nla_put_failure; | 238 | goto nla_put_failure; |
276 | 239 | ||
277 | nla_nest_end(skb, nest); | 240 | nla_nest_end(skb, nest); |
278 | 241 | ||
279 | if (tcf_exts_dump_stats(skb, &f->exts) < 0) | 242 | if (tcf_exts_dump_stats(skb, &head->exts) < 0) |
280 | goto nla_put_failure; | 243 | goto nla_put_failure; |
281 | 244 | ||
282 | return skb->len; | 245 | return skb->len; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 37eeab7899fc..1b5d669e3029 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, | |||
239 | union sctp_addr *laddr = (union sctp_addr *)addr; | 239 | union sctp_addr *laddr = (union sctp_addr *)addr; |
240 | struct sctp_transport *transport; | 240 | struct sctp_transport *transport; |
241 | 241 | ||
242 | if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) | 242 | if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len)) |
243 | return NULL; | 243 | return NULL; |
244 | 244 | ||
245 | addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, | 245 | addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, |
@@ -7426,7 +7426,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | |||
7426 | */ | 7426 | */ |
7427 | release_sock(sk); | 7427 | release_sock(sk); |
7428 | current_timeo = schedule_timeout(current_timeo); | 7428 | current_timeo = schedule_timeout(current_timeo); |
7429 | BUG_ON(sk != asoc->base.sk); | 7429 | if (sk != asoc->base.sk) |
7430 | goto do_error; | ||
7430 | lock_sock(sk); | 7431 | lock_sock(sk); |
7431 | 7432 | ||
7432 | *timeo_p = current_timeo; | 7433 | *timeo_p = current_timeo; |
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index dc6fb79a361f..25d9a9cf7b66 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c | |||
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr, | |||
260 | if (!oa->data) | 260 | if (!oa->data) |
261 | return -ENOMEM; | 261 | return -ENOMEM; |
262 | 262 | ||
263 | creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); | 263 | creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL); |
264 | if (!creds) { | 264 | if (!creds) { |
265 | kfree(oa->data); | 265 | kfree(oa->data); |
266 | return -ENOMEM; | 266 | return -ENOMEM; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 1efbe48e794f..1dc9f3bac099 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -336,6 +336,11 @@ out: | |||
336 | 336 | ||
337 | static DEFINE_IDA(rpc_clids); | 337 | static DEFINE_IDA(rpc_clids); |
338 | 338 | ||
339 | void rpc_cleanup_clids(void) | ||
340 | { | ||
341 | ida_destroy(&rpc_clids); | ||
342 | } | ||
343 | |||
339 | static int rpc_alloc_clid(struct rpc_clnt *clnt) | 344 | static int rpc_alloc_clid(struct rpc_clnt *clnt) |
340 | { | 345 | { |
341 | int clid; | 346 | int clid; |
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index d1c330a7953a..c73de181467a 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -119,6 +119,7 @@ out: | |||
119 | static void __exit | 119 | static void __exit |
120 | cleanup_sunrpc(void) | 120 | cleanup_sunrpc(void) |
121 | { | 121 | { |
122 | rpc_cleanup_clids(); | ||
122 | rpcauth_remove_module(); | 123 | rpcauth_remove_module(); |
123 | cleanup_socket_xprt(); | 124 | cleanup_socket_xprt(); |
124 | svc_cleanup_xprt_sock(); | 125 | svc_cleanup_xprt_sock(); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5c1b267e22be..aee396b9f190 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -5916,6 +5916,7 @@ do { \ | |||
5916 | break; | 5916 | break; |
5917 | } | 5917 | } |
5918 | cfg->ht_opmode = ht_opmode; | 5918 | cfg->ht_opmode = ht_opmode; |
5919 | mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); | ||
5919 | } | 5920 | } |
5920 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, | 5921 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, |
5921 | 1, 65535, mask, | 5922 | 1, 65535, mask, |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index eadcd4d359d9..d883116ebaa4 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -164,6 +164,7 @@ cmd_gensymtypes_c = \ | |||
164 | $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ | 164 | $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ |
165 | $(GENKSYMS) $(if $(1), -T $(2)) \ | 165 | $(GENKSYMS) $(if $(1), -T $(2)) \ |
166 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ | 166 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ |
167 | $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \ | ||
167 | $(if $(KBUILD_PRESERVE),-p) \ | 168 | $(if $(KBUILD_PRESERVE),-p) \ |
168 | -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) | 169 | -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) |
169 | 170 | ||
@@ -337,6 +338,7 @@ cmd_gensymtypes_S = \ | |||
337 | $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ | 338 | $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ |
338 | $(GENKSYMS) $(if $(1), -T $(2)) \ | 339 | $(GENKSYMS) $(if $(1), -T $(2)) \ |
339 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ | 340 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ |
341 | $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \ | ||
340 | $(if $(KBUILD_PRESERVE),-p) \ | 342 | $(if $(KBUILD_PRESERVE),-p) \ |
341 | -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) | 343 | -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) |
342 | 344 | ||
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c index 06121ce524a7..c9235d8340f1 100644 --- a/scripts/genksyms/genksyms.c +++ b/scripts/genksyms/genksyms.c | |||
@@ -44,7 +44,7 @@ char *cur_filename, *source_file; | |||
44 | int in_source_file; | 44 | int in_source_file; |
45 | 45 | ||
46 | static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, | 46 | static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, |
47 | flag_preserve, flag_warnings; | 47 | flag_preserve, flag_warnings, flag_rel_crcs; |
48 | static const char *mod_prefix = ""; | 48 | static const char *mod_prefix = ""; |
49 | 49 | ||
50 | static int errors; | 50 | static int errors; |
@@ -693,7 +693,10 @@ void export_symbol(const char *name) | |||
693 | fputs(">\n", debugfile); | 693 | fputs(">\n", debugfile); |
694 | 694 | ||
695 | /* Used as a linker script. */ | 695 | /* Used as a linker script. */ |
696 | printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc); | 696 | printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" : |
697 | "SECTIONS { .rodata : ALIGN(4) { " | ||
698 | "%s__crc_%s = .; LONG(0x%08lx); } }\n", | ||
699 | mod_prefix, name, crc); | ||
697 | } | 700 | } |
698 | } | 701 | } |
699 | 702 | ||
@@ -730,7 +733,7 @@ void error_with_pos(const char *fmt, ...) | |||
730 | 733 | ||
731 | static void genksyms_usage(void) | 734 | static void genksyms_usage(void) |
732 | { | 735 | { |
733 | fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n" | 736 | fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n" |
734 | #ifdef __GNU_LIBRARY__ | 737 | #ifdef __GNU_LIBRARY__ |
735 | " -s, --symbol-prefix Select symbol prefix\n" | 738 | " -s, --symbol-prefix Select symbol prefix\n" |
736 | " -d, --debug Increment the debug level (repeatable)\n" | 739 | " -d, --debug Increment the debug level (repeatable)\n" |
@@ -742,6 +745,7 @@ static void genksyms_usage(void) | |||
742 | " -q, --quiet Disable warnings (default)\n" | 745 | " -q, --quiet Disable warnings (default)\n" |
743 | " -h, --help Print this message\n" | 746 | " -h, --help Print this message\n" |
744 | " -V, --version Print the release version\n" | 747 | " -V, --version Print the release version\n" |
748 | " -R, --relative-crc Emit section relative symbol CRCs\n" | ||
745 | #else /* __GNU_LIBRARY__ */ | 749 | #else /* __GNU_LIBRARY__ */ |
746 | " -s Select symbol prefix\n" | 750 | " -s Select symbol prefix\n" |
747 | " -d Increment the debug level (repeatable)\n" | 751 | " -d Increment the debug level (repeatable)\n" |
@@ -753,6 +757,7 @@ static void genksyms_usage(void) | |||
753 | " -q Disable warnings (default)\n" | 757 | " -q Disable warnings (default)\n" |
754 | " -h Print this message\n" | 758 | " -h Print this message\n" |
755 | " -V Print the release version\n" | 759 | " -V Print the release version\n" |
760 | " -R Emit section relative symbol CRCs\n" | ||
756 | #endif /* __GNU_LIBRARY__ */ | 761 | #endif /* __GNU_LIBRARY__ */ |
757 | , stderr); | 762 | , stderr); |
758 | } | 763 | } |
@@ -774,13 +779,14 @@ int main(int argc, char **argv) | |||
774 | {"preserve", 0, 0, 'p'}, | 779 | {"preserve", 0, 0, 'p'}, |
775 | {"version", 0, 0, 'V'}, | 780 | {"version", 0, 0, 'V'}, |
776 | {"help", 0, 0, 'h'}, | 781 | {"help", 0, 0, 'h'}, |
782 | {"relative-crc", 0, 0, 'R'}, | ||
777 | {0, 0, 0, 0} | 783 | {0, 0, 0, 0} |
778 | }; | 784 | }; |
779 | 785 | ||
780 | while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph", | 786 | while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR", |
781 | &long_opts[0], NULL)) != EOF) | 787 | &long_opts[0], NULL)) != EOF) |
782 | #else /* __GNU_LIBRARY__ */ | 788 | #else /* __GNU_LIBRARY__ */ |
783 | while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF) | 789 | while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF) |
784 | #endif /* __GNU_LIBRARY__ */ | 790 | #endif /* __GNU_LIBRARY__ */ |
785 | switch (o) { | 791 | switch (o) { |
786 | case 's': | 792 | case 's': |
@@ -823,6 +829,9 @@ int main(int argc, char **argv) | |||
823 | case 'h': | 829 | case 'h': |
824 | genksyms_usage(); | 830 | genksyms_usage(); |
825 | return 0; | 831 | return 0; |
832 | case 'R': | ||
833 | flag_rel_crcs = 1; | ||
834 | break; | ||
826 | default: | 835 | default: |
827 | genksyms_usage(); | 836 | genksyms_usage(); |
828 | return 1; | 837 | return 1; |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 299b92ca1ae0..5d554419170b 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s) | |||
219 | "_SDA2_BASE_", /* ppc */ | 219 | "_SDA2_BASE_", /* ppc */ |
220 | NULL }; | 220 | NULL }; |
221 | 221 | ||
222 | static char *special_prefixes[] = { | ||
223 | "__crc_", /* modversions */ | ||
224 | NULL }; | ||
225 | |||
222 | static char *special_suffixes[] = { | 226 | static char *special_suffixes[] = { |
223 | "_veneer", /* arm */ | 227 | "_veneer", /* arm */ |
224 | "_from_arm", /* arm */ | 228 | "_from_arm", /* arm */ |
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s) | |||
259 | if (strcmp(sym_name, special_symbols[i]) == 0) | 263 | if (strcmp(sym_name, special_symbols[i]) == 0) |
260 | return 0; | 264 | return 0; |
261 | 265 | ||
266 | for (i = 0; special_prefixes[i]; i++) { | ||
267 | int l = strlen(special_prefixes[i]); | ||
268 | |||
269 | if (l <= strlen(sym_name) && | ||
270 | strncmp(sym_name, special_prefixes[i], l) == 0) | ||
271 | return 0; | ||
272 | } | ||
273 | |||
262 | for (i = 0; special_suffixes[i]; i++) { | 274 | for (i = 0; special_suffixes[i]; i++) { |
263 | int l = strlen(sym_name) - strlen(special_suffixes[i]); | 275 | int l = strlen(sym_name) - strlen(special_suffixes[i]); |
264 | 276 | ||
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 29c89a6bad3d..4dedd0d3d3a7 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info, | |||
621 | if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) { | 621 | if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) { |
622 | is_crc = true; | 622 | is_crc = true; |
623 | crc = (unsigned int) sym->st_value; | 623 | crc = (unsigned int) sym->st_value; |
624 | if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) { | ||
625 | unsigned int *crcp; | ||
626 | |||
627 | /* symbol points to the CRC in the ELF object */ | ||
628 | crcp = (void *)info->hdr + sym->st_value + | ||
629 | info->sechdrs[sym->st_shndx].sh_offset - | ||
630 | (info->hdr->e_type != ET_REL ? | ||
631 | info->sechdrs[sym->st_shndx].sh_addr : 0); | ||
632 | crc = *crcp; | ||
633 | } | ||
624 | sym_update_crc(symname + strlen(CRC_PFX), mod, crc, | 634 | sym_update_crc(symname + strlen(CRC_PFX), mod, crc, |
625 | export); | 635 | export); |
626 | } | 636 | } |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index c7c6619431d5..d98550abe16d 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -5887,7 +5887,7 @@ static int selinux_setprocattr(struct task_struct *p, | |||
5887 | return error; | 5887 | return error; |
5888 | 5888 | ||
5889 | /* Obtain a SID for the context, if one was specified. */ | 5889 | /* Obtain a SID for the context, if one was specified. */ |
5890 | if (size && str[1] && str[1] != '\n') { | 5890 | if (size && str[0] && str[0] != '\n') { |
5891 | if (str[size-1] == '\n') { | 5891 | if (str[size-1] == '\n') { |
5892 | str[size-1] = 0; | 5892 | str[size-1] = 0; |
5893 | size--; | 5893 | size--; |
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index c850345c43b5..dfa5156f3585 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c | |||
@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool) | |||
419 | { | 419 | { |
420 | unsigned long flags; | 420 | unsigned long flags; |
421 | struct snd_seq_event_cell *ptr; | 421 | struct snd_seq_event_cell *ptr; |
422 | int max_count = 5 * HZ; | ||
423 | 422 | ||
424 | if (snd_BUG_ON(!pool)) | 423 | if (snd_BUG_ON(!pool)) |
425 | return -EINVAL; | 424 | return -EINVAL; |
@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool) | |||
432 | if (waitqueue_active(&pool->output_sleep)) | 431 | if (waitqueue_active(&pool->output_sleep)) |
433 | wake_up(&pool->output_sleep); | 432 | wake_up(&pool->output_sleep); |
434 | 433 | ||
435 | while (atomic_read(&pool->counter) > 0) { | 434 | while (atomic_read(&pool->counter) > 0) |
436 | if (max_count == 0) { | ||
437 | pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter)); | ||
438 | break; | ||
439 | } | ||
440 | schedule_timeout_uninterruptible(1); | 435 | schedule_timeout_uninterruptible(1); |
441 | max_count--; | ||
442 | } | ||
443 | 436 | ||
444 | /* release all resources */ | 437 | /* release all resources */ |
445 | spin_lock_irqsave(&pool->lock, flags); | 438 | spin_lock_irqsave(&pool->lock, flags); |
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index 0bec02e89d51..450c5187eecb 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c | |||
@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void) | |||
181 | } | 181 | } |
182 | } | 182 | } |
183 | 183 | ||
184 | static void queue_use(struct snd_seq_queue *queue, int client, int use); | ||
185 | |||
184 | /* allocate a new queue - | 186 | /* allocate a new queue - |
185 | * return queue index value or negative value for error | 187 | * return queue index value or negative value for error |
186 | */ | 188 | */ |
@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) | |||
192 | if (q == NULL) | 194 | if (q == NULL) |
193 | return -ENOMEM; | 195 | return -ENOMEM; |
194 | q->info_flags = info_flags; | 196 | q->info_flags = info_flags; |
197 | queue_use(q, client, 1); | ||
195 | if (queue_list_add(q) < 0) { | 198 | if (queue_list_add(q) < 0) { |
196 | queue_delete(q); | 199 | queue_delete(q); |
197 | return -ENOMEM; | 200 | return -ENOMEM; |
198 | } | 201 | } |
199 | snd_seq_queue_use(q->queue, client, 1); /* use this queue */ | ||
200 | return q->queue; | 202 | return q->queue; |
201 | } | 203 | } |
202 | 204 | ||
@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client, | |||
502 | return result; | 504 | return result; |
503 | } | 505 | } |
504 | 506 | ||
505 | 507 | /* use or unuse this queue */ | |
506 | /* use or unuse this queue - | 508 | static void queue_use(struct snd_seq_queue *queue, int client, int use) |
507 | * if it is the first client, starts the timer. | ||
508 | * if it is not longer used by any clients, stop the timer. | ||
509 | */ | ||
510 | int snd_seq_queue_use(int queueid, int client, int use) | ||
511 | { | 509 | { |
512 | struct snd_seq_queue *queue; | ||
513 | |||
514 | queue = queueptr(queueid); | ||
515 | if (queue == NULL) | ||
516 | return -EINVAL; | ||
517 | mutex_lock(&queue->timer_mutex); | ||
518 | if (use) { | 510 | if (use) { |
519 | if (!test_and_set_bit(client, queue->clients_bitmap)) | 511 | if (!test_and_set_bit(client, queue->clients_bitmap)) |
520 | queue->clients++; | 512 | queue->clients++; |
@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use) | |||
529 | } else { | 521 | } else { |
530 | snd_seq_timer_close(queue); | 522 | snd_seq_timer_close(queue); |
531 | } | 523 | } |
524 | } | ||
525 | |||
526 | /* use or unuse this queue - | ||
527 | * if it is the first client, starts the timer. | ||
528 | * if it is not longer used by any clients, stop the timer. | ||
529 | */ | ||
530 | int snd_seq_queue_use(int queueid, int client, int use) | ||
531 | { | ||
532 | struct snd_seq_queue *queue; | ||
533 | |||
534 | queue = queueptr(queueid); | ||
535 | if (queue == NULL) | ||
536 | return -EINVAL; | ||
537 | mutex_lock(&queue->timer_mutex); | ||
538 | queue_use(queue, client, use); | ||
532 | mutex_unlock(&queue->timer_mutex); | 539 | mutex_unlock(&queue->timer_mutex); |
533 | queuefree(queue); | 540 | queuefree(queue); |
534 | return 0; | 541 | return 0; |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index cf9bc042fe96..3fc201c3b95a 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -3639,6 +3639,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi), | |||
3639 | HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi), | 3639 | HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi), |
3640 | HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi), | 3640 | HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi), |
3641 | HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi), | 3641 | HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi), |
3642 | HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi), | ||
3642 | HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi), | 3643 | HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi), |
3643 | HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi), | 3644 | HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi), |
3644 | HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), | 3645 | HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), |
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 90009c0b3a92..ab3c280a23d1 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c | |||
@@ -754,8 +754,9 @@ int line6_probe(struct usb_interface *interface, | |||
754 | goto error; | 754 | goto error; |
755 | } | 755 | } |
756 | 756 | ||
757 | line6_get_interval(line6); | ||
758 | |||
757 | if (properties->capabilities & LINE6_CAP_CONTROL) { | 759 | if (properties->capabilities & LINE6_CAP_CONTROL) { |
758 | line6_get_interval(line6); | ||
759 | ret = line6_init_cap_control(line6); | 760 | ret = line6_init_cap_control(line6); |
760 | if (ret < 0) | 761 | if (ret < 0) |
761 | goto error; | 762 | goto error; |
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 5e0dea2cdc01..039636ffb6c8 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c | |||
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, | |||
150 | *type = INSN_RETURN; | 150 | *type = INSN_RETURN; |
151 | break; | 151 | break; |
152 | 152 | ||
153 | case 0xc5: /* iret */ | ||
154 | case 0xca: /* retf */ | 153 | case 0xca: /* retf */ |
155 | case 0xcb: /* retf */ | 154 | case 0xcb: /* retf */ |
155 | case 0xcf: /* iret */ | ||
156 | *type = INSN_CONTEXT_SWITCH; | 156 | *type = INSN_CONTEXT_SWITCH; |
157 | break; | 157 | break; |
158 | 158 | ||
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 9ff0db4e2d0c..933aeec46f4a 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c | |||
@@ -1199,7 +1199,7 @@ static int ui_init(void) | |||
1199 | BUG_ON(1); | 1199 | BUG_ON(1); |
1200 | } | 1200 | } |
1201 | 1201 | ||
1202 | perf_hpp__register_sort_field(fmt); | 1202 | perf_hpp__prepend_sort_field(fmt); |
1203 | return 0; | 1203 | return 0; |
1204 | } | 1204 | } |
1205 | 1205 | ||
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 37388397b5bc..18cfcdc90356 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c | |||
@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, | |||
521 | list_add_tail(&format->sort_list, &list->sorts); | 521 | list_add_tail(&format->sort_list, &list->sorts); |
522 | } | 522 | } |
523 | 523 | ||
524 | void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, | ||
525 | struct perf_hpp_fmt *format) | ||
526 | { | ||
527 | list_add(&format->sort_list, &list->sorts); | ||
528 | } | ||
529 | |||
524 | void perf_hpp__column_unregister(struct perf_hpp_fmt *format) | 530 | void perf_hpp__column_unregister(struct perf_hpp_fmt *format) |
525 | { | 531 | { |
526 | list_del(&format->list); | 532 | list_del(&format->list); |
@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list) | |||
560 | perf_hpp_list__for_each_sort_list(list, fmt) { | 566 | perf_hpp_list__for_each_sort_list(list, fmt) { |
561 | struct perf_hpp_fmt *pos; | 567 | struct perf_hpp_fmt *pos; |
562 | 568 | ||
569 | /* skip sort-only fields ("sort_compute" in perf diff) */ | ||
570 | if (!fmt->entry && !fmt->color) | ||
571 | continue; | ||
572 | |||
563 | perf_hpp_list__for_each_format(list, pos) { | 573 | perf_hpp_list__for_each_format(list, pos) { |
564 | if (fmt_equal(fmt, pos)) | 574 | if (fmt_equal(fmt, pos)) |
565 | goto next; | 575 | goto next; |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 42922512c1c6..8b610dd9e2f6 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor) | |||
437 | } | 437 | } |
438 | call->ip = cursor_node->ip; | 438 | call->ip = cursor_node->ip; |
439 | call->ms.sym = cursor_node->sym; | 439 | call->ms.sym = cursor_node->sym; |
440 | call->ms.map = cursor_node->map; | 440 | call->ms.map = map__get(cursor_node->map); |
441 | 441 | ||
442 | if (cursor_node->branch) { | 442 | if (cursor_node->branch) { |
443 | call->branch_count = 1; | 443 | call->branch_count = 1; |
@@ -477,6 +477,7 @@ add_child(struct callchain_node *parent, | |||
477 | 477 | ||
478 | list_for_each_entry_safe(call, tmp, &new->val, list) { | 478 | list_for_each_entry_safe(call, tmp, &new->val, list) { |
479 | list_del(&call->list); | 479 | list_del(&call->list); |
480 | map__zput(call->ms.map); | ||
480 | free(call); | 481 | free(call); |
481 | } | 482 | } |
482 | free(new); | 483 | free(new); |
@@ -761,6 +762,7 @@ merge_chain_branch(struct callchain_cursor *cursor, | |||
761 | list->ms.map, list->ms.sym, | 762 | list->ms.map, list->ms.sym, |
762 | false, NULL, 0, 0); | 763 | false, NULL, 0, 0); |
763 | list_del(&list->list); | 764 | list_del(&list->list); |
765 | map__zput(list->ms.map); | ||
764 | free(list); | 766 | free(list); |
765 | } | 767 | } |
766 | 768 | ||
@@ -811,7 +813,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor, | |||
811 | } | 813 | } |
812 | 814 | ||
813 | node->ip = ip; | 815 | node->ip = ip; |
814 | node->map = map; | 816 | map__zput(node->map); |
817 | node->map = map__get(map); | ||
815 | node->sym = sym; | 818 | node->sym = sym; |
816 | node->branch = branch; | 819 | node->branch = branch; |
817 | node->nr_loop_iter = nr_loop_iter; | 820 | node->nr_loop_iter = nr_loop_iter; |
@@ -1142,11 +1145,13 @@ static void free_callchain_node(struct callchain_node *node) | |||
1142 | 1145 | ||
1143 | list_for_each_entry_safe(list, tmp, &node->parent_val, list) { | 1146 | list_for_each_entry_safe(list, tmp, &node->parent_val, list) { |
1144 | list_del(&list->list); | 1147 | list_del(&list->list); |
1148 | map__zput(list->ms.map); | ||
1145 | free(list); | 1149 | free(list); |
1146 | } | 1150 | } |
1147 | 1151 | ||
1148 | list_for_each_entry_safe(list, tmp, &node->val, list) { | 1152 | list_for_each_entry_safe(list, tmp, &node->val, list) { |
1149 | list_del(&list->list); | 1153 | list_del(&list->list); |
1154 | map__zput(list->ms.map); | ||
1150 | free(list); | 1155 | free(list); |
1151 | } | 1156 | } |
1152 | 1157 | ||
@@ -1210,6 +1215,7 @@ int callchain_node__make_parent_list(struct callchain_node *node) | |||
1210 | goto out; | 1215 | goto out; |
1211 | *new = *chain; | 1216 | *new = *chain; |
1212 | new->has_children = false; | 1217 | new->has_children = false; |
1218 | map__get(new->ms.map); | ||
1213 | list_add_tail(&new->list, &head); | 1219 | list_add_tail(&new->list, &head); |
1214 | } | 1220 | } |
1215 | parent = parent->parent; | 1221 | parent = parent->parent; |
@@ -1230,6 +1236,7 @@ int callchain_node__make_parent_list(struct callchain_node *node) | |||
1230 | out: | 1236 | out: |
1231 | list_for_each_entry_safe(chain, new, &head, list) { | 1237 | list_for_each_entry_safe(chain, new, &head, list) { |
1232 | list_del(&chain->list); | 1238 | list_del(&chain->list); |
1239 | map__zput(chain->ms.map); | ||
1233 | free(chain); | 1240 | free(chain); |
1234 | } | 1241 | } |
1235 | return -ENOMEM; | 1242 | return -ENOMEM; |
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 35c8e379530f..4f4b60f1558a 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/list.h> | 5 | #include <linux/list.h> |
6 | #include <linux/rbtree.h> | 6 | #include <linux/rbtree.h> |
7 | #include "event.h" | 7 | #include "event.h" |
8 | #include "map.h" | ||
8 | #include "symbol.h" | 9 | #include "symbol.h" |
9 | 10 | ||
10 | #define HELP_PAD "\t\t\t\t" | 11 | #define HELP_PAD "\t\t\t\t" |
@@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor, | |||
184 | */ | 185 | */ |
185 | static inline void callchain_cursor_reset(struct callchain_cursor *cursor) | 186 | static inline void callchain_cursor_reset(struct callchain_cursor *cursor) |
186 | { | 187 | { |
188 | struct callchain_cursor_node *node; | ||
189 | |||
187 | cursor->nr = 0; | 190 | cursor->nr = 0; |
188 | cursor->last = &cursor->first; | 191 | cursor->last = &cursor->first; |
192 | |||
193 | for (node = cursor->first; node != NULL; node = node->next) | ||
194 | map__zput(node->map); | ||
189 | } | 195 | } |
190 | 196 | ||
191 | int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip, | 197 | int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip, |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 6770a9645609..7d1b7d33e644 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include "util.h" | 1 | #include "util.h" |
2 | #include "build-id.h" | 2 | #include "build-id.h" |
3 | #include "hist.h" | 3 | #include "hist.h" |
4 | #include "map.h" | ||
4 | #include "session.h" | 5 | #include "session.h" |
5 | #include "sort.h" | 6 | #include "sort.h" |
6 | #include "evlist.h" | 7 | #include "evlist.h" |
@@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, | |||
1019 | int max_stack_depth, void *arg) | 1020 | int max_stack_depth, void *arg) |
1020 | { | 1021 | { |
1021 | int err, err2; | 1022 | int err, err2; |
1023 | struct map *alm = NULL; | ||
1024 | |||
1025 | if (al && al->map) | ||
1026 | alm = map__get(al->map); | ||
1022 | 1027 | ||
1023 | err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, | 1028 | err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, |
1024 | iter->evsel, al, max_stack_depth); | 1029 | iter->evsel, al, max_stack_depth); |
@@ -1058,6 +1063,8 @@ out: | |||
1058 | if (!err) | 1063 | if (!err) |
1059 | err = err2; | 1064 | err = err2; |
1060 | 1065 | ||
1066 | map__put(alm); | ||
1067 | |||
1061 | return err; | 1068 | return err; |
1062 | } | 1069 | } |
1063 | 1070 | ||
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index d4b6514eeef5..28c216e3d5b7 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list, | |||
283 | struct perf_hpp_fmt *format); | 283 | struct perf_hpp_fmt *format); |
284 | void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, | 284 | void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, |
285 | struct perf_hpp_fmt *format); | 285 | struct perf_hpp_fmt *format); |
286 | void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, | ||
287 | struct perf_hpp_fmt *format); | ||
286 | 288 | ||
287 | static inline void perf_hpp__column_register(struct perf_hpp_fmt *format) | 289 | static inline void perf_hpp__column_register(struct perf_hpp_fmt *format) |
288 | { | 290 | { |
@@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format) | |||
294 | perf_hpp_list__register_sort_field(&perf_hpp_list, format); | 296 | perf_hpp_list__register_sort_field(&perf_hpp_list, format); |
295 | } | 297 | } |
296 | 298 | ||
299 | static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format) | ||
300 | { | ||
301 | perf_hpp_list__prepend_sort_field(&perf_hpp_list, format); | ||
302 | } | ||
303 | |||
297 | #define perf_hpp_list__for_each_format(_list, format) \ | 304 | #define perf_hpp_list__for_each_format(_list, format) \ |
298 | list_for_each_entry(format, &(_list)->fields, list) | 305 | list_for_each_entry(format, &(_list)->fields, list) |
299 | 306 | ||