diff options
339 files changed, 2991 insertions, 1643 deletions
diff --git a/Documentation/device-mapper/writecache.txt b/Documentation/device-mapper/writecache.txt index 4424fa2c67d7..01532b3008ae 100644 --- a/Documentation/device-mapper/writecache.txt +++ b/Documentation/device-mapper/writecache.txt | |||
| @@ -15,6 +15,8 @@ Constructor parameters: | |||
| 15 | size) | 15 | size) |
| 16 | 5. the number of optional parameters (the parameters with an argument | 16 | 5. the number of optional parameters (the parameters with an argument |
| 17 | count as two) | 17 | count as two) |
| 18 | start_sector n (default: 0) | ||
| 19 | offset from the start of cache device in 512-byte sectors | ||
| 18 | high_watermark n (default: 50) | 20 | high_watermark n (default: 50) |
| 19 | start writeback when the number of used blocks reach this | 21 | start writeback when the number of used blocks reach this |
| 20 | watermark | 22 | watermark |
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt index bdadc3da9556..6970f30a3770 100644 --- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt +++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt | |||
| @@ -66,7 +66,7 @@ Required root node properties: | |||
| 66 | - "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale | 66 | - "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale |
| 67 | Octa board. | 67 | Octa board. |
| 68 | - "insignal,origen" - for Exynos4210-based Insignal Origen board. | 68 | - "insignal,origen" - for Exynos4210-based Insignal Origen board. |
| 69 | - "insignal,origen4412 - for Exynos4412-based Insignal Origen board. | 69 | - "insignal,origen4412" - for Exynos4412-based Insignal Origen board. |
| 70 | 70 | ||
| 71 | 71 | ||
| 72 | Optional nodes: | 72 | Optional nodes: |
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt index 6fddb4f4f71a..3055d5c2c04e 100644 --- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt +++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt | |||
| @@ -36,7 +36,7 @@ Optional nodes: | |||
| 36 | 36 | ||
| 37 | - port/ports: to describe a connection to an external encoder. The | 37 | - port/ports: to describe a connection to an external encoder. The |
| 38 | binding follows Documentation/devicetree/bindings/graph.txt and | 38 | binding follows Documentation/devicetree/bindings/graph.txt and |
| 39 | suppors a single port with a single endpoint. | 39 | supports a single port with a single endpoint. |
| 40 | 40 | ||
| 41 | - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and | 41 | - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and |
| 42 | Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting | 42 | Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting |
diff --git a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt index 20fc72d9e61e..45a61b462287 100644 --- a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt +++ b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | Nintendo Wii (Hollywood) GPIO controller | 1 | Nintendo Wii (Hollywood) GPIO controller |
| 2 | 2 | ||
| 3 | Required properties: | 3 | Required properties: |
| 4 | - compatible: "nintendo,hollywood-gpio | 4 | - compatible: "nintendo,hollywood-gpio" |
| 5 | - reg: Physical base address and length of the controller's registers. | 5 | - reg: Physical base address and length of the controller's registers. |
| 6 | - gpio-controller: Marks the device node as a GPIO controller. | 6 | - gpio-controller: Marks the device node as a GPIO controller. |
| 7 | - #gpio-cells: Should be <2>. The first cell is the pin number and the | 7 | - #gpio-cells: Should be <2>. The first cell is the pin number and the |
diff --git a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt index 121d9b7c79a2..1063c30d53f7 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt | |||
| @@ -32,7 +32,7 @@ i2c@00000000 { | |||
| 32 | reg = <0x6c>; | 32 | reg = <0x6c>; |
| 33 | interrupt-parent = <&gpx1>; | 33 | interrupt-parent = <&gpx1>; |
| 34 | interrupts = <2 IRQ_TYPE_LEVEL_LOW>; | 34 | interrupts = <2 IRQ_TYPE_LEVEL_LOW>; |
| 35 | vdd-supply = <&ldo15_reg>"; | 35 | vdd-supply = <&ldo15_reg>; |
| 36 | vid-supply = <&ldo18_reg>; | 36 | vid-supply = <&ldo18_reg>; |
| 37 | reset-gpios = <&gpx1 5 0>; | 37 | reset-gpios = <&gpx1 5 0>; |
| 38 | touchscreen-size-x = <1080>; | 38 | touchscreen-size-x = <1080>; |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt index 1099fe0788fa..f246ccbf8838 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt | |||
| @@ -15,7 +15,7 @@ Required properties: | |||
| 15 | include "nvidia,tegra30-ictlr". | 15 | include "nvidia,tegra30-ictlr". |
| 16 | - reg : Specifies base physical address and size of the registers. | 16 | - reg : Specifies base physical address and size of the registers. |
| 17 | Each controller must be described separately (Tegra20 has 4 of them, | 17 | Each controller must be described separately (Tegra20 has 4 of them, |
| 18 | whereas Tegra30 and later have 5" | 18 | whereas Tegra30 and later have 5). |
| 19 | - interrupt-controller : Identifies the node as an interrupt controller. | 19 | - interrupt-controller : Identifies the node as an interrupt controller. |
| 20 | - #interrupt-cells : Specifies the number of cells needed to encode an | 20 | - #interrupt-cells : Specifies the number of cells needed to encode an |
| 21 | interrupt source. The value must be 3. | 21 | interrupt source. The value must be 3. |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt index 136bd612bd83..6a36bf66d932 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt | |||
| @@ -12,7 +12,7 @@ Required properties: | |||
| 12 | specifier, shall be 2 | 12 | specifier, shall be 2 |
| 13 | - interrupts: interrupts references to primary interrupt controller | 13 | - interrupts: interrupts references to primary interrupt controller |
| 14 | (only needed for exti controller with multiple exti under | 14 | (only needed for exti controller with multiple exti under |
| 15 | same parent interrupt: st,stm32-exti and st,stm32h7-exti") | 15 | same parent interrupt: st,stm32-exti and st,stm32h7-exti) |
| 16 | 16 | ||
| 17 | Example: | 17 | Example: |
| 18 | 18 | ||
diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt index 356c29789cf5..3a66d3c483e1 100644 --- a/Documentation/devicetree/bindings/mips/brcm/soc.txt +++ b/Documentation/devicetree/bindings/mips/brcm/soc.txt | |||
| @@ -152,7 +152,7 @@ Required properties: | |||
| 152 | - compatible : should contain one of: | 152 | - compatible : should contain one of: |
| 153 | "brcm,bcm7425-timers" | 153 | "brcm,bcm7425-timers" |
| 154 | "brcm,bcm7429-timers" | 154 | "brcm,bcm7429-timers" |
| 155 | "brcm,bcm7435-timers and | 155 | "brcm,bcm7435-timers" and |
| 156 | "brcm,brcmstb-timers" | 156 | "brcm,brcmstb-timers" |
| 157 | - reg : the timers register range | 157 | - reg : the timers register range |
| 158 | - interrupts : the interrupt line for this timer block | 158 | - interrupts : the interrupt line for this timer block |
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt index df873d1f3b7c..f8c33890bc29 100644 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt +++ b/Documentation/devicetree/bindings/net/fsl-fman.txt | |||
| @@ -238,7 +238,7 @@ PROPERTIES | |||
| 238 | Must include one of the following: | 238 | Must include one of the following: |
| 239 | - "fsl,fman-dtsec" for dTSEC MAC | 239 | - "fsl,fman-dtsec" for dTSEC MAC |
| 240 | - "fsl,fman-xgec" for XGEC MAC | 240 | - "fsl,fman-xgec" for XGEC MAC |
| 241 | - "fsl,fman-memac for mEMAC MAC | 241 | - "fsl,fman-memac" for mEMAC MAC |
| 242 | 242 | ||
| 243 | - cell-index | 243 | - cell-index |
| 244 | Usage: required | 244 | Usage: required |
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index 9b387f861aed..7dec508987c7 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt | |||
| @@ -133,7 +133,7 @@ located inside a PM domain with index 0 of a power controller represented by a | |||
| 133 | node with the label "power". | 133 | node with the label "power". |
| 134 | In the second example the consumer device are partitioned across two PM domains, | 134 | In the second example the consumer device are partitioned across two PM domains, |
| 135 | the first with index 0 and the second with index 1, of a power controller that | 135 | the first with index 0 and the second with index 1, of a power controller that |
| 136 | is represented by a node with the label "power. | 136 | is represented by a node with the label "power". |
| 137 | 137 | ||
| 138 | Optional properties: | 138 | Optional properties: |
| 139 | - required-opps: This contains phandle to an OPP node in another device's OPP | 139 | - required-opps: This contains phandle to an OPP node in another device's OPP |
diff --git a/Documentation/devicetree/bindings/regulator/tps65090.txt b/Documentation/devicetree/bindings/regulator/tps65090.txt index ca69f5e3040c..ae326f263597 100644 --- a/Documentation/devicetree/bindings/regulator/tps65090.txt +++ b/Documentation/devicetree/bindings/regulator/tps65090.txt | |||
| @@ -16,7 +16,7 @@ Required properties: | |||
| 16 | Optional properties: | 16 | Optional properties: |
| 17 | - ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3. | 17 | - ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3. |
| 18 | If DCDCs are externally controlled then this property should be there. | 18 | If DCDCs are externally controlled then this property should be there. |
| 19 | - "dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3. | 19 | - dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3. |
| 20 | If DCDCs are externally controlled and if it is from GPIO then GPIO | 20 | If DCDCs are externally controlled and if it is from GPIO then GPIO |
| 21 | number should be provided. If it is externally controlled and no GPIO | 21 | number should be provided. If it is externally controlled and no GPIO |
| 22 | entry then driver will just configure this rails as external control | 22 | entry then driver will just configure this rails as external control |
diff --git a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt index a21658f18fe6..3661e6153a92 100644 --- a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt +++ b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt | |||
| @@ -15,7 +15,7 @@ Please refer to reset.txt in this directory for common reset | |||
| 15 | controller binding usage. | 15 | controller binding usage. |
| 16 | 16 | ||
| 17 | Required properties: | 17 | Required properties: |
| 18 | - compatible: Should be st,stih407-softreset"; | 18 | - compatible: Should be "st,stih407-softreset"; |
| 19 | - #reset-cells: 1, see below | 19 | - #reset-cells: 1, see below |
| 20 | 20 | ||
| 21 | example: | 21 | example: |
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt index d330c73de9a2..68b7d6207e3d 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt | |||
| @@ -39,7 +39,7 @@ Required properties: | |||
| 39 | 39 | ||
| 40 | Optional property: | 40 | Optional property: |
| 41 | - clock-frequency: Desired I2C bus clock frequency in Hz. | 41 | - clock-frequency: Desired I2C bus clock frequency in Hz. |
| 42 | When missing default to 400000Hz. | 42 | When missing default to 100000Hz. |
| 43 | 43 | ||
| 44 | Child nodes should conform to I2C bus binding as described in i2c.txt. | 44 | Child nodes should conform to I2C bus binding as described in i2c.txt. |
| 45 | 45 | ||
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt index 6a4aadc4ce06..84b28dbe9f15 100644 --- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt +++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt | |||
| @@ -30,7 +30,7 @@ Required properties: | |||
| 30 | 30 | ||
| 31 | Board connectors: | 31 | Board connectors: |
| 32 | * Headset Mic | 32 | * Headset Mic |
| 33 | * Secondary Mic", | 33 | * Secondary Mic |
| 34 | * DMIC | 34 | * DMIC |
| 35 | * Ext Spk | 35 | * Ext Spk |
| 36 | 36 | ||
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt index aa54e49fc8a2..c7600a93ab39 100644 --- a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt +++ b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt | |||
| @@ -35,7 +35,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio. | |||
| 35 | "Digital Mic3" | 35 | "Digital Mic3" |
| 36 | 36 | ||
| 37 | Audio pins and MicBias on WCD9335 Codec: | 37 | Audio pins and MicBias on WCD9335 Codec: |
| 38 | "MIC_BIAS1 | 38 | "MIC_BIAS1" |
| 39 | "MIC_BIAS2" | 39 | "MIC_BIAS2" |
| 40 | "MIC_BIAS3" | 40 | "MIC_BIAS3" |
| 41 | "MIC_BIAS4" | 41 | "MIC_BIAS4" |
diff --git a/Documentation/devicetree/bindings/w1/w1-gpio.txt b/Documentation/devicetree/bindings/w1/w1-gpio.txt index 6e09c35d9f1a..37091902a021 100644 --- a/Documentation/devicetree/bindings/w1/w1-gpio.txt +++ b/Documentation/devicetree/bindings/w1/w1-gpio.txt | |||
| @@ -15,7 +15,7 @@ Optional properties: | |||
| 15 | 15 | ||
| 16 | Examples: | 16 | Examples: |
| 17 | 17 | ||
| 18 | onewire@0 { | 18 | onewire { |
| 19 | compatible = "w1-gpio"; | 19 | compatible = "w1-gpio"; |
| 20 | gpios = <&gpio 126 0>, <&gpio 105 0>; | 20 | gpios = <&gpio 126 0>, <&gpio 105 0>; |
| 21 | }; | 21 | }; |
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index c13214d073a4..d3e5dd26db12 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt | |||
| @@ -1490,7 +1490,7 @@ To remove an ARP target: | |||
| 1490 | 1490 | ||
| 1491 | To configure the interval between learning packet transmits: | 1491 | To configure the interval between learning packet transmits: |
| 1492 | # echo 12 > /sys/class/net/bond0/bonding/lp_interval | 1492 | # echo 12 > /sys/class/net/bond0/bonding/lp_interval |
| 1493 | NOTE: the lp_inteval is the number of seconds between instances where | 1493 | NOTE: the lp_interval is the number of seconds between instances where |
| 1494 | the bonding driver sends learning packets to each slaves peer switch. The | 1494 | the bonding driver sends learning packets to each slaves peer switch. The |
| 1495 | default interval is 1 second. | 1495 | default interval is 1 second. |
| 1496 | 1496 | ||
diff --git a/Documentation/networking/e100.rst b/Documentation/networking/e100.rst index 9708f5fa76de..f81111eba9c5 100644 --- a/Documentation/networking/e100.rst +++ b/Documentation/networking/e100.rst | |||
| @@ -47,41 +47,45 @@ Driver Configuration Parameters | |||
| 47 | The default value for each parameter is generally the recommended setting, | 47 | The default value for each parameter is generally the recommended setting, |
| 48 | unless otherwise noted. | 48 | unless otherwise noted. |
| 49 | 49 | ||
| 50 | Rx Descriptors: Number of receive descriptors. A receive descriptor is a data | 50 | Rx Descriptors: |
| 51 | Number of receive descriptors. A receive descriptor is a data | ||
| 51 | structure that describes a receive buffer and its attributes to the network | 52 | structure that describes a receive buffer and its attributes to the network |
| 52 | controller. The data in the descriptor is used by the controller to write | 53 | controller. The data in the descriptor is used by the controller to write |
| 53 | data from the controller to host memory. In the 3.x.x driver the valid range | 54 | data from the controller to host memory. In the 3.x.x driver the valid range |
| 54 | for this parameter is 64-256. The default value is 256. This parameter can be | 55 | for this parameter is 64-256. The default value is 256. This parameter can be |
| 55 | changed using the command:: | 56 | changed using the command:: |
| 56 | 57 | ||
| 57 | ethtool -G eth? rx n | 58 | ethtool -G eth? rx n |
| 58 | 59 | ||
| 59 | Where n is the number of desired Rx descriptors. | 60 | Where n is the number of desired Rx descriptors. |
| 60 | 61 | ||
| 61 | Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data | 62 | Tx Descriptors: |
| 63 | Number of transmit descriptors. A transmit descriptor is a data | ||
| 62 | structure that describes a transmit buffer and its attributes to the network | 64 | structure that describes a transmit buffer and its attributes to the network |
| 63 | controller. The data in the descriptor is used by the controller to read | 65 | controller. The data in the descriptor is used by the controller to read |
| 64 | data from the host memory to the controller. In the 3.x.x driver the valid | 66 | data from the host memory to the controller. In the 3.x.x driver the valid |
| 65 | range for this parameter is 64-256. The default value is 128. This parameter | 67 | range for this parameter is 64-256. The default value is 128. This parameter |
| 66 | can be changed using the command:: | 68 | can be changed using the command:: |
| 67 | 69 | ||
| 68 | ethtool -G eth? tx n | 70 | ethtool -G eth? tx n |
| 69 | 71 | ||
| 70 | Where n is the number of desired Tx descriptors. | 72 | Where n is the number of desired Tx descriptors. |
| 71 | 73 | ||
| 72 | Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by | 74 | Speed/Duplex: |
| 75 | The driver auto-negotiates the link speed and duplex settings by | ||
| 73 | default. The ethtool utility can be used as follows to force speed/duplex.:: | 76 | default. The ethtool utility can be used as follows to force speed/duplex.:: |
| 74 | 77 | ||
| 75 | ethtool -s eth? autoneg off speed {10|100} duplex {full|half} | 78 | ethtool -s eth? autoneg off speed {10|100} duplex {full|half} |
| 76 | 79 | ||
| 77 | NOTE: setting the speed/duplex to incorrect values will cause the link to | 80 | NOTE: setting the speed/duplex to incorrect values will cause the link to |
| 78 | fail. | 81 | fail. |
| 79 | 82 | ||
| 80 | Event Log Message Level: The driver uses the message level flag to log events | 83 | Event Log Message Level: |
| 84 | The driver uses the message level flag to log events | ||
| 81 | to syslog. The message level can be set at driver load time. It can also be | 85 | to syslog. The message level can be set at driver load time. It can also be |
| 82 | set using the command:: | 86 | set using the command:: |
| 83 | 87 | ||
| 84 | ethtool -s eth? msglvl n | 88 | ethtool -s eth? msglvl n |
| 85 | 89 | ||
| 86 | 90 | ||
| 87 | Additional Configurations | 91 | Additional Configurations |
| @@ -92,7 +96,7 @@ Configuring the Driver on Different Distributions | |||
| 92 | 96 | ||
| 93 | Configuring a network driver to load properly when the system is started | 97 | Configuring a network driver to load properly when the system is started |
| 94 | is distribution dependent. Typically, the configuration process involves | 98 | is distribution dependent. Typically, the configuration process involves |
| 95 | adding an alias line to /etc/modprobe.d/*.conf as well as editing other | 99 | adding an alias line to `/etc/modprobe.d/*.conf` as well as editing other |
| 96 | system startup scripts and/or configuration files. Many popular Linux | 100 | system startup scripts and/or configuration files. Many popular Linux |
| 97 | distributions ship with tools to make these changes for you. To learn | 101 | distributions ship with tools to make these changes for you. To learn |
| 98 | the proper way to configure a network device for your system, refer to | 102 | the proper way to configure a network device for your system, refer to |
| @@ -160,7 +164,10 @@ This results in unbalanced receive traffic. | |||
| 160 | If you have multiple interfaces in a server, either turn on ARP | 164 | If you have multiple interfaces in a server, either turn on ARP |
| 161 | filtering by | 165 | filtering by |
| 162 | 166 | ||
| 163 | (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter | 167 | (1) entering:: |
| 168 | |||
| 169 | echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter | ||
| 170 | |||
| 164 | (this only works if your kernel's version is higher than 2.4.5), or | 171 | (this only works if your kernel's version is higher than 2.4.5), or |
| 165 | 172 | ||
| 166 | (2) installing the interfaces in separate broadcast domains (either | 173 | (2) installing the interfaces in separate broadcast domains (either |
diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/e1000.rst index 144b87eef153..f10dd4086921 100644 --- a/Documentation/networking/e1000.rst +++ b/Documentation/networking/e1000.rst | |||
| @@ -34,7 +34,8 @@ Command Line Parameters | |||
| 34 | The default value for each parameter is generally the recommended setting, | 34 | The default value for each parameter is generally the recommended setting, |
| 35 | unless otherwise noted. | 35 | unless otherwise noted. |
| 36 | 36 | ||
| 37 | NOTES: For more information about the AutoNeg, Duplex, and Speed | 37 | NOTES: |
| 38 | For more information about the AutoNeg, Duplex, and Speed | ||
| 38 | parameters, see the "Speed and Duplex Configuration" section in | 39 | parameters, see the "Speed and Duplex Configuration" section in |
| 39 | this document. | 40 | this document. |
| 40 | 41 | ||
| @@ -45,22 +46,27 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed | |||
| 45 | 46 | ||
| 46 | AutoNeg | 47 | AutoNeg |
| 47 | ------- | 48 | ------- |
| 49 | |||
| 48 | (Supported only on adapters with copper connections) | 50 | (Supported only on adapters with copper connections) |
| 49 | Valid Range: 0x01-0x0F, 0x20-0x2F | 51 | |
| 50 | Default Value: 0x2F | 52 | :Valid Range: 0x01-0x0F, 0x20-0x2F |
| 53 | :Default Value: 0x2F | ||
| 51 | 54 | ||
| 52 | This parameter is a bit-mask that specifies the speed and duplex settings | 55 | This parameter is a bit-mask that specifies the speed and duplex settings |
| 53 | advertised by the adapter. When this parameter is used, the Speed and | 56 | advertised by the adapter. When this parameter is used, the Speed and |
| 54 | Duplex parameters must not be specified. | 57 | Duplex parameters must not be specified. |
| 55 | 58 | ||
| 56 | NOTE: Refer to the Speed and Duplex section of this readme for more | 59 | NOTE: |
| 60 | Refer to the Speed and Duplex section of this readme for more | ||
| 57 | information on the AutoNeg parameter. | 61 | information on the AutoNeg parameter. |
| 58 | 62 | ||
| 59 | Duplex | 63 | Duplex |
| 60 | ------ | 64 | ------ |
| 65 | |||
| 61 | (Supported only on adapters with copper connections) | 66 | (Supported only on adapters with copper connections) |
| 62 | Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) | 67 | |
| 63 | Default Value: 0 | 68 | :Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full) |
| 69 | :Default Value: 0 | ||
| 64 | 70 | ||
| 65 | This defines the direction in which data is allowed to flow. Can be | 71 | This defines the direction in which data is allowed to flow. Can be |
| 66 | either one or two-directional. If both Duplex and the link partner are | 72 | either one or two-directional. If both Duplex and the link partner are |
| @@ -70,18 +76,22 @@ duplex. | |||
| 70 | 76 | ||
| 71 | FlowControl | 77 | FlowControl |
| 72 | ----------- | 78 | ----------- |
| 73 | Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) | 79 | |
| 74 | Default Value: Reads flow control settings from the EEPROM | 80 | :Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx) |
| 81 | :Default Value: Reads flow control settings from the EEPROM | ||
| 75 | 82 | ||
| 76 | This parameter controls the automatic generation(Tx) and response(Rx) | 83 | This parameter controls the automatic generation(Tx) and response(Rx) |
| 77 | to Ethernet PAUSE frames. | 84 | to Ethernet PAUSE frames. |
| 78 | 85 | ||
| 79 | InterruptThrottleRate | 86 | InterruptThrottleRate |
| 80 | --------------------- | 87 | --------------------- |
| 88 | |||
| 81 | (not supported on Intel(R) 82542, 82543 or 82544-based adapters) | 89 | (not supported on Intel(R) 82542, 82543 or 82544-based adapters) |
| 82 | Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, | 90 | |
| 83 | 4=simplified balancing) | 91 | :Valid Range: |
| 84 | Default Value: 3 | 92 | 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative, |
| 93 | 4=simplified balancing) | ||
| 94 | :Default Value: 3 | ||
| 85 | 95 | ||
| 86 | The driver can limit the amount of interrupts per second that the adapter | 96 | The driver can limit the amount of interrupts per second that the adapter |
| 87 | will generate for incoming packets. It does this by writing a value to the | 97 | will generate for incoming packets. It does this by writing a value to the |
| @@ -135,13 +145,15 @@ Setting InterruptThrottleRate to 0 turns off any interrupt moderation | |||
| 135 | and may improve small packet latency, but is generally not suitable | 145 | and may improve small packet latency, but is generally not suitable |
| 136 | for bulk throughput traffic. | 146 | for bulk throughput traffic. |
| 137 | 147 | ||
| 138 | NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and | 148 | NOTE: |
| 149 | InterruptThrottleRate takes precedence over the TxAbsIntDelay and | ||
| 139 | RxAbsIntDelay parameters. In other words, minimizing the receive | 150 | RxAbsIntDelay parameters. In other words, minimizing the receive |
| 140 | and/or transmit absolute delays does not force the controller to | 151 | and/or transmit absolute delays does not force the controller to |
| 141 | generate more interrupts than what the Interrupt Throttle Rate | 152 | generate more interrupts than what the Interrupt Throttle Rate |
| 142 | allows. | 153 | allows. |
| 143 | 154 | ||
| 144 | CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection | 155 | CAUTION: |
| 156 | If you are using the Intel(R) PRO/1000 CT Network Connection | ||
| 145 | (controller 82547), setting InterruptThrottleRate to a value | 157 | (controller 82547), setting InterruptThrottleRate to a value |
| 146 | greater than 75,000, may hang (stop transmitting) adapters | 158 | greater than 75,000, may hang (stop transmitting) adapters |
| 147 | under certain network conditions. If this occurs a NETDEV | 159 | under certain network conditions. If this occurs a NETDEV |
| @@ -151,7 +163,8 @@ CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection | |||
| 151 | hang, ensure that InterruptThrottleRate is set no greater | 163 | hang, ensure that InterruptThrottleRate is set no greater |
| 152 | than 75,000 and is not set to 0. | 164 | than 75,000 and is not set to 0. |
| 153 | 165 | ||
| 154 | NOTE: When e1000 is loaded with default settings and multiple adapters | 166 | NOTE: |
| 167 | When e1000 is loaded with default settings and multiple adapters | ||
| 155 | are in use simultaneously, the CPU utilization may increase non- | 168 | are in use simultaneously, the CPU utilization may increase non- |
| 156 | linearly. In order to limit the CPU utilization without impacting | 169 | linearly. In order to limit the CPU utilization without impacting |
| 157 | the overall throughput, we recommend that you load the driver as | 170 | the overall throughput, we recommend that you load the driver as |
| @@ -168,9 +181,11 @@ NOTE: When e1000 is loaded with default settings and multiple adapters | |||
| 168 | 181 | ||
| 169 | RxDescriptors | 182 | RxDescriptors |
| 170 | ------------- | 183 | ------------- |
| 171 | Valid Range: 48-256 for 82542 and 82543-based adapters | 184 | |
| 172 | 48-4096 for all other supported adapters | 185 | :Valid Range: |
| 173 | Default Value: 256 | 186 | - 48-256 for 82542 and 82543-based adapters |
| 187 | - 48-4096 for all other supported adapters | ||
| 188 | :Default Value: 256 | ||
| 174 | 189 | ||
| 175 | This value specifies the number of receive buffer descriptors allocated | 190 | This value specifies the number of receive buffer descriptors allocated |
| 176 | by the driver. Increasing this value allows the driver to buffer more | 191 | by the driver. Increasing this value allows the driver to buffer more |
| @@ -180,15 +195,17 @@ Each descriptor is 16 bytes. A receive buffer is also allocated for each | |||
| 180 | descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending | 195 | descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending |
| 181 | on the MTU setting. The maximum MTU size is 16110. | 196 | on the MTU setting. The maximum MTU size is 16110. |
| 182 | 197 | ||
| 183 | NOTE: MTU designates the frame size. It only needs to be set for Jumbo | 198 | NOTE: |
| 199 | MTU designates the frame size. It only needs to be set for Jumbo | ||
| 184 | Frames. Depending on the available system resources, the request | 200 | Frames. Depending on the available system resources, the request |
| 185 | for a higher number of receive descriptors may be denied. In this | 201 | for a higher number of receive descriptors may be denied. In this |
| 186 | case, use a lower number. | 202 | case, use a lower number. |
| 187 | 203 | ||
| 188 | RxIntDelay | 204 | RxIntDelay |
| 189 | ---------- | 205 | ---------- |
| 190 | Valid Range: 0-65535 (0=off) | 206 | |
| 191 | Default Value: 0 | 207 | :Valid Range: 0-65535 (0=off) |
| 208 | :Default Value: 0 | ||
| 192 | 209 | ||
| 193 | This value delays the generation of receive interrupts in units of 1.024 | 210 | This value delays the generation of receive interrupts in units of 1.024 |
| 194 | microseconds. Receive interrupt reduction can improve CPU efficiency if | 211 | microseconds. Receive interrupt reduction can improve CPU efficiency if |
| @@ -198,7 +215,8 @@ of TCP traffic. If the system is reporting dropped receives, this value | |||
| 198 | may be set too high, causing the driver to run out of available receive | 215 | may be set too high, causing the driver to run out of available receive |
| 199 | descriptors. | 216 | descriptors. |
| 200 | 217 | ||
| 201 | CAUTION: When setting RxIntDelay to a value other than 0, adapters may | 218 | CAUTION: |
| 219 | When setting RxIntDelay to a value other than 0, adapters may | ||
| 202 | hang (stop transmitting) under certain network conditions. If | 220 | hang (stop transmitting) under certain network conditions. If |
| 203 | this occurs a NETDEV WATCHDOG message is logged in the system | 221 | this occurs a NETDEV WATCHDOG message is logged in the system |
| 204 | event log. In addition, the controller is automatically reset, | 222 | event log. In addition, the controller is automatically reset, |
| @@ -207,9 +225,11 @@ CAUTION: When setting RxIntDelay to a value other than 0, adapters may | |||
| 207 | 225 | ||
| 208 | RxAbsIntDelay | 226 | RxAbsIntDelay |
| 209 | ------------- | 227 | ------------- |
| 228 | |||
| 210 | (This parameter is supported only on 82540, 82545 and later adapters.) | 229 | (This parameter is supported only on 82540, 82545 and later adapters.) |
| 211 | Valid Range: 0-65535 (0=off) | 230 | |
| 212 | Default Value: 128 | 231 | :Valid Range: 0-65535 (0=off) |
| 232 | :Default Value: 128 | ||
| 213 | 233 | ||
| 214 | This value, in units of 1.024 microseconds, limits the delay in which a | 234 | This value, in units of 1.024 microseconds, limits the delay in which a |
| 215 | receive interrupt is generated. Useful only if RxIntDelay is non-zero, | 235 | receive interrupt is generated. Useful only if RxIntDelay is non-zero, |
| @@ -220,9 +240,11 @@ conditions. | |||
| 220 | 240 | ||
| 221 | Speed | 241 | Speed |
| 222 | ----- | 242 | ----- |
| 243 | |||
| 223 | (This parameter is supported only on adapters with copper connections.) | 244 | (This parameter is supported only on adapters with copper connections.) |
| 224 | Valid Settings: 0, 10, 100, 1000 | 245 | |
| 225 | Default Value: 0 (auto-negotiate at all supported speeds) | 246 | :Valid Settings: 0, 10, 100, 1000 |
| 247 | :Default Value: 0 (auto-negotiate at all supported speeds) | ||
| 226 | 248 | ||
| 227 | Speed forces the line speed to the specified value in megabits per second | 249 | Speed forces the line speed to the specified value in megabits per second |
| 228 | (Mbps). If this parameter is not specified or is set to 0 and the link | 250 | (Mbps). If this parameter is not specified or is set to 0 and the link |
| @@ -231,22 +253,26 @@ speed. Duplex should also be set when Speed is set to either 10 or 100. | |||
| 231 | 253 | ||
| 232 | TxDescriptors | 254 | TxDescriptors |
| 233 | ------------- | 255 | ------------- |
| 234 | Valid Range: 48-256 for 82542 and 82543-based adapters | 256 | |
| 235 | 48-4096 for all other supported adapters | 257 | :Valid Range: |
| 236 | Default Value: 256 | 258 | - 48-256 for 82542 and 82543-based adapters |
| 259 | - 48-4096 for all other supported adapters | ||
| 260 | :Default Value: 256 | ||
| 237 | 261 | ||
| 238 | This value is the number of transmit descriptors allocated by the driver. | 262 | This value is the number of transmit descriptors allocated by the driver. |
| 239 | Increasing this value allows the driver to queue more transmits. Each | 263 | Increasing this value allows the driver to queue more transmits. Each |
| 240 | descriptor is 16 bytes. | 264 | descriptor is 16 bytes. |
| 241 | 265 | ||
| 242 | NOTE: Depending on the available system resources, the request for a | 266 | NOTE: |
| 267 | Depending on the available system resources, the request for a | ||
| 243 | higher number of transmit descriptors may be denied. In this case, | 268 | higher number of transmit descriptors may be denied. In this case, |
| 244 | use a lower number. | 269 | use a lower number. |
| 245 | 270 | ||
| 246 | TxIntDelay | 271 | TxIntDelay |
| 247 | ---------- | 272 | ---------- |
| 248 | Valid Range: 0-65535 (0=off) | 273 | |
| 249 | Default Value: 8 | 274 | :Valid Range: 0-65535 (0=off) |
| 275 | :Default Value: 8 | ||
| 250 | 276 | ||
| 251 | This value delays the generation of transmit interrupts in units of | 277 | This value delays the generation of transmit interrupts in units of |
| 252 | 1.024 microseconds. Transmit interrupt reduction can improve CPU | 278 | 1.024 microseconds. Transmit interrupt reduction can improve CPU |
| @@ -256,9 +282,11 @@ causing the driver to run out of available transmit descriptors. | |||
| 256 | 282 | ||
| 257 | TxAbsIntDelay | 283 | TxAbsIntDelay |
| 258 | ------------- | 284 | ------------- |
| 285 | |||
| 259 | (This parameter is supported only on 82540, 82545 and later adapters.) | 286 | (This parameter is supported only on 82540, 82545 and later adapters.) |
| 260 | Valid Range: 0-65535 (0=off) | 287 | |
| 261 | Default Value: 32 | 288 | :Valid Range: 0-65535 (0=off) |
| 289 | :Default Value: 32 | ||
| 262 | 290 | ||
| 263 | This value, in units of 1.024 microseconds, limits the delay in which a | 291 | This value, in units of 1.024 microseconds, limits the delay in which a |
| 264 | transmit interrupt is generated. Useful only if TxIntDelay is non-zero, | 292 | transmit interrupt is generated. Useful only if TxIntDelay is non-zero, |
| @@ -269,18 +297,21 @@ network conditions. | |||
| 269 | 297 | ||
| 270 | XsumRX | 298 | XsumRX |
| 271 | ------ | 299 | ------ |
| 300 | |||
| 272 | (This parameter is NOT supported on the 82542-based adapter.) | 301 | (This parameter is NOT supported on the 82542-based adapter.) |
| 273 | Valid Range: 0-1 | 302 | |
| 274 | Default Value: 1 | 303 | :Valid Range: 0-1 |
| 304 | :Default Value: 1 | ||
| 275 | 305 | ||
| 276 | A value of '1' indicates that the driver should enable IP checksum | 306 | A value of '1' indicates that the driver should enable IP checksum |
| 277 | offload for received packets (both UDP and TCP) to the adapter hardware. | 307 | offload for received packets (both UDP and TCP) to the adapter hardware. |
| 278 | 308 | ||
| 279 | Copybreak | 309 | Copybreak |
| 280 | --------- | 310 | --------- |
| 281 | Valid Range: 0-xxxxxxx (0=off) | 311 | |
| 282 | Default Value: 256 | 312 | :Valid Range: 0-xxxxxxx (0=off) |
| 283 | Usage: modprobe e1000.ko copybreak=128 | 313 | :Default Value: 256 |
| 314 | :Usage: modprobe e1000.ko copybreak=128 | ||
| 284 | 315 | ||
| 285 | Driver copies all packets below or equaling this size to a fresh RX | 316 | Driver copies all packets below or equaling this size to a fresh RX |
| 286 | buffer before handing it up the stack. | 317 | buffer before handing it up the stack. |
| @@ -292,8 +323,9 @@ it is also available during runtime at | |||
| 292 | 323 | ||
| 293 | SmartPowerDownEnable | 324 | SmartPowerDownEnable |
| 294 | -------------------- | 325 | -------------------- |
| 295 | Valid Range: 0-1 | 326 | |
| 296 | Default Value: 0 (disabled) | 327 | :Valid Range: 0-1 |
| 328 | :Default Value: 0 (disabled) | ||
| 297 | 329 | ||
| 298 | Allows PHY to turn off in lower power states. The user can turn off | 330 | Allows PHY to turn off in lower power states. The user can turn off |
| 299 | this parameter in supported chipsets. | 331 | this parameter in supported chipsets. |
| @@ -309,14 +341,14 @@ fiber interface board only links at 1000 Mbps full-duplex. | |||
| 309 | 341 | ||
| 310 | For copper-based boards, the keywords interact as follows: | 342 | For copper-based boards, the keywords interact as follows: |
| 311 | 343 | ||
| 312 | The default operation is auto-negotiate. The board advertises all | 344 | - The default operation is auto-negotiate. The board advertises all |
| 313 | supported speed and duplex combinations, and it links at the highest | 345 | supported speed and duplex combinations, and it links at the highest |
| 314 | common speed and duplex mode IF the link partner is set to auto-negotiate. | 346 | common speed and duplex mode IF the link partner is set to auto-negotiate. |
| 315 | 347 | ||
| 316 | If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps | 348 | - If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps |
| 317 | is advertised (The 1000BaseT spec requires auto-negotiation.) | 349 | is advertised (The 1000BaseT spec requires auto-negotiation.) |
| 318 | 350 | ||
| 319 | If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- | 351 | - If Speed = 10 or 100, then both Speed and Duplex should be set. Auto- |
| 320 | negotiation is disabled, and the AutoNeg parameter is ignored. Partner | 352 | negotiation is disabled, and the AutoNeg parameter is ignored. Partner |
| 321 | SHOULD also be forced. | 353 | SHOULD also be forced. |
| 322 | 354 | ||
| @@ -328,13 +360,15 @@ process. | |||
| 328 | The parameter may be specified as either a decimal or hexadecimal value as | 360 | The parameter may be specified as either a decimal or hexadecimal value as |
| 329 | determined by the bitmap below. | 361 | determined by the bitmap below. |
| 330 | 362 | ||
| 363 | ============== ====== ====== ======= ======= ====== ====== ======= ====== | ||
| 331 | Bit position 7 6 5 4 3 2 1 0 | 364 | Bit position 7 6 5 4 3 2 1 0 |
| 332 | Decimal Value 128 64 32 16 8 4 2 1 | 365 | Decimal Value 128 64 32 16 8 4 2 1 |
| 333 | Hex value 80 40 20 10 8 4 2 1 | 366 | Hex value 80 40 20 10 8 4 2 1 |
| 334 | Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 | 367 | Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 |
| 335 | Duplex Full Full Half Full Half | 368 | Duplex Full Full Half Full Half |
| 369 | ============== ====== ====== ======= ======= ====== ====== ======= ====== | ||
| 336 | 370 | ||
| 337 | Some examples of using AutoNeg: | 371 | Some examples of using AutoNeg:: |
| 338 | 372 | ||
| 339 | modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half) | 373 | modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half) |
| 340 | modprobe e1000 AutoNeg=1 (Same as above) | 374 | modprobe e1000 AutoNeg=1 (Same as above) |
| @@ -357,56 +391,59 @@ Additional Configurations | |||
| 357 | 391 | ||
| 358 | Jumbo Frames | 392 | Jumbo Frames |
| 359 | ------------ | 393 | ------------ |
| 360 | Jumbo Frames support is enabled by changing the MTU to a value larger | 394 | |
| 361 | than the default of 1500. Use the ifconfig command to increase the MTU | 395 | Jumbo Frames support is enabled by changing the MTU to a value larger than |
| 362 | size. For example:: | 396 | the default of 1500. Use the ifconfig command to increase the MTU size. |
| 397 | For example:: | ||
| 363 | 398 | ||
| 364 | ifconfig eth<x> mtu 9000 up | 399 | ifconfig eth<x> mtu 9000 up |
| 365 | 400 | ||
| 366 | This setting is not saved across reboots. It can be made permanent if | 401 | This setting is not saved across reboots. It can be made permanent if |
| 367 | you add:: | 402 | you add:: |
| 368 | 403 | ||
| 369 | MTU=9000 | 404 | MTU=9000 |
| 370 | 405 | ||
| 371 | to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example | 406 | to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example |
| 372 | applies to the Red Hat distributions; other distributions may store this | 407 | applies to the Red Hat distributions; other distributions may store this |
| 373 | setting in a different location. | 408 | setting in a different location. |
| 409 | |||
| 410 | Notes: | ||
| 411 | Degradation in throughput performance may be observed in some Jumbo frames | ||
| 412 | environments. If this is observed, increasing the application's socket buffer | ||
| 413 | size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help. | ||
| 414 | See the specific application manual and /usr/src/linux*/Documentation/ | ||
| 415 | networking/ip-sysctl.txt for more details. | ||
| 374 | 416 | ||
| 375 | Notes: Degradation in throughput performance may be observed in some | 417 | - The maximum MTU setting for Jumbo Frames is 16110. This value coincides |
| 376 | Jumbo frames environments. If this is observed, increasing the | 418 | with the maximum Jumbo Frames size of 16128. |
| 377 | application's socket buffer size and/or increasing the | ||
| 378 | /proc/sys/net/ipv4/tcp_*mem entry values may help. See the specific | ||
| 379 | application manual and /usr/src/linux*/Documentation/ | ||
| 380 | networking/ip-sysctl.txt for more details. | ||
| 381 | 419 | ||
| 382 | - The maximum MTU setting for Jumbo Frames is 16110. This value | 420 | - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in |
| 383 | coincides with the maximum Jumbo Frames size of 16128. | 421 | poor performance or loss of link. |
| 384 | 422 | ||
| 385 | - Using Jumbo frames at 10 or 100 Mbps is not supported and may result | 423 | - Adapters based on the Intel(R) 82542 and 82573V/E controller do not |
| 386 | in poor performance or loss of link. | 424 | support Jumbo Frames. These correspond to the following product names:: |
| 387 | 425 | ||
| 388 | - Adapters based on the Intel(R) 82542 and 82573V/E controller do not | 426 | Intel(R) PRO/1000 Gigabit Server Adapter |
| 389 | support Jumbo Frames. These correspond to the following product names: | 427 | Intel(R) PRO/1000 PM Network Connection |
| 390 | Intel(R) PRO/1000 Gigabit Server Adapter Intel(R) PRO/1000 PM Network | ||
| 391 | Connection | ||
| 392 | 428 | ||
| 393 | ethtool | 429 | ethtool |
| 394 | ------- | 430 | ------- |
| 395 | The driver utilizes the ethtool interface for driver configuration and | ||
| 396 | diagnostics, as well as displaying statistical information. The ethtool | ||
| 397 | version 1.6 or later is required for this functionality. | ||
| 398 | 431 | ||
| 399 | The latest release of ethtool can be found from | 432 | The driver utilizes the ethtool interface for driver configuration and |
| 400 | https://www.kernel.org/pub/software/network/ethtool/ | 433 | diagnostics, as well as displaying statistical information. The ethtool |
| 434 | version 1.6 or later is required for this functionality. | ||
| 435 | |||
| 436 | The latest release of ethtool can be found from | ||
| 437 | https://www.kernel.org/pub/software/network/ethtool/ | ||
| 401 | 438 | ||
| 402 | Enabling Wake on LAN* (WoL) | 439 | Enabling Wake on LAN* (WoL) |
| 403 | --------------------------- | 440 | --------------------------- |
| 404 | WoL is configured through the ethtool* utility. | ||
| 405 | 441 | ||
| 406 | WoL will be enabled on the system during the next shut down or reboot. | 442 | WoL is configured through the ethtool* utility. |
| 407 | For this driver version, in order to enable WoL, the e1000 driver must be | ||
| 408 | loaded when shutting down or rebooting the system. | ||
| 409 | 443 | ||
| 444 | WoL will be enabled on the system during the next shut down or reboot. | ||
| 445 | For this driver version, in order to enable WoL, the e1000 driver must be | ||
| 446 | loaded when shutting down or rebooting the system. | ||
| 410 | 447 | ||
| 411 | Support | 448 | Support |
| 412 | ======= | 449 | ======= |
diff --git a/MAINTAINERS b/MAINTAINERS index 192d7f73fd01..0fe4228f78cb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -2523,7 +2523,7 @@ S: Supported | |||
| 2523 | F: drivers/scsi/esas2r | 2523 | F: drivers/scsi/esas2r |
| 2524 | 2524 | ||
| 2525 | ATUSB IEEE 802.15.4 RADIO DRIVER | 2525 | ATUSB IEEE 802.15.4 RADIO DRIVER |
| 2526 | M: Stefan Schmidt <stefan@osg.samsung.com> | 2526 | M: Stefan Schmidt <stefan@datenfreihafen.org> |
| 2527 | L: linux-wpan@vger.kernel.org | 2527 | L: linux-wpan@vger.kernel.org |
| 2528 | S: Maintained | 2528 | S: Maintained |
| 2529 | F: drivers/net/ieee802154/atusb.c | 2529 | F: drivers/net/ieee802154/atusb.c |
| @@ -5790,7 +5790,6 @@ F: include/linux/fsl/ | |||
| 5790 | 5790 | ||
| 5791 | FREESCALE SOC FS_ENET DRIVER | 5791 | FREESCALE SOC FS_ENET DRIVER |
| 5792 | M: Pantelis Antoniou <pantelis.antoniou@gmail.com> | 5792 | M: Pantelis Antoniou <pantelis.antoniou@gmail.com> |
| 5793 | M: Vitaly Bordug <vbordug@ru.mvista.com> | ||
| 5794 | L: linuxppc-dev@lists.ozlabs.org | 5793 | L: linuxppc-dev@lists.ozlabs.org |
| 5795 | L: netdev@vger.kernel.org | 5794 | L: netdev@vger.kernel.org |
| 5796 | S: Maintained | 5795 | S: Maintained |
| @@ -6909,7 +6908,7 @@ F: drivers/clk/clk-versaclock5.c | |||
| 6909 | 6908 | ||
| 6910 | IEEE 802.15.4 SUBSYSTEM | 6909 | IEEE 802.15.4 SUBSYSTEM |
| 6911 | M: Alexander Aring <alex.aring@gmail.com> | 6910 | M: Alexander Aring <alex.aring@gmail.com> |
| 6912 | M: Stefan Schmidt <stefan@osg.samsung.com> | 6911 | M: Stefan Schmidt <stefan@datenfreihafen.org> |
| 6913 | L: linux-wpan@vger.kernel.org | 6912 | L: linux-wpan@vger.kernel.org |
| 6914 | W: http://wpan.cakelab.org/ | 6913 | W: http://wpan.cakelab.org/ |
| 6915 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git | 6914 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git |
| @@ -8629,7 +8628,7 @@ MARVELL MWIFIEX WIRELESS DRIVER | |||
| 8629 | M: Amitkumar Karwar <amitkarwar@gmail.com> | 8628 | M: Amitkumar Karwar <amitkarwar@gmail.com> |
| 8630 | M: Nishant Sarmukadam <nishants@marvell.com> | 8629 | M: Nishant Sarmukadam <nishants@marvell.com> |
| 8631 | M: Ganapathi Bhat <gbhat@marvell.com> | 8630 | M: Ganapathi Bhat <gbhat@marvell.com> |
| 8632 | M: Xinming Hu <huxm@marvell.com> | 8631 | M: Xinming Hu <huxinming820@gmail.com> |
| 8633 | L: linux-wireless@vger.kernel.org | 8632 | L: linux-wireless@vger.kernel.org |
| 8634 | S: Maintained | 8633 | S: Maintained |
| 8635 | F: drivers/net/wireless/marvell/mwifiex/ | 8634 | F: drivers/net/wireless/marvell/mwifiex/ |
| @@ -9075,7 +9074,7 @@ S: Maintained | |||
| 9075 | F: drivers/usb/mtu3/ | 9074 | F: drivers/usb/mtu3/ |
| 9076 | 9075 | ||
| 9077 | MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES | 9076 | MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES |
| 9078 | M: Peter Senna Tschudin <peter.senna@collabora.com> | 9077 | M: Peter Senna Tschudin <peter.senna@gmail.com> |
| 9079 | M: Martin Donnelly <martin.donnelly@ge.com> | 9078 | M: Martin Donnelly <martin.donnelly@ge.com> |
| 9080 | M: Martyn Welch <martyn.welch@collabora.co.uk> | 9079 | M: Martyn Welch <martyn.welch@collabora.co.uk> |
| 9081 | S: Maintained | 9080 | S: Maintained |
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index e81bcd271be7..9cf59fc60eab 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
| @@ -413,7 +413,7 @@ config ARC_HAS_DIV_REM | |||
| 413 | 413 | ||
| 414 | config ARC_HAS_ACCL_REGS | 414 | config ARC_HAS_ACCL_REGS |
| 415 | bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)" | 415 | bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)" |
| 416 | default n | 416 | default y |
| 417 | help | 417 | help |
| 418 | Depending on the configuration, CPU can contain accumulator reg-pair | 418 | Depending on the configuration, CPU can contain accumulator reg-pair |
| 419 | (also referred to as r58:r59). These can also be used by gcc as GPR so | 419 | (also referred to as r58:r59). These can also be used by gcc as GPR so |
diff --git a/arch/arc/Makefile b/arch/arc/Makefile index d37f49d6a27f..6c1b20dd76ad 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile | |||
| @@ -16,7 +16,7 @@ endif | |||
| 16 | 16 | ||
| 17 | KBUILD_DEFCONFIG := nsim_700_defconfig | 17 | KBUILD_DEFCONFIG := nsim_700_defconfig |
| 18 | 18 | ||
| 19 | cflags-y += -fno-common -pipe -fno-builtin -D__linux__ | 19 | cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ |
| 20 | cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 | 20 | cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 |
| 21 | cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs | 21 | cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs |
| 22 | 22 | ||
| @@ -140,16 +140,3 @@ dtbs: scripts | |||
| 140 | 140 | ||
| 141 | archclean: | 141 | archclean: |
| 142 | $(Q)$(MAKE) $(clean)=$(boot) | 142 | $(Q)$(MAKE) $(clean)=$(boot) |
| 143 | |||
| 144 | # Hacks to enable final link due to absence of link-time branch relexation | ||
| 145 | # and gcc choosing optimal(shorter) branches at -O3 | ||
| 146 | # | ||
| 147 | # vineetg Feb 2010: -mlong-calls switched off for overall kernel build | ||
| 148 | # However lib/decompress_inflate.o (.init.text) calls | ||
| 149 | # zlib_inflate_workspacesize (.text) causing relocation errors. | ||
| 150 | # Thus forcing all exten calls in this file to be long calls | ||
| 151 | export CFLAGS_decompress_inflate.o = -mmedium-calls | ||
| 152 | export CFLAGS_initramfs.o = -mmedium-calls | ||
| 153 | ifdef CONFIG_SMP | ||
| 154 | export CFLAGS_core.o = -mmedium-calls | ||
| 155 | endif | ||
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 09f85154c5a4..a635ea972304 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" | ||
| 15 | CONFIG_EMBEDDED=y | 14 | CONFIG_EMBEDDED=y |
| 16 | CONFIG_PERF_EVENTS=y | 15 | CONFIG_PERF_EVENTS=y |
| 17 | # CONFIG_VM_EVENT_COUNTERS is not set | 16 | # CONFIG_VM_EVENT_COUNTERS is not set |
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 09fed3ef22b6..aa507e423075 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" | ||
| 15 | CONFIG_EMBEDDED=y | 14 | CONFIG_EMBEDDED=y |
| 16 | CONFIG_PERF_EVENTS=y | 15 | CONFIG_PERF_EVENTS=y |
| 17 | # CONFIG_VM_EVENT_COUNTERS is not set | 16 | # CONFIG_VM_EVENT_COUNTERS is not set |
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index ea2f6d817d1a..eba07f468654 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" | ||
| 15 | CONFIG_EMBEDDED=y | 14 | CONFIG_EMBEDDED=y |
| 16 | CONFIG_PERF_EVENTS=y | 15 | CONFIG_PERF_EVENTS=y |
| 17 | # CONFIG_VM_EVENT_COUNTERS is not set | 16 | # CONFIG_VM_EVENT_COUNTERS is not set |
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index ab231c040efe..098b19fbaa51 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" | ||
| 15 | CONFIG_EXPERT=y | 14 | CONFIG_EXPERT=y |
| 16 | CONFIG_PERF_EVENTS=y | 15 | CONFIG_PERF_EVENTS=y |
| 17 | # CONFIG_COMPAT_BRK is not set | 16 | # CONFIG_COMPAT_BRK is not set |
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index cf449cbf440d..0104c404d897 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" | ||
| 15 | CONFIG_EMBEDDED=y | 14 | CONFIG_EMBEDDED=y |
| 16 | CONFIG_PERF_EVENTS=y | 15 | CONFIG_PERF_EVENTS=y |
| 17 | # CONFIG_VM_EVENT_COUNTERS is not set | 16 | # CONFIG_VM_EVENT_COUNTERS is not set |
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 1b54c72f4296..6491be0ddbc9 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig | |||
| @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y | |||
| 9 | # CONFIG_UTS_NS is not set | 9 | # CONFIG_UTS_NS is not set |
| 10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
| 11 | CONFIG_BLK_DEV_INITRD=y | 11 | CONFIG_BLK_DEV_INITRD=y |
| 12 | CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" | ||
| 13 | CONFIG_EMBEDDED=y | 12 | CONFIG_EMBEDDED=y |
| 14 | CONFIG_PERF_EVENTS=y | 13 | CONFIG_PERF_EVENTS=y |
| 15 | # CONFIG_VM_EVENT_COUNTERS is not set | 14 | # CONFIG_VM_EVENT_COUNTERS is not set |
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 31c2c70b34a1..99e05cf63fca 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" | ||
| 15 | CONFIG_KALLSYMS_ALL=y | 14 | CONFIG_KALLSYMS_ALL=y |
| 16 | CONFIG_EMBEDDED=y | 15 | CONFIG_EMBEDDED=y |
| 17 | CONFIG_PERF_EVENTS=y | 16 | CONFIG_PERF_EVENTS=y |
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index a578c721d50f..0dc4f9b737e7 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" | ||
| 15 | CONFIG_KALLSYMS_ALL=y | 14 | CONFIG_KALLSYMS_ALL=y |
| 16 | CONFIG_EMBEDDED=y | 15 | CONFIG_EMBEDDED=y |
| 17 | CONFIG_PERF_EVENTS=y | 16 | CONFIG_PERF_EVENTS=y |
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index 37d7395f3272..be3c30a15e54 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig | |||
| @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y | |||
| 9 | # CONFIG_UTS_NS is not set | 9 | # CONFIG_UTS_NS is not set |
| 10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
| 11 | CONFIG_BLK_DEV_INITRD=y | 11 | CONFIG_BLK_DEV_INITRD=y |
| 12 | CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" | ||
| 13 | CONFIG_KALLSYMS_ALL=y | 12 | CONFIG_KALLSYMS_ALL=y |
| 14 | CONFIG_EMBEDDED=y | 13 | CONFIG_EMBEDDED=y |
| 15 | CONFIG_PERF_EVENTS=y | 14 | CONFIG_PERF_EVENTS=y |
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 1e1470e2a7f0..3a74b9b21772 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" | ||
| 15 | CONFIG_KALLSYMS_ALL=y | 14 | CONFIG_KALLSYMS_ALL=y |
| 16 | CONFIG_EMBEDDED=y | 15 | CONFIG_EMBEDDED=y |
| 17 | CONFIG_PERF_EVENTS=y | 16 | CONFIG_PERF_EVENTS=y |
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 084a6e42685b..ea2834b4dc1d 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig | |||
| @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y | |||
| 11 | # CONFIG_UTS_NS is not set | 11 | # CONFIG_UTS_NS is not set |
| 12 | # CONFIG_PID_NS is not set | 12 | # CONFIG_PID_NS is not set |
| 13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
| 14 | CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" | ||
| 15 | CONFIG_KALLSYMS_ALL=y | 14 | CONFIG_KALLSYMS_ALL=y |
| 16 | CONFIG_EMBEDDED=y | 15 | CONFIG_EMBEDDED=y |
| 17 | CONFIG_PERF_EVENTS=y | 16 | CONFIG_PERF_EVENTS=y |
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index f36d47990415..80a5a1b4924b 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig | |||
| @@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y | |||
| 9 | # CONFIG_UTS_NS is not set | 9 | # CONFIG_UTS_NS is not set |
| 10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
| 11 | CONFIG_BLK_DEV_INITRD=y | 11 | CONFIG_BLK_DEV_INITRD=y |
| 12 | CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" | ||
| 13 | CONFIG_PERF_EVENTS=y | 12 | CONFIG_PERF_EVENTS=y |
| 14 | # CONFIG_COMPAT_BRK is not set | 13 | # CONFIG_COMPAT_BRK is not set |
| 15 | CONFIG_KPROBES=y | 14 | CONFIG_KPROBES=y |
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 1aca2e8fd1ba..2cc87f909747 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig | |||
| @@ -56,7 +56,6 @@ CONFIG_STMMAC_ETH=y | |||
| 56 | # CONFIG_INPUT is not set | 56 | # CONFIG_INPUT is not set |
| 57 | # CONFIG_SERIO is not set | 57 | # CONFIG_SERIO is not set |
| 58 | # CONFIG_VT is not set | 58 | # CONFIG_VT is not set |
| 59 | CONFIG_DEVPTS_MULTIPLE_INSTANCES=y | ||
| 60 | # CONFIG_LEGACY_PTYS is not set | 59 | # CONFIG_LEGACY_PTYS is not set |
| 61 | # CONFIG_DEVKMEM is not set | 60 | # CONFIG_DEVKMEM is not set |
| 62 | CONFIG_SERIAL_8250=y | 61 | CONFIG_SERIAL_8250=y |
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h index ec36d5b6d435..29f3988c9424 100644 --- a/arch/arc/include/asm/entry-compact.h +++ b/arch/arc/include/asm/entry-compact.h | |||
| @@ -234,6 +234,9 @@ | |||
| 234 | POP gp | 234 | POP gp |
| 235 | RESTORE_R12_TO_R0 | 235 | RESTORE_R12_TO_R0 |
| 236 | 236 | ||
| 237 | #ifdef CONFIG_ARC_CURR_IN_REG | ||
| 238 | ld r25, [sp, 12] | ||
| 239 | #endif | ||
| 237 | ld sp, [sp] /* restore original sp */ | 240 | ld sp, [sp] /* restore original sp */ |
| 238 | /* orig_r0, ECR, user_r25 skipped automatically */ | 241 | /* orig_r0, ECR, user_r25 skipped automatically */ |
| 239 | .endm | 242 | .endm |
| @@ -315,6 +318,9 @@ | |||
| 315 | POP gp | 318 | POP gp |
| 316 | RESTORE_R12_TO_R0 | 319 | RESTORE_R12_TO_R0 |
| 317 | 320 | ||
| 321 | #ifdef CONFIG_ARC_CURR_IN_REG | ||
| 322 | ld r25, [sp, 12] | ||
| 323 | #endif | ||
| 318 | ld sp, [sp] /* restore original sp */ | 324 | ld sp, [sp] /* restore original sp */ |
| 319 | /* orig_r0, ECR, user_r25 skipped automatically */ | 325 | /* orig_r0, ECR, user_r25 skipped automatically */ |
| 320 | .endm | 326 | .endm |
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index 51597f344a62..302b0db8ea2b 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h | |||
| @@ -86,9 +86,6 @@ | |||
| 86 | POP r1 | 86 | POP r1 |
| 87 | POP r0 | 87 | POP r0 |
| 88 | 88 | ||
| 89 | #ifdef CONFIG_ARC_CURR_IN_REG | ||
| 90 | ld r25, [sp, 12] | ||
| 91 | #endif | ||
| 92 | .endm | 89 | .endm |
| 93 | 90 | ||
| 94 | /*-------------------------------------------------------------- | 91 | /*-------------------------------------------------------------- |
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h index c28e6c347b49..871f3cb16af9 100644 --- a/arch/arc/include/asm/mach_desc.h +++ b/arch/arc/include/asm/mach_desc.h | |||
| @@ -34,9 +34,7 @@ struct machine_desc { | |||
| 34 | const char *name; | 34 | const char *name; |
| 35 | const char **dt_compat; | 35 | const char **dt_compat; |
| 36 | void (*init_early)(void); | 36 | void (*init_early)(void); |
| 37 | #ifdef CONFIG_SMP | ||
| 38 | void (*init_per_cpu)(unsigned int); | 37 | void (*init_per_cpu)(unsigned int); |
| 39 | #endif | ||
| 40 | void (*init_machine)(void); | 38 | void (*init_machine)(void); |
| 41 | void (*init_late)(void); | 39 | void (*init_late)(void); |
| 42 | 40 | ||
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 109baa06831c..09ddddf71cc5 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h | |||
| @@ -105,7 +105,7 @@ typedef pte_t * pgtable_t; | |||
| 105 | #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) | 105 | #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) |
| 106 | 106 | ||
| 107 | /* Default Permissions for stack/heaps pages (Non Executable) */ | 107 | /* Default Permissions for stack/heaps pages (Non Executable) */ |
| 108 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) | 108 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
| 109 | 109 | ||
| 110 | #define WANT_PAGE_VIRTUAL 1 | 110 | #define WANT_PAGE_VIRTUAL 1 |
| 111 | 111 | ||
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 8ec5599a0957..cf4be70d5892 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h | |||
| @@ -377,7 +377,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, | |||
| 377 | 377 | ||
| 378 | /* Decode a PTE containing swap "identifier "into constituents */ | 378 | /* Decode a PTE containing swap "identifier "into constituents */ |
| 379 | #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) | 379 | #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) |
| 380 | #define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13) | 380 | #define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13) |
| 381 | 381 | ||
| 382 | /* NOPs, to keep generic kernel happy */ | 382 | /* NOPs, to keep generic kernel happy */ |
| 383 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | 383 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 538b36afe89e..62b185057c04 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c | |||
| @@ -31,10 +31,10 @@ void __init init_IRQ(void) | |||
| 31 | /* a SMP H/w block could do IPI IRQ request here */ | 31 | /* a SMP H/w block could do IPI IRQ request here */ |
| 32 | if (plat_smp_ops.init_per_cpu) | 32 | if (plat_smp_ops.init_per_cpu) |
| 33 | plat_smp_ops.init_per_cpu(smp_processor_id()); | 33 | plat_smp_ops.init_per_cpu(smp_processor_id()); |
| 34 | #endif | ||
| 34 | 35 | ||
| 35 | if (machine_desc->init_per_cpu) | 36 | if (machine_desc->init_per_cpu) |
| 36 | machine_desc->init_per_cpu(smp_processor_id()); | 37 | machine_desc->init_per_cpu(smp_processor_id()); |
| 37 | #endif | ||
| 38 | } | 38 | } |
| 39 | 39 | ||
| 40 | /* | 40 | /* |
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 5ac3b547453f..4674541eba3f 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c | |||
| @@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls) | |||
| 47 | SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) | 47 | SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) |
| 48 | { | 48 | { |
| 49 | struct pt_regs *regs = current_pt_regs(); | 49 | struct pt_regs *regs = current_pt_regs(); |
| 50 | int uval = -EFAULT; | 50 | u32 uval; |
| 51 | int ret; | ||
| 51 | 52 | ||
| 52 | /* | 53 | /* |
| 53 | * This is only for old cores lacking LLOCK/SCOND, which by defintion | 54 | * This is only for old cores lacking LLOCK/SCOND, which by defintion |
| @@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) | |||
| 60 | /* Z indicates to userspace if operation succeded */ | 61 | /* Z indicates to userspace if operation succeded */ |
| 61 | regs->status32 &= ~STATUS_Z_MASK; | 62 | regs->status32 &= ~STATUS_Z_MASK; |
| 62 | 63 | ||
| 63 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 64 | ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)); |
| 64 | return -EFAULT; | 65 | if (!ret) |
| 66 | goto fail; | ||
| 65 | 67 | ||
| 68 | again: | ||
| 66 | preempt_disable(); | 69 | preempt_disable(); |
| 67 | 70 | ||
| 68 | if (__get_user(uval, uaddr)) | 71 | ret = __get_user(uval, uaddr); |
| 69 | goto done; | 72 | if (ret) |
| 73 | goto fault; | ||
| 70 | 74 | ||
| 71 | if (uval == expected) { | 75 | if (uval != expected) |
| 72 | if (!__put_user(new, uaddr)) | 76 | goto out; |
| 73 | regs->status32 |= STATUS_Z_MASK; | ||
| 74 | } | ||
| 75 | 77 | ||
| 76 | done: | 78 | ret = __put_user(new, uaddr); |
| 77 | preempt_enable(); | 79 | if (ret) |
| 80 | goto fault; | ||
| 81 | |||
| 82 | regs->status32 |= STATUS_Z_MASK; | ||
| 78 | 83 | ||
| 84 | out: | ||
| 85 | preempt_enable(); | ||
| 79 | return uval; | 86 | return uval; |
| 87 | |||
| 88 | fault: | ||
| 89 | preempt_enable(); | ||
| 90 | |||
| 91 | if (unlikely(ret != -EFAULT)) | ||
| 92 | goto fail; | ||
| 93 | |||
| 94 | down_read(¤t->mm->mmap_sem); | ||
| 95 | ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr, | ||
| 96 | FAULT_FLAG_WRITE, NULL); | ||
| 97 | up_read(¤t->mm->mmap_sem); | ||
| 98 | |||
| 99 | if (likely(!ret)) | ||
| 100 | goto again; | ||
| 101 | |||
| 102 | fail: | ||
| 103 | force_sig(SIGSEGV, current); | ||
| 104 | return ret; | ||
| 80 | } | 105 | } |
| 81 | 106 | ||
| 82 | #ifdef CONFIG_ISA_ARCV2 | 107 | #ifdef CONFIG_ISA_ARCV2 |
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index 19ab3cf98f0f..9356753c2ed8 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig | |||
| @@ -7,5 +7,8 @@ | |||
| 7 | 7 | ||
| 8 | menuconfig ARC_SOC_HSDK | 8 | menuconfig ARC_SOC_HSDK |
| 9 | bool "ARC HS Development Kit SOC" | 9 | bool "ARC HS Development Kit SOC" |
| 10 | depends on ISA_ARCV2 | ||
| 11 | select ARC_HAS_ACCL_REGS | ||
| 10 | select CLK_HSDK | 12 | select CLK_HSDK |
| 11 | select RESET_HSDK | 13 | select RESET_HSDK |
| 14 | select MIGHT_HAVE_PCI | ||
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c index 2958aedb649a..2588b842407c 100644 --- a/arch/arc/plat-hsdk/platform.c +++ b/arch/arc/plat-hsdk/platform.c | |||
| @@ -42,6 +42,66 @@ static void __init hsdk_init_per_cpu(unsigned int cpu) | |||
| 42 | #define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) | 42 | #define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) |
| 43 | #define SDIO_UHS_REG_EXT_DIV_2 (2 << 30) | 43 | #define SDIO_UHS_REG_EXT_DIV_2 (2 << 30) |
| 44 | 44 | ||
| 45 | #define HSDK_GPIO_INTC (ARC_PERIPHERAL_BASE + 0x3000) | ||
| 46 | |||
| 47 | static void __init hsdk_enable_gpio_intc_wire(void) | ||
| 48 | { | ||
| 49 | /* | ||
| 50 | * Peripherals on CPU Card are wired to cpu intc via intermediate | ||
| 51 | * DW APB GPIO blocks (mainly for debouncing) | ||
| 52 | * | ||
| 53 | * --------------------- | ||
| 54 | * | snps,archs-intc | | ||
| 55 | * --------------------- | ||
| 56 | * | | ||
| 57 | * ---------------------- | ||
| 58 | * | snps,archs-idu-intc | | ||
| 59 | * ---------------------- | ||
| 60 | * | | | | | | ||
| 61 | * | [eth] [USB] [... other peripherals] | ||
| 62 | * | | ||
| 63 | * ------------------- | ||
| 64 | * | snps,dw-apb-intc | | ||
| 65 | * ------------------- | ||
| 66 | * | | | | | ||
| 67 | * [Bt] [HAPS] [... other peripherals] | ||
| 68 | * | ||
| 69 | * Current implementation of "irq-dw-apb-ictl" driver doesn't work well | ||
| 70 | * with stacked INTCs. In particular problem happens if its master INTC | ||
| 71 | * not yet instantiated. See discussion here - | ||
| 72 | * https://lkml.org/lkml/2015/3/4/755 | ||
| 73 | * | ||
| 74 | * So setup the first gpio block as a passive pass thru and hide it from | ||
| 75 | * DT hardware topology - connect intc directly to cpu intc | ||
| 76 | * The GPIO "wire" needs to be init nevertheless (here) | ||
| 77 | * | ||
| 78 | * One side adv is that peripheral interrupt handling avoids one nested | ||
| 79 | * intc ISR hop | ||
| 80 | * | ||
| 81 | * According to HSDK User's Manual [1], "Table 2 Interrupt Mapping" | ||
| 82 | * we have the following GPIO input lines used as sources of interrupt: | ||
| 83 | * - GPIO[0] - Bluetooth interrupt of RS9113 module | ||
| 84 | * - GPIO[2] - HAPS interrupt (on HapsTrak 3 connector) | ||
| 85 | * - GPIO[3] - Audio codec (MAX9880A) interrupt | ||
| 86 | * - GPIO[8-23] - Available on Arduino and PMOD_x headers | ||
| 87 | * For now there's no use of Arduino and PMOD_x headers in Linux | ||
| 88 | * use-case so we only enable lines 0, 2 and 3. | ||
| 89 | * | ||
| 90 | * [1] https://github.com/foss-for-synopsys-dwc-arc-processors/ARC-Development-Systems-Forum/wiki/docs/ARC_HSDK_User_Guide.pdf | ||
| 91 | */ | ||
| 92 | #define GPIO_INTEN (HSDK_GPIO_INTC + 0x30) | ||
| 93 | #define GPIO_INTMASK (HSDK_GPIO_INTC + 0x34) | ||
| 94 | #define GPIO_INTTYPE_LEVEL (HSDK_GPIO_INTC + 0x38) | ||
| 95 | #define GPIO_INT_POLARITY (HSDK_GPIO_INTC + 0x3c) | ||
| 96 | #define GPIO_INT_CONNECTED_MASK 0x0d | ||
| 97 | |||
| 98 | iowrite32(0xffffffff, (void __iomem *) GPIO_INTMASK); | ||
| 99 | iowrite32(~GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTMASK); | ||
| 100 | iowrite32(0x00000000, (void __iomem *) GPIO_INTTYPE_LEVEL); | ||
| 101 | iowrite32(0xffffffff, (void __iomem *) GPIO_INT_POLARITY); | ||
| 102 | iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); | ||
| 103 | } | ||
| 104 | |||
| 45 | static void __init hsdk_init_early(void) | 105 | static void __init hsdk_init_early(void) |
| 46 | { | 106 | { |
| 47 | /* | 107 | /* |
| @@ -62,6 +122,8 @@ static void __init hsdk_init_early(void) | |||
| 62 | * minimum possible div-by-2. | 122 | * minimum possible div-by-2. |
| 63 | */ | 123 | */ |
| 64 | iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT); | 124 | iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT); |
| 125 | |||
| 126 | hsdk_enable_gpio_intc_wire(); | ||
| 65 | } | 127 | } |
| 66 | 128 | ||
| 67 | static const char *hsdk_compat[] __initconst = { | 129 | static const char *hsdk_compat[] __initconst = { |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 3b38c717008a..46bff1661836 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
| @@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t | |||
| 2278 | DPRINT(("smpl_buf @%p\n", smpl_buf)); | 2278 | DPRINT(("smpl_buf @%p\n", smpl_buf)); |
| 2279 | 2279 | ||
| 2280 | /* allocate vma */ | 2280 | /* allocate vma */ |
| 2281 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 2281 | vma = vm_area_alloc(mm); |
| 2282 | if (!vma) { | 2282 | if (!vma) { |
| 2283 | DPRINT(("Cannot allocate vma\n")); | 2283 | DPRINT(("Cannot allocate vma\n")); |
| 2284 | goto error_kmem; | 2284 | goto error_kmem; |
| 2285 | } | 2285 | } |
| 2286 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 2287 | 2286 | ||
| 2288 | /* | 2287 | /* |
| 2289 | * partially initialize the vma for the sampling buffer | 2288 | * partially initialize the vma for the sampling buffer |
| 2290 | */ | 2289 | */ |
| 2291 | vma->vm_mm = mm; | ||
| 2292 | vma->vm_file = get_file(filp); | 2290 | vma->vm_file = get_file(filp); |
| 2293 | vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP; | 2291 | vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP; |
| 2294 | vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ | 2292 | vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ |
| @@ -2346,7 +2344,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t | |||
| 2346 | return 0; | 2344 | return 0; |
| 2347 | 2345 | ||
| 2348 | error: | 2346 | error: |
| 2349 | kmem_cache_free(vm_area_cachep, vma); | 2347 | vm_area_free(vma); |
| 2350 | error_kmem: | 2348 | error_kmem: |
| 2351 | pfm_rvfree(smpl_buf, size); | 2349 | pfm_rvfree(smpl_buf, size); |
| 2352 | 2350 | ||
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 18278b448530..bdb14a369137 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
| @@ -114,10 +114,8 @@ ia64_init_addr_space (void) | |||
| 114 | * the problem. When the process attempts to write to the register backing store | 114 | * the problem. When the process attempts to write to the register backing store |
| 115 | * for the first time, it will get a SEGFAULT in this case. | 115 | * for the first time, it will get a SEGFAULT in this case. |
| 116 | */ | 116 | */ |
| 117 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 117 | vma = vm_area_alloc(current->mm); |
| 118 | if (vma) { | 118 | if (vma) { |
| 119 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 120 | vma->vm_mm = current->mm; | ||
| 121 | vma->vm_start = current->thread.rbs_bot & PAGE_MASK; | 119 | vma->vm_start = current->thread.rbs_bot & PAGE_MASK; |
| 122 | vma->vm_end = vma->vm_start + PAGE_SIZE; | 120 | vma->vm_end = vma->vm_start + PAGE_SIZE; |
| 123 | vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; | 121 | vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; |
| @@ -125,7 +123,7 @@ ia64_init_addr_space (void) | |||
| 125 | down_write(¤t->mm->mmap_sem); | 123 | down_write(¤t->mm->mmap_sem); |
| 126 | if (insert_vm_struct(current->mm, vma)) { | 124 | if (insert_vm_struct(current->mm, vma)) { |
| 127 | up_write(¤t->mm->mmap_sem); | 125 | up_write(¤t->mm->mmap_sem); |
| 128 | kmem_cache_free(vm_area_cachep, vma); | 126 | vm_area_free(vma); |
| 129 | return; | 127 | return; |
| 130 | } | 128 | } |
| 131 | up_write(¤t->mm->mmap_sem); | 129 | up_write(¤t->mm->mmap_sem); |
| @@ -133,10 +131,8 @@ ia64_init_addr_space (void) | |||
| 133 | 131 | ||
| 134 | /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ | 132 | /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ |
| 135 | if (!(current->personality & MMAP_PAGE_ZERO)) { | 133 | if (!(current->personality & MMAP_PAGE_ZERO)) { |
| 136 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 134 | vma = vm_area_alloc(current->mm); |
| 137 | if (vma) { | 135 | if (vma) { |
| 138 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 139 | vma->vm_mm = current->mm; | ||
| 140 | vma->vm_end = PAGE_SIZE; | 136 | vma->vm_end = PAGE_SIZE; |
| 141 | vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); | 137 | vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); |
| 142 | vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | | 138 | vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | |
| @@ -144,7 +140,7 @@ ia64_init_addr_space (void) | |||
| 144 | down_write(¤t->mm->mmap_sem); | 140 | down_write(¤t->mm->mmap_sem); |
| 145 | if (insert_vm_struct(current->mm, vma)) { | 141 | if (insert_vm_struct(current->mm, vma)) { |
| 146 | up_write(¤t->mm->mmap_sem); | 142 | up_write(¤t->mm->mmap_sem); |
| 147 | kmem_cache_free(vm_area_cachep, vma); | 143 | vm_area_free(vma); |
| 148 | return; | 144 | return; |
| 149 | } | 145 | } |
| 150 | up_write(¤t->mm->mmap_sem); | 146 | up_write(¤t->mm->mmap_sem); |
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 6aed974276d8..34f7222c5efe 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig | |||
| @@ -12,17 +12,17 @@ config NDS32 | |||
| 12 | select CLONE_BACKWARDS | 12 | select CLONE_BACKWARDS |
| 13 | select COMMON_CLK | 13 | select COMMON_CLK |
| 14 | select DMA_NONCOHERENT_OPS | 14 | select DMA_NONCOHERENT_OPS |
| 15 | select GENERIC_ASHLDI3 | ||
| 16 | select GENERIC_ASHRDI3 | ||
| 17 | select GENERIC_LSHRDI3 | ||
| 18 | select GENERIC_CMPDI2 | ||
| 19 | select GENERIC_MULDI3 | ||
| 20 | select GENERIC_UCMPDI2 | ||
| 21 | select GENERIC_ATOMIC64 | 15 | select GENERIC_ATOMIC64 |
| 22 | select GENERIC_CPU_DEVICES | 16 | select GENERIC_CPU_DEVICES |
| 23 | select GENERIC_CLOCKEVENTS | 17 | select GENERIC_CLOCKEVENTS |
| 24 | select GENERIC_IRQ_CHIP | 18 | select GENERIC_IRQ_CHIP |
| 25 | select GENERIC_IRQ_SHOW | 19 | select GENERIC_IRQ_SHOW |
| 20 | select GENERIC_LIB_ASHLDI3 | ||
| 21 | select GENERIC_LIB_ASHRDI3 | ||
| 22 | select GENERIC_LIB_CMPDI2 | ||
| 23 | select GENERIC_LIB_LSHRDI3 | ||
| 24 | select GENERIC_LIB_MULDI3 | ||
| 25 | select GENERIC_LIB_UCMPDI2 | ||
| 26 | select GENERIC_STRNCPY_FROM_USER | 26 | select GENERIC_STRNCPY_FROM_USER |
| 27 | select GENERIC_STRNLEN_USER | 27 | select GENERIC_STRNLEN_USER |
| 28 | select GENERIC_TIME_VSYSCALL | 28 | select GENERIC_TIME_VSYSCALL |
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 513bb2e9baf9..031c676821ff 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile | |||
| @@ -34,10 +34,12 @@ ifdef CONFIG_CPU_LITTLE_ENDIAN | |||
| 34 | KBUILD_CFLAGS += $(call cc-option, -EL) | 34 | KBUILD_CFLAGS += $(call cc-option, -EL) |
| 35 | KBUILD_AFLAGS += $(call cc-option, -EL) | 35 | KBUILD_AFLAGS += $(call cc-option, -EL) |
| 36 | LDFLAGS += $(call cc-option, -EL) | 36 | LDFLAGS += $(call cc-option, -EL) |
| 37 | CHECKFLAGS += -D__NDS32_EL__ | ||
| 37 | else | 38 | else |
| 38 | KBUILD_CFLAGS += $(call cc-option, -EB) | 39 | KBUILD_CFLAGS += $(call cc-option, -EB) |
| 39 | KBUILD_AFLAGS += $(call cc-option, -EB) | 40 | KBUILD_AFLAGS += $(call cc-option, -EB) |
| 40 | LDFLAGS += $(call cc-option, -EB) | 41 | LDFLAGS += $(call cc-option, -EB) |
| 42 | CHECKFLAGS += -D__NDS32_EB__ | ||
| 41 | endif | 43 | endif |
| 42 | 44 | ||
| 43 | boot := arch/nds32/boot | 45 | boot := arch/nds32/boot |
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h index 10b48f0d8e85..8b26198d51bb 100644 --- a/arch/nds32/include/asm/cacheflush.h +++ b/arch/nds32/include/asm/cacheflush.h | |||
| @@ -8,6 +8,8 @@ | |||
| 8 | 8 | ||
| 9 | #define PG_dcache_dirty PG_arch_1 | 9 | #define PG_dcache_dirty PG_arch_1 |
| 10 | 10 | ||
| 11 | void flush_icache_range(unsigned long start, unsigned long end); | ||
| 12 | void flush_icache_page(struct vm_area_struct *vma, struct page *page); | ||
| 11 | #ifdef CONFIG_CPU_CACHE_ALIASING | 13 | #ifdef CONFIG_CPU_CACHE_ALIASING |
| 12 | void flush_cache_mm(struct mm_struct *mm); | 14 | void flush_cache_mm(struct mm_struct *mm); |
| 13 | void flush_cache_dup_mm(struct mm_struct *mm); | 15 | void flush_cache_dup_mm(struct mm_struct *mm); |
| @@ -34,13 +36,16 @@ void flush_anon_page(struct vm_area_struct *vma, | |||
| 34 | void flush_kernel_dcache_page(struct page *page); | 36 | void flush_kernel_dcache_page(struct page *page); |
| 35 | void flush_kernel_vmap_range(void *addr, int size); | 37 | void flush_kernel_vmap_range(void *addr, int size); |
| 36 | void invalidate_kernel_vmap_range(void *addr, int size); | 38 | void invalidate_kernel_vmap_range(void *addr, int size); |
| 37 | void flush_icache_range(unsigned long start, unsigned long end); | ||
| 38 | void flush_icache_page(struct vm_area_struct *vma, struct page *page); | ||
| 39 | #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages) | 39 | #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages) |
| 40 | #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages) | 40 | #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages) |
| 41 | 41 | ||
| 42 | #else | 42 | #else |
| 43 | #include <asm-generic/cacheflush.h> | 43 | #include <asm-generic/cacheflush.h> |
| 44 | #undef flush_icache_range | ||
| 45 | #undef flush_icache_page | ||
| 46 | #undef flush_icache_user_range | ||
| 47 | void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | ||
| 48 | unsigned long addr, int len); | ||
| 44 | #endif | 49 | #endif |
| 45 | 50 | ||
| 46 | #endif /* __NDS32_CACHEFLUSH_H__ */ | 51 | #endif /* __NDS32_CACHEFLUSH_H__ */ |
diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h index eab5e84bd991..cb6cb91cfdf8 100644 --- a/arch/nds32/include/asm/futex.h +++ b/arch/nds32/include/asm/futex.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | " .popsection\n" \ | 16 | " .popsection\n" \ |
| 17 | " .pushsection .fixup,\"ax\"\n" \ | 17 | " .pushsection .fixup,\"ax\"\n" \ |
| 18 | "4: move %0, " err_reg "\n" \ | 18 | "4: move %0, " err_reg "\n" \ |
| 19 | " j 3b\n" \ | 19 | " b 3b\n" \ |
| 20 | " .popsection" | 20 | " .popsection" |
| 21 | 21 | ||
| 22 | #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ | 22 | #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ |
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c index 2f5b2ccebe47..63a1a5ef5219 100644 --- a/arch/nds32/kernel/setup.c +++ b/arch/nds32/kernel/setup.c | |||
| @@ -278,7 +278,8 @@ static void __init setup_memory(void) | |||
| 278 | 278 | ||
| 279 | void __init setup_arch(char **cmdline_p) | 279 | void __init setup_arch(char **cmdline_p) |
| 280 | { | 280 | { |
| 281 | early_init_devtree( __dtb_start); | 281 | early_init_devtree(__atags_pointer ? \ |
| 282 | phys_to_virt(__atags_pointer) : __dtb_start); | ||
| 282 | 283 | ||
| 283 | setup_cpuinfo(); | 284 | setup_cpuinfo(); |
| 284 | 285 | ||
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c index ce8fd34497bf..254703653b6f 100644 --- a/arch/nds32/mm/cacheflush.c +++ b/arch/nds32/mm/cacheflush.c | |||
| @@ -13,7 +13,39 @@ | |||
| 13 | 13 | ||
| 14 | extern struct cache_info L1_cache_info[2]; | 14 | extern struct cache_info L1_cache_info[2]; |
| 15 | 15 | ||
| 16 | #ifndef CONFIG_CPU_CACHE_ALIASING | 16 | void flush_icache_range(unsigned long start, unsigned long end) |
| 17 | { | ||
| 18 | unsigned long line_size, flags; | ||
| 19 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 20 | start = start & ~(line_size - 1); | ||
| 21 | end = (end + line_size - 1) & ~(line_size - 1); | ||
| 22 | local_irq_save(flags); | ||
| 23 | cpu_cache_wbinval_range(start, end, 1); | ||
| 24 | local_irq_restore(flags); | ||
| 25 | } | ||
| 26 | EXPORT_SYMBOL(flush_icache_range); | ||
| 27 | |||
| 28 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | ||
| 29 | { | ||
| 30 | unsigned long flags; | ||
| 31 | unsigned long kaddr; | ||
| 32 | local_irq_save(flags); | ||
| 33 | kaddr = (unsigned long)kmap_atomic(page); | ||
| 34 | cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC); | ||
| 35 | kunmap_atomic((void *)kaddr); | ||
| 36 | local_irq_restore(flags); | ||
| 37 | } | ||
| 38 | EXPORT_SYMBOL(flush_icache_page); | ||
| 39 | |||
| 40 | void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | ||
| 41 | unsigned long addr, int len) | ||
| 42 | { | ||
| 43 | unsigned long kaddr; | ||
| 44 | kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK); | ||
| 45 | flush_icache_range(kaddr, kaddr + len); | ||
| 46 | kunmap_atomic((void *)kaddr); | ||
| 47 | } | ||
| 48 | |||
| 17 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, | 49 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, |
| 18 | pte_t * pte) | 50 | pte_t * pte) |
| 19 | { | 51 | { |
| @@ -35,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, | |||
| 35 | 67 | ||
| 36 | if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) || | 68 | if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) || |
| 37 | (vma->vm_flags & VM_EXEC)) { | 69 | (vma->vm_flags & VM_EXEC)) { |
| 38 | 70 | unsigned long kaddr; | |
| 39 | if (!PageHighMem(page)) { | 71 | local_irq_save(flags); |
| 40 | cpu_cache_wbinval_page((unsigned long) | 72 | kaddr = (unsigned long)kmap_atomic(page); |
| 41 | page_address(page), | 73 | cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC); |
| 42 | vma->vm_flags & VM_EXEC); | 74 | kunmap_atomic((void *)kaddr); |
| 43 | } else { | 75 | local_irq_restore(flags); |
| 44 | unsigned long kaddr = (unsigned long)kmap_atomic(page); | ||
| 45 | cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC); | ||
| 46 | kunmap_atomic((void *)kaddr); | ||
| 47 | } | ||
| 48 | } | 76 | } |
| 49 | } | 77 | } |
| 50 | #else | 78 | #ifdef CONFIG_CPU_CACHE_ALIASING |
| 51 | extern pte_t va_present(struct mm_struct *mm, unsigned long addr); | 79 | extern pte_t va_present(struct mm_struct *mm, unsigned long addr); |
| 52 | 80 | ||
| 53 | static inline unsigned long aliasing(unsigned long addr, unsigned long page) | 81 | static inline unsigned long aliasing(unsigned long addr, unsigned long page) |
| @@ -317,52 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size) | |||
| 317 | local_irq_restore(flags); | 345 | local_irq_restore(flags); |
| 318 | } | 346 | } |
| 319 | EXPORT_SYMBOL(invalidate_kernel_vmap_range); | 347 | EXPORT_SYMBOL(invalidate_kernel_vmap_range); |
| 320 | |||
| 321 | void flush_icache_range(unsigned long start, unsigned long end) | ||
| 322 | { | ||
| 323 | unsigned long line_size, flags; | ||
| 324 | line_size = L1_cache_info[DCACHE].line_size; | ||
| 325 | start = start & ~(line_size - 1); | ||
| 326 | end = (end + line_size - 1) & ~(line_size - 1); | ||
| 327 | local_irq_save(flags); | ||
| 328 | cpu_cache_wbinval_range(start, end, 1); | ||
| 329 | local_irq_restore(flags); | ||
| 330 | } | ||
| 331 | EXPORT_SYMBOL(flush_icache_range); | ||
| 332 | |||
| 333 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | ||
| 334 | { | ||
| 335 | unsigned long flags; | ||
| 336 | local_irq_save(flags); | ||
| 337 | cpu_cache_wbinval_page((unsigned long)page_address(page), | ||
| 338 | vma->vm_flags & VM_EXEC); | ||
| 339 | local_irq_restore(flags); | ||
| 340 | } | ||
| 341 | |||
| 342 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, | ||
| 343 | pte_t * pte) | ||
| 344 | { | ||
| 345 | struct page *page; | ||
| 346 | unsigned long flags; | ||
| 347 | unsigned long pfn = pte_pfn(*pte); | ||
| 348 | |||
| 349 | if (!pfn_valid(pfn)) | ||
| 350 | return; | ||
| 351 | |||
| 352 | if (vma->vm_mm == current->active_mm) { | ||
| 353 | local_irq_save(flags); | ||
| 354 | __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN); | ||
| 355 | __nds32__tlbop_rwr(*pte); | ||
| 356 | __nds32__isb(); | ||
| 357 | local_irq_restore(flags); | ||
| 358 | } | ||
| 359 | |||
| 360 | page = pfn_to_page(pfn); | ||
| 361 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags) || | ||
| 362 | (vma->vm_flags & VM_EXEC)) { | ||
| 363 | local_irq_save(flags); | ||
| 364 | cpu_dcache_wbinval_page((unsigned long)page_address(page)); | ||
| 365 | local_irq_restore(flags); | ||
| 366 | } | ||
| 367 | } | ||
| 368 | #endif | 348 | #endif |
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 2ea575cb3401..fb96206de317 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile | |||
| @@ -243,6 +243,7 @@ endif | |||
| 243 | cpu-as-$(CONFIG_4xx) += -Wa,-m405 | 243 | cpu-as-$(CONFIG_4xx) += -Wa,-m405 |
| 244 | cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) | 244 | cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) |
| 245 | cpu-as-$(CONFIG_E200) += -Wa,-me200 | 245 | cpu-as-$(CONFIG_E200) += -Wa,-me200 |
| 246 | cpu-as-$(CONFIG_E500) += -Wa,-me500 | ||
| 246 | cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 | 247 | cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 |
| 247 | cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) | 248 | cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) |
| 248 | 249 | ||
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 896efa559996..79d570cbf332 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h | |||
| @@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( | |||
| 35 | extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, | 35 | extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, |
| 36 | unsigned long ua, unsigned long entries); | 36 | unsigned long ua, unsigned long entries); |
| 37 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | 37 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
| 38 | unsigned long ua, unsigned long *hpa); | 38 | unsigned long ua, unsigned int pageshift, unsigned long *hpa); |
| 39 | extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, | 39 | extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
| 40 | unsigned long ua, unsigned long *hpa); | 40 | unsigned long ua, unsigned int pageshift, unsigned long *hpa); |
| 41 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); | 41 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); |
| 42 | extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); | 42 | extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); |
| 43 | #endif | 43 | #endif |
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index e734f6e45abc..689306118b48 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S | |||
| @@ -144,7 +144,9 @@ power9_restore_additional_sprs: | |||
| 144 | mtspr SPRN_MMCR1, r4 | 144 | mtspr SPRN_MMCR1, r4 |
| 145 | 145 | ||
| 146 | ld r3, STOP_MMCR2(r13) | 146 | ld r3, STOP_MMCR2(r13) |
| 147 | ld r4, PACA_SPRG_VDSO(r13) | ||
| 147 | mtspr SPRN_MMCR2, r3 | 148 | mtspr SPRN_MMCR2, r3 |
| 149 | mtspr SPRN_SPRG3, r4 | ||
| 148 | blr | 150 | blr |
| 149 | 151 | ||
| 150 | /* | 152 | /* |
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index d066e37551ec..8c456fa691a5 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c | |||
| @@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, | |||
| 449 | /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ | 449 | /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ |
| 450 | return H_TOO_HARD; | 450 | return H_TOO_HARD; |
| 451 | 451 | ||
| 452 | if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa))) | 452 | if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) |
| 453 | return H_HARDWARE; | 453 | return H_HARDWARE; |
| 454 | 454 | ||
| 455 | if (mm_iommu_mapped_inc(mem)) | 455 | if (mm_iommu_mapped_inc(mem)) |
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 925fc316a104..5b298f5a1a14 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c | |||
| @@ -279,7 +279,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, | |||
| 279 | if (!mem) | 279 | if (!mem) |
| 280 | return H_TOO_HARD; | 280 | return H_TOO_HARD; |
| 281 | 281 | ||
| 282 | if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa))) | 282 | if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, |
| 283 | &hpa))) | ||
| 283 | return H_HARDWARE; | 284 | return H_HARDWARE; |
| 284 | 285 | ||
| 285 | pua = (void *) vmalloc_to_phys(pua); | 286 | pua = (void *) vmalloc_to_phys(pua); |
| @@ -469,7 +470,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, | |||
| 469 | 470 | ||
| 470 | mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); | 471 | mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); |
| 471 | if (mem) | 472 | if (mem) |
| 472 | prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0; | 473 | prereg = mm_iommu_ua_to_hpa_rm(mem, ua, |
| 474 | IOMMU_PAGE_SHIFT_4K, &tces) == 0; | ||
| 473 | } | 475 | } |
| 474 | 476 | ||
| 475 | if (!prereg) { | 477 | if (!prereg) { |
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index abb43646927a..a4ca57612558 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/hugetlb.h> | 19 | #include <linux/hugetlb.h> |
| 20 | #include <linux/swap.h> | 20 | #include <linux/swap.h> |
| 21 | #include <asm/mmu_context.h> | 21 | #include <asm/mmu_context.h> |
| 22 | #include <asm/pte-walk.h> | ||
| 22 | 23 | ||
| 23 | static DEFINE_MUTEX(mem_list_mutex); | 24 | static DEFINE_MUTEX(mem_list_mutex); |
| 24 | 25 | ||
| @@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t { | |||
| 27 | struct rcu_head rcu; | 28 | struct rcu_head rcu; |
| 28 | unsigned long used; | 29 | unsigned long used; |
| 29 | atomic64_t mapped; | 30 | atomic64_t mapped; |
| 31 | unsigned int pageshift; | ||
| 30 | u64 ua; /* userspace address */ | 32 | u64 ua; /* userspace address */ |
| 31 | u64 entries; /* number of entries in hpas[] */ | 33 | u64 entries; /* number of entries in hpas[] */ |
| 32 | u64 *hpas; /* vmalloc'ed */ | 34 | u64 *hpas; /* vmalloc'ed */ |
| @@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, | |||
| 125 | { | 127 | { |
| 126 | struct mm_iommu_table_group_mem_t *mem; | 128 | struct mm_iommu_table_group_mem_t *mem; |
| 127 | long i, j, ret = 0, locked_entries = 0; | 129 | long i, j, ret = 0, locked_entries = 0; |
| 130 | unsigned int pageshift; | ||
| 131 | unsigned long flags; | ||
| 128 | struct page *page = NULL; | 132 | struct page *page = NULL; |
| 129 | 133 | ||
| 130 | mutex_lock(&mem_list_mutex); | 134 | mutex_lock(&mem_list_mutex); |
| @@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, | |||
| 159 | goto unlock_exit; | 163 | goto unlock_exit; |
| 160 | } | 164 | } |
| 161 | 165 | ||
| 166 | /* | ||
| 167 | * For a starting point for a maximum page size calculation | ||
| 168 | * we use @ua and @entries natural alignment to allow IOMMU pages | ||
| 169 | * smaller than huge pages but still bigger than PAGE_SIZE. | ||
| 170 | */ | ||
| 171 | mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); | ||
| 162 | mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); | 172 | mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); |
| 163 | if (!mem->hpas) { | 173 | if (!mem->hpas) { |
| 164 | kfree(mem); | 174 | kfree(mem); |
| @@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, | |||
| 199 | } | 209 | } |
| 200 | } | 210 | } |
| 201 | populate: | 211 | populate: |
| 212 | pageshift = PAGE_SHIFT; | ||
| 213 | if (PageCompound(page)) { | ||
| 214 | pte_t *pte; | ||
| 215 | struct page *head = compound_head(page); | ||
| 216 | unsigned int compshift = compound_order(head); | ||
| 217 | |||
| 218 | local_irq_save(flags); /* disables as well */ | ||
| 219 | pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift); | ||
| 220 | local_irq_restore(flags); | ||
| 221 | |||
| 222 | /* Double check it is still the same pinned page */ | ||
| 223 | if (pte && pte_page(*pte) == head && | ||
| 224 | pageshift == compshift) | ||
| 225 | pageshift = max_t(unsigned int, pageshift, | ||
| 226 | PAGE_SHIFT); | ||
| 227 | } | ||
| 228 | mem->pageshift = min(mem->pageshift, pageshift); | ||
| 202 | mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; | 229 | mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; |
| 203 | } | 230 | } |
| 204 | 231 | ||
| @@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, | |||
| 349 | EXPORT_SYMBOL_GPL(mm_iommu_find); | 376 | EXPORT_SYMBOL_GPL(mm_iommu_find); |
| 350 | 377 | ||
| 351 | long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | 378 | long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
| 352 | unsigned long ua, unsigned long *hpa) | 379 | unsigned long ua, unsigned int pageshift, unsigned long *hpa) |
| 353 | { | 380 | { |
| 354 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; | 381 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; |
| 355 | u64 *va = &mem->hpas[entry]; | 382 | u64 *va = &mem->hpas[entry]; |
| @@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | |||
| 357 | if (entry >= mem->entries) | 384 | if (entry >= mem->entries) |
| 358 | return -EFAULT; | 385 | return -EFAULT; |
| 359 | 386 | ||
| 387 | if (pageshift > mem->pageshift) | ||
| 388 | return -EFAULT; | ||
| 389 | |||
| 360 | *hpa = *va | (ua & ~PAGE_MASK); | 390 | *hpa = *va | (ua & ~PAGE_MASK); |
| 361 | 391 | ||
| 362 | return 0; | 392 | return 0; |
| @@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, | |||
| 364 | EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); | 394 | EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); |
| 365 | 395 | ||
| 366 | long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, | 396 | long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
| 367 | unsigned long ua, unsigned long *hpa) | 397 | unsigned long ua, unsigned int pageshift, unsigned long *hpa) |
| 368 | { | 398 | { |
| 369 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; | 399 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; |
| 370 | void *va = &mem->hpas[entry]; | 400 | void *va = &mem->hpas[entry]; |
| @@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, | |||
| 373 | if (entry >= mem->entries) | 403 | if (entry >= mem->entries) |
| 374 | return -EFAULT; | 404 | return -EFAULT; |
| 375 | 405 | ||
| 406 | if (pageshift > mem->pageshift) | ||
| 407 | return -EFAULT; | ||
| 408 | |||
| 376 | pa = (void *) vmalloc_to_phys(va); | 409 | pa = (void *) vmalloc_to_phys(va); |
| 377 | if (!pa) | 410 | if (!pa) |
| 378 | return -EFAULT; | 411 | return -EFAULT; |
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 47166ad2a669..196978733e64 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
| @@ -2734,7 +2734,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, | |||
| 2734 | { | 2734 | { |
| 2735 | int nr, dotted; | 2735 | int nr, dotted; |
| 2736 | unsigned long first_adr; | 2736 | unsigned long first_adr; |
| 2737 | unsigned long inst, last_inst = 0; | 2737 | unsigned int inst, last_inst = 0; |
| 2738 | unsigned char val[4]; | 2738 | unsigned char val[4]; |
| 2739 | 2739 | ||
| 2740 | dotted = 0; | 2740 | dotted = 0; |
| @@ -2758,7 +2758,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, | |||
| 2758 | dotted = 0; | 2758 | dotted = 0; |
| 2759 | last_inst = inst; | 2759 | last_inst = inst; |
| 2760 | if (praddr) | 2760 | if (praddr) |
| 2761 | printf(REG" %.8lx", adr, inst); | 2761 | printf(REG" %.8x", adr, inst); |
| 2762 | printf("\t"); | 2762 | printf("\t"); |
| 2763 | dump_func(inst, adr); | 2763 | dump_func(inst, adr); |
| 2764 | printf("\n"); | 2764 | printf("\n"); |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f1dbb4ee19d7..887d3a7bb646 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -63,7 +63,7 @@ config X86 | |||
| 63 | select ARCH_HAS_PTE_SPECIAL | 63 | select ARCH_HAS_PTE_SPECIAL |
| 64 | select ARCH_HAS_REFCOUNT | 64 | select ARCH_HAS_REFCOUNT |
| 65 | select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 | 65 | select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 |
| 66 | select ARCH_HAS_UACCESS_MCSAFE if X86_64 | 66 | select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE |
| 67 | select ARCH_HAS_SET_MEMORY | 67 | select ARCH_HAS_SET_MEMORY |
| 68 | select ARCH_HAS_SG_CHAIN | 68 | select ARCH_HAS_SG_CHAIN |
| 69 | select ARCH_HAS_STRICT_KERNEL_RWX | 69 | select ARCH_HAS_STRICT_KERNEL_RWX |
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 8a10a045b57b..8cf03f101938 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c | |||
| @@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu) | |||
| 408 | ds->bts_buffer_base = (unsigned long) cea; | 408 | ds->bts_buffer_base = (unsigned long) cea; |
| 409 | ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); | 409 | ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); |
| 410 | ds->bts_index = ds->bts_buffer_base; | 410 | ds->bts_index = ds->bts_buffer_base; |
| 411 | max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE); | 411 | max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; |
| 412 | ds->bts_absolute_maximum = ds->bts_buffer_base + max; | 412 | ds->bts_absolute_maximum = ds->bts_buffer_base + |
| 413 | ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16); | 413 | max * BTS_RECORD_SIZE; |
| 414 | ds->bts_interrupt_threshold = ds->bts_absolute_maximum - | ||
| 415 | (max / 16) * BTS_RECORD_SIZE; | ||
| 414 | return 0; | 416 | return 0; |
| 415 | } | 417 | } |
| 416 | 418 | ||
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index c356098b6fb9..4d4015ddcf26 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h | |||
| @@ -7,8 +7,6 @@ | |||
| 7 | #ifndef _ASM_X86_MACH_DEFAULT_APM_H | 7 | #ifndef _ASM_X86_MACH_DEFAULT_APM_H |
| 8 | #define _ASM_X86_MACH_DEFAULT_APM_H | 8 | #define _ASM_X86_MACH_DEFAULT_APM_H |
| 9 | 9 | ||
| 10 | #include <asm/nospec-branch.h> | ||
| 11 | |||
| 12 | #ifdef APM_ZERO_SEGS | 10 | #ifdef APM_ZERO_SEGS |
| 13 | # define APM_DO_ZERO_SEGS \ | 11 | # define APM_DO_ZERO_SEGS \ |
| 14 | "pushl %%ds\n\t" \ | 12 | "pushl %%ds\n\t" \ |
| @@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, | |||
| 34 | * N.B. We do NOT need a cld after the BIOS call | 32 | * N.B. We do NOT need a cld after the BIOS call |
| 35 | * because we always save and restore the flags. | 33 | * because we always save and restore the flags. |
| 36 | */ | 34 | */ |
| 37 | firmware_restrict_branch_speculation_start(); | ||
| 38 | __asm__ __volatile__(APM_DO_ZERO_SEGS | 35 | __asm__ __volatile__(APM_DO_ZERO_SEGS |
| 39 | "pushl %%edi\n\t" | 36 | "pushl %%edi\n\t" |
| 40 | "pushl %%ebp\n\t" | 37 | "pushl %%ebp\n\t" |
| @@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, | |||
| 47 | "=S" (*esi) | 44 | "=S" (*esi) |
| 48 | : "a" (func), "b" (ebx_in), "c" (ecx_in) | 45 | : "a" (func), "b" (ebx_in), "c" (ecx_in) |
| 49 | : "memory", "cc"); | 46 | : "memory", "cc"); |
| 50 | firmware_restrict_branch_speculation_end(); | ||
| 51 | } | 47 | } |
| 52 | 48 | ||
| 53 | static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | 49 | static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, |
| @@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | |||
| 60 | * N.B. We do NOT need a cld after the BIOS call | 56 | * N.B. We do NOT need a cld after the BIOS call |
| 61 | * because we always save and restore the flags. | 57 | * because we always save and restore the flags. |
| 62 | */ | 58 | */ |
| 63 | firmware_restrict_branch_speculation_start(); | ||
| 64 | __asm__ __volatile__(APM_DO_ZERO_SEGS | 59 | __asm__ __volatile__(APM_DO_ZERO_SEGS |
| 65 | "pushl %%edi\n\t" | 60 | "pushl %%edi\n\t" |
| 66 | "pushl %%ebp\n\t" | 61 | "pushl %%ebp\n\t" |
| @@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | |||
| 73 | "=S" (si) | 68 | "=S" (si) |
| 74 | : "a" (func), "b" (ebx_in), "c" (ecx_in) | 69 | : "a" (func), "b" (ebx_in), "c" (ecx_in) |
| 75 | : "memory", "cc"); | 70 | : "memory", "cc"); |
| 76 | firmware_restrict_branch_speculation_end(); | ||
| 77 | return error; | 71 | return error; |
| 78 | } | 72 | } |
| 79 | 73 | ||
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 62acb613114b..a9d637bc301d 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
| @@ -52,7 +52,12 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len) | |||
| 52 | unsigned long ret; | 52 | unsigned long ret; |
| 53 | 53 | ||
| 54 | __uaccess_begin(); | 54 | __uaccess_begin(); |
| 55 | ret = memcpy_mcsafe(to, from, len); | 55 | /* |
| 56 | * Note, __memcpy_mcsafe() is explicitly used since it can | ||
| 57 | * handle exceptions / faults. memcpy_mcsafe() may fall back to | ||
| 58 | * memcpy() which lacks this handling. | ||
| 59 | */ | ||
| 60 | ret = __memcpy_mcsafe(to, from, len); | ||
| 56 | __uaccess_end(); | 61 | __uaccess_end(); |
| 57 | return ret; | 62 | return ret; |
| 58 | } | 63 | } |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 5d0de79fdab0..ec00d1ff5098 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
| @@ -240,6 +240,7 @@ | |||
| 240 | #include <asm/olpc.h> | 240 | #include <asm/olpc.h> |
| 241 | #include <asm/paravirt.h> | 241 | #include <asm/paravirt.h> |
| 242 | #include <asm/reboot.h> | 242 | #include <asm/reboot.h> |
| 243 | #include <asm/nospec-branch.h> | ||
| 243 | 244 | ||
| 244 | #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) | 245 | #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) |
| 245 | extern int (*console_blank_hook)(int); | 246 | extern int (*console_blank_hook)(int); |
| @@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call) | |||
| 614 | gdt[0x40 / 8] = bad_bios_desc; | 615 | gdt[0x40 / 8] = bad_bios_desc; |
| 615 | 616 | ||
| 616 | apm_irq_save(flags); | 617 | apm_irq_save(flags); |
| 618 | firmware_restrict_branch_speculation_start(); | ||
| 617 | APM_DO_SAVE_SEGS; | 619 | APM_DO_SAVE_SEGS; |
| 618 | apm_bios_call_asm(call->func, call->ebx, call->ecx, | 620 | apm_bios_call_asm(call->func, call->ebx, call->ecx, |
| 619 | &call->eax, &call->ebx, &call->ecx, &call->edx, | 621 | &call->eax, &call->ebx, &call->ecx, &call->edx, |
| 620 | &call->esi); | 622 | &call->esi); |
| 621 | APM_DO_RESTORE_SEGS; | 623 | APM_DO_RESTORE_SEGS; |
| 624 | firmware_restrict_branch_speculation_end(); | ||
| 622 | apm_irq_restore(flags); | 625 | apm_irq_restore(flags); |
| 623 | gdt[0x40 / 8] = save_desc_40; | 626 | gdt[0x40 / 8] = save_desc_40; |
| 624 | put_cpu(); | 627 | put_cpu(); |
| @@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call) | |||
| 690 | gdt[0x40 / 8] = bad_bios_desc; | 693 | gdt[0x40 / 8] = bad_bios_desc; |
| 691 | 694 | ||
| 692 | apm_irq_save(flags); | 695 | apm_irq_save(flags); |
| 696 | firmware_restrict_branch_speculation_start(); | ||
| 693 | APM_DO_SAVE_SEGS; | 697 | APM_DO_SAVE_SEGS; |
| 694 | error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx, | 698 | error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx, |
| 695 | &call->eax); | 699 | &call->eax); |
| 696 | APM_DO_RESTORE_SEGS; | 700 | APM_DO_RESTORE_SEGS; |
| 701 | firmware_restrict_branch_speculation_end(); | ||
| 697 | apm_irq_restore(flags); | 702 | apm_irq_restore(flags); |
| 698 | gdt[0x40 / 8] = save_desc_40; | 703 | gdt[0x40 / 8] = save_desc_40; |
| 699 | put_cpu(); | 704 | put_cpu(); |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index bf8d1eb7fca3..3b8e7c13c614 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
| @@ -138,6 +138,7 @@ static unsigned long kvm_get_tsc_khz(void) | |||
| 138 | src = &hv_clock[cpu].pvti; | 138 | src = &hv_clock[cpu].pvti; |
| 139 | tsc_khz = pvclock_tsc_khz(src); | 139 | tsc_khz = pvclock_tsc_khz(src); |
| 140 | put_cpu(); | 140 | put_cpu(); |
| 141 | setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); | ||
| 141 | return tsc_khz; | 142 | return tsc_khz; |
| 142 | } | 143 | } |
| 143 | 144 | ||
| @@ -319,6 +320,8 @@ void __init kvmclock_init(void) | |||
| 319 | printk(KERN_INFO "kvm-clock: Using msrs %x and %x", | 320 | printk(KERN_INFO "kvm-clock: Using msrs %x and %x", |
| 320 | msr_kvm_system_time, msr_kvm_wall_clock); | 321 | msr_kvm_system_time, msr_kvm_wall_clock); |
| 321 | 322 | ||
| 323 | pvclock_set_pvti_cpu0_va(hv_clock); | ||
| 324 | |||
| 322 | if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT)) | 325 | if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT)) |
| 323 | pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT); | 326 | pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT); |
| 324 | 327 | ||
| @@ -366,14 +369,11 @@ int __init kvm_setup_vsyscall_timeinfo(void) | |||
| 366 | vcpu_time = &hv_clock[cpu].pvti; | 369 | vcpu_time = &hv_clock[cpu].pvti; |
| 367 | flags = pvclock_read_flags(vcpu_time); | 370 | flags = pvclock_read_flags(vcpu_time); |
| 368 | 371 | ||
| 369 | if (!(flags & PVCLOCK_TSC_STABLE_BIT)) { | ||
| 370 | put_cpu(); | ||
| 371 | return 1; | ||
| 372 | } | ||
| 373 | |||
| 374 | pvclock_set_pvti_cpu0_va(hv_clock); | ||
| 375 | put_cpu(); | 372 | put_cpu(); |
| 376 | 373 | ||
| 374 | if (!(flags & PVCLOCK_TSC_STABLE_BIT)) | ||
| 375 | return 1; | ||
| 376 | |||
| 377 | kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; | 377 | kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; |
| 378 | #endif | 378 | #endif |
| 379 | return 0; | 379 | return 0; |
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 92fd433c50b9..1bbec387d289 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig | |||
| @@ -85,7 +85,7 @@ config KVM_AMD_SEV | |||
| 85 | def_bool y | 85 | def_bool y |
| 86 | bool "AMD Secure Encrypted Virtualization (SEV) support" | 86 | bool "AMD Secure Encrypted Virtualization (SEV) support" |
| 87 | depends on KVM_AMD && X86_64 | 87 | depends on KVM_AMD && X86_64 |
| 88 | depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP | 88 | depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m) |
| 89 | ---help--- | 89 | ---help--- |
| 90 | Provides support for launching Encrypted VMs on AMD processors. | 90 | Provides support for launching Encrypted VMs on AMD processors. |
| 91 | 91 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1689f433f3a0..e30da9a2430c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -2571,6 +2571,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
| 2571 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2571 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2572 | #ifdef CONFIG_X86_64 | 2572 | #ifdef CONFIG_X86_64 |
| 2573 | int cpu = raw_smp_processor_id(); | 2573 | int cpu = raw_smp_processor_id(); |
| 2574 | unsigned long fs_base, kernel_gs_base; | ||
| 2574 | #endif | 2575 | #endif |
| 2575 | int i; | 2576 | int i; |
| 2576 | 2577 | ||
| @@ -2586,12 +2587,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
| 2586 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; | 2587 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; |
| 2587 | 2588 | ||
| 2588 | #ifdef CONFIG_X86_64 | 2589 | #ifdef CONFIG_X86_64 |
| 2589 | save_fsgs_for_kvm(); | 2590 | if (likely(is_64bit_mm(current->mm))) { |
| 2590 | vmx->host_state.fs_sel = current->thread.fsindex; | 2591 | save_fsgs_for_kvm(); |
| 2591 | vmx->host_state.gs_sel = current->thread.gsindex; | 2592 | vmx->host_state.fs_sel = current->thread.fsindex; |
| 2592 | #else | 2593 | vmx->host_state.gs_sel = current->thread.gsindex; |
| 2593 | savesegment(fs, vmx->host_state.fs_sel); | 2594 | fs_base = current->thread.fsbase; |
| 2594 | savesegment(gs, vmx->host_state.gs_sel); | 2595 | kernel_gs_base = current->thread.gsbase; |
| 2596 | } else { | ||
| 2597 | #endif | ||
| 2598 | savesegment(fs, vmx->host_state.fs_sel); | ||
| 2599 | savesegment(gs, vmx->host_state.gs_sel); | ||
| 2600 | #ifdef CONFIG_X86_64 | ||
| 2601 | fs_base = read_msr(MSR_FS_BASE); | ||
| 2602 | kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); | ||
| 2603 | } | ||
| 2595 | #endif | 2604 | #endif |
| 2596 | if (!(vmx->host_state.fs_sel & 7)) { | 2605 | if (!(vmx->host_state.fs_sel & 7)) { |
| 2597 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); | 2606 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); |
| @@ -2611,10 +2620,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
| 2611 | savesegment(ds, vmx->host_state.ds_sel); | 2620 | savesegment(ds, vmx->host_state.ds_sel); |
| 2612 | savesegment(es, vmx->host_state.es_sel); | 2621 | savesegment(es, vmx->host_state.es_sel); |
| 2613 | 2622 | ||
| 2614 | vmcs_writel(HOST_FS_BASE, current->thread.fsbase); | 2623 | vmcs_writel(HOST_FS_BASE, fs_base); |
| 2615 | vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu)); | 2624 | vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu)); |
| 2616 | 2625 | ||
| 2617 | vmx->msr_host_kernel_gs_base = current->thread.gsbase; | 2626 | vmx->msr_host_kernel_gs_base = kernel_gs_base; |
| 2618 | if (is_long_mode(&vmx->vcpu)) | 2627 | if (is_long_mode(&vmx->vcpu)) |
| 2619 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | 2628 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
| 2620 | #else | 2629 | #else |
| @@ -4322,11 +4331,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | |||
| 4322 | vmcs_conf->order = get_order(vmcs_conf->size); | 4331 | vmcs_conf->order = get_order(vmcs_conf->size); |
| 4323 | vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; | 4332 | vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; |
| 4324 | 4333 | ||
| 4325 | /* KVM supports Enlightened VMCS v1 only */ | 4334 | vmcs_conf->revision_id = vmx_msr_low; |
| 4326 | if (static_branch_unlikely(&enable_evmcs)) | ||
| 4327 | vmcs_conf->revision_id = KVM_EVMCS_VERSION; | ||
| 4328 | else | ||
| 4329 | vmcs_conf->revision_id = vmx_msr_low; | ||
| 4330 | 4335 | ||
| 4331 | vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; | 4336 | vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; |
| 4332 | vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; | 4337 | vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; |
| @@ -4396,7 +4401,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) | |||
| 4396 | return NULL; | 4401 | return NULL; |
| 4397 | vmcs = page_address(pages); | 4402 | vmcs = page_address(pages); |
| 4398 | memset(vmcs, 0, vmcs_config.size); | 4403 | memset(vmcs, 0, vmcs_config.size); |
| 4399 | vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ | 4404 | |
| 4405 | /* KVM supports Enlightened VMCS v1 only */ | ||
| 4406 | if (static_branch_unlikely(&enable_evmcs)) | ||
| 4407 | vmcs->revision_id = KVM_EVMCS_VERSION; | ||
| 4408 | else | ||
| 4409 | vmcs->revision_id = vmcs_config.revision_id; | ||
| 4410 | |||
| 4400 | return vmcs; | 4411 | return vmcs; |
| 4401 | } | 4412 | } |
| 4402 | 4413 | ||
| @@ -4564,6 +4575,19 @@ static __init int alloc_kvm_area(void) | |||
| 4564 | return -ENOMEM; | 4575 | return -ENOMEM; |
| 4565 | } | 4576 | } |
| 4566 | 4577 | ||
| 4578 | /* | ||
| 4579 | * When eVMCS is enabled, alloc_vmcs_cpu() sets | ||
| 4580 | * vmcs->revision_id to KVM_EVMCS_VERSION instead of | ||
| 4581 | * revision_id reported by MSR_IA32_VMX_BASIC. | ||
| 4582 | * | ||
| 4583 | * However, even though not explictly documented by | ||
| 4584 | * TLFS, VMXArea passed as VMXON argument should | ||
| 4585 | * still be marked with revision_id reported by | ||
| 4586 | * physical CPU. | ||
| 4587 | */ | ||
| 4588 | if (static_branch_unlikely(&enable_evmcs)) | ||
| 4589 | vmcs->revision_id = vmcs_config.revision_id; | ||
| 4590 | |||
| 4567 | per_cpu(vmxarea, cpu) = vmcs; | 4591 | per_cpu(vmxarea, cpu) = vmcs; |
| 4568 | } | 4592 | } |
| 4569 | return 0; | 4593 | return 0; |
| @@ -11753,7 +11777,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu) | |||
| 11753 | { | 11777 | { |
| 11754 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 11778 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 11755 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | 11779 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
| 11756 | u32 msr_entry_idx; | ||
| 11757 | u32 exit_qual; | 11780 | u32 exit_qual; |
| 11758 | int r; | 11781 | int r; |
| 11759 | 11782 | ||
| @@ -11775,10 +11798,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu) | |||
| 11775 | nested_get_vmcs12_pages(vcpu, vmcs12); | 11798 | nested_get_vmcs12_pages(vcpu, vmcs12); |
| 11776 | 11799 | ||
| 11777 | r = EXIT_REASON_MSR_LOAD_FAIL; | 11800 | r = EXIT_REASON_MSR_LOAD_FAIL; |
| 11778 | msr_entry_idx = nested_vmx_load_msr(vcpu, | 11801 | exit_qual = nested_vmx_load_msr(vcpu, |
| 11779 | vmcs12->vm_entry_msr_load_addr, | 11802 | vmcs12->vm_entry_msr_load_addr, |
| 11780 | vmcs12->vm_entry_msr_load_count); | 11803 | vmcs12->vm_entry_msr_load_count); |
| 11781 | if (msr_entry_idx) | 11804 | if (exit_qual) |
| 11782 | goto fail; | 11805 | goto fail; |
| 11783 | 11806 | ||
| 11784 | /* | 11807 | /* |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0046aa70205a..2b812b3c5088 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -1097,6 +1097,7 @@ static u32 msr_based_features[] = { | |||
| 1097 | 1097 | ||
| 1098 | MSR_F10H_DECFG, | 1098 | MSR_F10H_DECFG, |
| 1099 | MSR_IA32_UCODE_REV, | 1099 | MSR_IA32_UCODE_REV, |
| 1100 | MSR_IA32_ARCH_CAPABILITIES, | ||
| 1100 | }; | 1101 | }; |
| 1101 | 1102 | ||
| 1102 | static unsigned int num_msr_based_features; | 1103 | static unsigned int num_msr_based_features; |
| @@ -1105,7 +1106,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr) | |||
| 1105 | { | 1106 | { |
| 1106 | switch (msr->index) { | 1107 | switch (msr->index) { |
| 1107 | case MSR_IA32_UCODE_REV: | 1108 | case MSR_IA32_UCODE_REV: |
| 1108 | rdmsrl(msr->index, msr->data); | 1109 | case MSR_IA32_ARCH_CAPABILITIES: |
| 1110 | rdmsrl_safe(msr->index, &msr->data); | ||
| 1109 | break; | 1111 | break; |
| 1110 | default: | 1112 | default: |
| 1111 | if (kvm_x86_ops->get_msr_feature(msr)) | 1113 | if (kvm_x86_ops->get_msr_feature(msr)) |
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 314c52c967e5..c166f424871c 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
| @@ -1155,8 +1155,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, | |||
| 1155 | 1155 | ||
| 1156 | /* make one iovec available as scatterlist */ | 1156 | /* make one iovec available as scatterlist */ |
| 1157 | err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); | 1157 | err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); |
| 1158 | if (err < 0) | 1158 | if (err < 0) { |
| 1159 | rsgl->sg_num_bytes = 0; | ||
| 1159 | return err; | 1160 | return err; |
| 1161 | } | ||
| 1160 | 1162 | ||
| 1161 | /* chain the new scatterlist with previous one */ | 1163 | /* chain the new scatterlist with previous one */ |
| 1162 | if (areq->last_rsgl) | 1164 | if (areq->last_rsgl) |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 442a9e24f439..917f77f4cb55 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -2042,7 +2042,7 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = { | |||
| 2042 | .ident = "Thinkpad X1 Carbon 6th", | 2042 | .ident = "Thinkpad X1 Carbon 6th", |
| 2043 | .matches = { | 2043 | .matches = { |
| 2044 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 2044 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
| 2045 | DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"), | 2045 | DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), |
| 2046 | }, | 2046 | }, |
| 2047 | }, | 2047 | }, |
| 2048 | { }, | 2048 | { }, |
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c index 53fe633df1e8..c9bf2c219841 100644 --- a/drivers/char/agp/alpha-agp.c +++ b/drivers/char/agp/alpha-agp.c | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | 11 | ||
| 12 | #include "agp.h" | 12 | #include "agp.h" |
| 13 | 13 | ||
| 14 | static int alpha_core_agp_vm_fault(struct vm_fault *vmf) | 14 | static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf) |
| 15 | { | 15 | { |
| 16 | alpha_agp_info *agp = agp_bridge->dev_private_data; | 16 | alpha_agp_info *agp = agp_bridge->dev_private_data; |
| 17 | dma_addr_t dma_addr; | 17 | dma_addr_t dma_addr; |
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index e50c29c97ca7..c69e39fdd02b 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
| @@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table) | |||
| 156 | 156 | ||
| 157 | /* Address to map to */ | 157 | /* Address to map to */ |
| 158 | pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp); | 158 | pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp); |
| 159 | aperturebase = tmp << 25; | 159 | aperturebase = (u64)tmp << 25; |
| 160 | aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); | 160 | aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); |
| 161 | 161 | ||
| 162 | enable_gart_translation(hammer, gatt_table); | 162 | enable_gart_translation(hammer, gatt_table); |
| @@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap) | |||
| 277 | pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order); | 277 | pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order); |
| 278 | nb_order = (nb_order >> 1) & 7; | 278 | nb_order = (nb_order >> 1) & 7; |
| 279 | pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base); | 279 | pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base); |
| 280 | nb_aper = nb_base << 25; | 280 | nb_aper = (u64)nb_base << 25; |
| 281 | 281 | ||
| 282 | /* Northbridge seems to contain crap. Try the AGP bridge. */ | 282 | /* Northbridge seems to contain crap. Try the AGP bridge. */ |
| 283 | 283 | ||
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ece120da3353..3c3971256130 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -2394,6 +2394,18 @@ static bool __init intel_pstate_no_acpi_pss(void) | |||
| 2394 | return true; | 2394 | return true; |
| 2395 | } | 2395 | } |
| 2396 | 2396 | ||
| 2397 | static bool __init intel_pstate_no_acpi_pcch(void) | ||
| 2398 | { | ||
| 2399 | acpi_status status; | ||
| 2400 | acpi_handle handle; | ||
| 2401 | |||
| 2402 | status = acpi_get_handle(NULL, "\\_SB", &handle); | ||
| 2403 | if (ACPI_FAILURE(status)) | ||
| 2404 | return true; | ||
| 2405 | |||
| 2406 | return !acpi_has_method(handle, "PCCH"); | ||
| 2407 | } | ||
| 2408 | |||
| 2397 | static bool __init intel_pstate_has_acpi_ppc(void) | 2409 | static bool __init intel_pstate_has_acpi_ppc(void) |
| 2398 | { | 2410 | { |
| 2399 | int i; | 2411 | int i; |
| @@ -2453,7 +2465,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) | |||
| 2453 | 2465 | ||
| 2454 | switch (plat_info[idx].data) { | 2466 | switch (plat_info[idx].data) { |
| 2455 | case PSS: | 2467 | case PSS: |
| 2456 | return intel_pstate_no_acpi_pss(); | 2468 | if (!intel_pstate_no_acpi_pss()) |
| 2469 | return false; | ||
| 2470 | |||
| 2471 | return intel_pstate_no_acpi_pcch(); | ||
| 2457 | case PPC: | 2472 | case PPC: |
| 2458 | return intel_pstate_has_acpi_ppc() && !force_load; | 2473 | return intel_pstate_has_acpi_ppc() && !force_load; |
| 2459 | } | 2474 | } |
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 3f0ce2ae35ee..0c56c9759672 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
| @@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void) | |||
| 580 | { | 580 | { |
| 581 | int ret; | 581 | int ret; |
| 582 | 582 | ||
| 583 | /* Skip initialization if another cpufreq driver is there. */ | ||
| 584 | if (cpufreq_get_current_driver()) | ||
| 585 | return 0; | ||
| 586 | |||
| 583 | if (acpi_disabled) | 587 | if (acpi_disabled) |
| 584 | return 0; | 588 | return 0; |
| 585 | 589 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index f4c474a95875..71efcf38f11b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | |||
| @@ -57,6 +57,10 @@ | |||
| 57 | #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 | 57 | #define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 |
| 58 | #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c | 58 | #define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c |
| 59 | #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 | 59 | #define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 |
| 60 | #define ACP_BT_PLAY_REGS_START 0x14970 | ||
| 61 | #define ACP_BT_PLAY_REGS_END 0x14a24 | ||
| 62 | #define ACP_BT_COMP1_REG_OFFSET 0xac | ||
| 63 | #define ACP_BT_COMP2_REG_OFFSET 0xa8 | ||
| 60 | 64 | ||
| 61 | #define mmACP_PGFSM_RETAIN_REG 0x51c9 | 65 | #define mmACP_PGFSM_RETAIN_REG 0x51c9 |
| 62 | #define mmACP_PGFSM_CONFIG_REG 0x51ca | 66 | #define mmACP_PGFSM_CONFIG_REG 0x51ca |
| @@ -77,7 +81,7 @@ | |||
| 77 | #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF | 81 | #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF |
| 78 | 82 | ||
| 79 | #define ACP_TIMEOUT_LOOP 0x000000FF | 83 | #define ACP_TIMEOUT_LOOP 0x000000FF |
| 80 | #define ACP_DEVS 3 | 84 | #define ACP_DEVS 4 |
| 81 | #define ACP_SRC_ID 162 | 85 | #define ACP_SRC_ID 162 |
| 82 | 86 | ||
| 83 | enum { | 87 | enum { |
| @@ -316,14 +320,13 @@ static int acp_hw_init(void *handle) | |||
| 316 | if (adev->acp.acp_cell == NULL) | 320 | if (adev->acp.acp_cell == NULL) |
| 317 | return -ENOMEM; | 321 | return -ENOMEM; |
| 318 | 322 | ||
| 319 | adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL); | 323 | adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); |
| 320 | |||
| 321 | if (adev->acp.acp_res == NULL) { | 324 | if (adev->acp.acp_res == NULL) { |
| 322 | kfree(adev->acp.acp_cell); | 325 | kfree(adev->acp.acp_cell); |
| 323 | return -ENOMEM; | 326 | return -ENOMEM; |
| 324 | } | 327 | } |
| 325 | 328 | ||
| 326 | i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL); | 329 | i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); |
| 327 | if (i2s_pdata == NULL) { | 330 | if (i2s_pdata == NULL) { |
| 328 | kfree(adev->acp.acp_res); | 331 | kfree(adev->acp.acp_res); |
| 329 | kfree(adev->acp.acp_cell); | 332 | kfree(adev->acp.acp_cell); |
| @@ -358,6 +361,20 @@ static int acp_hw_init(void *handle) | |||
| 358 | i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; | 361 | i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; |
| 359 | i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; | 362 | i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; |
| 360 | 363 | ||
| 364 | i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; | ||
| 365 | switch (adev->asic_type) { | ||
| 366 | case CHIP_STONEY: | ||
| 367 | i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; | ||
| 368 | break; | ||
| 369 | default: | ||
| 370 | break; | ||
| 371 | } | ||
| 372 | |||
| 373 | i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD; | ||
| 374 | i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000; | ||
| 375 | i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET; | ||
| 376 | i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET; | ||
| 377 | |||
| 361 | adev->acp.acp_res[0].name = "acp2x_dma"; | 378 | adev->acp.acp_res[0].name = "acp2x_dma"; |
| 362 | adev->acp.acp_res[0].flags = IORESOURCE_MEM; | 379 | adev->acp.acp_res[0].flags = IORESOURCE_MEM; |
| 363 | adev->acp.acp_res[0].start = acp_base; | 380 | adev->acp.acp_res[0].start = acp_base; |
| @@ -373,13 +390,18 @@ static int acp_hw_init(void *handle) | |||
| 373 | adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; | 390 | adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; |
| 374 | adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; | 391 | adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; |
| 375 | 392 | ||
| 376 | adev->acp.acp_res[3].name = "acp2x_dma_irq"; | 393 | adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap"; |
| 377 | adev->acp.acp_res[3].flags = IORESOURCE_IRQ; | 394 | adev->acp.acp_res[3].flags = IORESOURCE_MEM; |
| 378 | adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162); | 395 | adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START; |
| 379 | adev->acp.acp_res[3].end = adev->acp.acp_res[3].start; | 396 | adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END; |
| 397 | |||
| 398 | adev->acp.acp_res[4].name = "acp2x_dma_irq"; | ||
| 399 | adev->acp.acp_res[4].flags = IORESOURCE_IRQ; | ||
| 400 | adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162); | ||
| 401 | adev->acp.acp_res[4].end = adev->acp.acp_res[4].start; | ||
| 380 | 402 | ||
| 381 | adev->acp.acp_cell[0].name = "acp_audio_dma"; | 403 | adev->acp.acp_cell[0].name = "acp_audio_dma"; |
| 382 | adev->acp.acp_cell[0].num_resources = 4; | 404 | adev->acp.acp_cell[0].num_resources = 5; |
| 383 | adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; | 405 | adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; |
| 384 | adev->acp.acp_cell[0].platform_data = &adev->asic_type; | 406 | adev->acp.acp_cell[0].platform_data = &adev->asic_type; |
| 385 | adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); | 407 | adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); |
| @@ -396,6 +418,12 @@ static int acp_hw_init(void *handle) | |||
| 396 | adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; | 418 | adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; |
| 397 | adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); | 419 | adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); |
| 398 | 420 | ||
| 421 | adev->acp.acp_cell[3].name = "designware-i2s"; | ||
| 422 | adev->acp.acp_cell[3].num_resources = 1; | ||
| 423 | adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3]; | ||
| 424 | adev->acp.acp_cell[3].platform_data = &i2s_pdata[2]; | ||
| 425 | adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data); | ||
| 426 | |||
| 399 | r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, | 427 | r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, |
| 400 | ACP_DEVS); | 428 | ACP_DEVS); |
| 401 | if (r) | 429 | if (r) |
| @@ -451,7 +479,6 @@ static int acp_hw_init(void *handle) | |||
| 451 | val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); | 479 | val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); |
| 452 | val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; | 480 | val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; |
| 453 | cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); | 481 | cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); |
| 454 | |||
| 455 | return 0; | 482 | return 0; |
| 456 | } | 483 | } |
| 457 | 484 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 9ab89371d9e8..ca8bf1c9a98e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
| @@ -575,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { | |||
| 575 | { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, | 575 | { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, |
| 576 | { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, | 576 | { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, |
| 577 | { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, | 577 | { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, |
| 578 | { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, | ||
| 578 | { 0, 0, 0, 0, 0 }, | 579 | { 0, 0, 0, 0, 0 }, |
| 579 | }; | 580 | }; |
| 580 | 581 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 82312a7bc6ad..9c85a90be293 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -927,6 +927,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | |||
| 927 | r = amdgpu_bo_vm_update_pte(p); | 927 | r = amdgpu_bo_vm_update_pte(p); |
| 928 | if (r) | 928 | if (r) |
| 929 | return r; | 929 | return r; |
| 930 | |||
| 931 | r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv); | ||
| 932 | if (r) | ||
| 933 | return r; | ||
| 930 | } | 934 | } |
| 931 | 935 | ||
| 932 | return amdgpu_cs_sync_rings(p); | 936 | return amdgpu_cs_sync_rings(p); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6e5284e6c028..2c5f093e79e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -2747,6 +2747,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) | |||
| 2747 | if (r) | 2747 | if (r) |
| 2748 | return r; | 2748 | return r; |
| 2749 | 2749 | ||
| 2750 | /* Make sure IB tests flushed */ | ||
| 2751 | flush_delayed_work(&adev->late_init_work); | ||
| 2752 | |||
| 2750 | /* blat the mode back in */ | 2753 | /* blat the mode back in */ |
| 2751 | if (fbcon) { | 2754 | if (fbcon) { |
| 2752 | if (!amdgpu_device_has_dc_support(adev)) { | 2755 | if (!amdgpu_device_has_dc_support(adev)) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index edf16b2b957a..fdcb498f6d19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -107,6 +107,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, | |||
| 107 | return; | 107 | return; |
| 108 | list_add_tail(&base->bo_list, &bo->va); | 108 | list_add_tail(&base->bo_list, &bo->va); |
| 109 | 109 | ||
| 110 | if (bo->tbo.type == ttm_bo_type_kernel) | ||
| 111 | list_move(&base->vm_status, &vm->relocated); | ||
| 112 | |||
| 110 | if (bo->tbo.resv != vm->root.base.bo->tbo.resv) | 113 | if (bo->tbo.resv != vm->root.base.bo->tbo.resv) |
| 111 | return; | 114 | return; |
| 112 | 115 | ||
| @@ -468,7 +471,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, | |||
| 468 | pt->parent = amdgpu_bo_ref(parent->base.bo); | 471 | pt->parent = amdgpu_bo_ref(parent->base.bo); |
| 469 | 472 | ||
| 470 | amdgpu_vm_bo_base_init(&entry->base, vm, pt); | 473 | amdgpu_vm_bo_base_init(&entry->base, vm, pt); |
| 471 | list_move(&entry->base.vm_status, &vm->relocated); | ||
| 472 | } | 474 | } |
| 473 | 475 | ||
| 474 | if (level < AMDGPU_VM_PTB) { | 476 | if (level < AMDGPU_VM_PTB) { |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 4304d9e408b8..ace9ad578ca0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | |||
| @@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, | |||
| 83 | enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? | 83 | enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? |
| 84 | I2C_MOT_TRUE : I2C_MOT_FALSE; | 84 | I2C_MOT_TRUE : I2C_MOT_FALSE; |
| 85 | enum ddc_result res; | 85 | enum ddc_result res; |
| 86 | uint32_t read_bytes = msg->size; | 86 | ssize_t read_bytes; |
| 87 | 87 | ||
| 88 | if (WARN_ON(msg->size > 16)) | 88 | if (WARN_ON(msg->size > 16)) |
| 89 | return -E2BIG; | 89 | return -E2BIG; |
| 90 | 90 | ||
| 91 | switch (msg->request & ~DP_AUX_I2C_MOT) { | 91 | switch (msg->request & ~DP_AUX_I2C_MOT) { |
| 92 | case DP_AUX_NATIVE_READ: | 92 | case DP_AUX_NATIVE_READ: |
| 93 | res = dal_ddc_service_read_dpcd_data( | 93 | read_bytes = dal_ddc_service_read_dpcd_data( |
| 94 | TO_DM_AUX(aux)->ddc_service, | 94 | TO_DM_AUX(aux)->ddc_service, |
| 95 | false, | 95 | false, |
| 96 | I2C_MOT_UNDEF, | 96 | I2C_MOT_UNDEF, |
| 97 | msg->address, | 97 | msg->address, |
| 98 | msg->buffer, | 98 | msg->buffer, |
| 99 | msg->size, | 99 | msg->size); |
| 100 | &read_bytes); | 100 | return read_bytes; |
| 101 | break; | ||
| 102 | case DP_AUX_NATIVE_WRITE: | 101 | case DP_AUX_NATIVE_WRITE: |
| 103 | res = dal_ddc_service_write_dpcd_data( | 102 | res = dal_ddc_service_write_dpcd_data( |
| 104 | TO_DM_AUX(aux)->ddc_service, | 103 | TO_DM_AUX(aux)->ddc_service, |
| @@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, | |||
| 109 | msg->size); | 108 | msg->size); |
| 110 | break; | 109 | break; |
| 111 | case DP_AUX_I2C_READ: | 110 | case DP_AUX_I2C_READ: |
| 112 | res = dal_ddc_service_read_dpcd_data( | 111 | read_bytes = dal_ddc_service_read_dpcd_data( |
| 113 | TO_DM_AUX(aux)->ddc_service, | 112 | TO_DM_AUX(aux)->ddc_service, |
| 114 | true, | 113 | true, |
| 115 | mot, | 114 | mot, |
| 116 | msg->address, | 115 | msg->address, |
| 117 | msg->buffer, | 116 | msg->buffer, |
| 118 | msg->size, | 117 | msg->size); |
| 119 | &read_bytes); | 118 | return read_bytes; |
| 120 | break; | ||
| 121 | case DP_AUX_I2C_WRITE: | 119 | case DP_AUX_I2C_WRITE: |
| 122 | res = dal_ddc_service_write_dpcd_data( | 120 | res = dal_ddc_service_write_dpcd_data( |
| 123 | TO_DM_AUX(aux)->ddc_service, | 121 | TO_DM_AUX(aux)->ddc_service, |
| @@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, | |||
| 139 | r == DDC_RESULT_SUCESSFULL); | 137 | r == DDC_RESULT_SUCESSFULL); |
| 140 | #endif | 138 | #endif |
| 141 | 139 | ||
| 142 | if (res != DDC_RESULT_SUCESSFULL) | 140 | return msg->size; |
| 143 | return -EIO; | ||
| 144 | return read_bytes; | ||
| 145 | } | 141 | } |
| 146 | 142 | ||
| 147 | static enum drm_connector_status | 143 | static enum drm_connector_status |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index 5a3346124a01..5a2e952c5bea 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | |||
| @@ -255,8 +255,9 @@ static void pp_to_dc_clock_levels_with_latency( | |||
| 255 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); | 255 | DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); |
| 256 | 256 | ||
| 257 | for (i = 0; i < clk_level_info->num_levels; i++) { | 257 | for (i = 0; i < clk_level_info->num_levels; i++) { |
| 258 | DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz); | 258 | DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz); |
| 259 | clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; | 259 | /* translate 10kHz to kHz */ |
| 260 | clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10; | ||
| 260 | clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; | 261 | clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; |
| 261 | } | 262 | } |
| 262 | } | 263 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index ae48d603ebd6..49c2face1e7a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | |||
| @@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data( | |||
| 629 | return ret; | 629 | return ret; |
| 630 | } | 630 | } |
| 631 | 631 | ||
| 632 | enum ddc_result dal_ddc_service_read_dpcd_data( | 632 | ssize_t dal_ddc_service_read_dpcd_data( |
| 633 | struct ddc_service *ddc, | 633 | struct ddc_service *ddc, |
| 634 | bool i2c, | 634 | bool i2c, |
| 635 | enum i2c_mot_mode mot, | 635 | enum i2c_mot_mode mot, |
| 636 | uint32_t address, | 636 | uint32_t address, |
| 637 | uint8_t *data, | 637 | uint8_t *data, |
| 638 | uint32_t len, | 638 | uint32_t len) |
| 639 | uint32_t *read) | ||
| 640 | { | 639 | { |
| 641 | struct aux_payload read_payload = { | 640 | struct aux_payload read_payload = { |
| 642 | .i2c_over_aux = i2c, | 641 | .i2c_over_aux = i2c, |
| @@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data( | |||
| 653 | .mot = mot | 652 | .mot = mot |
| 654 | }; | 653 | }; |
| 655 | 654 | ||
| 656 | *read = 0; | ||
| 657 | |||
| 658 | if (len > DEFAULT_AUX_MAX_DATA_SIZE) { | 655 | if (len > DEFAULT_AUX_MAX_DATA_SIZE) { |
| 659 | BREAK_TO_DEBUGGER(); | 656 | BREAK_TO_DEBUGGER(); |
| 660 | return DDC_RESULT_FAILED_INVALID_OPERATION; | 657 | return DDC_RESULT_FAILED_INVALID_OPERATION; |
| @@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data( | |||
| 664 | ddc->ctx->i2caux, | 661 | ddc->ctx->i2caux, |
| 665 | ddc->ddc_pin, | 662 | ddc->ddc_pin, |
| 666 | &command)) { | 663 | &command)) { |
| 667 | *read = command.payloads->length; | 664 | return (ssize_t)command.payloads->length; |
| 668 | return DDC_RESULT_SUCESSFULL; | ||
| 669 | } | 665 | } |
| 670 | 666 | ||
| 671 | return DDC_RESULT_FAILED_OPERATION; | 667 | return DDC_RESULT_FAILED_OPERATION; |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 7857cb42b3e6..bdd121485cbc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | |||
| @@ -1767,12 +1767,10 @@ static void dp_test_send_link_training(struct dc_link *link) | |||
| 1767 | dp_retrain_link_dp_test(link, &link_settings, false); | 1767 | dp_retrain_link_dp_test(link, &link_settings, false); |
| 1768 | } | 1768 | } |
| 1769 | 1769 | ||
| 1770 | /* TODO hbr2 compliance eye output is unstable | 1770 | /* TODO Raven hbr2 compliance eye output is unstable |
| 1771 | * (toggling on and off) with debugger break | 1771 | * (toggling on and off) with debugger break |
| 1772 | * This caueses intermittent PHY automation failure | 1772 | * This caueses intermittent PHY automation failure |
| 1773 | * Need to look into the root cause */ | 1773 | * Need to look into the root cause */ |
| 1774 | static uint8_t force_tps4_for_cp2520 = 1; | ||
| 1775 | |||
| 1776 | static void dp_test_send_phy_test_pattern(struct dc_link *link) | 1774 | static void dp_test_send_phy_test_pattern(struct dc_link *link) |
| 1777 | { | 1775 | { |
| 1778 | union phy_test_pattern dpcd_test_pattern; | 1776 | union phy_test_pattern dpcd_test_pattern; |
| @@ -1832,13 +1830,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) | |||
| 1832 | break; | 1830 | break; |
| 1833 | case PHY_TEST_PATTERN_CP2520_1: | 1831 | case PHY_TEST_PATTERN_CP2520_1: |
| 1834 | /* CP2520 pattern is unstable, temporarily use TPS4 instead */ | 1832 | /* CP2520 pattern is unstable, temporarily use TPS4 instead */ |
| 1835 | test_pattern = (force_tps4_for_cp2520 == 1) ? | 1833 | test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? |
| 1836 | DP_TEST_PATTERN_TRAINING_PATTERN4 : | 1834 | DP_TEST_PATTERN_TRAINING_PATTERN4 : |
| 1837 | DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; | 1835 | DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; |
| 1838 | break; | 1836 | break; |
| 1839 | case PHY_TEST_PATTERN_CP2520_2: | 1837 | case PHY_TEST_PATTERN_CP2520_2: |
| 1840 | /* CP2520 pattern is unstable, temporarily use TPS4 instead */ | 1838 | /* CP2520 pattern is unstable, temporarily use TPS4 instead */ |
| 1841 | test_pattern = (force_tps4_for_cp2520 == 1) ? | 1839 | test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? |
| 1842 | DP_TEST_PATTERN_TRAINING_PATTERN4 : | 1840 | DP_TEST_PATTERN_TRAINING_PATTERN4 : |
| 1843 | DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; | 1841 | DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; |
| 1844 | break; | 1842 | break; |
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 9cfde0ccf4e9..53c71296f3dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h | |||
| @@ -76,6 +76,7 @@ struct dc_caps { | |||
| 76 | bool is_apu; | 76 | bool is_apu; |
| 77 | bool dual_link_dvi; | 77 | bool dual_link_dvi; |
| 78 | bool post_blend_color_processing; | 78 | bool post_blend_color_processing; |
| 79 | bool force_dp_tps4_for_cp2520; | ||
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| 81 | struct dc_dcc_surface_param { | 82 | struct dc_dcc_surface_param { |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index b235a75355b8..bae752332a9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | |||
| @@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = { | |||
| 741 | .mem_input_is_flip_pending = dce_mi_is_flip_pending | 741 | .mem_input_is_flip_pending = dce_mi_is_flip_pending |
| 742 | }; | 742 | }; |
| 743 | 743 | ||
| 744 | static struct mem_input_funcs dce112_mi_funcs = { | ||
| 745 | .mem_input_program_display_marks = dce112_mi_program_display_marks, | ||
| 746 | .allocate_mem_input = dce_mi_allocate_dmif, | ||
| 747 | .free_mem_input = dce_mi_free_dmif, | ||
| 748 | .mem_input_program_surface_flip_and_addr = | ||
| 749 | dce_mi_program_surface_flip_and_addr, | ||
| 750 | .mem_input_program_pte_vm = dce_mi_program_pte_vm, | ||
| 751 | .mem_input_program_surface_config = | ||
| 752 | dce_mi_program_surface_config, | ||
| 753 | .mem_input_is_flip_pending = dce_mi_is_flip_pending | ||
| 754 | }; | ||
| 755 | |||
| 756 | static struct mem_input_funcs dce120_mi_funcs = { | ||
| 757 | .mem_input_program_display_marks = dce120_mi_program_display_marks, | ||
| 758 | .allocate_mem_input = dce_mi_allocate_dmif, | ||
| 759 | .free_mem_input = dce_mi_free_dmif, | ||
| 760 | .mem_input_program_surface_flip_and_addr = | ||
| 761 | dce_mi_program_surface_flip_and_addr, | ||
| 762 | .mem_input_program_pte_vm = dce_mi_program_pte_vm, | ||
| 763 | .mem_input_program_surface_config = | ||
| 764 | dce_mi_program_surface_config, | ||
| 765 | .mem_input_is_flip_pending = dce_mi_is_flip_pending | ||
| 766 | }; | ||
| 744 | 767 | ||
| 745 | void dce_mem_input_construct( | 768 | void dce_mem_input_construct( |
| 746 | struct dce_mem_input *dce_mi, | 769 | struct dce_mem_input *dce_mi, |
| @@ -769,7 +792,7 @@ void dce112_mem_input_construct( | |||
| 769 | const struct dce_mem_input_mask *mi_mask) | 792 | const struct dce_mem_input_mask *mi_mask) |
| 770 | { | 793 | { |
| 771 | dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); | 794 | dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); |
| 772 | dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks; | 795 | dce_mi->base.funcs = &dce112_mi_funcs; |
| 773 | } | 796 | } |
| 774 | 797 | ||
| 775 | void dce120_mem_input_construct( | 798 | void dce120_mem_input_construct( |
| @@ -781,5 +804,5 @@ void dce120_mem_input_construct( | |||
| 781 | const struct dce_mem_input_mask *mi_mask) | 804 | const struct dce_mem_input_mask *mi_mask) |
| 782 | { | 805 | { |
| 783 | dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); | 806 | dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); |
| 784 | dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks; | 807 | dce_mi->base.funcs = &dce120_mi_funcs; |
| 785 | } | 808 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 38ec0d609297..344dd2e69e7c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | |||
| @@ -678,9 +678,22 @@ bool dce100_validate_bandwidth( | |||
| 678 | struct dc *dc, | 678 | struct dc *dc, |
| 679 | struct dc_state *context) | 679 | struct dc_state *context) |
| 680 | { | 680 | { |
| 681 | /* TODO implement when needed but for now hardcode max value*/ | 681 | int i; |
| 682 | context->bw.dce.dispclk_khz = 681000; | 682 | bool at_least_one_pipe = false; |
| 683 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; | 683 | |
| 684 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 685 | if (context->res_ctx.pipe_ctx[i].stream) | ||
| 686 | at_least_one_pipe = true; | ||
| 687 | } | ||
| 688 | |||
| 689 | if (at_least_one_pipe) { | ||
| 690 | /* TODO implement when needed but for now hardcode max value*/ | ||
| 691 | context->bw.dce.dispclk_khz = 681000; | ||
| 692 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; | ||
| 693 | } else { | ||
| 694 | context->bw.dce.dispclk_khz = 0; | ||
| 695 | context->bw.dce.yclk_khz = 0; | ||
| 696 | } | ||
| 684 | 697 | ||
| 685 | return true; | 698 | return true; |
| 686 | } | 699 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index df5cb2d1d164..34dac84066a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | |||
| @@ -1027,6 +1027,8 @@ static bool construct( | |||
| 1027 | dc->caps.max_slave_planes = 1; | 1027 | dc->caps.max_slave_planes = 1; |
| 1028 | dc->caps.is_apu = true; | 1028 | dc->caps.is_apu = true; |
| 1029 | dc->caps.post_blend_color_processing = false; | 1029 | dc->caps.post_blend_color_processing = false; |
| 1030 | /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ | ||
| 1031 | dc->caps.force_dp_tps4_for_cp2520 = true; | ||
| 1030 | 1032 | ||
| 1031 | if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) | 1033 | if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) |
| 1032 | dc->debug = debug_defaults_drv; | 1034 | dc->debug = debug_defaults_drv; |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 30b3a08b91be..090b7a8dd67b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | |||
| @@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data( | |||
| 102 | uint8_t *read_buf, | 102 | uint8_t *read_buf, |
| 103 | uint32_t read_size); | 103 | uint32_t read_size); |
| 104 | 104 | ||
| 105 | enum ddc_result dal_ddc_service_read_dpcd_data( | 105 | ssize_t dal_ddc_service_read_dpcd_data( |
| 106 | struct ddc_service *ddc, | 106 | struct ddc_service *ddc, |
| 107 | bool i2c, | 107 | bool i2c, |
| 108 | enum i2c_mot_mode mot, | 108 | enum i2c_mot_mode mot, |
| 109 | uint32_t address, | 109 | uint32_t address, |
| 110 | uint8_t *data, | 110 | uint8_t *data, |
| 111 | uint32_t len, | 111 | uint32_t len); |
| 112 | uint32_t *read); | ||
| 113 | 112 | ||
| 114 | enum ddc_result dal_ddc_service_write_dpcd_data( | 113 | enum ddc_result dal_ddc_service_write_dpcd_data( |
| 115 | struct ddc_service *ddc, | 114 | struct ddc_service *ddc, |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index d644a9bb9078..9f407c48d4f0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | |||
| @@ -381,6 +381,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) | |||
| 381 | uint32_t fw_to_load; | 381 | uint32_t fw_to_load; |
| 382 | int result = 0; | 382 | int result = 0; |
| 383 | struct SMU_DRAMData_TOC *toc; | 383 | struct SMU_DRAMData_TOC *toc; |
| 384 | uint32_t num_entries = 0; | ||
| 384 | 385 | ||
| 385 | if (!hwmgr->reload_fw) { | 386 | if (!hwmgr->reload_fw) { |
| 386 | pr_info("skip reloading...\n"); | 387 | pr_info("skip reloading...\n"); |
| @@ -422,41 +423,41 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) | |||
| 422 | } | 423 | } |
| 423 | 424 | ||
| 424 | toc = (struct SMU_DRAMData_TOC *)smu_data->header; | 425 | toc = (struct SMU_DRAMData_TOC *)smu_data->header; |
| 425 | toc->num_entries = 0; | ||
| 426 | toc->structure_version = 1; | 426 | toc->structure_version = 1; |
| 427 | 427 | ||
| 428 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 428 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 429 | UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), | 429 | UCODE_ID_RLC_G, &toc->entry[num_entries++]), |
| 430 | "Failed to Get Firmware Entry.", return -EINVAL); | 430 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 431 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 431 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 432 | UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), | 432 | UCODE_ID_CP_CE, &toc->entry[num_entries++]), |
| 433 | "Failed to Get Firmware Entry.", return -EINVAL); | 433 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 434 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 434 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 435 | UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), | 435 | UCODE_ID_CP_PFP, &toc->entry[num_entries++]), |
| 436 | "Failed to Get Firmware Entry.", return -EINVAL); | 436 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 437 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 437 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 438 | UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), | 438 | UCODE_ID_CP_ME, &toc->entry[num_entries++]), |
| 439 | "Failed to Get Firmware Entry.", return -EINVAL); | 439 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 440 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 440 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 441 | UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), | 441 | UCODE_ID_CP_MEC, &toc->entry[num_entries++]), |
| 442 | "Failed to Get Firmware Entry.", return -EINVAL); | 442 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 443 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 443 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 444 | UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), | 444 | UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]), |
| 445 | "Failed to Get Firmware Entry.", return -EINVAL); | 445 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 446 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 446 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 447 | UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), | 447 | UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]), |
| 448 | "Failed to Get Firmware Entry.", return -EINVAL); | 448 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 449 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 449 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 450 | UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), | 450 | UCODE_ID_SDMA0, &toc->entry[num_entries++]), |
| 451 | "Failed to Get Firmware Entry.", return -EINVAL); | 451 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 452 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 452 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 453 | UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), | 453 | UCODE_ID_SDMA1, &toc->entry[num_entries++]), |
| 454 | "Failed to Get Firmware Entry.", return -EINVAL); | 454 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 455 | if (!hwmgr->not_vf) | 455 | if (!hwmgr->not_vf) |
| 456 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, | 456 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, |
| 457 | UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), | 457 | UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]), |
| 458 | "Failed to Get Firmware Entry.", return -EINVAL); | 458 | "Failed to Get Firmware Entry.", return -EINVAL); |
| 459 | 459 | ||
| 460 | toc->num_entries = num_entries; | ||
| 460 | smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); | 461 | smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); |
| 461 | smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); | 462 | smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); |
| 462 | 463 | ||
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 03eeee11dd5b..42a40daff132 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c | |||
| @@ -519,8 +519,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg) | |||
| 519 | u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); | 519 | u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); |
| 520 | 520 | ||
| 521 | /* | 521 | /* |
| 522 | * This is rediculous - rather than writing bits to clear, we | 522 | * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR |
| 523 | * have to set the actual status register value. This is racy. | 523 | * is set. Writing has some other effect to acknowledge the IRQ - |
| 524 | * without this, we only get a single IRQ. | ||
| 524 | */ | 525 | */ |
| 525 | writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); | 526 | writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); |
| 526 | 527 | ||
| @@ -1116,16 +1117,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc, | |||
| 1116 | static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc) | 1117 | static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc) |
| 1117 | { | 1118 | { |
| 1118 | struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); | 1119 | struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); |
| 1120 | unsigned long flags; | ||
| 1119 | 1121 | ||
| 1122 | spin_lock_irqsave(&dcrtc->irq_lock, flags); | ||
| 1120 | armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA); | 1123 | armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA); |
| 1124 | spin_unlock_irqrestore(&dcrtc->irq_lock, flags); | ||
| 1121 | return 0; | 1125 | return 0; |
| 1122 | } | 1126 | } |
| 1123 | 1127 | ||
| 1124 | static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc) | 1128 | static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc) |
| 1125 | { | 1129 | { |
| 1126 | struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); | 1130 | struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); |
| 1131 | unsigned long flags; | ||
| 1127 | 1132 | ||
| 1133 | spin_lock_irqsave(&dcrtc->irq_lock, flags); | ||
| 1128 | armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA); | 1134 | armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA); |
| 1135 | spin_unlock_irqrestore(&dcrtc->irq_lock, flags); | ||
| 1129 | } | 1136 | } |
| 1130 | 1137 | ||
| 1131 | static const struct drm_crtc_funcs armada_crtc_funcs = { | 1138 | static const struct drm_crtc_funcs armada_crtc_funcs = { |
| @@ -1415,6 +1422,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, | |||
| 1415 | CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); | 1422 | CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); |
| 1416 | writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1); | 1423 | writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1); |
| 1417 | writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); | 1424 | writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); |
| 1425 | readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); | ||
| 1418 | writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); | 1426 | writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); |
| 1419 | 1427 | ||
| 1420 | ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", | 1428 | ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", |
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h index 27319a8335e2..345dc4d0851e 100644 --- a/drivers/gpu/drm/armada/armada_hw.h +++ b/drivers/gpu/drm/armada/armada_hw.h | |||
| @@ -160,6 +160,7 @@ enum { | |||
| 160 | CFG_ALPHAM_GRA = 0x1 << 16, | 160 | CFG_ALPHAM_GRA = 0x1 << 16, |
| 161 | CFG_ALPHAM_CFG = 0x2 << 16, | 161 | CFG_ALPHAM_CFG = 0x2 << 16, |
| 162 | CFG_ALPHA_MASK = 0xff << 8, | 162 | CFG_ALPHA_MASK = 0xff << 8, |
| 163 | #define CFG_ALPHA(x) ((x) << 8) | ||
| 163 | CFG_PIXCMD_MASK = 0xff, | 164 | CFG_PIXCMD_MASK = 0xff, |
| 164 | }; | 165 | }; |
| 165 | 166 | ||
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index c391955009d6..afa7ded3ae31 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c | |||
| @@ -28,6 +28,7 @@ struct armada_ovl_plane_properties { | |||
| 28 | uint16_t contrast; | 28 | uint16_t contrast; |
| 29 | uint16_t saturation; | 29 | uint16_t saturation; |
| 30 | uint32_t colorkey_mode; | 30 | uint32_t colorkey_mode; |
| 31 | uint32_t colorkey_enable; | ||
| 31 | }; | 32 | }; |
| 32 | 33 | ||
| 33 | struct armada_ovl_plane { | 34 | struct armada_ovl_plane { |
| @@ -54,11 +55,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop, | |||
| 54 | writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE); | 55 | writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE); |
| 55 | 56 | ||
| 56 | spin_lock_irq(&dcrtc->irq_lock); | 57 | spin_lock_irq(&dcrtc->irq_lock); |
| 57 | armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA, | 58 | armada_updatel(prop->colorkey_mode, |
| 58 | CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, | 59 | CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, |
| 59 | dcrtc->base + LCD_SPU_DMA_CTRL1); | 60 | dcrtc->base + LCD_SPU_DMA_CTRL1); |
| 60 | 61 | if (dcrtc->variant->has_spu_adv_reg) | |
| 61 | armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG); | 62 | armada_updatel(prop->colorkey_enable, |
| 63 | ADV_GRACOLORKEY | ADV_VIDCOLORKEY, | ||
| 64 | dcrtc->base + LCD_SPU_ADV_REG); | ||
| 62 | spin_unlock_irq(&dcrtc->irq_lock); | 65 | spin_unlock_irq(&dcrtc->irq_lock); |
| 63 | } | 66 | } |
| 64 | 67 | ||
| @@ -321,8 +324,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane, | |||
| 321 | dplane->prop.colorkey_vb |= K2B(val); | 324 | dplane->prop.colorkey_vb |= K2B(val); |
| 322 | update_attr = true; | 325 | update_attr = true; |
| 323 | } else if (property == priv->colorkey_mode_prop) { | 326 | } else if (property == priv->colorkey_mode_prop) { |
| 324 | dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK; | 327 | if (val == CKMODE_DISABLE) { |
| 325 | dplane->prop.colorkey_mode |= CFG_CKMODE(val); | 328 | dplane->prop.colorkey_mode = |
| 329 | CFG_CKMODE(CKMODE_DISABLE) | | ||
| 330 | CFG_ALPHAM_CFG | CFG_ALPHA(255); | ||
| 331 | dplane->prop.colorkey_enable = 0; | ||
| 332 | } else { | ||
| 333 | dplane->prop.colorkey_mode = | ||
| 334 | CFG_CKMODE(val) | | ||
| 335 | CFG_ALPHAM_GRA | CFG_ALPHA(0); | ||
| 336 | dplane->prop.colorkey_enable = ADV_GRACOLORKEY; | ||
| 337 | } | ||
| 326 | update_attr = true; | 338 | update_attr = true; |
| 327 | } else if (property == priv->brightness_prop) { | 339 | } else if (property == priv->brightness_prop) { |
| 328 | dplane->prop.brightness = val - 256; | 340 | dplane->prop.brightness = val - 256; |
| @@ -453,7 +465,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) | |||
| 453 | dplane->prop.colorkey_yr = 0xfefefe00; | 465 | dplane->prop.colorkey_yr = 0xfefefe00; |
| 454 | dplane->prop.colorkey_ug = 0x01010100; | 466 | dplane->prop.colorkey_ug = 0x01010100; |
| 455 | dplane->prop.colorkey_vb = 0x01010100; | 467 | dplane->prop.colorkey_vb = 0x01010100; |
| 456 | dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB); | 468 | dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) | |
| 469 | CFG_ALPHAM_GRA | CFG_ALPHA(0); | ||
| 470 | dplane->prop.colorkey_enable = ADV_GRACOLORKEY; | ||
| 457 | dplane->prop.brightness = 0; | 471 | dplane->prop.brightness = 0; |
| 458 | dplane->prop.contrast = 0x4000; | 472 | dplane->prop.contrast = 0x4000; |
| 459 | dplane->prop.saturation = 0x4000; | 473 | dplane->prop.saturation = 0x4000; |
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index b51c05d03f14..7f562410f9cf 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
| @@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, | |||
| 862 | { | 862 | { |
| 863 | struct intel_vgpu *vgpu = s->vgpu; | 863 | struct intel_vgpu *vgpu = s->vgpu; |
| 864 | struct intel_gvt *gvt = vgpu->gvt; | 864 | struct intel_gvt *gvt = vgpu->gvt; |
| 865 | u32 ctx_sr_ctl; | ||
| 865 | 866 | ||
| 866 | if (offset + 4 > gvt->device_info.mmio_size) { | 867 | if (offset + 4 > gvt->device_info.mmio_size) { |
| 867 | gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", | 868 | gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", |
| @@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s, | |||
| 894 | patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); | 895 | patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); |
| 895 | } | 896 | } |
| 896 | 897 | ||
| 898 | /* TODO | ||
| 899 | * Right now only scan LRI command on KBL and in inhibit context. | ||
| 900 | * It's good enough to support initializing mmio by lri command in | ||
| 901 | * vgpu inhibit context on KBL. | ||
| 902 | */ | ||
| 903 | if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) && | ||
| 904 | intel_gvt_mmio_is_in_ctx(gvt, offset) && | ||
| 905 | !strncmp(cmd, "lri", 3)) { | ||
| 906 | intel_gvt_hypervisor_read_gpa(s->vgpu, | ||
| 907 | s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); | ||
| 908 | /* check inhibit context */ | ||
| 909 | if (ctx_sr_ctl & 1) { | ||
| 910 | u32 data = cmd_val(s, index + 1); | ||
| 911 | |||
| 912 | if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) | ||
| 913 | intel_vgpu_mask_mmio_write(vgpu, | ||
| 914 | offset, &data, 4); | ||
| 915 | else | ||
| 916 | vgpu_vreg(vgpu, offset) = data; | ||
| 917 | } | ||
| 918 | } | ||
| 919 | |||
| 897 | /* TODO: Update the global mask if this MMIO is a masked-MMIO */ | 920 | /* TODO: Update the global mask if this MMIO is a masked-MMIO */ |
| 898 | intel_gvt_mmio_set_cmd_accessed(gvt, offset); | 921 | intel_gvt_mmio_set_cmd_accessed(gvt, offset); |
| 899 | return 0; | 922 | return 0; |
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 05d15a095310..858967daf04b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h | |||
| @@ -268,6 +268,8 @@ struct intel_gvt_mmio { | |||
| 268 | #define F_CMD_ACCESSED (1 << 5) | 268 | #define F_CMD_ACCESSED (1 << 5) |
| 269 | /* This reg could be accessed by unaligned address */ | 269 | /* This reg could be accessed by unaligned address */ |
| 270 | #define F_UNALIGN (1 << 6) | 270 | #define F_UNALIGN (1 << 6) |
| 271 | /* This reg is saved/restored in context */ | ||
| 272 | #define F_IN_CTX (1 << 7) | ||
| 271 | 273 | ||
| 272 | struct gvt_mmio_block *mmio_block; | 274 | struct gvt_mmio_block *mmio_block; |
| 273 | unsigned int num_mmio_block; | 275 | unsigned int num_mmio_block; |
| @@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask( | |||
| 639 | return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; | 641 | return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; |
| 640 | } | 642 | } |
| 641 | 643 | ||
| 644 | /** | ||
| 645 | * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask | ||
| 646 | * @gvt: a GVT device | ||
| 647 | * @offset: register offset | ||
| 648 | * | ||
| 649 | * Returns: | ||
| 650 | * True if a MMIO has a in-context mask, false if it isn't. | ||
| 651 | * | ||
| 652 | */ | ||
| 653 | static inline bool intel_gvt_mmio_is_in_ctx( | ||
| 654 | struct intel_gvt *gvt, unsigned int offset) | ||
| 655 | { | ||
| 656 | return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX; | ||
| 657 | } | ||
| 658 | |||
| 659 | /** | ||
| 660 | * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context | ||
| 661 | * @gvt: a GVT device | ||
| 662 | * @offset: register offset | ||
| 663 | * | ||
| 664 | */ | ||
| 665 | static inline void intel_gvt_mmio_set_in_ctx( | ||
| 666 | struct intel_gvt *gvt, unsigned int offset) | ||
| 667 | { | ||
| 668 | gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX; | ||
| 669 | } | ||
| 670 | |||
| 642 | int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); | 671 | int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); |
| 643 | void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); | 672 | void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); |
| 644 | int intel_gvt_debugfs_init(struct intel_gvt *gvt); | 673 | int intel_gvt_debugfs_init(struct intel_gvt *gvt); |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bcbc47a88a70..8f1caacdc78a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -3046,6 +3046,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 3046 | } | 3046 | } |
| 3047 | 3047 | ||
| 3048 | /** | 3048 | /** |
| 3049 | * intel_vgpu_mask_mmio_write - write mask register | ||
| 3050 | * @vgpu: a vGPU | ||
| 3051 | * @offset: access offset | ||
| 3052 | * @p_data: write data buffer | ||
| 3053 | * @bytes: access data length | ||
| 3054 | * | ||
| 3055 | * Returns: | ||
| 3056 | * Zero on success, negative error code if failed. | ||
| 3057 | */ | ||
| 3058 | int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | ||
| 3059 | void *p_data, unsigned int bytes) | ||
| 3060 | { | ||
| 3061 | u32 mask, old_vreg; | ||
| 3062 | |||
| 3063 | old_vreg = vgpu_vreg(vgpu, offset); | ||
| 3064 | write_vreg(vgpu, offset, p_data, bytes); | ||
| 3065 | mask = vgpu_vreg(vgpu, offset) >> 16; | ||
| 3066 | vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) | | ||
| 3067 | (vgpu_vreg(vgpu, offset) & mask); | ||
| 3068 | |||
| 3069 | return 0; | ||
| 3070 | } | ||
| 3071 | |||
| 3072 | /** | ||
| 3049 | * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be | 3073 | * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be |
| 3050 | * force-nopriv register | 3074 | * force-nopriv register |
| 3051 | * | 3075 | * |
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 71b620875943..dac8c6401e26 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h | |||
| @@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, | |||
| 98 | int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, | 98 | int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, |
| 99 | void *pdata, unsigned int bytes, bool is_read); | 99 | void *pdata, unsigned int bytes, bool is_read); |
| 100 | 100 | ||
| 101 | int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | ||
| 102 | void *p_data, unsigned int bytes); | ||
| 101 | #endif | 103 | #endif |
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 0f949554d118..5ca9caf7552a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c | |||
| @@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) | |||
| 581 | 581 | ||
| 582 | for (mmio = gvt->engine_mmio_list.mmio; | 582 | for (mmio = gvt->engine_mmio_list.mmio; |
| 583 | i915_mmio_reg_valid(mmio->reg); mmio++) { | 583 | i915_mmio_reg_valid(mmio->reg); mmio++) { |
| 584 | if (mmio->in_context) | 584 | if (mmio->in_context) { |
| 585 | gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++; | 585 | gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++; |
| 586 | intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg); | ||
| 587 | } | ||
| 586 | } | 588 | } |
| 587 | } | 589 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4a02747ac658..c16cb025755e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -1998,10 +1998,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, | |||
| 1998 | 1998 | ||
| 1999 | static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) | 1999 | static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) |
| 2000 | { | 2000 | { |
| 2001 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 2001 | u32 hotplug_status = 0, hotplug_status_mask; |
| 2002 | int i; | ||
| 2003 | |||
| 2004 | if (IS_G4X(dev_priv) || | ||
| 2005 | IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
| 2006 | hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | | ||
| 2007 | DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; | ||
| 2008 | else | ||
| 2009 | hotplug_status_mask = HOTPLUG_INT_STATUS_I915; | ||
| 2002 | 2010 | ||
| 2003 | if (hotplug_status) | 2011 | /* |
| 2012 | * We absolutely have to clear all the pending interrupt | ||
| 2013 | * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port | ||
| 2014 | * interrupt bit won't have an edge, and the i965/g4x | ||
| 2015 | * edge triggered IIR will not notice that an interrupt | ||
| 2016 | * is still pending. We can't use PORT_HOTPLUG_EN to | ||
| 2017 | * guarantee the edge as the act of toggling the enable | ||
| 2018 | * bits can itself generate a new hotplug interrupt :( | ||
| 2019 | */ | ||
| 2020 | for (i = 0; i < 10; i++) { | ||
| 2021 | u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; | ||
| 2022 | |||
| 2023 | if (tmp == 0) | ||
| 2024 | return hotplug_status; | ||
| 2025 | |||
| 2026 | hotplug_status |= tmp; | ||
| 2004 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 2027 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 2028 | } | ||
| 2029 | |||
| 2030 | WARN_ONCE(1, | ||
| 2031 | "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", | ||
| 2032 | I915_READ(PORT_HOTPLUG_STAT)); | ||
| 2005 | 2033 | ||
| 2006 | return hotplug_status; | 2034 | return hotplug_status; |
| 2007 | } | 2035 | } |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 501d2d290e9c..70dce544984e 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c | |||
| @@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev) | |||
| 55 | nouveau_display(dev)->init = nv04_display_init; | 55 | nouveau_display(dev)->init = nv04_display_init; |
| 56 | nouveau_display(dev)->fini = nv04_display_fini; | 56 | nouveau_display(dev)->fini = nv04_display_fini; |
| 57 | 57 | ||
| 58 | /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */ | ||
| 59 | dev->driver->driver_features &= ~DRIVER_ATOMIC; | ||
| 60 | |||
| 58 | nouveau_hw_save_vga_fonts(dev, 1); | 61 | nouveau_hw_save_vga_fonts(dev, 1); |
| 59 | 62 | ||
| 60 | nv04_crtc_create(dev, 0); | 63 | nv04_crtc_create(dev, 0); |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b83465ae7c1b..9bae4db84cfb 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c | |||
| @@ -1585,8 +1585,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) | |||
| 1585 | *****************************************************************************/ | 1585 | *****************************************************************************/ |
| 1586 | 1586 | ||
| 1587 | static void | 1587 | static void |
| 1588 | nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock) | 1588 | nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock) |
| 1589 | { | 1589 | { |
| 1590 | struct nouveau_drm *drm = nouveau_drm(state->dev); | ||
| 1590 | struct nv50_disp *disp = nv50_disp(drm->dev); | 1591 | struct nv50_disp *disp = nv50_disp(drm->dev); |
| 1591 | struct nv50_core *core = disp->core; | 1592 | struct nv50_core *core = disp->core; |
| 1592 | struct nv50_mstm *mstm; | 1593 | struct nv50_mstm *mstm; |
| @@ -1618,6 +1619,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock) | |||
| 1618 | } | 1619 | } |
| 1619 | 1620 | ||
| 1620 | static void | 1621 | static void |
| 1622 | nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock) | ||
| 1623 | { | ||
| 1624 | struct drm_plane_state *new_plane_state; | ||
| 1625 | struct drm_plane *plane; | ||
| 1626 | int i; | ||
| 1627 | |||
| 1628 | for_each_new_plane_in_state(state, plane, new_plane_state, i) { | ||
| 1629 | struct nv50_wndw *wndw = nv50_wndw(plane); | ||
| 1630 | if (interlock[wndw->interlock.type] & wndw->interlock.data) { | ||
| 1631 | if (wndw->func->update) | ||
| 1632 | wndw->func->update(wndw, interlock); | ||
| 1633 | } | ||
| 1634 | } | ||
| 1635 | } | ||
| 1636 | |||
| 1637 | static void | ||
| 1621 | nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | 1638 | nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) |
| 1622 | { | 1639 | { |
| 1623 | struct drm_device *dev = state->dev; | 1640 | struct drm_device *dev = state->dev; |
| @@ -1684,7 +1701,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 1684 | help->disable(encoder); | 1701 | help->disable(encoder); |
| 1685 | interlock[NV50_DISP_INTERLOCK_CORE] |= 1; | 1702 | interlock[NV50_DISP_INTERLOCK_CORE] |= 1; |
| 1686 | if (outp->flush_disable) { | 1703 | if (outp->flush_disable) { |
| 1687 | nv50_disp_atomic_commit_core(drm, interlock); | 1704 | nv50_disp_atomic_commit_wndw(state, interlock); |
| 1705 | nv50_disp_atomic_commit_core(state, interlock); | ||
| 1688 | memset(interlock, 0x00, sizeof(interlock)); | 1706 | memset(interlock, 0x00, sizeof(interlock)); |
| 1689 | } | 1707 | } |
| 1690 | } | 1708 | } |
| @@ -1693,15 +1711,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 1693 | /* Flush disable. */ | 1711 | /* Flush disable. */ |
| 1694 | if (interlock[NV50_DISP_INTERLOCK_CORE]) { | 1712 | if (interlock[NV50_DISP_INTERLOCK_CORE]) { |
| 1695 | if (atom->flush_disable) { | 1713 | if (atom->flush_disable) { |
| 1696 | for_each_new_plane_in_state(state, plane, new_plane_state, i) { | 1714 | nv50_disp_atomic_commit_wndw(state, interlock); |
| 1697 | struct nv50_wndw *wndw = nv50_wndw(plane); | 1715 | nv50_disp_atomic_commit_core(state, interlock); |
| 1698 | if (interlock[wndw->interlock.type] & wndw->interlock.data) { | ||
| 1699 | if (wndw->func->update) | ||
| 1700 | wndw->func->update(wndw, interlock); | ||
| 1701 | } | ||
| 1702 | } | ||
| 1703 | |||
| 1704 | nv50_disp_atomic_commit_core(drm, interlock); | ||
| 1705 | memset(interlock, 0x00, sizeof(interlock)); | 1716 | memset(interlock, 0x00, sizeof(interlock)); |
| 1706 | } | 1717 | } |
| 1707 | } | 1718 | } |
| @@ -1762,18 +1773,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 1762 | } | 1773 | } |
| 1763 | 1774 | ||
| 1764 | /* Flush update. */ | 1775 | /* Flush update. */ |
| 1765 | for_each_new_plane_in_state(state, plane, new_plane_state, i) { | 1776 | nv50_disp_atomic_commit_wndw(state, interlock); |
| 1766 | struct nv50_wndw *wndw = nv50_wndw(plane); | ||
| 1767 | if (interlock[wndw->interlock.type] & wndw->interlock.data) { | ||
| 1768 | if (wndw->func->update) | ||
| 1769 | wndw->func->update(wndw, interlock); | ||
| 1770 | } | ||
| 1771 | } | ||
| 1772 | 1777 | ||
| 1773 | if (interlock[NV50_DISP_INTERLOCK_CORE]) { | 1778 | if (interlock[NV50_DISP_INTERLOCK_CORE]) { |
| 1774 | if (interlock[NV50_DISP_INTERLOCK_BASE] || | 1779 | if (interlock[NV50_DISP_INTERLOCK_BASE] || |
| 1780 | interlock[NV50_DISP_INTERLOCK_OVLY] || | ||
| 1781 | interlock[NV50_DISP_INTERLOCK_WNDW] || | ||
| 1775 | !atom->state.legacy_cursor_update) | 1782 | !atom->state.legacy_cursor_update) |
| 1776 | nv50_disp_atomic_commit_core(drm, interlock); | 1783 | nv50_disp_atomic_commit_core(state, interlock); |
| 1777 | else | 1784 | else |
| 1778 | disp->core->func->update(disp->core, interlock, false); | 1785 | disp->core->func->update(disp->core, interlock, false); |
| 1779 | } | 1786 | } |
| @@ -1871,7 +1878,7 @@ nv50_disp_atomic_commit(struct drm_device *dev, | |||
| 1871 | nv50_disp_atomic_commit_tail(state); | 1878 | nv50_disp_atomic_commit_tail(state); |
| 1872 | 1879 | ||
| 1873 | drm_for_each_crtc(crtc, dev) { | 1880 | drm_for_each_crtc(crtc, dev) { |
| 1874 | if (crtc->state->enable) { | 1881 | if (crtc->state->active) { |
| 1875 | if (!drm->have_disp_power_ref) { | 1882 | if (!drm->have_disp_power_ref) { |
| 1876 | drm->have_disp_power_ref = true; | 1883 | drm->have_disp_power_ref = true; |
| 1877 | return 0; | 1884 | return 0; |
| @@ -2119,10 +2126,6 @@ nv50_display_destroy(struct drm_device *dev) | |||
| 2119 | kfree(disp); | 2126 | kfree(disp); |
| 2120 | } | 2127 | } |
| 2121 | 2128 | ||
| 2122 | MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); | ||
| 2123 | static int nouveau_atomic = 0; | ||
| 2124 | module_param_named(atomic, nouveau_atomic, int, 0400); | ||
| 2125 | |||
| 2126 | int | 2129 | int |
| 2127 | nv50_display_create(struct drm_device *dev) | 2130 | nv50_display_create(struct drm_device *dev) |
| 2128 | { | 2131 | { |
| @@ -2147,8 +2150,6 @@ nv50_display_create(struct drm_device *dev) | |||
| 2147 | disp->disp = &nouveau_display(dev)->disp; | 2150 | disp->disp = &nouveau_display(dev)->disp; |
| 2148 | dev->mode_config.funcs = &nv50_disp_func; | 2151 | dev->mode_config.funcs = &nv50_disp_func; |
| 2149 | dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; | 2152 | dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; |
| 2150 | if (nouveau_atomic) | ||
| 2151 | dev->driver->driver_features |= DRIVER_ATOMIC; | ||
| 2152 | 2153 | ||
| 2153 | /* small shared memory area we use for notifiers and semaphores */ | 2154 | /* small shared memory area we use for notifiers and semaphores */ |
| 2154 | ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, | 2155 | ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index debbbf0fd4bd..408b955e5c39 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
| @@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev) | |||
| 267 | struct nouveau_drm *drm = nouveau_drm(dev); | 267 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 268 | struct nvif_device *device = &drm->client.device; | 268 | struct nvif_device *device = &drm->client.device; |
| 269 | struct drm_connector *connector; | 269 | struct drm_connector *connector; |
| 270 | struct drm_connector_list_iter conn_iter; | ||
| 270 | 271 | ||
| 271 | INIT_LIST_HEAD(&drm->bl_connectors); | 272 | INIT_LIST_HEAD(&drm->bl_connectors); |
| 272 | 273 | ||
| @@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev) | |||
| 275 | return 0; | 276 | return 0; |
| 276 | } | 277 | } |
| 277 | 278 | ||
| 278 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 279 | drm_connector_list_iter_begin(dev, &conn_iter); |
| 280 | drm_for_each_connector_iter(connector, &conn_iter) { | ||
| 279 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && | 281 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && |
| 280 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) | 282 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) |
| 281 | continue; | 283 | continue; |
| @@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev) | |||
| 292 | break; | 294 | break; |
| 293 | } | 295 | } |
| 294 | } | 296 | } |
| 295 | 297 | drm_connector_list_iter_end(&conn_iter); | |
| 296 | 298 | ||
| 297 | return 0; | 299 | return 0; |
| 298 | } | 300 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7b557c354307..af68eae4c626 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -1208,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index) | |||
| 1208 | struct nouveau_display *disp = nouveau_display(dev); | 1208 | struct nouveau_display *disp = nouveau_display(dev); |
| 1209 | struct nouveau_connector *nv_connector = NULL; | 1209 | struct nouveau_connector *nv_connector = NULL; |
| 1210 | struct drm_connector *connector; | 1210 | struct drm_connector *connector; |
| 1211 | struct drm_connector_list_iter conn_iter; | ||
| 1211 | int type, ret = 0; | 1212 | int type, ret = 0; |
| 1212 | bool dummy; | 1213 | bool dummy; |
| 1213 | 1214 | ||
| 1214 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1215 | drm_connector_list_iter_begin(dev, &conn_iter); |
| 1216 | nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { | ||
| 1215 | nv_connector = nouveau_connector(connector); | 1217 | nv_connector = nouveau_connector(connector); |
| 1216 | if (nv_connector->index == index) | 1218 | if (nv_connector->index == index) { |
| 1219 | drm_connector_list_iter_end(&conn_iter); | ||
| 1217 | return connector; | 1220 | return connector; |
| 1221 | } | ||
| 1218 | } | 1222 | } |
| 1223 | drm_connector_list_iter_end(&conn_iter); | ||
| 1219 | 1224 | ||
| 1220 | nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); | 1225 | nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); |
| 1221 | if (!nv_connector) | 1226 | if (!nv_connector) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index a4d1a059bd3d..dc7454e7f19a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <drm/drm_encoder.h> | 33 | #include <drm/drm_encoder.h> |
| 34 | #include <drm/drm_dp_helper.h> | 34 | #include <drm/drm_dp_helper.h> |
| 35 | #include "nouveau_crtc.h" | 35 | #include "nouveau_crtc.h" |
| 36 | #include "nouveau_encoder.h" | ||
| 36 | 37 | ||
| 37 | struct nvkm_i2c_port; | 38 | struct nvkm_i2c_port; |
| 38 | 39 | ||
| @@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector( | |||
| 60 | return container_of(con, struct nouveau_connector, base); | 61 | return container_of(con, struct nouveau_connector, base); |
| 61 | } | 62 | } |
| 62 | 63 | ||
| 64 | static inline bool | ||
| 65 | nouveau_connector_is_mst(struct drm_connector *connector) | ||
| 66 | { | ||
| 67 | const struct nouveau_encoder *nv_encoder; | ||
| 68 | const struct drm_encoder *encoder; | ||
| 69 | |||
| 70 | if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) | ||
| 71 | return false; | ||
| 72 | |||
| 73 | nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY); | ||
| 74 | if (!nv_encoder) | ||
| 75 | return false; | ||
| 76 | |||
| 77 | encoder = &nv_encoder->base.base; | ||
| 78 | return encoder->encoder_type == DRM_MODE_ENCODER_DPMST; | ||
| 79 | } | ||
| 80 | |||
| 81 | #define nouveau_for_each_non_mst_connector_iter(connector, iter) \ | ||
| 82 | drm_for_each_connector_iter(connector, iter) \ | ||
| 83 | for_each_if(!nouveau_connector_is_mst(connector)) | ||
| 84 | |||
| 63 | static inline struct nouveau_connector * | 85 | static inline struct nouveau_connector * |
| 64 | nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) | 86 | nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) |
| 65 | { | 87 | { |
| 66 | struct drm_device *dev = nv_crtc->base.dev; | 88 | struct drm_device *dev = nv_crtc->base.dev; |
| 67 | struct drm_connector *connector; | 89 | struct drm_connector *connector; |
| 90 | struct drm_connector_list_iter conn_iter; | ||
| 91 | struct nouveau_connector *nv_connector = NULL; | ||
| 68 | struct drm_crtc *crtc = to_drm_crtc(nv_crtc); | 92 | struct drm_crtc *crtc = to_drm_crtc(nv_crtc); |
| 69 | 93 | ||
| 70 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 94 | drm_connector_list_iter_begin(dev, &conn_iter); |
| 71 | if (connector->encoder && connector->encoder->crtc == crtc) | 95 | nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { |
| 72 | return nouveau_connector(connector); | 96 | if (connector->encoder && connector->encoder->crtc == crtc) { |
| 97 | nv_connector = nouveau_connector(connector); | ||
| 98 | break; | ||
| 99 | } | ||
| 73 | } | 100 | } |
| 101 | drm_connector_list_iter_end(&conn_iter); | ||
| 74 | 102 | ||
| 75 | return NULL; | 103 | return nv_connector; |
| 76 | } | 104 | } |
| 77 | 105 | ||
| 78 | struct drm_connector * | 106 | struct drm_connector * |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 774b429142bc..ec7861457b84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev) | |||
| 404 | struct nouveau_display *disp = nouveau_display(dev); | 404 | struct nouveau_display *disp = nouveau_display(dev); |
| 405 | struct nouveau_drm *drm = nouveau_drm(dev); | 405 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 406 | struct drm_connector *connector; | 406 | struct drm_connector *connector; |
| 407 | struct drm_connector_list_iter conn_iter; | ||
| 407 | int ret; | 408 | int ret; |
| 408 | 409 | ||
| 409 | ret = disp->init(dev); | 410 | ret = disp->init(dev); |
| @@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev) | |||
| 411 | return ret; | 412 | return ret; |
| 412 | 413 | ||
| 413 | /* enable hotplug interrupts */ | 414 | /* enable hotplug interrupts */ |
| 414 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 415 | drm_connector_list_iter_begin(dev, &conn_iter); |
| 416 | nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { | ||
| 415 | struct nouveau_connector *conn = nouveau_connector(connector); | 417 | struct nouveau_connector *conn = nouveau_connector(connector); |
| 416 | nvif_notify_get(&conn->hpd); | 418 | nvif_notify_get(&conn->hpd); |
| 417 | } | 419 | } |
| 420 | drm_connector_list_iter_end(&conn_iter); | ||
| 418 | 421 | ||
| 419 | /* enable flip completion events */ | 422 | /* enable flip completion events */ |
| 420 | nvif_notify_get(&drm->flip); | 423 | nvif_notify_get(&drm->flip); |
| @@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) | |||
| 427 | struct nouveau_display *disp = nouveau_display(dev); | 430 | struct nouveau_display *disp = nouveau_display(dev); |
| 428 | struct nouveau_drm *drm = nouveau_drm(dev); | 431 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 429 | struct drm_connector *connector; | 432 | struct drm_connector *connector; |
| 433 | struct drm_connector_list_iter conn_iter; | ||
| 430 | 434 | ||
| 431 | if (!suspend) { | 435 | if (!suspend) { |
| 432 | if (drm_drv_uses_atomic_modeset(dev)) | 436 | if (drm_drv_uses_atomic_modeset(dev)) |
| @@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) | |||
| 439 | nvif_notify_put(&drm->flip); | 443 | nvif_notify_put(&drm->flip); |
| 440 | 444 | ||
| 441 | /* disable hotplug interrupts */ | 445 | /* disable hotplug interrupts */ |
| 442 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 446 | drm_connector_list_iter_begin(dev, &conn_iter); |
| 447 | nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { | ||
| 443 | struct nouveau_connector *conn = nouveau_connector(connector); | 448 | struct nouveau_connector *conn = nouveau_connector(connector); |
| 444 | nvif_notify_put(&conn->hpd); | 449 | nvif_notify_put(&conn->hpd); |
| 445 | } | 450 | } |
| 451 | drm_connector_list_iter_end(&conn_iter); | ||
| 446 | 452 | ||
| 447 | drm_kms_helper_poll_disable(dev); | 453 | drm_kms_helper_poll_disable(dev); |
| 448 | disp->fini(dev); | 454 | disp->fini(dev); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 775443c9af94..f5d3158f0378 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, " | |||
| 81 | int nouveau_modeset = -1; | 81 | int nouveau_modeset = -1; |
| 82 | module_param_named(modeset, nouveau_modeset, int, 0400); | 82 | module_param_named(modeset, nouveau_modeset, int, 0400); |
| 83 | 83 | ||
| 84 | MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); | ||
| 85 | static int nouveau_atomic = 0; | ||
| 86 | module_param_named(atomic, nouveau_atomic, int, 0400); | ||
| 87 | |||
| 84 | MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); | 88 | MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); |
| 85 | static int nouveau_runtime_pm = -1; | 89 | static int nouveau_runtime_pm = -1; |
| 86 | module_param_named(runpm, nouveau_runtime_pm, int, 0400); | 90 | module_param_named(runpm, nouveau_runtime_pm, int, 0400); |
| @@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
| 509 | 513 | ||
| 510 | pci_set_master(pdev); | 514 | pci_set_master(pdev); |
| 511 | 515 | ||
| 516 | if (nouveau_atomic) | ||
| 517 | driver_pci.driver_features |= DRIVER_ATOMIC; | ||
| 518 | |||
| 512 | ret = drm_get_pci_dev(pdev, pent, &driver_pci); | 519 | ret = drm_get_pci_dev(pdev, pent, &driver_pci); |
| 513 | if (ret) { | 520 | if (ret) { |
| 514 | nvkm_device_del(&device); | 521 | nvkm_device_del(&device); |
| @@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev) | |||
| 874 | static int | 881 | static int |
| 875 | nouveau_pmops_runtime_idle(struct device *dev) | 882 | nouveau_pmops_runtime_idle(struct device *dev) |
| 876 | { | 883 | { |
| 877 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 878 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
| 879 | struct nouveau_drm *drm = nouveau_drm(drm_dev); | ||
| 880 | struct drm_crtc *crtc; | ||
| 881 | |||
| 882 | if (!nouveau_pmops_runtime()) { | 884 | if (!nouveau_pmops_runtime()) { |
| 883 | pm_runtime_forbid(dev); | 885 | pm_runtime_forbid(dev); |
| 884 | return -EBUSY; | 886 | return -EBUSY; |
| 885 | } | 887 | } |
| 886 | 888 | ||
| 887 | list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) { | ||
| 888 | if (crtc->enabled) { | ||
| 889 | DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); | ||
| 890 | return -EBUSY; | ||
| 891 | } | ||
| 892 | } | ||
| 893 | pm_runtime_mark_last_busy(dev); | 889 | pm_runtime_mark_last_busy(dev); |
| 894 | pm_runtime_autosuspend(dev); | 890 | pm_runtime_autosuspend(dev); |
| 895 | /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ | 891 | /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 300daee74209..e6ccafcb9c41 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, | |||
| 616 | struct nouveau_bo *nvbo; | 616 | struct nouveau_bo *nvbo; |
| 617 | uint32_t data; | 617 | uint32_t data; |
| 618 | 618 | ||
| 619 | if (unlikely(r->bo_index > req->nr_buffers)) { | 619 | if (unlikely(r->bo_index >= req->nr_buffers)) { |
| 620 | NV_PRINTK(err, cli, "reloc bo index invalid\n"); | 620 | NV_PRINTK(err, cli, "reloc bo index invalid\n"); |
| 621 | ret = -EINVAL; | 621 | ret = -EINVAL; |
| 622 | break; | 622 | break; |
| @@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, | |||
| 626 | if (b->presumed.valid) | 626 | if (b->presumed.valid) |
| 627 | continue; | 627 | continue; |
| 628 | 628 | ||
| 629 | if (unlikely(r->reloc_bo_index > req->nr_buffers)) { | 629 | if (unlikely(r->reloc_bo_index >= req->nr_buffers)) { |
| 630 | NV_PRINTK(err, cli, "reloc container bo index invalid\n"); | 630 | NV_PRINTK(err, cli, "reloc container bo index invalid\n"); |
| 631 | ret = -EINVAL; | 631 | ret = -EINVAL; |
| 632 | break; | 632 | break; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index 73b5d46104bd..434d2fc5bb1c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | |||
| @@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev) | |||
| 140 | if (fb->func->init) | 140 | if (fb->func->init) |
| 141 | fb->func->init(fb); | 141 | fb->func->init(fb); |
| 142 | 142 | ||
| 143 | if (fb->func->init_remapper) | ||
| 144 | fb->func->init_remapper(fb); | ||
| 145 | |||
| 143 | if (fb->func->init_page) { | 146 | if (fb->func->init_page) { |
| 144 | ret = fb->func->init_page(fb); | 147 | ret = fb->func->init_page(fb); |
| 145 | if (WARN_ON(ret)) | 148 | if (WARN_ON(ret)) |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c index dffe1f5e1071..8205ce436b3e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c | |||
| @@ -37,6 +37,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base) | |||
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | void | 39 | void |
| 40 | gp100_fb_init_remapper(struct nvkm_fb *fb) | ||
| 41 | { | ||
| 42 | struct nvkm_device *device = fb->subdev.device; | ||
| 43 | /* Disable address remapper. */ | ||
| 44 | nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000); | ||
| 45 | } | ||
| 46 | |||
| 47 | void | ||
| 40 | gp100_fb_init(struct nvkm_fb *base) | 48 | gp100_fb_init(struct nvkm_fb *base) |
| 41 | { | 49 | { |
| 42 | struct gf100_fb *fb = gf100_fb(base); | 50 | struct gf100_fb *fb = gf100_fb(base); |
| @@ -56,6 +64,7 @@ gp100_fb = { | |||
| 56 | .dtor = gf100_fb_dtor, | 64 | .dtor = gf100_fb_dtor, |
| 57 | .oneinit = gf100_fb_oneinit, | 65 | .oneinit = gf100_fb_oneinit, |
| 58 | .init = gp100_fb_init, | 66 | .init = gp100_fb_init, |
| 67 | .init_remapper = gp100_fb_init_remapper, | ||
| 59 | .init_page = gm200_fb_init_page, | 68 | .init_page = gm200_fb_init_page, |
| 60 | .init_unkn = gp100_fb_init_unkn, | 69 | .init_unkn = gp100_fb_init_unkn, |
| 61 | .ram_new = gp100_ram_new, | 70 | .ram_new = gp100_ram_new, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index b84b9861ef26..b4d74e815674 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c | |||
| @@ -31,6 +31,7 @@ gp102_fb = { | |||
| 31 | .dtor = gf100_fb_dtor, | 31 | .dtor = gf100_fb_dtor, |
| 32 | .oneinit = gf100_fb_oneinit, | 32 | .oneinit = gf100_fb_oneinit, |
| 33 | .init = gp100_fb_init, | 33 | .init = gp100_fb_init, |
| 34 | .init_remapper = gp100_fb_init_remapper, | ||
| 34 | .init_page = gm200_fb_init_page, | 35 | .init_page = gm200_fb_init_page, |
| 35 | .ram_new = gp100_ram_new, | 36 | .ram_new = gp100_ram_new, |
| 36 | }; | 37 | }; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index 2857f31466bf..1e4ad61c19e1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | |||
| @@ -11,6 +11,7 @@ struct nvkm_fb_func { | |||
| 11 | u32 (*tags)(struct nvkm_fb *); | 11 | u32 (*tags)(struct nvkm_fb *); |
| 12 | int (*oneinit)(struct nvkm_fb *); | 12 | int (*oneinit)(struct nvkm_fb *); |
| 13 | void (*init)(struct nvkm_fb *); | 13 | void (*init)(struct nvkm_fb *); |
| 14 | void (*init_remapper)(struct nvkm_fb *); | ||
| 14 | int (*init_page)(struct nvkm_fb *); | 15 | int (*init_page)(struct nvkm_fb *); |
| 15 | void (*init_unkn)(struct nvkm_fb *); | 16 | void (*init_unkn)(struct nvkm_fb *); |
| 16 | void (*intr)(struct nvkm_fb *); | 17 | void (*intr)(struct nvkm_fb *); |
| @@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *); | |||
| 69 | 70 | ||
| 70 | int gm200_fb_init_page(struct nvkm_fb *); | 71 | int gm200_fb_init_page(struct nvkm_fb *); |
| 71 | 72 | ||
| 73 | void gp100_fb_init_remapper(struct nvkm_fb *); | ||
| 72 | void gp100_fb_init_unkn(struct nvkm_fb *); | 74 | void gp100_fb_init_unkn(struct nvkm_fb *); |
| 73 | #endif | 75 | #endif |
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile index 2589f4acd5ae..9c81301d0eed 100644 --- a/drivers/gpu/drm/sun4i/Makefile +++ b/drivers/gpu/drm/sun4i/Makefile | |||
| @@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o | |||
| 32 | obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o | 32 | obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o |
| 33 | obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o | 33 | obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o |
| 34 | 34 | ||
| 35 | obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o | 35 | obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o |
| 36 | ifdef CONFIG_DRM_SUN4I_BACKEND | ||
| 37 | obj-$(CONFIG_DRM_SUN4I) += sun4i-frontend.o | ||
| 38 | endif | ||
| 36 | obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o | 39 | obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o |
| 37 | obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o | 40 | obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o |
| 38 | obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o | 41 | obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o |
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 776c1513e582..a2bd5876c633 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c | |||
| @@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, | |||
| 398 | * unaligned offset is malformed and cause commands stream | 398 | * unaligned offset is malformed and cause commands stream |
| 399 | * corruption on the buffer address relocation. | 399 | * corruption on the buffer address relocation. |
| 400 | */ | 400 | */ |
| 401 | if (offset & 3 || offset >= obj->gem.size) { | 401 | if (offset & 3 || offset > obj->gem.size) { |
| 402 | err = -EINVAL; | 402 | err = -EINVAL; |
| 403 | goto fail; | 403 | goto fail; |
| 404 | } | 404 | } |
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index f1d5f76e9c33..d88073e7d22d 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c | |||
| @@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev) | |||
| 218 | return err; | 218 | return err; |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) | ||
| 222 | goto skip_iommu; | ||
| 223 | |||
| 221 | host->group = iommu_group_get(&pdev->dev); | 224 | host->group = iommu_group_get(&pdev->dev); |
| 222 | if (host->group) { | 225 | if (host->group) { |
| 223 | struct iommu_domain_geometry *geometry; | 226 | struct iommu_domain_geometry *geometry; |
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index e2f4a4d93d20..527a1cddb14f 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c | |||
| @@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job) | |||
| 569 | for (i = 0; i < job->num_unpins; i++) { | 569 | for (i = 0; i < job->num_unpins; i++) { |
| 570 | struct host1x_job_unpin_data *unpin = &job->unpins[i]; | 570 | struct host1x_job_unpin_data *unpin = &job->unpins[i]; |
| 571 | 571 | ||
| 572 | if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) { | 572 | if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && |
| 573 | unpin->size && host->domain) { | ||
| 573 | iommu_unmap(host->domain, job->addr_phys[i], | 574 | iommu_unmap(host->domain, job->addr_phys[i], |
| 574 | unpin->size); | 575 | unpin->size); |
| 575 | free_iova(&host->iova, | 576 | free_iova(&host->iova, |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b344a883f116..115ff26e9ced 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -484,14 +484,37 @@ static int dmar_forcedac; | |||
| 484 | static int intel_iommu_strict; | 484 | static int intel_iommu_strict; |
| 485 | static int intel_iommu_superpage = 1; | 485 | static int intel_iommu_superpage = 1; |
| 486 | static int intel_iommu_ecs = 1; | 486 | static int intel_iommu_ecs = 1; |
| 487 | static int intel_iommu_pasid28; | ||
| 487 | static int iommu_identity_mapping; | 488 | static int iommu_identity_mapping; |
| 488 | 489 | ||
| 489 | #define IDENTMAP_ALL 1 | 490 | #define IDENTMAP_ALL 1 |
| 490 | #define IDENTMAP_GFX 2 | 491 | #define IDENTMAP_GFX 2 |
| 491 | #define IDENTMAP_AZALIA 4 | 492 | #define IDENTMAP_AZALIA 4 |
| 492 | 493 | ||
| 493 | #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap)) | 494 | /* Broadwell and Skylake have broken ECS support — normal so-called "second |
| 494 | #define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap)) | 495 | * level" translation of DMA requests-without-PASID doesn't actually happen |
| 496 | * unless you also set the NESTE bit in an extended context-entry. Which of | ||
| 497 | * course means that SVM doesn't work because it's trying to do nested | ||
| 498 | * translation of the physical addresses it finds in the process page tables, | ||
| 499 | * through the IOVA->phys mapping found in the "second level" page tables. | ||
| 500 | * | ||
| 501 | * The VT-d specification was retroactively changed to change the definition | ||
| 502 | * of the capability bits and pretend that Broadwell/Skylake never happened... | ||
| 503 | * but unfortunately the wrong bit was changed. It's ECS which is broken, but | ||
| 504 | * for some reason it was the PASID capability bit which was redefined (from | ||
| 505 | * bit 28 on BDW/SKL to bit 40 in future). | ||
| 506 | * | ||
| 507 | * So our test for ECS needs to eschew those implementations which set the old | ||
| 508 | * PASID capabiity bit 28, since those are the ones on which ECS is broken. | ||
| 509 | * Unless we are working around the 'pasid28' limitations, that is, by putting | ||
| 510 | * the device into passthrough mode for normal DMA and thus masking the bug. | ||
| 511 | */ | ||
| 512 | #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ | ||
| 513 | (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap))) | ||
| 514 | /* PASID support is thus enabled if ECS is enabled and *either* of the old | ||
| 515 | * or new capability bits are set. */ | ||
| 516 | #define pasid_enabled(iommu) (ecs_enabled(iommu) && \ | ||
| 517 | (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap))) | ||
| 495 | 518 | ||
| 496 | int intel_iommu_gfx_mapped; | 519 | int intel_iommu_gfx_mapped; |
| 497 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); | 520 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); |
| @@ -554,6 +577,11 @@ static int __init intel_iommu_setup(char *str) | |||
| 554 | printk(KERN_INFO | 577 | printk(KERN_INFO |
| 555 | "Intel-IOMMU: disable extended context table support\n"); | 578 | "Intel-IOMMU: disable extended context table support\n"); |
| 556 | intel_iommu_ecs = 0; | 579 | intel_iommu_ecs = 0; |
| 580 | } else if (!strncmp(str, "pasid28", 7)) { | ||
| 581 | printk(KERN_INFO | ||
| 582 | "Intel-IOMMU: enable pre-production PASID support\n"); | ||
| 583 | intel_iommu_pasid28 = 1; | ||
| 584 | iommu_identity_mapping |= IDENTMAP_GFX; | ||
| 557 | } else if (!strncmp(str, "tboot_noforce", 13)) { | 585 | } else if (!strncmp(str, "tboot_noforce", 13)) { |
| 558 | printk(KERN_INFO | 586 | printk(KERN_INFO |
| 559 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); | 587 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); |
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 07ea6a48aac6..87107c995cb5 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c | |||
| @@ -136,6 +136,7 @@ struct dm_writecache { | |||
| 136 | struct dm_target *ti; | 136 | struct dm_target *ti; |
| 137 | struct dm_dev *dev; | 137 | struct dm_dev *dev; |
| 138 | struct dm_dev *ssd_dev; | 138 | struct dm_dev *ssd_dev; |
| 139 | sector_t start_sector; | ||
| 139 | void *memory_map; | 140 | void *memory_map; |
| 140 | uint64_t memory_map_size; | 141 | uint64_t memory_map_size; |
| 141 | size_t metadata_sectors; | 142 | size_t metadata_sectors; |
| @@ -293,6 +294,10 @@ static int persistent_memory_claim(struct dm_writecache *wc) | |||
| 293 | } | 294 | } |
| 294 | 295 | ||
| 295 | dax_read_unlock(id); | 296 | dax_read_unlock(id); |
| 297 | |||
| 298 | wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT; | ||
| 299 | wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT; | ||
| 300 | |||
| 296 | return 0; | 301 | return 0; |
| 297 | err3: | 302 | err3: |
| 298 | kvfree(pages); | 303 | kvfree(pages); |
| @@ -311,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) | |||
| 311 | static void persistent_memory_release(struct dm_writecache *wc) | 316 | static void persistent_memory_release(struct dm_writecache *wc) |
| 312 | { | 317 | { |
| 313 | if (wc->memory_vmapped) | 318 | if (wc->memory_vmapped) |
| 314 | vunmap(wc->memory_map); | 319 | vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT)); |
| 315 | } | 320 | } |
| 316 | 321 | ||
| 317 | static struct page *persistent_memory_page(void *addr) | 322 | static struct page *persistent_memory_page(void *addr) |
| @@ -359,7 +364,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e) | |||
| 359 | 364 | ||
| 360 | static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e) | 365 | static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e) |
| 361 | { | 366 | { |
| 362 | return wc->metadata_sectors + | 367 | return wc->start_sector + wc->metadata_sectors + |
| 363 | ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT)); | 368 | ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT)); |
| 364 | } | 369 | } |
| 365 | 370 | ||
| @@ -471,6 +476,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc) | |||
| 471 | if (unlikely(region.sector + region.count > wc->metadata_sectors)) | 476 | if (unlikely(region.sector + region.count > wc->metadata_sectors)) |
| 472 | region.count = wc->metadata_sectors - region.sector; | 477 | region.count = wc->metadata_sectors - region.sector; |
| 473 | 478 | ||
| 479 | region.sector += wc->start_sector; | ||
| 474 | atomic_inc(&endio.count); | 480 | atomic_inc(&endio.count); |
| 475 | req.bi_op = REQ_OP_WRITE; | 481 | req.bi_op = REQ_OP_WRITE; |
| 476 | req.bi_op_flags = REQ_SYNC; | 482 | req.bi_op_flags = REQ_SYNC; |
| @@ -1946,14 +1952,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 1946 | } | 1952 | } |
| 1947 | wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode); | 1953 | wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode); |
| 1948 | 1954 | ||
| 1949 | if (WC_MODE_PMEM(wc)) { | ||
| 1950 | r = persistent_memory_claim(wc); | ||
| 1951 | if (r) { | ||
| 1952 | ti->error = "Unable to map persistent memory for cache"; | ||
| 1953 | goto bad; | ||
| 1954 | } | ||
| 1955 | } | ||
| 1956 | |||
| 1957 | /* | 1955 | /* |
| 1958 | * Parse the cache block size | 1956 | * Parse the cache block size |
| 1959 | */ | 1957 | */ |
| @@ -1982,7 +1980,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 1982 | 1980 | ||
| 1983 | while (opt_params) { | 1981 | while (opt_params) { |
| 1984 | string = dm_shift_arg(&as), opt_params--; | 1982 | string = dm_shift_arg(&as), opt_params--; |
| 1985 | if (!strcasecmp(string, "high_watermark") && opt_params >= 1) { | 1983 | if (!strcasecmp(string, "start_sector") && opt_params >= 1) { |
| 1984 | unsigned long long start_sector; | ||
| 1985 | string = dm_shift_arg(&as), opt_params--; | ||
| 1986 | if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1) | ||
| 1987 | goto invalid_optional; | ||
| 1988 | wc->start_sector = start_sector; | ||
| 1989 | if (wc->start_sector != start_sector || | ||
| 1990 | wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT) | ||
| 1991 | goto invalid_optional; | ||
| 1992 | } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) { | ||
| 1986 | string = dm_shift_arg(&as), opt_params--; | 1993 | string = dm_shift_arg(&as), opt_params--; |
| 1987 | if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1) | 1994 | if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1) |
| 1988 | goto invalid_optional; | 1995 | goto invalid_optional; |
| @@ -2039,12 +2046,20 @@ invalid_optional: | |||
| 2039 | goto bad; | 2046 | goto bad; |
| 2040 | } | 2047 | } |
| 2041 | 2048 | ||
| 2042 | if (!WC_MODE_PMEM(wc)) { | 2049 | if (WC_MODE_PMEM(wc)) { |
| 2050 | r = persistent_memory_claim(wc); | ||
| 2051 | if (r) { | ||
| 2052 | ti->error = "Unable to map persistent memory for cache"; | ||
| 2053 | goto bad; | ||
| 2054 | } | ||
| 2055 | } else { | ||
| 2043 | struct dm_io_region region; | 2056 | struct dm_io_region region; |
| 2044 | struct dm_io_request req; | 2057 | struct dm_io_request req; |
| 2045 | size_t n_blocks, n_metadata_blocks; | 2058 | size_t n_blocks, n_metadata_blocks; |
| 2046 | uint64_t n_bitmap_bits; | 2059 | uint64_t n_bitmap_bits; |
| 2047 | 2060 | ||
| 2061 | wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT; | ||
| 2062 | |||
| 2048 | bio_list_init(&wc->flush_list); | 2063 | bio_list_init(&wc->flush_list); |
| 2049 | wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush"); | 2064 | wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush"); |
| 2050 | if (IS_ERR(wc->flush_thread)) { | 2065 | if (IS_ERR(wc->flush_thread)) { |
| @@ -2097,7 +2112,7 @@ invalid_optional: | |||
| 2097 | } | 2112 | } |
| 2098 | 2113 | ||
| 2099 | region.bdev = wc->ssd_dev->bdev; | 2114 | region.bdev = wc->ssd_dev->bdev; |
| 2100 | region.sector = 0; | 2115 | region.sector = wc->start_sector; |
| 2101 | region.count = wc->metadata_sectors; | 2116 | region.count = wc->metadata_sectors; |
| 2102 | req.bi_op = REQ_OP_READ; | 2117 | req.bi_op = REQ_OP_READ; |
| 2103 | req.bi_op_flags = REQ_SYNC; | 2118 | req.bi_op_flags = REQ_SYNC; |
| @@ -2265,7 +2280,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type, | |||
| 2265 | 2280 | ||
| 2266 | static struct target_type writecache_target = { | 2281 | static struct target_type writecache_target = { |
| 2267 | .name = "writecache", | 2282 | .name = "writecache", |
| 2268 | .version = {1, 0, 0}, | 2283 | .version = {1, 1, 0}, |
| 2269 | .module = THIS_MODULE, | 2284 | .module = THIS_MODULE, |
| 2270 | .ctr = writecache_ctr, | 2285 | .ctr = writecache_ctr, |
| 2271 | .dtr = writecache_dtr, | 2286 | .dtr = writecache_dtr, |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index fc7383106946..91eb8910b1c9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | |||
| @@ -63,8 +63,6 @@ | |||
| 63 | 63 | ||
| 64 | #define AQ_CFG_NAPI_WEIGHT 64U | 64 | #define AQ_CFG_NAPI_WEIGHT 64U |
| 65 | 65 | ||
| 66 | #define AQ_CFG_MULTICAST_ADDRESS_MAX 32U | ||
| 67 | |||
| 68 | /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ | 66 | /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ |
| 69 | 67 | ||
| 70 | #define AQ_NIC_FC_OFF 0U | 68 | #define AQ_NIC_FC_OFF 0U |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index a2d416b24ffc..2c6ebd91a9f2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h | |||
| @@ -98,6 +98,8 @@ struct aq_stats_s { | |||
| 98 | #define AQ_HW_MEDIA_TYPE_TP 1U | 98 | #define AQ_HW_MEDIA_TYPE_TP 1U |
| 99 | #define AQ_HW_MEDIA_TYPE_FIBRE 2U | 99 | #define AQ_HW_MEDIA_TYPE_FIBRE 2U |
| 100 | 100 | ||
| 101 | #define AQ_HW_MULTICAST_ADDRESS_MAX 32U | ||
| 102 | |||
| 101 | struct aq_hw_s { | 103 | struct aq_hw_s { |
| 102 | atomic_t flags; | 104 | atomic_t flags; |
| 103 | u8 rbl_enabled:1; | 105 | u8 rbl_enabled:1; |
| @@ -177,7 +179,7 @@ struct aq_hw_ops { | |||
| 177 | unsigned int packet_filter); | 179 | unsigned int packet_filter); |
| 178 | 180 | ||
| 179 | int (*hw_multicast_list_set)(struct aq_hw_s *self, | 181 | int (*hw_multicast_list_set)(struct aq_hw_s *self, |
| 180 | u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] | 182 | u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX] |
| 181 | [ETH_ALEN], | 183 | [ETH_ALEN], |
| 182 | u32 count); | 184 | u32 count); |
| 183 | 185 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index ba5fe8c4125d..e3ae29e523f0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c | |||
| @@ -135,17 +135,10 @@ err_exit: | |||
| 135 | static void aq_ndev_set_multicast_settings(struct net_device *ndev) | 135 | static void aq_ndev_set_multicast_settings(struct net_device *ndev) |
| 136 | { | 136 | { |
| 137 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 137 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
| 138 | int err = 0; | ||
| 139 | 138 | ||
| 140 | err = aq_nic_set_packet_filter(aq_nic, ndev->flags); | 139 | aq_nic_set_packet_filter(aq_nic, ndev->flags); |
| 141 | if (err < 0) | ||
| 142 | return; | ||
| 143 | 140 | ||
| 144 | if (netdev_mc_count(ndev)) { | 141 | aq_nic_set_multicast_list(aq_nic, ndev); |
| 145 | err = aq_nic_set_multicast_list(aq_nic, ndev); | ||
| 146 | if (err < 0) | ||
| 147 | return; | ||
| 148 | } | ||
| 149 | } | 142 | } |
| 150 | 143 | ||
| 151 | static const struct net_device_ops aq_ndev_ops = { | 144 | static const struct net_device_ops aq_ndev_ops = { |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 1a1a6380c128..7a22d0257e04 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
| @@ -563,34 +563,41 @@ err_exit: | |||
| 563 | 563 | ||
| 564 | int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) | 564 | int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) |
| 565 | { | 565 | { |
| 566 | unsigned int packet_filter = self->packet_filter; | ||
| 566 | struct netdev_hw_addr *ha = NULL; | 567 | struct netdev_hw_addr *ha = NULL; |
| 567 | unsigned int i = 0U; | 568 | unsigned int i = 0U; |
| 568 | 569 | ||
| 569 | self->mc_list.count = 0U; | 570 | self->mc_list.count = 0; |
| 570 | 571 | if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) { | |
| 571 | netdev_for_each_mc_addr(ha, ndev) { | 572 | packet_filter |= IFF_PROMISC; |
| 572 | ether_addr_copy(self->mc_list.ar[i++], ha->addr); | 573 | } else { |
| 573 | ++self->mc_list.count; | 574 | netdev_for_each_uc_addr(ha, ndev) { |
| 575 | ether_addr_copy(self->mc_list.ar[i++], ha->addr); | ||
| 574 | 576 | ||
| 575 | if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) | 577 | if (i >= AQ_HW_MULTICAST_ADDRESS_MAX) |
| 576 | break; | 578 | break; |
| 579 | } | ||
| 577 | } | 580 | } |
| 578 | 581 | ||
| 579 | if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) { | 582 | if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) { |
| 580 | /* Number of filters is too big: atlantic does not support this. | 583 | packet_filter |= IFF_ALLMULTI; |
| 581 | * Force all multi filter to support this. | ||
| 582 | * With this we disable all UC filters and setup "all pass" | ||
| 583 | * multicast mask | ||
| 584 | */ | ||
| 585 | self->packet_filter |= IFF_ALLMULTI; | ||
| 586 | self->aq_nic_cfg.mc_list_count = 0; | ||
| 587 | return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, | ||
| 588 | self->packet_filter); | ||
| 589 | } else { | 584 | } else { |
| 590 | return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, | 585 | netdev_for_each_mc_addr(ha, ndev) { |
| 591 | self->mc_list.ar, | 586 | ether_addr_copy(self->mc_list.ar[i++], ha->addr); |
| 592 | self->mc_list.count); | 587 | |
| 588 | if (i >= AQ_HW_MULTICAST_ADDRESS_MAX) | ||
| 589 | break; | ||
| 590 | } | ||
| 591 | } | ||
| 592 | |||
| 593 | if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) { | ||
| 594 | packet_filter |= IFF_MULTICAST; | ||
| 595 | self->mc_list.count = i; | ||
| 596 | self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, | ||
| 597 | self->mc_list.ar, | ||
| 598 | self->mc_list.count); | ||
| 593 | } | 599 | } |
| 600 | return aq_nic_set_packet_filter(self, packet_filter); | ||
| 594 | } | 601 | } |
| 595 | 602 | ||
| 596 | int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) | 603 | int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index faa533a0ec47..fecfc401f95d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h | |||
| @@ -75,7 +75,7 @@ struct aq_nic_s { | |||
| 75 | struct aq_hw_link_status_s link_status; | 75 | struct aq_hw_link_status_s link_status; |
| 76 | struct { | 76 | struct { |
| 77 | u32 count; | 77 | u32 count; |
| 78 | u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; | 78 | u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN]; |
| 79 | } mc_list; | 79 | } mc_list; |
| 80 | 80 | ||
| 81 | struct pci_dev *pdev; | 81 | struct pci_dev *pdev; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 67e2f9fb9402..8cc6abadc03b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | |||
| @@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, | |||
| 765 | 765 | ||
| 766 | static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, | 766 | static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, |
| 767 | u8 ar_mac | 767 | u8 ar_mac |
| 768 | [AQ_CFG_MULTICAST_ADDRESS_MAX] | 768 | [AQ_HW_MULTICAST_ADDRESS_MAX] |
| 769 | [ETH_ALEN], | 769 | [ETH_ALEN], |
| 770 | u32 count) | 770 | u32 count) |
| 771 | { | 771 | { |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 819f6bcf9b4e..956860a69797 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | |||
| @@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, | |||
| 784 | 784 | ||
| 785 | static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, | 785 | static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, |
| 786 | u8 ar_mac | 786 | u8 ar_mac |
| 787 | [AQ_CFG_MULTICAST_ADDRESS_MAX] | 787 | [AQ_HW_MULTICAST_ADDRESS_MAX] |
| 788 | [ETH_ALEN], | 788 | [ETH_ALEN], |
| 789 | u32 count) | 789 | u32 count) |
| 790 | { | 790 | { |
| @@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, | |||
| 812 | 812 | ||
| 813 | hw_atl_rpfl2_uc_flr_en_set(self, | 813 | hw_atl_rpfl2_uc_flr_en_set(self, |
| 814 | (self->aq_nic_cfg->is_mc_list_enabled), | 814 | (self->aq_nic_cfg->is_mc_list_enabled), |
| 815 | HW_ATL_B0_MAC_MIN + i); | 815 | HW_ATL_B0_MAC_MIN + i); |
| 816 | } | 816 | } |
| 817 | 817 | ||
| 818 | err = aq_hw_err_from_flags(self); | 818 | err = aq_hw_err_from_flags(self); |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index d5fca2e5a9bc..a1f60f89e059 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
| @@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev) | |||
| 1946 | if (!priv->is_lite) | 1946 | if (!priv->is_lite) |
| 1947 | priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); | 1947 | priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); |
| 1948 | else | 1948 | else |
| 1949 | priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & | 1949 | priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & |
| 1950 | GIB_FCS_STRIP); | 1950 | GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); |
| 1951 | 1951 | ||
| 1952 | phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, | 1952 | phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, |
| 1953 | 0, priv->phy_interface); | 1953 | 0, priv->phy_interface); |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index d6e5d0cbf3a3..cf440b91fd04 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
| @@ -278,7 +278,8 @@ struct bcm_rsb { | |||
| 278 | #define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) | 278 | #define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) |
| 279 | #define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) | 279 | #define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) |
| 280 | #define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) | 280 | #define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) |
| 281 | #define GIB_FCS_STRIP (1 << 6) | 281 | #define GIB_FCS_STRIP_SHIFT 6 |
| 282 | #define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT) | ||
| 282 | #define GIB_LCL_LOOP_EN (1 << 7) | 283 | #define GIB_LCL_LOOP_EN (1 << 7) |
| 283 | #define GIB_LCL_LOOP_TXEN (1 << 8) | 284 | #define GIB_LCL_LOOP_TXEN (1 << 8) |
| 284 | #define GIB_RMT_LOOP_EN (1 << 9) | 285 | #define GIB_RMT_LOOP_EN (1 << 9) |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 176fc9f4d7de..4394c1162be4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -5712,7 +5712,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) | |||
| 5712 | } | 5712 | } |
| 5713 | vnic->uc_filter_count = 1; | 5713 | vnic->uc_filter_count = 1; |
| 5714 | 5714 | ||
| 5715 | vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; | 5715 | vnic->rx_mask = 0; |
| 5716 | if (bp->dev->flags & IFF_BROADCAST) | ||
| 5717 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; | ||
| 5716 | 5718 | ||
| 5717 | if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) | 5719 | if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) |
| 5718 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; | 5720 | vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
| @@ -5917,7 +5919,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) | |||
| 5917 | return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); | 5919 | return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); |
| 5918 | } | 5920 | } |
| 5919 | 5921 | ||
| 5920 | void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) | 5922 | static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) |
| 5921 | { | 5923 | { |
| 5922 | bp->hw_resc.max_irqs = max_irqs; | 5924 | bp->hw_resc.max_irqs = max_irqs; |
| 5923 | } | 5925 | } |
| @@ -6888,7 +6890,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) | |||
| 6888 | rc = bnxt_request_irq(bp); | 6890 | rc = bnxt_request_irq(bp); |
| 6889 | if (rc) { | 6891 | if (rc) { |
| 6890 | netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); | 6892 | netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); |
| 6891 | goto open_err; | 6893 | goto open_err_irq; |
| 6892 | } | 6894 | } |
| 6893 | } | 6895 | } |
| 6894 | 6896 | ||
| @@ -6928,6 +6930,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) | |||
| 6928 | open_err: | 6930 | open_err: |
| 6929 | bnxt_debug_dev_exit(bp); | 6931 | bnxt_debug_dev_exit(bp); |
| 6930 | bnxt_disable_napi(bp); | 6932 | bnxt_disable_napi(bp); |
| 6933 | |||
| 6934 | open_err_irq: | ||
| 6931 | bnxt_del_napi(bp); | 6935 | bnxt_del_napi(bp); |
| 6932 | 6936 | ||
| 6933 | open_err_free_mem: | 6937 | open_err_free_mem: |
| @@ -7214,13 +7218,16 @@ static void bnxt_set_rx_mode(struct net_device *dev) | |||
| 7214 | 7218 | ||
| 7215 | mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | | 7219 | mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | |
| 7216 | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | | 7220 | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | |
| 7217 | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); | 7221 | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | |
| 7222 | CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); | ||
| 7218 | 7223 | ||
| 7219 | if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) | 7224 | if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) |
| 7220 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; | 7225 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
| 7221 | 7226 | ||
| 7222 | uc_update = bnxt_uc_list_updated(bp); | 7227 | uc_update = bnxt_uc_list_updated(bp); |
| 7223 | 7228 | ||
| 7229 | if (dev->flags & IFF_BROADCAST) | ||
| 7230 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; | ||
| 7224 | if (dev->flags & IFF_ALLMULTI) { | 7231 | if (dev->flags & IFF_ALLMULTI) { |
| 7225 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; | 7232 | mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; |
| 7226 | vnic->mc_list_count = 0; | 7233 | vnic->mc_list_count = 0; |
| @@ -8502,11 +8509,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) | |||
| 8502 | int rx, tx, cp; | 8509 | int rx, tx, cp; |
| 8503 | 8510 | ||
| 8504 | _bnxt_get_max_rings(bp, &rx, &tx, &cp); | 8511 | _bnxt_get_max_rings(bp, &rx, &tx, &cp); |
| 8512 | *max_rx = rx; | ||
| 8513 | *max_tx = tx; | ||
| 8505 | if (!rx || !tx || !cp) | 8514 | if (!rx || !tx || !cp) |
| 8506 | return -ENOMEM; | 8515 | return -ENOMEM; |
| 8507 | 8516 | ||
| 8508 | *max_rx = rx; | ||
| 8509 | *max_tx = tx; | ||
| 8510 | return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); | 8517 | return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); |
| 8511 | } | 8518 | } |
| 8512 | 8519 | ||
| @@ -8520,8 +8527,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, | |||
| 8520 | /* Not enough rings, try disabling agg rings. */ | 8527 | /* Not enough rings, try disabling agg rings. */ |
| 8521 | bp->flags &= ~BNXT_FLAG_AGG_RINGS; | 8528 | bp->flags &= ~BNXT_FLAG_AGG_RINGS; |
| 8522 | rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); | 8529 | rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); |
| 8523 | if (rc) | 8530 | if (rc) { |
| 8531 | /* set BNXT_FLAG_AGG_RINGS back for consistency */ | ||
| 8532 | bp->flags |= BNXT_FLAG_AGG_RINGS; | ||
| 8524 | return rc; | 8533 | return rc; |
| 8534 | } | ||
| 8525 | bp->flags |= BNXT_FLAG_NO_AGG_RINGS; | 8535 | bp->flags |= BNXT_FLAG_NO_AGG_RINGS; |
| 8526 | bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); | 8536 | bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); |
| 8527 | bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); | 8537 | bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9b14eb610b9f..91575ef97c8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
| @@ -1470,7 +1470,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); | |||
| 1470 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); | 1470 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); |
| 1471 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); | 1471 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); |
| 1472 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); | 1472 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); |
| 1473 | void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max); | ||
| 1474 | int bnxt_get_avail_msix(struct bnxt *bp, int num); | 1473 | int bnxt_get_avail_msix(struct bnxt *bp, int num); |
| 1475 | int bnxt_reserve_rings(struct bnxt *bp); | 1474 | int bnxt_reserve_rings(struct bnxt *bp); |
| 1476 | void bnxt_tx_disable(struct bnxt *bp); | 1475 | void bnxt_tx_disable(struct bnxt *bp); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 795f45024c20..491bd40a254d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
| @@ -27,6 +27,15 @@ | |||
| 27 | #define BNXT_FID_INVALID 0xffff | 27 | #define BNXT_FID_INVALID 0xffff |
| 28 | #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) | 28 | #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) |
| 29 | 29 | ||
| 30 | #define is_vlan_pcp_wildcarded(vlan_tci_mask) \ | ||
| 31 | ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000) | ||
| 32 | #define is_vlan_pcp_exactmatch(vlan_tci_mask) \ | ||
| 33 | ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK) | ||
| 34 | #define is_vlan_pcp_zero(vlan_tci) \ | ||
| 35 | ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000) | ||
| 36 | #define is_vid_exactmatch(vlan_tci_mask) \ | ||
| 37 | ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK) | ||
| 38 | |||
| 30 | /* Return the dst fid of the func for flow forwarding | 39 | /* Return the dst fid of the func for flow forwarding |
| 31 | * For PFs: src_fid is the fid of the PF | 40 | * For PFs: src_fid is the fid of the PF |
| 32 | * For VF-reps: src_fid the fid of the VF | 41 | * For VF-reps: src_fid the fid of the VF |
| @@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len) | |||
| 389 | return true; | 398 | return true; |
| 390 | } | 399 | } |
| 391 | 400 | ||
| 401 | static bool is_vlan_tci_allowed(__be16 vlan_tci_mask, | ||
| 402 | __be16 vlan_tci) | ||
| 403 | { | ||
| 404 | /* VLAN priority must be either exactly zero or fully wildcarded and | ||
| 405 | * VLAN id must be exact match. | ||
| 406 | */ | ||
| 407 | if (is_vid_exactmatch(vlan_tci_mask) && | ||
| 408 | ((is_vlan_pcp_exactmatch(vlan_tci_mask) && | ||
| 409 | is_vlan_pcp_zero(vlan_tci)) || | ||
| 410 | is_vlan_pcp_wildcarded(vlan_tci_mask))) | ||
| 411 | return true; | ||
| 412 | |||
| 413 | return false; | ||
| 414 | } | ||
| 415 | |||
| 392 | static bool bits_set(void *key, int len) | 416 | static bool bits_set(void *key, int len) |
| 393 | { | 417 | { |
| 394 | const u8 *p = key; | 418 | const u8 *p = key; |
| @@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) | |||
| 803 | /* Currently VLAN fields cannot be partial wildcard */ | 827 | /* Currently VLAN fields cannot be partial wildcard */ |
| 804 | if (bits_set(&flow->l2_key.inner_vlan_tci, | 828 | if (bits_set(&flow->l2_key.inner_vlan_tci, |
| 805 | sizeof(flow->l2_key.inner_vlan_tci)) && | 829 | sizeof(flow->l2_key.inner_vlan_tci)) && |
| 806 | !is_exactmatch(&flow->l2_mask.inner_vlan_tci, | 830 | !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci, |
| 807 | sizeof(flow->l2_mask.inner_vlan_tci))) { | 831 | flow->l2_key.inner_vlan_tci)) { |
| 808 | netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n"); | 832 | netdev_info(bp->dev, "Unsupported VLAN TCI\n"); |
| 809 | return false; | 833 | return false; |
| 810 | } | 834 | } |
| 811 | if (bits_set(&flow->l2_key.inner_vlan_tpid, | 835 | if (bits_set(&flow->l2_key.inner_vlan_tpid, |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 347e4f946eb2..840f6e505f73 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | |||
| @@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, | |||
| 169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; | 169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; |
| 170 | } | 170 | } |
| 171 | bnxt_fill_msix_vecs(bp, ent); | 171 | bnxt_fill_msix_vecs(bp, ent); |
| 172 | bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix); | ||
| 173 | bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); | 172 | bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); |
| 174 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; | 173 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; |
| 175 | return avail_msix; | 174 | return avail_msix; |
| @@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
| 192 | msix_requested = edev->ulp_tbl[ulp_id].msix_requested; | 191 | msix_requested = edev->ulp_tbl[ulp_id].msix_requested; |
| 193 | bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); | 192 | bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); |
| 194 | edev->ulp_tbl[ulp_id].msix_requested = 0; | 193 | edev->ulp_tbl[ulp_id].msix_requested = 0; |
| 195 | bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested); | ||
| 196 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; | 194 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; |
| 197 | if (netif_running(dev)) { | 195 | if (netif_running(dev)) { |
| 198 | bnxt_close_nic(bp, true, false); | 196 | bnxt_close_nic(bp, true, false); |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3be87efdc93d..aa1374d0af93 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -6,11 +6,15 @@ | |||
| 6 | * Copyright (C) 2004 Sun Microsystems Inc. | 6 | * Copyright (C) 2004 Sun Microsystems Inc. |
| 7 | * Copyright (C) 2005-2016 Broadcom Corporation. | 7 | * Copyright (C) 2005-2016 Broadcom Corporation. |
| 8 | * Copyright (C) 2016-2017 Broadcom Limited. | 8 | * Copyright (C) 2016-2017 Broadcom Limited. |
| 9 | * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" | ||
| 10 | * refers to Broadcom Inc. and/or its subsidiaries. | ||
| 9 | * | 11 | * |
| 10 | * Firmware is: | 12 | * Firmware is: |
| 11 | * Derived from proprietary unpublished source code, | 13 | * Derived from proprietary unpublished source code, |
| 12 | * Copyright (C) 2000-2016 Broadcom Corporation. | 14 | * Copyright (C) 2000-2016 Broadcom Corporation. |
| 13 | * Copyright (C) 2016-2017 Broadcom Ltd. | 15 | * Copyright (C) 2016-2017 Broadcom Ltd. |
| 16 | * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" | ||
| 17 | * refers to Broadcom Inc. and/or its subsidiaries. | ||
| 14 | * | 18 | * |
| 15 | * Permission is hereby granted for the distribution of this firmware | 19 | * Permission is hereby granted for the distribution of this firmware |
| 16 | * data in hexadecimal or equivalent format, provided this copyright | 20 | * data in hexadecimal or equivalent format, provided this copyright |
| @@ -9290,6 +9294,15 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
| 9290 | 9294 | ||
| 9291 | tg3_restore_clk(tp); | 9295 | tg3_restore_clk(tp); |
| 9292 | 9296 | ||
| 9297 | /* Increase the core clock speed to fix tx timeout issue for 5762 | ||
| 9298 | * with 100Mbps link speed. | ||
| 9299 | */ | ||
| 9300 | if (tg3_asic_rev(tp) == ASIC_REV_5762) { | ||
| 9301 | val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); | ||
| 9302 | tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | | ||
| 9303 | TG3_CPMU_MAC_ORIDE_ENABLE); | ||
| 9304 | } | ||
| 9305 | |||
| 9293 | /* Reprobe ASF enable state. */ | 9306 | /* Reprobe ASF enable state. */ |
| 9294 | tg3_flag_clear(tp, ENABLE_ASF); | 9307 | tg3_flag_clear(tp, ENABLE_ASF); |
| 9295 | tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | | 9308 | tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 1d61aa3efda1..a772a33b685c 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | * Copyright (C) 2004 Sun Microsystems Inc. | 7 | * Copyright (C) 2004 Sun Microsystems Inc. |
| 8 | * Copyright (C) 2007-2016 Broadcom Corporation. | 8 | * Copyright (C) 2007-2016 Broadcom Corporation. |
| 9 | * Copyright (C) 2016-2017 Broadcom Limited. | 9 | * Copyright (C) 2016-2017 Broadcom Limited. |
| 10 | * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" | ||
| 11 | * refers to Broadcom Inc. and/or its subsidiaries. | ||
| 10 | */ | 12 | */ |
| 11 | 13 | ||
| 12 | #ifndef _T3_H | 14 | #ifndef _T3_H |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 86659823b259..3d45f4c92cf6 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
| @@ -166,6 +166,7 @@ | |||
| 166 | #define GEM_DCFG6 0x0294 /* Design Config 6 */ | 166 | #define GEM_DCFG6 0x0294 /* Design Config 6 */ |
| 167 | #define GEM_DCFG7 0x0298 /* Design Config 7 */ | 167 | #define GEM_DCFG7 0x0298 /* Design Config 7 */ |
| 168 | #define GEM_DCFG8 0x029C /* Design Config 8 */ | 168 | #define GEM_DCFG8 0x029C /* Design Config 8 */ |
| 169 | #define GEM_DCFG10 0x02A4 /* Design Config 10 */ | ||
| 169 | 170 | ||
| 170 | #define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ | 171 | #define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ |
| 171 | #define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ | 172 | #define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ |
| @@ -490,6 +491,12 @@ | |||
| 490 | #define GEM_SCR2CMP_OFFSET 0 | 491 | #define GEM_SCR2CMP_OFFSET 0 |
| 491 | #define GEM_SCR2CMP_SIZE 8 | 492 | #define GEM_SCR2CMP_SIZE 8 |
| 492 | 493 | ||
| 494 | /* Bitfields in DCFG10 */ | ||
| 495 | #define GEM_TXBD_RDBUFF_OFFSET 12 | ||
| 496 | #define GEM_TXBD_RDBUFF_SIZE 4 | ||
| 497 | #define GEM_RXBD_RDBUFF_OFFSET 8 | ||
| 498 | #define GEM_RXBD_RDBUFF_SIZE 4 | ||
| 499 | |||
| 493 | /* Bitfields in TISUBN */ | 500 | /* Bitfields in TISUBN */ |
| 494 | #define GEM_SUBNSINCR_OFFSET 0 | 501 | #define GEM_SUBNSINCR_OFFSET 0 |
| 495 | #define GEM_SUBNSINCR_SIZE 16 | 502 | #define GEM_SUBNSINCR_SIZE 16 |
| @@ -635,6 +642,7 @@ | |||
| 635 | #define MACB_CAPS_USRIO_DISABLED 0x00000010 | 642 | #define MACB_CAPS_USRIO_DISABLED 0x00000010 |
| 636 | #define MACB_CAPS_JUMBO 0x00000020 | 643 | #define MACB_CAPS_JUMBO 0x00000020 |
| 637 | #define MACB_CAPS_GEM_HAS_PTP 0x00000040 | 644 | #define MACB_CAPS_GEM_HAS_PTP 0x00000040 |
| 645 | #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 | ||
| 638 | #define MACB_CAPS_FIFO_MODE 0x10000000 | 646 | #define MACB_CAPS_FIFO_MODE 0x10000000 |
| 639 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 | 647 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 |
| 640 | #define MACB_CAPS_SG_DISABLED 0x40000000 | 648 | #define MACB_CAPS_SG_DISABLED 0x40000000 |
| @@ -1203,6 +1211,9 @@ struct macb { | |||
| 1203 | unsigned int max_tuples; | 1211 | unsigned int max_tuples; |
| 1204 | 1212 | ||
| 1205 | struct tasklet_struct hresp_err_tasklet; | 1213 | struct tasklet_struct hresp_err_tasklet; |
| 1214 | |||
| 1215 | int rx_bd_rd_prefetch; | ||
| 1216 | int tx_bd_rd_prefetch; | ||
| 1206 | }; | 1217 | }; |
| 1207 | 1218 | ||
| 1208 | #ifdef CONFIG_MACB_USE_HWSTAMP | 1219 | #ifdef CONFIG_MACB_USE_HWSTAMP |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 96cc03a6d942..a6c911bb5ce2 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
| @@ -1811,23 +1811,25 @@ static void macb_free_consistent(struct macb *bp) | |||
| 1811 | { | 1811 | { |
| 1812 | struct macb_queue *queue; | 1812 | struct macb_queue *queue; |
| 1813 | unsigned int q; | 1813 | unsigned int q; |
| 1814 | int size; | ||
| 1814 | 1815 | ||
| 1815 | queue = &bp->queues[0]; | ||
| 1816 | bp->macbgem_ops.mog_free_rx_buffers(bp); | 1816 | bp->macbgem_ops.mog_free_rx_buffers(bp); |
| 1817 | if (queue->rx_ring) { | ||
| 1818 | dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp), | ||
| 1819 | queue->rx_ring, queue->rx_ring_dma); | ||
| 1820 | queue->rx_ring = NULL; | ||
| 1821 | } | ||
| 1822 | 1817 | ||
| 1823 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | 1818 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
| 1824 | kfree(queue->tx_skb); | 1819 | kfree(queue->tx_skb); |
| 1825 | queue->tx_skb = NULL; | 1820 | queue->tx_skb = NULL; |
| 1826 | if (queue->tx_ring) { | 1821 | if (queue->tx_ring) { |
| 1827 | dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp), | 1822 | size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; |
| 1823 | dma_free_coherent(&bp->pdev->dev, size, | ||
| 1828 | queue->tx_ring, queue->tx_ring_dma); | 1824 | queue->tx_ring, queue->tx_ring_dma); |
| 1829 | queue->tx_ring = NULL; | 1825 | queue->tx_ring = NULL; |
| 1830 | } | 1826 | } |
| 1827 | if (queue->rx_ring) { | ||
| 1828 | size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; | ||
| 1829 | dma_free_coherent(&bp->pdev->dev, size, | ||
| 1830 | queue->rx_ring, queue->rx_ring_dma); | ||
| 1831 | queue->rx_ring = NULL; | ||
| 1832 | } | ||
| 1831 | } | 1833 | } |
| 1832 | } | 1834 | } |
| 1833 | 1835 | ||
| @@ -1874,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp) | |||
| 1874 | int size; | 1876 | int size; |
| 1875 | 1877 | ||
| 1876 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | 1878 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
| 1877 | size = TX_RING_BYTES(bp); | 1879 | size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; |
| 1878 | queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | 1880 | queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
| 1879 | &queue->tx_ring_dma, | 1881 | &queue->tx_ring_dma, |
| 1880 | GFP_KERNEL); | 1882 | GFP_KERNEL); |
| @@ -1890,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp) | |||
| 1890 | if (!queue->tx_skb) | 1892 | if (!queue->tx_skb) |
| 1891 | goto out_err; | 1893 | goto out_err; |
| 1892 | 1894 | ||
| 1893 | size = RX_RING_BYTES(bp); | 1895 | size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; |
| 1894 | queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | 1896 | queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
| 1895 | &queue->rx_ring_dma, GFP_KERNEL); | 1897 | &queue->rx_ring_dma, GFP_KERNEL); |
| 1896 | if (!queue->rx_ring) | 1898 | if (!queue->rx_ring) |
| @@ -3797,7 +3799,7 @@ static const struct macb_config np4_config = { | |||
| 3797 | static const struct macb_config zynqmp_config = { | 3799 | static const struct macb_config zynqmp_config = { |
| 3798 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | | 3800 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
| 3799 | MACB_CAPS_JUMBO | | 3801 | MACB_CAPS_JUMBO | |
| 3800 | MACB_CAPS_GEM_HAS_PTP, | 3802 | MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, |
| 3801 | .dma_burst_length = 16, | 3803 | .dma_burst_length = 16, |
| 3802 | .clk_init = macb_clk_init, | 3804 | .clk_init = macb_clk_init, |
| 3803 | .init = macb_init, | 3805 | .init = macb_init, |
| @@ -3858,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev) | |||
| 3858 | void __iomem *mem; | 3860 | void __iomem *mem; |
| 3859 | const char *mac; | 3861 | const char *mac; |
| 3860 | struct macb *bp; | 3862 | struct macb *bp; |
| 3861 | int err; | 3863 | int err, val; |
| 3862 | 3864 | ||
| 3863 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 3865 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 3864 | mem = devm_ioremap_resource(&pdev->dev, regs); | 3866 | mem = devm_ioremap_resource(&pdev->dev, regs); |
| @@ -3947,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev) | |||
| 3947 | else | 3949 | else |
| 3948 | dev->max_mtu = ETH_DATA_LEN; | 3950 | dev->max_mtu = ETH_DATA_LEN; |
| 3949 | 3951 | ||
| 3952 | if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { | ||
| 3953 | val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); | ||
| 3954 | if (val) | ||
| 3955 | bp->rx_bd_rd_prefetch = (2 << (val - 1)) * | ||
| 3956 | macb_dma_desc_get_size(bp); | ||
| 3957 | |||
| 3958 | val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); | ||
| 3959 | if (val) | ||
| 3960 | bp->tx_bd_rd_prefetch = (2 << (val - 1)) * | ||
| 3961 | macb_dma_desc_get_size(bp); | ||
| 3962 | } | ||
| 3963 | |||
| 3950 | mac = of_get_mac_address(np); | 3964 | mac = of_get_mac_address(np); |
| 3951 | if (mac) { | 3965 | if (mac) { |
| 3952 | ether_addr_copy(bp->dev->dev_addr, mac); | 3966 | ether_addr_copy(bp->dev->dev_addr, mac); |
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 043e3c11c42b..92d88c5f76fb 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig | |||
| @@ -15,7 +15,7 @@ if NET_VENDOR_CAVIUM | |||
| 15 | 15 | ||
| 16 | config THUNDER_NIC_PF | 16 | config THUNDER_NIC_PF |
| 17 | tristate "Thunder Physical function driver" | 17 | tristate "Thunder Physical function driver" |
| 18 | depends on 64BIT | 18 | depends on 64BIT && PCI |
| 19 | select THUNDER_NIC_BGX | 19 | select THUNDER_NIC_BGX |
| 20 | ---help--- | 20 | ---help--- |
| 21 | This driver supports Thunder's NIC physical function. | 21 | This driver supports Thunder's NIC physical function. |
| @@ -28,13 +28,13 @@ config THUNDER_NIC_PF | |||
| 28 | config THUNDER_NIC_VF | 28 | config THUNDER_NIC_VF |
| 29 | tristate "Thunder Virtual function driver" | 29 | tristate "Thunder Virtual function driver" |
| 30 | imply CAVIUM_PTP | 30 | imply CAVIUM_PTP |
| 31 | depends on 64BIT | 31 | depends on 64BIT && PCI |
| 32 | ---help--- | 32 | ---help--- |
| 33 | This driver supports Thunder's NIC virtual function | 33 | This driver supports Thunder's NIC virtual function |
| 34 | 34 | ||
| 35 | config THUNDER_NIC_BGX | 35 | config THUNDER_NIC_BGX |
| 36 | tristate "Thunder MAC interface driver (BGX)" | 36 | tristate "Thunder MAC interface driver (BGX)" |
| 37 | depends on 64BIT | 37 | depends on 64BIT && PCI |
| 38 | select PHYLIB | 38 | select PHYLIB |
| 39 | select MDIO_THUNDER | 39 | select MDIO_THUNDER |
| 40 | select THUNDER_NIC_RGX | 40 | select THUNDER_NIC_RGX |
| @@ -44,7 +44,7 @@ config THUNDER_NIC_BGX | |||
| 44 | 44 | ||
| 45 | config THUNDER_NIC_RGX | 45 | config THUNDER_NIC_RGX |
| 46 | tristate "Thunder MAC interface driver (RGX)" | 46 | tristate "Thunder MAC interface driver (RGX)" |
| 47 | depends on 64BIT | 47 | depends on 64BIT && PCI |
| 48 | select PHYLIB | 48 | select PHYLIB |
| 49 | select MDIO_THUNDER | 49 | select MDIO_THUNDER |
| 50 | ---help--- | 50 | ---help--- |
| @@ -53,7 +53,7 @@ config THUNDER_NIC_RGX | |||
| 53 | 53 | ||
| 54 | config CAVIUM_PTP | 54 | config CAVIUM_PTP |
| 55 | tristate "Cavium PTP coprocessor as PTP clock" | 55 | tristate "Cavium PTP coprocessor as PTP clock" |
| 56 | depends on 64BIT | 56 | depends on 64BIT && PCI |
| 57 | imply PTP_1588_CLOCK | 57 | imply PTP_1588_CLOCK |
| 58 | default y | 58 | default y |
| 59 | ---help--- | 59 | ---help--- |
| @@ -65,7 +65,7 @@ config CAVIUM_PTP | |||
| 65 | 65 | ||
| 66 | config LIQUIDIO | 66 | config LIQUIDIO |
| 67 | tristate "Cavium LiquidIO support" | 67 | tristate "Cavium LiquidIO support" |
| 68 | depends on 64BIT | 68 | depends on 64BIT && PCI |
| 69 | depends on MAY_USE_DEVLINK | 69 | depends on MAY_USE_DEVLINK |
| 70 | imply PTP_1588_CLOCK | 70 | imply PTP_1588_CLOCK |
| 71 | select FW_LOADER | 71 | select FW_LOADER |
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8a815bb57177..7e8454d3b1ad 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c | |||
| @@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console) | |||
| 91 | */ | 91 | */ |
| 92 | #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 | 92 | #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 |
| 93 | 93 | ||
| 94 | /* time to wait for possible in-flight requests in milliseconds */ | ||
| 95 | #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) | ||
| 96 | |||
| 94 | struct lio_trusted_vf_ctx { | 97 | struct lio_trusted_vf_ctx { |
| 95 | struct completion complete; | 98 | struct completion complete; |
| 96 | int status; | 99 | int status; |
| @@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) | |||
| 259 | force_io_queues_off(oct); | 262 | force_io_queues_off(oct); |
| 260 | 263 | ||
| 261 | /* To allow for in-flight requests */ | 264 | /* To allow for in-flight requests */ |
| 262 | schedule_timeout_uninterruptible(100); | 265 | schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST); |
| 263 | 266 | ||
| 264 | if (wait_for_pending_requests(oct)) | 267 | if (wait_for_pending_requests(oct)) |
| 265 | dev_err(&oct->pci_dev->dev, "There were pending requests\n"); | 268 | dev_err(&oct->pci_dev->dev, "There were pending requests\n"); |
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 3f6afb54a5eb..bb43ddb7539e 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | |||
| @@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) | |||
| 643 | static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) | 643 | static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) |
| 644 | { | 644 | { |
| 645 | struct octeon_mgmt *p = netdev_priv(netdev); | 645 | struct octeon_mgmt *p = netdev_priv(netdev); |
| 646 | int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; | 646 | int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
| 647 | 647 | ||
| 648 | netdev->mtu = new_mtu; | 648 | netdev->mtu = new_mtu; |
| 649 | 649 | ||
| 650 | cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); | 650 | /* HW lifts the limit if the frame is VLAN tagged |
| 651 | * (+4 bytes per each tag, up to two tags) | ||
| 652 | */ | ||
| 653 | cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet); | ||
| 654 | /* Set the hardware to truncate packets larger than the MTU. The jabber | ||
| 655 | * register must be set to a multiple of 8 bytes, so round up. JABBER is | ||
| 656 | * an unconditional limit, so we need to account for two possible VLAN | ||
| 657 | * tags. | ||
| 658 | */ | ||
| 651 | cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, | 659 | cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, |
| 652 | (size_without_fcs + 7) & 0xfff8); | 660 | (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8); |
| 653 | 661 | ||
| 654 | return 0; | 662 | return 0; |
| 655 | } | 663 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 7b795edd9d3a..a19172dbe6be 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | |||
| @@ -51,6 +51,7 @@ | |||
| 51 | #include <linux/sched.h> | 51 | #include <linux/sched.h> |
| 52 | #include <linux/slab.h> | 52 | #include <linux/slab.h> |
| 53 | #include <linux/uaccess.h> | 53 | #include <linux/uaccess.h> |
| 54 | #include <linux/nospec.h> | ||
| 54 | 55 | ||
| 55 | #include "common.h" | 56 | #include "common.h" |
| 56 | #include "cxgb3_ioctl.h" | 57 | #include "cxgb3_ioctl.h" |
| @@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) | |||
| 2268 | 2269 | ||
| 2269 | if (t.qset_idx >= nqsets) | 2270 | if (t.qset_idx >= nqsets) |
| 2270 | return -EINVAL; | 2271 | return -EINVAL; |
| 2272 | t.qset_idx = array_index_nospec(t.qset_idx, nqsets); | ||
| 2271 | 2273 | ||
| 2272 | q = &adapter->params.sge.qset[q1 + t.qset_idx]; | 2274 | q = &adapter->params.sge.qset[q1 + t.qset_idx]; |
| 2273 | t.rspq_size = q->rspq_size; | 2275 | t.rspq_size = q->rspq_size; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 974a868a4824..3720c3e11ebb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -8702,7 +8702,7 @@ static int t4_get_flash_params(struct adapter *adap) | |||
| 8702 | }; | 8702 | }; |
| 8703 | 8703 | ||
| 8704 | unsigned int part, manufacturer; | 8704 | unsigned int part, manufacturer; |
| 8705 | unsigned int density, size; | 8705 | unsigned int density, size = 0; |
| 8706 | u32 flashid = 0; | 8706 | u32 flashid = 0; |
| 8707 | int ret; | 8707 | int ret; |
| 8708 | 8708 | ||
| @@ -8772,11 +8772,6 @@ static int t4_get_flash_params(struct adapter *adap) | |||
| 8772 | case 0x22: /* 256MB */ | 8772 | case 0x22: /* 256MB */ |
| 8773 | size = 1 << 28; | 8773 | size = 1 << 28; |
| 8774 | break; | 8774 | break; |
| 8775 | |||
| 8776 | default: | ||
| 8777 | dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n", | ||
| 8778 | flashid, density); | ||
| 8779 | return -EINVAL; | ||
| 8780 | } | 8775 | } |
| 8781 | break; | 8776 | break; |
| 8782 | } | 8777 | } |
| @@ -8792,10 +8787,6 @@ static int t4_get_flash_params(struct adapter *adap) | |||
| 8792 | case 0x17: /* 64MB */ | 8787 | case 0x17: /* 64MB */ |
| 8793 | size = 1 << 26; | 8788 | size = 1 << 26; |
| 8794 | break; | 8789 | break; |
| 8795 | default: | ||
| 8796 | dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n", | ||
| 8797 | flashid, density); | ||
| 8798 | return -EINVAL; | ||
| 8799 | } | 8790 | } |
| 8800 | break; | 8791 | break; |
| 8801 | } | 8792 | } |
| @@ -8811,10 +8802,6 @@ static int t4_get_flash_params(struct adapter *adap) | |||
| 8811 | case 0x18: /* 16MB */ | 8802 | case 0x18: /* 16MB */ |
| 8812 | size = 1 << 24; | 8803 | size = 1 << 24; |
| 8813 | break; | 8804 | break; |
| 8814 | default: | ||
| 8815 | dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n", | ||
| 8816 | flashid, density); | ||
| 8817 | return -EINVAL; | ||
| 8818 | } | 8805 | } |
| 8819 | break; | 8806 | break; |
| 8820 | } | 8807 | } |
| @@ -8830,17 +8817,21 @@ static int t4_get_flash_params(struct adapter *adap) | |||
| 8830 | case 0x18: /* 16MB */ | 8817 | case 0x18: /* 16MB */ |
| 8831 | size = 1 << 24; | 8818 | size = 1 << 24; |
| 8832 | break; | 8819 | break; |
| 8833 | default: | ||
| 8834 | dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n", | ||
| 8835 | flashid, density); | ||
| 8836 | return -EINVAL; | ||
| 8837 | } | 8820 | } |
| 8838 | break; | 8821 | break; |
| 8839 | } | 8822 | } |
| 8840 | default: | 8823 | } |
| 8841 | dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n", | 8824 | |
| 8842 | flashid); | 8825 | /* If we didn't recognize the FLASH part, that's no real issue: the |
| 8843 | return -EINVAL; | 8826 | * Hardware/Software contract says that Hardware will _*ALWAYS*_ |
| 8827 | * use a FLASH part which is at least 4MB in size and has 64KB | ||
| 8828 | * sectors. The unrecognized FLASH part is likely to be much larger | ||
| 8829 | * than 4MB, but that's all we really need. | ||
| 8830 | */ | ||
| 8831 | if (size == 0) { | ||
| 8832 | dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n", | ||
| 8833 | flashid); | ||
| 8834 | size = 1 << 22; | ||
| 8844 | } | 8835 | } |
| 8845 | 8836 | ||
| 8846 | /* Store decoded Flash size and fall through into vetting code. */ | 8837 | /* Store decoded Flash size and fall through into vetting code. */ |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index d0e196bff081..ffe7acbeaa22 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, | |||
| 329 | return; | 329 | return; |
| 330 | 330 | ||
| 331 | failure: | 331 | failure: |
| 332 | dev_info(dev, "replenish pools failure\n"); | 332 | if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED) |
| 333 | dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n"); | ||
| 333 | pool->free_map[pool->next_free] = index; | 334 | pool->free_map[pool->next_free] = index; |
| 334 | pool->rx_buff[index].skb = NULL; | 335 | pool->rx_buff[index].skb = NULL; |
| 335 | 336 | ||
| @@ -1617,7 +1618,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
| 1617 | &tx_crq); | 1618 | &tx_crq); |
| 1618 | } | 1619 | } |
| 1619 | if (lpar_rc != H_SUCCESS) { | 1620 | if (lpar_rc != H_SUCCESS) { |
| 1620 | dev_err(dev, "tx failed with code %ld\n", lpar_rc); | 1621 | if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) |
| 1622 | dev_err_ratelimited(dev, "tx: send failed\n"); | ||
| 1621 | dev_kfree_skb_any(skb); | 1623 | dev_kfree_skb_any(skb); |
| 1622 | tx_buff->skb = NULL; | 1624 | tx_buff->skb = NULL; |
| 1623 | 1625 | ||
| @@ -1825,8 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1825 | 1827 | ||
| 1826 | rc = ibmvnic_login(netdev); | 1828 | rc = ibmvnic_login(netdev); |
| 1827 | if (rc) { | 1829 | if (rc) { |
| 1828 | adapter->state = VNIC_PROBED; | 1830 | adapter->state = reset_state; |
| 1829 | return 0; | 1831 | return rc; |
| 1830 | } | 1832 | } |
| 1831 | 1833 | ||
| 1832 | if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || | 1834 | if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || |
| @@ -3204,6 +3206,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) | |||
| 3204 | return crq; | 3206 | return crq; |
| 3205 | } | 3207 | } |
| 3206 | 3208 | ||
| 3209 | static void print_subcrq_error(struct device *dev, int rc, const char *func) | ||
| 3210 | { | ||
| 3211 | switch (rc) { | ||
| 3212 | case H_PARAMETER: | ||
| 3213 | dev_warn_ratelimited(dev, | ||
| 3214 | "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", | ||
| 3215 | func, rc); | ||
| 3216 | break; | ||
| 3217 | case H_CLOSED: | ||
| 3218 | dev_warn_ratelimited(dev, | ||
| 3219 | "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n", | ||
| 3220 | func, rc); | ||
| 3221 | break; | ||
| 3222 | default: | ||
| 3223 | dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); | ||
| 3224 | break; | ||
| 3225 | } | ||
| 3226 | } | ||
| 3227 | |||
| 3207 | static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, | 3228 | static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, |
| 3208 | union sub_crq *sub_crq) | 3229 | union sub_crq *sub_crq) |
| 3209 | { | 3230 | { |
| @@ -3230,11 +3251,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, | |||
| 3230 | cpu_to_be64(u64_crq[2]), | 3251 | cpu_to_be64(u64_crq[2]), |
| 3231 | cpu_to_be64(u64_crq[3])); | 3252 | cpu_to_be64(u64_crq[3])); |
| 3232 | 3253 | ||
| 3233 | if (rc) { | 3254 | if (rc) |
| 3234 | if (rc == H_CLOSED) | 3255 | print_subcrq_error(dev, rc, __func__); |
| 3235 | dev_warn(dev, "CRQ Queue closed\n"); | ||
| 3236 | dev_err(dev, "Send error (rc=%d)\n", rc); | ||
| 3237 | } | ||
| 3238 | 3256 | ||
| 3239 | return rc; | 3257 | return rc; |
| 3240 | } | 3258 | } |
| @@ -3252,11 +3270,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, | |||
| 3252 | cpu_to_be64(remote_handle), | 3270 | cpu_to_be64(remote_handle), |
| 3253 | ioba, num_entries); | 3271 | ioba, num_entries); |
| 3254 | 3272 | ||
| 3255 | if (rc) { | 3273 | if (rc) |
| 3256 | if (rc == H_CLOSED) | 3274 | print_subcrq_error(dev, rc, __func__); |
| 3257 | dev_warn(dev, "CRQ Queue closed\n"); | ||
| 3258 | dev_err(dev, "Send (indirect) error (rc=%d)\n", rc); | ||
| 3259 | } | ||
| 3260 | 3275 | ||
| 3261 | return rc; | 3276 | return rc; |
| 3262 | } | 3277 | } |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 3f5c350716bb..0bd1294ba517 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | |||
| @@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, | |||
| 1871 | if (enable_addr != 0) | 1871 | if (enable_addr != 0) |
| 1872 | rar_high |= IXGBE_RAH_AV; | 1872 | rar_high |= IXGBE_RAH_AV; |
| 1873 | 1873 | ||
| 1874 | /* Record lower 32 bits of MAC address and then make | ||
| 1875 | * sure that write is flushed to hardware before writing | ||
| 1876 | * the upper 16 bits and setting the valid bit. | ||
| 1877 | */ | ||
| 1874 | IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); | 1878 | IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); |
| 1879 | IXGBE_WRITE_FLUSH(hw); | ||
| 1875 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); | 1880 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); |
| 1876 | 1881 | ||
| 1877 | return 0; | 1882 | return 0; |
| @@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) | |||
| 1903 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); | 1908 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); |
| 1904 | rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); | 1909 | rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); |
| 1905 | 1910 | ||
| 1906 | IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); | 1911 | /* Clear the address valid bit and upper 16 bits of the address |
| 1912 | * before clearing the lower bits. This way we aren't updating | ||
| 1913 | * a live filter. | ||
| 1914 | */ | ||
| 1907 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); | 1915 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); |
| 1916 | IXGBE_WRITE_FLUSH(hw); | ||
| 1917 | IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); | ||
| 1908 | 1918 | ||
| 1909 | /* clear VMDq pool/queue selection for this RAR */ | 1919 | /* clear VMDq pool/queue selection for this RAR */ |
| 1910 | hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); | 1920 | hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index c116f459945d..da4322e4daed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | |||
| @@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, | |||
| 839 | } | 839 | } |
| 840 | 840 | ||
| 841 | itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; | 841 | itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; |
| 842 | if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) { | 842 | if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { |
| 843 | netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", | 843 | netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", |
| 844 | __func__, itd->sa_idx, xs->xso.offload_handle); | 844 | __func__, itd->sa_idx, xs->xso.offload_handle); |
| 845 | return 0; | 845 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9f54ccbddea7..3360f7b9ee73 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
| @@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, | |||
| 474 | { | 474 | { |
| 475 | const struct mlx4_en_frag_info *frag_info = priv->frag_info; | 475 | const struct mlx4_en_frag_info *frag_info = priv->frag_info; |
| 476 | unsigned int truesize = 0; | 476 | unsigned int truesize = 0; |
| 477 | bool release = true; | ||
| 477 | int nr, frag_size; | 478 | int nr, frag_size; |
| 478 | struct page *page; | 479 | struct page *page; |
| 479 | dma_addr_t dma; | 480 | dma_addr_t dma; |
| 480 | bool release; | ||
| 481 | 481 | ||
| 482 | /* Collect used fragments while replacing them in the HW descriptors */ | 482 | /* Collect used fragments while replacing them in the HW descriptors */ |
| 483 | for (nr = 0;; frags++) { | 483 | for (nr = 0;; frags++) { |
| @@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, | |||
| 500 | release = page_count(page) != 1 || | 500 | release = page_count(page) != 1 || |
| 501 | page_is_pfmemalloc(page) || | 501 | page_is_pfmemalloc(page) || |
| 502 | page_to_nid(page) != numa_mem_id(); | 502 | page_to_nid(page) != numa_mem_id(); |
| 503 | } else { | 503 | } else if (!priv->rx_headroom) { |
| 504 | /* rx_headroom for non XDP setup is always 0. | ||
| 505 | * When XDP is set, the above condition will | ||
| 506 | * guarantee page is always released. | ||
| 507 | */ | ||
| 504 | u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); | 508 | u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); |
| 505 | 509 | ||
| 506 | frags->page_offset += sz_align; | 510 | frags->page_offset += sz_align; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 6aaaf3d9ba31..77b2adb29341 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -4756,6 +4756,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) | |||
| 4756 | kfree(mlxsw_sp_rt6); | 4756 | kfree(mlxsw_sp_rt6); |
| 4757 | } | 4757 | } |
| 4758 | 4758 | ||
| 4759 | static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) | ||
| 4760 | { | ||
| 4761 | /* RTF_CACHE routes are ignored */ | ||
| 4762 | return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; | ||
| 4763 | } | ||
| 4764 | |||
| 4759 | static struct fib6_info * | 4765 | static struct fib6_info * |
| 4760 | mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) | 4766 | mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) |
| 4761 | { | 4767 | { |
| @@ -4765,11 +4771,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) | |||
| 4765 | 4771 | ||
| 4766 | static struct mlxsw_sp_fib6_entry * | 4772 | static struct mlxsw_sp_fib6_entry * |
| 4767 | mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, | 4773 | mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, |
| 4768 | const struct fib6_info *nrt, bool append) | 4774 | const struct fib6_info *nrt, bool replace) |
| 4769 | { | 4775 | { |
| 4770 | struct mlxsw_sp_fib6_entry *fib6_entry; | 4776 | struct mlxsw_sp_fib6_entry *fib6_entry; |
| 4771 | 4777 | ||
| 4772 | if (!append) | 4778 | if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) |
| 4773 | return NULL; | 4779 | return NULL; |
| 4774 | 4780 | ||
| 4775 | list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { | 4781 | list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { |
| @@ -4784,7 +4790,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, | |||
| 4784 | break; | 4790 | break; |
| 4785 | if (rt->fib6_metric < nrt->fib6_metric) | 4791 | if (rt->fib6_metric < nrt->fib6_metric) |
| 4786 | continue; | 4792 | continue; |
| 4787 | if (rt->fib6_metric == nrt->fib6_metric) | 4793 | if (rt->fib6_metric == nrt->fib6_metric && |
| 4794 | mlxsw_sp_fib6_rt_can_mp(rt)) | ||
| 4788 | return fib6_entry; | 4795 | return fib6_entry; |
| 4789 | if (rt->fib6_metric > nrt->fib6_metric) | 4796 | if (rt->fib6_metric > nrt->fib6_metric) |
| 4790 | break; | 4797 | break; |
| @@ -5163,7 +5170,7 @@ static struct mlxsw_sp_fib6_entry * | |||
| 5163 | mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, | 5170 | mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, |
| 5164 | const struct fib6_info *nrt, bool replace) | 5171 | const struct fib6_info *nrt, bool replace) |
| 5165 | { | 5172 | { |
| 5166 | struct mlxsw_sp_fib6_entry *fib6_entry; | 5173 | struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; |
| 5167 | 5174 | ||
| 5168 | list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { | 5175 | list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { |
| 5169 | struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); | 5176 | struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); |
| @@ -5172,13 +5179,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, | |||
| 5172 | continue; | 5179 | continue; |
| 5173 | if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) | 5180 | if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) |
| 5174 | break; | 5181 | break; |
| 5175 | if (replace && rt->fib6_metric == nrt->fib6_metric) | 5182 | if (replace && rt->fib6_metric == nrt->fib6_metric) { |
| 5176 | return fib6_entry; | 5183 | if (mlxsw_sp_fib6_rt_can_mp(rt) == |
| 5184 | mlxsw_sp_fib6_rt_can_mp(nrt)) | ||
| 5185 | return fib6_entry; | ||
| 5186 | if (mlxsw_sp_fib6_rt_can_mp(nrt)) | ||
| 5187 | fallback = fallback ?: fib6_entry; | ||
| 5188 | } | ||
| 5177 | if (rt->fib6_metric > nrt->fib6_metric) | 5189 | if (rt->fib6_metric > nrt->fib6_metric) |
| 5178 | return fib6_entry; | 5190 | return fallback ?: fib6_entry; |
| 5179 | } | 5191 | } |
| 5180 | 5192 | ||
| 5181 | return NULL; | 5193 | return fallback; |
| 5182 | } | 5194 | } |
| 5183 | 5195 | ||
| 5184 | static int | 5196 | static int |
| @@ -5304,8 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, | |||
| 5304 | } | 5316 | } |
| 5305 | 5317 | ||
| 5306 | static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, | 5318 | static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, |
| 5307 | struct fib6_info *rt, bool replace, | 5319 | struct fib6_info *rt, bool replace) |
| 5308 | bool append) | ||
| 5309 | { | 5320 | { |
| 5310 | struct mlxsw_sp_fib6_entry *fib6_entry; | 5321 | struct mlxsw_sp_fib6_entry *fib6_entry; |
| 5311 | struct mlxsw_sp_fib_node *fib_node; | 5322 | struct mlxsw_sp_fib_node *fib_node; |
| @@ -5331,7 +5342,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, | |||
| 5331 | /* Before creating a new entry, try to append route to an existing | 5342 | /* Before creating a new entry, try to append route to an existing |
| 5332 | * multipath entry. | 5343 | * multipath entry. |
| 5333 | */ | 5344 | */ |
| 5334 | fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append); | 5345 | fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); |
| 5335 | if (fib6_entry) { | 5346 | if (fib6_entry) { |
| 5336 | err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); | 5347 | err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); |
| 5337 | if (err) | 5348 | if (err) |
| @@ -5339,14 +5350,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, | |||
| 5339 | return 0; | 5350 | return 0; |
| 5340 | } | 5351 | } |
| 5341 | 5352 | ||
| 5342 | /* We received an append event, yet did not find any route to | ||
| 5343 | * append to. | ||
| 5344 | */ | ||
| 5345 | if (WARN_ON(append)) { | ||
| 5346 | err = -EINVAL; | ||
| 5347 | goto err_fib6_entry_append; | ||
| 5348 | } | ||
| 5349 | |||
| 5350 | fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); | 5353 | fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); |
| 5351 | if (IS_ERR(fib6_entry)) { | 5354 | if (IS_ERR(fib6_entry)) { |
| 5352 | err = PTR_ERR(fib6_entry); | 5355 | err = PTR_ERR(fib6_entry); |
| @@ -5364,7 +5367,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, | |||
| 5364 | err_fib6_node_entry_link: | 5367 | err_fib6_node_entry_link: |
| 5365 | mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); | 5368 | mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); |
| 5366 | err_fib6_entry_create: | 5369 | err_fib6_entry_create: |
| 5367 | err_fib6_entry_append: | ||
| 5368 | err_fib6_entry_nexthop_add: | 5370 | err_fib6_entry_nexthop_add: |
| 5369 | mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); | 5371 | mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); |
| 5370 | return err; | 5372 | return err; |
| @@ -5715,7 +5717,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) | |||
| 5715 | struct mlxsw_sp_fib_event_work *fib_work = | 5717 | struct mlxsw_sp_fib_event_work *fib_work = |
| 5716 | container_of(work, struct mlxsw_sp_fib_event_work, work); | 5718 | container_of(work, struct mlxsw_sp_fib_event_work, work); |
| 5717 | struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; | 5719 | struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; |
| 5718 | bool replace, append; | 5720 | bool replace; |
| 5719 | int err; | 5721 | int err; |
| 5720 | 5722 | ||
| 5721 | rtnl_lock(); | 5723 | rtnl_lock(); |
| @@ -5726,10 +5728,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) | |||
| 5726 | case FIB_EVENT_ENTRY_APPEND: /* fall through */ | 5728 | case FIB_EVENT_ENTRY_APPEND: /* fall through */ |
| 5727 | case FIB_EVENT_ENTRY_ADD: | 5729 | case FIB_EVENT_ENTRY_ADD: |
| 5728 | replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; | 5730 | replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; |
| 5729 | append = fib_work->event == FIB_EVENT_ENTRY_APPEND; | ||
| 5730 | err = mlxsw_sp_router_fib6_add(mlxsw_sp, | 5731 | err = mlxsw_sp_router_fib6_add(mlxsw_sp, |
| 5731 | fib_work->fen6_info.rt, replace, | 5732 | fib_work->fen6_info.rt, replace); |
| 5732 | append); | ||
| 5733 | if (err) | 5733 | if (err) |
| 5734 | mlxsw_sp_router_fib_abort(mlxsw_sp); | 5734 | mlxsw_sp_router_fib_abort(mlxsw_sp); |
| 5735 | mlxsw_sp_rt6_release(fib_work->fen6_info.rt); | 5735 | mlxsw_sp_rt6_release(fib_work->fen6_info.rt); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 00db3401b898..1dfaccd151f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
| @@ -502,6 +502,7 @@ enum BAR_ID { | |||
| 502 | struct qed_nvm_image_info { | 502 | struct qed_nvm_image_info { |
| 503 | u32 num_images; | 503 | u32 num_images; |
| 504 | struct bist_nvm_image_att *image_att; | 504 | struct bist_nvm_image_att *image_att; |
| 505 | bool valid; | ||
| 505 | }; | 506 | }; |
| 506 | 507 | ||
| 507 | #define DRV_MODULE_VERSION \ | 508 | #define DRV_MODULE_VERSION \ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index a14e48489029..4340c4c90bcb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c | |||
| @@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, | |||
| 6723 | format_idx = header & MFW_TRACE_EVENTID_MASK; | 6723 | format_idx = header & MFW_TRACE_EVENTID_MASK; |
| 6724 | 6724 | ||
| 6725 | /* Skip message if its index doesn't exist in the meta data */ | 6725 | /* Skip message if its index doesn't exist in the meta data */ |
| 6726 | if (format_idx > s_mcp_trace_meta.formats_num) { | 6726 | if (format_idx >= s_mcp_trace_meta.formats_num) { |
| 6727 | u8 format_size = | 6727 | u8 format_size = |
| 6728 | (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> | 6728 | (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> |
| 6729 | MFW_TRACE_PRM_SIZE_SHIFT); | 6729 | MFW_TRACE_PRM_SIZE_SHIFT); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 0cbc74d6ca8b..758a9a5127fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, | |||
| 371 | goto err2; | 371 | goto err2; |
| 372 | } | 372 | } |
| 373 | 373 | ||
| 374 | DP_INFO(cdev, "qed_probe completed successffuly\n"); | 374 | DP_INFO(cdev, "qed_probe completed successfully\n"); |
| 375 | 375 | ||
| 376 | return cdev; | 376 | return cdev; |
| 377 | 377 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 4e0b443c9519..9d9e533bccdc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
| @@ -592,6 +592,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, | |||
| 592 | *o_mcp_resp = mb_params.mcp_resp; | 592 | *o_mcp_resp = mb_params.mcp_resp; |
| 593 | *o_mcp_param = mb_params.mcp_param; | 593 | *o_mcp_param = mb_params.mcp_param; |
| 594 | 594 | ||
| 595 | /* nvm_info needs to be updated */ | ||
| 596 | p_hwfn->nvm_info.valid = false; | ||
| 597 | |||
| 595 | return 0; | 598 | return 0; |
| 596 | } | 599 | } |
| 597 | 600 | ||
| @@ -2555,11 +2558,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, | |||
| 2555 | 2558 | ||
| 2556 | int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) | 2559 | int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) |
| 2557 | { | 2560 | { |
| 2558 | struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info; | 2561 | struct qed_nvm_image_info nvm_info; |
| 2559 | struct qed_ptt *p_ptt; | 2562 | struct qed_ptt *p_ptt; |
| 2560 | int rc; | 2563 | int rc; |
| 2561 | u32 i; | 2564 | u32 i; |
| 2562 | 2565 | ||
| 2566 | if (p_hwfn->nvm_info.valid) | ||
| 2567 | return 0; | ||
| 2568 | |||
| 2563 | p_ptt = qed_ptt_acquire(p_hwfn); | 2569 | p_ptt = qed_ptt_acquire(p_hwfn); |
| 2564 | if (!p_ptt) { | 2570 | if (!p_ptt) { |
| 2565 | DP_ERR(p_hwfn, "failed to acquire ptt\n"); | 2571 | DP_ERR(p_hwfn, "failed to acquire ptt\n"); |
| @@ -2567,29 +2573,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) | |||
| 2567 | } | 2573 | } |
| 2568 | 2574 | ||
| 2569 | /* Acquire from MFW the amount of available images */ | 2575 | /* Acquire from MFW the amount of available images */ |
| 2570 | nvm_info->num_images = 0; | 2576 | nvm_info.num_images = 0; |
| 2571 | rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, | 2577 | rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, |
| 2572 | p_ptt, &nvm_info->num_images); | 2578 | p_ptt, &nvm_info.num_images); |
| 2573 | if (rc == -EOPNOTSUPP) { | 2579 | if (rc == -EOPNOTSUPP) { |
| 2574 | DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); | 2580 | DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); |
| 2575 | goto out; | 2581 | goto out; |
| 2576 | } else if (rc || !nvm_info->num_images) { | 2582 | } else if (rc || !nvm_info.num_images) { |
| 2577 | DP_ERR(p_hwfn, "Failed getting number of images\n"); | 2583 | DP_ERR(p_hwfn, "Failed getting number of images\n"); |
| 2578 | goto err0; | 2584 | goto err0; |
| 2579 | } | 2585 | } |
| 2580 | 2586 | ||
| 2581 | nvm_info->image_att = kmalloc_array(nvm_info->num_images, | 2587 | nvm_info.image_att = kmalloc_array(nvm_info.num_images, |
| 2582 | sizeof(struct bist_nvm_image_att), | 2588 | sizeof(struct bist_nvm_image_att), |
| 2583 | GFP_KERNEL); | 2589 | GFP_KERNEL); |
| 2584 | if (!nvm_info->image_att) { | 2590 | if (!nvm_info.image_att) { |
| 2585 | rc = -ENOMEM; | 2591 | rc = -ENOMEM; |
| 2586 | goto err0; | 2592 | goto err0; |
| 2587 | } | 2593 | } |
| 2588 | 2594 | ||
| 2589 | /* Iterate over images and get their attributes */ | 2595 | /* Iterate over images and get their attributes */ |
| 2590 | for (i = 0; i < nvm_info->num_images; i++) { | 2596 | for (i = 0; i < nvm_info.num_images; i++) { |
| 2591 | rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, | 2597 | rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, |
| 2592 | &nvm_info->image_att[i], i); | 2598 | &nvm_info.image_att[i], i); |
| 2593 | if (rc) { | 2599 | if (rc) { |
| 2594 | DP_ERR(p_hwfn, | 2600 | DP_ERR(p_hwfn, |
| 2595 | "Failed getting image index %d attributes\n", i); | 2601 | "Failed getting image index %d attributes\n", i); |
| @@ -2597,14 +2603,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) | |||
| 2597 | } | 2603 | } |
| 2598 | 2604 | ||
| 2599 | DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, | 2605 | DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, |
| 2600 | nvm_info->image_att[i].len); | 2606 | nvm_info.image_att[i].len); |
| 2601 | } | 2607 | } |
| 2602 | out: | 2608 | out: |
| 2609 | /* Update hwfn's nvm_info */ | ||
| 2610 | if (nvm_info.num_images) { | ||
| 2611 | p_hwfn->nvm_info.num_images = nvm_info.num_images; | ||
| 2612 | kfree(p_hwfn->nvm_info.image_att); | ||
| 2613 | p_hwfn->nvm_info.image_att = nvm_info.image_att; | ||
| 2614 | p_hwfn->nvm_info.valid = true; | ||
| 2615 | } | ||
| 2616 | |||
| 2603 | qed_ptt_release(p_hwfn, p_ptt); | 2617 | qed_ptt_release(p_hwfn, p_ptt); |
| 2604 | return 0; | 2618 | return 0; |
| 2605 | 2619 | ||
| 2606 | err1: | 2620 | err1: |
| 2607 | kfree(nvm_info->image_att); | 2621 | kfree(nvm_info.image_att); |
| 2608 | err0: | 2622 | err0: |
| 2609 | qed_ptt_release(p_hwfn, p_ptt); | 2623 | qed_ptt_release(p_hwfn, p_ptt); |
| 2610 | return rc; | 2624 | return rc; |
| @@ -2641,6 +2655,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, | |||
| 2641 | return -EINVAL; | 2655 | return -EINVAL; |
| 2642 | } | 2656 | } |
| 2643 | 2657 | ||
| 2658 | qed_mcp_nvm_info_populate(p_hwfn); | ||
| 2644 | for (i = 0; i < p_hwfn->nvm_info.num_images; i++) | 2659 | for (i = 0; i < p_hwfn->nvm_info.num_images; i++) |
| 2645 | if (type == p_hwfn->nvm_info.image_att[i].image_type) | 2660 | if (type == p_hwfn->nvm_info.image_att[i].image_type) |
| 2646 | break; | 2661 | break; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 891f03a7a33d..8d7b9bb910f2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
| @@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, | |||
| 1128 | struct qlcnic_adapter *adapter = dev_get_drvdata(dev); | 1128 | struct qlcnic_adapter *adapter = dev_get_drvdata(dev); |
| 1129 | 1129 | ||
| 1130 | ret = kstrtoul(buf, 16, &data); | 1130 | ret = kstrtoul(buf, 16, &data); |
| 1131 | if (ret) | ||
| 1132 | return ret; | ||
| 1131 | 1133 | ||
| 1132 | switch (data) { | 1134 | switch (data) { |
| 1133 | case QLC_83XX_FLASH_SECTOR_ERASE_CMD: | 1135 | case QLC_83XX_FLASH_SECTOR_ERASE_CMD: |
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 5803cd6db406..206f0266463e 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c | |||
| @@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev) | |||
| 658 | return ret; | 658 | return ret; |
| 659 | } | 659 | } |
| 660 | 660 | ||
| 661 | netif_start_queue(qca->net_dev); | 661 | /* SPI thread takes care of TX queue */ |
| 662 | 662 | ||
| 663 | return 0; | 663 | return 0; |
| 664 | } | 664 | } |
| @@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) | |||
| 760 | qca->net_dev->stats.tx_errors++; | 760 | qca->net_dev->stats.tx_errors++; |
| 761 | /* Trigger tx queue flush and QCA7000 reset */ | 761 | /* Trigger tx queue flush and QCA7000 reset */ |
| 762 | qca->sync = QCASPI_SYNC_UNKNOWN; | 762 | qca->sync = QCASPI_SYNC_UNKNOWN; |
| 763 | |||
| 764 | if (qca->spi_thread) | ||
| 765 | wake_up_process(qca->spi_thread); | ||
| 763 | } | 766 | } |
| 764 | 767 | ||
| 765 | static int | 768 | static int |
| @@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi) | |||
| 878 | 881 | ||
| 879 | if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || | 882 | if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || |
| 880 | (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { | 883 | (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { |
| 881 | dev_info(&spi->dev, "Invalid clkspeed: %d\n", | 884 | dev_err(&spi->dev, "Invalid clkspeed: %d\n", |
| 882 | qcaspi_clkspeed); | 885 | qcaspi_clkspeed); |
| 883 | return -EINVAL; | 886 | return -EINVAL; |
| 884 | } | 887 | } |
| 885 | 888 | ||
| 886 | if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || | 889 | if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || |
| 887 | (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { | 890 | (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { |
| 888 | dev_info(&spi->dev, "Invalid burst len: %d\n", | 891 | dev_err(&spi->dev, "Invalid burst len: %d\n", |
| 889 | qcaspi_burst_len); | 892 | qcaspi_burst_len); |
| 890 | return -EINVAL; | 893 | return -EINVAL; |
| 891 | } | 894 | } |
| 892 | 895 | ||
| 893 | if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || | 896 | if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || |
| 894 | (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { | 897 | (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { |
| 895 | dev_info(&spi->dev, "Invalid pluggable: %d\n", | 898 | dev_err(&spi->dev, "Invalid pluggable: %d\n", |
| 896 | qcaspi_pluggable); | 899 | qcaspi_pluggable); |
| 897 | return -EINVAL; | 900 | return -EINVAL; |
| 898 | } | 901 | } |
| 899 | 902 | ||
| @@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi) | |||
| 955 | } | 958 | } |
| 956 | 959 | ||
| 957 | if (register_netdev(qcaspi_devs)) { | 960 | if (register_netdev(qcaspi_devs)) { |
| 958 | dev_info(&spi->dev, "Unable to register net device %s\n", | 961 | dev_err(&spi->dev, "Unable to register net device %s\n", |
| 959 | qcaspi_devs->name); | 962 | qcaspi_devs->name); |
| 960 | free_netdev(qcaspi_devs); | 963 | free_netdev(qcaspi_devs); |
| 961 | return -EFAULT; | 964 | return -EFAULT; |
| 962 | } | 965 | } |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f4cae2be0fda..a3f69901ac87 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -7789,6 +7789,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7789 | NETIF_F_HW_VLAN_CTAG_RX; | 7789 | NETIF_F_HW_VLAN_CTAG_RX; |
| 7790 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | | 7790 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | |
| 7791 | NETIF_F_HIGHDMA; | 7791 | NETIF_F_HIGHDMA; |
| 7792 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | ||
| 7792 | 7793 | ||
| 7793 | tp->cp_cmd |= RxChkSum | RxVlan; | 7794 | tp->cp_cmd |= RxChkSum | RxVlan; |
| 7794 | 7795 | ||
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 68f122140966..0d811c02ff34 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev) | |||
| 980 | struct ravb_private *priv = netdev_priv(ndev); | 980 | struct ravb_private *priv = netdev_priv(ndev); |
| 981 | struct phy_device *phydev = ndev->phydev; | 981 | struct phy_device *phydev = ndev->phydev; |
| 982 | bool new_state = false; | 982 | bool new_state = false; |
| 983 | unsigned long flags; | ||
| 984 | |||
| 985 | spin_lock_irqsave(&priv->lock, flags); | ||
| 986 | |||
| 987 | /* Disable TX and RX right over here, if E-MAC change is ignored */ | ||
| 988 | if (priv->no_avb_link) | ||
| 989 | ravb_rcv_snd_disable(ndev); | ||
| 983 | 990 | ||
| 984 | if (phydev->link) { | 991 | if (phydev->link) { |
| 985 | if (phydev->duplex != priv->duplex) { | 992 | if (phydev->duplex != priv->duplex) { |
| @@ -997,18 +1004,21 @@ static void ravb_adjust_link(struct net_device *ndev) | |||
| 997 | ravb_modify(ndev, ECMR, ECMR_TXF, 0); | 1004 | ravb_modify(ndev, ECMR, ECMR_TXF, 0); |
| 998 | new_state = true; | 1005 | new_state = true; |
| 999 | priv->link = phydev->link; | 1006 | priv->link = phydev->link; |
| 1000 | if (priv->no_avb_link) | ||
| 1001 | ravb_rcv_snd_enable(ndev); | ||
| 1002 | } | 1007 | } |
| 1003 | } else if (priv->link) { | 1008 | } else if (priv->link) { |
| 1004 | new_state = true; | 1009 | new_state = true; |
| 1005 | priv->link = 0; | 1010 | priv->link = 0; |
| 1006 | priv->speed = 0; | 1011 | priv->speed = 0; |
| 1007 | priv->duplex = -1; | 1012 | priv->duplex = -1; |
| 1008 | if (priv->no_avb_link) | ||
| 1009 | ravb_rcv_snd_disable(ndev); | ||
| 1010 | } | 1013 | } |
| 1011 | 1014 | ||
| 1015 | /* Enable TX and RX right over here, if E-MAC change is ignored */ | ||
| 1016 | if (priv->no_avb_link && phydev->link) | ||
| 1017 | ravb_rcv_snd_enable(ndev); | ||
| 1018 | |||
| 1019 | mmiowb(); | ||
| 1020 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 1021 | |||
| 1012 | if (new_state && netif_msg_link(priv)) | 1022 | if (new_state && netif_msg_link(priv)) |
| 1013 | phy_print_status(phydev); | 1023 | phy_print_status(phydev); |
| 1014 | } | 1024 | } |
| @@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev) | |||
| 1096 | return 0; | 1106 | return 0; |
| 1097 | } | 1107 | } |
| 1098 | 1108 | ||
| 1099 | static int ravb_get_link_ksettings(struct net_device *ndev, | ||
| 1100 | struct ethtool_link_ksettings *cmd) | ||
| 1101 | { | ||
| 1102 | struct ravb_private *priv = netdev_priv(ndev); | ||
| 1103 | unsigned long flags; | ||
| 1104 | |||
| 1105 | if (!ndev->phydev) | ||
| 1106 | return -ENODEV; | ||
| 1107 | |||
| 1108 | spin_lock_irqsave(&priv->lock, flags); | ||
| 1109 | phy_ethtool_ksettings_get(ndev->phydev, cmd); | ||
| 1110 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 1111 | |||
| 1112 | return 0; | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | static int ravb_set_link_ksettings(struct net_device *ndev, | ||
| 1116 | const struct ethtool_link_ksettings *cmd) | ||
| 1117 | { | ||
| 1118 | struct ravb_private *priv = netdev_priv(ndev); | ||
| 1119 | unsigned long flags; | ||
| 1120 | int error; | ||
| 1121 | |||
| 1122 | if (!ndev->phydev) | ||
| 1123 | return -ENODEV; | ||
| 1124 | |||
| 1125 | spin_lock_irqsave(&priv->lock, flags); | ||
| 1126 | |||
| 1127 | /* Disable TX and RX */ | ||
| 1128 | ravb_rcv_snd_disable(ndev); | ||
| 1129 | |||
| 1130 | error = phy_ethtool_ksettings_set(ndev->phydev, cmd); | ||
| 1131 | if (error) | ||
| 1132 | goto error_exit; | ||
| 1133 | |||
| 1134 | if (cmd->base.duplex == DUPLEX_FULL) | ||
| 1135 | priv->duplex = 1; | ||
| 1136 | else | ||
| 1137 | priv->duplex = 0; | ||
| 1138 | |||
| 1139 | ravb_set_duplex(ndev); | ||
| 1140 | |||
| 1141 | error_exit: | ||
| 1142 | mdelay(1); | ||
| 1143 | |||
| 1144 | /* Enable TX and RX */ | ||
| 1145 | ravb_rcv_snd_enable(ndev); | ||
| 1146 | |||
| 1147 | mmiowb(); | ||
| 1148 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 1149 | |||
| 1150 | return error; | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | static int ravb_nway_reset(struct net_device *ndev) | ||
| 1154 | { | ||
| 1155 | struct ravb_private *priv = netdev_priv(ndev); | ||
| 1156 | int error = -ENODEV; | ||
| 1157 | unsigned long flags; | ||
| 1158 | |||
| 1159 | if (ndev->phydev) { | ||
| 1160 | spin_lock_irqsave(&priv->lock, flags); | ||
| 1161 | error = phy_start_aneg(ndev->phydev); | ||
| 1162 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 1163 | } | ||
| 1164 | |||
| 1165 | return error; | ||
| 1166 | } | ||
| 1167 | |||
| 1168 | static u32 ravb_get_msglevel(struct net_device *ndev) | 1109 | static u32 ravb_get_msglevel(struct net_device *ndev) |
| 1169 | { | 1110 | { |
| 1170 | struct ravb_private *priv = netdev_priv(ndev); | 1111 | struct ravb_private *priv = netdev_priv(ndev); |
| @@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) | |||
| 1377 | } | 1318 | } |
| 1378 | 1319 | ||
| 1379 | static const struct ethtool_ops ravb_ethtool_ops = { | 1320 | static const struct ethtool_ops ravb_ethtool_ops = { |
| 1380 | .nway_reset = ravb_nway_reset, | 1321 | .nway_reset = phy_ethtool_nway_reset, |
| 1381 | .get_msglevel = ravb_get_msglevel, | 1322 | .get_msglevel = ravb_get_msglevel, |
| 1382 | .set_msglevel = ravb_set_msglevel, | 1323 | .set_msglevel = ravb_set_msglevel, |
| 1383 | .get_link = ethtool_op_get_link, | 1324 | .get_link = ethtool_op_get_link, |
| @@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = { | |||
| 1387 | .get_ringparam = ravb_get_ringparam, | 1328 | .get_ringparam = ravb_get_ringparam, |
| 1388 | .set_ringparam = ravb_set_ringparam, | 1329 | .set_ringparam = ravb_set_ringparam, |
| 1389 | .get_ts_info = ravb_get_ts_info, | 1330 | .get_ts_info = ravb_get_ts_info, |
| 1390 | .get_link_ksettings = ravb_get_link_ksettings, | 1331 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
| 1391 | .set_link_ksettings = ravb_set_link_ksettings, | 1332 | .set_link_ksettings = phy_ethtool_set_link_ksettings, |
| 1392 | .get_wol = ravb_get_wol, | 1333 | .get_wol = ravb_get_wol, |
| 1393 | .set_wol = ravb_set_wol, | 1334 | .set_wol = ravb_set_wol, |
| 1394 | }; | 1335 | }; |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e9007b613f17..5614fd231bbe 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -1927,8 +1927,15 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
| 1927 | { | 1927 | { |
| 1928 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1928 | struct sh_eth_private *mdp = netdev_priv(ndev); |
| 1929 | struct phy_device *phydev = ndev->phydev; | 1929 | struct phy_device *phydev = ndev->phydev; |
| 1930 | unsigned long flags; | ||
| 1930 | int new_state = 0; | 1931 | int new_state = 0; |
| 1931 | 1932 | ||
| 1933 | spin_lock_irqsave(&mdp->lock, flags); | ||
| 1934 | |||
| 1935 | /* Disable TX and RX right over here, if E-MAC change is ignored */ | ||
| 1936 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
| 1937 | sh_eth_rcv_snd_disable(ndev); | ||
| 1938 | |||
| 1932 | if (phydev->link) { | 1939 | if (phydev->link) { |
| 1933 | if (phydev->duplex != mdp->duplex) { | 1940 | if (phydev->duplex != mdp->duplex) { |
| 1934 | new_state = 1; | 1941 | new_state = 1; |
| @@ -1947,18 +1954,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
| 1947 | sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); | 1954 | sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); |
| 1948 | new_state = 1; | 1955 | new_state = 1; |
| 1949 | mdp->link = phydev->link; | 1956 | mdp->link = phydev->link; |
| 1950 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
| 1951 | sh_eth_rcv_snd_enable(ndev); | ||
| 1952 | } | 1957 | } |
| 1953 | } else if (mdp->link) { | 1958 | } else if (mdp->link) { |
| 1954 | new_state = 1; | 1959 | new_state = 1; |
| 1955 | mdp->link = 0; | 1960 | mdp->link = 0; |
| 1956 | mdp->speed = 0; | 1961 | mdp->speed = 0; |
| 1957 | mdp->duplex = -1; | 1962 | mdp->duplex = -1; |
| 1958 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
| 1959 | sh_eth_rcv_snd_disable(ndev); | ||
| 1960 | } | 1963 | } |
| 1961 | 1964 | ||
| 1965 | /* Enable TX and RX right over here, if E-MAC change is ignored */ | ||
| 1966 | if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) | ||
| 1967 | sh_eth_rcv_snd_enable(ndev); | ||
| 1968 | |||
| 1969 | mmiowb(); | ||
| 1970 | spin_unlock_irqrestore(&mdp->lock, flags); | ||
| 1971 | |||
| 1962 | if (new_state && netif_msg_link(mdp)) | 1972 | if (new_state && netif_msg_link(mdp)) |
| 1963 | phy_print_status(phydev); | 1973 | phy_print_status(phydev); |
| 1964 | } | 1974 | } |
| @@ -2030,60 +2040,6 @@ static int sh_eth_phy_start(struct net_device *ndev) | |||
| 2030 | return 0; | 2040 | return 0; |
| 2031 | } | 2041 | } |
| 2032 | 2042 | ||
| 2033 | static int sh_eth_get_link_ksettings(struct net_device *ndev, | ||
| 2034 | struct ethtool_link_ksettings *cmd) | ||
| 2035 | { | ||
| 2036 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
| 2037 | unsigned long flags; | ||
| 2038 | |||
| 2039 | if (!ndev->phydev) | ||
| 2040 | return -ENODEV; | ||
| 2041 | |||
| 2042 | spin_lock_irqsave(&mdp->lock, flags); | ||
| 2043 | phy_ethtool_ksettings_get(ndev->phydev, cmd); | ||
| 2044 | spin_unlock_irqrestore(&mdp->lock, flags); | ||
| 2045 | |||
| 2046 | return 0; | ||
| 2047 | } | ||
| 2048 | |||
| 2049 | static int sh_eth_set_link_ksettings(struct net_device *ndev, | ||
| 2050 | const struct ethtool_link_ksettings *cmd) | ||
| 2051 | { | ||
| 2052 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
| 2053 | unsigned long flags; | ||
| 2054 | int ret; | ||
| 2055 | |||
| 2056 | if (!ndev->phydev) | ||
| 2057 | return -ENODEV; | ||
| 2058 | |||
| 2059 | spin_lock_irqsave(&mdp->lock, flags); | ||
| 2060 | |||
| 2061 | /* disable tx and rx */ | ||
| 2062 | sh_eth_rcv_snd_disable(ndev); | ||
| 2063 | |||
| 2064 | ret = phy_ethtool_ksettings_set(ndev->phydev, cmd); | ||
| 2065 | if (ret) | ||
| 2066 | goto error_exit; | ||
| 2067 | |||
| 2068 | if (cmd->base.duplex == DUPLEX_FULL) | ||
| 2069 | mdp->duplex = 1; | ||
| 2070 | else | ||
| 2071 | mdp->duplex = 0; | ||
| 2072 | |||
| 2073 | if (mdp->cd->set_duplex) | ||
| 2074 | mdp->cd->set_duplex(ndev); | ||
| 2075 | |||
| 2076 | error_exit: | ||
| 2077 | mdelay(1); | ||
| 2078 | |||
| 2079 | /* enable tx and rx */ | ||
| 2080 | sh_eth_rcv_snd_enable(ndev); | ||
| 2081 | |||
| 2082 | spin_unlock_irqrestore(&mdp->lock, flags); | ||
| 2083 | |||
| 2084 | return ret; | ||
| 2085 | } | ||
| 2086 | |||
| 2087 | /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the | 2043 | /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the |
| 2088 | * version must be bumped as well. Just adding registers up to that | 2044 | * version must be bumped as well. Just adding registers up to that |
| 2089 | * limit is fine, as long as the existing register indices don't | 2045 | * limit is fine, as long as the existing register indices don't |
| @@ -2263,22 +2219,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs, | |||
| 2263 | pm_runtime_put_sync(&mdp->pdev->dev); | 2219 | pm_runtime_put_sync(&mdp->pdev->dev); |
| 2264 | } | 2220 | } |
| 2265 | 2221 | ||
| 2266 | static int sh_eth_nway_reset(struct net_device *ndev) | ||
| 2267 | { | ||
| 2268 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
| 2269 | unsigned long flags; | ||
| 2270 | int ret; | ||
| 2271 | |||
| 2272 | if (!ndev->phydev) | ||
| 2273 | return -ENODEV; | ||
| 2274 | |||
| 2275 | spin_lock_irqsave(&mdp->lock, flags); | ||
| 2276 | ret = phy_start_aneg(ndev->phydev); | ||
| 2277 | spin_unlock_irqrestore(&mdp->lock, flags); | ||
| 2278 | |||
| 2279 | return ret; | ||
| 2280 | } | ||
| 2281 | |||
| 2282 | static u32 sh_eth_get_msglevel(struct net_device *ndev) | 2222 | static u32 sh_eth_get_msglevel(struct net_device *ndev) |
| 2283 | { | 2223 | { |
| 2284 | struct sh_eth_private *mdp = netdev_priv(ndev); | 2224 | struct sh_eth_private *mdp = netdev_priv(ndev); |
| @@ -2429,7 +2369,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) | |||
| 2429 | static const struct ethtool_ops sh_eth_ethtool_ops = { | 2369 | static const struct ethtool_ops sh_eth_ethtool_ops = { |
| 2430 | .get_regs_len = sh_eth_get_regs_len, | 2370 | .get_regs_len = sh_eth_get_regs_len, |
| 2431 | .get_regs = sh_eth_get_regs, | 2371 | .get_regs = sh_eth_get_regs, |
| 2432 | .nway_reset = sh_eth_nway_reset, | 2372 | .nway_reset = phy_ethtool_nway_reset, |
| 2433 | .get_msglevel = sh_eth_get_msglevel, | 2373 | .get_msglevel = sh_eth_get_msglevel, |
| 2434 | .set_msglevel = sh_eth_set_msglevel, | 2374 | .set_msglevel = sh_eth_set_msglevel, |
| 2435 | .get_link = ethtool_op_get_link, | 2375 | .get_link = ethtool_op_get_link, |
| @@ -2438,8 +2378,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { | |||
| 2438 | .get_sset_count = sh_eth_get_sset_count, | 2378 | .get_sset_count = sh_eth_get_sset_count, |
| 2439 | .get_ringparam = sh_eth_get_ringparam, | 2379 | .get_ringparam = sh_eth_get_ringparam, |
| 2440 | .set_ringparam = sh_eth_set_ringparam, | 2380 | .set_ringparam = sh_eth_set_ringparam, |
| 2441 | .get_link_ksettings = sh_eth_get_link_ksettings, | 2381 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
| 2442 | .set_link_ksettings = sh_eth_set_link_ksettings, | 2382 | .set_link_ksettings = phy_ethtool_set_link_ksettings, |
| 2443 | .get_wol = sh_eth_get_wol, | 2383 | .get_wol = sh_eth_get_wol, |
| 2444 | .set_wol = sh_eth_set_wol, | 2384 | .set_wol = sh_eth_set_wol, |
| 2445 | }; | 2385 | }; |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 23f0785c0573..7eeac3d6cfe8 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
| @@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, | |||
| 4288 | return -EPROTONOSUPPORT; | 4288 | return -EPROTONOSUPPORT; |
| 4289 | } | 4289 | } |
| 4290 | 4290 | ||
| 4291 | static s32 efx_ef10_filter_insert(struct efx_nic *efx, | 4291 | static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx, |
| 4292 | struct efx_filter_spec *spec, | 4292 | struct efx_filter_spec *spec, |
| 4293 | bool replace_equal) | 4293 | bool replace_equal) |
| 4294 | { | 4294 | { |
| 4295 | DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); | 4295 | DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); |
| 4296 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 4296 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| @@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, | |||
| 4307 | bool is_mc_recip; | 4307 | bool is_mc_recip; |
| 4308 | s32 rc; | 4308 | s32 rc; |
| 4309 | 4309 | ||
| 4310 | down_read(&efx->filter_sem); | 4310 | WARN_ON(!rwsem_is_locked(&efx->filter_sem)); |
| 4311 | table = efx->filter_state; | 4311 | table = efx->filter_state; |
| 4312 | down_write(&table->lock); | 4312 | down_write(&table->lock); |
| 4313 | 4313 | ||
| @@ -4498,10 +4498,22 @@ out_unlock: | |||
| 4498 | if (rss_locked) | 4498 | if (rss_locked) |
| 4499 | mutex_unlock(&efx->rss_lock); | 4499 | mutex_unlock(&efx->rss_lock); |
| 4500 | up_write(&table->lock); | 4500 | up_write(&table->lock); |
| 4501 | up_read(&efx->filter_sem); | ||
| 4502 | return rc; | 4501 | return rc; |
| 4503 | } | 4502 | } |
| 4504 | 4503 | ||
| 4504 | static s32 efx_ef10_filter_insert(struct efx_nic *efx, | ||
| 4505 | struct efx_filter_spec *spec, | ||
| 4506 | bool replace_equal) | ||
| 4507 | { | ||
| 4508 | s32 ret; | ||
| 4509 | |||
| 4510 | down_read(&efx->filter_sem); | ||
| 4511 | ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal); | ||
| 4512 | up_read(&efx->filter_sem); | ||
| 4513 | |||
| 4514 | return ret; | ||
| 4515 | } | ||
| 4516 | |||
| 4505 | static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) | 4517 | static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) |
| 4506 | { | 4518 | { |
| 4507 | /* no need to do anything here on EF10 */ | 4519 | /* no need to do anything here on EF10 */ |
| @@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
| 5285 | EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); | 5297 | EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); |
| 5286 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); | 5298 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
| 5287 | efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); | 5299 | efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); |
| 5288 | rc = efx_ef10_filter_insert(efx, &spec, true); | 5300 | rc = efx_ef10_filter_insert_locked(efx, &spec, true); |
| 5289 | if (rc < 0) { | 5301 | if (rc < 0) { |
| 5290 | if (rollback) { | 5302 | if (rollback) { |
| 5291 | netif_info(efx, drv, efx->net_dev, | 5303 | netif_info(efx, drv, efx->net_dev, |
| @@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
| 5314 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); | 5326 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
| 5315 | eth_broadcast_addr(baddr); | 5327 | eth_broadcast_addr(baddr); |
| 5316 | efx_filter_set_eth_local(&spec, vlan->vid, baddr); | 5328 | efx_filter_set_eth_local(&spec, vlan->vid, baddr); |
| 5317 | rc = efx_ef10_filter_insert(efx, &spec, true); | 5329 | rc = efx_ef10_filter_insert_locked(efx, &spec, true); |
| 5318 | if (rc < 0) { | 5330 | if (rc < 0) { |
| 5319 | netif_warn(efx, drv, efx->net_dev, | 5331 | netif_warn(efx, drv, efx->net_dev, |
| 5320 | "Broadcast filter insert failed rc=%d\n", rc); | 5332 | "Broadcast filter insert failed rc=%d\n", rc); |
| @@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, | |||
| 5370 | if (vlan->vid != EFX_FILTER_VID_UNSPEC) | 5382 | if (vlan->vid != EFX_FILTER_VID_UNSPEC) |
| 5371 | efx_filter_set_eth_local(&spec, vlan->vid, NULL); | 5383 | efx_filter_set_eth_local(&spec, vlan->vid, NULL); |
| 5372 | 5384 | ||
| 5373 | rc = efx_ef10_filter_insert(efx, &spec, true); | 5385 | rc = efx_ef10_filter_insert_locked(efx, &spec, true); |
| 5374 | if (rc < 0) { | 5386 | if (rc < 0) { |
| 5375 | const char *um = multicast ? "Multicast" : "Unicast"; | 5387 | const char *um = multicast ? "Multicast" : "Unicast"; |
| 5376 | const char *encap_name = ""; | 5388 | const char *encap_name = ""; |
| @@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, | |||
| 5430 | filter_flags, 0); | 5442 | filter_flags, 0); |
| 5431 | eth_broadcast_addr(baddr); | 5443 | eth_broadcast_addr(baddr); |
| 5432 | efx_filter_set_eth_local(&spec, vlan->vid, baddr); | 5444 | efx_filter_set_eth_local(&spec, vlan->vid, baddr); |
| 5433 | rc = efx_ef10_filter_insert(efx, &spec, true); | 5445 | rc = efx_ef10_filter_insert_locked(efx, &spec, true); |
| 5434 | if (rc < 0) { | 5446 | if (rc < 0) { |
| 5435 | netif_warn(efx, drv, efx->net_dev, | 5447 | netif_warn(efx, drv, efx->net_dev, |
| 5436 | "Broadcast filter insert failed rc=%d\n", | 5448 | "Broadcast filter insert failed rc=%d\n", |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 570ec72266f3..ce3a177081a8 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
| @@ -1871,12 +1871,6 @@ static void efx_remove_filters(struct efx_nic *efx) | |||
| 1871 | up_write(&efx->filter_sem); | 1871 | up_write(&efx->filter_sem); |
| 1872 | } | 1872 | } |
| 1873 | 1873 | ||
| 1874 | static void efx_restore_filters(struct efx_nic *efx) | ||
| 1875 | { | ||
| 1876 | down_read(&efx->filter_sem); | ||
| 1877 | efx->type->filter_table_restore(efx); | ||
| 1878 | up_read(&efx->filter_sem); | ||
| 1879 | } | ||
| 1880 | 1874 | ||
| 1881 | /************************************************************************** | 1875 | /************************************************************************** |
| 1882 | * | 1876 | * |
| @@ -2688,6 +2682,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) | |||
| 2688 | efx_disable_interrupts(efx); | 2682 | efx_disable_interrupts(efx); |
| 2689 | 2683 | ||
| 2690 | mutex_lock(&efx->mac_lock); | 2684 | mutex_lock(&efx->mac_lock); |
| 2685 | down_write(&efx->filter_sem); | ||
| 2691 | mutex_lock(&efx->rss_lock); | 2686 | mutex_lock(&efx->rss_lock); |
| 2692 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && | 2687 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && |
| 2693 | method != RESET_TYPE_DATAPATH) | 2688 | method != RESET_TYPE_DATAPATH) |
| @@ -2745,9 +2740,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) | |||
| 2745 | if (efx->type->rx_restore_rss_contexts) | 2740 | if (efx->type->rx_restore_rss_contexts) |
| 2746 | efx->type->rx_restore_rss_contexts(efx); | 2741 | efx->type->rx_restore_rss_contexts(efx); |
| 2747 | mutex_unlock(&efx->rss_lock); | 2742 | mutex_unlock(&efx->rss_lock); |
| 2748 | down_read(&efx->filter_sem); | 2743 | efx->type->filter_table_restore(efx); |
| 2749 | efx_restore_filters(efx); | 2744 | up_write(&efx->filter_sem); |
| 2750 | up_read(&efx->filter_sem); | ||
| 2751 | if (efx->type->sriov_reset) | 2745 | if (efx->type->sriov_reset) |
| 2752 | efx->type->sriov_reset(efx); | 2746 | efx->type->sriov_reset(efx); |
| 2753 | 2747 | ||
| @@ -2764,6 +2758,7 @@ fail: | |||
| 2764 | efx->port_initialized = false; | 2758 | efx->port_initialized = false; |
| 2765 | 2759 | ||
| 2766 | mutex_unlock(&efx->rss_lock); | 2760 | mutex_unlock(&efx->rss_lock); |
| 2761 | up_write(&efx->filter_sem); | ||
| 2767 | mutex_unlock(&efx->mac_lock); | 2762 | mutex_unlock(&efx->mac_lock); |
| 2768 | 2763 | ||
| 2769 | return rc; | 2764 | return rc; |
| @@ -3473,7 +3468,9 @@ static int efx_pci_probe_main(struct efx_nic *efx) | |||
| 3473 | 3468 | ||
| 3474 | efx_init_napi(efx); | 3469 | efx_init_napi(efx); |
| 3475 | 3470 | ||
| 3471 | down_write(&efx->filter_sem); | ||
| 3476 | rc = efx->type->init(efx); | 3472 | rc = efx->type->init(efx); |
| 3473 | up_write(&efx->filter_sem); | ||
| 3477 | if (rc) { | 3474 | if (rc) { |
| 3478 | netif_err(efx, probe, efx->net_dev, | 3475 | netif_err(efx, probe, efx->net_dev, |
| 3479 | "failed to initialise NIC\n"); | 3476 | "failed to initialise NIC\n"); |
| @@ -3765,7 +3762,9 @@ static int efx_pm_resume(struct device *dev) | |||
| 3765 | rc = efx->type->reset(efx, RESET_TYPE_ALL); | 3762 | rc = efx->type->reset(efx, RESET_TYPE_ALL); |
| 3766 | if (rc) | 3763 | if (rc) |
| 3767 | return rc; | 3764 | return rc; |
| 3765 | down_write(&efx->filter_sem); | ||
| 3768 | rc = efx->type->init(efx); | 3766 | rc = efx->type->init(efx); |
| 3767 | up_write(&efx->filter_sem); | ||
| 3769 | if (rc) | 3768 | if (rc) |
| 3770 | return rc; | 3769 | return rc; |
| 3771 | rc = efx_pm_thaw(dev); | 3770 | rc = efx_pm_thaw(dev); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 2e6e2a96b4f2..f9a61f90cfbc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | * is done in the "stmmac files" | 37 | * is done in the "stmmac files" |
| 38 | */ | 38 | */ |
| 39 | 39 | ||
| 40 | /* struct emac_variant - Descrive dwmac-sun8i hardware variant | 40 | /* struct emac_variant - Describe dwmac-sun8i hardware variant |
| 41 | * @default_syscon_value: The default value of the EMAC register in syscon | 41 | * @default_syscon_value: The default value of the EMAC register in syscon |
| 42 | * This value is used for disabling properly EMAC | 42 | * This value is used for disabling properly EMAC |
| 43 | * and used as a good starting value in case of the | 43 | * and used as a good starting value in case of the |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6d141f3931eb..72da77b94ecd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
| @@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) | |||
| 94 | /** | 94 | /** |
| 95 | * stmmac_axi_setup - parse DT parameters for programming the AXI register | 95 | * stmmac_axi_setup - parse DT parameters for programming the AXI register |
| 96 | * @pdev: platform device | 96 | * @pdev: platform device |
| 97 | * @priv: driver private struct. | ||
| 98 | * Description: | 97 | * Description: |
| 99 | * if required, from device-tree the AXI internal register can be tuned | 98 | * if required, from device-tree the AXI internal register can be tuned |
| 100 | * by using platform parameters. | 99 | * by using platform parameters. |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 8e9d0ee1572b..31c3d77b4733 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -1274,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget) | |||
| 1274 | struct hv_device *device = netvsc_channel_to_device(channel); | 1274 | struct hv_device *device = netvsc_channel_to_device(channel); |
| 1275 | struct net_device *ndev = hv_get_drvdata(device); | 1275 | struct net_device *ndev = hv_get_drvdata(device); |
| 1276 | int work_done = 0; | 1276 | int work_done = 0; |
| 1277 | int ret; | ||
| 1277 | 1278 | ||
| 1278 | /* If starting a new interval */ | 1279 | /* If starting a new interval */ |
| 1279 | if (!nvchan->desc) | 1280 | if (!nvchan->desc) |
| @@ -1285,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget) | |||
| 1285 | nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); | 1286 | nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); |
| 1286 | } | 1287 | } |
| 1287 | 1288 | ||
| 1288 | /* If send of pending receive completions suceeded | 1289 | /* Send any pending receive completions */ |
| 1289 | * and did not exhaust NAPI budget this time | 1290 | ret = send_recv_completions(ndev, net_device, nvchan); |
| 1290 | * and not doing busy poll | 1291 | |
| 1292 | /* If it did not exhaust NAPI budget this time | ||
| 1293 | * and not doing busy poll | ||
| 1291 | * then re-enable host interrupts | 1294 | * then re-enable host interrupts |
| 1292 | * and reschedule if ring is not empty. | 1295 | * and reschedule if ring is not empty |
| 1296 | * or sending receive completion failed. | ||
| 1293 | */ | 1297 | */ |
| 1294 | if (send_recv_completions(ndev, net_device, nvchan) == 0 && | 1298 | if (work_done < budget && |
| 1295 | work_done < budget && | ||
| 1296 | napi_complete_done(napi, work_done) && | 1299 | napi_complete_done(napi, work_done) && |
| 1297 | hv_end_read(&channel->inbound) && | 1300 | (ret || hv_end_read(&channel->inbound)) && |
| 1298 | napi_schedule_prep(napi)) { | 1301 | napi_schedule_prep(napi)) { |
| 1299 | hv_begin_read(&channel->inbound); | 1302 | hv_begin_read(&channel->inbound); |
| 1300 | __napi_schedule(napi); | 1303 | __napi_schedule(napi); |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9b4e3c3787e5..408ece27131c 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
| @@ -1338,6 +1338,7 @@ out: | |||
| 1338 | /* setting up multiple channels failed */ | 1338 | /* setting up multiple channels failed */ |
| 1339 | net_device->max_chn = 1; | 1339 | net_device->max_chn = 1; |
| 1340 | net_device->num_chn = 1; | 1340 | net_device->num_chn = 1; |
| 1341 | return 0; | ||
| 1341 | 1342 | ||
| 1342 | err_dev_remv: | 1343 | err_dev_remv: |
| 1343 | rndis_filter_device_remove(dev, net_device); | 1344 | rndis_filter_device_remove(dev, net_device); |
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 64f1b1e77bc0..23a52b9293f3 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c | |||
| @@ -275,6 +275,8 @@ struct adf7242_local { | |||
| 275 | struct spi_message stat_msg; | 275 | struct spi_message stat_msg; |
| 276 | struct spi_transfer stat_xfer; | 276 | struct spi_transfer stat_xfer; |
| 277 | struct dentry *debugfs_root; | 277 | struct dentry *debugfs_root; |
| 278 | struct delayed_work work; | ||
| 279 | struct workqueue_struct *wqueue; | ||
| 278 | unsigned long flags; | 280 | unsigned long flags; |
| 279 | int tx_stat; | 281 | int tx_stat; |
| 280 | bool promiscuous; | 282 | bool promiscuous; |
| @@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp) | |||
| 575 | /* Wait until the ACK is sent */ | 577 | /* Wait until the ACK is sent */ |
| 576 | adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); | 578 | adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); |
| 577 | adf7242_clear_irqstat(lp); | 579 | adf7242_clear_irqstat(lp); |
| 580 | mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400)); | ||
| 578 | 581 | ||
| 579 | return adf7242_cmd(lp, CMD_RC_RX); | 582 | return adf7242_cmd(lp, CMD_RC_RX); |
| 580 | } | 583 | } |
| 581 | 584 | ||
| 585 | static void adf7242_rx_cal_work(struct work_struct *work) | ||
| 586 | { | ||
| 587 | struct adf7242_local *lp = | ||
| 588 | container_of(work, struct adf7242_local, work.work); | ||
| 589 | |||
| 590 | /* Reissuing RC_RX every 400ms - to adjust for offset | ||
| 591 | * drift in receiver (datasheet page 61, OCL section) | ||
| 592 | */ | ||
| 593 | |||
| 594 | if (!test_bit(FLAG_XMIT, &lp->flags)) { | ||
| 595 | adf7242_cmd(lp, CMD_RC_PHY_RDY); | ||
| 596 | adf7242_cmd_rx(lp); | ||
| 597 | } | ||
| 598 | } | ||
| 599 | |||
| 582 | static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm) | 600 | static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm) |
| 583 | { | 601 | { |
| 584 | struct adf7242_local *lp = hw->priv; | 602 | struct adf7242_local *lp = hw->priv; |
| @@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw) | |||
| 686 | enable_irq(lp->spi->irq); | 704 | enable_irq(lp->spi->irq); |
| 687 | set_bit(FLAG_START, &lp->flags); | 705 | set_bit(FLAG_START, &lp->flags); |
| 688 | 706 | ||
| 689 | return adf7242_cmd(lp, CMD_RC_RX); | 707 | return adf7242_cmd_rx(lp); |
| 690 | } | 708 | } |
| 691 | 709 | ||
| 692 | static void adf7242_stop(struct ieee802154_hw *hw) | 710 | static void adf7242_stop(struct ieee802154_hw *hw) |
| @@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw) | |||
| 694 | struct adf7242_local *lp = hw->priv; | 712 | struct adf7242_local *lp = hw->priv; |
| 695 | 713 | ||
| 696 | disable_irq(lp->spi->irq); | 714 | disable_irq(lp->spi->irq); |
| 715 | cancel_delayed_work_sync(&lp->work); | ||
| 697 | adf7242_cmd(lp, CMD_RC_IDLE); | 716 | adf7242_cmd(lp, CMD_RC_IDLE); |
| 698 | clear_bit(FLAG_START, &lp->flags); | 717 | clear_bit(FLAG_START, &lp->flags); |
| 699 | adf7242_clear_irqstat(lp); | 718 | adf7242_clear_irqstat(lp); |
| @@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel) | |||
| 719 | adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8); | 738 | adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8); |
| 720 | adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16); | 739 | adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16); |
| 721 | 740 | ||
| 722 | return adf7242_cmd(lp, CMD_RC_RX); | 741 | if (test_bit(FLAG_START, &lp->flags)) |
| 742 | return adf7242_cmd_rx(lp); | ||
| 743 | else | ||
| 744 | return adf7242_cmd(lp, CMD_RC_PHY_RDY); | ||
| 723 | } | 745 | } |
| 724 | 746 | ||
| 725 | static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw, | 747 | static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw, |
| @@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) | |||
| 814 | /* ensure existing instances of the IRQ handler have completed */ | 836 | /* ensure existing instances of the IRQ handler have completed */ |
| 815 | disable_irq(lp->spi->irq); | 837 | disable_irq(lp->spi->irq); |
| 816 | set_bit(FLAG_XMIT, &lp->flags); | 838 | set_bit(FLAG_XMIT, &lp->flags); |
| 839 | cancel_delayed_work_sync(&lp->work); | ||
| 817 | reinit_completion(&lp->tx_complete); | 840 | reinit_completion(&lp->tx_complete); |
| 818 | adf7242_cmd(lp, CMD_RC_PHY_RDY); | 841 | adf7242_cmd(lp, CMD_RC_PHY_RDY); |
| 819 | adf7242_clear_irqstat(lp); | 842 | adf7242_clear_irqstat(lp); |
| @@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data) | |||
| 952 | unsigned int xmit; | 975 | unsigned int xmit; |
| 953 | u8 irq1; | 976 | u8 irq1; |
| 954 | 977 | ||
| 978 | mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400)); | ||
| 955 | adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); | 979 | adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); |
| 956 | 980 | ||
| 957 | if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA))) | 981 | if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA))) |
| @@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi) | |||
| 1241 | spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg); | 1265 | spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg); |
| 1242 | 1266 | ||
| 1243 | spi_set_drvdata(spi, lp); | 1267 | spi_set_drvdata(spi, lp); |
| 1268 | INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); | ||
| 1269 | lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), | ||
| 1270 | WQ_MEM_RECLAIM); | ||
| 1244 | 1271 | ||
| 1245 | ret = adf7242_hw_init(lp); | 1272 | ret = adf7242_hw_init(lp); |
| 1246 | if (ret) | 1273 | if (ret) |
| @@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi) | |||
| 1284 | if (!IS_ERR_OR_NULL(lp->debugfs_root)) | 1311 | if (!IS_ERR_OR_NULL(lp->debugfs_root)) |
| 1285 | debugfs_remove_recursive(lp->debugfs_root); | 1312 | debugfs_remove_recursive(lp->debugfs_root); |
| 1286 | 1313 | ||
| 1314 | cancel_delayed_work_sync(&lp->work); | ||
| 1315 | destroy_workqueue(lp->wqueue); | ||
| 1316 | |||
| 1287 | ieee802154_unregister_hw(lp->hw); | 1317 | ieee802154_unregister_hw(lp->hw); |
| 1288 | mutex_destroy(&lp->bmux); | 1318 | mutex_destroy(&lp->bmux); |
| 1289 | ieee802154_free_hw(lp->hw); | 1319 | ieee802154_free_hw(lp->hw); |
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 77abedf0b524..3d9e91579866 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c | |||
| @@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) | |||
| 940 | static int | 940 | static int |
| 941 | at86rf230_ed(struct ieee802154_hw *hw, u8 *level) | 941 | at86rf230_ed(struct ieee802154_hw *hw, u8 *level) |
| 942 | { | 942 | { |
| 943 | BUG_ON(!level); | 943 | WARN_ON(!level); |
| 944 | *level = 0xbe; | 944 | *level = 0xbe; |
| 945 | return 0; | 945 | return 0; |
| 946 | } | 946 | } |
| @@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, | |||
| 1121 | if (changed & IEEE802154_AFILT_SADDR_CHANGED) { | 1121 | if (changed & IEEE802154_AFILT_SADDR_CHANGED) { |
| 1122 | u16 addr = le16_to_cpu(filt->short_addr); | 1122 | u16 addr = le16_to_cpu(filt->short_addr); |
| 1123 | 1123 | ||
| 1124 | dev_vdbg(&lp->spi->dev, | 1124 | dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__); |
| 1125 | "at86rf230_set_hw_addr_filt called for saddr\n"); | ||
| 1126 | __at86rf230_write(lp, RG_SHORT_ADDR_0, addr); | 1125 | __at86rf230_write(lp, RG_SHORT_ADDR_0, addr); |
| 1127 | __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); | 1126 | __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); |
| 1128 | } | 1127 | } |
| @@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, | |||
| 1130 | if (changed & IEEE802154_AFILT_PANID_CHANGED) { | 1129 | if (changed & IEEE802154_AFILT_PANID_CHANGED) { |
| 1131 | u16 pan = le16_to_cpu(filt->pan_id); | 1130 | u16 pan = le16_to_cpu(filt->pan_id); |
| 1132 | 1131 | ||
| 1133 | dev_vdbg(&lp->spi->dev, | 1132 | dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__); |
| 1134 | "at86rf230_set_hw_addr_filt called for pan id\n"); | ||
| 1135 | __at86rf230_write(lp, RG_PAN_ID_0, pan); | 1133 | __at86rf230_write(lp, RG_PAN_ID_0, pan); |
| 1136 | __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); | 1134 | __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); |
| 1137 | } | 1135 | } |
| @@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, | |||
| 1140 | u8 i, addr[8]; | 1138 | u8 i, addr[8]; |
| 1141 | 1139 | ||
| 1142 | memcpy(addr, &filt->ieee_addr, 8); | 1140 | memcpy(addr, &filt->ieee_addr, 8); |
| 1143 | dev_vdbg(&lp->spi->dev, | 1141 | dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__); |
| 1144 | "at86rf230_set_hw_addr_filt called for IEEE addr\n"); | ||
| 1145 | for (i = 0; i < 8; i++) | 1142 | for (i = 0; i < 8; i++) |
| 1146 | __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); | 1143 | __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); |
| 1147 | } | 1144 | } |
| 1148 | 1145 | ||
| 1149 | if (changed & IEEE802154_AFILT_PANC_CHANGED) { | 1146 | if (changed & IEEE802154_AFILT_PANC_CHANGED) { |
| 1150 | dev_vdbg(&lp->spi->dev, | 1147 | dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__); |
| 1151 | "at86rf230_set_hw_addr_filt called for panc change\n"); | ||
| 1152 | if (filt->pan_coord) | 1148 | if (filt->pan_coord) |
| 1153 | at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1); | 1149 | at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1); |
| 1154 | else | 1150 | else |
| @@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw, | |||
| 1252 | return at86rf230_write_subreg(lp, SR_CCA_MODE, val); | 1248 | return at86rf230_write_subreg(lp, SR_CCA_MODE, val); |
| 1253 | } | 1249 | } |
| 1254 | 1250 | ||
| 1255 | |||
| 1256 | static int | 1251 | static int |
| 1257 | at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) | 1252 | at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) |
| 1258 | { | 1253 | { |
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 0d673f7682ee..176395e4b7bb 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c | |||
| @@ -49,7 +49,7 @@ struct fakelb_phy { | |||
| 49 | 49 | ||
| 50 | static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) | 50 | static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) |
| 51 | { | 51 | { |
| 52 | BUG_ON(!level); | 52 | WARN_ON(!level); |
| 53 | *level = 0xbe; | 53 | *level = 0xbe; |
| 54 | 54 | ||
| 55 | return 0; | 55 | return 0; |
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index de0d7f28a181..e428277781ac 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c | |||
| @@ -15,10 +15,11 @@ | |||
| 15 | */ | 15 | */ |
| 16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | #include <linux/gpio.h> | 18 | #include <linux/gpio/consumer.h> |
| 19 | #include <linux/spi/spi.h> | 19 | #include <linux/spi/spi.h> |
| 20 | #include <linux/workqueue.h> | 20 | #include <linux/workqueue.h> |
| 21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
| 22 | #include <linux/irq.h> | ||
| 22 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
| 23 | #include <linux/of_gpio.h> | 24 | #include <linux/of_gpio.h> |
| 24 | #include <linux/regmap.h> | 25 | #include <linux/regmap.h> |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index b8f57e9b9379..1cd439bdf608 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
| @@ -130,8 +130,9 @@ | |||
| 130 | #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) | 130 | #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) |
| 131 | #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) | 131 | #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) |
| 132 | 132 | ||
| 133 | #define MII_88E1121_PHY_LED_CTRL 16 | 133 | #define MII_PHY_LED_CTRL 16 |
| 134 | #define MII_88E1121_PHY_LED_DEF 0x0030 | 134 | #define MII_88E1121_PHY_LED_DEF 0x0030 |
| 135 | #define MII_88E1510_PHY_LED_DEF 0x1177 | ||
| 135 | 136 | ||
| 136 | #define MII_M1011_PHY_STATUS 0x11 | 137 | #define MII_M1011_PHY_STATUS 0x11 |
| 137 | #define MII_M1011_PHY_STATUS_1000 0x8000 | 138 | #define MII_M1011_PHY_STATUS_1000 0x8000 |
| @@ -632,8 +633,40 @@ error: | |||
| 632 | return err; | 633 | return err; |
| 633 | } | 634 | } |
| 634 | 635 | ||
| 636 | static void marvell_config_led(struct phy_device *phydev) | ||
| 637 | { | ||
| 638 | u16 def_config; | ||
| 639 | int err; | ||
| 640 | |||
| 641 | switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) { | ||
| 642 | /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */ | ||
| 643 | case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R): | ||
| 644 | case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S): | ||
| 645 | def_config = MII_88E1121_PHY_LED_DEF; | ||
| 646 | break; | ||
| 647 | /* Default PHY LED config: | ||
| 648 | * LED[0] .. 1000Mbps Link | ||
| 649 | * LED[1] .. 100Mbps Link | ||
| 650 | * LED[2] .. Blink, Activity | ||
| 651 | */ | ||
| 652 | case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510): | ||
| 653 | def_config = MII_88E1510_PHY_LED_DEF; | ||
| 654 | break; | ||
| 655 | default: | ||
| 656 | return; | ||
| 657 | } | ||
| 658 | |||
| 659 | err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL, | ||
| 660 | def_config); | ||
| 661 | if (err < 0) | ||
| 662 | pr_warn("Fail to config marvell phy LED.\n"); | ||
| 663 | } | ||
| 664 | |||
| 635 | static int marvell_config_init(struct phy_device *phydev) | 665 | static int marvell_config_init(struct phy_device *phydev) |
| 636 | { | 666 | { |
| 667 | /* Set defalut LED */ | ||
| 668 | marvell_config_led(phydev); | ||
| 669 | |||
| 637 | /* Set registers from marvell,reg-init DT property */ | 670 | /* Set registers from marvell,reg-init DT property */ |
| 638 | return marvell_of_reg_init(phydev); | 671 | return marvell_of_reg_init(phydev); |
| 639 | } | 672 | } |
| @@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev) | |||
| 813 | return genphy_soft_reset(phydev); | 846 | return genphy_soft_reset(phydev); |
| 814 | } | 847 | } |
| 815 | 848 | ||
| 816 | static int m88e1121_config_init(struct phy_device *phydev) | ||
| 817 | { | ||
| 818 | int err; | ||
| 819 | |||
| 820 | /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */ | ||
| 821 | err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, | ||
| 822 | MII_88E1121_PHY_LED_CTRL, | ||
| 823 | MII_88E1121_PHY_LED_DEF); | ||
| 824 | if (err < 0) | ||
| 825 | return err; | ||
| 826 | |||
| 827 | /* Set marvell,reg-init configuration from device tree */ | ||
| 828 | return marvell_config_init(phydev); | ||
| 829 | } | ||
| 830 | |||
| 831 | static int m88e1318_config_init(struct phy_device *phydev) | 849 | static int m88e1318_config_init(struct phy_device *phydev) |
| 832 | { | 850 | { |
| 833 | if (phy_interrupt_is_valid(phydev)) { | 851 | if (phy_interrupt_is_valid(phydev)) { |
| @@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev) | |||
| 841 | return err; | 859 | return err; |
| 842 | } | 860 | } |
| 843 | 861 | ||
| 844 | return m88e1121_config_init(phydev); | 862 | return marvell_config_init(phydev); |
| 845 | } | 863 | } |
| 846 | 864 | ||
| 847 | static int m88e1510_config_init(struct phy_device *phydev) | 865 | static int m88e1510_config_init(struct phy_device *phydev) |
| @@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = { | |||
| 2087 | .features = PHY_GBIT_FEATURES, | 2105 | .features = PHY_GBIT_FEATURES, |
| 2088 | .flags = PHY_HAS_INTERRUPT, | 2106 | .flags = PHY_HAS_INTERRUPT, |
| 2089 | .probe = &m88e1121_probe, | 2107 | .probe = &m88e1121_probe, |
| 2090 | .config_init = &m88e1121_config_init, | 2108 | .config_init = &marvell_config_init, |
| 2091 | .config_aneg = &m88e1121_config_aneg, | 2109 | .config_aneg = &m88e1121_config_aneg, |
| 2092 | .read_status = &marvell_read_status, | 2110 | .read_status = &marvell_read_status, |
| 2093 | .ack_interrupt = &marvell_ack_interrupt, | 2111 | .ack_interrupt = &marvell_ack_interrupt, |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index bd0f339f69fd..b9f5f40a7ac1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -1724,11 +1724,8 @@ EXPORT_SYMBOL(genphy_loopback); | |||
| 1724 | 1724 | ||
| 1725 | static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) | 1725 | static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) |
| 1726 | { | 1726 | { |
| 1727 | /* The default values for phydev->supported are provided by the PHY | 1727 | phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | |
| 1728 | * driver "features" member, we want to reset to sane defaults first | 1728 | PHY_10BT_FEATURES); |
| 1729 | * before supporting higher speeds. | ||
| 1730 | */ | ||
| 1731 | phydev->supported &= PHY_DEFAULT_FEATURES; | ||
| 1732 | 1729 | ||
| 1733 | switch (max_speed) { | 1730 | switch (max_speed) { |
| 1734 | default: | 1731 | default: |
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index d437f4f5ed52..740655261e5b 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
| @@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus) | |||
| 349 | } | 349 | } |
| 350 | if (bus->started) | 350 | if (bus->started) |
| 351 | bus->socket_ops->start(bus->sfp); | 351 | bus->socket_ops->start(bus->sfp); |
| 352 | bus->netdev->sfp_bus = bus; | ||
| 353 | bus->registered = true; | 352 | bus->registered = true; |
| 354 | return 0; | 353 | return 0; |
| 355 | } | 354 | } |
| @@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus) | |||
| 364 | if (bus->phydev && ops && ops->disconnect_phy) | 363 | if (bus->phydev && ops && ops->disconnect_phy) |
| 365 | ops->disconnect_phy(bus->upstream); | 364 | ops->disconnect_phy(bus->upstream); |
| 366 | } | 365 | } |
| 367 | bus->netdev->sfp_bus = NULL; | ||
| 368 | bus->registered = false; | 366 | bus->registered = false; |
| 369 | } | 367 | } |
| 370 | 368 | ||
| @@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus) | |||
| 436 | } | 434 | } |
| 437 | EXPORT_SYMBOL_GPL(sfp_upstream_stop); | 435 | EXPORT_SYMBOL_GPL(sfp_upstream_stop); |
| 438 | 436 | ||
| 437 | static void sfp_upstream_clear(struct sfp_bus *bus) | ||
| 438 | { | ||
| 439 | bus->upstream_ops = NULL; | ||
| 440 | bus->upstream = NULL; | ||
| 441 | bus->netdev->sfp_bus = NULL; | ||
| 442 | bus->netdev = NULL; | ||
| 443 | } | ||
| 444 | |||
| 439 | /** | 445 | /** |
| 440 | * sfp_register_upstream() - Register the neighbouring device | 446 | * sfp_register_upstream() - Register the neighbouring device |
| 441 | * @fwnode: firmware node for the SFP bus | 447 | * @fwnode: firmware node for the SFP bus |
| @@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, | |||
| 461 | bus->upstream_ops = ops; | 467 | bus->upstream_ops = ops; |
| 462 | bus->upstream = upstream; | 468 | bus->upstream = upstream; |
| 463 | bus->netdev = ndev; | 469 | bus->netdev = ndev; |
| 470 | ndev->sfp_bus = bus; | ||
| 464 | 471 | ||
| 465 | if (bus->sfp) | 472 | if (bus->sfp) { |
| 466 | ret = sfp_register_bus(bus); | 473 | ret = sfp_register_bus(bus); |
| 474 | if (ret) | ||
| 475 | sfp_upstream_clear(bus); | ||
| 476 | } | ||
| 467 | rtnl_unlock(); | 477 | rtnl_unlock(); |
| 468 | } | 478 | } |
| 469 | 479 | ||
| @@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus) | |||
| 488 | rtnl_lock(); | 498 | rtnl_lock(); |
| 489 | if (bus->sfp) | 499 | if (bus->sfp) |
| 490 | sfp_unregister_bus(bus); | 500 | sfp_unregister_bus(bus); |
| 491 | bus->upstream = NULL; | 501 | sfp_upstream_clear(bus); |
| 492 | bus->netdev = NULL; | ||
| 493 | rtnl_unlock(); | 502 | rtnl_unlock(); |
| 494 | 503 | ||
| 495 | sfp_bus_put(bus); | 504 | sfp_bus_put(bus); |
| @@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus) | |||
| 561 | } | 570 | } |
| 562 | EXPORT_SYMBOL_GPL(sfp_module_remove); | 571 | EXPORT_SYMBOL_GPL(sfp_module_remove); |
| 563 | 572 | ||
| 573 | static void sfp_socket_clear(struct sfp_bus *bus) | ||
| 574 | { | ||
| 575 | bus->sfp_dev = NULL; | ||
| 576 | bus->sfp = NULL; | ||
| 577 | bus->socket_ops = NULL; | ||
| 578 | } | ||
| 579 | |||
| 564 | struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, | 580 | struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, |
| 565 | const struct sfp_socket_ops *ops) | 581 | const struct sfp_socket_ops *ops) |
| 566 | { | 582 | { |
| @@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, | |||
| 573 | bus->sfp = sfp; | 589 | bus->sfp = sfp; |
| 574 | bus->socket_ops = ops; | 590 | bus->socket_ops = ops; |
| 575 | 591 | ||
| 576 | if (bus->netdev) | 592 | if (bus->netdev) { |
| 577 | ret = sfp_register_bus(bus); | 593 | ret = sfp_register_bus(bus); |
| 594 | if (ret) | ||
| 595 | sfp_socket_clear(bus); | ||
| 596 | } | ||
| 578 | rtnl_unlock(); | 597 | rtnl_unlock(); |
| 579 | } | 598 | } |
| 580 | 599 | ||
| @@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus) | |||
| 592 | rtnl_lock(); | 611 | rtnl_lock(); |
| 593 | if (bus->netdev) | 612 | if (bus->netdev) |
| 594 | sfp_unregister_bus(bus); | 613 | sfp_unregister_bus(bus); |
| 595 | bus->sfp_dev = NULL; | 614 | sfp_socket_clear(bus); |
| 596 | bus->sfp = NULL; | ||
| 597 | bus->socket_ops = NULL; | ||
| 598 | rtnl_unlock(); | 615 | rtnl_unlock(); |
| 599 | 616 | ||
| 600 | sfp_bus_put(bus); | 617 | sfp_bus_put(bus); |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a192a017cc68..f5727baac84a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
| 1688 | case XDP_TX: | 1688 | case XDP_TX: |
| 1689 | get_page(alloc_frag->page); | 1689 | get_page(alloc_frag->page); |
| 1690 | alloc_frag->offset += buflen; | 1690 | alloc_frag->offset += buflen; |
| 1691 | if (tun_xdp_tx(tun->dev, &xdp)) | 1691 | if (tun_xdp_tx(tun->dev, &xdp) < 0) |
| 1692 | goto err_redirect; | 1692 | goto err_redirect; |
| 1693 | rcu_read_unlock(); | 1693 | rcu_read_unlock(); |
| 1694 | local_bh_enable(); | 1694 | local_bh_enable(); |
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 3d4f7959dabb..b1b3d8f7e67d 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c | |||
| @@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev) | |||
| 642 | priv->presvd_phy_advertise); | 642 | priv->presvd_phy_advertise); |
| 643 | 643 | ||
| 644 | /* Restore BMCR */ | 644 | /* Restore BMCR */ |
| 645 | if (priv->presvd_phy_bmcr & BMCR_ANENABLE) | ||
| 646 | priv->presvd_phy_bmcr |= BMCR_ANRESTART; | ||
| 647 | |||
| 645 | asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR, | 648 | asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR, |
| 646 | priv->presvd_phy_bmcr); | 649 | priv->presvd_phy_bmcr); |
| 647 | 650 | ||
| 648 | mii_nway_restart(&dev->mii); | ||
| 649 | priv->presvd_phy_advertise = 0; | 651 | priv->presvd_phy_advertise = 0; |
| 650 | priv->presvd_phy_bmcr = 0; | 652 | priv->presvd_phy_bmcr = 0; |
| 651 | } | 653 | } |
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 2e4130746c40..ed10d49eb5e0 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
| @@ -3344,6 +3344,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) | |||
| 3344 | pkt_cnt = 0; | 3344 | pkt_cnt = 0; |
| 3345 | count = 0; | 3345 | count = 0; |
| 3346 | length = 0; | 3346 | length = 0; |
| 3347 | spin_lock_irqsave(&tqp->lock, flags); | ||
| 3347 | for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { | 3348 | for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { |
| 3348 | if (skb_is_gso(skb)) { | 3349 | if (skb_is_gso(skb)) { |
| 3349 | if (pkt_cnt) { | 3350 | if (pkt_cnt) { |
| @@ -3352,7 +3353,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) | |||
| 3352 | } | 3353 | } |
| 3353 | count = 1; | 3354 | count = 1; |
| 3354 | length = skb->len - TX_OVERHEAD; | 3355 | length = skb->len - TX_OVERHEAD; |
| 3355 | skb2 = skb_dequeue(tqp); | 3356 | __skb_unlink(skb, tqp); |
| 3357 | spin_unlock_irqrestore(&tqp->lock, flags); | ||
| 3356 | goto gso_skb; | 3358 | goto gso_skb; |
| 3357 | } | 3359 | } |
| 3358 | 3360 | ||
| @@ -3361,6 +3363,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) | |||
| 3361 | skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); | 3363 | skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); |
| 3362 | pkt_cnt++; | 3364 | pkt_cnt++; |
| 3363 | } | 3365 | } |
| 3366 | spin_unlock_irqrestore(&tqp->lock, flags); | ||
| 3364 | 3367 | ||
| 3365 | /* copy to a single skb */ | 3368 | /* copy to a single skb */ |
| 3366 | skb = alloc_skb(skb_totallen, GFP_ATOMIC); | 3369 | skb = alloc_skb(skb_totallen, GFP_ATOMIC); |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8fac8e132c5b..38502809420b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -1253,6 +1253,7 @@ static const struct usb_device_id products[] = { | |||
| 1253 | {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ | 1253 | {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ |
| 1254 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ | 1254 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ |
| 1255 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ | 1255 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ |
| 1256 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ | ||
| 1256 | {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ | 1257 | {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ |
| 1257 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ | 1258 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ |
| 1258 | 1259 | ||
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 5f565bd574da..48ba80a8ca5c 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c | |||
| @@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev) | |||
| 681 | (netdev->flags & IFF_ALLMULTI)) { | 681 | (netdev->flags & IFF_ALLMULTI)) { |
| 682 | rx_creg &= 0xfffe; | 682 | rx_creg &= 0xfffe; |
| 683 | rx_creg |= 0x0002; | 683 | rx_creg |= 0x0002; |
| 684 | dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); | 684 | dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name); |
| 685 | } else { | 685 | } else { |
| 686 | /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ | 686 | /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ |
| 687 | rx_creg &= 0x00fc; | 687 | rx_creg &= 0x00fc; |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 7a6a1fe79309..05553d252446 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
| @@ -82,6 +82,9 @@ static bool turbo_mode = true; | |||
| 82 | module_param(turbo_mode, bool, 0644); | 82 | module_param(turbo_mode, bool, 0644); |
| 83 | MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); | 83 | MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); |
| 84 | 84 | ||
| 85 | static int smsc75xx_link_ok_nopm(struct usbnet *dev); | ||
| 86 | static int smsc75xx_phy_gig_workaround(struct usbnet *dev); | ||
| 87 | |||
| 85 | static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, | 88 | static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, |
| 86 | u32 *data, int in_pm) | 89 | u32 *data, int in_pm) |
| 87 | { | 90 | { |
| @@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev) | |||
| 852 | return -EIO; | 855 | return -EIO; |
| 853 | } | 856 | } |
| 854 | 857 | ||
| 858 | /* phy workaround for gig link */ | ||
| 859 | smsc75xx_phy_gig_workaround(dev); | ||
| 860 | |||
| 855 | smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, | 861 | smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, |
| 856 | ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | | 862 | ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | |
| 857 | ADVERTISE_PAUSE_ASYM); | 863 | ADVERTISE_PAUSE_ASYM); |
| @@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) | |||
| 987 | return -EIO; | 993 | return -EIO; |
| 988 | } | 994 | } |
| 989 | 995 | ||
| 996 | static int smsc75xx_phy_gig_workaround(struct usbnet *dev) | ||
| 997 | { | ||
| 998 | struct mii_if_info *mii = &dev->mii; | ||
| 999 | int ret = 0, timeout = 0; | ||
| 1000 | u32 buf, link_up = 0; | ||
| 1001 | |||
| 1002 | /* Set the phy in Gig loopback */ | ||
| 1003 | smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040); | ||
| 1004 | |||
| 1005 | /* Wait for the link up */ | ||
| 1006 | do { | ||
| 1007 | link_up = smsc75xx_link_ok_nopm(dev); | ||
| 1008 | usleep_range(10000, 20000); | ||
| 1009 | timeout++; | ||
| 1010 | } while ((!link_up) && (timeout < 1000)); | ||
| 1011 | |||
| 1012 | if (timeout >= 1000) { | ||
| 1013 | netdev_warn(dev->net, "Timeout waiting for PHY link up\n"); | ||
| 1014 | return -EIO; | ||
| 1015 | } | ||
| 1016 | |||
| 1017 | /* phy reset */ | ||
| 1018 | ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); | ||
| 1019 | if (ret < 0) { | ||
| 1020 | netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); | ||
| 1021 | return ret; | ||
| 1022 | } | ||
| 1023 | |||
| 1024 | buf |= PMT_CTL_PHY_RST; | ||
| 1025 | |||
| 1026 | ret = smsc75xx_write_reg(dev, PMT_CTL, buf); | ||
| 1027 | if (ret < 0) { | ||
| 1028 | netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); | ||
| 1029 | return ret; | ||
| 1030 | } | ||
| 1031 | |||
| 1032 | timeout = 0; | ||
| 1033 | do { | ||
| 1034 | usleep_range(10000, 20000); | ||
| 1035 | ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); | ||
| 1036 | if (ret < 0) { | ||
| 1037 | netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", | ||
| 1038 | ret); | ||
| 1039 | return ret; | ||
| 1040 | } | ||
| 1041 | timeout++; | ||
| 1042 | } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); | ||
| 1043 | |||
| 1044 | if (timeout >= 100) { | ||
| 1045 | netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); | ||
| 1046 | return -EIO; | ||
| 1047 | } | ||
| 1048 | |||
| 1049 | return 0; | ||
| 1050 | } | ||
| 1051 | |||
| 990 | static int smsc75xx_reset(struct usbnet *dev) | 1052 | static int smsc75xx_reset(struct usbnet *dev) |
| 991 | { | 1053 | { |
| 992 | struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); | 1054 | struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); |
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index e9c2fb318c03..836e0a47b94a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
| @@ -6058,8 +6058,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) | |||
| 6058 | ath10k_mac_max_vht_nss(vht_mcs_mask))); | 6058 | ath10k_mac_max_vht_nss(vht_mcs_mask))); |
| 6059 | 6059 | ||
| 6060 | if (changed & IEEE80211_RC_BW_CHANGED) { | 6060 | if (changed & IEEE80211_RC_BW_CHANGED) { |
| 6061 | ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", | 6061 | enum wmi_phy_mode mode; |
| 6062 | sta->addr, bw); | 6062 | |
| 6063 | mode = chan_to_phymode(&def); | ||
| 6064 | ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n", | ||
| 6065 | sta->addr, bw, mode); | ||
| 6066 | |||
| 6067 | err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, | ||
| 6068 | WMI_PEER_PHYMODE, mode); | ||
| 6069 | if (err) { | ||
| 6070 | ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n", | ||
| 6071 | sta->addr, mode, err); | ||
| 6072 | goto exit; | ||
| 6073 | } | ||
| 6063 | 6074 | ||
| 6064 | err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, | 6075 | err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, |
| 6065 | WMI_PEER_CHAN_WIDTH, bw); | 6076 | WMI_PEER_CHAN_WIDTH, bw); |
| @@ -6100,6 +6111,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) | |||
| 6100 | sta->addr); | 6111 | sta->addr); |
| 6101 | } | 6112 | } |
| 6102 | 6113 | ||
| 6114 | exit: | ||
| 6103 | mutex_unlock(&ar->conf_mutex); | 6115 | mutex_unlock(&ar->conf_mutex); |
| 6104 | } | 6116 | } |
| 6105 | 6117 | ||
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index b48db54e9865..d68afb65402a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h | |||
| @@ -6144,6 +6144,7 @@ enum wmi_peer_param { | |||
| 6144 | WMI_PEER_NSS = 0x5, | 6144 | WMI_PEER_NSS = 0x5, |
| 6145 | WMI_PEER_USE_4ADDR = 0x6, | 6145 | WMI_PEER_USE_4ADDR = 0x6, |
| 6146 | WMI_PEER_DEBUG = 0xa, | 6146 | WMI_PEER_DEBUG = 0xa, |
| 6147 | WMI_PEER_PHYMODE = 0xd, | ||
| 6147 | WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ | 6148 | WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ |
| 6148 | }; | 6149 | }; |
| 6149 | 6150 | ||
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c index 1279064a3b71..51a038022c8b 100644 --- a/drivers/net/wireless/ath/wcn36xx/testmode.c +++ b/drivers/net/wireless/ath/wcn36xx/testmode.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2018, The Linux Foundation. All rights reserved. | 2 | * Copyright (c) 2018, The Linux Foundation. All rights reserved. |
| 3 | * | 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for any | 4 | * Permission to use, copy, modify, and/or distribute this software for any |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index c99a191e8d69..a907d7b065fa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | |||
| @@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) | |||
| 4296 | brcmf_dbg(TRACE, "Enter\n"); | 4296 | brcmf_dbg(TRACE, "Enter\n"); |
| 4297 | 4297 | ||
| 4298 | if (bus) { | 4298 | if (bus) { |
| 4299 | /* Stop watchdog task */ | ||
| 4300 | if (bus->watchdog_tsk) { | ||
| 4301 | send_sig(SIGTERM, bus->watchdog_tsk, 1); | ||
| 4302 | kthread_stop(bus->watchdog_tsk); | ||
| 4303 | bus->watchdog_tsk = NULL; | ||
| 4304 | } | ||
| 4305 | |||
| 4299 | /* De-register interrupt handler */ | 4306 | /* De-register interrupt handler */ |
| 4300 | brcmf_sdiod_intr_unregister(bus->sdiodev); | 4307 | brcmf_sdiod_intr_unregister(bus->sdiodev); |
| 4301 | 4308 | ||
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 6e3cf9817730..88f4c89f89ba 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c | |||
| @@ -644,11 +644,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) | |||
| 644 | MWIFIEX_FUNC_SHUTDOWN); | 644 | MWIFIEX_FUNC_SHUTDOWN); |
| 645 | } | 645 | } |
| 646 | 646 | ||
| 647 | if (adapter->workqueue) | ||
| 648 | flush_workqueue(adapter->workqueue); | ||
| 649 | |||
| 650 | mwifiex_usb_free(card); | ||
| 651 | |||
| 652 | mwifiex_dbg(adapter, FATAL, | 647 | mwifiex_dbg(adapter, FATAL, |
| 653 | "%s: removing card\n", __func__); | 648 | "%s: removing card\n", __func__); |
| 654 | mwifiex_remove_card(adapter); | 649 | mwifiex_remove_card(adapter); |
| @@ -1356,6 +1351,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) | |||
| 1356 | { | 1351 | { |
| 1357 | struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; | 1352 | struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; |
| 1358 | 1353 | ||
| 1354 | mwifiex_usb_free(card); | ||
| 1355 | |||
| 1359 | mwifiex_usb_cleanup_tx_aggr(adapter); | 1356 | mwifiex_usb_cleanup_tx_aggr(adapter); |
| 1360 | 1357 | ||
| 1361 | card->adapter = NULL; | 1358 | card->adapter = NULL; |
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c index 9d2f9a776ef1..b804abd464ae 100644 --- a/drivers/net/wireless/mediatek/mt7601u/phy.c +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c | |||
| @@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev) | |||
| 986 | */ | 986 | */ |
| 987 | spin_lock_bh(&dev->con_mon_lock); | 987 | spin_lock_bh(&dev->con_mon_lock); |
| 988 | avg_rssi = ewma_rssi_read(&dev->avg_rssi); | 988 | avg_rssi = ewma_rssi_read(&dev->avg_rssi); |
| 989 | WARN_ON_ONCE(avg_rssi == 0); | 989 | spin_unlock_bh(&dev->con_mon_lock); |
| 990 | if (avg_rssi == 0) | ||
| 991 | return; | ||
| 992 | |||
| 990 | avg_rssi = -avg_rssi; | 993 | avg_rssi = -avg_rssi; |
| 991 | if (avg_rssi <= -70) | 994 | if (avg_rssi <= -70) |
| 992 | val -= 0x20; | 995 | val -= 0x20; |
| 993 | else if (avg_rssi <= -60) | 996 | else if (avg_rssi <= -60) |
| 994 | val -= 0x10; | 997 | val -= 0x10; |
| 995 | spin_unlock_bh(&dev->con_mon_lock); | ||
| 996 | 998 | ||
| 997 | if (val != mt7601u_bbp_rr(dev, 66)) | 999 | if (val != mt7601u_bbp_rr(dev, 66)) |
| 998 | mt7601u_bbp_wr(dev, 66, val); | 1000 | mt7601u_bbp_wr(dev, 66, val); |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 220e2b710208..ae0ca8006849 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | |||
| @@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, | |||
| 654 | vif = qtnf_mac_get_base_vif(mac); | 654 | vif = qtnf_mac_get_base_vif(mac); |
| 655 | if (!vif) { | 655 | if (!vif) { |
| 656 | pr_err("MAC%u: primary VIF is not configured\n", mac->macid); | 656 | pr_err("MAC%u: primary VIF is not configured\n", mac->macid); |
| 657 | ret = -EFAULT; | 657 | return -EFAULT; |
| 658 | goto out; | ||
| 659 | } | 658 | } |
| 660 | 659 | ||
| 661 | if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { | 660 | if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { |
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 39c817eddd78..54c9f6ab0c8c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c | |||
| @@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) | |||
| 484 | 484 | ||
| 485 | } | 485 | } |
| 486 | 486 | ||
| 487 | void rtl_deinit_deferred_work(struct ieee80211_hw *hw) | 487 | void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq) |
| 488 | { | 488 | { |
| 489 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 489 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 490 | 490 | ||
| 491 | del_timer_sync(&rtlpriv->works.watchdog_timer); | 491 | del_timer_sync(&rtlpriv->works.watchdog_timer); |
| 492 | 492 | ||
| 493 | cancel_delayed_work(&rtlpriv->works.watchdog_wq); | 493 | cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq); |
| 494 | cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); | 494 | if (ips_wq) |
| 495 | cancel_delayed_work(&rtlpriv->works.ps_work); | 495 | cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); |
| 496 | cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); | 496 | else |
| 497 | cancel_delayed_work(&rtlpriv->works.fwevt_wq); | 497 | cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); |
| 498 | cancel_delayed_work(&rtlpriv->works.c2hcmd_wq); | 498 | cancel_delayed_work_sync(&rtlpriv->works.ps_work); |
| 499 | cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq); | ||
| 500 | cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq); | ||
| 501 | cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq); | ||
| 499 | } | 502 | } |
| 500 | EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work); | 503 | EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work); |
| 501 | 504 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 912f205779c3..a7ae40eaa3cd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h | |||
| @@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw); | |||
| 121 | void rtl_deinit_rfkill(struct ieee80211_hw *hw); | 121 | void rtl_deinit_rfkill(struct ieee80211_hw *hw); |
| 122 | 122 | ||
| 123 | void rtl_watch_dog_timer_callback(struct timer_list *t); | 123 | void rtl_watch_dog_timer_callback(struct timer_list *t); |
| 124 | void rtl_deinit_deferred_work(struct ieee80211_hw *hw); | 124 | void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq); |
| 125 | 125 | ||
| 126 | bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); | 126 | bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); |
| 127 | int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, | 127 | int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, |
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index cfea57efa7f4..4bf7967590ca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c | |||
| @@ -130,7 +130,6 @@ found_alt: | |||
| 130 | firmware->size); | 130 | firmware->size); |
| 131 | rtlpriv->rtlhal.wowlan_fwsize = firmware->size; | 131 | rtlpriv->rtlhal.wowlan_fwsize = firmware->size; |
| 132 | } | 132 | } |
| 133 | rtlpriv->rtlhal.fwsize = firmware->size; | ||
| 134 | release_firmware(firmware); | 133 | release_firmware(firmware); |
| 135 | } | 134 | } |
| 136 | 135 | ||
| @@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw) | |||
| 196 | /* reset sec info */ | 195 | /* reset sec info */ |
| 197 | rtl_cam_reset_sec_info(hw); | 196 | rtl_cam_reset_sec_info(hw); |
| 198 | 197 | ||
| 199 | rtl_deinit_deferred_work(hw); | 198 | rtl_deinit_deferred_work(hw, false); |
| 200 | } | 199 | } |
| 201 | rtlpriv->intf_ops->adapter_stop(hw); | 200 | rtlpriv->intf_ops->adapter_stop(hw); |
| 202 | 201 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index ae13bcfb3bf0..5d1fda16fc8c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c | |||
| @@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev) | |||
| 2377 | ieee80211_unregister_hw(hw); | 2377 | ieee80211_unregister_hw(hw); |
| 2378 | rtlmac->mac80211_registered = 0; | 2378 | rtlmac->mac80211_registered = 0; |
| 2379 | } else { | 2379 | } else { |
| 2380 | rtl_deinit_deferred_work(hw); | 2380 | rtl_deinit_deferred_work(hw, false); |
| 2381 | rtlpriv->intf_ops->adapter_stop(hw); | 2381 | rtlpriv->intf_ops->adapter_stop(hw); |
| 2382 | } | 2382 | } |
| 2383 | rtlpriv->cfg->ops->disable_interrupt(hw); | 2383 | rtlpriv->cfg->ops->disable_interrupt(hw); |
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 71af24e2e051..479a4cfc245d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c | |||
| @@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw) | |||
| 71 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 71 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 72 | 72 | ||
| 73 | /*<1> Stop all timer */ | 73 | /*<1> Stop all timer */ |
| 74 | rtl_deinit_deferred_work(hw); | 74 | rtl_deinit_deferred_work(hw, true); |
| 75 | 75 | ||
| 76 | /*<2> Disable Interrupt */ | 76 | /*<2> Disable Interrupt */ |
| 77 | rtlpriv->cfg->ops->disable_interrupt(hw); | 77 | rtlpriv->cfg->ops->disable_interrupt(hw); |
| @@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) | |||
| 292 | struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); | 292 | struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); |
| 293 | enum rf_pwrstate rtstate; | 293 | enum rf_pwrstate rtstate; |
| 294 | 294 | ||
| 295 | cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); | 295 | cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); |
| 296 | 296 | ||
| 297 | mutex_lock(&rtlpriv->locks.ips_mutex); | 297 | mutex_lock(&rtlpriv->locks.ips_mutex); |
| 298 | if (ppsc->inactiveps) { | 298 | if (ppsc->inactiveps) { |
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index f9faffc498bc..2ac5004d7a40 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c | |||
| @@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf) | |||
| 1132 | ieee80211_unregister_hw(hw); | 1132 | ieee80211_unregister_hw(hw); |
| 1133 | rtlmac->mac80211_registered = 0; | 1133 | rtlmac->mac80211_registered = 0; |
| 1134 | } else { | 1134 | } else { |
| 1135 | rtl_deinit_deferred_work(hw); | 1135 | rtl_deinit_deferred_work(hw, false); |
| 1136 | rtlpriv->intf_ops->adapter_stop(hw); | 1136 | rtlpriv->intf_ops->adapter_stop(hw); |
| 1137 | } | 1137 | } |
| 1138 | /*deinit rfkill */ | 1138 | /*deinit rfkill */ |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 848f549164cd..466e3c8582f0 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -102,7 +102,7 @@ static u32 phandle_cache_mask; | |||
| 102 | * - the phandle lookup overhead reduction provided by the cache | 102 | * - the phandle lookup overhead reduction provided by the cache |
| 103 | * will likely be less | 103 | * will likely be less |
| 104 | */ | 104 | */ |
| 105 | static void of_populate_phandle_cache(void) | 105 | void of_populate_phandle_cache(void) |
| 106 | { | 106 | { |
| 107 | unsigned long flags; | 107 | unsigned long flags; |
| 108 | u32 cache_entries; | 108 | u32 cache_entries; |
| @@ -134,8 +134,7 @@ out: | |||
| 134 | raw_spin_unlock_irqrestore(&devtree_lock, flags); | 134 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | #ifndef CONFIG_MODULES | 137 | int of_free_phandle_cache(void) |
| 138 | static int __init of_free_phandle_cache(void) | ||
| 139 | { | 138 | { |
| 140 | unsigned long flags; | 139 | unsigned long flags; |
| 141 | 140 | ||
| @@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void) | |||
| 148 | 147 | ||
| 149 | return 0; | 148 | return 0; |
| 150 | } | 149 | } |
| 150 | #if !defined(CONFIG_MODULES) | ||
| 151 | late_initcall_sync(of_free_phandle_cache); | 151 | late_initcall_sync(of_free_phandle_cache); |
| 152 | #endif | 152 | #endif |
| 153 | 153 | ||
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index 891d780c076a..216175d11d3d 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h | |||
| @@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree); | |||
| 79 | #if defined(CONFIG_OF_OVERLAY) | 79 | #if defined(CONFIG_OF_OVERLAY) |
| 80 | void of_overlay_mutex_lock(void); | 80 | void of_overlay_mutex_lock(void); |
| 81 | void of_overlay_mutex_unlock(void); | 81 | void of_overlay_mutex_unlock(void); |
| 82 | int of_free_phandle_cache(void); | ||
| 83 | void of_populate_phandle_cache(void); | ||
| 82 | #else | 84 | #else |
| 83 | static inline void of_overlay_mutex_lock(void) {}; | 85 | static inline void of_overlay_mutex_lock(void) {}; |
| 84 | static inline void of_overlay_mutex_unlock(void) {}; | 86 | static inline void of_overlay_mutex_unlock(void) {}; |
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 7baa53e5b1d7..eda57ef12fd0 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
| @@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree, | |||
| 804 | goto err_free_overlay_changeset; | 804 | goto err_free_overlay_changeset; |
| 805 | } | 805 | } |
| 806 | 806 | ||
| 807 | of_populate_phandle_cache(); | ||
| 808 | |||
| 807 | ret = __of_changeset_apply_notify(&ovcs->cset); | 809 | ret = __of_changeset_apply_notify(&ovcs->cset); |
| 808 | if (ret) | 810 | if (ret) |
| 809 | pr_err("overlay changeset entry notify error %d\n", ret); | 811 | pr_err("overlay changeset entry notify error %d\n", ret); |
| @@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id) | |||
| 1046 | 1048 | ||
| 1047 | list_del(&ovcs->ovcs_list); | 1049 | list_del(&ovcs->ovcs_list); |
| 1048 | 1050 | ||
| 1051 | /* | ||
| 1052 | * Disable phandle cache. Avoids race condition that would arise | ||
| 1053 | * from removing cache entry when the associated node is deleted. | ||
| 1054 | */ | ||
| 1055 | of_free_phandle_cache(); | ||
| 1056 | |||
| 1049 | ret_apply = 0; | 1057 | ret_apply = 0; |
| 1050 | ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply); | 1058 | ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply); |
| 1059 | |||
| 1060 | of_populate_phandle_cache(); | ||
| 1061 | |||
| 1051 | if (ret) { | 1062 | if (ret) { |
| 1052 | if (ret_apply) | 1063 | if (ret_apply) |
| 1053 | devicetree_state_flags |= DTSF_REVERT_FAIL; | 1064 | devicetree_state_flags |= DTSF_REVERT_FAIL; |
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 781aa03aeede..29a05759a294 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c | |||
| @@ -363,7 +363,8 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 363 | resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { | 363 | resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { |
| 364 | switch (resource_type(win->res)) { | 364 | switch (resource_type(win->res)) { |
| 365 | case IORESOURCE_IO: | 365 | case IORESOURCE_IO: |
| 366 | ret = pci_remap_iospace(win->res, pp->io_base); | 366 | ret = devm_pci_remap_iospace(dev, win->res, |
| 367 | pp->io_base); | ||
| 367 | if (ret) { | 368 | if (ret) { |
| 368 | dev_warn(dev, "Error %d: failed to map resource %pR\n", | 369 | dev_warn(dev, "Error %d: failed to map resource %pR\n", |
| 369 | ret, win->res); | 370 | ret, win->res); |
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index d3172d5d3d35..0fae816fba39 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c | |||
| @@ -849,7 +849,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) | |||
| 849 | 0, 0xF8000000, 0, | 849 | 0, 0xF8000000, 0, |
| 850 | lower_32_bits(res->start), | 850 | lower_32_bits(res->start), |
| 851 | OB_PCIE_IO); | 851 | OB_PCIE_IO); |
| 852 | err = pci_remap_iospace(res, iobase); | 852 | err = devm_pci_remap_iospace(dev, res, iobase); |
| 853 | if (err) { | 853 | if (err) { |
| 854 | dev_warn(dev, "error %d: failed to map resource %pR\n", | 854 | dev_warn(dev, "error %d: failed to map resource %pR\n", |
| 855 | err, res); | 855 | err, res); |
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index 20bb2564a6b3..bf5ece5d9291 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c | |||
| @@ -503,7 +503,7 @@ static int faraday_pci_probe(struct platform_device *pdev) | |||
| 503 | dev_err(dev, "illegal IO mem size\n"); | 503 | dev_err(dev, "illegal IO mem size\n"); |
| 504 | return -EINVAL; | 504 | return -EINVAL; |
| 505 | } | 505 | } |
| 506 | ret = pci_remap_iospace(io, io_base); | 506 | ret = devm_pci_remap_iospace(dev, io, io_base); |
| 507 | if (ret) { | 507 | if (ret) { |
| 508 | dev_warn(dev, "error %d: failed to map resource %pR\n", | 508 | dev_warn(dev, "error %d: failed to map resource %pR\n", |
| 509 | ret, io); | 509 | ret, io); |
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 6cc5036ac83c..f6325f1a89e8 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c | |||
| @@ -1073,6 +1073,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 1073 | struct pci_bus *pbus; | 1073 | struct pci_bus *pbus; |
| 1074 | struct pci_dev *pdev; | 1074 | struct pci_dev *pdev; |
| 1075 | struct cpumask *dest; | 1075 | struct cpumask *dest; |
| 1076 | unsigned long flags; | ||
| 1076 | struct compose_comp_ctxt comp; | 1077 | struct compose_comp_ctxt comp; |
| 1077 | struct tran_int_desc *int_desc; | 1078 | struct tran_int_desc *int_desc; |
| 1078 | struct { | 1079 | struct { |
| @@ -1164,14 +1165,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 1164 | * the channel callback directly when channel->target_cpu is | 1165 | * the channel callback directly when channel->target_cpu is |
| 1165 | * the current CPU. When the higher level interrupt code | 1166 | * the current CPU. When the higher level interrupt code |
| 1166 | * calls us with interrupt enabled, let's add the | 1167 | * calls us with interrupt enabled, let's add the |
| 1167 | * local_bh_disable()/enable() to avoid race. | 1168 | * local_irq_save()/restore() to avoid race: |
| 1169 | * hv_pci_onchannelcallback() can also run in tasklet. | ||
| 1168 | */ | 1170 | */ |
| 1169 | local_bh_disable(); | 1171 | local_irq_save(flags); |
| 1170 | 1172 | ||
| 1171 | if (hbus->hdev->channel->target_cpu == smp_processor_id()) | 1173 | if (hbus->hdev->channel->target_cpu == smp_processor_id()) |
| 1172 | hv_pci_onchannelcallback(hbus); | 1174 | hv_pci_onchannelcallback(hbus); |
| 1173 | 1175 | ||
| 1174 | local_bh_enable(); | 1176 | local_irq_restore(flags); |
| 1175 | 1177 | ||
| 1176 | if (hpdev->state == hv_pcichild_ejecting) { | 1178 | if (hpdev->state == hv_pcichild_ejecting) { |
| 1177 | dev_err_once(&hbus->hdev->device, | 1179 | dev_err_once(&hbus->hdev->device, |
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index 68b8bfbdb867..d219404bad92 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c | |||
| @@ -537,7 +537,7 @@ static int v3_pci_setup_resource(struct v3_pci *v3, | |||
| 537 | v3->io_bus_addr = io->start - win->offset; | 537 | v3->io_bus_addr = io->start - win->offset; |
| 538 | dev_dbg(dev, "I/O window %pR, bus addr %pap\n", | 538 | dev_dbg(dev, "I/O window %pR, bus addr %pap\n", |
| 539 | io, &v3->io_bus_addr); | 539 | io, &v3->io_bus_addr); |
| 540 | ret = pci_remap_iospace(io, io_base); | 540 | ret = devm_pci_remap_iospace(dev, io, io_base); |
| 541 | if (ret) { | 541 | if (ret) { |
| 542 | dev_warn(dev, | 542 | dev_warn(dev, |
| 543 | "error %d: failed to map resource %pR\n", | 543 | "error %d: failed to map resource %pR\n", |
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c index 994f32061b32..f59ad2728c0b 100644 --- a/drivers/pci/controller/pci-versatile.c +++ b/drivers/pci/controller/pci-versatile.c | |||
| @@ -82,7 +82,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, | |||
| 82 | 82 | ||
| 83 | switch (resource_type(res)) { | 83 | switch (resource_type(res)) { |
| 84 | case IORESOURCE_IO: | 84 | case IORESOURCE_IO: |
| 85 | err = pci_remap_iospace(res, iobase); | 85 | err = devm_pci_remap_iospace(dev, res, iobase); |
| 86 | if (err) { | 86 | if (err) { |
| 87 | dev_warn(dev, "error %d: failed to map resource %pR\n", | 87 | dev_warn(dev, "error %d: failed to map resource %pR\n", |
| 88 | err, res); | 88 | err, res); |
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index d854d67e873c..ffda3e8b4742 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c | |||
| @@ -423,7 +423,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, | |||
| 423 | case IORESOURCE_IO: | 423 | case IORESOURCE_IO: |
| 424 | xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base, | 424 | xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base, |
| 425 | res->start - window->offset); | 425 | res->start - window->offset); |
| 426 | ret = pci_remap_iospace(res, io_base); | 426 | ret = devm_pci_remap_iospace(dev, res, io_base); |
| 427 | if (ret < 0) | 427 | if (ret < 0) |
| 428 | return ret; | 428 | return ret; |
| 429 | break; | 429 | break; |
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 0baabe30858f..861dda69f366 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c | |||
| @@ -1109,7 +1109,7 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie) | |||
| 1109 | if (err < 0) | 1109 | if (err < 0) |
| 1110 | return err; | 1110 | return err; |
| 1111 | 1111 | ||
| 1112 | pci_remap_iospace(&pcie->pio, pcie->io.start); | 1112 | devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start); |
| 1113 | 1113 | ||
| 1114 | return 0; | 1114 | return 0; |
| 1115 | } | 1115 | } |
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index bf53fad636a5..825fa24427a3 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c | |||
| @@ -137,25 +137,60 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) | |||
| 137 | } | 137 | } |
| 138 | EXPORT_SYMBOL_GPL(pci_epf_alloc_space); | 138 | EXPORT_SYMBOL_GPL(pci_epf_alloc_space); |
| 139 | 139 | ||
| 140 | /** | 140 | static void pci_epf_remove_cfs(struct pci_epf_driver *driver) |
| 141 | * pci_epf_unregister_driver() - unregister the PCI EPF driver | ||
| 142 | * @driver: the PCI EPF driver that has to be unregistered | ||
| 143 | * | ||
| 144 | * Invoke to unregister the PCI EPF driver. | ||
| 145 | */ | ||
| 146 | void pci_epf_unregister_driver(struct pci_epf_driver *driver) | ||
| 147 | { | 141 | { |
| 148 | struct config_group *group, *tmp; | 142 | struct config_group *group, *tmp; |
| 149 | 143 | ||
| 144 | if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS)) | ||
| 145 | return; | ||
| 146 | |||
| 150 | mutex_lock(&pci_epf_mutex); | 147 | mutex_lock(&pci_epf_mutex); |
| 151 | list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) | 148 | list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) |
| 152 | pci_ep_cfs_remove_epf_group(group); | 149 | pci_ep_cfs_remove_epf_group(group); |
| 153 | list_del(&driver->epf_group); | 150 | list_del(&driver->epf_group); |
| 154 | mutex_unlock(&pci_epf_mutex); | 151 | mutex_unlock(&pci_epf_mutex); |
| 152 | } | ||
| 153 | |||
| 154 | /** | ||
| 155 | * pci_epf_unregister_driver() - unregister the PCI EPF driver | ||
| 156 | * @driver: the PCI EPF driver that has to be unregistered | ||
| 157 | * | ||
| 158 | * Invoke to unregister the PCI EPF driver. | ||
| 159 | */ | ||
| 160 | void pci_epf_unregister_driver(struct pci_epf_driver *driver) | ||
| 161 | { | ||
| 162 | pci_epf_remove_cfs(driver); | ||
| 155 | driver_unregister(&driver->driver); | 163 | driver_unregister(&driver->driver); |
| 156 | } | 164 | } |
| 157 | EXPORT_SYMBOL_GPL(pci_epf_unregister_driver); | 165 | EXPORT_SYMBOL_GPL(pci_epf_unregister_driver); |
| 158 | 166 | ||
| 167 | static int pci_epf_add_cfs(struct pci_epf_driver *driver) | ||
| 168 | { | ||
| 169 | struct config_group *group; | ||
| 170 | const struct pci_epf_device_id *id; | ||
| 171 | |||
| 172 | if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS)) | ||
| 173 | return 0; | ||
| 174 | |||
| 175 | INIT_LIST_HEAD(&driver->epf_group); | ||
| 176 | |||
| 177 | id = driver->id_table; | ||
| 178 | while (id->name[0]) { | ||
| 179 | group = pci_ep_cfs_add_epf_group(id->name); | ||
| 180 | if (IS_ERR(group)) { | ||
| 181 | pci_epf_remove_cfs(driver); | ||
| 182 | return PTR_ERR(group); | ||
| 183 | } | ||
| 184 | |||
| 185 | mutex_lock(&pci_epf_mutex); | ||
| 186 | list_add_tail(&group->group_entry, &driver->epf_group); | ||
| 187 | mutex_unlock(&pci_epf_mutex); | ||
| 188 | id++; | ||
| 189 | } | ||
| 190 | |||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | |||
| 159 | /** | 194 | /** |
| 160 | * __pci_epf_register_driver() - register a new PCI EPF driver | 195 | * __pci_epf_register_driver() - register a new PCI EPF driver |
| 161 | * @driver: structure representing PCI EPF driver | 196 | * @driver: structure representing PCI EPF driver |
| @@ -167,8 +202,6 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver, | |||
| 167 | struct module *owner) | 202 | struct module *owner) |
| 168 | { | 203 | { |
| 169 | int ret; | 204 | int ret; |
| 170 | struct config_group *group; | ||
| 171 | const struct pci_epf_device_id *id; | ||
| 172 | 205 | ||
| 173 | if (!driver->ops) | 206 | if (!driver->ops) |
| 174 | return -EINVAL; | 207 | return -EINVAL; |
| @@ -183,16 +216,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver, | |||
| 183 | if (ret) | 216 | if (ret) |
| 184 | return ret; | 217 | return ret; |
| 185 | 218 | ||
| 186 | INIT_LIST_HEAD(&driver->epf_group); | 219 | pci_epf_add_cfs(driver); |
| 187 | |||
| 188 | id = driver->id_table; | ||
| 189 | while (id->name[0]) { | ||
| 190 | group = pci_ep_cfs_add_epf_group(id->name); | ||
| 191 | mutex_lock(&pci_epf_mutex); | ||
| 192 | list_add_tail(&group->group_entry, &driver->epf_group); | ||
| 193 | mutex_unlock(&pci_epf_mutex); | ||
| 194 | id++; | ||
| 195 | } | ||
| 196 | 220 | ||
| 197 | return 0; | 221 | return 0; |
| 198 | } | 222 | } |
diff --git a/drivers/pci/of.c b/drivers/pci/of.c index d088c9147f10..69a60d6ebd73 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c | |||
| @@ -612,7 +612,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev, | |||
| 612 | 612 | ||
| 613 | switch (resource_type(res)) { | 613 | switch (resource_type(res)) { |
| 614 | case IORESOURCE_IO: | 614 | case IORESOURCE_IO: |
| 615 | err = pci_remap_iospace(res, iobase); | 615 | err = devm_pci_remap_iospace(dev, res, iobase); |
| 616 | if (err) { | 616 | if (err) { |
| 617 | dev_warn(dev, "error %d: failed to map resource %pR\n", | 617 | dev_warn(dev, "error %d: failed to map resource %pR\n", |
| 618 | err, res); | 618 | err, res); |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 97acba712e4e..316496e99da9 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -3579,6 +3579,44 @@ void pci_unmap_iospace(struct resource *res) | |||
| 3579 | } | 3579 | } |
| 3580 | EXPORT_SYMBOL(pci_unmap_iospace); | 3580 | EXPORT_SYMBOL(pci_unmap_iospace); |
| 3581 | 3581 | ||
| 3582 | static void devm_pci_unmap_iospace(struct device *dev, void *ptr) | ||
| 3583 | { | ||
| 3584 | struct resource **res = ptr; | ||
| 3585 | |||
| 3586 | pci_unmap_iospace(*res); | ||
| 3587 | } | ||
| 3588 | |||
| 3589 | /** | ||
| 3590 | * devm_pci_remap_iospace - Managed pci_remap_iospace() | ||
| 3591 | * @dev: Generic device to remap IO address for | ||
| 3592 | * @res: Resource describing the I/O space | ||
| 3593 | * @phys_addr: physical address of range to be mapped | ||
| 3594 | * | ||
| 3595 | * Managed pci_remap_iospace(). Map is automatically unmapped on driver | ||
| 3596 | * detach. | ||
| 3597 | */ | ||
| 3598 | int devm_pci_remap_iospace(struct device *dev, const struct resource *res, | ||
| 3599 | phys_addr_t phys_addr) | ||
| 3600 | { | ||
| 3601 | const struct resource **ptr; | ||
| 3602 | int error; | ||
| 3603 | |||
| 3604 | ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL); | ||
| 3605 | if (!ptr) | ||
| 3606 | return -ENOMEM; | ||
| 3607 | |||
| 3608 | error = pci_remap_iospace(res, phys_addr); | ||
| 3609 | if (error) { | ||
| 3610 | devres_free(ptr); | ||
| 3611 | } else { | ||
| 3612 | *ptr = res; | ||
| 3613 | devres_add(dev, ptr); | ||
| 3614 | } | ||
| 3615 | |||
| 3616 | return error; | ||
| 3617 | } | ||
| 3618 | EXPORT_SYMBOL(devm_pci_remap_iospace); | ||
| 3619 | |||
| 3582 | /** | 3620 | /** |
| 3583 | * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() | 3621 | * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() |
| 3584 | * @dev: Generic device to remap IO address for | 3622 | * @dev: Generic device to remap IO address for |
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c index 35c17653c694..87618a4e90e4 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c | |||
| @@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev, | |||
| 460 | const struct nsp_pin_function *func; | 460 | const struct nsp_pin_function *func; |
| 461 | const struct nsp_pin_group *grp; | 461 | const struct nsp_pin_group *grp; |
| 462 | 462 | ||
| 463 | if (grp_select > pinctrl->num_groups || | 463 | if (grp_select >= pinctrl->num_groups || |
| 464 | func_select > pinctrl->num_functions) | 464 | func_select >= pinctrl->num_functions) |
| 465 | return -EINVAL; | 465 | return -EINVAL; |
| 466 | 466 | ||
| 467 | func = &pinctrl->functions[func_select]; | 467 | func = &pinctrl->functions[func_select]; |
| @@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev) | |||
| 577 | return PTR_ERR(pinctrl->base0); | 577 | return PTR_ERR(pinctrl->base0); |
| 578 | 578 | ||
| 579 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 579 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 580 | if (!res) | ||
| 581 | return -EINVAL; | ||
| 580 | pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, | 582 | pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, |
| 581 | resource_size(res)); | 583 | resource_size(res)); |
| 582 | if (!pinctrl->base1) { | 584 | if (!pinctrl->base1) { |
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c index e3f1ab2290fc..4c4740ffeb9c 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c | |||
| @@ -1424,7 +1424,7 @@ static struct pinctrl_desc mtk_desc = { | |||
| 1424 | 1424 | ||
| 1425 | static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio) | 1425 | static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio) |
| 1426 | { | 1426 | { |
| 1427 | struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent); | 1427 | struct mtk_pinctrl *hw = gpiochip_get_data(chip); |
| 1428 | int value, err; | 1428 | int value, err; |
| 1429 | 1429 | ||
| 1430 | err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value); | 1430 | err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value); |
| @@ -1436,7 +1436,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio) | |||
| 1436 | 1436 | ||
| 1437 | static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) | 1437 | static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) |
| 1438 | { | 1438 | { |
| 1439 | struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent); | 1439 | struct mtk_pinctrl *hw = gpiochip_get_data(chip); |
| 1440 | 1440 | ||
| 1441 | mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value); | 1441 | mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value); |
| 1442 | } | 1442 | } |
| @@ -1508,11 +1508,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np) | |||
| 1508 | if (ret < 0) | 1508 | if (ret < 0) |
| 1509 | return ret; | 1509 | return ret; |
| 1510 | 1510 | ||
| 1511 | ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0, | 1511 | /* Just for backward compatible for these old pinctrl nodes without |
| 1512 | chip->ngpio); | 1512 | * "gpio-ranges" property. Otherwise, called directly from a |
| 1513 | if (ret < 0) { | 1513 | * DeviceTree-supported pinctrl driver is DEPRECATED. |
| 1514 | gpiochip_remove(chip); | 1514 | * Please see Section 2.1 of |
| 1515 | return ret; | 1515 | * Documentation/devicetree/bindings/gpio/gpio.txt on how to |
| 1516 | * bind pinctrl and gpio drivers via the "gpio-ranges" property. | ||
| 1517 | */ | ||
| 1518 | if (!of_find_property(np, "gpio-ranges", NULL)) { | ||
| 1519 | ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0, | ||
| 1520 | chip->ngpio); | ||
| 1521 | if (ret < 0) { | ||
| 1522 | gpiochip_remove(chip); | ||
| 1523 | return ret; | ||
| 1524 | } | ||
| 1516 | } | 1525 | } |
| 1517 | 1526 | ||
| 1518 | return 0; | 1527 | return 0; |
| @@ -1695,15 +1704,16 @@ static int mtk_pinctrl_probe(struct platform_device *pdev) | |||
| 1695 | mtk_desc.custom_conf_items = mtk_conf_items; | 1704 | mtk_desc.custom_conf_items = mtk_conf_items; |
| 1696 | #endif | 1705 | #endif |
| 1697 | 1706 | ||
| 1698 | hw->pctrl = devm_pinctrl_register(&pdev->dev, &mtk_desc, hw); | 1707 | err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw, |
| 1699 | if (IS_ERR(hw->pctrl)) | 1708 | &hw->pctrl); |
| 1700 | return PTR_ERR(hw->pctrl); | 1709 | if (err) |
| 1710 | return err; | ||
| 1701 | 1711 | ||
| 1702 | /* Setup groups descriptions per SoC types */ | 1712 | /* Setup groups descriptions per SoC types */ |
| 1703 | err = mtk_build_groups(hw); | 1713 | err = mtk_build_groups(hw); |
| 1704 | if (err) { | 1714 | if (err) { |
| 1705 | dev_err(&pdev->dev, "Failed to build groups\n"); | 1715 | dev_err(&pdev->dev, "Failed to build groups\n"); |
| 1706 | return 0; | 1716 | return err; |
| 1707 | } | 1717 | } |
| 1708 | 1718 | ||
| 1709 | /* Setup functions descriptions per SoC types */ | 1719 | /* Setup functions descriptions per SoC types */ |
| @@ -1713,17 +1723,25 @@ static int mtk_pinctrl_probe(struct platform_device *pdev) | |||
| 1713 | return err; | 1723 | return err; |
| 1714 | } | 1724 | } |
| 1715 | 1725 | ||
| 1716 | err = mtk_build_gpiochip(hw, pdev->dev.of_node); | 1726 | /* For able to make pinctrl_claim_hogs, we must not enable pinctrl |
| 1717 | if (err) { | 1727 | * until all groups and functions are being added one. |
| 1718 | dev_err(&pdev->dev, "Failed to add gpio_chip\n"); | 1728 | */ |
| 1729 | err = pinctrl_enable(hw->pctrl); | ||
| 1730 | if (err) | ||
| 1719 | return err; | 1731 | return err; |
| 1720 | } | ||
| 1721 | 1732 | ||
| 1722 | err = mtk_build_eint(hw, pdev); | 1733 | err = mtk_build_eint(hw, pdev); |
| 1723 | if (err) | 1734 | if (err) |
| 1724 | dev_warn(&pdev->dev, | 1735 | dev_warn(&pdev->dev, |
| 1725 | "Failed to add EINT, but pinctrl still can work\n"); | 1736 | "Failed to add EINT, but pinctrl still can work\n"); |
| 1726 | 1737 | ||
| 1738 | /* Build gpiochip should be after pinctrl_enable is done */ | ||
| 1739 | err = mtk_build_gpiochip(hw, pdev->dev.of_node); | ||
| 1740 | if (err) { | ||
| 1741 | dev_err(&pdev->dev, "Failed to add gpio_chip\n"); | ||
| 1742 | return err; | ||
| 1743 | } | ||
| 1744 | |||
| 1727 | platform_set_drvdata(pdev, hw); | 1745 | platform_set_drvdata(pdev, hw); |
| 1728 | 1746 | ||
| 1729 | return 0; | 1747 | return 0; |
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index a1d7156d0a43..6a1b6058b991 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c | |||
| @@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, | |||
| 536 | ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input); | 536 | ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input); |
| 537 | } else { | 537 | } else { |
| 538 | ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false); | 538 | ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false); |
| 539 | ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input); | 539 | ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input); |
| 540 | ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false); | 540 | ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false); |
| 541 | } | 541 | } |
| 542 | 542 | ||
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c index b02caf316711..eeb58b3bbc9a 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c | |||
| @@ -21,15 +21,13 @@ | |||
| 21 | #include "core.h" | 21 | #include "core.h" |
| 22 | #include "sh_pfc.h" | 22 | #include "sh_pfc.h" |
| 23 | 23 | ||
| 24 | #define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH | ||
| 25 | |||
| 26 | #define CPU_ALL_PORT(fn, sfx) \ | 24 | #define CPU_ALL_PORT(fn, sfx) \ |
| 27 | PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ | 25 | PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \ |
| 28 | PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS), \ | 26 | PORT_GP_28(1, fn, sfx), \ |
| 29 | PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ | 27 | PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \ |
| 30 | PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ | 28 | PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \ |
| 31 | PORT_GP_CFG_6(4, fn, sfx, CFG_FLAGS), \ | 29 | PORT_GP_6(4, fn, sfx), \ |
| 32 | PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS) | 30 | PORT_GP_15(5, fn, sfx) |
| 33 | /* | 31 | /* |
| 34 | * F_() : just information | 32 | * F_() : just information |
| 35 | * FM() : macro for FN_xxx / xxx_MARK | 33 | * FM() : macro for FN_xxx / xxx_MARK |
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index f1fa8612db40..06978c14c83b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
| @@ -2185,7 +2185,7 @@ static int __init dell_init(void) | |||
| 2185 | dell_fill_request(&buffer, token->location, 0, 0, 0); | 2185 | dell_fill_request(&buffer, token->location, 0, 0, 0); |
| 2186 | ret = dell_send_request(&buffer, | 2186 | ret = dell_send_request(&buffer, |
| 2187 | CLASS_TOKEN_READ, SELECT_TOKEN_AC); | 2187 | CLASS_TOKEN_READ, SELECT_TOKEN_AC); |
| 2188 | if (ret) | 2188 | if (ret == 0) |
| 2189 | max_intensity = buffer.output[3]; | 2189 | max_intensity = buffer.output[3]; |
| 2190 | } | 2190 | } |
| 2191 | 2191 | ||
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 547dbdac9d54..01b0e2bb3319 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c | |||
| @@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin, | |||
| 89 | case PTP_PF_PHYSYNC: | 89 | case PTP_PF_PHYSYNC: |
| 90 | if (chan != 0) | 90 | if (chan != 0) |
| 91 | return -EINVAL; | 91 | return -EINVAL; |
| 92 | break; | ||
| 92 | default: | 93 | default: |
| 93 | return -EINVAL; | 94 | return -EINVAL; |
| 94 | } | 95 | } |
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index 2a3977823812..a39be94d110c 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h | |||
| @@ -107,12 +107,12 @@ cxlflash_assign_ops(struct dev_dependent_vals *ddv) | |||
| 107 | { | 107 | { |
| 108 | const struct cxlflash_backend_ops *ops = NULL; | 108 | const struct cxlflash_backend_ops *ops = NULL; |
| 109 | 109 | ||
| 110 | #ifdef CONFIG_OCXL | 110 | #ifdef CONFIG_OCXL_BASE |
| 111 | if (ddv->flags & CXLFLASH_OCXL_DEV) | 111 | if (ddv->flags & CXLFLASH_OCXL_DEV) |
| 112 | ops = &cxlflash_ocxl_ops; | 112 | ops = &cxlflash_ocxl_ops; |
| 113 | #endif | 113 | #endif |
| 114 | 114 | ||
| 115 | #ifdef CONFIG_CXL | 115 | #ifdef CONFIG_CXL_BASE |
| 116 | if (!(ddv->flags & CXLFLASH_OCXL_DEV)) | 116 | if (!(ddv->flags & CXLFLASH_OCXL_DEV)) |
| 117 | ops = &cxlflash_cxl_ops; | 117 | ops = &cxlflash_cxl_ops; |
| 118 | #endif | 118 | #endif |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 15c7f3b6f35e..58bb70b886d7 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
| @@ -3440,11 +3440,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h, | |||
| 3440 | struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; | 3440 | struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; |
| 3441 | u16 bmic_device_index = 0; | 3441 | u16 bmic_device_index = 0; |
| 3442 | 3442 | ||
| 3443 | bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); | 3443 | encl_dev->eli = |
| 3444 | |||
| 3445 | encl_dev->sas_address = | ||
| 3446 | hpsa_get_enclosure_logical_identifier(h, scsi3addr); | 3444 | hpsa_get_enclosure_logical_identifier(h, scsi3addr); |
| 3447 | 3445 | ||
| 3446 | bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); | ||
| 3447 | |||
| 3448 | if (encl_dev->target == -1 || encl_dev->lun == -1) { | 3448 | if (encl_dev->target == -1 || encl_dev->lun == -1) { |
| 3449 | rc = IO_OK; | 3449 | rc = IO_OK; |
| 3450 | goto out; | 3450 | goto out; |
| @@ -9697,7 +9697,24 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy) | |||
| 9697 | static int | 9697 | static int |
| 9698 | hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) | 9698 | hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) |
| 9699 | { | 9699 | { |
| 9700 | *identifier = rphy->identify.sas_address; | 9700 | struct Scsi_Host *shost = phy_to_shost(rphy); |
| 9701 | struct ctlr_info *h; | ||
| 9702 | struct hpsa_scsi_dev_t *sd; | ||
| 9703 | |||
| 9704 | if (!shost) | ||
| 9705 | return -ENXIO; | ||
| 9706 | |||
| 9707 | h = shost_to_hba(shost); | ||
| 9708 | |||
| 9709 | if (!h) | ||
| 9710 | return -ENXIO; | ||
| 9711 | |||
| 9712 | sd = hpsa_find_device_by_sas_rphy(h, rphy); | ||
| 9713 | if (!sd) | ||
| 9714 | return -ENXIO; | ||
| 9715 | |||
| 9716 | *identifier = sd->eli; | ||
| 9717 | |||
| 9701 | return 0; | 9718 | return 0; |
| 9702 | } | 9719 | } |
| 9703 | 9720 | ||
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index fb9f5e7f8209..59e023696fff 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
| @@ -68,6 +68,7 @@ struct hpsa_scsi_dev_t { | |||
| 68 | #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" | 68 | #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" |
| 69 | unsigned char device_id[16]; /* from inquiry pg. 0x83 */ | 69 | unsigned char device_id[16]; /* from inquiry pg. 0x83 */ |
| 70 | u64 sas_address; | 70 | u64 sas_address; |
| 71 | u64 eli; /* from report diags. */ | ||
| 71 | unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ | 72 | unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ |
| 72 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ | 73 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ |
| 73 | unsigned char rev; /* byte 2 of inquiry data */ | 74 | unsigned char rev; /* byte 2 of inquiry data */ |
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 90394cef0f41..0a5dd5595dd3 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c | |||
| @@ -3295,6 +3295,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) | |||
| 3295 | 3295 | ||
| 3296 | init_completion(&qedf->flogi_compl); | 3296 | init_completion(&qedf->flogi_compl); |
| 3297 | 3297 | ||
| 3298 | status = qed_ops->common->update_drv_state(qedf->cdev, true); | ||
| 3299 | if (status) | ||
| 3300 | QEDF_ERR(&(qedf->dbg_ctx), | ||
| 3301 | "Failed to send drv state to MFW.\n"); | ||
| 3302 | |||
| 3298 | memset(&link_params, 0, sizeof(struct qed_link_params)); | 3303 | memset(&link_params, 0, sizeof(struct qed_link_params)); |
| 3299 | link_params.link_up = true; | 3304 | link_params.link_up = true; |
| 3300 | status = qed_ops->common->set_link(qedf->cdev, &link_params); | 3305 | status = qed_ops->common->set_link(qedf->cdev, &link_params); |
| @@ -3343,6 +3348,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 3343 | static void __qedf_remove(struct pci_dev *pdev, int mode) | 3348 | static void __qedf_remove(struct pci_dev *pdev, int mode) |
| 3344 | { | 3349 | { |
| 3345 | struct qedf_ctx *qedf; | 3350 | struct qedf_ctx *qedf; |
| 3351 | int rc; | ||
| 3346 | 3352 | ||
| 3347 | if (!pdev) { | 3353 | if (!pdev) { |
| 3348 | QEDF_ERR(NULL, "pdev is NULL.\n"); | 3354 | QEDF_ERR(NULL, "pdev is NULL.\n"); |
| @@ -3437,6 +3443,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) | |||
| 3437 | qed_ops->common->set_power_state(qedf->cdev, PCI_D0); | 3443 | qed_ops->common->set_power_state(qedf->cdev, PCI_D0); |
| 3438 | pci_set_drvdata(pdev, NULL); | 3444 | pci_set_drvdata(pdev, NULL); |
| 3439 | } | 3445 | } |
| 3446 | |||
| 3447 | rc = qed_ops->common->update_drv_state(qedf->cdev, false); | ||
| 3448 | if (rc) | ||
| 3449 | QEDF_ERR(&(qedf->dbg_ctx), | ||
| 3450 | "Failed to send drv state to MFW.\n"); | ||
| 3451 | |||
| 3440 | qed_ops->common->slowpath_stop(qedf->cdev); | 3452 | qed_ops->common->slowpath_stop(qedf->cdev); |
| 3441 | qed_ops->common->remove(qedf->cdev); | 3453 | qed_ops->common->remove(qedf->cdev); |
| 3442 | 3454 | ||
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cf274a79e77a..091ec1207bea 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
| @@ -2273,6 +2273,7 @@ kset_free: | |||
| 2273 | static void __qedi_remove(struct pci_dev *pdev, int mode) | 2273 | static void __qedi_remove(struct pci_dev *pdev, int mode) |
| 2274 | { | 2274 | { |
| 2275 | struct qedi_ctx *qedi = pci_get_drvdata(pdev); | 2275 | struct qedi_ctx *qedi = pci_get_drvdata(pdev); |
| 2276 | int rval; | ||
| 2276 | 2277 | ||
| 2277 | if (qedi->tmf_thread) { | 2278 | if (qedi->tmf_thread) { |
| 2278 | flush_workqueue(qedi->tmf_thread); | 2279 | flush_workqueue(qedi->tmf_thread); |
| @@ -2302,6 +2303,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode) | |||
| 2302 | if (mode == QEDI_MODE_NORMAL) | 2303 | if (mode == QEDI_MODE_NORMAL) |
| 2303 | qedi_free_iscsi_pf_param(qedi); | 2304 | qedi_free_iscsi_pf_param(qedi); |
| 2304 | 2305 | ||
| 2306 | rval = qedi_ops->common->update_drv_state(qedi->cdev, false); | ||
| 2307 | if (rval) | ||
| 2308 | QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n"); | ||
| 2309 | |||
| 2305 | if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { | 2310 | if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) { |
| 2306 | qedi_ops->common->slowpath_stop(qedi->cdev); | 2311 | qedi_ops->common->slowpath_stop(qedi->cdev); |
| 2307 | qedi_ops->common->remove(qedi->cdev); | 2312 | qedi_ops->common->remove(qedi->cdev); |
| @@ -2576,6 +2581,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) | |||
| 2576 | if (qedi_setup_boot_info(qedi)) | 2581 | if (qedi_setup_boot_info(qedi)) |
| 2577 | QEDI_ERR(&qedi->dbg_ctx, | 2582 | QEDI_ERR(&qedi->dbg_ctx, |
| 2578 | "No iSCSI boot target configured\n"); | 2583 | "No iSCSI boot target configured\n"); |
| 2584 | |||
| 2585 | rc = qedi_ops->common->update_drv_state(qedi->cdev, true); | ||
| 2586 | if (rc) | ||
| 2587 | QEDI_ERR(&qedi->dbg_ctx, | ||
| 2588 | "Failed to send drv state to MFW\n"); | ||
| 2589 | |||
| 2579 | } | 2590 | } |
| 2580 | 2591 | ||
| 2581 | return 0; | 2592 | return 0; |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 9442e18aef6f..0f94b1d62d3f 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -361,6 +361,8 @@ struct ct_arg { | |||
| 361 | dma_addr_t rsp_dma; | 361 | dma_addr_t rsp_dma; |
| 362 | u32 req_size; | 362 | u32 req_size; |
| 363 | u32 rsp_size; | 363 | u32 rsp_size; |
| 364 | u32 req_allocated_size; | ||
| 365 | u32 rsp_allocated_size; | ||
| 364 | void *req; | 366 | void *req; |
| 365 | void *rsp; | 367 | void *rsp; |
| 366 | port_id_t id; | 368 | port_id_t id; |
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 4bc2b66b299f..2c35b0b2baa0 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
| @@ -556,7 +556,7 @@ err2: | |||
| 556 | /* please ignore kernel warning. otherwise, we have mem leak. */ | 556 | /* please ignore kernel warning. otherwise, we have mem leak. */ |
| 557 | if (sp->u.iocb_cmd.u.ctarg.req) { | 557 | if (sp->u.iocb_cmd.u.ctarg.req) { |
| 558 | dma_free_coherent(&vha->hw->pdev->dev, | 558 | dma_free_coherent(&vha->hw->pdev->dev, |
| 559 | sizeof(struct ct_sns_pkt), | 559 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, |
| 560 | sp->u.iocb_cmd.u.ctarg.req, | 560 | sp->u.iocb_cmd.u.ctarg.req, |
| 561 | sp->u.iocb_cmd.u.ctarg.req_dma); | 561 | sp->u.iocb_cmd.u.ctarg.req_dma); |
| 562 | sp->u.iocb_cmd.u.ctarg.req = NULL; | 562 | sp->u.iocb_cmd.u.ctarg.req = NULL; |
| @@ -564,7 +564,7 @@ err2: | |||
| 564 | 564 | ||
| 565 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | 565 | if (sp->u.iocb_cmd.u.ctarg.rsp) { |
| 566 | dma_free_coherent(&vha->hw->pdev->dev, | 566 | dma_free_coherent(&vha->hw->pdev->dev, |
| 567 | sizeof(struct ct_sns_pkt), | 567 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, |
| 568 | sp->u.iocb_cmd.u.ctarg.rsp, | 568 | sp->u.iocb_cmd.u.ctarg.rsp, |
| 569 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | 569 | sp->u.iocb_cmd.u.ctarg.rsp_dma); |
| 570 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | 570 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; |
| @@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) | |||
| 617 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, | 617 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 618 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, | 618 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, |
| 619 | GFP_KERNEL); | 619 | GFP_KERNEL); |
| 620 | sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 620 | if (!sp->u.iocb_cmd.u.ctarg.req) { | 621 | if (!sp->u.iocb_cmd.u.ctarg.req) { |
| 621 | ql_log(ql_log_warn, vha, 0xd041, | 622 | ql_log(ql_log_warn, vha, 0xd041, |
| 622 | "%s: Failed to allocate ct_sns request.\n", | 623 | "%s: Failed to allocate ct_sns request.\n", |
| @@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) | |||
| 627 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, | 628 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 628 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, | 629 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, |
| 629 | GFP_KERNEL); | 630 | GFP_KERNEL); |
| 631 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 630 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { | 632 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { |
| 631 | ql_log(ql_log_warn, vha, 0xd042, | 633 | ql_log(ql_log_warn, vha, 0xd042, |
| 632 | "%s: Failed to allocate ct_sns request.\n", | 634 | "%s: Failed to allocate ct_sns request.\n", |
| @@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, | |||
| 712 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, | 714 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 713 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, | 715 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, |
| 714 | GFP_KERNEL); | 716 | GFP_KERNEL); |
| 717 | sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 715 | if (!sp->u.iocb_cmd.u.ctarg.req) { | 718 | if (!sp->u.iocb_cmd.u.ctarg.req) { |
| 716 | ql_log(ql_log_warn, vha, 0xd041, | 719 | ql_log(ql_log_warn, vha, 0xd041, |
| 717 | "%s: Failed to allocate ct_sns request.\n", | 720 | "%s: Failed to allocate ct_sns request.\n", |
| @@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, | |||
| 722 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, | 725 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 723 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, | 726 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, |
| 724 | GFP_KERNEL); | 727 | GFP_KERNEL); |
| 728 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 725 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { | 729 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { |
| 726 | ql_log(ql_log_warn, vha, 0xd042, | 730 | ql_log(ql_log_warn, vha, 0xd042, |
| 727 | "%s: Failed to allocate ct_sns request.\n", | 731 | "%s: Failed to allocate ct_sns request.\n", |
| @@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, | |||
| 802 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, | 806 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 803 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, | 807 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, |
| 804 | GFP_KERNEL); | 808 | GFP_KERNEL); |
| 809 | sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 805 | if (!sp->u.iocb_cmd.u.ctarg.req) { | 810 | if (!sp->u.iocb_cmd.u.ctarg.req) { |
| 806 | ql_log(ql_log_warn, vha, 0xd041, | 811 | ql_log(ql_log_warn, vha, 0xd041, |
| 807 | "%s: Failed to allocate ct_sns request.\n", | 812 | "%s: Failed to allocate ct_sns request.\n", |
| @@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, | |||
| 812 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, | 817 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 813 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, | 818 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, |
| 814 | GFP_KERNEL); | 819 | GFP_KERNEL); |
| 820 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 815 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { | 821 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { |
| 816 | ql_log(ql_log_warn, vha, 0xd042, | 822 | ql_log(ql_log_warn, vha, 0xd042, |
| 817 | "%s: Failed to allocate ct_sns request.\n", | 823 | "%s: Failed to allocate ct_sns request.\n", |
| @@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) | |||
| 909 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, | 915 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 910 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, | 916 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, |
| 911 | GFP_KERNEL); | 917 | GFP_KERNEL); |
| 918 | sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 912 | if (!sp->u.iocb_cmd.u.ctarg.req) { | 919 | if (!sp->u.iocb_cmd.u.ctarg.req) { |
| 913 | ql_log(ql_log_warn, vha, 0xd041, | 920 | ql_log(ql_log_warn, vha, 0xd041, |
| 914 | "%s: Failed to allocate ct_sns request.\n", | 921 | "%s: Failed to allocate ct_sns request.\n", |
| @@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) | |||
| 919 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, | 926 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 920 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, | 927 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, |
| 921 | GFP_KERNEL); | 928 | GFP_KERNEL); |
| 929 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 922 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { | 930 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { |
| 923 | ql_log(ql_log_warn, vha, 0xd042, | 931 | ql_log(ql_log_warn, vha, 0xd042, |
| 924 | "%s: Failed to allocate ct_sns request.\n", | 932 | "%s: Failed to allocate ct_sns request.\n", |
| @@ -3388,14 +3396,14 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) | |||
| 3388 | { | 3396 | { |
| 3389 | if (sp->u.iocb_cmd.u.ctarg.req) { | 3397 | if (sp->u.iocb_cmd.u.ctarg.req) { |
| 3390 | dma_free_coherent(&vha->hw->pdev->dev, | 3398 | dma_free_coherent(&vha->hw->pdev->dev, |
| 3391 | sizeof(struct ct_sns_pkt), | 3399 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, |
| 3392 | sp->u.iocb_cmd.u.ctarg.req, | 3400 | sp->u.iocb_cmd.u.ctarg.req, |
| 3393 | sp->u.iocb_cmd.u.ctarg.req_dma); | 3401 | sp->u.iocb_cmd.u.ctarg.req_dma); |
| 3394 | sp->u.iocb_cmd.u.ctarg.req = NULL; | 3402 | sp->u.iocb_cmd.u.ctarg.req = NULL; |
| 3395 | } | 3403 | } |
| 3396 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | 3404 | if (sp->u.iocb_cmd.u.ctarg.rsp) { |
| 3397 | dma_free_coherent(&vha->hw->pdev->dev, | 3405 | dma_free_coherent(&vha->hw->pdev->dev, |
| 3398 | sizeof(struct ct_sns_pkt), | 3406 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, |
| 3399 | sp->u.iocb_cmd.u.ctarg.rsp, | 3407 | sp->u.iocb_cmd.u.ctarg.rsp, |
| 3400 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | 3408 | sp->u.iocb_cmd.u.ctarg.rsp_dma); |
| 3401 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | 3409 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; |
| @@ -3596,14 +3604,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) | |||
| 3596 | /* please ignore kernel warning. otherwise, we have mem leak. */ | 3604 | /* please ignore kernel warning. otherwise, we have mem leak. */ |
| 3597 | if (sp->u.iocb_cmd.u.ctarg.req) { | 3605 | if (sp->u.iocb_cmd.u.ctarg.req) { |
| 3598 | dma_free_coherent(&vha->hw->pdev->dev, | 3606 | dma_free_coherent(&vha->hw->pdev->dev, |
| 3599 | sizeof(struct ct_sns_pkt), | 3607 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, |
| 3600 | sp->u.iocb_cmd.u.ctarg.req, | 3608 | sp->u.iocb_cmd.u.ctarg.req, |
| 3601 | sp->u.iocb_cmd.u.ctarg.req_dma); | 3609 | sp->u.iocb_cmd.u.ctarg.req_dma); |
| 3602 | sp->u.iocb_cmd.u.ctarg.req = NULL; | 3610 | sp->u.iocb_cmd.u.ctarg.req = NULL; |
| 3603 | } | 3611 | } |
| 3604 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | 3612 | if (sp->u.iocb_cmd.u.ctarg.rsp) { |
| 3605 | dma_free_coherent(&vha->hw->pdev->dev, | 3613 | dma_free_coherent(&vha->hw->pdev->dev, |
| 3606 | sizeof(struct ct_sns_pkt), | 3614 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, |
| 3607 | sp->u.iocb_cmd.u.ctarg.rsp, | 3615 | sp->u.iocb_cmd.u.ctarg.rsp, |
| 3608 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | 3616 | sp->u.iocb_cmd.u.ctarg.rsp_dma); |
| 3609 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | 3617 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; |
| @@ -3654,6 +3662,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) | |||
| 3654 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, | 3662 | sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 3655 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, | 3663 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, |
| 3656 | GFP_KERNEL); | 3664 | GFP_KERNEL); |
| 3665 | sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 3657 | if (!sp->u.iocb_cmd.u.ctarg.req) { | 3666 | if (!sp->u.iocb_cmd.u.ctarg.req) { |
| 3658 | ql_log(ql_log_warn, vha, 0xd041, | 3667 | ql_log(ql_log_warn, vha, 0xd041, |
| 3659 | "Failed to allocate ct_sns request.\n"); | 3668 | "Failed to allocate ct_sns request.\n"); |
| @@ -3663,6 +3672,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) | |||
| 3663 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, | 3672 | sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, |
| 3664 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, | 3673 | sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, |
| 3665 | GFP_KERNEL); | 3674 | GFP_KERNEL); |
| 3675 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 3666 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { | 3676 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { |
| 3667 | ql_log(ql_log_warn, vha, 0xd042, | 3677 | ql_log(ql_log_warn, vha, 0xd042, |
| 3668 | "Failed to allocate ct_sns request.\n"); | 3678 | "Failed to allocate ct_sns request.\n"); |
| @@ -4142,14 +4152,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) | |||
| 4142 | */ | 4152 | */ |
| 4143 | if (sp->u.iocb_cmd.u.ctarg.req) { | 4153 | if (sp->u.iocb_cmd.u.ctarg.req) { |
| 4144 | dma_free_coherent(&vha->hw->pdev->dev, | 4154 | dma_free_coherent(&vha->hw->pdev->dev, |
| 4145 | sizeof(struct ct_sns_pkt), | 4155 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, |
| 4146 | sp->u.iocb_cmd.u.ctarg.req, | 4156 | sp->u.iocb_cmd.u.ctarg.req, |
| 4147 | sp->u.iocb_cmd.u.ctarg.req_dma); | 4157 | sp->u.iocb_cmd.u.ctarg.req_dma); |
| 4148 | sp->u.iocb_cmd.u.ctarg.req = NULL; | 4158 | sp->u.iocb_cmd.u.ctarg.req = NULL; |
| 4149 | } | 4159 | } |
| 4150 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | 4160 | if (sp->u.iocb_cmd.u.ctarg.rsp) { |
| 4151 | dma_free_coherent(&vha->hw->pdev->dev, | 4161 | dma_free_coherent(&vha->hw->pdev->dev, |
| 4152 | sizeof(struct ct_sns_pkt), | 4162 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, |
| 4153 | sp->u.iocb_cmd.u.ctarg.rsp, | 4163 | sp->u.iocb_cmd.u.ctarg.rsp, |
| 4154 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | 4164 | sp->u.iocb_cmd.u.ctarg.rsp_dma); |
| 4155 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | 4165 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; |
| @@ -4179,14 +4189,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) | |||
| 4179 | /* please ignore kernel warning. Otherwise, we have mem leak. */ | 4189 | /* please ignore kernel warning. Otherwise, we have mem leak. */ |
| 4180 | if (sp->u.iocb_cmd.u.ctarg.req) { | 4190 | if (sp->u.iocb_cmd.u.ctarg.req) { |
| 4181 | dma_free_coherent(&vha->hw->pdev->dev, | 4191 | dma_free_coherent(&vha->hw->pdev->dev, |
| 4182 | sizeof(struct ct_sns_pkt), | 4192 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, |
| 4183 | sp->u.iocb_cmd.u.ctarg.req, | 4193 | sp->u.iocb_cmd.u.ctarg.req, |
| 4184 | sp->u.iocb_cmd.u.ctarg.req_dma); | 4194 | sp->u.iocb_cmd.u.ctarg.req_dma); |
| 4185 | sp->u.iocb_cmd.u.ctarg.req = NULL; | 4195 | sp->u.iocb_cmd.u.ctarg.req = NULL; |
| 4186 | } | 4196 | } |
| 4187 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | 4197 | if (sp->u.iocb_cmd.u.ctarg.rsp) { |
| 4188 | dma_free_coherent(&vha->hw->pdev->dev, | 4198 | dma_free_coherent(&vha->hw->pdev->dev, |
| 4189 | sizeof(struct ct_sns_pkt), | 4199 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, |
| 4190 | sp->u.iocb_cmd.u.ctarg.rsp, | 4200 | sp->u.iocb_cmd.u.ctarg.rsp, |
| 4191 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | 4201 | sp->u.iocb_cmd.u.ctarg.rsp_dma); |
| 4192 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | 4202 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; |
| @@ -4281,14 +4291,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, | |||
| 4281 | done_free_sp: | 4291 | done_free_sp: |
| 4282 | if (sp->u.iocb_cmd.u.ctarg.req) { | 4292 | if (sp->u.iocb_cmd.u.ctarg.req) { |
| 4283 | dma_free_coherent(&vha->hw->pdev->dev, | 4293 | dma_free_coherent(&vha->hw->pdev->dev, |
| 4284 | sizeof(struct ct_sns_pkt), | 4294 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, |
| 4285 | sp->u.iocb_cmd.u.ctarg.req, | 4295 | sp->u.iocb_cmd.u.ctarg.req, |
| 4286 | sp->u.iocb_cmd.u.ctarg.req_dma); | 4296 | sp->u.iocb_cmd.u.ctarg.req_dma); |
| 4287 | sp->u.iocb_cmd.u.ctarg.req = NULL; | 4297 | sp->u.iocb_cmd.u.ctarg.req = NULL; |
| 4288 | } | 4298 | } |
| 4289 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | 4299 | if (sp->u.iocb_cmd.u.ctarg.rsp) { |
| 4290 | dma_free_coherent(&vha->hw->pdev->dev, | 4300 | dma_free_coherent(&vha->hw->pdev->dev, |
| 4291 | sizeof(struct ct_sns_pkt), | 4301 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, |
| 4292 | sp->u.iocb_cmd.u.ctarg.rsp, | 4302 | sp->u.iocb_cmd.u.ctarg.rsp, |
| 4293 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | 4303 | sp->u.iocb_cmd.u.ctarg.rsp_dma); |
| 4294 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | 4304 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; |
| @@ -4349,6 +4359,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) | |||
| 4349 | sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( | 4359 | sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( |
| 4350 | &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), | 4360 | &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), |
| 4351 | &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); | 4361 | &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); |
| 4362 | sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 4352 | if (!sp->u.iocb_cmd.u.ctarg.req) { | 4363 | if (!sp->u.iocb_cmd.u.ctarg.req) { |
| 4353 | ql_log(ql_log_warn, vha, 0xffff, | 4364 | ql_log(ql_log_warn, vha, 0xffff, |
| 4354 | "Failed to allocate ct_sns request.\n"); | 4365 | "Failed to allocate ct_sns request.\n"); |
| @@ -4366,6 +4377,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) | |||
| 4366 | sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( | 4377 | sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( |
| 4367 | &vha->hw->pdev->dev, rspsz, | 4378 | &vha->hw->pdev->dev, rspsz, |
| 4368 | &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); | 4379 | &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); |
| 4380 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); | ||
| 4369 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { | 4381 | if (!sp->u.iocb_cmd.u.ctarg.rsp) { |
| 4370 | ql_log(ql_log_warn, vha, 0xffff, | 4382 | ql_log(ql_log_warn, vha, 0xffff, |
| 4371 | "Failed to allocate ct_sns request.\n"); | 4383 | "Failed to allocate ct_sns request.\n"); |
| @@ -4425,14 +4437,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) | |||
| 4425 | done_free_sp: | 4437 | done_free_sp: |
| 4426 | if (sp->u.iocb_cmd.u.ctarg.req) { | 4438 | if (sp->u.iocb_cmd.u.ctarg.req) { |
| 4427 | dma_free_coherent(&vha->hw->pdev->dev, | 4439 | dma_free_coherent(&vha->hw->pdev->dev, |
| 4428 | sizeof(struct ct_sns_pkt), | 4440 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, |
| 4429 | sp->u.iocb_cmd.u.ctarg.req, | 4441 | sp->u.iocb_cmd.u.ctarg.req, |
| 4430 | sp->u.iocb_cmd.u.ctarg.req_dma); | 4442 | sp->u.iocb_cmd.u.ctarg.req_dma); |
| 4431 | sp->u.iocb_cmd.u.ctarg.req = NULL; | 4443 | sp->u.iocb_cmd.u.ctarg.req = NULL; |
| 4432 | } | 4444 | } |
| 4433 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | 4445 | if (sp->u.iocb_cmd.u.ctarg.rsp) { |
| 4434 | dma_free_coherent(&vha->hw->pdev->dev, | 4446 | dma_free_coherent(&vha->hw->pdev->dev, |
| 4435 | sizeof(struct ct_sns_pkt), | 4447 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, |
| 4436 | sp->u.iocb_cmd.u.ctarg.rsp, | 4448 | sp->u.iocb_cmd.u.ctarg.rsp, |
| 4437 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | 4449 | sp->u.iocb_cmd.u.ctarg.rsp_dma); |
| 4438 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | 4450 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 7b675243bd16..db0e3279e07a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -591,12 +591,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, | |||
| 591 | conflict_fcport = | 591 | conflict_fcport = |
| 592 | qla2x00_find_fcport_by_wwpn(vha, | 592 | qla2x00_find_fcport_by_wwpn(vha, |
| 593 | e->port_name, 0); | 593 | e->port_name, 0); |
| 594 | ql_dbg(ql_dbg_disc, vha, 0x20e6, | 594 | if (conflict_fcport) { |
| 595 | "%s %d %8phC post del sess\n", | 595 | qlt_schedule_sess_for_deletion |
| 596 | __func__, __LINE__, | 596 | (conflict_fcport); |
| 597 | conflict_fcport->port_name); | 597 | ql_dbg(ql_dbg_disc, vha, 0x20e6, |
| 598 | qlt_schedule_sess_for_deletion | 598 | "%s %d %8phC post del sess\n", |
| 599 | (conflict_fcport); | 599 | __func__, __LINE__, |
| 600 | conflict_fcport->port_name); | ||
| 601 | } | ||
| 600 | } | 602 | } |
| 601 | 603 | ||
| 602 | /* FW already picked this loop id for another fcport */ | 604 | /* FW already picked this loop id for another fcport */ |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e881fce7477a..9f309e572be4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -3180,6 +3180,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 3180 | "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", | 3180 | "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", |
| 3181 | req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); | 3181 | req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); |
| 3182 | 3182 | ||
| 3183 | ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); | ||
| 3184 | |||
| 3183 | if (ha->isp_ops->initialize_adapter(base_vha)) { | 3185 | if (ha->isp_ops->initialize_adapter(base_vha)) { |
| 3184 | ql_log(ql_log_fatal, base_vha, 0x00d6, | 3186 | ql_log(ql_log_fatal, base_vha, 0x00d6, |
| 3185 | "Failed to initialize adapter - Adapter flags %x.\n", | 3187 | "Failed to initialize adapter - Adapter flags %x.\n", |
| @@ -3216,8 +3218,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 3216 | host->can_queue, base_vha->req, | 3218 | host->can_queue, base_vha->req, |
| 3217 | base_vha->mgmt_svr_loop_id, host->sg_tablesize); | 3219 | base_vha->mgmt_svr_loop_id, host->sg_tablesize); |
| 3218 | 3220 | ||
| 3219 | ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); | ||
| 3220 | |||
| 3221 | if (ha->mqenable) { | 3221 | if (ha->mqenable) { |
| 3222 | bool mq = false; | 3222 | bool mq = false; |
| 3223 | bool startit = false; | 3223 | bool startit = false; |
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index a14fef11776e..2bf3bf73886e 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c | |||
| @@ -391,7 +391,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf) | |||
| 391 | * Check that all zones of the device are equal. The last zone can however | 391 | * Check that all zones of the device are equal. The last zone can however |
| 392 | * be smaller. The zone size must also be a power of two number of LBAs. | 392 | * be smaller. The zone size must also be a power of two number of LBAs. |
| 393 | * | 393 | * |
| 394 | * Returns the zone size in bytes upon success or an error code upon failure. | 394 | * Returns the zone size in number of blocks upon success or an error code |
| 395 | * upon failure. | ||
| 395 | */ | 396 | */ |
| 396 | static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) | 397 | static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) |
| 397 | { | 398 | { |
| @@ -401,7 +402,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) | |||
| 401 | unsigned char *rec; | 402 | unsigned char *rec; |
| 402 | unsigned int buf_len; | 403 | unsigned int buf_len; |
| 403 | unsigned int list_length; | 404 | unsigned int list_length; |
| 404 | int ret; | 405 | s64 ret; |
| 405 | u8 same; | 406 | u8 same; |
| 406 | 407 | ||
| 407 | /* Get a buffer */ | 408 | /* Get a buffer */ |
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index b423a309a6e0..125b58eff936 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
| 29 | #include <linux/vfio.h> | 29 | #include <linux/vfio.h> |
| 30 | #include <linux/vgaarb.h> | 30 | #include <linux/vgaarb.h> |
| 31 | #include <linux/nospec.h> | ||
| 31 | 32 | ||
| 32 | #include "vfio_pci_private.h" | 33 | #include "vfio_pci_private.h" |
| 33 | 34 | ||
| @@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data, | |||
| 727 | if (info.index >= | 728 | if (info.index >= |
| 728 | VFIO_PCI_NUM_REGIONS + vdev->num_regions) | 729 | VFIO_PCI_NUM_REGIONS + vdev->num_regions) |
| 729 | return -EINVAL; | 730 | return -EINVAL; |
| 731 | info.index = array_index_nospec(info.index, | ||
| 732 | VFIO_PCI_NUM_REGIONS + | ||
| 733 | vdev->num_regions); | ||
| 730 | 734 | ||
| 731 | i = info.index - VFIO_PCI_NUM_REGIONS; | 735 | i = info.index - VFIO_PCI_NUM_REGIONS; |
| 732 | 736 | ||
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 759a5bdd40e1..7cd63b0c1a46 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | |||
| @@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container, | |||
| 457 | } | 457 | } |
| 458 | 458 | ||
| 459 | static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, | 459 | static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, |
| 460 | unsigned long tce, unsigned long size, | 460 | unsigned long tce, unsigned long shift, |
| 461 | unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) | 461 | unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) |
| 462 | { | 462 | { |
| 463 | long ret = 0; | 463 | long ret = 0; |
| 464 | struct mm_iommu_table_group_mem_t *mem; | 464 | struct mm_iommu_table_group_mem_t *mem; |
| 465 | 465 | ||
| 466 | mem = mm_iommu_lookup(container->mm, tce, size); | 466 | mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); |
| 467 | if (!mem) | 467 | if (!mem) |
| 468 | return -EINVAL; | 468 | return -EINVAL; |
| 469 | 469 | ||
| 470 | ret = mm_iommu_ua_to_hpa(mem, tce, phpa); | 470 | ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa); |
| 471 | if (ret) | 471 | if (ret) |
| 472 | return -EINVAL; | 472 | return -EINVAL; |
| 473 | 473 | ||
| @@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container, | |||
| 487 | if (!pua) | 487 | if (!pua) |
| 488 | return; | 488 | return; |
| 489 | 489 | ||
| 490 | ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), | 490 | ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift, |
| 491 | &hpa, &mem); | 491 | &hpa, &mem); |
| 492 | if (ret) | 492 | if (ret) |
| 493 | pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", | 493 | pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", |
| @@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container, | |||
| 611 | entry + i); | 611 | entry + i); |
| 612 | 612 | ||
| 613 | ret = tce_iommu_prereg_ua_to_hpa(container, | 613 | ret = tce_iommu_prereg_ua_to_hpa(container, |
| 614 | tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); | 614 | tce, tbl->it_page_shift, &hpa, &mem); |
| 615 | if (ret) | 615 | if (ret) |
| 616 | break; | 616 | break; |
| 617 | 617 | ||
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e55843f536bc..b3e45714d28f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -4238,8 +4238,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) | |||
| 4238 | struct extent_map *em; | 4238 | struct extent_map *em; |
| 4239 | u64 start = page_offset(page); | 4239 | u64 start = page_offset(page); |
| 4240 | u64 end = start + PAGE_SIZE - 1; | 4240 | u64 end = start + PAGE_SIZE - 1; |
| 4241 | struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree; | 4241 | struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host); |
| 4242 | struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree; | 4242 | struct extent_io_tree *tree = &btrfs_inode->io_tree; |
| 4243 | struct extent_map_tree *map = &btrfs_inode->extent_tree; | ||
| 4243 | 4244 | ||
| 4244 | if (gfpflags_allow_blocking(mask) && | 4245 | if (gfpflags_allow_blocking(mask) && |
| 4245 | page->mapping->host->i_size > SZ_16M) { | 4246 | page->mapping->host->i_size > SZ_16M) { |
| @@ -4262,6 +4263,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) | |||
| 4262 | extent_map_end(em) - 1, | 4263 | extent_map_end(em) - 1, |
| 4263 | EXTENT_LOCKED | EXTENT_WRITEBACK, | 4264 | EXTENT_LOCKED | EXTENT_WRITEBACK, |
| 4264 | 0, NULL)) { | 4265 | 0, NULL)) { |
| 4266 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | ||
| 4267 | &btrfs_inode->runtime_flags); | ||
| 4265 | remove_extent_mapping(map, em); | 4268 | remove_extent_mapping(map, em); |
| 4266 | /* once for the rb tree */ | 4269 | /* once for the rb tree */ |
| 4267 | free_extent_map(em); | 4270 | free_extent_map(em); |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 43ecbe620dea..b077544b5232 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -3327,11 +3327,13 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp) | |||
| 3327 | if (pg) { | 3327 | if (pg) { |
| 3328 | unlock_page(pg); | 3328 | unlock_page(pg); |
| 3329 | put_page(pg); | 3329 | put_page(pg); |
| 3330 | cmp->src_pages[i] = NULL; | ||
| 3330 | } | 3331 | } |
| 3331 | pg = cmp->dst_pages[i]; | 3332 | pg = cmp->dst_pages[i]; |
| 3332 | if (pg) { | 3333 | if (pg) { |
| 3333 | unlock_page(pg); | 3334 | unlock_page(pg); |
| 3334 | put_page(pg); | 3335 | put_page(pg); |
| 3336 | cmp->dst_pages[i] = NULL; | ||
| 3335 | } | 3337 | } |
| 3336 | } | 3338 | } |
| 3337 | } | 3339 | } |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 572306036477..6702896cdb8f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -1151,11 +1151,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) | |||
| 1151 | return ret; | 1151 | return ret; |
| 1152 | } | 1152 | } |
| 1153 | 1153 | ||
| 1154 | if (sctx->is_dev_replace && !is_metadata && !have_csum) { | ||
| 1155 | sblocks_for_recheck = NULL; | ||
| 1156 | goto nodatasum_case; | ||
| 1157 | } | ||
| 1158 | |||
| 1159 | /* | 1154 | /* |
| 1160 | * read all mirrors one after the other. This includes to | 1155 | * read all mirrors one after the other. This includes to |
| 1161 | * re-read the extent or metadata block that failed (that was | 1156 | * re-read the extent or metadata block that failed (that was |
| @@ -1268,13 +1263,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) | |||
| 1268 | goto out; | 1263 | goto out; |
| 1269 | } | 1264 | } |
| 1270 | 1265 | ||
| 1271 | if (!is_metadata && !have_csum) { | 1266 | /* |
| 1267 | * NOTE: Even for nodatasum case, it's still possible that it's a | ||
| 1268 | * compressed data extent, thus scrub_fixup_nodatasum(), which write | ||
| 1269 | * inode page cache onto disk, could cause serious data corruption. | ||
| 1270 | * | ||
| 1271 | * So here we could only read from disk, and hope our recovery could | ||
| 1272 | * reach disk before the newer write. | ||
| 1273 | */ | ||
| 1274 | if (0 && !is_metadata && !have_csum) { | ||
| 1272 | struct scrub_fixup_nodatasum *fixup_nodatasum; | 1275 | struct scrub_fixup_nodatasum *fixup_nodatasum; |
| 1273 | 1276 | ||
| 1274 | WARN_ON(sctx->is_dev_replace); | 1277 | WARN_ON(sctx->is_dev_replace); |
| 1275 | 1278 | ||
| 1276 | nodatasum_case: | ||
| 1277 | |||
| 1278 | /* | 1279 | /* |
| 1279 | * !is_metadata and !have_csum, this means that the data | 1280 | * !is_metadata and !have_csum, this means that the data |
| 1280 | * might not be COWed, that it might be modified | 1281 | * might not be COWed, that it might be modified |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e034ad9e23b4..1da162928d1a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -1146,6 +1146,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
| 1146 | { | 1146 | { |
| 1147 | int ret; | 1147 | int ret; |
| 1148 | 1148 | ||
| 1149 | mutex_lock(&uuid_mutex); | ||
| 1149 | mutex_lock(&fs_devices->device_list_mutex); | 1150 | mutex_lock(&fs_devices->device_list_mutex); |
| 1150 | if (fs_devices->opened) { | 1151 | if (fs_devices->opened) { |
| 1151 | fs_devices->opened++; | 1152 | fs_devices->opened++; |
| @@ -1155,6 +1156,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
| 1155 | ret = open_fs_devices(fs_devices, flags, holder); | 1156 | ret = open_fs_devices(fs_devices, flags, holder); |
| 1156 | } | 1157 | } |
| 1157 | mutex_unlock(&fs_devices->device_list_mutex); | 1158 | mutex_unlock(&fs_devices->device_list_mutex); |
| 1159 | mutex_unlock(&uuid_mutex); | ||
| 1158 | 1160 | ||
| 1159 | return ret; | 1161 | return ret; |
| 1160 | } | 1162 | } |
| @@ -290,7 +290,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm) | |||
| 290 | struct vm_area_struct *vma = NULL; | 290 | struct vm_area_struct *vma = NULL; |
| 291 | struct mm_struct *mm = bprm->mm; | 291 | struct mm_struct *mm = bprm->mm; |
| 292 | 292 | ||
| 293 | bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 293 | bprm->vma = vma = vm_area_alloc(mm); |
| 294 | if (!vma) | 294 | if (!vma) |
| 295 | return -ENOMEM; | 295 | return -ENOMEM; |
| 296 | 296 | ||
| @@ -298,7 +298,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm) | |||
| 298 | err = -EINTR; | 298 | err = -EINTR; |
| 299 | goto err_free; | 299 | goto err_free; |
| 300 | } | 300 | } |
| 301 | vma->vm_mm = mm; | ||
| 302 | 301 | ||
| 303 | /* | 302 | /* |
| 304 | * Place the stack at the largest stack address the architecture | 303 | * Place the stack at the largest stack address the architecture |
| @@ -311,7 +310,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm) | |||
| 311 | vma->vm_start = vma->vm_end - PAGE_SIZE; | 310 | vma->vm_start = vma->vm_end - PAGE_SIZE; |
| 312 | vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; | 311 | vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; |
| 313 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); | 312 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); |
| 314 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 315 | 313 | ||
| 316 | err = insert_vm_struct(mm, vma); | 314 | err = insert_vm_struct(mm, vma); |
| 317 | if (err) | 315 | if (err) |
| @@ -326,7 +324,7 @@ err: | |||
| 326 | up_write(&mm->mmap_sem); | 324 | up_write(&mm->mmap_sem); |
| 327 | err_free: | 325 | err_free: |
| 328 | bprm->vma = NULL; | 326 | bprm->vma = NULL; |
| 329 | kmem_cache_free(vm_area_cachep, vma); | 327 | vm_area_free(vma); |
| 330 | return err; | 328 | return err; |
| 331 | } | 329 | } |
| 332 | 330 | ||
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 065dc919a0ce..bfd589ea74c0 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
| @@ -707,13 +707,21 @@ static void fat_set_state(struct super_block *sb, | |||
| 707 | brelse(bh); | 707 | brelse(bh); |
| 708 | } | 708 | } |
| 709 | 709 | ||
| 710 | static void fat_reset_iocharset(struct fat_mount_options *opts) | ||
| 711 | { | ||
| 712 | if (opts->iocharset != fat_default_iocharset) { | ||
| 713 | /* Note: opts->iocharset can be NULL here */ | ||
| 714 | kfree(opts->iocharset); | ||
| 715 | opts->iocharset = fat_default_iocharset; | ||
| 716 | } | ||
| 717 | } | ||
| 718 | |||
| 710 | static void delayed_free(struct rcu_head *p) | 719 | static void delayed_free(struct rcu_head *p) |
| 711 | { | 720 | { |
| 712 | struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu); | 721 | struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu); |
| 713 | unload_nls(sbi->nls_disk); | 722 | unload_nls(sbi->nls_disk); |
| 714 | unload_nls(sbi->nls_io); | 723 | unload_nls(sbi->nls_io); |
| 715 | if (sbi->options.iocharset != fat_default_iocharset) | 724 | fat_reset_iocharset(&sbi->options); |
| 716 | kfree(sbi->options.iocharset); | ||
| 717 | kfree(sbi); | 725 | kfree(sbi); |
| 718 | } | 726 | } |
| 719 | 727 | ||
| @@ -1132,7 +1140,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, | |||
| 1132 | opts->fs_fmask = opts->fs_dmask = current_umask(); | 1140 | opts->fs_fmask = opts->fs_dmask = current_umask(); |
| 1133 | opts->allow_utime = -1; | 1141 | opts->allow_utime = -1; |
| 1134 | opts->codepage = fat_default_codepage; | 1142 | opts->codepage = fat_default_codepage; |
| 1135 | opts->iocharset = fat_default_iocharset; | 1143 | fat_reset_iocharset(opts); |
| 1136 | if (is_vfat) { | 1144 | if (is_vfat) { |
| 1137 | opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95; | 1145 | opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95; |
| 1138 | opts->rodir = 0; | 1146 | opts->rodir = 0; |
| @@ -1289,8 +1297,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, | |||
| 1289 | 1297 | ||
| 1290 | /* vfat specific */ | 1298 | /* vfat specific */ |
| 1291 | case Opt_charset: | 1299 | case Opt_charset: |
| 1292 | if (opts->iocharset != fat_default_iocharset) | 1300 | fat_reset_iocharset(opts); |
| 1293 | kfree(opts->iocharset); | ||
| 1294 | iocharset = match_strdup(&args[0]); | 1301 | iocharset = match_strdup(&args[0]); |
| 1295 | if (!iocharset) | 1302 | if (!iocharset) |
| 1296 | return -ENOMEM; | 1303 | return -ENOMEM; |
| @@ -1881,8 +1888,7 @@ out_fail: | |||
| 1881 | iput(fat_inode); | 1888 | iput(fat_inode); |
| 1882 | unload_nls(sbi->nls_io); | 1889 | unload_nls(sbi->nls_io); |
| 1883 | unload_nls(sbi->nls_disk); | 1890 | unload_nls(sbi->nls_disk); |
| 1884 | if (sbi->options.iocharset != fat_default_iocharset) | 1891 | fat_reset_iocharset(&sbi->options); |
| 1885 | kfree(sbi->options.iocharset); | ||
| 1886 | sb->s_fs_info = NULL; | 1892 | sb->s_fs_info = NULL; |
| 1887 | kfree(sbi); | 1893 | kfree(sbi); |
| 1888 | return error; | 1894 | return error; |
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 79795c5fa7c3..d50c2f0a655a 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #ifndef _BPF_CGROUP_H | 2 | #ifndef _BPF_CGROUP_H |
| 3 | #define _BPF_CGROUP_H | 3 | #define _BPF_CGROUP_H |
| 4 | 4 | ||
| 5 | #include <linux/errno.h> | ||
| 5 | #include <linux/jump_label.h> | 6 | #include <linux/jump_label.h> |
| 6 | #include <uapi/linux/bpf.h> | 7 | #include <uapi/linux/bpf.h> |
| 7 | 8 | ||
diff --git a/include/linux/filter.h b/include/linux/filter.h index 300baad62c88..c73dd7396886 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
| @@ -765,8 +765,8 @@ static inline bool bpf_dump_raw_ok(void) | |||
| 765 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | 765 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, |
| 766 | const struct bpf_insn *patch, u32 len); | 766 | const struct bpf_insn *patch, u32 len); |
| 767 | 767 | ||
| 768 | static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, | 768 | static inline int xdp_ok_fwd_dev(const struct net_device *fwd, |
| 769 | struct net_device *fwd) | 769 | unsigned int pktlen) |
| 770 | { | 770 | { |
| 771 | unsigned int len; | 771 | unsigned int len; |
| 772 | 772 | ||
| @@ -774,7 +774,7 @@ static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, | |||
| 774 | return -ENETDOWN; | 774 | return -ENETDOWN; |
| 775 | 775 | ||
| 776 | len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; | 776 | len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; |
| 777 | if (skb->len > len) | 777 | if (pktlen > len) |
| 778 | return -EMSGSIZE; | 778 | return -EMSGSIZE; |
| 779 | 779 | ||
| 780 | return 0; | 780 | return 0; |
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 3efa3b861d44..941b11811f85 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #define __FSL_GUTS_H__ | 16 | #define __FSL_GUTS_H__ |
| 17 | 17 | ||
| 18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 19 | #include <linux/io.h> | ||
| 19 | 20 | ||
| 20 | /** | 21 | /** |
| 21 | * Global Utility Registers. | 22 | * Global Utility Registers. |
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 7843b98e1c6e..c20c7e197d07 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h | |||
| @@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev) | |||
| 105 | 105 | ||
| 106 | static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) | 106 | static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) |
| 107 | { | 107 | { |
| 108 | return -1; | 108 | return -EINVAL; |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, | 111 | static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, |
| 112 | struct bridge_vlan_info *p_vinfo) | 112 | struct bridge_vlan_info *p_vinfo) |
| 113 | { | 113 | { |
| 114 | return -1; | 114 | return -EINVAL; |
| 115 | } | 115 | } |
| 116 | #endif | 116 | #endif |
| 117 | 117 | ||
diff --git a/include/linux/igmp.h b/include/linux/igmp.h index f8231854b5d6..119f53941c12 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h | |||
| @@ -109,6 +109,8 @@ struct ip_mc_list { | |||
| 109 | extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); | 109 | extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); |
| 110 | extern int igmp_rcv(struct sk_buff *); | 110 | extern int igmp_rcv(struct sk_buff *); |
| 111 | extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); | 111 | extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); |
| 112 | extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr, | ||
| 113 | unsigned int mode); | ||
| 112 | extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); | 114 | extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); |
| 113 | extern void ip_mc_drop_socket(struct sock *sk); | 115 | extern void ip_mc_drop_socket(struct sock *sk); |
| 114 | extern int ip_mc_source(int add, int omode, struct sock *sk, | 116 | extern int ip_mc_source(int add, int omode, struct sock *sk, |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1df940196ab2..ef169d67df92 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -121,6 +121,7 @@ | |||
| 121 | #define ecap_srs(e) ((e >> 31) & 0x1) | 121 | #define ecap_srs(e) ((e >> 31) & 0x1) |
| 122 | #define ecap_ers(e) ((e >> 30) & 0x1) | 122 | #define ecap_ers(e) ((e >> 30) & 0x1) |
| 123 | #define ecap_prs(e) ((e >> 29) & 0x1) | 123 | #define ecap_prs(e) ((e >> 29) & 0x1) |
| 124 | #define ecap_broken_pasid(e) ((e >> 28) & 0x1) | ||
| 124 | #define ecap_dis(e) ((e >> 27) & 0x1) | 125 | #define ecap_dis(e) ((e >> 27) & 0x1) |
| 125 | #define ecap_nest(e) ((e >> 26) & 0x1) | 126 | #define ecap_nest(e) ((e >> 26) & 0x1) |
| 126 | #define ecap_mts(e) ((e >> 25) & 0x1) | 127 | #define ecap_mts(e) ((e >> 25) & 0x1) |
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 4f5f8c21e283..1eb6f244588d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | */ | 27 | */ |
| 28 | #define MARVELL_PHY_ID_88E6390 0x01410f90 | 28 | #define MARVELL_PHY_ID_88E6390 0x01410f90 |
| 29 | 29 | ||
| 30 | #define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) | ||
| 31 | |||
| 30 | /* struct phy_device dev_flags definitions */ | 32 | /* struct phy_device dev_flags definitions */ |
| 31 | #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 | 33 | #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 |
| 32 | #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 | 34 | #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 |
diff --git a/include/linux/mm.h b/include/linux/mm.h index a0fbb9ffe380..d3a3842316b8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, | |||
| 155 | * mmap() functions). | 155 | * mmap() functions). |
| 156 | */ | 156 | */ |
| 157 | 157 | ||
| 158 | extern struct kmem_cache *vm_area_cachep; | 158 | struct vm_area_struct *vm_area_alloc(struct mm_struct *); |
| 159 | struct vm_area_struct *vm_area_dup(struct vm_area_struct *); | ||
| 160 | void vm_area_free(struct vm_area_struct *); | ||
| 159 | 161 | ||
| 160 | #ifndef CONFIG_MMU | 162 | #ifndef CONFIG_MMU |
| 161 | extern struct rb_root nommu_region_tree; | 163 | extern struct rb_root nommu_region_tree; |
| @@ -2132,7 +2134,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn, | |||
| 2132 | struct mminit_pfnnid_cache *state); | 2134 | struct mminit_pfnnid_cache *state); |
| 2133 | #endif | 2135 | #endif |
| 2134 | 2136 | ||
| 2135 | #ifdef CONFIG_HAVE_MEMBLOCK | 2137 | #if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP) |
| 2136 | void zero_resv_unavail(void); | 2138 | void zero_resv_unavail(void); |
| 2137 | #else | 2139 | #else |
| 2138 | static inline void zero_resv_unavail(void) {} | 2140 | static inline void zero_resv_unavail(void) {} |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 340029b2fb38..abd5d5e17aee 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -1240,6 +1240,8 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, | |||
| 1240 | unsigned long pci_address_to_pio(phys_addr_t addr); | 1240 | unsigned long pci_address_to_pio(phys_addr_t addr); |
| 1241 | phys_addr_t pci_pio_to_address(unsigned long pio); | 1241 | phys_addr_t pci_pio_to_address(unsigned long pio); |
| 1242 | int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); | 1242 | int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); |
| 1243 | int devm_pci_remap_iospace(struct device *dev, const struct resource *res, | ||
| 1244 | phys_addr_t phys_addr); | ||
| 1243 | void pci_unmap_iospace(struct resource *res); | 1245 | void pci_unmap_iospace(struct resource *res); |
| 1244 | void __iomem *devm_pci_remap_cfgspace(struct device *dev, | 1246 | void __iomem *devm_pci_remap_cfgspace(struct device *dev, |
| 1245 | resource_size_t offset, | 1247 | resource_size_t offset, |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 164cdedf6012..610a201126ee 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t; | |||
| 630 | * @hash: the packet hash | 630 | * @hash: the packet hash |
| 631 | * @queue_mapping: Queue mapping for multiqueue devices | 631 | * @queue_mapping: Queue mapping for multiqueue devices |
| 632 | * @xmit_more: More SKBs are pending for this queue | 632 | * @xmit_more: More SKBs are pending for this queue |
| 633 | * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves | ||
| 633 | * @ndisc_nodetype: router type (from link layer) | 634 | * @ndisc_nodetype: router type (from link layer) |
| 634 | * @ooo_okay: allow the mapping of a socket to a queue to be changed | 635 | * @ooo_okay: allow the mapping of a socket to a queue to be changed |
| 635 | * @l4_hash: indicate hash is a canonical 4-tuple hash over transport | 636 | * @l4_hash: indicate hash is a canonical 4-tuple hash over transport |
| @@ -735,7 +736,7 @@ struct sk_buff { | |||
| 735 | peeked:1, | 736 | peeked:1, |
| 736 | head_frag:1, | 737 | head_frag:1, |
| 737 | xmit_more:1, | 738 | xmit_more:1, |
| 738 | __unused:1; /* one bit hole */ | 739 | pfmemalloc:1; |
| 739 | 740 | ||
| 740 | /* fields enclosed in headers_start/headers_end are copied | 741 | /* fields enclosed in headers_start/headers_end are copied |
| 741 | * using a single memcpy() in __copy_skb_header() | 742 | * using a single memcpy() in __copy_skb_header() |
| @@ -754,31 +755,30 @@ struct sk_buff { | |||
| 754 | 755 | ||
| 755 | __u8 __pkt_type_offset[0]; | 756 | __u8 __pkt_type_offset[0]; |
| 756 | __u8 pkt_type:3; | 757 | __u8 pkt_type:3; |
| 757 | __u8 pfmemalloc:1; | ||
| 758 | __u8 ignore_df:1; | 758 | __u8 ignore_df:1; |
| 759 | |||
| 760 | __u8 nf_trace:1; | 759 | __u8 nf_trace:1; |
| 761 | __u8 ip_summed:2; | 760 | __u8 ip_summed:2; |
| 762 | __u8 ooo_okay:1; | 761 | __u8 ooo_okay:1; |
| 762 | |||
| 763 | __u8 l4_hash:1; | 763 | __u8 l4_hash:1; |
| 764 | __u8 sw_hash:1; | 764 | __u8 sw_hash:1; |
| 765 | __u8 wifi_acked_valid:1; | 765 | __u8 wifi_acked_valid:1; |
| 766 | __u8 wifi_acked:1; | 766 | __u8 wifi_acked:1; |
| 767 | |||
| 768 | __u8 no_fcs:1; | 767 | __u8 no_fcs:1; |
| 769 | /* Indicates the inner headers are valid in the skbuff. */ | 768 | /* Indicates the inner headers are valid in the skbuff. */ |
| 770 | __u8 encapsulation:1; | 769 | __u8 encapsulation:1; |
| 771 | __u8 encap_hdr_csum:1; | 770 | __u8 encap_hdr_csum:1; |
| 772 | __u8 csum_valid:1; | 771 | __u8 csum_valid:1; |
| 772 | |||
| 773 | __u8 csum_complete_sw:1; | 773 | __u8 csum_complete_sw:1; |
| 774 | __u8 csum_level:2; | 774 | __u8 csum_level:2; |
| 775 | __u8 csum_not_inet:1; | 775 | __u8 csum_not_inet:1; |
| 776 | |||
| 777 | __u8 dst_pending_confirm:1; | 776 | __u8 dst_pending_confirm:1; |
| 778 | #ifdef CONFIG_IPV6_NDISC_NODETYPE | 777 | #ifdef CONFIG_IPV6_NDISC_NODETYPE |
| 779 | __u8 ndisc_nodetype:2; | 778 | __u8 ndisc_nodetype:2; |
| 780 | #endif | 779 | #endif |
| 781 | __u8 ipvs_property:1; | 780 | __u8 ipvs_property:1; |
| 781 | |||
| 782 | __u8 inner_protocol_type:1; | 782 | __u8 inner_protocol_type:1; |
| 783 | __u8 remcsum_offload:1; | 783 | __u8 remcsum_offload:1; |
| 784 | #ifdef CONFIG_NET_SWITCHDEV | 784 | #ifdef CONFIG_NET_SWITCHDEV |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 59656fc580df..7b9c82de11cc 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
| @@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) | |||
| 66 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); | 66 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i) | ||
| 70 | { | ||
| 71 | return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == | ||
| 72 | RTF_GATEWAY; | ||
| 73 | } | ||
| 74 | |||
| 69 | void ip6_route_input(struct sk_buff *skb); | 75 | void ip6_route_input(struct sk_buff *skb); |
| 70 | struct dst_entry *ip6_route_input_lookup(struct net *net, | 76 | struct dst_entry *ip6_route_input_lookup(struct net *net, |
| 71 | struct net_device *dev, | 77 | struct net_device *dev, |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 16475c269749..8f73be494503 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
| @@ -355,14 +355,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk, | |||
| 355 | struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, | 355 | struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, |
| 356 | struct ipv6_txoptions *opt, | 356 | struct ipv6_txoptions *opt, |
| 357 | int newtype, | 357 | int newtype, |
| 358 | struct ipv6_opt_hdr __user *newopt, | 358 | struct ipv6_opt_hdr *newopt); |
| 359 | int newoptlen); | ||
| 360 | struct ipv6_txoptions * | ||
| 361 | ipv6_renew_options_kern(struct sock *sk, | ||
| 362 | struct ipv6_txoptions *opt, | ||
| 363 | int newtype, | ||
| 364 | struct ipv6_opt_hdr *newopt, | ||
| 365 | int newoptlen); | ||
| 366 | struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, | 359 | struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, |
| 367 | struct ipv6_txoptions *opt); | 360 | struct ipv6_txoptions *opt); |
| 368 | 361 | ||
| @@ -830,7 +823,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, | |||
| 830 | * to minimize possbility that any useful information to an | 823 | * to minimize possbility that any useful information to an |
| 831 | * attacker is leaked. Only lower 20 bits are relevant. | 824 | * attacker is leaked. Only lower 20 bits are relevant. |
| 832 | */ | 825 | */ |
| 833 | rol32(hash, 16); | 826 | hash = rol32(hash, 16); |
| 834 | 827 | ||
| 835 | flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; | 828 | flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; |
| 836 | 829 | ||
| @@ -1107,6 +1100,8 @@ void ipv6_sysctl_unregister(void); | |||
| 1107 | 1100 | ||
| 1108 | int ipv6_sock_mc_join(struct sock *sk, int ifindex, | 1101 | int ipv6_sock_mc_join(struct sock *sk, int ifindex, |
| 1109 | const struct in6_addr *addr); | 1102 | const struct in6_addr *addr); |
| 1103 | int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, | ||
| 1104 | const struct in6_addr *addr, unsigned int mode); | ||
| 1110 | int ipv6_sock_mc_drop(struct sock *sk, int ifindex, | 1105 | int ipv6_sock_mc_drop(struct sock *sk, int ifindex, |
| 1111 | const struct in6_addr *addr); | 1106 | const struct in6_addr *addr); |
| 1112 | #endif /* _NET_IPV6_H */ | 1107 | #endif /* _NET_IPV6_H */ |
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index e0c0c2558ec4..a05134507e7b 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h | |||
| @@ -65,4 +65,10 @@ extern const struct nft_expr_ops nft_payload_fast_ops; | |||
| 65 | extern struct static_key_false nft_counters_enabled; | 65 | extern struct static_key_false nft_counters_enabled; |
| 66 | extern struct static_key_false nft_trace_enabled; | 66 | extern struct static_key_false nft_trace_enabled; |
| 67 | 67 | ||
| 68 | extern struct nft_set_type nft_set_rhash_type; | ||
| 69 | extern struct nft_set_type nft_set_hash_type; | ||
| 70 | extern struct nft_set_type nft_set_hash_fast_type; | ||
| 71 | extern struct nft_set_type nft_set_rbtree_type; | ||
| 72 | extern struct nft_set_type nft_set_bitmap_type; | ||
| 73 | |||
| 68 | #endif /* _NET_NF_TABLES_CORE_H */ | 74 | #endif /* _NET_NF_TABLES_CORE_H */ |
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h index 9754a50ecde9..4cc64c8446eb 100644 --- a/include/net/netfilter/nf_tproxy.h +++ b/include/net/netfilter/nf_tproxy.h | |||
| @@ -64,7 +64,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb, | |||
| 64 | * belonging to established connections going through that one. | 64 | * belonging to established connections going through that one. |
| 65 | */ | 65 | */ |
| 66 | struct sock * | 66 | struct sock * |
| 67 | nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, | 67 | nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, |
| 68 | const u8 protocol, | 68 | const u8 protocol, |
| 69 | const __be32 saddr, const __be32 daddr, | 69 | const __be32 saddr, const __be32 daddr, |
| 70 | const __be16 sport, const __be16 dport, | 70 | const __be16 sport, const __be16 dport, |
| @@ -103,7 +103,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, | |||
| 103 | struct sock *sk); | 103 | struct sock *sk); |
| 104 | 104 | ||
| 105 | struct sock * | 105 | struct sock * |
| 106 | nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, | 106 | nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, |
| 107 | const u8 protocol, | 107 | const u8 protocol, |
| 108 | const struct in6_addr *saddr, const struct in6_addr *daddr, | 108 | const struct in6_addr *saddr, const struct in6_addr *daddr, |
| 109 | const __be16 sport, const __be16 dport, | 109 | const __be16 sport, const __be16 dport, |
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h index 9470fd7e4350..32d2454c0479 100644 --- a/include/net/tc_act/tc_csum.h +++ b/include/net/tc_act/tc_csum.h | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | #include <linux/tc_act/tc_csum.h> | 7 | #include <linux/tc_act/tc_csum.h> |
| 8 | 8 | ||
| 9 | struct tcf_csum_params { | 9 | struct tcf_csum_params { |
| 10 | int action; | ||
| 11 | u32 update_flags; | 10 | u32 update_flags; |
| 12 | struct rcu_head rcu; | 11 | struct rcu_head rcu; |
| 13 | }; | 12 | }; |
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h index efef0b4b1b2b..46b8c7f1c8d5 100644 --- a/include/net/tc_act/tc_tunnel_key.h +++ b/include/net/tc_act/tc_tunnel_key.h | |||
| @@ -18,7 +18,6 @@ | |||
| 18 | struct tcf_tunnel_key_params { | 18 | struct tcf_tunnel_key_params { |
| 19 | struct rcu_head rcu; | 19 | struct rcu_head rcu; |
| 20 | int tcft_action; | 20 | int tcft_action; |
| 21 | int action; | ||
| 22 | struct metadata_dst *tcft_enc_metadata; | 21 | struct metadata_dst *tcft_enc_metadata; |
| 23 | }; | 22 | }; |
| 24 | 23 | ||
diff --git a/include/net/tcp.h b/include/net/tcp.h index 800582b5dd54..3482d13d655b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
| @@ -828,6 +828,10 @@ struct tcp_skb_cb { | |||
| 828 | 828 | ||
| 829 | #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) | 829 | #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) |
| 830 | 830 | ||
| 831 | static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) | ||
| 832 | { | ||
| 833 | TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); | ||
| 834 | } | ||
| 831 | 835 | ||
| 832 | #if IS_ENABLED(CONFIG_IPV6) | 836 | #if IS_ENABLED(CONFIG_IPV6) |
| 833 | /* This is the variant of inet6_iif() that must be used by TCP, | 837 | /* This is the variant of inet6_iif() that must be used by TCP, |
| @@ -908,8 +912,6 @@ enum tcp_ca_event { | |||
| 908 | CA_EVENT_LOSS, /* loss timeout */ | 912 | CA_EVENT_LOSS, /* loss timeout */ |
| 909 | CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ | 913 | CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ |
| 910 | CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ | 914 | CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ |
| 911 | CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */ | ||
| 912 | CA_EVENT_NON_DELAYED_ACK, | ||
| 913 | }; | 915 | }; |
| 914 | 916 | ||
| 915 | /* Information about inbound ACK, passed to cong_ops->in_ack_event() */ | 917 | /* Information about inbound ACK, passed to cong_ops->in_ack_event() */ |
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 9fe472f2ac95..7161856bcf9c 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h | |||
| @@ -60,6 +60,10 @@ struct xdp_sock { | |||
| 60 | bool zc; | 60 | bool zc; |
| 61 | /* Protects multiple processes in the control path */ | 61 | /* Protects multiple processes in the control path */ |
| 62 | struct mutex mutex; | 62 | struct mutex mutex; |
| 63 | /* Mutual exclusion of NAPI TX thread and sendmsg error paths | ||
| 64 | * in the SKB destructor callback. | ||
| 65 | */ | ||
| 66 | spinlock_t tx_completion_lock; | ||
| 63 | u64 rx_dropped; | 67 | u64 rx_dropped; |
| 64 | }; | 68 | }; |
| 65 | 69 | ||
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 4ca65b56084f..7363f18e65a5 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
| @@ -226,7 +226,7 @@ enum tunable_id { | |||
| 226 | ETHTOOL_TX_COPYBREAK, | 226 | ETHTOOL_TX_COPYBREAK, |
| 227 | ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */ | 227 | ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */ |
| 228 | /* | 228 | /* |
| 229 | * Add your fresh new tubale attribute above and remember to update | 229 | * Add your fresh new tunable attribute above and remember to update |
| 230 | * tunable_strings[] in net/core/ethtool.c | 230 | * tunable_strings[] in net/core/ethtool.c |
| 231 | */ | 231 | */ |
| 232 | __ETHTOOL_TUNABLE_COUNT, | 232 | __ETHTOOL_TUNABLE_COUNT, |
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 29eb659aa77a..e3f6ed8a7064 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h | |||
| @@ -127,6 +127,10 @@ enum { | |||
| 127 | 127 | ||
| 128 | #define TCP_CM_INQ TCP_INQ | 128 | #define TCP_CM_INQ TCP_INQ |
| 129 | 129 | ||
| 130 | #define TCP_REPAIR_ON 1 | ||
| 131 | #define TCP_REPAIR_OFF 0 | ||
| 132 | #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ | ||
| 133 | |||
| 130 | struct tcp_repair_opt { | 134 | struct tcp_repair_opt { |
| 131 | __u32 opt_code; | 135 | __u32 opt_code; |
| 132 | __u32 opt_val; | 136 | __u32 opt_val; |
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2d49d18b793a..e016ac3afa24 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
| @@ -991,16 +991,13 @@ static void btf_int_bits_seq_show(const struct btf *btf, | |||
| 991 | void *data, u8 bits_offset, | 991 | void *data, u8 bits_offset, |
| 992 | struct seq_file *m) | 992 | struct seq_file *m) |
| 993 | { | 993 | { |
| 994 | u16 left_shift_bits, right_shift_bits; | ||
| 994 | u32 int_data = btf_type_int(t); | 995 | u32 int_data = btf_type_int(t); |
| 995 | u16 nr_bits = BTF_INT_BITS(int_data); | 996 | u16 nr_bits = BTF_INT_BITS(int_data); |
| 996 | u16 total_bits_offset; | 997 | u16 total_bits_offset; |
| 997 | u16 nr_copy_bytes; | 998 | u16 nr_copy_bytes; |
| 998 | u16 nr_copy_bits; | 999 | u16 nr_copy_bits; |
| 999 | u8 nr_upper_bits; | 1000 | u64 print_num; |
| 1000 | union { | ||
| 1001 | u64 u64_num; | ||
| 1002 | u8 u8_nums[8]; | ||
| 1003 | } print_num; | ||
| 1004 | 1001 | ||
| 1005 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); | 1002 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); |
| 1006 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); | 1003 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); |
| @@ -1008,21 +1005,20 @@ static void btf_int_bits_seq_show(const struct btf *btf, | |||
| 1008 | nr_copy_bits = nr_bits + bits_offset; | 1005 | nr_copy_bits = nr_bits + bits_offset; |
| 1009 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); | 1006 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); |
| 1010 | 1007 | ||
| 1011 | print_num.u64_num = 0; | 1008 | print_num = 0; |
| 1012 | memcpy(&print_num.u64_num, data, nr_copy_bytes); | 1009 | memcpy(&print_num, data, nr_copy_bytes); |
| 1013 | 1010 | ||
| 1014 | /* Ditch the higher order bits */ | 1011 | #ifdef __BIG_ENDIAN_BITFIELD |
| 1015 | nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits); | 1012 | left_shift_bits = bits_offset; |
| 1016 | if (nr_upper_bits) { | 1013 | #else |
| 1017 | /* We need to mask out some bits of the upper byte. */ | 1014 | left_shift_bits = BITS_PER_U64 - nr_copy_bits; |
| 1018 | u8 mask = (1 << nr_upper_bits) - 1; | 1015 | #endif |
| 1016 | right_shift_bits = BITS_PER_U64 - nr_bits; | ||
| 1019 | 1017 | ||
| 1020 | print_num.u8_nums[nr_copy_bytes - 1] &= mask; | 1018 | print_num <<= left_shift_bits; |
| 1021 | } | 1019 | print_num >>= right_shift_bits; |
| 1022 | |||
| 1023 | print_num.u64_num >>= bits_offset; | ||
| 1024 | 1020 | ||
| 1025 | seq_printf(m, "0x%llx", print_num.u64_num); | 1021 | seq_printf(m, "0x%llx", print_num); |
| 1026 | } | 1022 | } |
| 1027 | 1023 | ||
| 1028 | static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, | 1024 | static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, |
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 642c97f6d1b8..d361fc1e3bf3 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c | |||
| @@ -334,10 +334,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, | |||
| 334 | { | 334 | { |
| 335 | struct net_device *dev = dst->dev; | 335 | struct net_device *dev = dst->dev; |
| 336 | struct xdp_frame *xdpf; | 336 | struct xdp_frame *xdpf; |
| 337 | int err; | ||
| 337 | 338 | ||
| 338 | if (!dev->netdev_ops->ndo_xdp_xmit) | 339 | if (!dev->netdev_ops->ndo_xdp_xmit) |
| 339 | return -EOPNOTSUPP; | 340 | return -EOPNOTSUPP; |
| 340 | 341 | ||
| 342 | err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); | ||
| 343 | if (unlikely(err)) | ||
| 344 | return err; | ||
| 345 | |||
| 341 | xdpf = convert_to_xdp_frame(xdp); | 346 | xdpf = convert_to_xdp_frame(xdp); |
| 342 | if (unlikely(!xdpf)) | 347 | if (unlikely(!xdpf)) |
| 343 | return -EOVERFLOW; | 348 | return -EOVERFLOW; |
| @@ -350,7 +355,7 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, | |||
| 350 | { | 355 | { |
| 351 | int err; | 356 | int err; |
| 352 | 357 | ||
| 353 | err = __xdp_generic_ok_fwd_dev(skb, dst->dev); | 358 | err = xdp_ok_fwd_dev(dst->dev, skb->len); |
| 354 | if (unlikely(err)) | 359 | if (unlikely(err)) |
| 355 | return err; | 360 | return err; |
| 356 | skb->dev = dst->dev; | 361 | skb->dev = dst->dev; |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 3ca2198a6d22..513d9dfcf4ee 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
| @@ -747,13 +747,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | |||
| 747 | * old element will be freed immediately. | 747 | * old element will be freed immediately. |
| 748 | * Otherwise return an error | 748 | * Otherwise return an error |
| 749 | */ | 749 | */ |
| 750 | atomic_dec(&htab->count); | 750 | l_new = ERR_PTR(-E2BIG); |
| 751 | return ERR_PTR(-E2BIG); | 751 | goto dec_count; |
| 752 | } | 752 | } |
| 753 | l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, | 753 | l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, |
| 754 | htab->map.numa_node); | 754 | htab->map.numa_node); |
| 755 | if (!l_new) | 755 | if (!l_new) { |
| 756 | return ERR_PTR(-ENOMEM); | 756 | l_new = ERR_PTR(-ENOMEM); |
| 757 | goto dec_count; | ||
| 758 | } | ||
| 757 | } | 759 | } |
| 758 | 760 | ||
| 759 | memcpy(l_new->key, key, key_size); | 761 | memcpy(l_new->key, key, key_size); |
| @@ -766,7 +768,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | |||
| 766 | GFP_ATOMIC | __GFP_NOWARN); | 768 | GFP_ATOMIC | __GFP_NOWARN); |
| 767 | if (!pptr) { | 769 | if (!pptr) { |
| 768 | kfree(l_new); | 770 | kfree(l_new); |
| 769 | return ERR_PTR(-ENOMEM); | 771 | l_new = ERR_PTR(-ENOMEM); |
| 772 | goto dec_count; | ||
| 770 | } | 773 | } |
| 771 | } | 774 | } |
| 772 | 775 | ||
| @@ -780,6 +783,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | |||
| 780 | 783 | ||
| 781 | l_new->hash = hash; | 784 | l_new->hash = hash; |
| 782 | return l_new; | 785 | return l_new; |
| 786 | dec_count: | ||
| 787 | atomic_dec(&htab->count); | ||
| 788 | return l_new; | ||
| 783 | } | 789 | } |
| 784 | 790 | ||
| 785 | static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, | 791 | static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index cf7b6a6dbd1f..98fb7938beea 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
| @@ -312,10 +312,12 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
| 312 | struct smap_psock *psock; | 312 | struct smap_psock *psock; |
| 313 | struct sock *osk; | 313 | struct sock *osk; |
| 314 | 314 | ||
| 315 | lock_sock(sk); | ||
| 315 | rcu_read_lock(); | 316 | rcu_read_lock(); |
| 316 | psock = smap_psock_sk(sk); | 317 | psock = smap_psock_sk(sk); |
| 317 | if (unlikely(!psock)) { | 318 | if (unlikely(!psock)) { |
| 318 | rcu_read_unlock(); | 319 | rcu_read_unlock(); |
| 320 | release_sock(sk); | ||
| 319 | return sk->sk_prot->close(sk, timeout); | 321 | return sk->sk_prot->close(sk, timeout); |
| 320 | } | 322 | } |
| 321 | 323 | ||
| @@ -371,6 +373,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
| 371 | e = psock_map_pop(sk, psock); | 373 | e = psock_map_pop(sk, psock); |
| 372 | } | 374 | } |
| 373 | rcu_read_unlock(); | 375 | rcu_read_unlock(); |
| 376 | release_sock(sk); | ||
| 374 | close_fun(sk, timeout); | 377 | close_fun(sk, timeout); |
| 375 | } | 378 | } |
| 376 | 379 | ||
| @@ -568,7 +571,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) | |||
| 568 | while (sg[i].length) { | 571 | while (sg[i].length) { |
| 569 | free += sg[i].length; | 572 | free += sg[i].length; |
| 570 | sk_mem_uncharge(sk, sg[i].length); | 573 | sk_mem_uncharge(sk, sg[i].length); |
| 571 | put_page(sg_page(&sg[i])); | 574 | if (!md->skb) |
| 575 | put_page(sg_page(&sg[i])); | ||
| 572 | sg[i].length = 0; | 576 | sg[i].length = 0; |
| 573 | sg[i].page_link = 0; | 577 | sg[i].page_link = 0; |
| 574 | sg[i].offset = 0; | 578 | sg[i].offset = 0; |
| @@ -577,6 +581,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) | |||
| 577 | if (i == MAX_SKB_FRAGS) | 581 | if (i == MAX_SKB_FRAGS) |
| 578 | i = 0; | 582 | i = 0; |
| 579 | } | 583 | } |
| 584 | if (md->skb) | ||
| 585 | consume_skb(md->skb); | ||
| 580 | 586 | ||
| 581 | return free; | 587 | return free; |
| 582 | } | 588 | } |
| @@ -1230,7 +1236,7 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) | |||
| 1230 | */ | 1236 | */ |
| 1231 | TCP_SKB_CB(skb)->bpf.sk_redir = NULL; | 1237 | TCP_SKB_CB(skb)->bpf.sk_redir = NULL; |
| 1232 | skb->sk = psock->sock; | 1238 | skb->sk = psock->sock; |
| 1233 | bpf_compute_data_pointers(skb); | 1239 | bpf_compute_data_end_sk_skb(skb); |
| 1234 | preempt_disable(); | 1240 | preempt_disable(); |
| 1235 | rc = (*prog->bpf_func)(skb, prog->insnsi); | 1241 | rc = (*prog->bpf_func)(skb, prog->insnsi); |
| 1236 | preempt_enable(); | 1242 | preempt_enable(); |
| @@ -1485,7 +1491,7 @@ static int smap_parse_func_strparser(struct strparser *strp, | |||
| 1485 | * any socket yet. | 1491 | * any socket yet. |
| 1486 | */ | 1492 | */ |
| 1487 | skb->sk = psock->sock; | 1493 | skb->sk = psock->sock; |
| 1488 | bpf_compute_data_pointers(skb); | 1494 | bpf_compute_data_end_sk_skb(skb); |
| 1489 | rc = (*prog->bpf_func)(skb, prog->insnsi); | 1495 | rc = (*prog->bpf_func)(skb, prog->insnsi); |
| 1490 | skb->sk = NULL; | 1496 | skb->sk = NULL; |
| 1491 | rcu_read_unlock(); | 1497 | rcu_read_unlock(); |
| @@ -1896,7 +1902,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
| 1896 | e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN); | 1902 | e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN); |
| 1897 | if (!e) { | 1903 | if (!e) { |
| 1898 | err = -ENOMEM; | 1904 | err = -ENOMEM; |
| 1899 | goto out_progs; | 1905 | goto out_free; |
| 1900 | } | 1906 | } |
| 1901 | } | 1907 | } |
| 1902 | 1908 | ||
| @@ -2069,7 +2075,13 @@ static int sock_map_update_elem(struct bpf_map *map, | |||
| 2069 | return -EOPNOTSUPP; | 2075 | return -EOPNOTSUPP; |
| 2070 | } | 2076 | } |
| 2071 | 2077 | ||
| 2078 | lock_sock(skops.sk); | ||
| 2079 | preempt_disable(); | ||
| 2080 | rcu_read_lock(); | ||
| 2072 | err = sock_map_ctx_update_elem(&skops, map, key, flags); | 2081 | err = sock_map_ctx_update_elem(&skops, map, key, flags); |
| 2082 | rcu_read_unlock(); | ||
| 2083 | preempt_enable(); | ||
| 2084 | release_sock(skops.sk); | ||
| 2073 | fput(socket->file); | 2085 | fput(socket->file); |
| 2074 | return err; | 2086 | return err; |
| 2075 | } | 2087 | } |
| @@ -2342,7 +2354,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
| 2342 | if (err) | 2354 | if (err) |
| 2343 | goto err; | 2355 | goto err; |
| 2344 | 2356 | ||
| 2345 | /* bpf_map_update_elem() can be called in_irq() */ | 2357 | /* psock is valid here because otherwise above *ctx_update_elem would |
| 2358 | * have thrown an error. It is safe to skip error check. | ||
| 2359 | */ | ||
| 2360 | psock = smap_psock_sk(sock); | ||
| 2346 | raw_spin_lock_bh(&b->lock); | 2361 | raw_spin_lock_bh(&b->lock); |
| 2347 | l_old = lookup_elem_raw(head, hash, key, key_size); | 2362 | l_old = lookup_elem_raw(head, hash, key, key_size); |
| 2348 | if (l_old && map_flags == BPF_NOEXIST) { | 2363 | if (l_old && map_flags == BPF_NOEXIST) { |
| @@ -2360,12 +2375,6 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
| 2360 | goto bucket_err; | 2375 | goto bucket_err; |
| 2361 | } | 2376 | } |
| 2362 | 2377 | ||
| 2363 | psock = smap_psock_sk(sock); | ||
| 2364 | if (unlikely(!psock)) { | ||
| 2365 | err = -EINVAL; | ||
| 2366 | goto bucket_err; | ||
| 2367 | } | ||
| 2368 | |||
| 2369 | rcu_assign_pointer(e->hash_link, l_new); | 2378 | rcu_assign_pointer(e->hash_link, l_new); |
| 2370 | rcu_assign_pointer(e->htab, | 2379 | rcu_assign_pointer(e->htab, |
| 2371 | container_of(map, struct bpf_htab, map)); | 2380 | container_of(map, struct bpf_htab, map)); |
| @@ -2388,12 +2397,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
| 2388 | raw_spin_unlock_bh(&b->lock); | 2397 | raw_spin_unlock_bh(&b->lock); |
| 2389 | return 0; | 2398 | return 0; |
| 2390 | bucket_err: | 2399 | bucket_err: |
| 2400 | smap_release_sock(psock, sock); | ||
| 2391 | raw_spin_unlock_bh(&b->lock); | 2401 | raw_spin_unlock_bh(&b->lock); |
| 2392 | err: | 2402 | err: |
| 2393 | kfree(e); | 2403 | kfree(e); |
| 2394 | psock = smap_psock_sk(sock); | ||
| 2395 | if (psock) | ||
| 2396 | smap_release_sock(psock, sock); | ||
| 2397 | return err; | 2404 | return err; |
| 2398 | } | 2405 | } |
| 2399 | 2406 | ||
| @@ -2415,7 +2422,13 @@ static int sock_hash_update_elem(struct bpf_map *map, | |||
| 2415 | return -EINVAL; | 2422 | return -EINVAL; |
| 2416 | } | 2423 | } |
| 2417 | 2424 | ||
| 2425 | lock_sock(skops.sk); | ||
| 2426 | preempt_disable(); | ||
| 2427 | rcu_read_lock(); | ||
| 2418 | err = sock_hash_ctx_update_elem(&skops, map, key, flags); | 2428 | err = sock_hash_ctx_update_elem(&skops, map, key, flags); |
| 2429 | rcu_read_unlock(); | ||
| 2430 | preempt_enable(); | ||
| 2431 | release_sock(skops.sk); | ||
| 2419 | fput(socket->file); | 2432 | fput(socket->file); |
| 2420 | return err; | 2433 | return err; |
| 2421 | } | 2434 | } |
| @@ -2472,10 +2485,8 @@ struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) | |||
| 2472 | b = __select_bucket(htab, hash); | 2485 | b = __select_bucket(htab, hash); |
| 2473 | head = &b->head; | 2486 | head = &b->head; |
| 2474 | 2487 | ||
| 2475 | raw_spin_lock_bh(&b->lock); | ||
| 2476 | l = lookup_elem_raw(head, hash, key, key_size); | 2488 | l = lookup_elem_raw(head, hash, key, key_size); |
| 2477 | sk = l ? l->sk : NULL; | 2489 | sk = l ? l->sk : NULL; |
| 2478 | raw_spin_unlock_bh(&b->lock); | ||
| 2479 | return sk; | 2490 | return sk; |
| 2480 | } | 2491 | } |
| 2481 | 2492 | ||
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index d10ecd78105f..a31a1ba0f8ea 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
| @@ -735,7 +735,9 @@ static int map_update_elem(union bpf_attr *attr) | |||
| 735 | if (bpf_map_is_dev_bound(map)) { | 735 | if (bpf_map_is_dev_bound(map)) { |
| 736 | err = bpf_map_offload_update_elem(map, key, value, attr->flags); | 736 | err = bpf_map_offload_update_elem(map, key, value, attr->flags); |
| 737 | goto out; | 737 | goto out; |
| 738 | } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) { | 738 | } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || |
| 739 | map->map_type == BPF_MAP_TYPE_SOCKHASH || | ||
| 740 | map->map_type == BPF_MAP_TYPE_SOCKMAP) { | ||
| 739 | err = map->ops->map_update_elem(map, key, value, attr->flags); | 741 | err = map->ops->map_update_elem(map, key, value, attr->flags); |
| 740 | goto out; | 742 | goto out; |
| 741 | } | 743 | } |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9e2bf834f13a..63aaac52a265 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
| @@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env) | |||
| 5430 | if (insn->code != (BPF_JMP | BPF_CALL) || | 5430 | if (insn->code != (BPF_JMP | BPF_CALL) || |
| 5431 | insn->src_reg != BPF_PSEUDO_CALL) | 5431 | insn->src_reg != BPF_PSEUDO_CALL) |
| 5432 | continue; | 5432 | continue; |
| 5433 | /* Upon error here we cannot fall back to interpreter but | ||
| 5434 | * need a hard reject of the program. Thus -EFAULT is | ||
| 5435 | * propagated in any case. | ||
| 5436 | */ | ||
| 5433 | subprog = find_subprog(env, i + insn->imm + 1); | 5437 | subprog = find_subprog(env, i + insn->imm + 1); |
| 5434 | if (subprog < 0) { | 5438 | if (subprog < 0) { |
| 5435 | WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", | 5439 | WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", |
| @@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) | |||
| 5450 | 5454 | ||
| 5451 | func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); | 5455 | func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); |
| 5452 | if (!func) | 5456 | if (!func) |
| 5453 | return -ENOMEM; | 5457 | goto out_undo_insn; |
| 5454 | 5458 | ||
| 5455 | for (i = 0; i < env->subprog_cnt; i++) { | 5459 | for (i = 0; i < env->subprog_cnt; i++) { |
| 5456 | subprog_start = subprog_end; | 5460 | subprog_start = subprog_end; |
| @@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) | |||
| 5515 | tmp = bpf_int_jit_compile(func[i]); | 5519 | tmp = bpf_int_jit_compile(func[i]); |
| 5516 | if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { | 5520 | if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { |
| 5517 | verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); | 5521 | verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); |
| 5518 | err = -EFAULT; | 5522 | err = -ENOTSUPP; |
| 5519 | goto out_free; | 5523 | goto out_free; |
| 5520 | } | 5524 | } |
| 5521 | cond_resched(); | 5525 | cond_resched(); |
| @@ -5552,6 +5556,7 @@ out_free: | |||
| 5552 | if (func[i]) | 5556 | if (func[i]) |
| 5553 | bpf_jit_free(func[i]); | 5557 | bpf_jit_free(func[i]); |
| 5554 | kfree(func); | 5558 | kfree(func); |
| 5559 | out_undo_insn: | ||
| 5555 | /* cleanup main prog to be interpreted */ | 5560 | /* cleanup main prog to be interpreted */ |
| 5556 | prog->jit_requested = 0; | 5561 | prog->jit_requested = 0; |
| 5557 | for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { | 5562 | for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { |
| @@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env) | |||
| 5578 | err = jit_subprogs(env); | 5583 | err = jit_subprogs(env); |
| 5579 | if (err == 0) | 5584 | if (err == 0) |
| 5580 | return 0; | 5585 | return 0; |
| 5586 | if (err == -EFAULT) | ||
| 5587 | return err; | ||
| 5581 | } | 5588 | } |
| 5582 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | 5589 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON |
| 5583 | for (i = 0; i < prog->len; i++, insn++) { | 5590 | for (i = 0; i < prog->len; i++, insn++) { |
diff --git a/kernel/fork.c b/kernel/fork.c index 9440d61b925c..a191c05e757d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -303,11 +303,38 @@ struct kmem_cache *files_cachep; | |||
| 303 | struct kmem_cache *fs_cachep; | 303 | struct kmem_cache *fs_cachep; |
| 304 | 304 | ||
| 305 | /* SLAB cache for vm_area_struct structures */ | 305 | /* SLAB cache for vm_area_struct structures */ |
| 306 | struct kmem_cache *vm_area_cachep; | 306 | static struct kmem_cache *vm_area_cachep; |
| 307 | 307 | ||
| 308 | /* SLAB cache for mm_struct structures (tsk->mm) */ | 308 | /* SLAB cache for mm_struct structures (tsk->mm) */ |
| 309 | static struct kmem_cache *mm_cachep; | 309 | static struct kmem_cache *mm_cachep; |
| 310 | 310 | ||
| 311 | struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) | ||
| 312 | { | ||
| 313 | struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | ||
| 314 | |||
| 315 | if (vma) { | ||
| 316 | vma->vm_mm = mm; | ||
| 317 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 318 | } | ||
| 319 | return vma; | ||
| 320 | } | ||
| 321 | |||
| 322 | struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) | ||
| 323 | { | ||
| 324 | struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); | ||
| 325 | |||
| 326 | if (new) { | ||
| 327 | *new = *orig; | ||
| 328 | INIT_LIST_HEAD(&new->anon_vma_chain); | ||
| 329 | } | ||
| 330 | return new; | ||
| 331 | } | ||
| 332 | |||
| 333 | void vm_area_free(struct vm_area_struct *vma) | ||
| 334 | { | ||
| 335 | kmem_cache_free(vm_area_cachep, vma); | ||
| 336 | } | ||
| 337 | |||
| 311 | static void account_kernel_stack(struct task_struct *tsk, int account) | 338 | static void account_kernel_stack(struct task_struct *tsk, int account) |
| 312 | { | 339 | { |
| 313 | void *stack = task_stack_page(tsk); | 340 | void *stack = task_stack_page(tsk); |
| @@ -455,11 +482,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, | |||
| 455 | goto fail_nomem; | 482 | goto fail_nomem; |
| 456 | charge = len; | 483 | charge = len; |
| 457 | } | 484 | } |
| 458 | tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); | 485 | tmp = vm_area_dup(mpnt); |
| 459 | if (!tmp) | 486 | if (!tmp) |
| 460 | goto fail_nomem; | 487 | goto fail_nomem; |
| 461 | *tmp = *mpnt; | ||
| 462 | INIT_LIST_HEAD(&tmp->anon_vma_chain); | ||
| 463 | retval = vma_dup_policy(mpnt, tmp); | 488 | retval = vma_dup_policy(mpnt, tmp); |
| 464 | if (retval) | 489 | if (retval) |
| 465 | goto fail_nomem_policy; | 490 | goto fail_nomem_policy; |
| @@ -539,7 +564,7 @@ fail_uprobe_end: | |||
| 539 | fail_nomem_anon_vma_fork: | 564 | fail_nomem_anon_vma_fork: |
| 540 | mpol_put(vma_policy(tmp)); | 565 | mpol_put(vma_policy(tmp)); |
| 541 | fail_nomem_policy: | 566 | fail_nomem_policy: |
| 542 | kmem_cache_free(vm_area_cachep, tmp); | 567 | vm_area_free(tmp); |
| 543 | fail_nomem: | 568 | fail_nomem: |
| 544 | retval = -ENOMEM; | 569 | retval = -ENOMEM; |
| 545 | vm_unacct_memory(charge); | 570 | vm_unacct_memory(charge); |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index fbfc3f1d368a..10c7b51c0d1f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
| @@ -2290,8 +2290,17 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) | |||
| 2290 | if (task_on_rq_queued(p) && p->dl.dl_runtime) | 2290 | if (task_on_rq_queued(p) && p->dl.dl_runtime) |
| 2291 | task_non_contending(p); | 2291 | task_non_contending(p); |
| 2292 | 2292 | ||
| 2293 | if (!task_on_rq_queued(p)) | 2293 | if (!task_on_rq_queued(p)) { |
| 2294 | /* | ||
| 2295 | * Inactive timer is armed. However, p is leaving DEADLINE and | ||
| 2296 | * might migrate away from this rq while continuing to run on | ||
| 2297 | * some other class. We need to remove its contribution from | ||
| 2298 | * this rq running_bw now, or sub_rq_bw (below) will complain. | ||
| 2299 | */ | ||
| 2300 | if (p->dl.dl_non_contending) | ||
| 2301 | sub_running_bw(&p->dl, &rq->dl); | ||
| 2294 | sub_rq_bw(&p->dl, &rq->dl); | 2302 | sub_rq_bw(&p->dl, &rq->dl); |
| 2303 | } | ||
| 2295 | 2304 | ||
| 2296 | /* | 2305 | /* |
| 2297 | * We cannot use inactive_task_timer() to invoke sub_running_bw() | 2306 | * We cannot use inactive_task_timer() to invoke sub_running_bw() |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 900dcfee542c..75ffc1d1a2e0 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -79,12 +79,16 @@ static void wakeup_softirqd(void) | |||
| 79 | 79 | ||
| 80 | /* | 80 | /* |
| 81 | * If ksoftirqd is scheduled, we do not want to process pending softirqs | 81 | * If ksoftirqd is scheduled, we do not want to process pending softirqs |
| 82 | * right now. Let ksoftirqd handle this at its own rate, to get fairness. | 82 | * right now. Let ksoftirqd handle this at its own rate, to get fairness, |
| 83 | * unless we're doing some of the synchronous softirqs. | ||
| 83 | */ | 84 | */ |
| 84 | static bool ksoftirqd_running(void) | 85 | #define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ)) |
| 86 | static bool ksoftirqd_running(unsigned long pending) | ||
| 85 | { | 87 | { |
| 86 | struct task_struct *tsk = __this_cpu_read(ksoftirqd); | 88 | struct task_struct *tsk = __this_cpu_read(ksoftirqd); |
| 87 | 89 | ||
| 90 | if (pending & SOFTIRQ_NOW_MASK) | ||
| 91 | return false; | ||
| 88 | return tsk && (tsk->state == TASK_RUNNING); | 92 | return tsk && (tsk->state == TASK_RUNNING); |
| 89 | } | 93 | } |
| 90 | 94 | ||
| @@ -328,7 +332,7 @@ asmlinkage __visible void do_softirq(void) | |||
| 328 | 332 | ||
| 329 | pending = local_softirq_pending(); | 333 | pending = local_softirq_pending(); |
| 330 | 334 | ||
| 331 | if (pending && !ksoftirqd_running()) | 335 | if (pending && !ksoftirqd_running(pending)) |
| 332 | do_softirq_own_stack(); | 336 | do_softirq_own_stack(); |
| 333 | 337 | ||
| 334 | local_irq_restore(flags); | 338 | local_irq_restore(flags); |
| @@ -355,7 +359,7 @@ void irq_enter(void) | |||
| 355 | 359 | ||
| 356 | static inline void invoke_softirq(void) | 360 | static inline void invoke_softirq(void) |
| 357 | { | 361 | { |
| 358 | if (ksoftirqd_running()) | 362 | if (ksoftirqd_running(local_softirq_pending())) |
| 359 | return; | 363 | return; |
| 360 | 364 | ||
| 361 | if (!force_irqthreads) { | 365 | if (!force_irqthreads) { |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index f89014a2c238..1ff523dae6e2 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
| @@ -270,7 +270,11 @@ unlock: | |||
| 270 | goto retry; | 270 | goto retry; |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | wake_up_q(&wakeq); | 273 | if (!err) { |
| 274 | preempt_disable(); | ||
| 275 | wake_up_q(&wakeq); | ||
| 276 | preempt_enable(); | ||
| 277 | } | ||
| 274 | 278 | ||
| 275 | return err; | 279 | return err; |
| 276 | } | 280 | } |
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 7e43cd54c84c..8be175df3075 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
| @@ -596,15 +596,70 @@ static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset, | |||
| 596 | return ret; | 596 | return ret; |
| 597 | } | 597 | } |
| 598 | 598 | ||
| 599 | static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes, | ||
| 600 | struct iov_iter *i) | ||
| 601 | { | ||
| 602 | struct pipe_inode_info *pipe = i->pipe; | ||
| 603 | size_t n, off, xfer = 0; | ||
| 604 | int idx; | ||
| 605 | |||
| 606 | if (!sanity(i)) | ||
| 607 | return 0; | ||
| 608 | |||
| 609 | bytes = n = push_pipe(i, bytes, &idx, &off); | ||
| 610 | if (unlikely(!n)) | ||
| 611 | return 0; | ||
| 612 | for ( ; n; idx = next_idx(idx, pipe), off = 0) { | ||
| 613 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); | ||
| 614 | unsigned long rem; | ||
| 615 | |||
| 616 | rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr, | ||
| 617 | chunk); | ||
| 618 | i->idx = idx; | ||
| 619 | i->iov_offset = off + chunk - rem; | ||
| 620 | xfer += chunk - rem; | ||
| 621 | if (rem) | ||
| 622 | break; | ||
| 623 | n -= chunk; | ||
| 624 | addr += chunk; | ||
| 625 | } | ||
| 626 | i->count -= xfer; | ||
| 627 | return xfer; | ||
| 628 | } | ||
| 629 | |||
| 630 | /** | ||
| 631 | * _copy_to_iter_mcsafe - copy to user with source-read error exception handling | ||
| 632 | * @addr: source kernel address | ||
| 633 | * @bytes: total transfer length | ||
| 634 | * @iter: destination iterator | ||
| 635 | * | ||
| 636 | * The pmem driver arranges for filesystem-dax to use this facility via | ||
| 637 | * dax_copy_to_iter() for protecting read/write to persistent memory. | ||
| 638 | * Unless / until an architecture can guarantee identical performance | ||
| 639 | * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a | ||
| 640 | * performance regression to switch more users to the mcsafe version. | ||
| 641 | * | ||
| 642 | * Otherwise, the main differences between this and typical _copy_to_iter(). | ||
| 643 | * | ||
| 644 | * * Typical tail/residue handling after a fault retries the copy | ||
| 645 | * byte-by-byte until the fault happens again. Re-triggering machine | ||
| 646 | * checks is potentially fatal so the implementation uses source | ||
| 647 | * alignment and poison alignment assumptions to avoid re-triggering | ||
| 648 | * hardware exceptions. | ||
| 649 | * | ||
| 650 | * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. | ||
| 651 | * Compare to copy_to_iter() where only ITER_IOVEC attempts might return | ||
| 652 | * a short copy. | ||
| 653 | * | ||
| 654 | * See MCSAFE_TEST for self-test. | ||
| 655 | */ | ||
| 599 | size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i) | 656 | size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i) |
| 600 | { | 657 | { |
| 601 | const char *from = addr; | 658 | const char *from = addr; |
| 602 | unsigned long rem, curr_addr, s_addr = (unsigned long) addr; | 659 | unsigned long rem, curr_addr, s_addr = (unsigned long) addr; |
| 603 | 660 | ||
| 604 | if (unlikely(i->type & ITER_PIPE)) { | 661 | if (unlikely(i->type & ITER_PIPE)) |
| 605 | WARN_ON(1); | 662 | return copy_pipe_to_iter_mcsafe(addr, bytes, i); |
| 606 | return 0; | ||
| 607 | } | ||
| 608 | if (iter_is_iovec(i)) | 663 | if (iter_is_iovec(i)) |
| 609 | might_fault(); | 664 | might_fault(); |
| 610 | iterate_and_advance(i, bytes, v, | 665 | iterate_and_advance(i, bytes, v, |
| @@ -701,6 +756,20 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) | |||
| 701 | EXPORT_SYMBOL(_copy_from_iter_nocache); | 756 | EXPORT_SYMBOL(_copy_from_iter_nocache); |
| 702 | 757 | ||
| 703 | #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE | 758 | #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE |
| 759 | /** | ||
| 760 | * _copy_from_iter_flushcache - write destination through cpu cache | ||
| 761 | * @addr: destination kernel address | ||
| 762 | * @bytes: total transfer length | ||
| 763 | * @iter: source iterator | ||
| 764 | * | ||
| 765 | * The pmem driver arranges for filesystem-dax to use this facility via | ||
| 766 | * dax_copy_from_iter() for ensuring that writes to persistent memory | ||
| 767 | * are flushed through the CPU cache. It is differentiated from | ||
| 768 | * _copy_from_iter_nocache() in that guarantees all data is flushed for | ||
| 769 | * all iterator types. The _copy_from_iter_nocache() only attempts to | ||
| 770 | * bypass the cache for the ITER_IOVEC case, and on some archs may use | ||
| 771 | * instructions that strand dirty-data in the cache. | ||
| 772 | */ | ||
| 704 | size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) | 773 | size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) |
| 705 | { | 774 | { |
| 706 | char *to = addr; | 775 | char *to = addr; |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9427b5766134..e5c8586cf717 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -774,7 +774,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter) | |||
| 774 | skip++; | 774 | skip++; |
| 775 | if (list == iter->list) { | 775 | if (list == iter->list) { |
| 776 | iter->p = p; | 776 | iter->p = p; |
| 777 | skip = skip; | 777 | iter->skip = skip; |
| 778 | goto found; | 778 | goto found; |
| 779 | } | 779 | } |
| 780 | } | 780 | } |
| @@ -964,8 +964,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop); | |||
| 964 | 964 | ||
| 965 | static size_t rounded_hashtable_size(const struct rhashtable_params *params) | 965 | static size_t rounded_hashtable_size(const struct rhashtable_params *params) |
| 966 | { | 966 | { |
| 967 | return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), | 967 | size_t retsize; |
| 968 | (unsigned long)params->min_size); | 968 | |
| 969 | if (params->nelem_hint) | ||
| 970 | retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), | ||
| 971 | (unsigned long)params->min_size); | ||
| 972 | else | ||
| 973 | retsize = max(HASH_DEFAULT_SIZE, | ||
| 974 | (unsigned long)params->min_size); | ||
| 975 | |||
| 976 | return retsize; | ||
| 969 | } | 977 | } |
| 970 | 978 | ||
| 971 | static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) | 979 | static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) |
| @@ -1022,8 +1030,6 @@ int rhashtable_init(struct rhashtable *ht, | |||
| 1022 | struct bucket_table *tbl; | 1030 | struct bucket_table *tbl; |
| 1023 | size_t size; | 1031 | size_t size; |
| 1024 | 1032 | ||
| 1025 | size = HASH_DEFAULT_SIZE; | ||
| 1026 | |||
| 1027 | if ((!params->key_len && !params->obj_hashfn) || | 1033 | if ((!params->key_len && !params->obj_hashfn) || |
| 1028 | (params->obj_hashfn && !params->obj_cmpfn)) | 1034 | (params->obj_hashfn && !params->obj_cmpfn)) |
| 1029 | return -EINVAL; | 1035 | return -EINVAL; |
| @@ -1050,8 +1056,7 @@ int rhashtable_init(struct rhashtable *ht, | |||
| 1050 | 1056 | ||
| 1051 | ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); | 1057 | ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); |
| 1052 | 1058 | ||
| 1053 | if (params->nelem_hint) | 1059 | size = rounded_hashtable_size(&ht->p); |
| 1054 | size = rounded_hashtable_size(&ht->p); | ||
| 1055 | 1060 | ||
| 1056 | if (params->locks_mul) | 1061 | if (params->locks_mul) |
| 1057 | ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); | 1062 | ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); |
| @@ -1143,13 +1148,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, | |||
| 1143 | void (*free_fn)(void *ptr, void *arg), | 1148 | void (*free_fn)(void *ptr, void *arg), |
| 1144 | void *arg) | 1149 | void *arg) |
| 1145 | { | 1150 | { |
| 1146 | struct bucket_table *tbl; | 1151 | struct bucket_table *tbl, *next_tbl; |
| 1147 | unsigned int i; | 1152 | unsigned int i; |
| 1148 | 1153 | ||
| 1149 | cancel_work_sync(&ht->run_work); | 1154 | cancel_work_sync(&ht->run_work); |
| 1150 | 1155 | ||
| 1151 | mutex_lock(&ht->mutex); | 1156 | mutex_lock(&ht->mutex); |
| 1152 | tbl = rht_dereference(ht->tbl, ht); | 1157 | tbl = rht_dereference(ht->tbl, ht); |
| 1158 | restart: | ||
| 1153 | if (free_fn) { | 1159 | if (free_fn) { |
| 1154 | for (i = 0; i < tbl->size; i++) { | 1160 | for (i = 0; i < tbl->size; i++) { |
| 1155 | struct rhash_head *pos, *next; | 1161 | struct rhash_head *pos, *next; |
| @@ -1166,7 +1172,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, | |||
| 1166 | } | 1172 | } |
| 1167 | } | 1173 | } |
| 1168 | 1174 | ||
| 1175 | next_tbl = rht_dereference(tbl->future_tbl, ht); | ||
| 1169 | bucket_table_free(tbl); | 1176 | bucket_table_free(tbl); |
| 1177 | if (next_tbl) { | ||
| 1178 | tbl = next_tbl; | ||
| 1179 | goto restart; | ||
| 1180 | } | ||
| 1170 | mutex_unlock(&ht->mutex); | 1181 | mutex_unlock(&ht->mutex); |
| 1171 | } | 1182 | } |
| 1172 | EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); | 1183 | EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1cd7c1a57a14..25346bd99364 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -2084,6 +2084,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 2084 | if (vma_is_dax(vma)) | 2084 | if (vma_is_dax(vma)) |
| 2085 | return; | 2085 | return; |
| 2086 | page = pmd_page(_pmd); | 2086 | page = pmd_page(_pmd); |
| 2087 | if (!PageDirty(page) && pmd_dirty(_pmd)) | ||
| 2088 | set_page_dirty(page); | ||
| 2087 | if (!PageReferenced(page) && pmd_young(_pmd)) | 2089 | if (!PageReferenced(page) && pmd_young(_pmd)) |
| 2088 | SetPageReferenced(page); | 2090 | SetPageReferenced(page); |
| 2089 | page_remove_rmap(page, true); | 2091 | page_remove_rmap(page, true); |
diff --git a/mm/memblock.c b/mm/memblock.c index 11e46f83e1ad..4b5d245fafc1 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/kmemleak.h> | 20 | #include <linux/kmemleak.h> |
| 21 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
| 22 | #include <linux/memblock.h> | 22 | #include <linux/memblock.h> |
| 23 | #include <linux/bootmem.h> | ||
| 23 | 24 | ||
| 24 | #include <asm/sections.h> | 25 | #include <asm/sections.h> |
| 25 | #include <linux/io.h> | 26 | #include <linux/io.h> |
| @@ -1225,6 +1226,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i | |||
| 1225 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); | 1226 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
| 1226 | } | 1227 | } |
| 1227 | 1228 | ||
| 1229 | #if defined(CONFIG_NO_BOOTMEM) | ||
| 1228 | /** | 1230 | /** |
| 1229 | * memblock_virt_alloc_internal - allocate boot memory block | 1231 | * memblock_virt_alloc_internal - allocate boot memory block |
| 1230 | * @size: size of memory block to be allocated in bytes | 1232 | * @size: size of memory block to be allocated in bytes |
| @@ -1432,6 +1434,7 @@ void * __init memblock_virt_alloc_try_nid( | |||
| 1432 | (u64)max_addr); | 1434 | (u64)max_addr); |
| 1433 | return NULL; | 1435 | return NULL; |
| 1434 | } | 1436 | } |
| 1437 | #endif | ||
| 1435 | 1438 | ||
| 1436 | /** | 1439 | /** |
| 1437 | * __memblock_free_early - free boot memory block | 1440 | * __memblock_free_early - free boot memory block |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e6f0d5ef320a..8c0280b3143e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -850,7 +850,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) | |||
| 850 | int nid; | 850 | int nid; |
| 851 | int i; | 851 | int i; |
| 852 | 852 | ||
| 853 | while ((memcg = parent_mem_cgroup(memcg))) { | 853 | for (; memcg; memcg = parent_mem_cgroup(memcg)) { |
| 854 | for_each_node(nid) { | 854 | for_each_node(nid) { |
| 855 | mz = mem_cgroup_nodeinfo(memcg, nid); | 855 | mz = mem_cgroup_nodeinfo(memcg, nid); |
| 856 | for (i = 0; i <= DEF_PRIORITY; i++) { | 856 | for (i = 0; i <= DEF_PRIORITY; i++) { |
| @@ -182,7 +182,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) | |||
| 182 | if (vma->vm_file) | 182 | if (vma->vm_file) |
| 183 | fput(vma->vm_file); | 183 | fput(vma->vm_file); |
| 184 | mpol_put(vma_policy(vma)); | 184 | mpol_put(vma_policy(vma)); |
| 185 | kmem_cache_free(vm_area_cachep, vma); | 185 | vm_area_free(vma); |
| 186 | return next; | 186 | return next; |
| 187 | } | 187 | } |
| 188 | 188 | ||
| @@ -911,7 +911,7 @@ again: | |||
| 911 | anon_vma_merge(vma, next); | 911 | anon_vma_merge(vma, next); |
| 912 | mm->map_count--; | 912 | mm->map_count--; |
| 913 | mpol_put(vma_policy(next)); | 913 | mpol_put(vma_policy(next)); |
| 914 | kmem_cache_free(vm_area_cachep, next); | 914 | vm_area_free(next); |
| 915 | /* | 915 | /* |
| 916 | * In mprotect's case 6 (see comments on vma_merge), | 916 | * In mprotect's case 6 (see comments on vma_merge), |
| 917 | * we must remove another next too. It would clutter | 917 | * we must remove another next too. It would clutter |
| @@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr, | |||
| 1729 | * specific mapper. the address has already been validated, but | 1729 | * specific mapper. the address has already been validated, but |
| 1730 | * not unmapped, but the maps are removed from the list. | 1730 | * not unmapped, but the maps are removed from the list. |
| 1731 | */ | 1731 | */ |
| 1732 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 1732 | vma = vm_area_alloc(mm); |
| 1733 | if (!vma) { | 1733 | if (!vma) { |
| 1734 | error = -ENOMEM; | 1734 | error = -ENOMEM; |
| 1735 | goto unacct_error; | 1735 | goto unacct_error; |
| 1736 | } | 1736 | } |
| 1737 | 1737 | ||
| 1738 | vma->vm_mm = mm; | ||
| 1739 | vma->vm_start = addr; | 1738 | vma->vm_start = addr; |
| 1740 | vma->vm_end = addr + len; | 1739 | vma->vm_end = addr + len; |
| 1741 | vma->vm_flags = vm_flags; | 1740 | vma->vm_flags = vm_flags; |
| 1742 | vma->vm_page_prot = vm_get_page_prot(vm_flags); | 1741 | vma->vm_page_prot = vm_get_page_prot(vm_flags); |
| 1743 | vma->vm_pgoff = pgoff; | 1742 | vma->vm_pgoff = pgoff; |
| 1744 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 1745 | 1743 | ||
| 1746 | if (file) { | 1744 | if (file) { |
| 1747 | if (vm_flags & VM_DENYWRITE) { | 1745 | if (vm_flags & VM_DENYWRITE) { |
| @@ -1832,7 +1830,7 @@ allow_write_and_free_vma: | |||
| 1832 | if (vm_flags & VM_DENYWRITE) | 1830 | if (vm_flags & VM_DENYWRITE) |
| 1833 | allow_write_access(file); | 1831 | allow_write_access(file); |
| 1834 | free_vma: | 1832 | free_vma: |
| 1835 | kmem_cache_free(vm_area_cachep, vma); | 1833 | vm_area_free(vma); |
| 1836 | unacct_error: | 1834 | unacct_error: |
| 1837 | if (charged) | 1835 | if (charged) |
| 1838 | vm_unacct_memory(charged); | 1836 | vm_unacct_memory(charged); |
| @@ -2620,15 +2618,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2620 | return err; | 2618 | return err; |
| 2621 | } | 2619 | } |
| 2622 | 2620 | ||
| 2623 | new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); | 2621 | new = vm_area_dup(vma); |
| 2624 | if (!new) | 2622 | if (!new) |
| 2625 | return -ENOMEM; | 2623 | return -ENOMEM; |
| 2626 | 2624 | ||
| 2627 | /* most fields are the same, copy all, and then fixup */ | ||
| 2628 | *new = *vma; | ||
| 2629 | |||
| 2630 | INIT_LIST_HEAD(&new->anon_vma_chain); | ||
| 2631 | |||
| 2632 | if (new_below) | 2625 | if (new_below) |
| 2633 | new->vm_end = addr; | 2626 | new->vm_end = addr; |
| 2634 | else { | 2627 | else { |
| @@ -2669,7 +2662,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2669 | out_free_mpol: | 2662 | out_free_mpol: |
| 2670 | mpol_put(vma_policy(new)); | 2663 | mpol_put(vma_policy(new)); |
| 2671 | out_free_vma: | 2664 | out_free_vma: |
| 2672 | kmem_cache_free(vm_area_cachep, new); | 2665 | vm_area_free(new); |
| 2673 | return err; | 2666 | return err; |
| 2674 | } | 2667 | } |
| 2675 | 2668 | ||
| @@ -2984,14 +2977,12 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla | |||
| 2984 | /* | 2977 | /* |
| 2985 | * create a vma struct for an anonymous mapping | 2978 | * create a vma struct for an anonymous mapping |
| 2986 | */ | 2979 | */ |
| 2987 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 2980 | vma = vm_area_alloc(mm); |
| 2988 | if (!vma) { | 2981 | if (!vma) { |
| 2989 | vm_unacct_memory(len >> PAGE_SHIFT); | 2982 | vm_unacct_memory(len >> PAGE_SHIFT); |
| 2990 | return -ENOMEM; | 2983 | return -ENOMEM; |
| 2991 | } | 2984 | } |
| 2992 | 2985 | ||
| 2993 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 2994 | vma->vm_mm = mm; | ||
| 2995 | vma->vm_start = addr; | 2986 | vma->vm_start = addr; |
| 2996 | vma->vm_end = addr + len; | 2987 | vma->vm_end = addr + len; |
| 2997 | vma->vm_pgoff = pgoff; | 2988 | vma->vm_pgoff = pgoff; |
| @@ -3202,16 +3193,14 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, | |||
| 3202 | } | 3193 | } |
| 3203 | *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); | 3194 | *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); |
| 3204 | } else { | 3195 | } else { |
| 3205 | new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); | 3196 | new_vma = vm_area_dup(vma); |
| 3206 | if (!new_vma) | 3197 | if (!new_vma) |
| 3207 | goto out; | 3198 | goto out; |
| 3208 | *new_vma = *vma; | ||
| 3209 | new_vma->vm_start = addr; | 3199 | new_vma->vm_start = addr; |
| 3210 | new_vma->vm_end = addr + len; | 3200 | new_vma->vm_end = addr + len; |
| 3211 | new_vma->vm_pgoff = pgoff; | 3201 | new_vma->vm_pgoff = pgoff; |
| 3212 | if (vma_dup_policy(vma, new_vma)) | 3202 | if (vma_dup_policy(vma, new_vma)) |
| 3213 | goto out_free_vma; | 3203 | goto out_free_vma; |
| 3214 | INIT_LIST_HEAD(&new_vma->anon_vma_chain); | ||
| 3215 | if (anon_vma_clone(new_vma, vma)) | 3204 | if (anon_vma_clone(new_vma, vma)) |
| 3216 | goto out_free_mempol; | 3205 | goto out_free_mempol; |
| 3217 | if (new_vma->vm_file) | 3206 | if (new_vma->vm_file) |
| @@ -3226,7 +3215,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, | |||
| 3226 | out_free_mempol: | 3215 | out_free_mempol: |
| 3227 | mpol_put(vma_policy(new_vma)); | 3216 | mpol_put(vma_policy(new_vma)); |
| 3228 | out_free_vma: | 3217 | out_free_vma: |
| 3229 | kmem_cache_free(vm_area_cachep, new_vma); | 3218 | vm_area_free(new_vma); |
| 3230 | out: | 3219 | out: |
| 3231 | return NULL; | 3220 | return NULL; |
| 3232 | } | 3221 | } |
| @@ -3350,12 +3339,10 @@ static struct vm_area_struct *__install_special_mapping( | |||
| 3350 | int ret; | 3339 | int ret; |
| 3351 | struct vm_area_struct *vma; | 3340 | struct vm_area_struct *vma; |
| 3352 | 3341 | ||
| 3353 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 3342 | vma = vm_area_alloc(mm); |
| 3354 | if (unlikely(vma == NULL)) | 3343 | if (unlikely(vma == NULL)) |
| 3355 | return ERR_PTR(-ENOMEM); | 3344 | return ERR_PTR(-ENOMEM); |
| 3356 | 3345 | ||
| 3357 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 3358 | vma->vm_mm = mm; | ||
| 3359 | vma->vm_start = addr; | 3346 | vma->vm_start = addr; |
| 3360 | vma->vm_end = addr + len; | 3347 | vma->vm_end = addr + len; |
| 3361 | 3348 | ||
| @@ -3376,7 +3363,7 @@ static struct vm_area_struct *__install_special_mapping( | |||
| 3376 | return vma; | 3363 | return vma; |
| 3377 | 3364 | ||
| 3378 | out: | 3365 | out: |
| 3379 | kmem_cache_free(vm_area_cachep, vma); | 3366 | vm_area_free(vma); |
| 3380 | return ERR_PTR(ret); | 3367 | return ERR_PTR(ret); |
| 3381 | } | 3368 | } |
| 3382 | 3369 | ||
diff --git a/mm/nommu.c b/mm/nommu.c index 4452d8bd9ae4..1d22fdbf7d7c 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
| @@ -769,7 +769,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) | |||
| 769 | if (vma->vm_file) | 769 | if (vma->vm_file) |
| 770 | fput(vma->vm_file); | 770 | fput(vma->vm_file); |
| 771 | put_nommu_region(vma->vm_region); | 771 | put_nommu_region(vma->vm_region); |
| 772 | kmem_cache_free(vm_area_cachep, vma); | 772 | vm_area_free(vma); |
| 773 | } | 773 | } |
| 774 | 774 | ||
| 775 | /* | 775 | /* |
| @@ -1204,7 +1204,7 @@ unsigned long do_mmap(struct file *file, | |||
| 1204 | if (!region) | 1204 | if (!region) |
| 1205 | goto error_getting_region; | 1205 | goto error_getting_region; |
| 1206 | 1206 | ||
| 1207 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | 1207 | vma = vm_area_alloc(current->mm); |
| 1208 | if (!vma) | 1208 | if (!vma) |
| 1209 | goto error_getting_vma; | 1209 | goto error_getting_vma; |
| 1210 | 1210 | ||
| @@ -1212,7 +1212,6 @@ unsigned long do_mmap(struct file *file, | |||
| 1212 | region->vm_flags = vm_flags; | 1212 | region->vm_flags = vm_flags; |
| 1213 | region->vm_pgoff = pgoff; | 1213 | region->vm_pgoff = pgoff; |
| 1214 | 1214 | ||
| 1215 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
| 1216 | vma->vm_flags = vm_flags; | 1215 | vma->vm_flags = vm_flags; |
| 1217 | vma->vm_pgoff = pgoff; | 1216 | vma->vm_pgoff = pgoff; |
| 1218 | 1217 | ||
| @@ -1368,7 +1367,7 @@ error: | |||
| 1368 | kmem_cache_free(vm_region_jar, region); | 1367 | kmem_cache_free(vm_region_jar, region); |
| 1369 | if (vma->vm_file) | 1368 | if (vma->vm_file) |
| 1370 | fput(vma->vm_file); | 1369 | fput(vma->vm_file); |
| 1371 | kmem_cache_free(vm_area_cachep, vma); | 1370 | vm_area_free(vma); |
| 1372 | return ret; | 1371 | return ret; |
| 1373 | 1372 | ||
| 1374 | sharing_violation: | 1373 | sharing_violation: |
| @@ -1469,14 +1468,13 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1469 | if (!region) | 1468 | if (!region) |
| 1470 | return -ENOMEM; | 1469 | return -ENOMEM; |
| 1471 | 1470 | ||
| 1472 | new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); | 1471 | new = vm_area_dup(vma); |
| 1473 | if (!new) { | 1472 | if (!new) { |
| 1474 | kmem_cache_free(vm_region_jar, region); | 1473 | kmem_cache_free(vm_region_jar, region); |
| 1475 | return -ENOMEM; | 1474 | return -ENOMEM; |
| 1476 | } | 1475 | } |
| 1477 | 1476 | ||
| 1478 | /* most fields are the same, copy all, and then fixup */ | 1477 | /* most fields are the same, copy all, and then fixup */ |
| 1479 | *new = *vma; | ||
| 1480 | *region = *vma->vm_region; | 1478 | *region = *vma->vm_region; |
| 1481 | new->vm_region = region; | 1479 | new->vm_region = region; |
| 1482 | 1480 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5d800d61ddb7..a790ef4be74e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -6383,7 +6383,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, | |||
| 6383 | free_area_init_core(pgdat); | 6383 | free_area_init_core(pgdat); |
| 6384 | } | 6384 | } |
| 6385 | 6385 | ||
| 6386 | #ifdef CONFIG_HAVE_MEMBLOCK | 6386 | #if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP) |
| 6387 | /* | 6387 | /* |
| 6388 | * Only struct pages that are backed by physical memory are zeroed and | 6388 | * Only struct pages that are backed by physical memory are zeroed and |
| 6389 | * initialized by going through __init_single_page(). But, there are some | 6389 | * initialized by going through __init_single_page(). But, there are some |
| @@ -6421,7 +6421,7 @@ void __paginginit zero_resv_unavail(void) | |||
| 6421 | if (pgcnt) | 6421 | if (pgcnt) |
| 6422 | pr_info("Reserved but unavailable: %lld pages", pgcnt); | 6422 | pr_info("Reserved but unavailable: %lld pages", pgcnt); |
| 6423 | } | 6423 | } |
| 6424 | #endif /* CONFIG_HAVE_MEMBLOCK */ | 6424 | #endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */ |
| 6425 | 6425 | ||
| 6426 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 6426 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 6427 | 6427 | ||
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index be09a9883825..73bf6a93a3cf 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
| @@ -2732,7 +2732,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, | |||
| 2732 | { | 2732 | { |
| 2733 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; | 2733 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; |
| 2734 | struct batadv_neigh_node *router; | 2734 | struct batadv_neigh_node *router; |
| 2735 | struct batadv_gw_node *curr_gw; | 2735 | struct batadv_gw_node *curr_gw = NULL; |
| 2736 | int ret = 0; | 2736 | int ret = 0; |
| 2737 | void *hdr; | 2737 | void *hdr; |
| 2738 | 2738 | ||
| @@ -2780,6 +2780,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, | |||
| 2780 | ret = 0; | 2780 | ret = 0; |
| 2781 | 2781 | ||
| 2782 | out: | 2782 | out: |
| 2783 | if (curr_gw) | ||
| 2784 | batadv_gw_node_put(curr_gw); | ||
| 2783 | if (router_ifinfo) | 2785 | if (router_ifinfo) |
| 2784 | batadv_neigh_ifinfo_put(router_ifinfo); | 2786 | batadv_neigh_ifinfo_put(router_ifinfo); |
| 2785 | if (router) | 2787 | if (router) |
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index ec93337ee259..6baec4e68898 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c | |||
| @@ -927,7 +927,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, | |||
| 927 | { | 927 | { |
| 928 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; | 928 | struct batadv_neigh_ifinfo *router_ifinfo = NULL; |
| 929 | struct batadv_neigh_node *router; | 929 | struct batadv_neigh_node *router; |
| 930 | struct batadv_gw_node *curr_gw; | 930 | struct batadv_gw_node *curr_gw = NULL; |
| 931 | int ret = 0; | 931 | int ret = 0; |
| 932 | void *hdr; | 932 | void *hdr; |
| 933 | 933 | ||
| @@ -995,6 +995,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, | |||
| 995 | ret = 0; | 995 | ret = 0; |
| 996 | 996 | ||
| 997 | out: | 997 | out: |
| 998 | if (curr_gw) | ||
| 999 | batadv_gw_node_put(curr_gw); | ||
| 998 | if (router_ifinfo) | 1000 | if (router_ifinfo) |
| 999 | batadv_neigh_ifinfo_put(router_ifinfo); | 1001 | batadv_neigh_ifinfo_put(router_ifinfo); |
| 1000 | if (router) | 1002 | if (router) |
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 4229b01ac7b5..87479c60670e 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include "debugfs.h" | 19 | #include "debugfs.h" |
| 20 | #include "main.h" | 20 | #include "main.h" |
| 21 | 21 | ||
| 22 | #include <linux/dcache.h> | ||
| 22 | #include <linux/debugfs.h> | 23 | #include <linux/debugfs.h> |
| 23 | #include <linux/err.h> | 24 | #include <linux/err.h> |
| 24 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
| @@ -344,6 +345,25 @@ out: | |||
| 344 | } | 345 | } |
| 345 | 346 | ||
| 346 | /** | 347 | /** |
| 348 | * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif | ||
| 349 | * @hard_iface: hard interface which was renamed | ||
| 350 | */ | ||
| 351 | void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface) | ||
| 352 | { | ||
| 353 | const char *name = hard_iface->net_dev->name; | ||
| 354 | struct dentry *dir; | ||
| 355 | struct dentry *d; | ||
| 356 | |||
| 357 | dir = hard_iface->debug_dir; | ||
| 358 | if (!dir) | ||
| 359 | return; | ||
| 360 | |||
| 361 | d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name); | ||
| 362 | if (!d) | ||
| 363 | pr_err("Can't rename debugfs dir to %s\n", name); | ||
| 364 | } | ||
| 365 | |||
| 366 | /** | ||
| 347 | * batadv_debugfs_del_hardif() - delete the base directory for a hard interface | 367 | * batadv_debugfs_del_hardif() - delete the base directory for a hard interface |
| 348 | * in debugfs. | 368 | * in debugfs. |
| 349 | * @hard_iface: hard interface which is deleted. | 369 | * @hard_iface: hard interface which is deleted. |
| @@ -414,6 +434,26 @@ out: | |||
| 414 | } | 434 | } |
| 415 | 435 | ||
| 416 | /** | 436 | /** |
| 437 | * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif | ||
| 438 | * @dev: net_device which was renamed | ||
| 439 | */ | ||
| 440 | void batadv_debugfs_rename_meshif(struct net_device *dev) | ||
| 441 | { | ||
| 442 | struct batadv_priv *bat_priv = netdev_priv(dev); | ||
| 443 | const char *name = dev->name; | ||
| 444 | struct dentry *dir; | ||
| 445 | struct dentry *d; | ||
| 446 | |||
| 447 | dir = bat_priv->debug_dir; | ||
| 448 | if (!dir) | ||
| 449 | return; | ||
| 450 | |||
| 451 | d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name); | ||
| 452 | if (!d) | ||
| 453 | pr_err("Can't rename debugfs dir to %s\n", name); | ||
| 454 | } | ||
| 455 | |||
| 456 | /** | ||
| 417 | * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries | 457 | * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries |
| 418 | * @dev: netdev struct of the soft interface | 458 | * @dev: netdev struct of the soft interface |
| 419 | */ | 459 | */ |
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index 37b069698b04..08a592ffbee5 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h | |||
| @@ -30,8 +30,10 @@ struct net_device; | |||
| 30 | void batadv_debugfs_init(void); | 30 | void batadv_debugfs_init(void); |
| 31 | void batadv_debugfs_destroy(void); | 31 | void batadv_debugfs_destroy(void); |
| 32 | int batadv_debugfs_add_meshif(struct net_device *dev); | 32 | int batadv_debugfs_add_meshif(struct net_device *dev); |
| 33 | void batadv_debugfs_rename_meshif(struct net_device *dev); | ||
| 33 | void batadv_debugfs_del_meshif(struct net_device *dev); | 34 | void batadv_debugfs_del_meshif(struct net_device *dev); |
| 34 | int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface); | 35 | int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface); |
| 36 | void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface); | ||
| 35 | void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface); | 37 | void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface); |
| 36 | 38 | ||
| 37 | #else | 39 | #else |
| @@ -49,6 +51,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev) | |||
| 49 | return 0; | 51 | return 0; |
| 50 | } | 52 | } |
| 51 | 53 | ||
| 54 | static inline void batadv_debugfs_rename_meshif(struct net_device *dev) | ||
| 55 | { | ||
| 56 | } | ||
| 57 | |||
| 52 | static inline void batadv_debugfs_del_meshif(struct net_device *dev) | 58 | static inline void batadv_debugfs_del_meshif(struct net_device *dev) |
| 53 | { | 59 | { |
| 54 | } | 60 | } |
| @@ -60,6 +66,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface) | |||
| 60 | } | 66 | } |
| 61 | 67 | ||
| 62 | static inline | 68 | static inline |
| 69 | void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface) | ||
| 70 | { | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline | ||
| 63 | void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface) | 74 | void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface) |
| 64 | { | 75 | { |
| 65 | } | 76 | } |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index c405d15befd6..2f0d42f2f913 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
| @@ -989,6 +989,32 @@ void batadv_hardif_remove_interfaces(void) | |||
| 989 | rtnl_unlock(); | 989 | rtnl_unlock(); |
| 990 | } | 990 | } |
| 991 | 991 | ||
| 992 | /** | ||
| 993 | * batadv_hard_if_event_softif() - Handle events for soft interfaces | ||
| 994 | * @event: NETDEV_* event to handle | ||
| 995 | * @net_dev: net_device which generated an event | ||
| 996 | * | ||
| 997 | * Return: NOTIFY_* result | ||
| 998 | */ | ||
| 999 | static int batadv_hard_if_event_softif(unsigned long event, | ||
| 1000 | struct net_device *net_dev) | ||
| 1001 | { | ||
| 1002 | struct batadv_priv *bat_priv; | ||
| 1003 | |||
| 1004 | switch (event) { | ||
| 1005 | case NETDEV_REGISTER: | ||
| 1006 | batadv_sysfs_add_meshif(net_dev); | ||
| 1007 | bat_priv = netdev_priv(net_dev); | ||
| 1008 | batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS); | ||
| 1009 | break; | ||
| 1010 | case NETDEV_CHANGENAME: | ||
| 1011 | batadv_debugfs_rename_meshif(net_dev); | ||
| 1012 | break; | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | return NOTIFY_DONE; | ||
| 1016 | } | ||
| 1017 | |||
| 992 | static int batadv_hard_if_event(struct notifier_block *this, | 1018 | static int batadv_hard_if_event(struct notifier_block *this, |
| 993 | unsigned long event, void *ptr) | 1019 | unsigned long event, void *ptr) |
| 994 | { | 1020 | { |
| @@ -997,12 +1023,8 @@ static int batadv_hard_if_event(struct notifier_block *this, | |||
| 997 | struct batadv_hard_iface *primary_if = NULL; | 1023 | struct batadv_hard_iface *primary_if = NULL; |
| 998 | struct batadv_priv *bat_priv; | 1024 | struct batadv_priv *bat_priv; |
| 999 | 1025 | ||
| 1000 | if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) { | 1026 | if (batadv_softif_is_valid(net_dev)) |
| 1001 | batadv_sysfs_add_meshif(net_dev); | 1027 | return batadv_hard_if_event_softif(event, net_dev); |
| 1002 | bat_priv = netdev_priv(net_dev); | ||
| 1003 | batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS); | ||
| 1004 | return NOTIFY_DONE; | ||
| 1005 | } | ||
| 1006 | 1028 | ||
| 1007 | hard_iface = batadv_hardif_get_by_netdev(net_dev); | 1029 | hard_iface = batadv_hardif_get_by_netdev(net_dev); |
| 1008 | if (!hard_iface && (event == NETDEV_REGISTER || | 1030 | if (!hard_iface && (event == NETDEV_REGISTER || |
| @@ -1051,6 +1073,9 @@ static int batadv_hard_if_event(struct notifier_block *this, | |||
| 1051 | if (batadv_is_wifi_hardif(hard_iface)) | 1073 | if (batadv_is_wifi_hardif(hard_iface)) |
| 1052 | hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; | 1074 | hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; |
| 1053 | break; | 1075 | break; |
| 1076 | case NETDEV_CHANGENAME: | ||
| 1077 | batadv_debugfs_rename_hardif(hard_iface); | ||
| 1078 | break; | ||
| 1054 | default: | 1079 | default: |
| 1055 | break; | 1080 | break; |
| 1056 | } | 1081 | } |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 3986551397ca..12a2b7d21376 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -1705,7 +1705,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
| 1705 | ether_addr_copy(common->addr, tt_addr); | 1705 | ether_addr_copy(common->addr, tt_addr); |
| 1706 | common->vid = vid; | 1706 | common->vid = vid; |
| 1707 | 1707 | ||
| 1708 | common->flags = flags; | 1708 | if (!is_multicast_ether_addr(common->addr)) |
| 1709 | common->flags = flags & (~BATADV_TT_SYNC_MASK); | ||
| 1710 | |||
| 1709 | tt_global_entry->roam_at = 0; | 1711 | tt_global_entry->roam_at = 0; |
| 1710 | /* node must store current time in case of roaming. This is | 1712 | /* node must store current time in case of roaming. This is |
| 1711 | * needed to purge this entry out on timeout (if nobody claims | 1713 | * needed to purge this entry out on timeout (if nobody claims |
| @@ -1768,7 +1770,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
| 1768 | * TT_CLIENT_TEMP, therefore they have to be copied in the | 1770 | * TT_CLIENT_TEMP, therefore they have to be copied in the |
| 1769 | * client entry | 1771 | * client entry |
| 1770 | */ | 1772 | */ |
| 1771 | common->flags |= flags & (~BATADV_TT_SYNC_MASK); | 1773 | if (!is_multicast_ether_addr(common->addr)) |
| 1774 | common->flags |= flags & (~BATADV_TT_SYNC_MASK); | ||
| 1772 | 1775 | ||
| 1773 | /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only | 1776 | /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only |
| 1774 | * one originator left in the list and we previously received a | 1777 | * one originator left in the list and we previously received a |
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 68c3578343b4..22a78eedf4b1 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c | |||
| @@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | |||
| 96 | u32 size = kattr->test.data_size_in; | 96 | u32 size = kattr->test.data_size_in; |
| 97 | u32 repeat = kattr->test.repeat; | 97 | u32 repeat = kattr->test.repeat; |
| 98 | u32 retval, duration; | 98 | u32 retval, duration; |
| 99 | int hh_len = ETH_HLEN; | ||
| 99 | struct sk_buff *skb; | 100 | struct sk_buff *skb; |
| 100 | void *data; | 101 | void *data; |
| 101 | int ret; | 102 | int ret; |
| @@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | |||
| 131 | skb_reset_network_header(skb); | 132 | skb_reset_network_header(skb); |
| 132 | 133 | ||
| 133 | if (is_l2) | 134 | if (is_l2) |
| 134 | __skb_push(skb, ETH_HLEN); | 135 | __skb_push(skb, hh_len); |
| 135 | if (is_direct_pkt_access) | 136 | if (is_direct_pkt_access) |
| 136 | bpf_compute_data_pointers(skb); | 137 | bpf_compute_data_pointers(skb); |
| 137 | retval = bpf_test_run(prog, skb, repeat, &duration); | 138 | retval = bpf_test_run(prog, skb, repeat, &duration); |
| 138 | if (!is_l2) | 139 | if (!is_l2) { |
| 139 | __skb_push(skb, ETH_HLEN); | 140 | if (skb_headroom(skb) < hh_len) { |
| 141 | int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); | ||
| 142 | |||
| 143 | if (pskb_expand_head(skb, nhead, 0, GFP_USER)) { | ||
| 144 | kfree_skb(skb); | ||
| 145 | return -ENOMEM; | ||
| 146 | } | ||
| 147 | } | ||
| 148 | memset(__skb_push(skb, hh_len), 0, hh_len); | ||
| 149 | } | ||
| 150 | |||
| 140 | size = skb->len; | 151 | size = skb->len; |
| 141 | /* bpf program can never convert linear skb to non-linear */ | 152 | /* bpf program can never convert linear skb to non-linear */ |
| 142 | if (WARN_ON_ONCE(skb_is_nonlinear(skb))) | 153 | if (WARN_ON_ONCE(skb_is_nonlinear(skb))) |
diff --git a/net/core/filter.c b/net/core/filter.c index 0ca6907d7efe..06da770f543f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp) | |||
| 459 | (!unaligned_ok && offset >= 0 && | 459 | (!unaligned_ok && offset >= 0 && |
| 460 | offset + ip_align >= 0 && | 460 | offset + ip_align >= 0 && |
| 461 | offset + ip_align % size == 0))) { | 461 | offset + ip_align % size == 0))) { |
| 462 | bool ldx_off_ok = offset <= S16_MAX; | ||
| 463 | |||
| 462 | *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); | 464 | *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); |
| 463 | *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); | 465 | *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); |
| 464 | *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian); | 466 | *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, |
| 465 | *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D, | 467 | size, 2 + endian + (!ldx_off_ok * 2)); |
| 466 | offset); | 468 | if (ldx_off_ok) { |
| 469 | *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, | ||
| 470 | BPF_REG_D, offset); | ||
| 471 | } else { | ||
| 472 | *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); | ||
| 473 | *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); | ||
| 474 | *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, | ||
| 475 | BPF_REG_TMP, 0); | ||
| 476 | } | ||
| 467 | if (endian) | 477 | if (endian) |
| 468 | *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); | 478 | *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); |
| 469 | *insn++ = BPF_JMP_A(8); | 479 | *insn++ = BPF_JMP_A(8); |
| @@ -1762,6 +1772,37 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = { | |||
| 1762 | .arg2_type = ARG_ANYTHING, | 1772 | .arg2_type = ARG_ANYTHING, |
| 1763 | }; | 1773 | }; |
| 1764 | 1774 | ||
| 1775 | static inline int sk_skb_try_make_writable(struct sk_buff *skb, | ||
| 1776 | unsigned int write_len) | ||
| 1777 | { | ||
| 1778 | int err = __bpf_try_make_writable(skb, write_len); | ||
| 1779 | |||
| 1780 | bpf_compute_data_end_sk_skb(skb); | ||
| 1781 | return err; | ||
| 1782 | } | ||
| 1783 | |||
| 1784 | BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) | ||
| 1785 | { | ||
| 1786 | /* Idea is the following: should the needed direct read/write | ||
| 1787 | * test fail during runtime, we can pull in more data and redo | ||
| 1788 | * again, since implicitly, we invalidate previous checks here. | ||
| 1789 | * | ||
| 1790 | * Or, since we know how much we need to make read/writeable, | ||
| 1791 | * this can be done once at the program beginning for direct | ||
| 1792 | * access case. By this we overcome limitations of only current | ||
| 1793 | * headroom being accessible. | ||
| 1794 | */ | ||
| 1795 | return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb)); | ||
| 1796 | } | ||
| 1797 | |||
| 1798 | static const struct bpf_func_proto sk_skb_pull_data_proto = { | ||
| 1799 | .func = sk_skb_pull_data, | ||
| 1800 | .gpl_only = false, | ||
| 1801 | .ret_type = RET_INTEGER, | ||
| 1802 | .arg1_type = ARG_PTR_TO_CTX, | ||
| 1803 | .arg2_type = ARG_ANYTHING, | ||
| 1804 | }; | ||
| 1805 | |||
| 1765 | BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, | 1806 | BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, |
| 1766 | u64, from, u64, to, u64, flags) | 1807 | u64, from, u64, to, u64, flags) |
| 1767 | { | 1808 | { |
| @@ -2779,7 +2820,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) | |||
| 2779 | 2820 | ||
| 2780 | static u32 __bpf_skb_max_len(const struct sk_buff *skb) | 2821 | static u32 __bpf_skb_max_len(const struct sk_buff *skb) |
| 2781 | { | 2822 | { |
| 2782 | return skb->dev->mtu + skb->dev->hard_header_len; | 2823 | return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len : |
| 2824 | SKB_MAX_ALLOC; | ||
| 2783 | } | 2825 | } |
| 2784 | 2826 | ||
| 2785 | static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff) | 2827 | static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff) |
| @@ -2863,8 +2905,8 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) | |||
| 2863 | return __skb_trim_rcsum(skb, new_len); | 2905 | return __skb_trim_rcsum(skb, new_len); |
| 2864 | } | 2906 | } |
| 2865 | 2907 | ||
| 2866 | BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, | 2908 | static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, |
| 2867 | u64, flags) | 2909 | u64 flags) |
| 2868 | { | 2910 | { |
| 2869 | u32 max_len = __bpf_skb_max_len(skb); | 2911 | u32 max_len = __bpf_skb_max_len(skb); |
| 2870 | u32 min_len = __bpf_skb_min_len(skb); | 2912 | u32 min_len = __bpf_skb_min_len(skb); |
| @@ -2900,6 +2942,13 @@ BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, | |||
| 2900 | if (!ret && skb_is_gso(skb)) | 2942 | if (!ret && skb_is_gso(skb)) |
| 2901 | skb_gso_reset(skb); | 2943 | skb_gso_reset(skb); |
| 2902 | } | 2944 | } |
| 2945 | return ret; | ||
| 2946 | } | ||
| 2947 | |||
| 2948 | BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, | ||
| 2949 | u64, flags) | ||
| 2950 | { | ||
| 2951 | int ret = __bpf_skb_change_tail(skb, new_len, flags); | ||
| 2903 | 2952 | ||
| 2904 | bpf_compute_data_pointers(skb); | 2953 | bpf_compute_data_pointers(skb); |
| 2905 | return ret; | 2954 | return ret; |
| @@ -2914,9 +2963,27 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = { | |||
| 2914 | .arg3_type = ARG_ANYTHING, | 2963 | .arg3_type = ARG_ANYTHING, |
| 2915 | }; | 2964 | }; |
| 2916 | 2965 | ||
| 2917 | BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, | 2966 | BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, |
| 2918 | u64, flags) | 2967 | u64, flags) |
| 2919 | { | 2968 | { |
| 2969 | int ret = __bpf_skb_change_tail(skb, new_len, flags); | ||
| 2970 | |||
| 2971 | bpf_compute_data_end_sk_skb(skb); | ||
| 2972 | return ret; | ||
| 2973 | } | ||
| 2974 | |||
| 2975 | static const struct bpf_func_proto sk_skb_change_tail_proto = { | ||
| 2976 | .func = sk_skb_change_tail, | ||
| 2977 | .gpl_only = false, | ||
| 2978 | .ret_type = RET_INTEGER, | ||
| 2979 | .arg1_type = ARG_PTR_TO_CTX, | ||
| 2980 | .arg2_type = ARG_ANYTHING, | ||
| 2981 | .arg3_type = ARG_ANYTHING, | ||
| 2982 | }; | ||
| 2983 | |||
| 2984 | static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, | ||
| 2985 | u64 flags) | ||
| 2986 | { | ||
| 2920 | u32 max_len = __bpf_skb_max_len(skb); | 2987 | u32 max_len = __bpf_skb_max_len(skb); |
| 2921 | u32 new_len = skb->len + head_room; | 2988 | u32 new_len = skb->len + head_room; |
| 2922 | int ret; | 2989 | int ret; |
| @@ -2941,8 +3008,16 @@ BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, | |||
| 2941 | skb_reset_mac_header(skb); | 3008 | skb_reset_mac_header(skb); |
| 2942 | } | 3009 | } |
| 2943 | 3010 | ||
| 3011 | return ret; | ||
| 3012 | } | ||
| 3013 | |||
| 3014 | BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, | ||
| 3015 | u64, flags) | ||
| 3016 | { | ||
| 3017 | int ret = __bpf_skb_change_head(skb, head_room, flags); | ||
| 3018 | |||
| 2944 | bpf_compute_data_pointers(skb); | 3019 | bpf_compute_data_pointers(skb); |
| 2945 | return 0; | 3020 | return ret; |
| 2946 | } | 3021 | } |
| 2947 | 3022 | ||
| 2948 | static const struct bpf_func_proto bpf_skb_change_head_proto = { | 3023 | static const struct bpf_func_proto bpf_skb_change_head_proto = { |
| @@ -2954,6 +3029,23 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = { | |||
| 2954 | .arg3_type = ARG_ANYTHING, | 3029 | .arg3_type = ARG_ANYTHING, |
| 2955 | }; | 3030 | }; |
| 2956 | 3031 | ||
| 3032 | BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, | ||
| 3033 | u64, flags) | ||
| 3034 | { | ||
| 3035 | int ret = __bpf_skb_change_head(skb, head_room, flags); | ||
| 3036 | |||
| 3037 | bpf_compute_data_end_sk_skb(skb); | ||
| 3038 | return ret; | ||
| 3039 | } | ||
| 3040 | |||
| 3041 | static const struct bpf_func_proto sk_skb_change_head_proto = { | ||
| 3042 | .func = sk_skb_change_head, | ||
| 3043 | .gpl_only = false, | ||
| 3044 | .ret_type = RET_INTEGER, | ||
| 3045 | .arg1_type = ARG_PTR_TO_CTX, | ||
| 3046 | .arg2_type = ARG_ANYTHING, | ||
| 3047 | .arg3_type = ARG_ANYTHING, | ||
| 3048 | }; | ||
| 2957 | static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) | 3049 | static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) |
| 2958 | { | 3050 | { |
| 2959 | return xdp_data_meta_unsupported(xdp) ? 0 : | 3051 | return xdp_data_meta_unsupported(xdp) ? 0 : |
| @@ -3046,12 +3138,16 @@ static int __bpf_tx_xdp(struct net_device *dev, | |||
| 3046 | u32 index) | 3138 | u32 index) |
| 3047 | { | 3139 | { |
| 3048 | struct xdp_frame *xdpf; | 3140 | struct xdp_frame *xdpf; |
| 3049 | int sent; | 3141 | int err, sent; |
| 3050 | 3142 | ||
| 3051 | if (!dev->netdev_ops->ndo_xdp_xmit) { | 3143 | if (!dev->netdev_ops->ndo_xdp_xmit) { |
| 3052 | return -EOPNOTSUPP; | 3144 | return -EOPNOTSUPP; |
| 3053 | } | 3145 | } |
| 3054 | 3146 | ||
| 3147 | err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); | ||
| 3148 | if (unlikely(err)) | ||
| 3149 | return err; | ||
| 3150 | |||
| 3055 | xdpf = convert_to_xdp_frame(xdp); | 3151 | xdpf = convert_to_xdp_frame(xdp); |
| 3056 | if (unlikely(!xdpf)) | 3152 | if (unlikely(!xdpf)) |
| 3057 | return -EOVERFLOW; | 3153 | return -EOVERFLOW; |
| @@ -3285,7 +3381,8 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, | |||
| 3285 | goto err; | 3381 | goto err; |
| 3286 | } | 3382 | } |
| 3287 | 3383 | ||
| 3288 | if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd)))) | 3384 | err = xdp_ok_fwd_dev(fwd, skb->len); |
| 3385 | if (unlikely(err)) | ||
| 3289 | goto err; | 3386 | goto err; |
| 3290 | 3387 | ||
| 3291 | skb->dev = fwd; | 3388 | skb->dev = fwd; |
| @@ -4439,10 +4536,10 @@ static const struct bpf_func_proto bpf_lwt_push_encap_proto = { | |||
| 4439 | .arg4_type = ARG_CONST_SIZE | 4536 | .arg4_type = ARG_CONST_SIZE |
| 4440 | }; | 4537 | }; |
| 4441 | 4538 | ||
| 4539 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) | ||
| 4442 | BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, | 4540 | BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, |
| 4443 | const void *, from, u32, len) | 4541 | const void *, from, u32, len) |
| 4444 | { | 4542 | { |
| 4445 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) | ||
| 4446 | struct seg6_bpf_srh_state *srh_state = | 4543 | struct seg6_bpf_srh_state *srh_state = |
| 4447 | this_cpu_ptr(&seg6_bpf_srh_states); | 4544 | this_cpu_ptr(&seg6_bpf_srh_states); |
| 4448 | void *srh_tlvs, *srh_end, *ptr; | 4545 | void *srh_tlvs, *srh_end, *ptr; |
| @@ -4468,9 +4565,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, | |||
| 4468 | 4565 | ||
| 4469 | memcpy(skb->data + offset, from, len); | 4566 | memcpy(skb->data + offset, from, len); |
| 4470 | return 0; | 4567 | return 0; |
| 4471 | #else /* CONFIG_IPV6_SEG6_BPF */ | ||
| 4472 | return -EOPNOTSUPP; | ||
| 4473 | #endif | ||
| 4474 | } | 4568 | } |
| 4475 | 4569 | ||
| 4476 | static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { | 4570 | static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { |
| @@ -4486,7 +4580,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { | |||
| 4486 | BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, | 4580 | BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, |
| 4487 | u32, action, void *, param, u32, param_len) | 4581 | u32, action, void *, param, u32, param_len) |
| 4488 | { | 4582 | { |
| 4489 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) | ||
| 4490 | struct seg6_bpf_srh_state *srh_state = | 4583 | struct seg6_bpf_srh_state *srh_state = |
| 4491 | this_cpu_ptr(&seg6_bpf_srh_states); | 4584 | this_cpu_ptr(&seg6_bpf_srh_states); |
| 4492 | struct ipv6_sr_hdr *srh; | 4585 | struct ipv6_sr_hdr *srh; |
| @@ -4534,9 +4627,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, | |||
| 4534 | default: | 4627 | default: |
| 4535 | return -EINVAL; | 4628 | return -EINVAL; |
| 4536 | } | 4629 | } |
| 4537 | #else /* CONFIG_IPV6_SEG6_BPF */ | ||
| 4538 | return -EOPNOTSUPP; | ||
| 4539 | #endif | ||
| 4540 | } | 4630 | } |
| 4541 | 4631 | ||
| 4542 | static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { | 4632 | static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { |
| @@ -4552,7 +4642,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { | |||
| 4552 | BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, | 4642 | BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, |
| 4553 | s32, len) | 4643 | s32, len) |
| 4554 | { | 4644 | { |
| 4555 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) | ||
| 4556 | struct seg6_bpf_srh_state *srh_state = | 4645 | struct seg6_bpf_srh_state *srh_state = |
| 4557 | this_cpu_ptr(&seg6_bpf_srh_states); | 4646 | this_cpu_ptr(&seg6_bpf_srh_states); |
| 4558 | void *srh_end, *srh_tlvs, *ptr; | 4647 | void *srh_end, *srh_tlvs, *ptr; |
| @@ -4596,9 +4685,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, | |||
| 4596 | srh_state->hdrlen += len; | 4685 | srh_state->hdrlen += len; |
| 4597 | srh_state->valid = 0; | 4686 | srh_state->valid = 0; |
| 4598 | return 0; | 4687 | return 0; |
| 4599 | #else /* CONFIG_IPV6_SEG6_BPF */ | ||
| 4600 | return -EOPNOTSUPP; | ||
| 4601 | #endif | ||
| 4602 | } | 4688 | } |
| 4603 | 4689 | ||
| 4604 | static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { | 4690 | static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { |
| @@ -4609,6 +4695,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { | |||
| 4609 | .arg2_type = ARG_ANYTHING, | 4695 | .arg2_type = ARG_ANYTHING, |
| 4610 | .arg3_type = ARG_ANYTHING, | 4696 | .arg3_type = ARG_ANYTHING, |
| 4611 | }; | 4697 | }; |
| 4698 | #endif /* CONFIG_IPV6_SEG6_BPF */ | ||
| 4612 | 4699 | ||
| 4613 | bool bpf_helper_changes_pkt_data(void *func) | 4700 | bool bpf_helper_changes_pkt_data(void *func) |
| 4614 | { | 4701 | { |
| @@ -4617,9 +4704,12 @@ bool bpf_helper_changes_pkt_data(void *func) | |||
| 4617 | func == bpf_skb_store_bytes || | 4704 | func == bpf_skb_store_bytes || |
| 4618 | func == bpf_skb_change_proto || | 4705 | func == bpf_skb_change_proto || |
| 4619 | func == bpf_skb_change_head || | 4706 | func == bpf_skb_change_head || |
| 4707 | func == sk_skb_change_head || | ||
| 4620 | func == bpf_skb_change_tail || | 4708 | func == bpf_skb_change_tail || |
| 4709 | func == sk_skb_change_tail || | ||
| 4621 | func == bpf_skb_adjust_room || | 4710 | func == bpf_skb_adjust_room || |
| 4622 | func == bpf_skb_pull_data || | 4711 | func == bpf_skb_pull_data || |
| 4712 | func == sk_skb_pull_data || | ||
| 4623 | func == bpf_clone_redirect || | 4713 | func == bpf_clone_redirect || |
| 4624 | func == bpf_l3_csum_replace || | 4714 | func == bpf_l3_csum_replace || |
| 4625 | func == bpf_l4_csum_replace || | 4715 | func == bpf_l4_csum_replace || |
| @@ -4627,11 +4717,12 @@ bool bpf_helper_changes_pkt_data(void *func) | |||
| 4627 | func == bpf_xdp_adjust_meta || | 4717 | func == bpf_xdp_adjust_meta || |
| 4628 | func == bpf_msg_pull_data || | 4718 | func == bpf_msg_pull_data || |
| 4629 | func == bpf_xdp_adjust_tail || | 4719 | func == bpf_xdp_adjust_tail || |
| 4630 | func == bpf_lwt_push_encap || | 4720 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) |
| 4631 | func == bpf_lwt_seg6_store_bytes || | 4721 | func == bpf_lwt_seg6_store_bytes || |
| 4632 | func == bpf_lwt_seg6_adjust_srh || | 4722 | func == bpf_lwt_seg6_adjust_srh || |
| 4633 | func == bpf_lwt_seg6_action | 4723 | func == bpf_lwt_seg6_action || |
| 4634 | ) | 4724 | #endif |
| 4725 | func == bpf_lwt_push_encap) | ||
| 4635 | return true; | 4726 | return true; |
| 4636 | 4727 | ||
| 4637 | return false; | 4728 | return false; |
| @@ -4871,11 +4962,11 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |||
| 4871 | case BPF_FUNC_skb_load_bytes: | 4962 | case BPF_FUNC_skb_load_bytes: |
| 4872 | return &bpf_skb_load_bytes_proto; | 4963 | return &bpf_skb_load_bytes_proto; |
| 4873 | case BPF_FUNC_skb_pull_data: | 4964 | case BPF_FUNC_skb_pull_data: |
| 4874 | return &bpf_skb_pull_data_proto; | 4965 | return &sk_skb_pull_data_proto; |
| 4875 | case BPF_FUNC_skb_change_tail: | 4966 | case BPF_FUNC_skb_change_tail: |
| 4876 | return &bpf_skb_change_tail_proto; | 4967 | return &sk_skb_change_tail_proto; |
| 4877 | case BPF_FUNC_skb_change_head: | 4968 | case BPF_FUNC_skb_change_head: |
| 4878 | return &bpf_skb_change_head_proto; | 4969 | return &sk_skb_change_head_proto; |
| 4879 | case BPF_FUNC_get_socket_cookie: | 4970 | case BPF_FUNC_get_socket_cookie: |
| 4880 | return &bpf_get_socket_cookie_proto; | 4971 | return &bpf_get_socket_cookie_proto; |
| 4881 | case BPF_FUNC_get_socket_uid: | 4972 | case BPF_FUNC_get_socket_uid: |
| @@ -4966,12 +5057,14 @@ static const struct bpf_func_proto * | |||
| 4966 | lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | 5057 | lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
| 4967 | { | 5058 | { |
| 4968 | switch (func_id) { | 5059 | switch (func_id) { |
| 5060 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) | ||
| 4969 | case BPF_FUNC_lwt_seg6_store_bytes: | 5061 | case BPF_FUNC_lwt_seg6_store_bytes: |
| 4970 | return &bpf_lwt_seg6_store_bytes_proto; | 5062 | return &bpf_lwt_seg6_store_bytes_proto; |
| 4971 | case BPF_FUNC_lwt_seg6_action: | 5063 | case BPF_FUNC_lwt_seg6_action: |
| 4972 | return &bpf_lwt_seg6_action_proto; | 5064 | return &bpf_lwt_seg6_action_proto; |
| 4973 | case BPF_FUNC_lwt_seg6_adjust_srh: | 5065 | case BPF_FUNC_lwt_seg6_adjust_srh: |
| 4974 | return &bpf_lwt_seg6_adjust_srh_proto; | 5066 | return &bpf_lwt_seg6_adjust_srh_proto; |
| 5067 | #endif | ||
| 4975 | default: | 5068 | default: |
| 4976 | return lwt_out_func_proto(func_id, prog); | 5069 | return lwt_out_func_proto(func_id, prog); |
| 4977 | } | 5070 | } |
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index b2b2323bdc84..188d693cb251 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c | |||
| @@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, | |||
| 77 | d->lock = lock; | 77 | d->lock = lock; |
| 78 | spin_lock_bh(lock); | 78 | spin_lock_bh(lock); |
| 79 | } | 79 | } |
| 80 | if (d->tail) | 80 | if (d->tail) { |
| 81 | return gnet_stats_copy(d, type, NULL, 0, padattr); | 81 | int ret = gnet_stats_copy(d, type, NULL, 0, padattr); |
| 82 | |||
| 83 | /* The initial attribute added in gnet_stats_copy() may be | ||
| 84 | * preceded by a padding attribute, in which case d->tail will | ||
| 85 | * end up pointing at the padding instead of the real attribute. | ||
| 86 | * Fix this so gnet_stats_finish_copy() adjusts the length of | ||
| 87 | * the right attribute. | ||
| 88 | */ | ||
| 89 | if (ret == 0 && d->tail->nla_type == padattr) | ||
| 90 | d->tail = (struct nlattr *)((char *)d->tail + | ||
| 91 | NLA_ALIGN(d->tail->nla_len)); | ||
| 92 | return ret; | ||
| 93 | } | ||
| 82 | 94 | ||
| 83 | return 0; | 95 | return 0; |
| 84 | } | 96 | } |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index eba8dae22c25..8e51f8555e11 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
| 858 | n->cloned = 1; | 858 | n->cloned = 1; |
| 859 | n->nohdr = 0; | 859 | n->nohdr = 0; |
| 860 | n->peeked = 0; | 860 | n->peeked = 0; |
| 861 | C(pfmemalloc); | ||
| 861 | n->destructor = NULL; | 862 | n->destructor = NULL; |
| 862 | C(tail); | 863 | C(tail); |
| 863 | C(end); | 864 | C(end); |
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 40c851693f77..0c9478b91fa5 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c | |||
| @@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) | |||
| 86 | opt++; | 86 | opt++; |
| 87 | kdebug("options: '%s'", opt); | 87 | kdebug("options: '%s'", opt); |
| 88 | do { | 88 | do { |
| 89 | int opt_len, opt_nlen; | ||
| 89 | const char *eq; | 90 | const char *eq; |
| 90 | int opt_len, opt_nlen, opt_vlen, tmp; | 91 | char optval[128]; |
| 91 | 92 | ||
| 92 | next_opt = memchr(opt, '#', end - opt) ?: end; | 93 | next_opt = memchr(opt, '#', end - opt) ?: end; |
| 93 | opt_len = next_opt - opt; | 94 | opt_len = next_opt - opt; |
| 94 | if (opt_len <= 0 || opt_len > 128) { | 95 | if (opt_len <= 0 || opt_len > sizeof(optval)) { |
| 95 | pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", | 96 | pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", |
| 96 | opt_len); | 97 | opt_len); |
| 97 | return -EINVAL; | 98 | return -EINVAL; |
| 98 | } | 99 | } |
| 99 | 100 | ||
| 100 | eq = memchr(opt, '=', opt_len) ?: end; | 101 | eq = memchr(opt, '=', opt_len); |
| 101 | opt_nlen = eq - opt; | 102 | if (eq) { |
| 102 | eq++; | 103 | opt_nlen = eq - opt; |
| 103 | opt_vlen = next_opt - eq; /* will be -1 if no value */ | 104 | eq++; |
| 105 | memcpy(optval, eq, next_opt - eq); | ||
| 106 | optval[next_opt - eq] = '\0'; | ||
| 107 | } else { | ||
| 108 | opt_nlen = opt_len; | ||
| 109 | optval[0] = '\0'; | ||
| 110 | } | ||
| 104 | 111 | ||
| 105 | tmp = opt_vlen >= 0 ? opt_vlen : 0; | 112 | kdebug("option '%*.*s' val '%s'", |
| 106 | kdebug("option '%*.*s' val '%*.*s'", | 113 | opt_nlen, opt_nlen, opt, optval); |
| 107 | opt_nlen, opt_nlen, opt, tmp, tmp, eq); | ||
| 108 | 114 | ||
| 109 | /* see if it's an error number representing a DNS error | 115 | /* see if it's an error number representing a DNS error |
| 110 | * that's to be recorded as the result in this key */ | 116 | * that's to be recorded as the result in this key */ |
| 111 | if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && | 117 | if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && |
| 112 | memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { | 118 | memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { |
| 113 | kdebug("dns error number option"); | 119 | kdebug("dns error number option"); |
| 114 | if (opt_vlen <= 0) | ||
| 115 | goto bad_option_value; | ||
| 116 | 120 | ||
| 117 | ret = kstrtoul(eq, 10, &derrno); | 121 | ret = kstrtoul(optval, 10, &derrno); |
| 118 | if (ret < 0) | 122 | if (ret < 0) |
| 119 | goto bad_option_value; | 123 | goto bad_option_value; |
| 120 | 124 | ||
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 275449b0d633..3297e7fa9945 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c | |||
| @@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n) | |||
| 90 | return 0; | 90 | return 0; |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | static int lowpan_get_iflink(const struct net_device *dev) | ||
| 94 | { | ||
| 95 | return lowpan_802154_dev(dev)->wdev->ifindex; | ||
| 96 | } | ||
| 97 | |||
| 93 | static const struct net_device_ops lowpan_netdev_ops = { | 98 | static const struct net_device_ops lowpan_netdev_ops = { |
| 94 | .ndo_init = lowpan_dev_init, | 99 | .ndo_init = lowpan_dev_init, |
| 95 | .ndo_start_xmit = lowpan_xmit, | 100 | .ndo_start_xmit = lowpan_xmit, |
| 96 | .ndo_open = lowpan_open, | 101 | .ndo_open = lowpan_open, |
| 97 | .ndo_stop = lowpan_stop, | 102 | .ndo_stop = lowpan_stop, |
| 98 | .ndo_neigh_construct = lowpan_neigh_construct, | 103 | .ndo_neigh_construct = lowpan_neigh_construct, |
| 104 | .ndo_get_iflink = lowpan_get_iflink, | ||
| 99 | }; | 105 | }; |
| 100 | 106 | ||
| 101 | static void lowpan_setup(struct net_device *ldev) | 107 | static void lowpan_setup(struct net_device *ldev) |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index b21833651394..e46cdd310e5f 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) | |||
| 300 | if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { | 300 | if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { |
| 301 | struct flowi4 fl4 = { | 301 | struct flowi4 fl4 = { |
| 302 | .flowi4_iif = LOOPBACK_IFINDEX, | 302 | .flowi4_iif = LOOPBACK_IFINDEX, |
| 303 | .flowi4_oif = l3mdev_master_ifindex_rcu(dev), | ||
| 303 | .daddr = ip_hdr(skb)->saddr, | 304 | .daddr = ip_hdr(skb)->saddr, |
| 304 | .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), | 305 | .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), |
| 305 | .flowi4_scope = scope, | 306 | .flowi4_scope = scope, |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 85b617b655bc..b3c899a630a0 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
| @@ -1200,13 +1200,14 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) | |||
| 1200 | spin_lock_bh(&im->lock); | 1200 | spin_lock_bh(&im->lock); |
| 1201 | if (pmc) { | 1201 | if (pmc) { |
| 1202 | im->interface = pmc->interface; | 1202 | im->interface = pmc->interface; |
| 1203 | im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; | ||
| 1204 | im->sfmode = pmc->sfmode; | 1203 | im->sfmode = pmc->sfmode; |
| 1205 | if (pmc->sfmode == MCAST_INCLUDE) { | 1204 | if (pmc->sfmode == MCAST_INCLUDE) { |
| 1206 | im->tomb = pmc->tomb; | 1205 | im->tomb = pmc->tomb; |
| 1207 | im->sources = pmc->sources; | 1206 | im->sources = pmc->sources; |
| 1208 | for (psf = im->sources; psf; psf = psf->sf_next) | 1207 | for (psf = im->sources; psf; psf = psf->sf_next) |
| 1209 | psf->sf_crcount = im->crcount; | 1208 | psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
| 1209 | } else { | ||
| 1210 | im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; | ||
| 1210 | } | 1211 | } |
| 1211 | in_dev_put(pmc->interface); | 1212 | in_dev_put(pmc->interface); |
| 1212 | kfree(pmc); | 1213 | kfree(pmc); |
| @@ -1288,7 +1289,7 @@ static void igmp_group_dropped(struct ip_mc_list *im) | |||
| 1288 | #endif | 1289 | #endif |
| 1289 | } | 1290 | } |
| 1290 | 1291 | ||
| 1291 | static void igmp_group_added(struct ip_mc_list *im) | 1292 | static void igmp_group_added(struct ip_mc_list *im, unsigned int mode) |
| 1292 | { | 1293 | { |
| 1293 | struct in_device *in_dev = im->interface; | 1294 | struct in_device *in_dev = im->interface; |
| 1294 | #ifdef CONFIG_IP_MULTICAST | 1295 | #ifdef CONFIG_IP_MULTICAST |
| @@ -1316,7 +1317,13 @@ static void igmp_group_added(struct ip_mc_list *im) | |||
| 1316 | } | 1317 | } |
| 1317 | /* else, v3 */ | 1318 | /* else, v3 */ |
| 1318 | 1319 | ||
| 1319 | im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; | 1320 | /* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should |
| 1321 | * not send filter-mode change record as the mode should be from | ||
| 1322 | * IN() to IN(A). | ||
| 1323 | */ | ||
| 1324 | if (mode == MCAST_EXCLUDE) | ||
| 1325 | im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; | ||
| 1326 | |||
| 1320 | igmp_ifc_event(in_dev); | 1327 | igmp_ifc_event(in_dev); |
| 1321 | #endif | 1328 | #endif |
| 1322 | } | 1329 | } |
| @@ -1381,8 +1388,7 @@ static void ip_mc_hash_remove(struct in_device *in_dev, | |||
| 1381 | /* | 1388 | /* |
| 1382 | * A socket has joined a multicast group on device dev. | 1389 | * A socket has joined a multicast group on device dev. |
| 1383 | */ | 1390 | */ |
| 1384 | 1391 | void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) | |
| 1385 | void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | ||
| 1386 | { | 1392 | { |
| 1387 | struct ip_mc_list *im; | 1393 | struct ip_mc_list *im; |
| 1388 | #ifdef CONFIG_IP_MULTICAST | 1394 | #ifdef CONFIG_IP_MULTICAST |
| @@ -1394,7 +1400,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
| 1394 | for_each_pmc_rtnl(in_dev, im) { | 1400 | for_each_pmc_rtnl(in_dev, im) { |
| 1395 | if (im->multiaddr == addr) { | 1401 | if (im->multiaddr == addr) { |
| 1396 | im->users++; | 1402 | im->users++; |
| 1397 | ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0); | 1403 | ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0); |
| 1398 | goto out; | 1404 | goto out; |
| 1399 | } | 1405 | } |
| 1400 | } | 1406 | } |
| @@ -1408,8 +1414,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
| 1408 | in_dev_hold(in_dev); | 1414 | in_dev_hold(in_dev); |
| 1409 | im->multiaddr = addr; | 1415 | im->multiaddr = addr; |
| 1410 | /* initial mode is (EX, empty) */ | 1416 | /* initial mode is (EX, empty) */ |
| 1411 | im->sfmode = MCAST_EXCLUDE; | 1417 | im->sfmode = mode; |
| 1412 | im->sfcount[MCAST_EXCLUDE] = 1; | 1418 | im->sfcount[mode] = 1; |
| 1413 | refcount_set(&im->refcnt, 1); | 1419 | refcount_set(&im->refcnt, 1); |
| 1414 | spin_lock_init(&im->lock); | 1420 | spin_lock_init(&im->lock); |
| 1415 | #ifdef CONFIG_IP_MULTICAST | 1421 | #ifdef CONFIG_IP_MULTICAST |
| @@ -1426,12 +1432,17 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
| 1426 | #ifdef CONFIG_IP_MULTICAST | 1432 | #ifdef CONFIG_IP_MULTICAST |
| 1427 | igmpv3_del_delrec(in_dev, im); | 1433 | igmpv3_del_delrec(in_dev, im); |
| 1428 | #endif | 1434 | #endif |
| 1429 | igmp_group_added(im); | 1435 | igmp_group_added(im, mode); |
| 1430 | if (!in_dev->dead) | 1436 | if (!in_dev->dead) |
| 1431 | ip_rt_multicast_event(in_dev); | 1437 | ip_rt_multicast_event(in_dev); |
| 1432 | out: | 1438 | out: |
| 1433 | return; | 1439 | return; |
| 1434 | } | 1440 | } |
| 1441 | |||
| 1442 | void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | ||
| 1443 | { | ||
| 1444 | __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE); | ||
| 1445 | } | ||
| 1435 | EXPORT_SYMBOL(ip_mc_inc_group); | 1446 | EXPORT_SYMBOL(ip_mc_inc_group); |
| 1436 | 1447 | ||
| 1437 | static int ip_mc_check_iphdr(struct sk_buff *skb) | 1448 | static int ip_mc_check_iphdr(struct sk_buff *skb) |
| @@ -1688,7 +1699,7 @@ void ip_mc_remap(struct in_device *in_dev) | |||
| 1688 | #ifdef CONFIG_IP_MULTICAST | 1699 | #ifdef CONFIG_IP_MULTICAST |
| 1689 | igmpv3_del_delrec(in_dev, pmc); | 1700 | igmpv3_del_delrec(in_dev, pmc); |
| 1690 | #endif | 1701 | #endif |
| 1691 | igmp_group_added(pmc); | 1702 | igmp_group_added(pmc, pmc->sfmode); |
| 1692 | } | 1703 | } |
| 1693 | } | 1704 | } |
| 1694 | 1705 | ||
| @@ -1751,7 +1762,7 @@ void ip_mc_up(struct in_device *in_dev) | |||
| 1751 | #ifdef CONFIG_IP_MULTICAST | 1762 | #ifdef CONFIG_IP_MULTICAST |
| 1752 | igmpv3_del_delrec(in_dev, pmc); | 1763 | igmpv3_del_delrec(in_dev, pmc); |
| 1753 | #endif | 1764 | #endif |
| 1754 | igmp_group_added(pmc); | 1765 | igmp_group_added(pmc, pmc->sfmode); |
| 1755 | } | 1766 | } |
| 1756 | } | 1767 | } |
| 1757 | 1768 | ||
| @@ -2130,8 +2141,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc) | |||
| 2130 | 2141 | ||
| 2131 | /* Join a multicast group | 2142 | /* Join a multicast group |
| 2132 | */ | 2143 | */ |
| 2133 | 2144 | static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr, | |
| 2134 | int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) | 2145 | unsigned int mode) |
| 2135 | { | 2146 | { |
| 2136 | __be32 addr = imr->imr_multiaddr.s_addr; | 2147 | __be32 addr = imr->imr_multiaddr.s_addr; |
| 2137 | struct ip_mc_socklist *iml, *i; | 2148 | struct ip_mc_socklist *iml, *i; |
| @@ -2172,15 +2183,30 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) | |||
| 2172 | memcpy(&iml->multi, imr, sizeof(*imr)); | 2183 | memcpy(&iml->multi, imr, sizeof(*imr)); |
| 2173 | iml->next_rcu = inet->mc_list; | 2184 | iml->next_rcu = inet->mc_list; |
| 2174 | iml->sflist = NULL; | 2185 | iml->sflist = NULL; |
| 2175 | iml->sfmode = MCAST_EXCLUDE; | 2186 | iml->sfmode = mode; |
| 2176 | rcu_assign_pointer(inet->mc_list, iml); | 2187 | rcu_assign_pointer(inet->mc_list, iml); |
| 2177 | ip_mc_inc_group(in_dev, addr); | 2188 | __ip_mc_inc_group(in_dev, addr, mode); |
| 2178 | err = 0; | 2189 | err = 0; |
| 2179 | done: | 2190 | done: |
| 2180 | return err; | 2191 | return err; |
| 2181 | } | 2192 | } |
| 2193 | |||
| 2194 | /* Join ASM (Any-Source Multicast) group | ||
| 2195 | */ | ||
| 2196 | int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) | ||
| 2197 | { | ||
| 2198 | return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE); | ||
| 2199 | } | ||
| 2182 | EXPORT_SYMBOL(ip_mc_join_group); | 2200 | EXPORT_SYMBOL(ip_mc_join_group); |
| 2183 | 2201 | ||
| 2202 | /* Join SSM (Source-Specific Multicast) group | ||
| 2203 | */ | ||
| 2204 | int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr, | ||
| 2205 | unsigned int mode) | ||
| 2206 | { | ||
| 2207 | return __ip_mc_join_group(sk, imr, mode); | ||
| 2208 | } | ||
| 2209 | |||
| 2184 | static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, | 2210 | static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, |
| 2185 | struct in_device *in_dev) | 2211 | struct in_device *in_dev) |
| 2186 | { | 2212 | { |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index c9e35b81d093..1e4cf3ab560f 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
| @@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg) | |||
| 90 | 90 | ||
| 91 | void inet_frags_exit_net(struct netns_frags *nf) | 91 | void inet_frags_exit_net(struct netns_frags *nf) |
| 92 | { | 92 | { |
| 93 | nf->low_thresh = 0; /* prevent creation of new frags */ | 93 | nf->high_thresh = 0; /* prevent creation of new frags */ |
| 94 | 94 | ||
| 95 | rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL); | 95 | rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL); |
| 96 | } | 96 | } |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index fc32fdbeefa6..64c76dcf7386 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
| @@ -984,7 +984,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
| 984 | mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr; | 984 | mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr; |
| 985 | mreq.imr_address.s_addr = mreqs.imr_interface; | 985 | mreq.imr_address.s_addr = mreqs.imr_interface; |
| 986 | mreq.imr_ifindex = 0; | 986 | mreq.imr_ifindex = 0; |
| 987 | err = ip_mc_join_group(sk, &mreq); | 987 | err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE); |
| 988 | if (err && err != -EADDRINUSE) | 988 | if (err && err != -EADDRINUSE) |
| 989 | break; | 989 | break; |
| 990 | omode = MCAST_INCLUDE; | 990 | omode = MCAST_INCLUDE; |
| @@ -1061,7 +1061,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
| 1061 | mreq.imr_multiaddr = psin->sin_addr; | 1061 | mreq.imr_multiaddr = psin->sin_addr; |
| 1062 | mreq.imr_address.s_addr = 0; | 1062 | mreq.imr_address.s_addr = 0; |
| 1063 | mreq.imr_ifindex = greqs.gsr_interface; | 1063 | mreq.imr_ifindex = greqs.gsr_interface; |
| 1064 | err = ip_mc_join_group(sk, &mreq); | 1064 | err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE); |
| 1065 | if (err && err != -EADDRINUSE) | 1065 | if (err && err != -EADDRINUSE) |
| 1066 | break; | 1066 | break; |
| 1067 | greqs.gsr_interface = mreq.imr_ifindex; | 1067 | greqs.gsr_interface = mreq.imr_ifindex; |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index ca0dad90803a..e77872c93c20 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
| @@ -1898,6 +1898,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = { | |||
| 1898 | .checkentry = icmp_checkentry, | 1898 | .checkentry = icmp_checkentry, |
| 1899 | .proto = IPPROTO_ICMP, | 1899 | .proto = IPPROTO_ICMP, |
| 1900 | .family = NFPROTO_IPV4, | 1900 | .family = NFPROTO_IPV4, |
| 1901 | .me = THIS_MODULE, | ||
| 1901 | }, | 1902 | }, |
| 1902 | }; | 1903 | }; |
| 1903 | 1904 | ||
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c index 805e83ec3ad9..164714104965 100644 --- a/net/ipv4/netfilter/nf_tproxy_ipv4.c +++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c | |||
| @@ -37,7 +37,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb, | |||
| 37 | * to a listener socket if there's one */ | 37 | * to a listener socket if there's one */ |
| 38 | struct sock *sk2; | 38 | struct sock *sk2; |
| 39 | 39 | ||
| 40 | sk2 = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, | 40 | sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol, |
| 41 | iph->saddr, laddr ? laddr : iph->daddr, | 41 | iph->saddr, laddr ? laddr : iph->daddr, |
| 42 | hp->source, lport ? lport : hp->dest, | 42 | hp->source, lport ? lport : hp->dest, |
| 43 | skb->dev, NF_TPROXY_LOOKUP_LISTENER); | 43 | skb->dev, NF_TPROXY_LOOKUP_LISTENER); |
| @@ -71,7 +71,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) | |||
| 71 | EXPORT_SYMBOL_GPL(nf_tproxy_laddr4); | 71 | EXPORT_SYMBOL_GPL(nf_tproxy_laddr4); |
| 72 | 72 | ||
| 73 | struct sock * | 73 | struct sock * |
| 74 | nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, | 74 | nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, |
| 75 | const u8 protocol, | 75 | const u8 protocol, |
| 76 | const __be32 saddr, const __be32 daddr, | 76 | const __be32 saddr, const __be32 daddr, |
| 77 | const __be16 sport, const __be16 dport, | 77 | const __be16 sport, const __be16 dport, |
| @@ -79,16 +79,21 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, | |||
| 79 | const enum nf_tproxy_lookup_t lookup_type) | 79 | const enum nf_tproxy_lookup_t lookup_type) |
| 80 | { | 80 | { |
| 81 | struct sock *sk; | 81 | struct sock *sk; |
| 82 | struct tcphdr *tcph; | ||
| 83 | 82 | ||
| 84 | switch (protocol) { | 83 | switch (protocol) { |
| 85 | case IPPROTO_TCP: | 84 | case IPPROTO_TCP: { |
| 85 | struct tcphdr _hdr, *hp; | ||
| 86 | |||
| 87 | hp = skb_header_pointer(skb, ip_hdrlen(skb), | ||
| 88 | sizeof(struct tcphdr), &_hdr); | ||
| 89 | if (hp == NULL) | ||
| 90 | return NULL; | ||
| 91 | |||
| 86 | switch (lookup_type) { | 92 | switch (lookup_type) { |
| 87 | case NF_TPROXY_LOOKUP_LISTENER: | 93 | case NF_TPROXY_LOOKUP_LISTENER: |
| 88 | tcph = hp; | ||
| 89 | sk = inet_lookup_listener(net, &tcp_hashinfo, skb, | 94 | sk = inet_lookup_listener(net, &tcp_hashinfo, skb, |
| 90 | ip_hdrlen(skb) + | 95 | ip_hdrlen(skb) + |
| 91 | __tcp_hdrlen(tcph), | 96 | __tcp_hdrlen(hp), |
| 92 | saddr, sport, | 97 | saddr, sport, |
| 93 | daddr, dport, | 98 | daddr, dport, |
| 94 | in->ifindex, 0); | 99 | in->ifindex, 0); |
| @@ -110,6 +115,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, | |||
| 110 | BUG(); | 115 | BUG(); |
| 111 | } | 116 | } |
| 112 | break; | 117 | break; |
| 118 | } | ||
| 113 | case IPPROTO_UDP: | 119 | case IPPROTO_UDP: |
| 114 | sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, | 120 | sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, |
| 115 | in->ifindex); | 121 | in->ifindex); |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index af0a857d8352..5fa335fd3852 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
| @@ -189,8 +189,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, | |||
| 189 | if (write && ret == 0) { | 189 | if (write && ret == 0) { |
| 190 | low = make_kgid(user_ns, urange[0]); | 190 | low = make_kgid(user_ns, urange[0]); |
| 191 | high = make_kgid(user_ns, urange[1]); | 191 | high = make_kgid(user_ns, urange[1]); |
| 192 | if (!gid_valid(low) || !gid_valid(high) || | 192 | if (!gid_valid(low) || !gid_valid(high)) |
| 193 | (urange[1] < urange[0]) || gid_lt(high, low)) { | 193 | return -EINVAL; |
| 194 | if (urange[1] < urange[0] || gid_lt(high, low)) { | ||
| 194 | low = make_kgid(&init_user_ns, 1); | 195 | low = make_kgid(&init_user_ns, 1); |
| 195 | high = make_kgid(&init_user_ns, 0); | 196 | high = make_kgid(&init_user_ns, 0); |
| 196 | } | 197 | } |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e7b53d2a971f..4491faf83f4f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -1998,7 +1998,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, | |||
| 1998 | * shouldn't happen. | 1998 | * shouldn't happen. |
| 1999 | */ | 1999 | */ |
| 2000 | if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), | 2000 | if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), |
| 2001 | "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", | 2001 | "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", |
| 2002 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, | 2002 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, |
| 2003 | flags)) | 2003 | flags)) |
| 2004 | break; | 2004 | break; |
| @@ -2013,7 +2013,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, | |||
| 2013 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) | 2013 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
| 2014 | goto found_fin_ok; | 2014 | goto found_fin_ok; |
| 2015 | WARN(!(flags & MSG_PEEK), | 2015 | WARN(!(flags & MSG_PEEK), |
| 2016 | "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", | 2016 | "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", |
| 2017 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); | 2017 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); |
| 2018 | } | 2018 | } |
| 2019 | 2019 | ||
| @@ -2562,6 +2562,8 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
| 2562 | 2562 | ||
| 2563 | tcp_clear_xmit_timers(sk); | 2563 | tcp_clear_xmit_timers(sk); |
| 2564 | __skb_queue_purge(&sk->sk_receive_queue); | 2564 | __skb_queue_purge(&sk->sk_receive_queue); |
| 2565 | tp->copied_seq = tp->rcv_nxt; | ||
| 2566 | tp->urg_data = 0; | ||
| 2565 | tcp_write_queue_purge(sk); | 2567 | tcp_write_queue_purge(sk); |
| 2566 | tcp_fastopen_active_disable_ofo_check(sk); | 2568 | tcp_fastopen_active_disable_ofo_check(sk); |
| 2567 | skb_rbtree_purge(&tp->out_of_order_queue); | 2569 | skb_rbtree_purge(&tp->out_of_order_queue); |
| @@ -2821,14 +2823,17 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
| 2821 | case TCP_REPAIR: | 2823 | case TCP_REPAIR: |
| 2822 | if (!tcp_can_repair_sock(sk)) | 2824 | if (!tcp_can_repair_sock(sk)) |
| 2823 | err = -EPERM; | 2825 | err = -EPERM; |
| 2824 | else if (val == 1) { | 2826 | else if (val == TCP_REPAIR_ON) { |
| 2825 | tp->repair = 1; | 2827 | tp->repair = 1; |
| 2826 | sk->sk_reuse = SK_FORCE_REUSE; | 2828 | sk->sk_reuse = SK_FORCE_REUSE; |
| 2827 | tp->repair_queue = TCP_NO_QUEUE; | 2829 | tp->repair_queue = TCP_NO_QUEUE; |
| 2828 | } else if (val == 0) { | 2830 | } else if (val == TCP_REPAIR_OFF) { |
| 2829 | tp->repair = 0; | 2831 | tp->repair = 0; |
| 2830 | sk->sk_reuse = SK_NO_REUSE; | 2832 | sk->sk_reuse = SK_NO_REUSE; |
| 2831 | tcp_send_window_probe(sk); | 2833 | tcp_send_window_probe(sk); |
| 2834 | } else if (val == TCP_REPAIR_OFF_NO_WP) { | ||
| 2835 | tp->repair = 0; | ||
| 2836 | sk->sk_reuse = SK_NO_REUSE; | ||
| 2832 | } else | 2837 | } else |
| 2833 | err = -EINVAL; | 2838 | err = -EINVAL; |
| 2834 | 2839 | ||
| @@ -3720,8 +3725,7 @@ int tcp_abort(struct sock *sk, int err) | |||
| 3720 | struct request_sock *req = inet_reqsk(sk); | 3725 | struct request_sock *req = inet_reqsk(sk); |
| 3721 | 3726 | ||
| 3722 | local_bh_disable(); | 3727 | local_bh_disable(); |
| 3723 | inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, | 3728 | inet_csk_reqsk_queue_drop(req->rsk_listener, req); |
| 3724 | req); | ||
| 3725 | local_bh_enable(); | 3729 | local_bh_enable(); |
| 3726 | return 0; | 3730 | return 0; |
| 3727 | } | 3731 | } |
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 5f5e5936760e..5869f89ca656 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c | |||
| @@ -55,7 +55,6 @@ struct dctcp { | |||
| 55 | u32 dctcp_alpha; | 55 | u32 dctcp_alpha; |
| 56 | u32 next_seq; | 56 | u32 next_seq; |
| 57 | u32 ce_state; | 57 | u32 ce_state; |
| 58 | u32 delayed_ack_reserved; | ||
| 59 | u32 loss_cwnd; | 58 | u32 loss_cwnd; |
| 60 | }; | 59 | }; |
| 61 | 60 | ||
| @@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk) | |||
| 96 | 95 | ||
| 97 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); | 96 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); |
| 98 | 97 | ||
| 99 | ca->delayed_ack_reserved = 0; | ||
| 100 | ca->loss_cwnd = 0; | 98 | ca->loss_cwnd = 0; |
| 101 | ca->ce_state = 0; | 99 | ca->ce_state = 0; |
| 102 | 100 | ||
| @@ -134,7 +132,8 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) | |||
| 134 | /* State has changed from CE=0 to CE=1 and delayed | 132 | /* State has changed from CE=0 to CE=1 and delayed |
| 135 | * ACK has not sent yet. | 133 | * ACK has not sent yet. |
| 136 | */ | 134 | */ |
| 137 | if (!ca->ce_state && ca->delayed_ack_reserved) { | 135 | if (!ca->ce_state && |
| 136 | inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { | ||
| 138 | u32 tmp_rcv_nxt; | 137 | u32 tmp_rcv_nxt; |
| 139 | 138 | ||
| 140 | /* Save current rcv_nxt. */ | 139 | /* Save current rcv_nxt. */ |
| @@ -164,7 +163,8 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) | |||
| 164 | /* State has changed from CE=1 to CE=0 and delayed | 163 | /* State has changed from CE=1 to CE=0 and delayed |
| 165 | * ACK has not sent yet. | 164 | * ACK has not sent yet. |
| 166 | */ | 165 | */ |
| 167 | if (ca->ce_state && ca->delayed_ack_reserved) { | 166 | if (ca->ce_state && |
| 167 | inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { | ||
| 168 | u32 tmp_rcv_nxt; | 168 | u32 tmp_rcv_nxt; |
| 169 | 169 | ||
| 170 | /* Save current rcv_nxt. */ | 170 | /* Save current rcv_nxt. */ |
| @@ -248,25 +248,6 @@ static void dctcp_state(struct sock *sk, u8 new_state) | |||
| 248 | } | 248 | } |
| 249 | } | 249 | } |
| 250 | 250 | ||
| 251 | static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev) | ||
| 252 | { | ||
| 253 | struct dctcp *ca = inet_csk_ca(sk); | ||
| 254 | |||
| 255 | switch (ev) { | ||
| 256 | case CA_EVENT_DELAYED_ACK: | ||
| 257 | if (!ca->delayed_ack_reserved) | ||
| 258 | ca->delayed_ack_reserved = 1; | ||
| 259 | break; | ||
| 260 | case CA_EVENT_NON_DELAYED_ACK: | ||
| 261 | if (ca->delayed_ack_reserved) | ||
| 262 | ca->delayed_ack_reserved = 0; | ||
| 263 | break; | ||
| 264 | default: | ||
| 265 | /* Don't care for the rest. */ | ||
| 266 | break; | ||
| 267 | } | ||
| 268 | } | ||
| 269 | |||
| 270 | static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) | 251 | static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) |
| 271 | { | 252 | { |
| 272 | switch (ev) { | 253 | switch (ev) { |
| @@ -276,10 +257,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) | |||
| 276 | case CA_EVENT_ECN_NO_CE: | 257 | case CA_EVENT_ECN_NO_CE: |
| 277 | dctcp_ce_state_1_to_0(sk); | 258 | dctcp_ce_state_1_to_0(sk); |
| 278 | break; | 259 | break; |
| 279 | case CA_EVENT_DELAYED_ACK: | ||
| 280 | case CA_EVENT_NON_DELAYED_ACK: | ||
| 281 | dctcp_update_ack_reserved(sk, ev); | ||
| 282 | break; | ||
| 283 | default: | 260 | default: |
| 284 | /* Don't care for the rest. */ | 261 | /* Don't care for the rest. */ |
| 285 | break; | 262 | break; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bea17f1e8302..3b2711e33e4c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -156,11 +156,24 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) | |||
| 156 | */ | 156 | */ |
| 157 | if (tcptw->tw_ts_recent_stamp && | 157 | if (tcptw->tw_ts_recent_stamp && |
| 158 | (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { | 158 | (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { |
| 159 | tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; | 159 | /* In case of repair and re-using TIME-WAIT sockets we still |
| 160 | if (tp->write_seq == 0) | 160 | * want to be sure that it is safe as above but honor the |
| 161 | tp->write_seq = 1; | 161 | * sequence numbers and time stamps set as part of the repair |
| 162 | tp->rx_opt.ts_recent = tcptw->tw_ts_recent; | 162 | * process. |
| 163 | tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; | 163 | * |
| 164 | * Without this check re-using a TIME-WAIT socket with TCP | ||
| 165 | * repair would accumulate a -1 on the repair assigned | ||
| 166 | * sequence number. The first time it is reused the sequence | ||
| 167 | * is -1, the second time -2, etc. This fixes that issue | ||
| 168 | * without appearing to create any others. | ||
| 169 | */ | ||
| 170 | if (likely(!tp->repair)) { | ||
| 171 | tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; | ||
| 172 | if (tp->write_seq == 0) | ||
| 173 | tp->write_seq = 1; | ||
| 174 | tp->rx_opt.ts_recent = tcptw->tw_ts_recent; | ||
| 175 | tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; | ||
| 176 | } | ||
| 164 | sock_hold(sktw); | 177 | sock_hold(sktw); |
| 165 | return 1; | 178 | return 1; |
| 166 | } | 179 | } |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8e08b409c71e..00e5a300ddb9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -3523,8 +3523,6 @@ void tcp_send_delayed_ack(struct sock *sk) | |||
| 3523 | int ato = icsk->icsk_ack.ato; | 3523 | int ato = icsk->icsk_ack.ato; |
| 3524 | unsigned long timeout; | 3524 | unsigned long timeout; |
| 3525 | 3525 | ||
| 3526 | tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); | ||
| 3527 | |||
| 3528 | if (ato > TCP_DELACK_MIN) { | 3526 | if (ato > TCP_DELACK_MIN) { |
| 3529 | const struct tcp_sock *tp = tcp_sk(sk); | 3527 | const struct tcp_sock *tp = tcp_sk(sk); |
| 3530 | int max_ato = HZ / 2; | 3528 | int max_ato = HZ / 2; |
| @@ -3581,8 +3579,6 @@ void tcp_send_ack(struct sock *sk) | |||
| 3581 | if (sk->sk_state == TCP_CLOSE) | 3579 | if (sk->sk_state == TCP_CLOSE) |
| 3582 | return; | 3580 | return; |
| 3583 | 3581 | ||
| 3584 | tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); | ||
| 3585 | |||
| 3586 | /* We are not putting this on the write queue, so | 3582 | /* We are not putting this on the write queue, so |
| 3587 | * tcp_transmit_skb() will set the ownership to this | 3583 | * tcp_transmit_skb() will set the ownership to this |
| 3588 | * sock. | 3584 | * sock. |
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 0eff75525da1..b3885ca22d6f 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
| @@ -108,6 +108,7 @@ config IPV6_MIP6 | |||
| 108 | config IPV6_ILA | 108 | config IPV6_ILA |
| 109 | tristate "IPv6: Identifier Locator Addressing (ILA)" | 109 | tristate "IPv6: Identifier Locator Addressing (ILA)" |
| 110 | depends on NETFILTER | 110 | depends on NETFILTER |
| 111 | select DST_CACHE | ||
| 111 | select LWTUNNEL | 112 | select LWTUNNEL |
| 112 | ---help--- | 113 | ---help--- |
| 113 | Support for IPv6 Identifier Locator Addressing (ILA). | 114 | Support for IPv6 Identifier Locator Addressing (ILA). |
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 1323b9679cf7..1c0bb9fb76e6 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c | |||
| @@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop) | |||
| 799 | { | 799 | { |
| 800 | struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts; | 800 | struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts; |
| 801 | 801 | ||
| 802 | txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS, | 802 | txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop); |
| 803 | hop, hop ? ipv6_optlen(hop) : 0); | ||
| 804 | txopt_put(old); | 803 | txopt_put(old); |
| 805 | if (IS_ERR(txopts)) | 804 | if (IS_ERR(txopts)) |
| 806 | return PTR_ERR(txopts); | 805 | return PTR_ERR(txopts); |
| @@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req, | |||
| 1222 | if (IS_ERR(new)) | 1221 | if (IS_ERR(new)) |
| 1223 | return PTR_ERR(new); | 1222 | return PTR_ERR(new); |
| 1224 | 1223 | ||
| 1225 | txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, | 1224 | txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new); |
| 1226 | new, new ? ipv6_optlen(new) : 0); | ||
| 1227 | 1225 | ||
| 1228 | kfree(new); | 1226 | kfree(new); |
| 1229 | 1227 | ||
| @@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req) | |||
| 1260 | if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new)) | 1258 | if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new)) |
| 1261 | return; /* Nothing to do */ | 1259 | return; /* Nothing to do */ |
| 1262 | 1260 | ||
| 1263 | txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, | 1261 | txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new); |
| 1264 | new, new ? ipv6_optlen(new) : 0); | ||
| 1265 | 1262 | ||
| 1266 | if (!IS_ERR(txopts)) { | 1263 | if (!IS_ERR(txopts)) { |
| 1267 | txopts = xchg(&req_inet->ipv6_opt, txopts); | 1264 | txopts = xchg(&req_inet->ipv6_opt, txopts); |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 5bc2bf3733ab..20291c2036fc 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
| @@ -1015,29 +1015,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) | |||
| 1015 | } | 1015 | } |
| 1016 | EXPORT_SYMBOL_GPL(ipv6_dup_options); | 1016 | EXPORT_SYMBOL_GPL(ipv6_dup_options); |
| 1017 | 1017 | ||
| 1018 | static int ipv6_renew_option(void *ohdr, | 1018 | static void ipv6_renew_option(int renewtype, |
| 1019 | struct ipv6_opt_hdr __user *newopt, int newoptlen, | 1019 | struct ipv6_opt_hdr **dest, |
| 1020 | int inherit, | 1020 | struct ipv6_opt_hdr *old, |
| 1021 | struct ipv6_opt_hdr **hdr, | 1021 | struct ipv6_opt_hdr *new, |
| 1022 | char **p) | 1022 | int newtype, char **p) |
| 1023 | { | 1023 | { |
| 1024 | if (inherit) { | 1024 | struct ipv6_opt_hdr *src; |
| 1025 | if (ohdr) { | 1025 | |
| 1026 | memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr)); | 1026 | src = (renewtype == newtype ? new : old); |
| 1027 | *hdr = (struct ipv6_opt_hdr *)*p; | 1027 | if (!src) |
| 1028 | *p += CMSG_ALIGN(ipv6_optlen(*hdr)); | 1028 | return; |
| 1029 | } | 1029 | |
| 1030 | } else { | 1030 | memcpy(*p, src, ipv6_optlen(src)); |
| 1031 | if (newopt) { | 1031 | *dest = (struct ipv6_opt_hdr *)*p; |
| 1032 | if (copy_from_user(*p, newopt, newoptlen)) | 1032 | *p += CMSG_ALIGN(ipv6_optlen(*dest)); |
| 1033 | return -EFAULT; | ||
| 1034 | *hdr = (struct ipv6_opt_hdr *)*p; | ||
| 1035 | if (ipv6_optlen(*hdr) > newoptlen) | ||
| 1036 | return -EINVAL; | ||
| 1037 | *p += CMSG_ALIGN(newoptlen); | ||
| 1038 | } | ||
| 1039 | } | ||
| 1040 | return 0; | ||
| 1041 | } | 1033 | } |
| 1042 | 1034 | ||
| 1043 | /** | 1035 | /** |
| @@ -1063,13 +1055,11 @@ static int ipv6_renew_option(void *ohdr, | |||
| 1063 | */ | 1055 | */ |
| 1064 | struct ipv6_txoptions * | 1056 | struct ipv6_txoptions * |
| 1065 | ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, | 1057 | ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, |
| 1066 | int newtype, | 1058 | int newtype, struct ipv6_opt_hdr *newopt) |
| 1067 | struct ipv6_opt_hdr __user *newopt, int newoptlen) | ||
| 1068 | { | 1059 | { |
| 1069 | int tot_len = 0; | 1060 | int tot_len = 0; |
| 1070 | char *p; | 1061 | char *p; |
| 1071 | struct ipv6_txoptions *opt2; | 1062 | struct ipv6_txoptions *opt2; |
| 1072 | int err; | ||
| 1073 | 1063 | ||
| 1074 | if (opt) { | 1064 | if (opt) { |
| 1075 | if (newtype != IPV6_HOPOPTS && opt->hopopt) | 1065 | if (newtype != IPV6_HOPOPTS && opt->hopopt) |
| @@ -1082,8 +1072,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, | |||
| 1082 | tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); | 1072 | tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); |
| 1083 | } | 1073 | } |
| 1084 | 1074 | ||
| 1085 | if (newopt && newoptlen) | 1075 | if (newopt) |
| 1086 | tot_len += CMSG_ALIGN(newoptlen); | 1076 | tot_len += CMSG_ALIGN(ipv6_optlen(newopt)); |
| 1087 | 1077 | ||
| 1088 | if (!tot_len) | 1078 | if (!tot_len) |
| 1089 | return NULL; | 1079 | return NULL; |
| @@ -1098,29 +1088,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, | |||
| 1098 | opt2->tot_len = tot_len; | 1088 | opt2->tot_len = tot_len; |
| 1099 | p = (char *)(opt2 + 1); | 1089 | p = (char *)(opt2 + 1); |
| 1100 | 1090 | ||
| 1101 | err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen, | 1091 | ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt, |
| 1102 | newtype != IPV6_HOPOPTS, | 1092 | (opt ? opt->hopopt : NULL), |
| 1103 | &opt2->hopopt, &p); | 1093 | newopt, newtype, &p); |
| 1104 | if (err) | 1094 | ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt, |
| 1105 | goto out; | 1095 | (opt ? opt->dst0opt : NULL), |
| 1106 | 1096 | newopt, newtype, &p); | |
| 1107 | err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen, | 1097 | ipv6_renew_option(IPV6_RTHDR, |
| 1108 | newtype != IPV6_RTHDRDSTOPTS, | 1098 | (struct ipv6_opt_hdr **)&opt2->srcrt, |
| 1109 | &opt2->dst0opt, &p); | 1099 | (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL), |
| 1110 | if (err) | 1100 | newopt, newtype, &p); |
| 1111 | goto out; | 1101 | ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt, |
| 1112 | 1102 | (opt ? opt->dst1opt : NULL), | |
| 1113 | err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen, | 1103 | newopt, newtype, &p); |
| 1114 | newtype != IPV6_RTHDR, | ||
| 1115 | (struct ipv6_opt_hdr **)&opt2->srcrt, &p); | ||
| 1116 | if (err) | ||
| 1117 | goto out; | ||
| 1118 | |||
| 1119 | err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen, | ||
| 1120 | newtype != IPV6_DSTOPTS, | ||
| 1121 | &opt2->dst1opt, &p); | ||
| 1122 | if (err) | ||
| 1123 | goto out; | ||
| 1124 | 1104 | ||
| 1125 | opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + | 1105 | opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + |
| 1126 | (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + | 1106 | (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + |
| @@ -1128,37 +1108,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, | |||
| 1128 | opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0); | 1108 | opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0); |
| 1129 | 1109 | ||
| 1130 | return opt2; | 1110 | return opt2; |
| 1131 | out: | ||
| 1132 | sock_kfree_s(sk, opt2, opt2->tot_len); | ||
| 1133 | return ERR_PTR(err); | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | /** | ||
| 1137 | * ipv6_renew_options_kern - replace a specific ext hdr with a new one. | ||
| 1138 | * | ||
| 1139 | * @sk: sock from which to allocate memory | ||
| 1140 | * @opt: original options | ||
| 1141 | * @newtype: option type to replace in @opt | ||
| 1142 | * @newopt: new option of type @newtype to replace (kernel-mem) | ||
| 1143 | * @newoptlen: length of @newopt | ||
| 1144 | * | ||
| 1145 | * See ipv6_renew_options(). The difference is that @newopt is | ||
| 1146 | * kernel memory, rather than user memory. | ||
| 1147 | */ | ||
| 1148 | struct ipv6_txoptions * | ||
| 1149 | ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt, | ||
| 1150 | int newtype, struct ipv6_opt_hdr *newopt, | ||
| 1151 | int newoptlen) | ||
| 1152 | { | ||
| 1153 | struct ipv6_txoptions *ret_val; | ||
| 1154 | const mm_segment_t old_fs = get_fs(); | ||
| 1155 | |||
| 1156 | set_fs(KERNEL_DS); | ||
| 1157 | ret_val = ipv6_renew_options(sk, opt, newtype, | ||
| 1158 | (struct ipv6_opt_hdr __user *)newopt, | ||
| 1159 | newoptlen); | ||
| 1160 | set_fs(old_fs); | ||
| 1161 | return ret_val; | ||
| 1162 | } | 1111 | } |
| 1163 | 1112 | ||
| 1164 | struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, | 1113 | struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1fb2f3118d60..d212738e9d10 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
| @@ -935,20 +935,19 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
| 935 | { | 935 | { |
| 936 | struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, | 936 | struct fib6_info *leaf = rcu_dereference_protected(fn->leaf, |
| 937 | lockdep_is_held(&rt->fib6_table->tb6_lock)); | 937 | lockdep_is_held(&rt->fib6_table->tb6_lock)); |
| 938 | enum fib_event_type event = FIB_EVENT_ENTRY_ADD; | 938 | struct fib6_info *iter = NULL; |
| 939 | struct fib6_info *iter = NULL, *match = NULL; | ||
| 940 | struct fib6_info __rcu **ins; | 939 | struct fib6_info __rcu **ins; |
| 940 | struct fib6_info __rcu **fallback_ins = NULL; | ||
| 941 | int replace = (info->nlh && | 941 | int replace = (info->nlh && |
| 942 | (info->nlh->nlmsg_flags & NLM_F_REPLACE)); | 942 | (info->nlh->nlmsg_flags & NLM_F_REPLACE)); |
| 943 | int append = (info->nlh && | ||
| 944 | (info->nlh->nlmsg_flags & NLM_F_APPEND)); | ||
| 945 | int add = (!info->nlh || | 943 | int add = (!info->nlh || |
| 946 | (info->nlh->nlmsg_flags & NLM_F_CREATE)); | 944 | (info->nlh->nlmsg_flags & NLM_F_CREATE)); |
| 947 | int found = 0; | 945 | int found = 0; |
| 946 | bool rt_can_ecmp = rt6_qualify_for_ecmp(rt); | ||
| 948 | u16 nlflags = NLM_F_EXCL; | 947 | u16 nlflags = NLM_F_EXCL; |
| 949 | int err; | 948 | int err; |
| 950 | 949 | ||
| 951 | if (append) | 950 | if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND)) |
| 952 | nlflags |= NLM_F_APPEND; | 951 | nlflags |= NLM_F_APPEND; |
| 953 | 952 | ||
| 954 | ins = &fn->leaf; | 953 | ins = &fn->leaf; |
| @@ -970,8 +969,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
| 970 | 969 | ||
| 971 | nlflags &= ~NLM_F_EXCL; | 970 | nlflags &= ~NLM_F_EXCL; |
| 972 | if (replace) { | 971 | if (replace) { |
| 973 | found++; | 972 | if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) { |
| 974 | break; | 973 | found++; |
| 974 | break; | ||
| 975 | } | ||
| 976 | if (rt_can_ecmp) | ||
| 977 | fallback_ins = fallback_ins ?: ins; | ||
| 978 | goto next_iter; | ||
| 975 | } | 979 | } |
| 976 | 980 | ||
| 977 | if (rt6_duplicate_nexthop(iter, rt)) { | 981 | if (rt6_duplicate_nexthop(iter, rt)) { |
| @@ -986,51 +990,71 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
| 986 | fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); | 990 | fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); |
| 987 | return -EEXIST; | 991 | return -EEXIST; |
| 988 | } | 992 | } |
| 989 | 993 | /* If we have the same destination and the same metric, | |
| 990 | /* first route that matches */ | 994 | * but not the same gateway, then the route we try to |
| 991 | if (!match) | 995 | * add is sibling to this route, increment our counter |
| 992 | match = iter; | 996 | * of siblings, and later we will add our route to the |
| 997 | * list. | ||
| 998 | * Only static routes (which don't have flag | ||
| 999 | * RTF_EXPIRES) are used for ECMPv6. | ||
| 1000 | * | ||
| 1001 | * To avoid long list, we only had siblings if the | ||
| 1002 | * route have a gateway. | ||
| 1003 | */ | ||
| 1004 | if (rt_can_ecmp && | ||
| 1005 | rt6_qualify_for_ecmp(iter)) | ||
| 1006 | rt->fib6_nsiblings++; | ||
| 993 | } | 1007 | } |
| 994 | 1008 | ||
| 995 | if (iter->fib6_metric > rt->fib6_metric) | 1009 | if (iter->fib6_metric > rt->fib6_metric) |
| 996 | break; | 1010 | break; |
| 997 | 1011 | ||
| 1012 | next_iter: | ||
| 998 | ins = &iter->fib6_next; | 1013 | ins = &iter->fib6_next; |
| 999 | } | 1014 | } |
| 1000 | 1015 | ||
| 1016 | if (fallback_ins && !found) { | ||
| 1017 | /* No ECMP-able route found, replace first non-ECMP one */ | ||
| 1018 | ins = fallback_ins; | ||
| 1019 | iter = rcu_dereference_protected(*ins, | ||
| 1020 | lockdep_is_held(&rt->fib6_table->tb6_lock)); | ||
| 1021 | found++; | ||
| 1022 | } | ||
| 1023 | |||
| 1001 | /* Reset round-robin state, if necessary */ | 1024 | /* Reset round-robin state, if necessary */ |
| 1002 | if (ins == &fn->leaf) | 1025 | if (ins == &fn->leaf) |
| 1003 | fn->rr_ptr = NULL; | 1026 | fn->rr_ptr = NULL; |
| 1004 | 1027 | ||
| 1005 | /* Link this route to others same route. */ | 1028 | /* Link this route to others same route. */ |
| 1006 | if (append && match) { | 1029 | if (rt->fib6_nsiblings) { |
| 1030 | unsigned int fib6_nsiblings; | ||
| 1007 | struct fib6_info *sibling, *temp_sibling; | 1031 | struct fib6_info *sibling, *temp_sibling; |
| 1008 | 1032 | ||
| 1009 | if (rt->fib6_flags & RTF_REJECT) { | 1033 | /* Find the first route that have the same metric */ |
| 1010 | NL_SET_ERR_MSG(extack, | 1034 | sibling = leaf; |
| 1011 | "Can not append a REJECT route"); | 1035 | while (sibling) { |
| 1012 | return -EINVAL; | 1036 | if (sibling->fib6_metric == rt->fib6_metric && |
| 1013 | } else if (match->fib6_flags & RTF_REJECT) { | 1037 | rt6_qualify_for_ecmp(sibling)) { |
| 1014 | NL_SET_ERR_MSG(extack, | 1038 | list_add_tail(&rt->fib6_siblings, |
| 1015 | "Can not append to a REJECT route"); | 1039 | &sibling->fib6_siblings); |
| 1016 | return -EINVAL; | 1040 | break; |
| 1041 | } | ||
| 1042 | sibling = rcu_dereference_protected(sibling->fib6_next, | ||
| 1043 | lockdep_is_held(&rt->fib6_table->tb6_lock)); | ||
| 1017 | } | 1044 | } |
| 1018 | event = FIB_EVENT_ENTRY_APPEND; | ||
| 1019 | rt->fib6_nsiblings = match->fib6_nsiblings; | ||
| 1020 | list_add_tail(&rt->fib6_siblings, &match->fib6_siblings); | ||
| 1021 | match->fib6_nsiblings++; | ||
| 1022 | |||
| 1023 | /* For each sibling in the list, increment the counter of | 1045 | /* For each sibling in the list, increment the counter of |
| 1024 | * siblings. BUG() if counters does not match, list of siblings | 1046 | * siblings. BUG() if counters does not match, list of siblings |
| 1025 | * is broken! | 1047 | * is broken! |
| 1026 | */ | 1048 | */ |
| 1049 | fib6_nsiblings = 0; | ||
| 1027 | list_for_each_entry_safe(sibling, temp_sibling, | 1050 | list_for_each_entry_safe(sibling, temp_sibling, |
| 1028 | &match->fib6_siblings, fib6_siblings) { | 1051 | &rt->fib6_siblings, fib6_siblings) { |
| 1029 | sibling->fib6_nsiblings++; | 1052 | sibling->fib6_nsiblings++; |
| 1030 | BUG_ON(sibling->fib6_nsiblings != match->fib6_nsiblings); | 1053 | BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings); |
| 1054 | fib6_nsiblings++; | ||
| 1031 | } | 1055 | } |
| 1032 | 1056 | BUG_ON(fib6_nsiblings != rt->fib6_nsiblings); | |
| 1033 | rt6_multipath_rebalance(match); | 1057 | rt6_multipath_rebalance(temp_sibling); |
| 1034 | } | 1058 | } |
| 1035 | 1059 | ||
| 1036 | /* | 1060 | /* |
| @@ -1043,8 +1067,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
| 1043 | add: | 1067 | add: |
| 1044 | nlflags |= NLM_F_CREATE; | 1068 | nlflags |= NLM_F_CREATE; |
| 1045 | 1069 | ||
| 1046 | err = call_fib6_entry_notifiers(info->nl_net, event, rt, | 1070 | err = call_fib6_entry_notifiers(info->nl_net, |
| 1047 | extack); | 1071 | FIB_EVENT_ENTRY_ADD, |
| 1072 | rt, extack); | ||
| 1048 | if (err) | 1073 | if (err) |
| 1049 | return err; | 1074 | return err; |
| 1050 | 1075 | ||
| @@ -1062,7 +1087,7 @@ add: | |||
| 1062 | } | 1087 | } |
| 1063 | 1088 | ||
| 1064 | } else { | 1089 | } else { |
| 1065 | struct fib6_info *tmp; | 1090 | int nsiblings; |
| 1066 | 1091 | ||
| 1067 | if (!found) { | 1092 | if (!found) { |
| 1068 | if (add) | 1093 | if (add) |
| @@ -1077,57 +1102,48 @@ add: | |||
| 1077 | if (err) | 1102 | if (err) |
| 1078 | return err; | 1103 | return err; |
| 1079 | 1104 | ||
| 1080 | /* if route being replaced has siblings, set tmp to | ||
| 1081 | * last one, otherwise tmp is current route. this is | ||
| 1082 | * used to set fib6_next for new route | ||
| 1083 | */ | ||
| 1084 | if (iter->fib6_nsiblings) | ||
| 1085 | tmp = list_last_entry(&iter->fib6_siblings, | ||
| 1086 | struct fib6_info, | ||
| 1087 | fib6_siblings); | ||
| 1088 | else | ||
| 1089 | tmp = iter; | ||
| 1090 | |||
| 1091 | /* insert new route */ | ||
| 1092 | atomic_inc(&rt->fib6_ref); | 1105 | atomic_inc(&rt->fib6_ref); |
| 1093 | rcu_assign_pointer(rt->fib6_node, fn); | 1106 | rcu_assign_pointer(rt->fib6_node, fn); |
| 1094 | rt->fib6_next = tmp->fib6_next; | 1107 | rt->fib6_next = iter->fib6_next; |
| 1095 | rcu_assign_pointer(*ins, rt); | 1108 | rcu_assign_pointer(*ins, rt); |
| 1096 | |||
| 1097 | if (!info->skip_notify) | 1109 | if (!info->skip_notify) |
| 1098 | inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); | 1110 | inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); |
| 1099 | if (!(fn->fn_flags & RTN_RTINFO)) { | 1111 | if (!(fn->fn_flags & RTN_RTINFO)) { |
| 1100 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; | 1112 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; |
| 1101 | fn->fn_flags |= RTN_RTINFO; | 1113 | fn->fn_flags |= RTN_RTINFO; |
| 1102 | } | 1114 | } |
| 1115 | nsiblings = iter->fib6_nsiblings; | ||
| 1116 | iter->fib6_node = NULL; | ||
| 1117 | fib6_purge_rt(iter, fn, info->nl_net); | ||
| 1118 | if (rcu_access_pointer(fn->rr_ptr) == iter) | ||
| 1119 | fn->rr_ptr = NULL; | ||
| 1120 | fib6_info_release(iter); | ||
| 1103 | 1121 | ||
| 1104 | /* delete old route */ | 1122 | if (nsiblings) { |
| 1105 | rt = iter; | ||
| 1106 | |||
| 1107 | if (rt->fib6_nsiblings) { | ||
| 1108 | struct fib6_info *tmp; | ||
| 1109 | |||
| 1110 | /* Replacing an ECMP route, remove all siblings */ | 1123 | /* Replacing an ECMP route, remove all siblings */ |
| 1111 | list_for_each_entry_safe(iter, tmp, &rt->fib6_siblings, | 1124 | ins = &rt->fib6_next; |
| 1112 | fib6_siblings) { | 1125 | iter = rcu_dereference_protected(*ins, |
| 1113 | iter->fib6_node = NULL; | 1126 | lockdep_is_held(&rt->fib6_table->tb6_lock)); |
| 1114 | fib6_purge_rt(iter, fn, info->nl_net); | 1127 | while (iter) { |
| 1115 | if (rcu_access_pointer(fn->rr_ptr) == iter) | 1128 | if (iter->fib6_metric > rt->fib6_metric) |
| 1116 | fn->rr_ptr = NULL; | 1129 | break; |
| 1117 | fib6_info_release(iter); | 1130 | if (rt6_qualify_for_ecmp(iter)) { |
| 1118 | 1131 | *ins = iter->fib6_next; | |
| 1119 | rt->fib6_nsiblings--; | 1132 | iter->fib6_node = NULL; |
| 1120 | info->nl_net->ipv6.rt6_stats->fib_rt_entries--; | 1133 | fib6_purge_rt(iter, fn, info->nl_net); |
| 1134 | if (rcu_access_pointer(fn->rr_ptr) == iter) | ||
| 1135 | fn->rr_ptr = NULL; | ||
| 1136 | fib6_info_release(iter); | ||
| 1137 | nsiblings--; | ||
| 1138 | info->nl_net->ipv6.rt6_stats->fib_rt_entries--; | ||
| 1139 | } else { | ||
| 1140 | ins = &iter->fib6_next; | ||
| 1141 | } | ||
| 1142 | iter = rcu_dereference_protected(*ins, | ||
| 1143 | lockdep_is_held(&rt->fib6_table->tb6_lock)); | ||
| 1121 | } | 1144 | } |
| 1145 | WARN_ON(nsiblings != 0); | ||
| 1122 | } | 1146 | } |
| 1123 | |||
| 1124 | WARN_ON(rt->fib6_nsiblings != 0); | ||
| 1125 | |||
| 1126 | rt->fib6_node = NULL; | ||
| 1127 | fib6_purge_rt(rt, fn, info->nl_net); | ||
| 1128 | if (rcu_access_pointer(fn->rr_ptr) == rt) | ||
| 1129 | fn->rr_ptr = NULL; | ||
| 1130 | fib6_info_release(rt); | ||
| 1131 | } | 1147 | } |
| 1132 | 1148 | ||
| 1133 | return 0; | 1149 | return 0; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index c8cf2fdbb13b..cd2cfb04e5d8 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -927,7 +927,6 @@ tx_err: | |||
| 927 | static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | 927 | static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, |
| 928 | struct net_device *dev) | 928 | struct net_device *dev) |
| 929 | { | 929 | { |
| 930 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | ||
| 931 | struct ip6_tnl *t = netdev_priv(dev); | 930 | struct ip6_tnl *t = netdev_priv(dev); |
| 932 | struct dst_entry *dst = skb_dst(skb); | 931 | struct dst_entry *dst = skb_dst(skb); |
| 933 | struct net_device_stats *stats; | 932 | struct net_device_stats *stats; |
| @@ -1010,6 +1009,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
| 1010 | goto tx_err; | 1009 | goto tx_err; |
| 1011 | } | 1010 | } |
| 1012 | } else { | 1011 | } else { |
| 1012 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | ||
| 1013 | |||
| 1013 | switch (skb->protocol) { | 1014 | switch (skb->protocol) { |
| 1014 | case htons(ETH_P_IP): | 1015 | case htons(ETH_P_IP): |
| 1015 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 1016 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4d780c7f0130..568ca4187cd1 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
| @@ -398,6 +398,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
| 398 | case IPV6_DSTOPTS: | 398 | case IPV6_DSTOPTS: |
| 399 | { | 399 | { |
| 400 | struct ipv6_txoptions *opt; | 400 | struct ipv6_txoptions *opt; |
| 401 | struct ipv6_opt_hdr *new = NULL; | ||
| 402 | |||
| 403 | /* hop-by-hop / destination options are privileged option */ | ||
| 404 | retv = -EPERM; | ||
| 405 | if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) | ||
| 406 | break; | ||
| 401 | 407 | ||
| 402 | /* remove any sticky options header with a zero option | 408 | /* remove any sticky options header with a zero option |
| 403 | * length, per RFC3542. | 409 | * length, per RFC3542. |
| @@ -409,17 +415,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
| 409 | else if (optlen < sizeof(struct ipv6_opt_hdr) || | 415 | else if (optlen < sizeof(struct ipv6_opt_hdr) || |
| 410 | optlen & 0x7 || optlen > 8 * 255) | 416 | optlen & 0x7 || optlen > 8 * 255) |
| 411 | goto e_inval; | 417 | goto e_inval; |
| 412 | 418 | else { | |
| 413 | /* hop-by-hop / destination options are privileged option */ | 419 | new = memdup_user(optval, optlen); |
| 414 | retv = -EPERM; | 420 | if (IS_ERR(new)) { |
| 415 | if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) | 421 | retv = PTR_ERR(new); |
| 416 | break; | 422 | break; |
| 423 | } | ||
| 424 | if (unlikely(ipv6_optlen(new) > optlen)) { | ||
| 425 | kfree(new); | ||
| 426 | goto e_inval; | ||
| 427 | } | ||
| 428 | } | ||
| 417 | 429 | ||
| 418 | opt = rcu_dereference_protected(np->opt, | 430 | opt = rcu_dereference_protected(np->opt, |
| 419 | lockdep_sock_is_held(sk)); | 431 | lockdep_sock_is_held(sk)); |
| 420 | opt = ipv6_renew_options(sk, opt, optname, | 432 | opt = ipv6_renew_options(sk, opt, optname, new); |
| 421 | (struct ipv6_opt_hdr __user *)optval, | 433 | kfree(new); |
| 422 | optlen); | ||
| 423 | if (IS_ERR(opt)) { | 434 | if (IS_ERR(opt)) { |
| 424 | retv = PTR_ERR(opt); | 435 | retv = PTR_ERR(opt); |
| 425 | break; | 436 | break; |
| @@ -718,8 +729,9 @@ done: | |||
| 718 | struct sockaddr_in6 *psin6; | 729 | struct sockaddr_in6 *psin6; |
| 719 | 730 | ||
| 720 | psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; | 731 | psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; |
| 721 | retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, | 732 | retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface, |
| 722 | &psin6->sin6_addr); | 733 | &psin6->sin6_addr, |
| 734 | MCAST_INCLUDE); | ||
| 723 | /* prior join w/ different source is ok */ | 735 | /* prior join w/ different source is ok */ |
| 724 | if (retv && retv != -EADDRINUSE) | 736 | if (retv && retv != -EADDRINUSE) |
| 725 | break; | 737 | break; |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index c0c74088f2af..2699be7202be 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
| @@ -95,6 +95,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, | |||
| 95 | int delta); | 95 | int delta); |
| 96 | static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, | 96 | static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, |
| 97 | struct inet6_dev *idev); | 97 | struct inet6_dev *idev); |
| 98 | static int __ipv6_dev_mc_inc(struct net_device *dev, | ||
| 99 | const struct in6_addr *addr, unsigned int mode); | ||
| 98 | 100 | ||
| 99 | #define MLD_QRV_DEFAULT 2 | 101 | #define MLD_QRV_DEFAULT 2 |
| 100 | /* RFC3810, 9.2. Query Interval */ | 102 | /* RFC3810, 9.2. Query Interval */ |
| @@ -132,7 +134,8 @@ static int unsolicited_report_interval(struct inet6_dev *idev) | |||
| 132 | return iv > 0 ? iv : 1; | 134 | return iv > 0 ? iv : 1; |
| 133 | } | 135 | } |
| 134 | 136 | ||
| 135 | int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | 137 | static int __ipv6_sock_mc_join(struct sock *sk, int ifindex, |
| 138 | const struct in6_addr *addr, unsigned int mode) | ||
| 136 | { | 139 | { |
| 137 | struct net_device *dev = NULL; | 140 | struct net_device *dev = NULL; |
| 138 | struct ipv6_mc_socklist *mc_lst; | 141 | struct ipv6_mc_socklist *mc_lst; |
| @@ -179,7 +182,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
| 179 | } | 182 | } |
| 180 | 183 | ||
| 181 | mc_lst->ifindex = dev->ifindex; | 184 | mc_lst->ifindex = dev->ifindex; |
| 182 | mc_lst->sfmode = MCAST_EXCLUDE; | 185 | mc_lst->sfmode = mode; |
| 183 | rwlock_init(&mc_lst->sflock); | 186 | rwlock_init(&mc_lst->sflock); |
| 184 | mc_lst->sflist = NULL; | 187 | mc_lst->sflist = NULL; |
| 185 | 188 | ||
| @@ -187,7 +190,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
| 187 | * now add/increase the group membership on the device | 190 | * now add/increase the group membership on the device |
| 188 | */ | 191 | */ |
| 189 | 192 | ||
| 190 | err = ipv6_dev_mc_inc(dev, addr); | 193 | err = __ipv6_dev_mc_inc(dev, addr, mode); |
| 191 | 194 | ||
| 192 | if (err) { | 195 | if (err) { |
| 193 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); | 196 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
| @@ -199,8 +202,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
| 199 | 202 | ||
| 200 | return 0; | 203 | return 0; |
| 201 | } | 204 | } |
| 205 | |||
| 206 | int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | ||
| 207 | { | ||
| 208 | return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE); | ||
| 209 | } | ||
| 202 | EXPORT_SYMBOL(ipv6_sock_mc_join); | 210 | EXPORT_SYMBOL(ipv6_sock_mc_join); |
| 203 | 211 | ||
| 212 | int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, | ||
| 213 | const struct in6_addr *addr, unsigned int mode) | ||
| 214 | { | ||
| 215 | return __ipv6_sock_mc_join(sk, ifindex, addr, mode); | ||
| 216 | } | ||
| 217 | |||
| 204 | /* | 218 | /* |
| 205 | * socket leave on multicast group | 219 | * socket leave on multicast group |
| 206 | */ | 220 | */ |
| @@ -646,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, | |||
| 646 | return rv; | 660 | return rv; |
| 647 | } | 661 | } |
| 648 | 662 | ||
| 649 | static void igmp6_group_added(struct ifmcaddr6 *mc) | 663 | static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode) |
| 650 | { | 664 | { |
| 651 | struct net_device *dev = mc->idev->dev; | 665 | struct net_device *dev = mc->idev->dev; |
| 652 | char buf[MAX_ADDR_LEN]; | 666 | char buf[MAX_ADDR_LEN]; |
| @@ -672,7 +686,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc) | |||
| 672 | } | 686 | } |
| 673 | /* else v2 */ | 687 | /* else v2 */ |
| 674 | 688 | ||
| 675 | mc->mca_crcount = mc->idev->mc_qrv; | 689 | /* Based on RFC3810 6.1, for newly added INCLUDE SSM, we |
| 690 | * should not send filter-mode change record as the mode | ||
| 691 | * should be from IN() to IN(A). | ||
| 692 | */ | ||
| 693 | if (mode == MCAST_EXCLUDE) | ||
| 694 | mc->mca_crcount = mc->idev->mc_qrv; | ||
| 695 | |||
| 676 | mld_ifc_event(mc->idev); | 696 | mld_ifc_event(mc->idev); |
| 677 | } | 697 | } |
| 678 | 698 | ||
| @@ -770,13 +790,14 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) | |||
| 770 | spin_lock_bh(&im->mca_lock); | 790 | spin_lock_bh(&im->mca_lock); |
| 771 | if (pmc) { | 791 | if (pmc) { |
| 772 | im->idev = pmc->idev; | 792 | im->idev = pmc->idev; |
| 773 | im->mca_crcount = idev->mc_qrv; | ||
| 774 | im->mca_sfmode = pmc->mca_sfmode; | 793 | im->mca_sfmode = pmc->mca_sfmode; |
| 775 | if (pmc->mca_sfmode == MCAST_INCLUDE) { | 794 | if (pmc->mca_sfmode == MCAST_INCLUDE) { |
| 776 | im->mca_tomb = pmc->mca_tomb; | 795 | im->mca_tomb = pmc->mca_tomb; |
| 777 | im->mca_sources = pmc->mca_sources; | 796 | im->mca_sources = pmc->mca_sources; |
| 778 | for (psf = im->mca_sources; psf; psf = psf->sf_next) | 797 | for (psf = im->mca_sources; psf; psf = psf->sf_next) |
| 779 | psf->sf_crcount = im->mca_crcount; | 798 | psf->sf_crcount = idev->mc_qrv; |
| 799 | } else { | ||
| 800 | im->mca_crcount = idev->mc_qrv; | ||
| 780 | } | 801 | } |
| 781 | in6_dev_put(pmc->idev); | 802 | in6_dev_put(pmc->idev); |
| 782 | kfree(pmc); | 803 | kfree(pmc); |
| @@ -831,7 +852,8 @@ static void ma_put(struct ifmcaddr6 *mc) | |||
| 831 | } | 852 | } |
| 832 | 853 | ||
| 833 | static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, | 854 | static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, |
| 834 | const struct in6_addr *addr) | 855 | const struct in6_addr *addr, |
| 856 | unsigned int mode) | ||
| 835 | { | 857 | { |
| 836 | struct ifmcaddr6 *mc; | 858 | struct ifmcaddr6 *mc; |
| 837 | 859 | ||
| @@ -849,9 +871,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, | |||
| 849 | refcount_set(&mc->mca_refcnt, 1); | 871 | refcount_set(&mc->mca_refcnt, 1); |
| 850 | spin_lock_init(&mc->mca_lock); | 872 | spin_lock_init(&mc->mca_lock); |
| 851 | 873 | ||
| 852 | /* initial mode is (EX, empty) */ | 874 | mc->mca_sfmode = mode; |
| 853 | mc->mca_sfmode = MCAST_EXCLUDE; | 875 | mc->mca_sfcount[mode] = 1; |
| 854 | mc->mca_sfcount[MCAST_EXCLUDE] = 1; | ||
| 855 | 876 | ||
| 856 | if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || | 877 | if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || |
| 857 | IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) | 878 | IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) |
| @@ -863,7 +884,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, | |||
| 863 | /* | 884 | /* |
| 864 | * device multicast group inc (add if not found) | 885 | * device multicast group inc (add if not found) |
| 865 | */ | 886 | */ |
| 866 | int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) | 887 | static int __ipv6_dev_mc_inc(struct net_device *dev, |
| 888 | const struct in6_addr *addr, unsigned int mode) | ||
| 867 | { | 889 | { |
| 868 | struct ifmcaddr6 *mc; | 890 | struct ifmcaddr6 *mc; |
| 869 | struct inet6_dev *idev; | 891 | struct inet6_dev *idev; |
| @@ -887,14 +909,13 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) | |||
| 887 | if (ipv6_addr_equal(&mc->mca_addr, addr)) { | 909 | if (ipv6_addr_equal(&mc->mca_addr, addr)) { |
| 888 | mc->mca_users++; | 910 | mc->mca_users++; |
| 889 | write_unlock_bh(&idev->lock); | 911 | write_unlock_bh(&idev->lock); |
| 890 | ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0, | 912 | ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0); |
| 891 | NULL, 0); | ||
| 892 | in6_dev_put(idev); | 913 | in6_dev_put(idev); |
| 893 | return 0; | 914 | return 0; |
| 894 | } | 915 | } |
| 895 | } | 916 | } |
| 896 | 917 | ||
| 897 | mc = mca_alloc(idev, addr); | 918 | mc = mca_alloc(idev, addr, mode); |
| 898 | if (!mc) { | 919 | if (!mc) { |
| 899 | write_unlock_bh(&idev->lock); | 920 | write_unlock_bh(&idev->lock); |
| 900 | in6_dev_put(idev); | 921 | in6_dev_put(idev); |
| @@ -911,11 +932,16 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) | |||
| 911 | write_unlock_bh(&idev->lock); | 932 | write_unlock_bh(&idev->lock); |
| 912 | 933 | ||
| 913 | mld_del_delrec(idev, mc); | 934 | mld_del_delrec(idev, mc); |
| 914 | igmp6_group_added(mc); | 935 | igmp6_group_added(mc, mode); |
| 915 | ma_put(mc); | 936 | ma_put(mc); |
| 916 | return 0; | 937 | return 0; |
| 917 | } | 938 | } |
| 918 | 939 | ||
| 940 | int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) | ||
| 941 | { | ||
| 942 | return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE); | ||
| 943 | } | ||
| 944 | |||
| 919 | /* | 945 | /* |
| 920 | * device multicast group del | 946 | * device multicast group del |
| 921 | */ | 947 | */ |
| @@ -1751,7 +1777,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
| 1751 | 1777 | ||
| 1752 | psf_next = psf->sf_next; | 1778 | psf_next = psf->sf_next; |
| 1753 | 1779 | ||
| 1754 | if (!is_in(pmc, psf, type, gdeleted, sdeleted)) { | 1780 | if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) { |
| 1755 | psf_prev = psf; | 1781 | psf_prev = psf; |
| 1756 | continue; | 1782 | continue; |
| 1757 | } | 1783 | } |
| @@ -2066,7 +2092,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev) | |||
| 2066 | if (pmc->mca_sfcount[MCAST_EXCLUDE]) | 2092 | if (pmc->mca_sfcount[MCAST_EXCLUDE]) |
| 2067 | type = MLD2_CHANGE_TO_EXCLUDE; | 2093 | type = MLD2_CHANGE_TO_EXCLUDE; |
| 2068 | else | 2094 | else |
| 2069 | type = MLD2_CHANGE_TO_INCLUDE; | 2095 | type = MLD2_ALLOW_NEW_SOURCES; |
| 2070 | skb = add_grec(skb, pmc, type, 0, 0, 1); | 2096 | skb = add_grec(skb, pmc, type, 0, 0, 1); |
| 2071 | spin_unlock_bh(&pmc->mca_lock); | 2097 | spin_unlock_bh(&pmc->mca_lock); |
| 2072 | } | 2098 | } |
| @@ -2546,7 +2572,7 @@ void ipv6_mc_up(struct inet6_dev *idev) | |||
| 2546 | ipv6_mc_reset(idev); | 2572 | ipv6_mc_reset(idev); |
| 2547 | for (i = idev->mc_list; i; i = i->next) { | 2573 | for (i = idev->mc_list; i; i = i->next) { |
| 2548 | mld_del_delrec(idev, i); | 2574 | mld_del_delrec(idev, i); |
| 2549 | igmp6_group_added(i); | 2575 | igmp6_group_added(i, i->mca_sfmode); |
| 2550 | } | 2576 | } |
| 2551 | read_unlock_bh(&idev->lock); | 2577 | read_unlock_bh(&idev->lock); |
| 2552 | } | 2578 | } |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index e640d2f3c55c..0ec273997d1d 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
| @@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) | |||
| 811 | return; | 811 | return; |
| 812 | } | 812 | } |
| 813 | } | 813 | } |
| 814 | if (ndopts.nd_opts_nonce) | 814 | if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1) |
| 815 | memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6); | 815 | memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6); |
| 816 | 816 | ||
| 817 | inc = ipv6_addr_is_multicast(daddr); | 817 | inc = ipv6_addr_is_multicast(daddr); |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 7eab959734bc..daf2e9e9193d 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
| @@ -1909,6 +1909,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = { | |||
| 1909 | .checkentry = icmp6_checkentry, | 1909 | .checkentry = icmp6_checkentry, |
| 1910 | .proto = IPPROTO_ICMPV6, | 1910 | .proto = IPPROTO_ICMPV6, |
| 1911 | .family = NFPROTO_IPV6, | 1911 | .family = NFPROTO_IPV6, |
| 1912 | .me = THIS_MODULE, | ||
| 1912 | }, | 1913 | }, |
| 1913 | }; | 1914 | }; |
| 1914 | 1915 | ||
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index a452d99c9f52..e4d9e6976d3c 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
| @@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) | |||
| 585 | fq->q.meat == fq->q.len && | 585 | fq->q.meat == fq->q.len && |
| 586 | nf_ct_frag6_reasm(fq, skb, dev)) | 586 | nf_ct_frag6_reasm(fq, skb, dev)) |
| 587 | ret = 0; | 587 | ret = 0; |
| 588 | else | ||
| 589 | skb_dst_drop(skb); | ||
| 588 | 590 | ||
| 589 | out_unlock: | 591 | out_unlock: |
| 590 | spin_unlock_bh(&fq->q.lock); | 592 | spin_unlock_bh(&fq->q.lock); |
diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c index bf1d6c421e3b..5dfd33af6451 100644 --- a/net/ipv6/netfilter/nf_tproxy_ipv6.c +++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c | |||
| @@ -55,7 +55,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, | |||
| 55 | * to a listener socket if there's one */ | 55 | * to a listener socket if there's one */ |
| 56 | struct sock *sk2; | 56 | struct sock *sk2; |
| 57 | 57 | ||
| 58 | sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, hp, tproto, | 58 | sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto, |
| 59 | &iph->saddr, | 59 | &iph->saddr, |
| 60 | nf_tproxy_laddr6(skb, laddr, &iph->daddr), | 60 | nf_tproxy_laddr6(skb, laddr, &iph->daddr), |
| 61 | hp->source, | 61 | hp->source, |
| @@ -72,7 +72,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, | |||
| 72 | EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6); | 72 | EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6); |
| 73 | 73 | ||
| 74 | struct sock * | 74 | struct sock * |
| 75 | nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, | 75 | nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, |
| 76 | const u8 protocol, | 76 | const u8 protocol, |
| 77 | const struct in6_addr *saddr, const struct in6_addr *daddr, | 77 | const struct in6_addr *saddr, const struct in6_addr *daddr, |
| 78 | const __be16 sport, const __be16 dport, | 78 | const __be16 sport, const __be16 dport, |
| @@ -80,15 +80,20 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, | |||
| 80 | const enum nf_tproxy_lookup_t lookup_type) | 80 | const enum nf_tproxy_lookup_t lookup_type) |
| 81 | { | 81 | { |
| 82 | struct sock *sk; | 82 | struct sock *sk; |
| 83 | struct tcphdr *tcph; | ||
| 84 | 83 | ||
| 85 | switch (protocol) { | 84 | switch (protocol) { |
| 86 | case IPPROTO_TCP: | 85 | case IPPROTO_TCP: { |
| 86 | struct tcphdr _hdr, *hp; | ||
| 87 | |||
| 88 | hp = skb_header_pointer(skb, thoff, | ||
| 89 | sizeof(struct tcphdr), &_hdr); | ||
| 90 | if (hp == NULL) | ||
| 91 | return NULL; | ||
| 92 | |||
| 87 | switch (lookup_type) { | 93 | switch (lookup_type) { |
| 88 | case NF_TPROXY_LOOKUP_LISTENER: | 94 | case NF_TPROXY_LOOKUP_LISTENER: |
| 89 | tcph = hp; | ||
| 90 | sk = inet6_lookup_listener(net, &tcp_hashinfo, skb, | 95 | sk = inet6_lookup_listener(net, &tcp_hashinfo, skb, |
| 91 | thoff + __tcp_hdrlen(tcph), | 96 | thoff + __tcp_hdrlen(hp), |
| 92 | saddr, sport, | 97 | saddr, sport, |
| 93 | daddr, ntohs(dport), | 98 | daddr, ntohs(dport), |
| 94 | in->ifindex, 0); | 99 | in->ifindex, 0); |
| @@ -110,6 +115,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, | |||
| 110 | BUG(); | 115 | BUG(); |
| 111 | } | 116 | } |
| 112 | break; | 117 | break; |
| 118 | } | ||
| 113 | case IPPROTO_UDP: | 119 | case IPPROTO_UDP: |
| 114 | sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, | 120 | sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, |
| 115 | in->ifindex); | 121 | in->ifindex); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 86a0e4333d42..2ce0bd17de4f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -3842,7 +3842,7 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt) | |||
| 3842 | lockdep_is_held(&rt->fib6_table->tb6_lock)); | 3842 | lockdep_is_held(&rt->fib6_table->tb6_lock)); |
| 3843 | while (iter) { | 3843 | while (iter) { |
| 3844 | if (iter->fib6_metric == rt->fib6_metric && | 3844 | if (iter->fib6_metric == rt->fib6_metric && |
| 3845 | iter->fib6_nsiblings) | 3845 | rt6_qualify_for_ecmp(iter)) |
| 3846 | return iter; | 3846 | return iter; |
| 3847 | iter = rcu_dereference_protected(iter->fib6_next, | 3847 | iter = rcu_dereference_protected(iter->fib6_next, |
| 3848 | lockdep_is_held(&rt->fib6_table->tb6_lock)); | 3848 | lockdep_is_held(&rt->fib6_table->tb6_lock)); |
| @@ -4388,6 +4388,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, | |||
| 4388 | rt = NULL; | 4388 | rt = NULL; |
| 4389 | goto cleanup; | 4389 | goto cleanup; |
| 4390 | } | 4390 | } |
| 4391 | if (!rt6_qualify_for_ecmp(rt)) { | ||
| 4392 | err = -EINVAL; | ||
| 4393 | NL_SET_ERR_MSG(extack, | ||
| 4394 | "Device only routes can not be added for IPv6 using the multipath API."); | ||
| 4395 | fib6_info_release(rt); | ||
| 4396 | goto cleanup; | ||
| 4397 | } | ||
| 4391 | 4398 | ||
| 4392 | rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1; | 4399 | rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1; |
| 4393 | 4400 | ||
| @@ -4439,7 +4446,6 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, | |||
| 4439 | */ | 4446 | */ |
| 4440 | cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | | 4447 | cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | |
| 4441 | NLM_F_REPLACE); | 4448 | NLM_F_REPLACE); |
| 4442 | cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_APPEND; | ||
| 4443 | nhn++; | 4449 | nhn++; |
| 4444 | } | 4450 | } |
| 4445 | 4451 | ||
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 19ccf0dc996c..a8854dd3e9c5 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
| @@ -101,7 +101,7 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb, | |||
| 101 | 101 | ||
| 102 | if (do_flowlabel > 0) { | 102 | if (do_flowlabel > 0) { |
| 103 | hash = skb_get_hash(skb); | 103 | hash = skb_get_hash(skb); |
| 104 | rol32(hash, 16); | 104 | hash = rol32(hash, 16); |
| 105 | flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; | 105 | flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; |
| 106 | } else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) { | 106 | } else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) { |
| 107 | flowlabel = ip6_flowlabel(inner_hdr); | 107 | flowlabel = ip6_flowlabel(inner_hdr); |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index dbd7d1fad277..f0a1c536ef15 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
| @@ -460,6 +460,13 @@ config NF_TABLES | |||
| 460 | 460 | ||
| 461 | if NF_TABLES | 461 | if NF_TABLES |
| 462 | 462 | ||
| 463 | config NF_TABLES_SET | ||
| 464 | tristate "Netfilter nf_tables set infrastructure" | ||
| 465 | help | ||
| 466 | This option enables the nf_tables set infrastructure that allows to | ||
| 467 | look up for elements in a set and to build one-way mappings between | ||
| 468 | matchings and actions. | ||
| 469 | |||
| 463 | config NF_TABLES_INET | 470 | config NF_TABLES_INET |
| 464 | depends on IPV6 | 471 | depends on IPV6 |
| 465 | select NF_TABLES_IPV4 | 472 | select NF_TABLES_IPV4 |
| @@ -493,24 +500,6 @@ config NFT_FLOW_OFFLOAD | |||
| 493 | This option adds the "flow_offload" expression that you can use to | 500 | This option adds the "flow_offload" expression that you can use to |
| 494 | choose what flows are placed into the hardware. | 501 | choose what flows are placed into the hardware. |
| 495 | 502 | ||
| 496 | config NFT_SET_RBTREE | ||
| 497 | tristate "Netfilter nf_tables rbtree set module" | ||
| 498 | help | ||
| 499 | This option adds the "rbtree" set type (Red Black tree) that is used | ||
| 500 | to build interval-based sets. | ||
| 501 | |||
| 502 | config NFT_SET_HASH | ||
| 503 | tristate "Netfilter nf_tables hash set module" | ||
| 504 | help | ||
| 505 | This option adds the "hash" set type that is used to build one-way | ||
| 506 | mappings between matchings and actions. | ||
| 507 | |||
| 508 | config NFT_SET_BITMAP | ||
| 509 | tristate "Netfilter nf_tables bitmap set module" | ||
| 510 | help | ||
| 511 | This option adds the "bitmap" set type that is used to build sets | ||
| 512 | whose keys are smaller or equal to 16 bits. | ||
| 513 | |||
| 514 | config NFT_COUNTER | 503 | config NFT_COUNTER |
| 515 | tristate "Netfilter nf_tables counter module" | 504 | tristate "Netfilter nf_tables counter module" |
| 516 | help | 505 | help |
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 44449389e527..8a76dced974d 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile | |||
| @@ -78,7 +78,11 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \ | |||
| 78 | nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \ | 78 | nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \ |
| 79 | nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o | 79 | nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o |
| 80 | 80 | ||
| 81 | nf_tables_set-objs := nf_tables_set_core.o \ | ||
| 82 | nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o | ||
| 83 | |||
| 81 | obj-$(CONFIG_NF_TABLES) += nf_tables.o | 84 | obj-$(CONFIG_NF_TABLES) += nf_tables.o |
| 85 | obj-$(CONFIG_NF_TABLES_SET) += nf_tables_set.o | ||
| 82 | obj-$(CONFIG_NFT_COMPAT) += nft_compat.o | 86 | obj-$(CONFIG_NFT_COMPAT) += nft_compat.o |
| 83 | obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o | 87 | obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o |
| 84 | obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o | 88 | obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o |
| @@ -91,9 +95,6 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o | |||
| 91 | obj-$(CONFIG_NFT_QUOTA) += nft_quota.o | 95 | obj-$(CONFIG_NFT_QUOTA) += nft_quota.o |
| 92 | obj-$(CONFIG_NFT_REJECT) += nft_reject.o | 96 | obj-$(CONFIG_NFT_REJECT) += nft_reject.o |
| 93 | obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o | 97 | obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o |
| 94 | obj-$(CONFIG_NFT_SET_RBTREE) += nft_set_rbtree.o | ||
| 95 | obj-$(CONFIG_NFT_SET_HASH) += nft_set_hash.o | ||
| 96 | obj-$(CONFIG_NFT_SET_BITMAP) += nft_set_bitmap.o | ||
| 97 | obj-$(CONFIG_NFT_COUNTER) += nft_counter.o | 98 | obj-$(CONFIG_NFT_COUNTER) += nft_counter.o |
| 98 | obj-$(CONFIG_NFT_LOG) += nft_log.o | 99 | obj-$(CONFIG_NFT_LOG) += nft_log.o |
| 99 | obj-$(CONFIG_NFT_MASQ) += nft_masq.o | 100 | obj-$(CONFIG_NFT_MASQ) += nft_masq.o |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3465da2a98bd..3d5280425027 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
| @@ -2043,7 +2043,7 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp) | |||
| 2043 | return -EOPNOTSUPP; | 2043 | return -EOPNOTSUPP; |
| 2044 | 2044 | ||
| 2045 | /* On boot, we can set this without any fancy locking. */ | 2045 | /* On boot, we can set this without any fancy locking. */ |
| 2046 | if (!nf_conntrack_htable_size) | 2046 | if (!nf_conntrack_hash) |
| 2047 | return param_set_uint(val, kp); | 2047 | return param_set_uint(val, kp); |
| 2048 | 2048 | ||
| 2049 | rc = kstrtouint(val, 0, &hashsize); | 2049 | rc = kstrtouint(val, 0, &hashsize); |
diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c new file mode 100644 index 000000000000..814789644bd3 --- /dev/null +++ b/net/netfilter/nf_tables_set_core.c | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #include <net/netfilter/nf_tables_core.h> | ||
| 3 | |||
| 4 | static int __init nf_tables_set_module_init(void) | ||
| 5 | { | ||
| 6 | nft_register_set(&nft_set_hash_fast_type); | ||
| 7 | nft_register_set(&nft_set_hash_type); | ||
| 8 | nft_register_set(&nft_set_rhash_type); | ||
| 9 | nft_register_set(&nft_set_bitmap_type); | ||
| 10 | nft_register_set(&nft_set_rbtree_type); | ||
| 11 | |||
| 12 | return 0; | ||
| 13 | } | ||
| 14 | |||
| 15 | static void __exit nf_tables_set_module_exit(void) | ||
| 16 | { | ||
| 17 | nft_unregister_set(&nft_set_rbtree_type); | ||
| 18 | nft_unregister_set(&nft_set_bitmap_type); | ||
| 19 | nft_unregister_set(&nft_set_rhash_type); | ||
| 20 | nft_unregister_set(&nft_set_hash_type); | ||
| 21 | nft_unregister_set(&nft_set_hash_fast_type); | ||
| 22 | } | ||
| 23 | |||
| 24 | module_init(nf_tables_set_module_init); | ||
| 25 | module_exit(nf_tables_set_module_exit); | ||
| 26 | |||
| 27 | MODULE_LICENSE("GPL"); | ||
| 28 | MODULE_ALIAS_NFT_SET(); | ||
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 8d1ff654e5af..32535eea51b2 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
| @@ -832,10 +832,18 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
| 832 | rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV])); | 832 | rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV])); |
| 833 | family = ctx->family; | 833 | family = ctx->family; |
| 834 | 834 | ||
| 835 | if (strcmp(tg_name, XT_ERROR_TARGET) == 0 || | ||
| 836 | strcmp(tg_name, XT_STANDARD_TARGET) == 0 || | ||
| 837 | strcmp(tg_name, "standard") == 0) | ||
| 838 | return ERR_PTR(-EINVAL); | ||
| 839 | |||
| 835 | /* Re-use the existing target if it's already loaded. */ | 840 | /* Re-use the existing target if it's already loaded. */ |
| 836 | list_for_each_entry(nft_target, &nft_target_list, head) { | 841 | list_for_each_entry(nft_target, &nft_target_list, head) { |
| 837 | struct xt_target *target = nft_target->ops.data; | 842 | struct xt_target *target = nft_target->ops.data; |
| 838 | 843 | ||
| 844 | if (!target->target) | ||
| 845 | continue; | ||
| 846 | |||
| 839 | if (nft_target_cmp(target, tg_name, rev, family)) | 847 | if (nft_target_cmp(target, tg_name, rev, family)) |
| 840 | return &nft_target->ops; | 848 | return &nft_target->ops; |
| 841 | } | 849 | } |
| @@ -844,6 +852,11 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
| 844 | if (IS_ERR(target)) | 852 | if (IS_ERR(target)) |
| 845 | return ERR_PTR(-ENOENT); | 853 | return ERR_PTR(-ENOENT); |
| 846 | 854 | ||
| 855 | if (!target->target) { | ||
| 856 | err = -EINVAL; | ||
| 857 | goto err; | ||
| 858 | } | ||
| 859 | |||
| 847 | if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) { | 860 | if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) { |
| 848 | err = -EINVAL; | 861 | err = -EINVAL; |
| 849 | goto err; | 862 | goto err; |
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c index d6626e01c7ee..128bc16f52dd 100644 --- a/net/netfilter/nft_set_bitmap.c +++ b/net/netfilter/nft_set_bitmap.c | |||
| @@ -296,7 +296,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features, | |||
| 296 | return true; | 296 | return true; |
| 297 | } | 297 | } |
| 298 | 298 | ||
| 299 | static struct nft_set_type nft_bitmap_type __read_mostly = { | 299 | struct nft_set_type nft_set_bitmap_type __read_mostly = { |
| 300 | .owner = THIS_MODULE, | 300 | .owner = THIS_MODULE, |
| 301 | .ops = { | 301 | .ops = { |
| 302 | .privsize = nft_bitmap_privsize, | 302 | .privsize = nft_bitmap_privsize, |
| @@ -314,20 +314,3 @@ static struct nft_set_type nft_bitmap_type __read_mostly = { | |||
| 314 | .get = nft_bitmap_get, | 314 | .get = nft_bitmap_get, |
| 315 | }, | 315 | }, |
| 316 | }; | 316 | }; |
| 317 | |||
| 318 | static int __init nft_bitmap_module_init(void) | ||
| 319 | { | ||
| 320 | return nft_register_set(&nft_bitmap_type); | ||
| 321 | } | ||
| 322 | |||
| 323 | static void __exit nft_bitmap_module_exit(void) | ||
| 324 | { | ||
| 325 | nft_unregister_set(&nft_bitmap_type); | ||
| 326 | } | ||
| 327 | |||
| 328 | module_init(nft_bitmap_module_init); | ||
| 329 | module_exit(nft_bitmap_module_exit); | ||
| 330 | |||
| 331 | MODULE_LICENSE("GPL"); | ||
| 332 | MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); | ||
| 333 | MODULE_ALIAS_NFT_SET(); | ||
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 6f9a1365a09f..72ef35b51cac 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
| @@ -654,7 +654,7 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features | |||
| 654 | return true; | 654 | return true; |
| 655 | } | 655 | } |
| 656 | 656 | ||
| 657 | static struct nft_set_type nft_rhash_type __read_mostly = { | 657 | struct nft_set_type nft_set_rhash_type __read_mostly = { |
| 658 | .owner = THIS_MODULE, | 658 | .owner = THIS_MODULE, |
| 659 | .features = NFT_SET_MAP | NFT_SET_OBJECT | | 659 | .features = NFT_SET_MAP | NFT_SET_OBJECT | |
| 660 | NFT_SET_TIMEOUT | NFT_SET_EVAL, | 660 | NFT_SET_TIMEOUT | NFT_SET_EVAL, |
| @@ -677,7 +677,7 @@ static struct nft_set_type nft_rhash_type __read_mostly = { | |||
| 677 | }, | 677 | }, |
| 678 | }; | 678 | }; |
| 679 | 679 | ||
| 680 | static struct nft_set_type nft_hash_type __read_mostly = { | 680 | struct nft_set_type nft_set_hash_type __read_mostly = { |
| 681 | .owner = THIS_MODULE, | 681 | .owner = THIS_MODULE, |
| 682 | .features = NFT_SET_MAP | NFT_SET_OBJECT, | 682 | .features = NFT_SET_MAP | NFT_SET_OBJECT, |
| 683 | .ops = { | 683 | .ops = { |
| @@ -697,7 +697,7 @@ static struct nft_set_type nft_hash_type __read_mostly = { | |||
| 697 | }, | 697 | }, |
| 698 | }; | 698 | }; |
| 699 | 699 | ||
| 700 | static struct nft_set_type nft_hash_fast_type __read_mostly = { | 700 | struct nft_set_type nft_set_hash_fast_type __read_mostly = { |
| 701 | .owner = THIS_MODULE, | 701 | .owner = THIS_MODULE, |
| 702 | .features = NFT_SET_MAP | NFT_SET_OBJECT, | 702 | .features = NFT_SET_MAP | NFT_SET_OBJECT, |
| 703 | .ops = { | 703 | .ops = { |
| @@ -716,26 +716,3 @@ static struct nft_set_type nft_hash_fast_type __read_mostly = { | |||
| 716 | .get = nft_hash_get, | 716 | .get = nft_hash_get, |
| 717 | }, | 717 | }, |
| 718 | }; | 718 | }; |
| 719 | |||
| 720 | static int __init nft_hash_module_init(void) | ||
| 721 | { | ||
| 722 | if (nft_register_set(&nft_hash_fast_type) || | ||
| 723 | nft_register_set(&nft_hash_type) || | ||
| 724 | nft_register_set(&nft_rhash_type)) | ||
| 725 | return 1; | ||
| 726 | return 0; | ||
| 727 | } | ||
| 728 | |||
| 729 | static void __exit nft_hash_module_exit(void) | ||
| 730 | { | ||
| 731 | nft_unregister_set(&nft_rhash_type); | ||
| 732 | nft_unregister_set(&nft_hash_type); | ||
| 733 | nft_unregister_set(&nft_hash_fast_type); | ||
| 734 | } | ||
| 735 | |||
| 736 | module_init(nft_hash_module_init); | ||
| 737 | module_exit(nft_hash_module_exit); | ||
| 738 | |||
| 739 | MODULE_LICENSE("GPL"); | ||
| 740 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | ||
| 741 | MODULE_ALIAS_NFT_SET(); | ||
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 7f3a9a211034..1f8f257cb518 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
| @@ -462,7 +462,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features, | |||
| 462 | return true; | 462 | return true; |
| 463 | } | 463 | } |
| 464 | 464 | ||
| 465 | static struct nft_set_type nft_rbtree_type __read_mostly = { | 465 | struct nft_set_type nft_set_rbtree_type __read_mostly = { |
| 466 | .owner = THIS_MODULE, | 466 | .owner = THIS_MODULE, |
| 467 | .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT, | 467 | .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT, |
| 468 | .ops = { | 468 | .ops = { |
| @@ -481,20 +481,3 @@ static struct nft_set_type nft_rbtree_type __read_mostly = { | |||
| 481 | .get = nft_rbtree_get, | 481 | .get = nft_rbtree_get, |
| 482 | }, | 482 | }, |
| 483 | }; | 483 | }; |
| 484 | |||
| 485 | static int __init nft_rbtree_module_init(void) | ||
| 486 | { | ||
| 487 | return nft_register_set(&nft_rbtree_type); | ||
| 488 | } | ||
| 489 | |||
| 490 | static void __exit nft_rbtree_module_exit(void) | ||
| 491 | { | ||
| 492 | nft_unregister_set(&nft_rbtree_type); | ||
| 493 | } | ||
| 494 | |||
| 495 | module_init(nft_rbtree_module_init); | ||
| 496 | module_exit(nft_rbtree_module_exit); | ||
| 497 | |||
| 498 | MODULE_LICENSE("GPL"); | ||
| 499 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | ||
| 500 | MODULE_ALIAS_NFT_SET(); | ||
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 58fce4e749a9..d76550a8b642 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
| @@ -61,7 +61,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, | |||
| 61 | * addresses, this happens if the redirect already happened | 61 | * addresses, this happens if the redirect already happened |
| 62 | * and the current packet belongs to an already established | 62 | * and the current packet belongs to an already established |
| 63 | * connection */ | 63 | * connection */ |
| 64 | sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, | 64 | sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol, |
| 65 | iph->saddr, iph->daddr, | 65 | iph->saddr, iph->daddr, |
| 66 | hp->source, hp->dest, | 66 | hp->source, hp->dest, |
| 67 | skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED); | 67 | skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED); |
| @@ -77,7 +77,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, | |||
| 77 | else if (!sk) | 77 | else if (!sk) |
| 78 | /* no, there's no established connection, check if | 78 | /* no, there's no established connection, check if |
| 79 | * there's a listener on the redirected addr/port */ | 79 | * there's a listener on the redirected addr/port */ |
| 80 | sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol, | 80 | sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol, |
| 81 | iph->saddr, laddr, | 81 | iph->saddr, laddr, |
| 82 | hp->source, lport, | 82 | hp->source, lport, |
| 83 | skb->dev, NF_TPROXY_LOOKUP_LISTENER); | 83 | skb->dev, NF_TPROXY_LOOKUP_LISTENER); |
| @@ -150,7 +150,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 150 | * addresses, this happens if the redirect already happened | 150 | * addresses, this happens if the redirect already happened |
| 151 | * and the current packet belongs to an already established | 151 | * and the current packet belongs to an already established |
| 152 | * connection */ | 152 | * connection */ |
| 153 | sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, tproto, | 153 | sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto, |
| 154 | &iph->saddr, &iph->daddr, | 154 | &iph->saddr, &iph->daddr, |
| 155 | hp->source, hp->dest, | 155 | hp->source, hp->dest, |
| 156 | xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED); | 156 | xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED); |
| @@ -171,7 +171,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 171 | else if (!sk) | 171 | else if (!sk) |
| 172 | /* no there's no established connection, check if | 172 | /* no there's no established connection, check if |
| 173 | * there's a listener on the redirected addr/port */ | 173 | * there's a listener on the redirected addr/port */ |
| 174 | sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, | 174 | sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, |
| 175 | tproto, &iph->saddr, laddr, | 175 | tproto, &iph->saddr, laddr, |
| 176 | hp->source, lport, | 176 | hp->source, lport, |
| 177 | xt_in(par), NF_TPROXY_LOOKUP_LISTENER); | 177 | xt_in(par), NF_TPROXY_LOOKUP_LISTENER); |
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 2ceefa183cee..6a196e438b6c 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c | |||
| @@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, | |||
| 752 | pr_debug("Fragment %zd bytes remaining %zd", | 752 | pr_debug("Fragment %zd bytes remaining %zd", |
| 753 | frag_len, remaining_len); | 753 | frag_len, remaining_len); |
| 754 | 754 | ||
| 755 | pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, | 755 | pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0, |
| 756 | frag_len + LLCP_HEADER_SIZE, &err); | 756 | frag_len + LLCP_HEADER_SIZE, &err); |
| 757 | if (pdu == NULL) { | 757 | if (pdu == NULL) { |
| 758 | pr_err("Could not allocate PDU\n"); | 758 | pr_err("Could not allocate PDU (error=%d)\n", err); |
| 759 | continue; | 759 | len -= remaining_len; |
| 760 | if (len == 0) | ||
| 761 | len = err; | ||
| 762 | break; | ||
| 760 | } | 763 | } |
| 761 | 764 | ||
| 762 | pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); | 765 | pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); |
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index 9696ef96b719..1a30e165eeb4 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c | |||
| @@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, | |||
| 104 | __skb_pull(skb, nsh_len); | 104 | __skb_pull(skb, nsh_len); |
| 105 | 105 | ||
| 106 | skb_reset_mac_header(skb); | 106 | skb_reset_mac_header(skb); |
| 107 | skb_reset_mac_len(skb); | 107 | skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0; |
| 108 | skb->protocol = proto; | 108 | skb->protocol = proto; |
| 109 | 109 | ||
| 110 | features &= NETIF_F_SG; | 110 | features &= NETIF_F_SG; |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 57634bc3da74..9b27d0cd766d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -2878,6 +2878,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2878 | goto out_free; | 2878 | goto out_free; |
| 2879 | } else if (reserve) { | 2879 | } else if (reserve) { |
| 2880 | skb_reserve(skb, -reserve); | 2880 | skb_reserve(skb, -reserve); |
| 2881 | if (len < reserve) | ||
| 2882 | skb_reset_network_header(skb); | ||
| 2881 | } | 2883 | } |
| 2882 | 2884 | ||
| 2883 | /* Returns -EFAULT on error */ | 2885 | /* Returns -EFAULT on error */ |
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 2aa07b547b16..86e1e37eb4e8 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c | |||
| @@ -191,8 +191,13 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, | |||
| 191 | hdr->type = cpu_to_le32(type); | 191 | hdr->type = cpu_to_le32(type); |
| 192 | hdr->src_node_id = cpu_to_le32(from->sq_node); | 192 | hdr->src_node_id = cpu_to_le32(from->sq_node); |
| 193 | hdr->src_port_id = cpu_to_le32(from->sq_port); | 193 | hdr->src_port_id = cpu_to_le32(from->sq_port); |
| 194 | hdr->dst_node_id = cpu_to_le32(to->sq_node); | 194 | if (to->sq_port == QRTR_PORT_CTRL) { |
| 195 | hdr->dst_port_id = cpu_to_le32(to->sq_port); | 195 | hdr->dst_node_id = cpu_to_le32(node->nid); |
| 196 | hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST); | ||
| 197 | } else { | ||
| 198 | hdr->dst_node_id = cpu_to_le32(to->sq_node); | ||
| 199 | hdr->dst_port_id = cpu_to_le32(to->sq_port); | ||
| 200 | } | ||
| 196 | 201 | ||
| 197 | hdr->size = cpu_to_le32(len); | 202 | hdr->size = cpu_to_le32(len); |
| 198 | hdr->confirm_rx = 0; | 203 | hdr->confirm_rx = 0; |
| @@ -764,6 +769,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 764 | node = NULL; | 769 | node = NULL; |
| 765 | if (addr->sq_node == QRTR_NODE_BCAST) { | 770 | if (addr->sq_node == QRTR_NODE_BCAST) { |
| 766 | enqueue_fn = qrtr_bcast_enqueue; | 771 | enqueue_fn = qrtr_bcast_enqueue; |
| 772 | if (addr->sq_port != QRTR_PORT_CTRL) { | ||
| 773 | release_sock(sk); | ||
| 774 | return -ENOTCONN; | ||
| 775 | } | ||
| 767 | } else if (addr->sq_node == ipc->us.sq_node) { | 776 | } else if (addr->sq_node == ipc->us.sq_node) { |
| 768 | enqueue_fn = qrtr_local_enqueue; | 777 | enqueue_fn = qrtr_local_enqueue; |
| 769 | } else { | 778 | } else { |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 526a8e491626..6e7124e57918 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
| @@ -91,7 +91,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, | |||
| 91 | } | 91 | } |
| 92 | params_old = rtnl_dereference(p->params); | 92 | params_old = rtnl_dereference(p->params); |
| 93 | 93 | ||
| 94 | params_new->action = parm->action; | 94 | p->tcf_action = parm->action; |
| 95 | params_new->update_flags = parm->update_flags; | 95 | params_new->update_flags = parm->update_flags; |
| 96 | rcu_assign_pointer(p->params, params_new); | 96 | rcu_assign_pointer(p->params, params_new); |
| 97 | if (params_old) | 97 | if (params_old) |
| @@ -561,7 +561,7 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a, | |||
| 561 | tcf_lastuse_update(&p->tcf_tm); | 561 | tcf_lastuse_update(&p->tcf_tm); |
| 562 | bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb); | 562 | bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb); |
| 563 | 563 | ||
| 564 | action = params->action; | 564 | action = READ_ONCE(p->tcf_action); |
| 565 | if (unlikely(action == TC_ACT_SHOT)) | 565 | if (unlikely(action == TC_ACT_SHOT)) |
| 566 | goto drop_stats; | 566 | goto drop_stats; |
| 567 | 567 | ||
| @@ -599,11 +599,11 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, | |||
| 599 | .index = p->tcf_index, | 599 | .index = p->tcf_index, |
| 600 | .refcnt = p->tcf_refcnt - ref, | 600 | .refcnt = p->tcf_refcnt - ref, |
| 601 | .bindcnt = p->tcf_bindcnt - bind, | 601 | .bindcnt = p->tcf_bindcnt - bind, |
| 602 | .action = p->tcf_action, | ||
| 602 | }; | 603 | }; |
| 603 | struct tcf_t t; | 604 | struct tcf_t t; |
| 604 | 605 | ||
| 605 | params = rtnl_dereference(p->params); | 606 | params = rtnl_dereference(p->params); |
| 606 | opt.action = params->action; | ||
| 607 | opt.update_flags = params->update_flags; | 607 | opt.update_flags = params->update_flags; |
| 608 | 608 | ||
| 609 | if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) | 609 | if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt)) |
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 626dac81a48a..9bc6c2ae98a5 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
| @@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, | |||
| 36 | 36 | ||
| 37 | tcf_lastuse_update(&t->tcf_tm); | 37 | tcf_lastuse_update(&t->tcf_tm); |
| 38 | bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb); | 38 | bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb); |
| 39 | action = params->action; | 39 | action = READ_ONCE(t->tcf_action); |
| 40 | 40 | ||
| 41 | switch (params->tcft_action) { | 41 | switch (params->tcft_action) { |
| 42 | case TCA_TUNNEL_KEY_ACT_RELEASE: | 42 | case TCA_TUNNEL_KEY_ACT_RELEASE: |
| @@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
| 182 | 182 | ||
| 183 | params_old = rtnl_dereference(t->params); | 183 | params_old = rtnl_dereference(t->params); |
| 184 | 184 | ||
| 185 | params_new->action = parm->action; | 185 | t->tcf_action = parm->action; |
| 186 | params_new->tcft_action = parm->t_action; | 186 | params_new->tcft_action = parm->t_action; |
| 187 | params_new->tcft_enc_metadata = metadata; | 187 | params_new->tcft_enc_metadata = metadata; |
| 188 | 188 | ||
| @@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a, | |||
| 254 | .index = t->tcf_index, | 254 | .index = t->tcf_index, |
| 255 | .refcnt = t->tcf_refcnt - ref, | 255 | .refcnt = t->tcf_refcnt - ref, |
| 256 | .bindcnt = t->tcf_bindcnt - bind, | 256 | .bindcnt = t->tcf_bindcnt - bind, |
| 257 | .action = t->tcf_action, | ||
| 257 | }; | 258 | }; |
| 258 | struct tcf_t tm; | 259 | struct tcf_t tm; |
| 259 | 260 | ||
| 260 | params = rtnl_dereference(t->params); | 261 | params = rtnl_dereference(t->params); |
| 261 | 262 | ||
| 262 | opt.t_action = params->tcft_action; | 263 | opt.t_action = params->tcft_action; |
| 263 | opt.action = params->action; | ||
| 264 | 264 | ||
| 265 | if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt)) | 265 | if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt)) |
| 266 | goto nla_put_failure; | 266 | goto nla_put_failure; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index cdc3c87c53e6..f74513a7c7a8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
| @@ -1053,7 +1053,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, | |||
| 1053 | for (tp = rtnl_dereference(chain->filter_chain); | 1053 | for (tp = rtnl_dereference(chain->filter_chain); |
| 1054 | tp; tp = rtnl_dereference(tp->next)) | 1054 | tp; tp = rtnl_dereference(tp->next)) |
| 1055 | tfilter_notify(net, oskb, n, tp, block, | 1055 | tfilter_notify(net, oskb, n, tp, block, |
| 1056 | q, parent, 0, event, false); | 1056 | q, parent, NULL, event, false); |
| 1057 | } | 1057 | } |
| 1058 | 1058 | ||
| 1059 | static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, | 1059 | static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, |
| @@ -1444,7 +1444,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, | |||
| 1444 | memset(&cb->args[1], 0, | 1444 | memset(&cb->args[1], 0, |
| 1445 | sizeof(cb->args) - sizeof(cb->args[0])); | 1445 | sizeof(cb->args) - sizeof(cb->args[0])); |
| 1446 | if (cb->args[1] == 0) { | 1446 | if (cb->args[1] == 0) { |
| 1447 | if (tcf_fill_node(net, skb, tp, block, q, parent, 0, | 1447 | if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, |
| 1448 | NETLINK_CB(cb->skb).portid, | 1448 | NETLINK_CB(cb->skb).portid, |
| 1449 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 1449 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| 1450 | RTM_NEWTFILTER) <= 0) | 1450 | RTM_NEWTFILTER) <= 0) |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index cd2e0e342fb6..6c0a9d5dbf94 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
| @@ -479,24 +479,28 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, | |||
| 479 | q->cparams.mtu = psched_mtu(qdisc_dev(sch)); | 479 | q->cparams.mtu = psched_mtu(qdisc_dev(sch)); |
| 480 | 480 | ||
| 481 | if (opt) { | 481 | if (opt) { |
| 482 | int err = fq_codel_change(sch, opt, extack); | 482 | err = fq_codel_change(sch, opt, extack); |
| 483 | if (err) | 483 | if (err) |
| 484 | return err; | 484 | goto init_failure; |
| 485 | } | 485 | } |
| 486 | 486 | ||
| 487 | err = tcf_block_get(&q->block, &q->filter_list, sch, extack); | 487 | err = tcf_block_get(&q->block, &q->filter_list, sch, extack); |
| 488 | if (err) | 488 | if (err) |
| 489 | return err; | 489 | goto init_failure; |
| 490 | 490 | ||
| 491 | if (!q->flows) { | 491 | if (!q->flows) { |
| 492 | q->flows = kvcalloc(q->flows_cnt, | 492 | q->flows = kvcalloc(q->flows_cnt, |
| 493 | sizeof(struct fq_codel_flow), | 493 | sizeof(struct fq_codel_flow), |
| 494 | GFP_KERNEL); | 494 | GFP_KERNEL); |
| 495 | if (!q->flows) | 495 | if (!q->flows) { |
| 496 | return -ENOMEM; | 496 | err = -ENOMEM; |
| 497 | goto init_failure; | ||
| 498 | } | ||
| 497 | q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL); | 499 | q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL); |
| 498 | if (!q->backlogs) | 500 | if (!q->backlogs) { |
| 499 | return -ENOMEM; | 501 | err = -ENOMEM; |
| 502 | goto alloc_failure; | ||
| 503 | } | ||
| 500 | for (i = 0; i < q->flows_cnt; i++) { | 504 | for (i = 0; i < q->flows_cnt; i++) { |
| 501 | struct fq_codel_flow *flow = q->flows + i; | 505 | struct fq_codel_flow *flow = q->flows + i; |
| 502 | 506 | ||
| @@ -509,6 +513,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, | |||
| 509 | else | 513 | else |
| 510 | sch->flags &= ~TCQ_F_CAN_BYPASS; | 514 | sch->flags &= ~TCQ_F_CAN_BYPASS; |
| 511 | return 0; | 515 | return 0; |
| 516 | |||
| 517 | alloc_failure: | ||
| 518 | kvfree(q->flows); | ||
| 519 | q->flows = NULL; | ||
| 520 | init_failure: | ||
| 521 | q->flows_cnt = 0; | ||
| 522 | return err; | ||
| 512 | } | 523 | } |
| 513 | 524 | ||
| 514 | static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) | 525 | static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 445b7ef61677..12cac85da994 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
| @@ -282,7 +282,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) | |||
| 282 | 282 | ||
| 283 | if (dst) { | 283 | if (dst) { |
| 284 | /* Re-fetch, as under layers may have a higher minimum size */ | 284 | /* Re-fetch, as under layers may have a higher minimum size */ |
| 285 | pmtu = SCTP_TRUNC4(dst_mtu(dst)); | 285 | pmtu = sctp_dst_mtu(dst); |
| 286 | change = t->pathmtu != pmtu; | 286 | change = t->pathmtu != pmtu; |
| 287 | } | 287 | } |
| 288 | t->pathmtu = pmtu; | 288 | t->pathmtu = pmtu; |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 3c1405df936c..05e4ffe5aabd 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -147,7 +147,8 @@ static int smc_release(struct socket *sock) | |||
| 147 | smc->clcsock = NULL; | 147 | smc->clcsock = NULL; |
| 148 | } | 148 | } |
| 149 | if (smc->use_fallback) { | 149 | if (smc->use_fallback) { |
| 150 | sock_put(sk); /* passive closing */ | 150 | if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT) |
| 151 | sock_put(sk); /* passive closing */ | ||
| 151 | sk->sk_state = SMC_CLOSED; | 152 | sk->sk_state = SMC_CLOSED; |
| 152 | sk->sk_state_change(sk); | 153 | sk->sk_state_change(sk); |
| 153 | } | 154 | } |
| @@ -417,12 +418,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code) | |||
| 417 | { | 418 | { |
| 418 | int rc; | 419 | int rc; |
| 419 | 420 | ||
| 420 | if (reason_code < 0) /* error, fallback is not possible */ | 421 | if (reason_code < 0) { /* error, fallback is not possible */ |
| 422 | if (smc->sk.sk_state == SMC_INIT) | ||
| 423 | sock_put(&smc->sk); /* passive closing */ | ||
| 421 | return reason_code; | 424 | return reason_code; |
| 425 | } | ||
| 422 | if (reason_code != SMC_CLC_DECL_REPLY) { | 426 | if (reason_code != SMC_CLC_DECL_REPLY) { |
| 423 | rc = smc_clc_send_decline(smc, reason_code); | 427 | rc = smc_clc_send_decline(smc, reason_code); |
| 424 | if (rc < 0) | 428 | if (rc < 0) { |
| 429 | if (smc->sk.sk_state == SMC_INIT) | ||
| 430 | sock_put(&smc->sk); /* passive closing */ | ||
| 425 | return rc; | 431 | return rc; |
| 432 | } | ||
| 426 | } | 433 | } |
| 427 | return smc_connect_fallback(smc); | 434 | return smc_connect_fallback(smc); |
| 428 | } | 435 | } |
| @@ -435,8 +442,6 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code, | |||
| 435 | smc_lgr_forget(smc->conn.lgr); | 442 | smc_lgr_forget(smc->conn.lgr); |
| 436 | mutex_unlock(&smc_create_lgr_pending); | 443 | mutex_unlock(&smc_create_lgr_pending); |
| 437 | smc_conn_free(&smc->conn); | 444 | smc_conn_free(&smc->conn); |
| 438 | if (reason_code < 0 && smc->sk.sk_state == SMC_INIT) | ||
| 439 | sock_put(&smc->sk); /* passive closing */ | ||
| 440 | return reason_code; | 445 | return reason_code; |
| 441 | } | 446 | } |
| 442 | 447 | ||
| @@ -1452,7 +1457,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, | |||
| 1452 | 1457 | ||
| 1453 | if (optlen < sizeof(int)) | 1458 | if (optlen < sizeof(int)) |
| 1454 | return -EINVAL; | 1459 | return -EINVAL; |
| 1455 | get_user(val, (int __user *)optval); | 1460 | if (get_user(val, (int __user *)optval)) |
| 1461 | return -EFAULT; | ||
| 1456 | 1462 | ||
| 1457 | lock_sock(sk); | 1463 | lock_sock(sk); |
| 1458 | switch (optname) { | 1464 | switch (optname) { |
| @@ -1520,10 +1526,13 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, | |||
| 1520 | return -EBADF; | 1526 | return -EBADF; |
| 1521 | return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); | 1527 | return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); |
| 1522 | } | 1528 | } |
| 1529 | lock_sock(&smc->sk); | ||
| 1523 | switch (cmd) { | 1530 | switch (cmd) { |
| 1524 | case SIOCINQ: /* same as FIONREAD */ | 1531 | case SIOCINQ: /* same as FIONREAD */ |
| 1525 | if (smc->sk.sk_state == SMC_LISTEN) | 1532 | if (smc->sk.sk_state == SMC_LISTEN) { |
| 1533 | release_sock(&smc->sk); | ||
| 1526 | return -EINVAL; | 1534 | return -EINVAL; |
| 1535 | } | ||
| 1527 | if (smc->sk.sk_state == SMC_INIT || | 1536 | if (smc->sk.sk_state == SMC_INIT || |
| 1528 | smc->sk.sk_state == SMC_CLOSED) | 1537 | smc->sk.sk_state == SMC_CLOSED) |
| 1529 | answ = 0; | 1538 | answ = 0; |
| @@ -1532,8 +1541,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, | |||
| 1532 | break; | 1541 | break; |
| 1533 | case SIOCOUTQ: | 1542 | case SIOCOUTQ: |
| 1534 | /* output queue size (not send + not acked) */ | 1543 | /* output queue size (not send + not acked) */ |
| 1535 | if (smc->sk.sk_state == SMC_LISTEN) | 1544 | if (smc->sk.sk_state == SMC_LISTEN) { |
| 1545 | release_sock(&smc->sk); | ||
| 1536 | return -EINVAL; | 1546 | return -EINVAL; |
| 1547 | } | ||
| 1537 | if (smc->sk.sk_state == SMC_INIT || | 1548 | if (smc->sk.sk_state == SMC_INIT || |
| 1538 | smc->sk.sk_state == SMC_CLOSED) | 1549 | smc->sk.sk_state == SMC_CLOSED) |
| 1539 | answ = 0; | 1550 | answ = 0; |
| @@ -1543,8 +1554,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, | |||
| 1543 | break; | 1554 | break; |
| 1544 | case SIOCOUTQNSD: | 1555 | case SIOCOUTQNSD: |
| 1545 | /* output queue size (not send only) */ | 1556 | /* output queue size (not send only) */ |
| 1546 | if (smc->sk.sk_state == SMC_LISTEN) | 1557 | if (smc->sk.sk_state == SMC_LISTEN) { |
| 1558 | release_sock(&smc->sk); | ||
| 1547 | return -EINVAL; | 1559 | return -EINVAL; |
| 1560 | } | ||
| 1548 | if (smc->sk.sk_state == SMC_INIT || | 1561 | if (smc->sk.sk_state == SMC_INIT || |
| 1549 | smc->sk.sk_state == SMC_CLOSED) | 1562 | smc->sk.sk_state == SMC_CLOSED) |
| 1550 | answ = 0; | 1563 | answ = 0; |
| @@ -1552,8 +1565,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, | |||
| 1552 | answ = smc_tx_prepared_sends(&smc->conn); | 1565 | answ = smc_tx_prepared_sends(&smc->conn); |
| 1553 | break; | 1566 | break; |
| 1554 | case SIOCATMARK: | 1567 | case SIOCATMARK: |
| 1555 | if (smc->sk.sk_state == SMC_LISTEN) | 1568 | if (smc->sk.sk_state == SMC_LISTEN) { |
| 1569 | release_sock(&smc->sk); | ||
| 1556 | return -EINVAL; | 1570 | return -EINVAL; |
| 1571 | } | ||
| 1557 | if (smc->sk.sk_state == SMC_INIT || | 1572 | if (smc->sk.sk_state == SMC_INIT || |
| 1558 | smc->sk.sk_state == SMC_CLOSED) { | 1573 | smc->sk.sk_state == SMC_CLOSED) { |
| 1559 | answ = 0; | 1574 | answ = 0; |
| @@ -1569,8 +1584,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, | |||
| 1569 | } | 1584 | } |
| 1570 | break; | 1585 | break; |
| 1571 | default: | 1586 | default: |
| 1587 | release_sock(&smc->sk); | ||
| 1572 | return -ENOIOCTLCMD; | 1588 | return -ENOIOCTLCMD; |
| 1573 | } | 1589 | } |
| 1590 | release_sock(&smc->sk); | ||
| 1574 | 1591 | ||
| 1575 | return put_user(answ, (int __user *)arg); | 1592 | return put_user(answ, (int __user *)arg); |
| 1576 | } | 1593 | } |
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 717449b1da0b..ae5d168653ce 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c | |||
| @@ -250,6 +250,7 @@ out: | |||
| 250 | int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, | 250 | int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, |
| 251 | u8 expected_type) | 251 | u8 expected_type) |
| 252 | { | 252 | { |
| 253 | long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo; | ||
| 253 | struct sock *clc_sk = smc->clcsock->sk; | 254 | struct sock *clc_sk = smc->clcsock->sk; |
| 254 | struct smc_clc_msg_hdr *clcm = buf; | 255 | struct smc_clc_msg_hdr *clcm = buf; |
| 255 | struct msghdr msg = {NULL, 0}; | 256 | struct msghdr msg = {NULL, 0}; |
| @@ -306,7 +307,6 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, | |||
| 306 | memset(&msg, 0, sizeof(struct msghdr)); | 307 | memset(&msg, 0, sizeof(struct msghdr)); |
| 307 | iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen); | 308 | iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen); |
| 308 | krflags = MSG_WAITALL; | 309 | krflags = MSG_WAITALL; |
| 309 | smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME; | ||
| 310 | len = sock_recvmsg(smc->clcsock, &msg, krflags); | 310 | len = sock_recvmsg(smc->clcsock, &msg, krflags); |
| 311 | if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) { | 311 | if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) { |
| 312 | smc->sk.sk_err = EPROTO; | 312 | smc->sk.sk_err = EPROTO; |
| @@ -322,6 +322,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, | |||
| 322 | } | 322 | } |
| 323 | 323 | ||
| 324 | out: | 324 | out: |
| 325 | smc->clcsock->sk->sk_rcvtimeo = rcvtimeo; | ||
| 325 | return reason_code; | 326 | return reason_code; |
| 326 | } | 327 | } |
| 327 | 328 | ||
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index fa41d9881741..ac961dfb1ea1 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
| @@ -107,6 +107,8 @@ static void smc_close_active_abort(struct smc_sock *smc) | |||
| 107 | } | 107 | } |
| 108 | switch (sk->sk_state) { | 108 | switch (sk->sk_state) { |
| 109 | case SMC_INIT: | 109 | case SMC_INIT: |
| 110 | sk->sk_state = SMC_PEERABORTWAIT; | ||
| 111 | break; | ||
| 110 | case SMC_ACTIVE: | 112 | case SMC_ACTIVE: |
| 111 | sk->sk_state = SMC_PEERABORTWAIT; | 113 | sk->sk_state = SMC_PEERABORTWAIT; |
| 112 | release_sock(sk); | 114 | release_sock(sk); |
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index cee666400752..f82886b7d1d8 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c | |||
| @@ -495,7 +495,8 @@ out: | |||
| 495 | 495 | ||
| 496 | void smc_tx_consumer_update(struct smc_connection *conn, bool force) | 496 | void smc_tx_consumer_update(struct smc_connection *conn, bool force) |
| 497 | { | 497 | { |
| 498 | union smc_host_cursor cfed, cons; | 498 | union smc_host_cursor cfed, cons, prod; |
| 499 | int sender_free = conn->rmb_desc->len; | ||
| 499 | int to_confirm; | 500 | int to_confirm; |
| 500 | 501 | ||
| 501 | smc_curs_write(&cons, | 502 | smc_curs_write(&cons, |
| @@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) | |||
| 505 | smc_curs_read(&conn->rx_curs_confirmed, conn), | 506 | smc_curs_read(&conn->rx_curs_confirmed, conn), |
| 506 | conn); | 507 | conn); |
| 507 | to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons); | 508 | to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons); |
| 509 | if (to_confirm > conn->rmbe_update_limit) { | ||
| 510 | smc_curs_write(&prod, | ||
| 511 | smc_curs_read(&conn->local_rx_ctrl.prod, conn), | ||
| 512 | conn); | ||
| 513 | sender_free = conn->rmb_desc->len - | ||
| 514 | smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); | ||
| 515 | } | ||
| 508 | 516 | ||
| 509 | if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || | 517 | if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || |
| 510 | force || | 518 | force || |
| 511 | ((to_confirm > conn->rmbe_update_limit) && | 519 | ((to_confirm > conn->rmbe_update_limit) && |
| 512 | ((to_confirm > (conn->rmb_desc->len / 2)) || | 520 | ((sender_free <= (conn->rmb_desc->len / 2)) || |
| 513 | conn->local_rx_ctrl.prod_flags.write_blocked))) { | 521 | conn->local_rx_ctrl.prod_flags.write_blocked))) { |
| 514 | if ((smc_cdc_get_slot_and_msg_send(conn) < 0) && | 522 | if ((smc_cdc_get_slot_and_msg_send(conn) < 0) && |
| 515 | conn->alert_token_local) { /* connection healthy */ | 523 | conn->alert_token_local) { /* connection healthy */ |
diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 9f666e0650e2..2830709957bd 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c | |||
| @@ -133,6 +133,8 @@ static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr, | |||
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /* tipc_disc_addr_trial(): - handle an address uniqueness trial from peer | 135 | /* tipc_disc_addr_trial(): - handle an address uniqueness trial from peer |
| 136 | * Returns true if message should be dropped by caller, i.e., if it is a | ||
| 137 | * trial message or we are inside trial period. Otherwise false. | ||
| 136 | */ | 138 | */ |
| 137 | static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, | 139 | static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, |
| 138 | struct tipc_media_addr *maddr, | 140 | struct tipc_media_addr *maddr, |
| @@ -168,8 +170,9 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, | |||
| 168 | msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); | 170 | msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); |
| 169 | } | 171 | } |
| 170 | 172 | ||
| 173 | /* Accept regular link requests/responses only after trial period */ | ||
| 171 | if (mtyp != DSC_TRIAL_MSG) | 174 | if (mtyp != DSC_TRIAL_MSG) |
| 172 | return false; | 175 | return trial; |
| 173 | 176 | ||
| 174 | sugg_addr = tipc_node_try_addr(net, peer_id, src); | 177 | sugg_addr = tipc_node_try_addr(net, peer_id, src); |
| 175 | if (sugg_addr) | 178 | if (sugg_addr) |
| @@ -284,7 +287,6 @@ static void tipc_disc_timeout(struct timer_list *t) | |||
| 284 | { | 287 | { |
| 285 | struct tipc_discoverer *d = from_timer(d, t, timer); | 288 | struct tipc_discoverer *d = from_timer(d, t, timer); |
| 286 | struct tipc_net *tn = tipc_net(d->net); | 289 | struct tipc_net *tn = tipc_net(d->net); |
| 287 | u32 self = tipc_own_addr(d->net); | ||
| 288 | struct tipc_media_addr maddr; | 290 | struct tipc_media_addr maddr; |
| 289 | struct sk_buff *skb = NULL; | 291 | struct sk_buff *skb = NULL; |
| 290 | struct net *net = d->net; | 292 | struct net *net = d->net; |
| @@ -298,12 +300,14 @@ static void tipc_disc_timeout(struct timer_list *t) | |||
| 298 | goto exit; | 300 | goto exit; |
| 299 | } | 301 | } |
| 300 | 302 | ||
| 301 | /* Did we just leave the address trial period ? */ | 303 | /* Trial period over ? */ |
| 302 | if (!self && !time_before(jiffies, tn->addr_trial_end)) { | 304 | if (!time_before(jiffies, tn->addr_trial_end)) { |
| 303 | self = tn->trial_addr; | 305 | /* Did we just leave it ? */ |
| 304 | tipc_net_finalize(net, self); | 306 | if (!tipc_own_addr(net)) |
| 305 | msg_set_prevnode(buf_msg(d->skb), self); | 307 | tipc_net_finalize(net, tn->trial_addr); |
| 308 | |||
| 306 | msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); | 309 | msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); |
| 310 | msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net)); | ||
| 307 | } | 311 | } |
| 308 | 312 | ||
| 309 | /* Adjust timeout interval according to discovery phase */ | 313 | /* Adjust timeout interval according to discovery phase */ |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 4fbaa0464405..a7f6964c3a4b 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
| @@ -121,12 +121,17 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr) | |||
| 121 | 121 | ||
| 122 | void tipc_net_finalize(struct net *net, u32 addr) | 122 | void tipc_net_finalize(struct net *net, u32 addr) |
| 123 | { | 123 | { |
| 124 | tipc_set_node_addr(net, addr); | 124 | struct tipc_net *tn = tipc_net(net); |
| 125 | smp_mb(); | 125 | |
| 126 | tipc_named_reinit(net); | 126 | spin_lock_bh(&tn->node_list_lock); |
| 127 | tipc_sk_reinit(net); | 127 | if (!tipc_own_addr(net)) { |
| 128 | tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, | 128 | tipc_set_node_addr(net, addr); |
| 129 | TIPC_CLUSTER_SCOPE, 0, addr); | 129 | tipc_named_reinit(net); |
| 130 | tipc_sk_reinit(net); | ||
| 131 | tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, | ||
| 132 | TIPC_CLUSTER_SCOPE, 0, addr); | ||
| 133 | } | ||
| 134 | spin_unlock_bh(&tn->node_list_lock); | ||
| 130 | } | 135 | } |
| 131 | 136 | ||
| 132 | void tipc_net_stop(struct net *net) | 137 | void tipc_net_stop(struct net *net) |
diff --git a/net/tipc/node.c b/net/tipc/node.c index 6a44eb812baf..0453bd451ce8 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
| @@ -797,6 +797,7 @@ static u32 tipc_node_suggest_addr(struct net *net, u32 addr) | |||
| 797 | } | 797 | } |
| 798 | 798 | ||
| 799 | /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not | 799 | /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not |
| 800 | * Returns suggested address if any, otherwise 0 | ||
| 800 | */ | 801 | */ |
| 801 | u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr) | 802 | u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr) |
| 802 | { | 803 | { |
| @@ -819,12 +820,14 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr) | |||
| 819 | if (n) { | 820 | if (n) { |
| 820 | addr = n->addr; | 821 | addr = n->addr; |
| 821 | tipc_node_put(n); | 822 | tipc_node_put(n); |
| 823 | return addr; | ||
| 822 | } | 824 | } |
| 823 | /* Even this node may be in trial phase */ | 825 | |
| 826 | /* Even this node may be in conflict */ | ||
| 824 | if (tn->trial_addr == addr) | 827 | if (tn->trial_addr == addr) |
| 825 | return tipc_node_suggest_addr(net, addr); | 828 | return tipc_node_suggest_addr(net, addr); |
| 826 | 829 | ||
| 827 | return addr; | 830 | return 0; |
| 828 | } | 831 | } |
| 829 | 832 | ||
| 830 | void tipc_node_check_dest(struct net *net, u32 addr, | 833 | void tipc_node_check_dest(struct net *net, u32 addr, |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index d2380548f8f6..4618f1c31137 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
| @@ -440,7 +440,7 @@ alloc_encrypted: | |||
| 440 | ret = tls_push_record(sk, msg->msg_flags, record_type); | 440 | ret = tls_push_record(sk, msg->msg_flags, record_type); |
| 441 | if (!ret) | 441 | if (!ret) |
| 442 | continue; | 442 | continue; |
| 443 | if (ret == -EAGAIN) | 443 | if (ret < 0) |
| 444 | goto send_end; | 444 | goto send_end; |
| 445 | 445 | ||
| 446 | copied -= try_to_copy; | 446 | copied -= try_to_copy; |
| @@ -701,6 +701,10 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, | |||
| 701 | nsg = skb_to_sgvec(skb, &sgin[1], | 701 | nsg = skb_to_sgvec(skb, &sgin[1], |
| 702 | rxm->offset + tls_ctx->rx.prepend_size, | 702 | rxm->offset + tls_ctx->rx.prepend_size, |
| 703 | rxm->full_len - tls_ctx->rx.prepend_size); | 703 | rxm->full_len - tls_ctx->rx.prepend_size); |
| 704 | if (nsg < 0) { | ||
| 705 | ret = nsg; | ||
| 706 | goto out; | ||
| 707 | } | ||
| 704 | 708 | ||
| 705 | tls_make_aad(ctx->rx_aad_ciphertext, | 709 | tls_make_aad(ctx->rx_aad_ciphertext, |
| 706 | rxm->full_len - tls_ctx->rx.overhead_size, | 710 | rxm->full_len - tls_ctx->rx.overhead_size, |
| @@ -712,6 +716,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, | |||
| 712 | rxm->full_len - tls_ctx->rx.overhead_size, | 716 | rxm->full_len - tls_ctx->rx.overhead_size, |
| 713 | skb, sk->sk_allocation); | 717 | skb, sk->sk_allocation); |
| 714 | 718 | ||
| 719 | out: | ||
| 715 | if (sgin != &sgin_arr[0]) | 720 | if (sgin != &sgin_arr[0]) |
| 716 | kfree(sgin); | 721 | kfree(sgin); |
| 717 | 722 | ||
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 59fb7d3c36a3..72335c2e8108 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c | |||
| @@ -199,8 +199,11 @@ static void xsk_destruct_skb(struct sk_buff *skb) | |||
| 199 | { | 199 | { |
| 200 | u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg; | 200 | u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg; |
| 201 | struct xdp_sock *xs = xdp_sk(skb->sk); | 201 | struct xdp_sock *xs = xdp_sk(skb->sk); |
| 202 | unsigned long flags; | ||
| 202 | 203 | ||
| 204 | spin_lock_irqsave(&xs->tx_completion_lock, flags); | ||
| 203 | WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr)); | 205 | WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr)); |
| 206 | spin_unlock_irqrestore(&xs->tx_completion_lock, flags); | ||
| 204 | 207 | ||
| 205 | sock_wfree(skb); | 208 | sock_wfree(skb); |
| 206 | } | 209 | } |
| @@ -215,9 +218,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m, | |||
| 215 | struct sk_buff *skb; | 218 | struct sk_buff *skb; |
| 216 | int err = 0; | 219 | int err = 0; |
| 217 | 220 | ||
| 218 | if (unlikely(!xs->tx)) | ||
| 219 | return -ENOBUFS; | ||
| 220 | |||
| 221 | mutex_lock(&xs->mutex); | 221 | mutex_lock(&xs->mutex); |
| 222 | 222 | ||
| 223 | while (xskq_peek_desc(xs->tx, &desc)) { | 223 | while (xskq_peek_desc(xs->tx, &desc)) { |
| @@ -230,22 +230,13 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m, | |||
| 230 | goto out; | 230 | goto out; |
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | if (xskq_reserve_addr(xs->umem->cq)) { | 233 | if (xskq_reserve_addr(xs->umem->cq)) |
| 234 | err = -EAGAIN; | ||
| 235 | goto out; | ||
| 236 | } | ||
| 237 | |||
| 238 | len = desc.len; | ||
| 239 | if (unlikely(len > xs->dev->mtu)) { | ||
| 240 | err = -EMSGSIZE; | ||
| 241 | goto out; | 234 | goto out; |
| 242 | } | ||
| 243 | 235 | ||
| 244 | if (xs->queue_id >= xs->dev->real_num_tx_queues) { | 236 | if (xs->queue_id >= xs->dev->real_num_tx_queues) |
| 245 | err = -ENXIO; | ||
| 246 | goto out; | 237 | goto out; |
| 247 | } | ||
| 248 | 238 | ||
| 239 | len = desc.len; | ||
| 249 | skb = sock_alloc_send_skb(sk, len, 1, &err); | 240 | skb = sock_alloc_send_skb(sk, len, 1, &err); |
| 250 | if (unlikely(!skb)) { | 241 | if (unlikely(!skb)) { |
| 251 | err = -EAGAIN; | 242 | err = -EAGAIN; |
| @@ -268,15 +259,15 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m, | |||
| 268 | skb->destructor = xsk_destruct_skb; | 259 | skb->destructor = xsk_destruct_skb; |
| 269 | 260 | ||
| 270 | err = dev_direct_xmit(skb, xs->queue_id); | 261 | err = dev_direct_xmit(skb, xs->queue_id); |
| 262 | xskq_discard_desc(xs->tx); | ||
| 271 | /* Ignore NET_XMIT_CN as packet might have been sent */ | 263 | /* Ignore NET_XMIT_CN as packet might have been sent */ |
| 272 | if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) { | 264 | if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) { |
| 273 | err = -EAGAIN; | 265 | /* SKB completed but not sent */ |
| 274 | /* SKB consumed by dev_direct_xmit() */ | 266 | err = -EBUSY; |
| 275 | goto out; | 267 | goto out; |
| 276 | } | 268 | } |
| 277 | 269 | ||
| 278 | sent_frame = true; | 270 | sent_frame = true; |
| 279 | xskq_discard_desc(xs->tx); | ||
| 280 | } | 271 | } |
| 281 | 272 | ||
| 282 | out: | 273 | out: |
| @@ -297,6 +288,8 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) | |||
| 297 | return -ENXIO; | 288 | return -ENXIO; |
| 298 | if (unlikely(!(xs->dev->flags & IFF_UP))) | 289 | if (unlikely(!(xs->dev->flags & IFF_UP))) |
| 299 | return -ENETDOWN; | 290 | return -ENETDOWN; |
| 291 | if (unlikely(!xs->tx)) | ||
| 292 | return -ENOBUFS; | ||
| 300 | if (need_wait) | 293 | if (need_wait) |
| 301 | return -EOPNOTSUPP; | 294 | return -EOPNOTSUPP; |
| 302 | 295 | ||
| @@ -755,6 +748,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, | |||
| 755 | 748 | ||
| 756 | xs = xdp_sk(sk); | 749 | xs = xdp_sk(sk); |
| 757 | mutex_init(&xs->mutex); | 750 | mutex_init(&xs->mutex); |
| 751 | spin_lock_init(&xs->tx_completion_lock); | ||
| 758 | 752 | ||
| 759 | local_bh_disable(); | 753 | local_bh_disable(); |
| 760 | sock_prot_inuse_add(net, &xsk_proto, 1); | 754 | sock_prot_inuse_add(net, &xsk_proto, 1); |
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index ef6a6f0ec949..52ecaf770642 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h | |||
| @@ -62,14 +62,9 @@ static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt) | |||
| 62 | return (entries > dcnt) ? dcnt : entries; | 62 | return (entries > dcnt) ? dcnt : entries; |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer) | ||
| 66 | { | ||
| 67 | return q->nentries - (producer - q->cons_tail); | ||
| 68 | } | ||
| 69 | |||
| 70 | static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt) | 65 | static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt) |
| 71 | { | 66 | { |
| 72 | u32 free_entries = xskq_nb_free_lazy(q, producer); | 67 | u32 free_entries = q->nentries - (producer - q->cons_tail); |
| 73 | 68 | ||
| 74 | if (free_entries >= dcnt) | 69 | if (free_entries >= dcnt) |
| 75 | return free_entries; | 70 | return free_entries; |
| @@ -129,7 +124,7 @@ static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr) | |||
| 129 | { | 124 | { |
| 130 | struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; | 125 | struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; |
| 131 | 126 | ||
| 132 | if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0) | 127 | if (xskq_nb_free(q, q->prod_tail, 1) == 0) |
| 133 | return -ENOSPC; | 128 | return -ENOSPC; |
| 134 | 129 | ||
| 135 | ring->desc[q->prod_tail++ & q->ring_mask] = addr; | 130 | ring->desc[q->prod_tail++ & q->ring_mask] = addr; |
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore new file mode 100644 index 000000000000..8ae4940025f8 --- /dev/null +++ b/samples/bpf/.gitignore | |||
| @@ -0,0 +1,49 @@ | |||
| 1 | cpustat | ||
| 2 | fds_example | ||
| 3 | lathist | ||
| 4 | load_sock_ops | ||
| 5 | lwt_len_hist | ||
| 6 | map_perf_test | ||
| 7 | offwaketime | ||
| 8 | per_socket_stats_example | ||
| 9 | sampleip | ||
| 10 | sock_example | ||
| 11 | sockex1 | ||
| 12 | sockex2 | ||
| 13 | sockex3 | ||
| 14 | spintest | ||
| 15 | syscall_nrs.h | ||
| 16 | syscall_tp | ||
| 17 | task_fd_query | ||
| 18 | tc_l2_redirect | ||
| 19 | test_cgrp2_array_pin | ||
| 20 | test_cgrp2_attach | ||
| 21 | test_cgrp2_attach2 | ||
| 22 | test_cgrp2_sock | ||
| 23 | test_cgrp2_sock2 | ||
| 24 | test_current_task_under_cgroup | ||
| 25 | test_lru_dist | ||
| 26 | test_map_in_map | ||
| 27 | test_overhead | ||
| 28 | test_probe_write_user | ||
| 29 | trace_event | ||
| 30 | trace_output | ||
| 31 | tracex1 | ||
| 32 | tracex2 | ||
| 33 | tracex3 | ||
| 34 | tracex4 | ||
| 35 | tracex5 | ||
| 36 | tracex6 | ||
| 37 | tracex7 | ||
| 38 | xdp1 | ||
| 39 | xdp2 | ||
| 40 | xdp_adjust_tail | ||
| 41 | xdp_fwd | ||
| 42 | xdp_monitor | ||
| 43 | xdp_redirect | ||
| 44 | xdp_redirect_cpu | ||
| 45 | xdp_redirect_map | ||
| 46 | xdp_router_ipv4 | ||
| 47 | xdp_rxq_info | ||
| 48 | xdp_tx_iptunnel | ||
| 49 | xdpsock | ||
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c index 95c16324760c..0b6f22feb2c9 100644 --- a/samples/bpf/parse_varlen.c +++ b/samples/bpf/parse_varlen.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | */ | 6 | */ |
| 7 | #define KBUILD_MODNAME "foo" | 7 | #define KBUILD_MODNAME "foo" |
| 8 | #include <linux/if_ether.h> | 8 | #include <linux/if_ether.h> |
| 9 | #include <linux/if_vlan.h> | ||
| 9 | #include <linux/ip.h> | 10 | #include <linux/ip.h> |
| 10 | #include <linux/ipv6.h> | 11 | #include <linux/ipv6.h> |
| 11 | #include <linux/in.h> | 12 | #include <linux/in.h> |
| @@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end) | |||
| 108 | return 0; | 109 | return 0; |
| 109 | } | 110 | } |
| 110 | 111 | ||
| 111 | struct vlan_hdr { | ||
| 112 | uint16_t h_vlan_TCI; | ||
| 113 | uint16_t h_vlan_encapsulated_proto; | ||
| 114 | }; | ||
| 115 | |||
| 116 | SEC("varlen") | 112 | SEC("varlen") |
| 117 | int handle_ingress(struct __sk_buff *skb) | 113 | int handle_ingress(struct __sk_buff *skb) |
| 118 | { | 114 | { |
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c index 6caf47afa635..9d6dcaa9db92 100644 --- a/samples/bpf/test_overhead_user.c +++ b/samples/bpf/test_overhead_user.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | */ | 6 | */ |
| 7 | #define _GNU_SOURCE | 7 | #define _GNU_SOURCE |
| 8 | #include <sched.h> | 8 | #include <sched.h> |
| 9 | #include <errno.h> | ||
| 9 | #include <stdio.h> | 10 | #include <stdio.h> |
| 10 | #include <sys/types.h> | 11 | #include <sys/types.h> |
| 11 | #include <asm/unistd.h> | 12 | #include <asm/unistd.h> |
| @@ -44,8 +45,13 @@ static void test_task_rename(int cpu) | |||
| 44 | exit(1); | 45 | exit(1); |
| 45 | } | 46 | } |
| 46 | start_time = time_get_ns(); | 47 | start_time = time_get_ns(); |
| 47 | for (i = 0; i < MAX_CNT; i++) | 48 | for (i = 0; i < MAX_CNT; i++) { |
| 48 | write(fd, buf, sizeof(buf)); | 49 | if (write(fd, buf, sizeof(buf)) < 0) { |
| 50 | printf("task rename failed: %s\n", strerror(errno)); | ||
| 51 | close(fd); | ||
| 52 | return; | ||
| 53 | } | ||
| 54 | } | ||
| 49 | printf("task_rename:%d: %lld events per sec\n", | 55 | printf("task_rename:%d: %lld events per sec\n", |
| 50 | cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); | 56 | cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); |
| 51 | close(fd); | 57 | close(fd); |
| @@ -63,8 +69,13 @@ static void test_urandom_read(int cpu) | |||
| 63 | exit(1); | 69 | exit(1); |
| 64 | } | 70 | } |
| 65 | start_time = time_get_ns(); | 71 | start_time = time_get_ns(); |
| 66 | for (i = 0; i < MAX_CNT; i++) | 72 | for (i = 0; i < MAX_CNT; i++) { |
| 67 | read(fd, buf, sizeof(buf)); | 73 | if (read(fd, buf, sizeof(buf)) < 0) { |
| 74 | printf("failed to read from /dev/urandom: %s\n", strerror(errno)); | ||
| 75 | close(fd); | ||
| 76 | return; | ||
| 77 | } | ||
| 78 | } | ||
| 68 | printf("urandom_read:%d: %lld events per sec\n", | 79 | printf("urandom_read:%d: %lld events per sec\n", |
| 69 | cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); | 80 | cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); |
| 70 | close(fd); | 81 | close(fd); |
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c index 1fa1becfa641..d08046ab81f0 100644 --- a/samples/bpf/trace_event_user.c +++ b/samples/bpf/trace_event_user.c | |||
| @@ -122,6 +122,16 @@ static void print_stacks(void) | |||
| 122 | } | 122 | } |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static inline int generate_load(void) | ||
| 126 | { | ||
| 127 | if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) { | ||
| 128 | printf("failed to generate some load with dd: %s\n", strerror(errno)); | ||
| 129 | return -1; | ||
| 130 | } | ||
| 131 | |||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | |||
| 125 | static void test_perf_event_all_cpu(struct perf_event_attr *attr) | 135 | static void test_perf_event_all_cpu(struct perf_event_attr *attr) |
| 126 | { | 136 | { |
| 127 | int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); | 137 | int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); |
| @@ -142,7 +152,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr) | |||
| 142 | assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); | 152 | assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); |
| 143 | assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0); | 153 | assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0); |
| 144 | } | 154 | } |
| 145 | system("dd if=/dev/zero of=/dev/null count=5000k status=none"); | 155 | |
| 156 | if (generate_load() < 0) { | ||
| 157 | error = 1; | ||
| 158 | goto all_cpu_err; | ||
| 159 | } | ||
| 146 | print_stacks(); | 160 | print_stacks(); |
| 147 | all_cpu_err: | 161 | all_cpu_err: |
| 148 | for (i--; i >= 0; i--) { | 162 | for (i--; i >= 0; i--) { |
| @@ -156,7 +170,7 @@ all_cpu_err: | |||
| 156 | 170 | ||
| 157 | static void test_perf_event_task(struct perf_event_attr *attr) | 171 | static void test_perf_event_task(struct perf_event_attr *attr) |
| 158 | { | 172 | { |
| 159 | int pmu_fd; | 173 | int pmu_fd, error = 0; |
| 160 | 174 | ||
| 161 | /* per task perf event, enable inherit so the "dd ..." command can be traced properly. | 175 | /* per task perf event, enable inherit so the "dd ..." command can be traced properly. |
| 162 | * Enabling inherit will cause bpf_perf_prog_read_time helper failure. | 176 | * Enabling inherit will cause bpf_perf_prog_read_time helper failure. |
| @@ -171,10 +185,17 @@ static void test_perf_event_task(struct perf_event_attr *attr) | |||
| 171 | } | 185 | } |
| 172 | assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); | 186 | assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0); |
| 173 | assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0); | 187 | assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0); |
| 174 | system("dd if=/dev/zero of=/dev/null count=5000k status=none"); | 188 | |
| 189 | if (generate_load() < 0) { | ||
| 190 | error = 1; | ||
| 191 | goto err; | ||
| 192 | } | ||
| 175 | print_stacks(); | 193 | print_stacks(); |
| 194 | err: | ||
| 176 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); | 195 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); |
| 177 | close(pmu_fd); | 196 | close(pmu_fd); |
| 197 | if (error) | ||
| 198 | int_exit(0); | ||
| 178 | } | 199 | } |
| 179 | 200 | ||
| 180 | static void test_bpf_perf_event(void) | 201 | static void test_bpf_perf_event(void) |
diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh index b9c9549c4c27..4bde9d066c46 100755 --- a/samples/bpf/xdp2skb_meta.sh +++ b/samples/bpf/xdp2skb_meta.sh | |||
| @@ -16,8 +16,8 @@ | |||
| 16 | BPF_FILE=xdp2skb_meta_kern.o | 16 | BPF_FILE=xdp2skb_meta_kern.o |
| 17 | DIR=$(dirname $0) | 17 | DIR=$(dirname $0) |
| 18 | 18 | ||
| 19 | export TC=/usr/sbin/tc | 19 | [ -z "$TC" ] && TC=tc |
| 20 | export IP=/usr/sbin/ip | 20 | [ -z "$IP" ] && IP=ip |
| 21 | 21 | ||
| 22 | function usage() { | 22 | function usage() { |
| 23 | echo "" | 23 | echo "" |
| @@ -53,7 +53,7 @@ function _call_cmd() { | |||
| 53 | local allow_fail="$2" | 53 | local allow_fail="$2" |
| 54 | shift 2 | 54 | shift 2 |
| 55 | if [[ -n "$VERBOSE" ]]; then | 55 | if [[ -n "$VERBOSE" ]]; then |
| 56 | echo "$(basename $cmd) $@" | 56 | echo "$cmd $@" |
| 57 | fi | 57 | fi |
| 58 | if [[ -n "$DRYRUN" ]]; then | 58 | if [[ -n "$DRYRUN" ]]; then |
| 59 | return | 59 | return |
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index d69c8d78d3fd..5904b1543831 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c | |||
| @@ -729,7 +729,7 @@ static void kick_tx(int fd) | |||
| 729 | int ret; | 729 | int ret; |
| 730 | 730 | ||
| 731 | ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0); | 731 | ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0); |
| 732 | if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN) | 732 | if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY) |
| 733 | return; | 733 | return; |
| 734 | lassert(0); | 734 | lassert(0); |
| 735 | } | 735 | } |
diff --git a/scripts/tags.sh b/scripts/tags.sh index 412a70cce558..26de7d5aa5c8 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh | |||
| @@ -152,6 +152,7 @@ regex_asm=( | |||
| 152 | ) | 152 | ) |
| 153 | regex_c=( | 153 | regex_c=( |
| 154 | '/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/' | 154 | '/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/' |
| 155 | '/^BPF_CALL_[0-9](\([[:alnum:]_]*\).*/\1/' | ||
| 155 | '/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/' | 156 | '/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/' |
| 156 | '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/' | 157 | '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/' |
| 157 | '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/' | 158 | '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/' |
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 69616d00481c..b53026a72e73 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
| @@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card, | |||
| 635 | int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, | 635 | int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, |
| 636 | struct snd_rawmidi_params * params) | 636 | struct snd_rawmidi_params * params) |
| 637 | { | 637 | { |
| 638 | char *newbuf; | 638 | char *newbuf, *oldbuf; |
| 639 | struct snd_rawmidi_runtime *runtime = substream->runtime; | 639 | struct snd_rawmidi_runtime *runtime = substream->runtime; |
| 640 | 640 | ||
| 641 | if (substream->append && substream->use_count > 1) | 641 | if (substream->append && substream->use_count > 1) |
| @@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, | |||
| 648 | return -EINVAL; | 648 | return -EINVAL; |
| 649 | } | 649 | } |
| 650 | if (params->buffer_size != runtime->buffer_size) { | 650 | if (params->buffer_size != runtime->buffer_size) { |
| 651 | newbuf = krealloc(runtime->buffer, params->buffer_size, | 651 | newbuf = kmalloc(params->buffer_size, GFP_KERNEL); |
| 652 | GFP_KERNEL); | ||
| 653 | if (!newbuf) | 652 | if (!newbuf) |
| 654 | return -ENOMEM; | 653 | return -ENOMEM; |
| 654 | spin_lock_irq(&runtime->lock); | ||
| 655 | oldbuf = runtime->buffer; | ||
| 655 | runtime->buffer = newbuf; | 656 | runtime->buffer = newbuf; |
| 656 | runtime->buffer_size = params->buffer_size; | 657 | runtime->buffer_size = params->buffer_size; |
| 657 | runtime->avail = runtime->buffer_size; | 658 | runtime->avail = runtime->buffer_size; |
| 659 | runtime->appl_ptr = runtime->hw_ptr = 0; | ||
| 660 | spin_unlock_irq(&runtime->lock); | ||
| 661 | kfree(oldbuf); | ||
| 658 | } | 662 | } |
| 659 | runtime->avail_min = params->avail_min; | 663 | runtime->avail_min = params->avail_min; |
| 660 | substream->active_sensing = !params->no_active_sensing; | 664 | substream->active_sensing = !params->no_active_sensing; |
| @@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params); | |||
| 665 | int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, | 669 | int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, |
| 666 | struct snd_rawmidi_params * params) | 670 | struct snd_rawmidi_params * params) |
| 667 | { | 671 | { |
| 668 | char *newbuf; | 672 | char *newbuf, *oldbuf; |
| 669 | struct snd_rawmidi_runtime *runtime = substream->runtime; | 673 | struct snd_rawmidi_runtime *runtime = substream->runtime; |
| 670 | 674 | ||
| 671 | snd_rawmidi_drain_input(substream); | 675 | snd_rawmidi_drain_input(substream); |
| @@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, | |||
| 676 | return -EINVAL; | 680 | return -EINVAL; |
| 677 | } | 681 | } |
| 678 | if (params->buffer_size != runtime->buffer_size) { | 682 | if (params->buffer_size != runtime->buffer_size) { |
| 679 | newbuf = krealloc(runtime->buffer, params->buffer_size, | 683 | newbuf = kmalloc(params->buffer_size, GFP_KERNEL); |
| 680 | GFP_KERNEL); | ||
| 681 | if (!newbuf) | 684 | if (!newbuf) |
| 682 | return -ENOMEM; | 685 | return -ENOMEM; |
| 686 | spin_lock_irq(&runtime->lock); | ||
| 687 | oldbuf = runtime->buffer; | ||
| 683 | runtime->buffer = newbuf; | 688 | runtime->buffer = newbuf; |
| 684 | runtime->buffer_size = params->buffer_size; | 689 | runtime->buffer_size = params->buffer_size; |
| 690 | runtime->appl_ptr = runtime->hw_ptr = 0; | ||
| 691 | spin_unlock_irq(&runtime->lock); | ||
| 692 | kfree(oldbuf); | ||
| 685 | } | 693 | } |
| 686 | runtime->avail_min = params->avail_min; | 694 | runtime->avail_min = params->avail_min; |
| 687 | return 0; | 695 | return 0; |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index e7fcfc3b8885..f641c20095f7 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
| @@ -964,6 +964,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | |||
| 964 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), | 964 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), |
| 965 | SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), | 965 | SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), |
| 966 | SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), | 966 | SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), |
| 967 | SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), | ||
| 967 | SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), | 968 | SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
| 968 | SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), | 969 | SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
| 969 | SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), | 970 | SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7496be4491b1..f6af3e1c2b93 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -2366,6 +2366,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
| 2366 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), | 2366 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), |
| 2367 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), | 2367 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), |
| 2368 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), | 2368 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), |
| 2369 | SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), | ||
| 2369 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), | 2370 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), |
| 2370 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2371 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
| 2371 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2372 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
| @@ -6569,6 +6570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 6569 | SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), | 6570 | SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), |
| 6570 | SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), | 6571 | SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), |
| 6571 | SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), | 6572 | SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), |
| 6573 | SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), | ||
| 6572 | SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), | 6574 | SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), |
| 6573 | SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), | 6575 | SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), |
| 6574 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), | 6576 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), |
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 0d1acb704f64..7ec85d567598 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c | |||
| @@ -519,10 +519,12 @@ struct section *elf_create_section(struct elf *elf, const char *name, | |||
| 519 | sec->sh.sh_flags = SHF_ALLOC; | 519 | sec->sh.sh_flags = SHF_ALLOC; |
| 520 | 520 | ||
| 521 | 521 | ||
| 522 | /* Add section name to .shstrtab */ | 522 | /* Add section name to .shstrtab (or .strtab for Clang) */ |
| 523 | shstrtab = find_section_by_name(elf, ".shstrtab"); | 523 | shstrtab = find_section_by_name(elf, ".shstrtab"); |
| 524 | if (!shstrtab) | ||
| 525 | shstrtab = find_section_by_name(elf, ".strtab"); | ||
| 524 | if (!shstrtab) { | 526 | if (!shstrtab) { |
| 525 | WARN("can't find .shstrtab section"); | 527 | WARN("can't find .shstrtab or .strtab section"); |
| 526 | return NULL; | 528 | return NULL; |
| 527 | } | 529 | } |
| 528 | 530 | ||
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 2ecd27b670d7..f5f7bcc96046 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
| @@ -4975,6 +4975,24 @@ static struct bpf_test tests[] = { | |||
| 4975 | .prog_type = BPF_PROG_TYPE_LWT_XMIT, | 4975 | .prog_type = BPF_PROG_TYPE_LWT_XMIT, |
| 4976 | }, | 4976 | }, |
| 4977 | { | 4977 | { |
| 4978 | "make headroom for LWT_XMIT", | ||
| 4979 | .insns = { | ||
| 4980 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
| 4981 | BPF_MOV64_IMM(BPF_REG_2, 34), | ||
| 4982 | BPF_MOV64_IMM(BPF_REG_3, 0), | ||
| 4983 | BPF_EMIT_CALL(BPF_FUNC_skb_change_head), | ||
| 4984 | /* split for s390 to succeed */ | ||
| 4985 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
| 4986 | BPF_MOV64_IMM(BPF_REG_2, 42), | ||
| 4987 | BPF_MOV64_IMM(BPF_REG_3, 0), | ||
| 4988 | BPF_EMIT_CALL(BPF_FUNC_skb_change_head), | ||
| 4989 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 4990 | BPF_EXIT_INSN(), | ||
| 4991 | }, | ||
| 4992 | .result = ACCEPT, | ||
| 4993 | .prog_type = BPF_PROG_TYPE_LWT_XMIT, | ||
| 4994 | }, | ||
| 4995 | { | ||
| 4978 | "invalid access of tc_classid for LWT_IN", | 4996 | "invalid access of tc_classid for LWT_IN", |
| 4979 | .insns = { | 4997 | .insns = { |
| 4980 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | 4998 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, |
| @@ -12554,8 +12572,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv, | |||
| 12554 | } | 12572 | } |
| 12555 | 12573 | ||
| 12556 | if (fd_prog >= 0) { | 12574 | if (fd_prog >= 0) { |
| 12575 | __u8 tmp[TEST_DATA_LEN << 2]; | ||
| 12576 | __u32 size_tmp = sizeof(tmp); | ||
| 12577 | |||
| 12557 | err = bpf_prog_test_run(fd_prog, 1, test->data, | 12578 | err = bpf_prog_test_run(fd_prog, 1, test->data, |
| 12558 | sizeof(test->data), NULL, NULL, | 12579 | sizeof(test->data), tmp, &size_tmp, |
| 12559 | &retval, NULL); | 12580 | &retval, NULL); |
| 12560 | if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) { | 12581 | if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) { |
| 12561 | printf("Unexpected bpf_prog_test_run error\n"); | 12582 | printf("Unexpected bpf_prog_test_run error\n"); |
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 78245d60d8bc..0f45633bd634 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh | |||
| @@ -740,13 +740,6 @@ ipv6_rt_add() | |||
| 740 | run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64" | 740 | run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64" |
| 741 | log_test $? 2 "Attempt to add duplicate route - reject route" | 741 | log_test $? 2 "Attempt to add duplicate route - reject route" |
| 742 | 742 | ||
| 743 | # iproute2 prepend only sets NLM_F_CREATE | ||
| 744 | # - adds a new route; does NOT convert existing route to ECMP | ||
| 745 | add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" | ||
| 746 | run_cmd "$IP -6 ro prepend 2001:db8:104::/64 via 2001:db8:103::2" | ||
| 747 | check_route6 "2001:db8:104::/64 via 2001:db8:101::2 dev veth1 metric 1024 2001:db8:104::/64 via 2001:db8:103::2 dev veth3 metric 1024" | ||
| 748 | log_test $? 0 "Add new route for existing prefix (w/o NLM_F_EXCL)" | ||
| 749 | |||
| 750 | # route append with same prefix adds a new route | 743 | # route append with same prefix adds a new route |
| 751 | # - iproute2 sets NLM_F_CREATE | NLM_F_APPEND | 744 | # - iproute2 sets NLM_F_CREATE | NLM_F_APPEND |
| 752 | add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" | 745 | add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" |
| @@ -754,27 +747,6 @@ ipv6_rt_add() | |||
| 754 | check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" | 747 | check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" |
| 755 | log_test $? 0 "Append nexthop to existing route - gw" | 748 | log_test $? 0 "Append nexthop to existing route - gw" |
| 756 | 749 | ||
| 757 | add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" | ||
| 758 | run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3" | ||
| 759 | check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop dev veth3 weight 1" | ||
| 760 | log_test $? 0 "Append nexthop to existing route - dev only" | ||
| 761 | |||
| 762 | # multipath route can not have a nexthop that is a reject route | ||
| 763 | add_route6 "2001:db8:104::/64" "via 2001:db8:101::2" | ||
| 764 | run_cmd "$IP -6 ro append unreachable 2001:db8:104::/64" | ||
| 765 | log_test $? 2 "Append nexthop to existing route - reject route" | ||
| 766 | |||
| 767 | # reject route can not be converted to multipath route | ||
| 768 | run_cmd "$IP -6 ro flush 2001:db8:104::/64" | ||
| 769 | run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64" | ||
| 770 | run_cmd "$IP -6 ro append 2001:db8:104::/64 via 2001:db8:103::2" | ||
| 771 | log_test $? 2 "Append nexthop to existing reject route - gw" | ||
| 772 | |||
| 773 | run_cmd "$IP -6 ro flush 2001:db8:104::/64" | ||
| 774 | run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64" | ||
| 775 | run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3" | ||
| 776 | log_test $? 2 "Append nexthop to existing reject route - dev only" | ||
| 777 | |||
| 778 | # insert mpath directly | 750 | # insert mpath directly |
| 779 | add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" | 751 | add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" |
| 780 | check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" | 752 | check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" |
| @@ -819,13 +791,6 @@ ipv6_rt_replace_single() | |||
| 819 | check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" | 791 | check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1" |
| 820 | log_test $? 0 "Single path with multipath" | 792 | log_test $? 0 "Single path with multipath" |
| 821 | 793 | ||
| 822 | # single path with reject | ||
| 823 | # | ||
| 824 | add_initial_route6 "nexthop via 2001:db8:101::2" | ||
| 825 | run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64" | ||
| 826 | check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024" | ||
| 827 | log_test $? 0 "Single path with reject route" | ||
| 828 | |||
| 829 | # single path with single path using MULTIPATH attribute | 794 | # single path with single path using MULTIPATH attribute |
| 830 | # | 795 | # |
| 831 | add_initial_route6 "via 2001:db8:101::2" | 796 | add_initial_route6 "via 2001:db8:101::2" |
| @@ -873,12 +838,6 @@ ipv6_rt_replace_mpath() | |||
| 873 | check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024" | 838 | check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024" |
| 874 | log_test $? 0 "Multipath with single path via multipath attribute" | 839 | log_test $? 0 "Multipath with single path via multipath attribute" |
| 875 | 840 | ||
| 876 | # multipath with reject | ||
| 877 | add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" | ||
| 878 | run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64" | ||
| 879 | check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024" | ||
| 880 | log_test $? 0 "Multipath with reject route" | ||
| 881 | |||
| 882 | # route replace fails - invalid nexthop 1 | 841 | # route replace fails - invalid nexthop 1 |
| 883 | add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" | 842 | add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2" |
| 884 | run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3" | 843 | run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3" |
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh index 792fa4d0285e..850767befa47 100755 --- a/tools/testing/selftests/net/udpgso_bench.sh +++ b/tools/testing/selftests/net/udpgso_bench.sh | |||
| @@ -35,9 +35,6 @@ run_udp() { | |||
| 35 | 35 | ||
| 36 | echo "udp gso" | 36 | echo "udp gso" |
| 37 | run_in_netns ${args} -S | 37 | run_in_netns ${args} -S |
| 38 | |||
| 39 | echo "udp gso zerocopy" | ||
| 40 | run_in_netns ${args} -S -z | ||
| 41 | } | 38 | } |
| 42 | 39 | ||
| 43 | run_tcp() { | 40 | run_tcp() { |
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 90d30fbe95ae..b20b751286fc 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
| @@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work) | |||
| 119 | { | 119 | { |
| 120 | struct kvm_kernel_irqfd *irqfd = | 120 | struct kvm_kernel_irqfd *irqfd = |
| 121 | container_of(work, struct kvm_kernel_irqfd, shutdown); | 121 | container_of(work, struct kvm_kernel_irqfd, shutdown); |
| 122 | struct kvm *kvm = irqfd->kvm; | ||
| 122 | u64 cnt; | 123 | u64 cnt; |
| 123 | 124 | ||
| 125 | /* Make sure irqfd has been initialized in assign path. */ | ||
| 126 | synchronize_srcu(&kvm->irq_srcu); | ||
| 127 | |||
| 124 | /* | 128 | /* |
| 125 | * Synchronize with the wait-queue and unhook ourselves to prevent | 129 | * Synchronize with the wait-queue and unhook ourselves to prevent |
| 126 | * further events. | 130 | * further events. |
| @@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) | |||
| 387 | 391 | ||
| 388 | idx = srcu_read_lock(&kvm->irq_srcu); | 392 | idx = srcu_read_lock(&kvm->irq_srcu); |
| 389 | irqfd_update(kvm, irqfd); | 393 | irqfd_update(kvm, irqfd); |
| 390 | srcu_read_unlock(&kvm->irq_srcu, idx); | ||
| 391 | 394 | ||
| 392 | list_add_tail(&irqfd->list, &kvm->irqfds.items); | 395 | list_add_tail(&irqfd->list, &kvm->irqfds.items); |
| 393 | 396 | ||
| @@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) | |||
| 402 | if (events & EPOLLIN) | 405 | if (events & EPOLLIN) |
| 403 | schedule_work(&irqfd->inject); | 406 | schedule_work(&irqfd->inject); |
| 404 | 407 | ||
| 405 | /* | ||
| 406 | * do not drop the file until the irqfd is fully initialized, otherwise | ||
| 407 | * we might race against the EPOLLHUP | ||
| 408 | */ | ||
| 409 | fdput(f); | ||
| 410 | #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS | 408 | #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS |
| 411 | if (kvm_arch_has_irq_bypass()) { | 409 | if (kvm_arch_has_irq_bypass()) { |
| 412 | irqfd->consumer.token = (void *)irqfd->eventfd; | 410 | irqfd->consumer.token = (void *)irqfd->eventfd; |
| @@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) | |||
| 421 | } | 419 | } |
| 422 | #endif | 420 | #endif |
| 423 | 421 | ||
| 422 | srcu_read_unlock(&kvm->irq_srcu, idx); | ||
| 423 | |||
| 424 | /* | ||
| 425 | * do not drop the file until the irqfd is fully initialized, otherwise | ||
| 426 | * we might race against the EPOLLHUP | ||
| 427 | */ | ||
| 428 | fdput(f); | ||
| 424 | return 0; | 429 | return 0; |
| 425 | 430 | ||
| 426 | fail: | 431 | fail: |
