author		David S. Miller <davem@davemloft.net>	2018-09-05 00:33:03 -0400
committer	David S. Miller <davem@davemloft.net>	2018-09-05 00:33:03 -0400
commit		36302685f59345959de96d0d70a5ad20a3a3451b (patch)
tree		778b3170acd1131840823520a4664f2bba343dbe
parent		2fc4aa59ab470f1d5124b33c05680e2b2f2c6f65 (diff)
parent		28619527b8a712590c93d0a9e24b4425b9376a8c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
239 files changed, 2415 insertions, 1377 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend index 3d5951c8bf5f..e8b60bd766f7 100644 --- a/Documentation/ABI/stable/sysfs-bus-xen-backend +++ b/Documentation/ABI/stable/sysfs-bus-xen-backend | |||
@@ -73,3 +73,12 @@ KernelVersion: 3.0 | |||
73 | Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 73 | Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
74 | Description: | 74 | Description: |
75 | Number of sectors written by the frontend. | 75 | Number of sectors written by the frontend. |
76 | |||
77 | What: /sys/bus/xen-backend/devices/*/state | ||
78 | Date: August 2018 | ||
79 | KernelVersion: 4.19 | ||
80 | Contact: Joe Jin <joe.jin@oracle.com> | ||
81 | Description: | ||
82 | The state of the device. One of: 'Unknown', | ||
83 | 'Initialising', 'Initialised', 'Connected', 'Closing', | ||
84 | 'Closed', 'Reconfiguring', 'Reconfigured'. | ||
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback index 8bb43b66eb55..4e7babb3ba1f 100644 --- a/Documentation/ABI/testing/sysfs-driver-xen-blkback +++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback | |||
@@ -15,3 +15,13 @@ Description: | |||
15 | blkback. If the frontend tries to use more than | 15 | blkback. If the frontend tries to use more than |
16 | max_persistent_grants, the LRU kicks in and starts | 16 | max_persistent_grants, the LRU kicks in and starts |
17 | removing 5% of max_persistent_grants every 100ms. | 17 | removing 5% of max_persistent_grants every 100ms. |
18 | |||
19 | What: /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds | ||
20 | Date: August 2018 | ||
21 | KernelVersion: 4.19 | ||
22 | Contact: Roger Pau Monné <roger.pau@citrix.com> | ||
23 | Description: | ||
24 | How long a persistent grant is allowed to remain | ||
25 | allocated without being in use. The time is in | ||
26 | seconds, 0 means indefinitely long. | ||
27 | The default is 60 seconds. | ||
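A minimal userspace sketch (editorial addition, not part of this merge) of how the xen-blkback parameter documented above can be read back through sysfs; the path comes from the ABI entry and the "0 means indefinitely" rule from its description:

	/*
	 * Illustrative sketch: read the new xen-blkback module parameter and
	 * interpret the documented "0 means indefinitely long" convention.
	 * Error handling is deliberately minimal.
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/module/xen_blkback/parameters/persistent_grant_unused_seconds";
		FILE *f = fopen(path, "r");
		unsigned long secs;

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%lu", &secs) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);

		if (secs == 0)
			printf("unused persistent grants are kept indefinitely\n");
		else
			printf("unused persistent grants are freed after %lu seconds\n", secs);
		return 0;
	}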
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt index f128f736b4a5..7169a0ec41d8 100644 --- a/Documentation/arm64/sve.txt +++ b/Documentation/arm64/sve.txt | |||
@@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg) | |||
200 | thread. | 200 | thread. |
201 | 201 | ||
202 | * Changing the vector length causes all of P0..P15, FFR and all bits of | 202 | * Changing the vector length causes all of P0..P15, FFR and all bits of |
203 | Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become | 203 | Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become |
204 | unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current | 204 | unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current |
205 | vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC | 205 | vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC |
206 | flag, does not constitute a change to the vector length for this purpose. | 206 | flag, does not constitute a change to the vector length for this purpose. |
@@ -500,7 +500,7 @@ References | |||
500 | [2] arch/arm64/include/uapi/asm/ptrace.h | 500 | [2] arch/arm64/include/uapi/asm/ptrace.h |
501 | AArch64 Linux ptrace ABI definitions | 501 | AArch64 Linux ptrace ABI definitions |
502 | 502 | ||
503 | [3] linux/Documentation/arm64/cpu-feature-registers.txt | 503 | [3] Documentation/arm64/cpu-feature-registers.txt |
504 | 504 | ||
505 | [4] ARM IHI0055C | 505 | [4] ARM IHI0055C |
506 | http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf | 506 | http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf |
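A minimal userspace sketch (editorial addition, not part of this merge) of the PR_SVE_SET_VL call discussed in the hunk above; the requested 32-byte vector length is an arbitrary example value, and the fallback constant definitions are only for older userspace headers:

	/*
	 * Illustrative sketch: ask for a 256-bit (32-byte) SVE vector length
	 * and report what the kernel actually granted.  Requires an arm64
	 * kernel with SVE support.
	 */
	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SVE_SET_VL
	#define PR_SVE_SET_VL		50
	#define PR_SVE_VL_LEN_MASK	0xffff
	#endif

	int main(void)
	{
		/* vl is given in bytes: 32 bytes == 256-bit Z registers */
		int ret = prctl(PR_SVE_SET_VL, 32);

		if (ret < 0) {
			perror("PR_SVE_SET_VL");
			return 1;
		}
		/* On success the return value encodes the configured VL and flags. */
		printf("vector length now %d bytes\n", ret & PR_SVE_VL_LEN_MASK);
		return 0;
	}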
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt index b0a8af51c388..265b223cd978 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt | |||
@@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are | |||
11 | attached to every HLIC: software interrupts, the timer interrupt, and external | 11 | attached to every HLIC: software interrupts, the timer interrupt, and external |
12 | interrupts. Software interrupts are used to send IPIs between cores. The | 12 | interrupts. Software interrupts are used to send IPIs between cores. The |
13 | timer interrupt comes from an architecturally mandated real-time timer that is | 13 | timer interrupt comes from an architecturally mandated real-time timer that is |
14 | controller via Supervisor Binary Interface (SBI) calls and CSR reads. External | 14 | controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External |
15 | interrupts connect all other device interrupts to the HLIC, which are routed | 15 | interrupts connect all other device interrupts to the HLIC, which are routed |
16 | via the platform-level interrupt controller (PLIC). | 16 | via the platform-level interrupt controller (PLIC). |
17 | 17 | ||
@@ -25,7 +25,15 @@ in the system. | |||
25 | 25 | ||
26 | Required properties: | 26 | Required properties: |
27 | - compatible : "riscv,cpu-intc" | 27 | - compatible : "riscv,cpu-intc" |
28 | - #interrupt-cells : should be <1> | 28 | - #interrupt-cells : should be <1>. The interrupt sources are defined by the |
29 | RISC-V supervisor ISA manual, with only the following three interrupts being | ||
30 | defined for supervisor mode: | ||
31 | - Source 1 is the supervisor software interrupt, which can be sent by an SBI | ||
32 | call and is reserved for use by software. | ||
33 | - Source 5 is the supervisor timer interrupt, which can be configured by | ||
34 | SBI calls and implements a one-shot timer. | ||
35 | - Source 9 is the supervisor external interrupt, which chains to all other | ||
36 | device interrupts. | ||
29 | - interrupt-controller : Identifies the node as an interrupt controller | 37 | - interrupt-controller : Identifies the node as an interrupt controller |
30 | 38 | ||
31 | Furthermore, this interrupt-controller MUST be embedded inside the cpu | 39 | Furthermore, this interrupt-controller MUST be embedded inside the cpu |
@@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below. | |||
38 | ... | 46 | ... |
39 | cpu1-intc: interrupt-controller { | 47 | cpu1-intc: interrupt-controller { |
40 | #interrupt-cells = <1>; | 48 | #interrupt-cells = <1>; |
41 | compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc"; | 49 | compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc"; |
42 | interrupt-controller; | 50 | interrupt-controller; |
43 | }; | 51 | }; |
44 | }; | 52 | }; |
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt index 41089369f891..b3acebe08eb0 100644 --- a/Documentation/devicetree/bindings/net/cpsw.txt +++ b/Documentation/devicetree/bindings/net/cpsw.txt | |||
@@ -19,6 +19,10 @@ Required properties: | |||
19 | - slaves : Specifies number for slaves | 19 | - slaves : Specifies number for slaves |
20 | - active_slave : Specifies the slave to use for time stamping, | 20 | - active_slave : Specifies the slave to use for time stamping, |
21 | ethtool and SIOCGMIIPHY | 21 | ethtool and SIOCGMIIPHY |
22 | - cpsw-phy-sel : Specifies the phandle to the CPSW phy mode selection | ||
23 | device. See also cpsw-phy-sel.txt for its binding. | ||
24 | Note that in legacy cases cpsw-phy-sel may be | ||
25 | a child device instead of a phandle. | ||
22 | 26 | ||
23 | Optional properties: | 27 | Optional properties: |
24 | - ti,hwmods : Must be "cpgmac0" | 28 | - ti,hwmods : Must be "cpgmac0" |
@@ -75,6 +79,7 @@ Examples: | |||
75 | cpts_clock_mult = <0x80000000>; | 79 | cpts_clock_mult = <0x80000000>; |
76 | cpts_clock_shift = <29>; | 80 | cpts_clock_shift = <29>; |
77 | syscon = <&cm>; | 81 | syscon = <&cm>; |
82 | cpsw-phy-sel = <&phy_sel>; | ||
78 | cpsw_emac0: slave@0 { | 83 | cpsw_emac0: slave@0 { |
79 | phy_id = <&davinci_mdio>, <0>; | 84 | phy_id = <&davinci_mdio>, <0>; |
80 | phy-mode = "rgmii-txid"; | 85 | phy-mode = "rgmii-txid"; |
@@ -103,6 +108,7 @@ Examples: | |||
103 | cpts_clock_mult = <0x80000000>; | 108 | cpts_clock_mult = <0x80000000>; |
104 | cpts_clock_shift = <29>; | 109 | cpts_clock_shift = <29>; |
105 | syscon = <&cm>; | 110 | syscon = <&cm>; |
111 | cpsw-phy-sel = <&phy_sel>; | ||
106 | cpsw_emac0: slave@0 { | 112 | cpsw_emac0: slave@0 { |
107 | phy_id = <&davinci_mdio>, <0>; | 113 | phy_id = <&davinci_mdio>, <0>; |
108 | phy-mode = "rgmii-txid"; | 114 | phy-mode = "rgmii-txid"; |
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt index 76db9f13ad96..abc36274227c 100644 --- a/Documentation/devicetree/bindings/net/sh_eth.txt +++ b/Documentation/devicetree/bindings/net/sh_eth.txt | |||
@@ -16,6 +16,7 @@ Required properties: | |||
16 | "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC. | 16 | "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC. |
17 | "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC. | 17 | "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC. |
18 | "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC. | 18 | "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC. |
19 | "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC. | ||
19 | "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device. | 20 | "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device. |
20 | "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1 | 21 | "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1 |
21 | device. | 22 | device. |
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt index 5d47a262474c..9407212a85a8 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt | |||
@@ -7,6 +7,7 @@ Required properties: | |||
7 | Examples with soctypes are: | 7 | Examples with soctypes are: |
8 | - "renesas,r8a7743-wdt" (RZ/G1M) | 8 | - "renesas,r8a7743-wdt" (RZ/G1M) |
9 | - "renesas,r8a7745-wdt" (RZ/G1E) | 9 | - "renesas,r8a7745-wdt" (RZ/G1E) |
10 | - "renesas,r8a774a1-wdt" (RZ/G2M) | ||
10 | - "renesas,r8a7790-wdt" (R-Car H2) | 11 | - "renesas,r8a7790-wdt" (R-Car H2) |
11 | - "renesas,r8a7791-wdt" (R-Car M2-W) | 12 | - "renesas,r8a7791-wdt" (R-Car M2-W) |
12 | - "renesas,r8a7792-wdt" (R-Car V2H) | 13 | - "renesas,r8a7792-wdt" (R-Car V2H) |
@@ -21,8 +22,8 @@ Required properties: | |||
21 | - "renesas,r7s72100-wdt" (RZ/A1) | 22 | - "renesas,r7s72100-wdt" (RZ/A1) |
22 | The generic compatible string must be: | 23 | The generic compatible string must be: |
23 | - "renesas,rza-wdt" for RZ/A | 24 | - "renesas,rza-wdt" for RZ/A |
24 | - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G | 25 | - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1 |
25 | - "renesas,rcar-gen3-wdt" for R-Car Gen3 | 26 | - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2 |
26 | 27 | ||
27 | - reg : Should contain WDT registers location and length | 28 | - reg : Should contain WDT registers location and length |
28 | - clocks : the clock feeding the watchdog timer. | 29 | - clocks : the clock feeding the watchdog timer. |
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index 72d16f08e431..b8df81f6d6bc 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx | |||
@@ -32,7 +32,7 @@ Supported chips: | |||
32 | Datasheet: Publicly available at the Texas Instruments website | 32 | Datasheet: Publicly available at the Texas Instruments website |
33 | http://www.ti.com/ | 33 | http://www.ti.com/ |
34 | 34 | ||
35 | Author: Lothar Felten <l-felten@ti.com> | 35 | Author: Lothar Felten <lothar.felten@gmail.com> |
36 | 36 | ||
37 | Description | 37 | Description |
38 | ----------- | 38 | ----------- |
diff --git a/Documentation/i2c/DMA-considerations b/Documentation/i2c/DMA-considerations index 966610aa4620..203002054120 100644 --- a/Documentation/i2c/DMA-considerations +++ b/Documentation/i2c/DMA-considerations | |||
@@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the | |||
50 | returned buffer. If NULL is returned, the threshold was not met or a bounce | 50 | returned buffer. If NULL is returned, the threshold was not met or a bounce |
51 | buffer could not be allocated. Fall back to PIO in that case. | 51 | buffer could not be allocated. Fall back to PIO in that case. |
52 | 52 | ||
53 | In any case, a buffer obtained from above needs to be released. It ensures data | 53 | In any case, a buffer obtained from above needs to be released. Another helper |
54 | is copied back to the message and a potentially used bounce buffer is freed:: | 54 | function ensures a potentially used bounce buffer is freed:: |
55 | 55 | ||
56 | i2c_release_dma_safe_msg_buf(msg, dma_buf); | 56 | i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred); |
57 | |||
58 | The last argument 'xferred' controls if the buffer is synced back to the | ||
59 | message or not. No syncing is needed in cases setting up DMA had an error and | ||
60 | there was no data transferred. | ||
57 | 61 | ||
58 | The bounce buffer handling from the core is generic and simple. It will always | 62 | The bounce buffer handling from the core is generic and simple. It will always |
59 | allocate a new bounce buffer. If you want a more sophisticated handling (e.g. | 63 | allocate a new bounce buffer. If you want a more sophisticated handling (e.g. |
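A hedged kernel-side sketch (editorial addition, not part of this merge) showing how a bus driver might pair i2c_get_dma_safe_msg_buf() with the i2c_put_dma_safe_msg_buf() call documented above; the 8-byte threshold and the my_pio_xfer()/my_dma_xfer() helpers are placeholders, not real driver code:

	/* Placeholder driver internals, assumed to exist elsewhere. */
	#include <linux/i2c.h>

	static int my_pio_xfer(struct i2c_adapter *adap, struct i2c_msg *msg);
	static int my_dma_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, u8 *buf);

	static int my_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg)
	{
		u8 *dma_buf;
		bool xferred = false;
		int ret;

		/* NULL means the threshold was not met or no bounce buffer
		 * could be allocated: fall back to PIO in that case. */
		dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
		if (!dma_buf)
			return my_pio_xfer(adap, msg);

		ret = my_dma_xfer(adap, msg, dma_buf);
		xferred = (ret == 0);

		/* 'xferred' tells the core whether to copy a bounce buffer
		 * back into the message before freeing it. */
		i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);

		return ret;
	}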
diff --git a/Makefile b/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 4 | 2 | VERSION = 4 |
3 | PATCHLEVEL = 19 | 3 | PATCHLEVEL = 19 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc1 | 5 | EXTRAVERSION = -rc2 |
6 | NAME = Merciless Moray | 6 | NAME = Merciless Moray |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
@@ -807,6 +807,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) | |||
807 | # disable pointer signed / unsigned warnings in gcc 4.0 | 807 | # disable pointer signed / unsigned warnings in gcc 4.0 |
808 | KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) | 808 | KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) |
809 | 809 | ||
810 | # disable stringop warnings in gcc 8+ | ||
811 | KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) | ||
812 | |||
810 | # disable invalid "can't wrap" optimizations for signed / pointers | 813 | # disable invalid "can't wrap" optimizations for signed / pointers |
811 | KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) | 814 | KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) |
812 | 815 | ||
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts index 4d969013f99a..4d969013f99a 100755..100644 --- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts +++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts | |||
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index f0cbd86312dc..d4b7c59eec68 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi | |||
@@ -469,6 +469,7 @@ | |||
469 | ti,hwmods = "rtc"; | 469 | ti,hwmods = "rtc"; |
470 | clocks = <&clk_32768_ck>; | 470 | clocks = <&clk_32768_ck>; |
471 | clock-names = "int-clk"; | 471 | clock-names = "int-clk"; |
472 | system-power-controller; | ||
472 | status = "disabled"; | 473 | status = "disabled"; |
473 | }; | 474 | }; |
474 | 475 | ||
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index 9fb47724b9c1..ad2ae25b7b4d 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts | |||
@@ -13,6 +13,43 @@ | |||
13 | reg = <0x40000000 0x08000000>; | 13 | reg = <0x40000000 0x08000000>; |
14 | }; | 14 | }; |
15 | 15 | ||
16 | reg_vddio_sd0: regulator-vddio-sd0 { | ||
17 | compatible = "regulator-fixed"; | ||
18 | regulator-name = "vddio-sd0"; | ||
19 | regulator-min-microvolt = <3300000>; | ||
20 | regulator-max-microvolt = <3300000>; | ||
21 | gpio = <&gpio1 29 0>; | ||
22 | }; | ||
23 | |||
24 | reg_lcd_3v3: regulator-lcd-3v3 { | ||
25 | compatible = "regulator-fixed"; | ||
26 | regulator-name = "lcd-3v3"; | ||
27 | regulator-min-microvolt = <3300000>; | ||
28 | regulator-max-microvolt = <3300000>; | ||
29 | gpio = <&gpio1 18 0>; | ||
30 | enable-active-high; | ||
31 | }; | ||
32 | |||
33 | reg_lcd_5v: regulator-lcd-5v { | ||
34 | compatible = "regulator-fixed"; | ||
35 | regulator-name = "lcd-5v"; | ||
36 | regulator-min-microvolt = <5000000>; | ||
37 | regulator-max-microvolt = <5000000>; | ||
38 | }; | ||
39 | |||
40 | panel { | ||
41 | compatible = "sii,43wvf1g"; | ||
42 | backlight = <&backlight_display>; | ||
43 | dvdd-supply = <®_lcd_3v3>; | ||
44 | avdd-supply = <®_lcd_5v>; | ||
45 | |||
46 | port { | ||
47 | panel_in: endpoint { | ||
48 | remote-endpoint = <&display_out>; | ||
49 | }; | ||
50 | }; | ||
51 | }; | ||
52 | |||
16 | apb@80000000 { | 53 | apb@80000000 { |
17 | apbh@80000000 { | 54 | apbh@80000000 { |
18 | gpmi-nand@8000c000 { | 55 | gpmi-nand@8000c000 { |
@@ -52,31 +89,11 @@ | |||
52 | lcdif@80030000 { | 89 | lcdif@80030000 { |
53 | pinctrl-names = "default"; | 90 | pinctrl-names = "default"; |
54 | pinctrl-0 = <&lcdif_24bit_pins_a>; | 91 | pinctrl-0 = <&lcdif_24bit_pins_a>; |
55 | lcd-supply = <®_lcd_3v3>; | ||
56 | display = <&display0>; | ||
57 | status = "okay"; | 92 | status = "okay"; |
58 | 93 | ||
59 | display0: display0 { | 94 | port { |
60 | bits-per-pixel = <32>; | 95 | display_out: endpoint { |
61 | bus-width = <24>; | 96 | remote-endpoint = <&panel_in>; |
62 | |||
63 | display-timings { | ||
64 | native-mode = <&timing0>; | ||
65 | timing0: timing0 { | ||
66 | clock-frequency = <9200000>; | ||
67 | hactive = <480>; | ||
68 | vactive = <272>; | ||
69 | hback-porch = <15>; | ||
70 | hfront-porch = <8>; | ||
71 | vback-porch = <12>; | ||
72 | vfront-porch = <4>; | ||
73 | hsync-len = <1>; | ||
74 | vsync-len = <1>; | ||
75 | hsync-active = <0>; | ||
76 | vsync-active = <0>; | ||
77 | de-active = <1>; | ||
78 | pixelclk-active = <0>; | ||
79 | }; | ||
80 | }; | 97 | }; |
81 | }; | 98 | }; |
82 | }; | 99 | }; |
@@ -118,32 +135,7 @@ | |||
118 | }; | 135 | }; |
119 | }; | 136 | }; |
120 | 137 | ||
121 | regulators { | 138 | backlight_display: backlight { |
122 | compatible = "simple-bus"; | ||
123 | #address-cells = <1>; | ||
124 | #size-cells = <0>; | ||
125 | |||
126 | reg_vddio_sd0: regulator@0 { | ||
127 | compatible = "regulator-fixed"; | ||
128 | reg = <0>; | ||
129 | regulator-name = "vddio-sd0"; | ||
130 | regulator-min-microvolt = <3300000>; | ||
131 | regulator-max-microvolt = <3300000>; | ||
132 | gpio = <&gpio1 29 0>; | ||
133 | }; | ||
134 | |||
135 | reg_lcd_3v3: regulator@1 { | ||
136 | compatible = "regulator-fixed"; | ||
137 | reg = <1>; | ||
138 | regulator-name = "lcd-3v3"; | ||
139 | regulator-min-microvolt = <3300000>; | ||
140 | regulator-max-microvolt = <3300000>; | ||
141 | gpio = <&gpio1 18 0>; | ||
142 | enable-active-high; | ||
143 | }; | ||
144 | }; | ||
145 | |||
146 | backlight { | ||
147 | compatible = "pwm-backlight"; | 139 | compatible = "pwm-backlight"; |
148 | pwms = <&pwm 2 5000000>; | 140 | pwms = <&pwm 2 5000000>; |
149 | brightness-levels = <0 4 8 16 32 64 128 255>; | 141 | brightness-levels = <0 4 8 16 32 64 128 255>; |
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 6b0ae667640f..93ab5bdfe068 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts | |||
@@ -13,6 +13,87 @@ | |||
13 | reg = <0x40000000 0x08000000>; | 13 | reg = <0x40000000 0x08000000>; |
14 | }; | 14 | }; |
15 | 15 | ||
16 | |||
17 | reg_3p3v: regulator-3p3v { | ||
18 | compatible = "regulator-fixed"; | ||
19 | regulator-name = "3P3V"; | ||
20 | regulator-min-microvolt = <3300000>; | ||
21 | regulator-max-microvolt = <3300000>; | ||
22 | regulator-always-on; | ||
23 | }; | ||
24 | |||
25 | reg_vddio_sd0: regulator-vddio-sd0 { | ||
26 | compatible = "regulator-fixed"; | ||
27 | regulator-name = "vddio-sd0"; | ||
28 | regulator-min-microvolt = <3300000>; | ||
29 | regulator-max-microvolt = <3300000>; | ||
30 | gpio = <&gpio3 28 0>; | ||
31 | }; | ||
32 | |||
33 | reg_fec_3v3: regulator-fec-3v3 { | ||
34 | compatible = "regulator-fixed"; | ||
35 | regulator-name = "fec-3v3"; | ||
36 | regulator-min-microvolt = <3300000>; | ||
37 | regulator-max-microvolt = <3300000>; | ||
38 | gpio = <&gpio2 15 0>; | ||
39 | }; | ||
40 | |||
41 | reg_usb0_vbus: regulator-usb0-vbus { | ||
42 | compatible = "regulator-fixed"; | ||
43 | regulator-name = "usb0_vbus"; | ||
44 | regulator-min-microvolt = <5000000>; | ||
45 | regulator-max-microvolt = <5000000>; | ||
46 | gpio = <&gpio3 9 0>; | ||
47 | enable-active-high; | ||
48 | }; | ||
49 | |||
50 | reg_usb1_vbus: regulator-usb1-vbus { | ||
51 | compatible = "regulator-fixed"; | ||
52 | regulator-name = "usb1_vbus"; | ||
53 | regulator-min-microvolt = <5000000>; | ||
54 | regulator-max-microvolt = <5000000>; | ||
55 | gpio = <&gpio3 8 0>; | ||
56 | enable-active-high; | ||
57 | }; | ||
58 | |||
59 | reg_lcd_3v3: regulator-lcd-3v3 { | ||
60 | compatible = "regulator-fixed"; | ||
61 | regulator-name = "lcd-3v3"; | ||
62 | regulator-min-microvolt = <3300000>; | ||
63 | regulator-max-microvolt = <3300000>; | ||
64 | gpio = <&gpio3 30 0>; | ||
65 | enable-active-high; | ||
66 | }; | ||
67 | |||
68 | reg_can_3v3: regulator-can-3v3 { | ||
69 | compatible = "regulator-fixed"; | ||
70 | regulator-name = "can-3v3"; | ||
71 | regulator-min-microvolt = <3300000>; | ||
72 | regulator-max-microvolt = <3300000>; | ||
73 | gpio = <&gpio2 13 0>; | ||
74 | enable-active-high; | ||
75 | }; | ||
76 | |||
77 | reg_lcd_5v: regulator-lcd-5v { | ||
78 | compatible = "regulator-fixed"; | ||
79 | regulator-name = "lcd-5v"; | ||
80 | regulator-min-microvolt = <5000000>; | ||
81 | regulator-max-microvolt = <5000000>; | ||
82 | }; | ||
83 | |||
84 | panel { | ||
85 | compatible = "sii,43wvf1g"; | ||
86 | backlight = <&backlight_display>; | ||
87 | dvdd-supply = <®_lcd_3v3>; | ||
88 | avdd-supply = <®_lcd_5v>; | ||
89 | |||
90 | port { | ||
91 | panel_in: endpoint { | ||
92 | remote-endpoint = <&display_out>; | ||
93 | }; | ||
94 | }; | ||
95 | }; | ||
96 | |||
16 | apb@80000000 { | 97 | apb@80000000 { |
17 | apbh@80000000 { | 98 | apbh@80000000 { |
18 | gpmi-nand@8000c000 { | 99 | gpmi-nand@8000c000 { |
@@ -116,31 +197,11 @@ | |||
116 | pinctrl-names = "default"; | 197 | pinctrl-names = "default"; |
117 | pinctrl-0 = <&lcdif_24bit_pins_a | 198 | pinctrl-0 = <&lcdif_24bit_pins_a |
118 | &lcdif_pins_evk>; | 199 | &lcdif_pins_evk>; |
119 | lcd-supply = <®_lcd_3v3>; | ||
120 | display = <&display0>; | ||
121 | status = "okay"; | 200 | status = "okay"; |
122 | 201 | ||
123 | display0: display0 { | 202 | port { |
124 | bits-per-pixel = <32>; | 203 | display_out: endpoint { |
125 | bus-width = <24>; | 204 | remote-endpoint = <&panel_in>; |
126 | |||
127 | display-timings { | ||
128 | native-mode = <&timing0>; | ||
129 | timing0: timing0 { | ||
130 | clock-frequency = <33500000>; | ||
131 | hactive = <800>; | ||
132 | vactive = <480>; | ||
133 | hback-porch = <89>; | ||
134 | hfront-porch = <164>; | ||
135 | vback-porch = <23>; | ||
136 | vfront-porch = <10>; | ||
137 | hsync-len = <10>; | ||
138 | vsync-len = <10>; | ||
139 | hsync-active = <0>; | ||
140 | vsync-active = <0>; | ||
141 | de-active = <1>; | ||
142 | pixelclk-active = <0>; | ||
143 | }; | ||
144 | }; | 205 | }; |
145 | }; | 206 | }; |
146 | }; | 207 | }; |
@@ -269,80 +330,6 @@ | |||
269 | }; | 330 | }; |
270 | }; | 331 | }; |
271 | 332 | ||
272 | regulators { | ||
273 | compatible = "simple-bus"; | ||
274 | #address-cells = <1>; | ||
275 | #size-cells = <0>; | ||
276 | |||
277 | reg_3p3v: regulator@0 { | ||
278 | compatible = "regulator-fixed"; | ||
279 | reg = <0>; | ||
280 | regulator-name = "3P3V"; | ||
281 | regulator-min-microvolt = <3300000>; | ||
282 | regulator-max-microvolt = <3300000>; | ||
283 | regulator-always-on; | ||
284 | }; | ||
285 | |||
286 | reg_vddio_sd0: regulator@1 { | ||
287 | compatible = "regulator-fixed"; | ||
288 | reg = <1>; | ||
289 | regulator-name = "vddio-sd0"; | ||
290 | regulator-min-microvolt = <3300000>; | ||
291 | regulator-max-microvolt = <3300000>; | ||
292 | gpio = <&gpio3 28 0>; | ||
293 | }; | ||
294 | |||
295 | reg_fec_3v3: regulator@2 { | ||
296 | compatible = "regulator-fixed"; | ||
297 | reg = <2>; | ||
298 | regulator-name = "fec-3v3"; | ||
299 | regulator-min-microvolt = <3300000>; | ||
300 | regulator-max-microvolt = <3300000>; | ||
301 | gpio = <&gpio2 15 0>; | ||
302 | }; | ||
303 | |||
304 | reg_usb0_vbus: regulator@3 { | ||
305 | compatible = "regulator-fixed"; | ||
306 | reg = <3>; | ||
307 | regulator-name = "usb0_vbus"; | ||
308 | regulator-min-microvolt = <5000000>; | ||
309 | regulator-max-microvolt = <5000000>; | ||
310 | gpio = <&gpio3 9 0>; | ||
311 | enable-active-high; | ||
312 | }; | ||
313 | |||
314 | reg_usb1_vbus: regulator@4 { | ||
315 | compatible = "regulator-fixed"; | ||
316 | reg = <4>; | ||
317 | regulator-name = "usb1_vbus"; | ||
318 | regulator-min-microvolt = <5000000>; | ||
319 | regulator-max-microvolt = <5000000>; | ||
320 | gpio = <&gpio3 8 0>; | ||
321 | enable-active-high; | ||
322 | }; | ||
323 | |||
324 | reg_lcd_3v3: regulator@5 { | ||
325 | compatible = "regulator-fixed"; | ||
326 | reg = <5>; | ||
327 | regulator-name = "lcd-3v3"; | ||
328 | regulator-min-microvolt = <3300000>; | ||
329 | regulator-max-microvolt = <3300000>; | ||
330 | gpio = <&gpio3 30 0>; | ||
331 | enable-active-high; | ||
332 | }; | ||
333 | |||
334 | reg_can_3v3: regulator@6 { | ||
335 | compatible = "regulator-fixed"; | ||
336 | reg = <6>; | ||
337 | regulator-name = "can-3v3"; | ||
338 | regulator-min-microvolt = <3300000>; | ||
339 | regulator-max-microvolt = <3300000>; | ||
340 | gpio = <&gpio2 13 0>; | ||
341 | enable-active-high; | ||
342 | }; | ||
343 | |||
344 | }; | ||
345 | |||
346 | sound { | 333 | sound { |
347 | compatible = "fsl,imx28-evk-sgtl5000", | 334 | compatible = "fsl,imx28-evk-sgtl5000", |
348 | "fsl,mxs-audio-sgtl5000"; | 335 | "fsl,mxs-audio-sgtl5000"; |
@@ -363,7 +350,7 @@ | |||
363 | }; | 350 | }; |
364 | }; | 351 | }; |
365 | 352 | ||
366 | backlight { | 353 | backlight_display: backlight { |
367 | compatible = "pwm-backlight"; | 354 | compatible = "pwm-backlight"; |
368 | pwms = <&pwm 2 5000000>; | 355 | pwms = <&pwm 2 5000000>; |
369 | brightness-levels = <0 4 8 16 32 64 128 255>; | 356 | brightness-levels = <0 4 8 16 32 64 128 255>; |
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index 7cbc2ffa4b3a..7234e8330a57 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi | |||
@@ -126,10 +126,14 @@ | |||
126 | interrupt-names = "msi"; | 126 | interrupt-names = "msi"; |
127 | #interrupt-cells = <1>; | 127 | #interrupt-cells = <1>; |
128 | interrupt-map-mask = <0 0 0 0x7>; | 128 | interrupt-map-mask = <0 0 0 0x7>; |
129 | interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, | 129 | /* |
130 | <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, | 130 | * Reference manual lists pci irqs incorrectly |
131 | <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, | 131 | * Real hardware ordering is same as imx6: D+MSI, C, B, A |
132 | <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; | 132 | */ |
133 | interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, | ||
134 | <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, | ||
135 | <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, | ||
136 | <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; | ||
133 | clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, | 137 | clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, |
134 | <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, | 138 | <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, |
135 | <&clks IMX7D_PCIE_PHY_ROOT_CLK>; | 139 | <&clks IMX7D_PCIE_PHY_ROOT_CLK>; |
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 12d6822f0057..04758a2a87f0 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts | |||
@@ -354,7 +354,7 @@ | |||
354 | &mmc2 { | 354 | &mmc2 { |
355 | vmmc-supply = <&vsdio>; | 355 | vmmc-supply = <&vsdio>; |
356 | bus-width = <8>; | 356 | bus-width = <8>; |
357 | non-removable; | 357 | ti,non-removable; |
358 | }; | 358 | }; |
359 | 359 | ||
360 | &mmc3 { | 360 | &mmc3 { |
@@ -621,15 +621,6 @@ | |||
621 | OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */ | 621 | OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */ |
622 | >; | 622 | >; |
623 | }; | 623 | }; |
624 | }; | ||
625 | |||
626 | &omap4_pmx_wkup { | ||
627 | usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { | ||
628 | /* gpio_wk0 */ | ||
629 | pinctrl-single,pins = < | ||
630 | OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) | ||
631 | >; | ||
632 | }; | ||
633 | 624 | ||
634 | vibrator_direction_pin: pinmux_vibrator_direction_pin { | 625 | vibrator_direction_pin: pinmux_vibrator_direction_pin { |
635 | pinctrl-single,pins = < | 626 | pinctrl-single,pins = < |
@@ -644,6 +635,15 @@ | |||
644 | }; | 635 | }; |
645 | }; | 636 | }; |
646 | 637 | ||
638 | &omap4_pmx_wkup { | ||
639 | usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { | ||
640 | /* gpio_wk0 */ | ||
641 | pinctrl-single,pins = < | ||
642 | OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) | ||
643 | >; | ||
644 | }; | ||
645 | }; | ||
646 | |||
647 | /* | 647 | /* |
648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for | 648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for |
649 | * uart1 wakeirq. | 649 | * uart1 wakeirq. |
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index e2c127608bcc..7eca43ff69bb 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig | |||
@@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y | |||
257 | CONFIG_DRM=y | 257 | CONFIG_DRM=y |
258 | CONFIG_DRM_PANEL_LVDS=y | 258 | CONFIG_DRM_PANEL_LVDS=y |
259 | CONFIG_DRM_PANEL_SIMPLE=y | 259 | CONFIG_DRM_PANEL_SIMPLE=y |
260 | CONFIG_DRM_PANEL_SEIKO_43WVF1G=y | ||
260 | CONFIG_DRM_DW_HDMI_AHB_AUDIO=m | 261 | CONFIG_DRM_DW_HDMI_AHB_AUDIO=m |
261 | CONFIG_DRM_DW_HDMI_CEC=y | 262 | CONFIG_DRM_DW_HDMI_CEC=y |
262 | CONFIG_DRM_IMX=y | 263 | CONFIG_DRM_IMX=y |
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index 148226e36152..7b8212857535 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig | |||
@@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y | |||
95 | CONFIG_REGULATOR=y | 95 | CONFIG_REGULATOR=y |
96 | CONFIG_REGULATOR_FIXED_VOLTAGE=y | 96 | CONFIG_REGULATOR_FIXED_VOLTAGE=y |
97 | CONFIG_DRM=y | 97 | CONFIG_DRM=y |
98 | CONFIG_DRM_PANEL_SEIKO_43WVF1G=y | ||
98 | CONFIG_DRM_MXSFB=y | 99 | CONFIG_DRM_MXSFB=y |
99 | CONFIG_FB_MODE_HELPERS=y | 100 | CONFIG_FB_MODE_HELPERS=y |
100 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 101 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index df68dc4056e5..5282324c7cef 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig | |||
@@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | CONFIG_SLAB=y | 7 | CONFIG_SLAB=y |
8 | CONFIG_MODULES=y | ||
9 | CONFIG_MODULE_UNLOAD=y | ||
10 | CONFIG_PARTITION_ADVANCED=y | ||
11 | # CONFIG_ARCH_MULTI_V7 is not set | 8 | # CONFIG_ARCH_MULTI_V7 is not set |
12 | CONFIG_ARCH_VERSATILE=y | 9 | CONFIG_ARCH_VERSATILE=y |
13 | CONFIG_AEABI=y | 10 | CONFIG_AEABI=y |
14 | CONFIG_OABI_COMPAT=y | 11 | CONFIG_OABI_COMPAT=y |
15 | CONFIG_CMA=y | ||
16 | CONFIG_ZBOOT_ROM_TEXT=0x0 | 12 | CONFIG_ZBOOT_ROM_TEXT=0x0 |
17 | CONFIG_ZBOOT_ROM_BSS=0x0 | 13 | CONFIG_ZBOOT_ROM_BSS=0x0 |
18 | CONFIG_CMDLINE="root=1f03 mem=32M" | 14 | CONFIG_CMDLINE="root=1f03 mem=32M" |
19 | CONFIG_FPE_NWFPE=y | 15 | CONFIG_FPE_NWFPE=y |
20 | CONFIG_VFP=y | 16 | CONFIG_VFP=y |
17 | CONFIG_MODULES=y | ||
18 | CONFIG_MODULE_UNLOAD=y | ||
19 | CONFIG_PARTITION_ADVANCED=y | ||
20 | CONFIG_CMA=y | ||
21 | CONFIG_NET=y | 21 | CONFIG_NET=y |
22 | CONFIG_PACKET=y | 22 | CONFIG_PACKET=y |
23 | CONFIG_UNIX=y | 23 | CONFIG_UNIX=y |
@@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y | |||
59 | CONFIG_DRM=y | 59 | CONFIG_DRM=y |
60 | CONFIG_DRM_PANEL_ARM_VERSATILE=y | 60 | CONFIG_DRM_PANEL_ARM_VERSATILE=y |
61 | CONFIG_DRM_PANEL_SIMPLE=y | 61 | CONFIG_DRM_PANEL_SIMPLE=y |
62 | CONFIG_DRM_DUMB_VGA_DAC=y | ||
62 | CONFIG_DRM_PL111=y | 63 | CONFIG_DRM_PL111=y |
63 | CONFIG_FB_MODE_HELPERS=y | 64 | CONFIG_FB_MODE_HELPERS=y |
64 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 65 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
@@ -89,9 +90,10 @@ CONFIG_NFSD=y | |||
89 | CONFIG_NFSD_V3=y | 90 | CONFIG_NFSD_V3=y |
90 | CONFIG_NLS_CODEPAGE_850=m | 91 | CONFIG_NLS_CODEPAGE_850=m |
91 | CONFIG_NLS_ISO8859_1=m | 92 | CONFIG_NLS_ISO8859_1=m |
93 | CONFIG_FONTS=y | ||
94 | CONFIG_FONT_ACORN_8x8=y | ||
95 | CONFIG_DEBUG_FS=y | ||
92 | CONFIG_MAGIC_SYSRQ=y | 96 | CONFIG_MAGIC_SYSRQ=y |
93 | CONFIG_DEBUG_KERNEL=y | 97 | CONFIG_DEBUG_KERNEL=y |
94 | CONFIG_DEBUG_USER=y | 98 | CONFIG_DEBUG_USER=y |
95 | CONFIG_DEBUG_LL=y | 99 | CONFIG_DEBUG_LL=y |
96 | CONFIG_FONTS=y | ||
97 | CONFIG_FONT_ACORN_8x8=y | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 2ceffd85dd3d..cd65ea4e9c54 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -2161,6 +2161,37 @@ static int of_dev_hwmod_lookup(struct device_node *np, | |||
2161 | } | 2161 | } |
2162 | 2162 | ||
2163 | /** | 2163 | /** |
2164 | * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets | ||
2165 | * | ||
2166 | * @oh: struct omap_hwmod * | ||
2167 | * @np: struct device_node * | ||
2168 | * | ||
2169 | * Fix up module register offsets for modules with mpu_rt_idx. | ||
2170 | * Only needed for cpsw with interconnect target module defined | ||
2171 | * in device tree while still using legacy hwmod platform data | ||
2172 | * for rev, sysc and syss registers. | ||
2173 | * | ||
2174 | * Can be removed when all cpsw hwmod platform data has been | ||
2175 | * dropped. | ||
2176 | */ | ||
2177 | static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh, | ||
2178 | struct device_node *np, | ||
2179 | struct resource *res) | ||
2180 | { | ||
2181 | struct device_node *child = NULL; | ||
2182 | int error; | ||
2183 | |||
2184 | child = of_get_next_child(np, child); | ||
2185 | if (!child) | ||
2186 | return; | ||
2187 | |||
2188 | error = of_address_to_resource(child, oh->mpu_rt_idx, res); | ||
2189 | if (error) | ||
2190 | pr_err("%s: error mapping mpu_rt_idx: %i\n", | ||
2191 | __func__, error); | ||
2192 | } | ||
2193 | |||
2194 | /** | ||
2164 | * omap_hwmod_parse_module_range - map module IO range from device tree | 2195 | * omap_hwmod_parse_module_range - map module IO range from device tree |
2165 | * @oh: struct omap_hwmod * | 2196 | * @oh: struct omap_hwmod * |
2166 | * @np: struct device_node * | 2197 | * @np: struct device_node * |
@@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh, | |||
2220 | size = be32_to_cpup(ranges); | 2251 | size = be32_to_cpup(ranges); |
2221 | 2252 | ||
2222 | pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", | 2253 | pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", |
2223 | oh->name, np->name, base, size); | 2254 | oh ? oh->name : "", np->name, base, size); |
2255 | |||
2256 | if (oh && oh->mpu_rt_idx) { | ||
2257 | omap_hwmod_fix_mpu_rt_idx(oh, np, res); | ||
2258 | |||
2259 | return 0; | ||
2260 | } | ||
2224 | 2261 | ||
2225 | res->start = base; | 2262 | res->start = base; |
2226 | res->end = base + size - 1; | 2263 | res->end = base + size - 1; |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 29e75b47becd..1b1a0e95c751 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK | |||
763 | 763 | ||
764 | config HOLES_IN_ZONE | 764 | config HOLES_IN_ZONE |
765 | def_bool y | 765 | def_bool y |
766 | depends on NUMA | ||
767 | 766 | ||
768 | source kernel/Kconfig.hz | 767 | source kernel/Kconfig.hz |
769 | 768 | ||
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f67e8d5e93ad..db8d364f8476 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
@@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y | |||
38 | CONFIG_ARCH_BERLIN=y | 38 | CONFIG_ARCH_BERLIN=y |
39 | CONFIG_ARCH_BRCMSTB=y | 39 | CONFIG_ARCH_BRCMSTB=y |
40 | CONFIG_ARCH_EXYNOS=y | 40 | CONFIG_ARCH_EXYNOS=y |
41 | CONFIG_ARCH_K3=y | ||
41 | CONFIG_ARCH_LAYERSCAPE=y | 42 | CONFIG_ARCH_LAYERSCAPE=y |
42 | CONFIG_ARCH_LG1K=y | 43 | CONFIG_ARCH_LG1K=y |
43 | CONFIG_ARCH_HISI=y | 44 | CONFIG_ARCH_HISI=y |
@@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y | |||
605 | CONFIG_ARCH_TEGRA_210_SOC=y | 606 | CONFIG_ARCH_TEGRA_210_SOC=y |
606 | CONFIG_ARCH_TEGRA_186_SOC=y | 607 | CONFIG_ARCH_TEGRA_186_SOC=y |
607 | CONFIG_ARCH_TEGRA_194_SOC=y | 608 | CONFIG_ARCH_TEGRA_194_SOC=y |
609 | CONFIG_ARCH_K3_AM6_SOC=y | ||
610 | CONFIG_SOC_TI=y | ||
608 | CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y | 611 | CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y |
609 | CONFIG_EXTCON_USB_GPIO=y | 612 | CONFIG_EXTCON_USB_GPIO=y |
610 | CONFIG_EXTCON_USBC_CROS_EC=y | 613 | CONFIG_EXTCON_USBC_CROS_EC=y |
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 6e9f33d14930..067d8937d5af 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c | |||
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req) | |||
417 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); | 417 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); |
418 | put_unaligned_be32(2, iv + GCM_IV_SIZE); | 418 | put_unaligned_be32(2, iv + GCM_IV_SIZE); |
419 | 419 | ||
420 | while (walk.nbytes >= AES_BLOCK_SIZE) { | 420 | while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { |
421 | int blocks = walk.nbytes / AES_BLOCK_SIZE; | 421 | int blocks = walk.nbytes / AES_BLOCK_SIZE; |
422 | u8 *dst = walk.dst.virt.addr; | 422 | u8 *dst = walk.dst.virt.addr; |
423 | u8 *src = walk.src.virt.addr; | 423 | u8 *src = walk.src.virt.addr; |
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req) | |||
437 | NULL); | 437 | NULL); |
438 | 438 | ||
439 | err = skcipher_walk_done(&walk, | 439 | err = skcipher_walk_done(&walk, |
440 | walk.nbytes % AES_BLOCK_SIZE); | 440 | walk.nbytes % (2 * AES_BLOCK_SIZE)); |
441 | } | 441 | } |
442 | if (walk.nbytes) | 442 | if (walk.nbytes) { |
443 | __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, | 443 | __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, |
444 | nrounds); | 444 | nrounds); |
445 | if (walk.nbytes > AES_BLOCK_SIZE) { | ||
446 | crypto_inc(iv, AES_BLOCK_SIZE); | ||
447 | __aes_arm64_encrypt(ctx->aes_key.key_enc, | ||
448 | ks + AES_BLOCK_SIZE, iv, | ||
449 | nrounds); | ||
450 | } | ||
451 | } | ||
445 | } | 452 | } |
446 | 453 | ||
447 | /* handle the tail */ | 454 | /* handle the tail */ |
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req) | |||
545 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); | 552 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); |
546 | put_unaligned_be32(2, iv + GCM_IV_SIZE); | 553 | put_unaligned_be32(2, iv + GCM_IV_SIZE); |
547 | 554 | ||
548 | while (walk.nbytes >= AES_BLOCK_SIZE) { | 555 | while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { |
549 | int blocks = walk.nbytes / AES_BLOCK_SIZE; | 556 | int blocks = walk.nbytes / AES_BLOCK_SIZE; |
550 | u8 *dst = walk.dst.virt.addr; | 557 | u8 *dst = walk.dst.virt.addr; |
551 | u8 *src = walk.src.virt.addr; | 558 | u8 *src = walk.src.virt.addr; |
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req) | |||
564 | } while (--blocks > 0); | 571 | } while (--blocks > 0); |
565 | 572 | ||
566 | err = skcipher_walk_done(&walk, | 573 | err = skcipher_walk_done(&walk, |
567 | walk.nbytes % AES_BLOCK_SIZE); | 574 | walk.nbytes % (2 * AES_BLOCK_SIZE)); |
568 | } | 575 | } |
569 | if (walk.nbytes) | 576 | if (walk.nbytes) { |
577 | if (walk.nbytes > AES_BLOCK_SIZE) { | ||
578 | u8 *iv2 = iv + AES_BLOCK_SIZE; | ||
579 | |||
580 | memcpy(iv2, iv, AES_BLOCK_SIZE); | ||
581 | crypto_inc(iv2, AES_BLOCK_SIZE); | ||
582 | |||
583 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2, | ||
584 | iv2, nrounds); | ||
585 | } | ||
570 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, | 586 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, |
571 | nrounds); | 587 | nrounds); |
588 | } | ||
572 | } | 589 | } |
573 | 590 | ||
574 | /* handle the tail */ | 591 | /* handle the tail */ |
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index b7fb5274b250..0c4fc223f225 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c | |||
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void) | |||
69 | crypto_unregister_alg(&sm4_ce_alg); | 69 | crypto_unregister_alg(&sm4_ce_alg); |
70 | } | 70 | } |
71 | 71 | ||
72 | module_cpu_feature_match(SM3, sm4_ce_mod_init); | 72 | module_cpu_feature_match(SM4, sm4_ce_mod_init); |
73 | module_exit(sm4_ce_mod_fini); | 73 | module_exit(sm4_ce_mod_fini); |
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 3534aa6a4dc2..1b083c500b9a 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c | |||
@@ -98,11 +98,10 @@ static time64_t pmu_read_time(void) | |||
98 | 98 | ||
99 | if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) | 99 | if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) |
100 | return 0; | 100 | return 0; |
101 | while (!req.complete) | 101 | pmu_wait_complete(&req); |
102 | pmu_poll(); | ||
103 | 102 | ||
104 | time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) | | 103 | time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) | |
105 | (req.reply[3] << 8) | req.reply[4]); | 104 | (req.reply[2] << 8) | req.reply[3]); |
106 | 105 | ||
107 | return time - RTC_OFFSET; | 106 | return time - RTC_OFFSET; |
108 | } | 107 | } |
@@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time) | |||
116 | (data >> 24) & 0xFF, (data >> 16) & 0xFF, | 115 | (data >> 24) & 0xFF, (data >> 16) & 0xFF, |
117 | (data >> 8) & 0xFF, data & 0xFF) < 0) | 116 | (data >> 8) & 0xFF, data & 0xFF) < 0) |
118 | return; | 117 | return; |
119 | while (!req.complete) | 118 | pmu_wait_complete(&req); |
120 | pmu_poll(); | ||
121 | } | 119 | } |
122 | 120 | ||
123 | static __u8 pmu_read_pram(int offset) | 121 | static __u8 pmu_read_pram(int offset) |
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug index 7a49f0d28d14..f1da8a7b17ff 100644 --- a/arch/nios2/Kconfig.debug +++ b/arch/nios2/Kconfig.debug | |||
@@ -3,15 +3,6 @@ | |||
3 | config TRACE_IRQFLAGS_SUPPORT | 3 | config TRACE_IRQFLAGS_SUPPORT |
4 | def_bool y | 4 | def_bool y |
5 | 5 | ||
6 | config DEBUG_STACK_USAGE | ||
7 | bool "Enable stack utilization instrumentation" | ||
8 | depends on DEBUG_KERNEL | ||
9 | help | ||
10 | Enables the display of the minimum amount of free stack which each | ||
11 | task has ever had available in the sysrq-T and sysrq-P debug output. | ||
12 | |||
13 | This option will slow down process creation somewhat. | ||
14 | |||
15 | config EARLY_PRINTK | 6 | config EARLY_PRINTK |
16 | bool "Activate early kernel debugging" | 7 | bool "Activate early kernel debugging" |
17 | default y | 8 | default y |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index db0b6eebbfa5..a80669209155 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -177,7 +177,6 @@ config PPC | |||
177 | select HAVE_ARCH_KGDB | 177 | select HAVE_ARCH_KGDB |
178 | select HAVE_ARCH_MMAP_RND_BITS | 178 | select HAVE_ARCH_MMAP_RND_BITS |
179 | select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT | 179 | select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT |
180 | select HAVE_ARCH_PREL32_RELOCATIONS | ||
181 | select HAVE_ARCH_SECCOMP_FILTER | 180 | select HAVE_ARCH_SECCOMP_FILTER |
182 | select HAVE_ARCH_TRACEHOOK | 181 | select HAVE_ARCH_TRACEHOOK |
183 | select HAVE_CBPF_JIT if !PPC64 | 182 | select HAVE_CBPF_JIT if !PPC64 |
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index c229509288ea..439dc7072e05 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h | |||
@@ -14,6 +14,10 @@ | |||
14 | #ifndef _ASM_RISCV_TLB_H | 14 | #ifndef _ASM_RISCV_TLB_H |
15 | #define _ASM_RISCV_TLB_H | 15 | #define _ASM_RISCV_TLB_H |
16 | 16 | ||
17 | struct mmu_gather; | ||
18 | |||
19 | static void tlb_flush(struct mmu_gather *tlb); | ||
20 | |||
17 | #include <asm-generic/tlb.h> | 21 | #include <asm-generic/tlb.h> |
18 | 22 | ||
19 | static inline void tlb_flush(struct mmu_gather *tlb) | 23 | static inline void tlb_flush(struct mmu_gather *tlb) |
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 568026ccf6e8..fb03a4482ad6 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c | |||
@@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, | |||
65 | SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, | 65 | SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, |
66 | uintptr_t, flags) | 66 | uintptr_t, flags) |
67 | { | 67 | { |
68 | #ifdef CONFIG_SMP | ||
69 | struct mm_struct *mm = current->mm; | ||
70 | bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; | ||
71 | #endif | ||
72 | |||
73 | /* Check the reserved flags. */ | 68 | /* Check the reserved flags. */ |
74 | if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) | 69 | if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) |
75 | return -EINVAL; | 70 | return -EINVAL; |
76 | 71 | ||
77 | /* | 72 | flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL); |
78 | * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(), | ||
79 | * which generates unused variable warnings all over this function. | ||
80 | */ | ||
81 | #ifdef CONFIG_SMP | ||
82 | flush_icache_mm(mm, local); | ||
83 | #else | ||
84 | flush_icache_all(); | ||
85 | #endif | ||
86 | 73 | ||
87 | return 0; | 74 | return 0; |
88 | } | 75 | } |
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 3641a294ed54..e4abe9b8f97a 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/of_device.h> | 10 | #include <linux/of_device.h> |
11 | #include <linux/of_platform.h> | 11 | #include <linux/of_platform.h> |
12 | #include <linux/dma-mapping.h> | ||
12 | #include <asm/leon.h> | 13 | #include <asm/leon.h> |
13 | #include <asm/leon_amba.h> | 14 | #include <asm/leon_amba.h> |
14 | 15 | ||
@@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, | |||
381 | else | 382 | else |
382 | dev_set_name(&op->dev, "%08x", dp->phandle); | 383 | dev_set_name(&op->dev, "%08x", dp->phandle); |
383 | 384 | ||
385 | op->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
386 | op->dev.dma_mask = &op->dev.coherent_dma_mask; | ||
387 | |||
384 | if (of_device_register(op)) { | 388 | if (of_device_register(op)) { |
385 | printk("%s: Could not register of device.\n", | 389 | printk("%s: Could not register of device.\n", |
386 | dp->full_name); | 390 | dp->full_name); |
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 44e4d4435bed..6df6086968c6 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/string.h> | 2 | #include <linux/string.h> |
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/of.h> | 4 | #include <linux/of.h> |
5 | #include <linux/dma-mapping.h> | ||
5 | #include <linux/init.h> | 6 | #include <linux/init.h> |
6 | #include <linux/export.h> | 7 | #include <linux/export.h> |
7 | #include <linux/mod_devicetable.h> | 8 | #include <linux/mod_devicetable.h> |
@@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, | |||
675 | dev_set_name(&op->dev, "root"); | 676 | dev_set_name(&op->dev, "root"); |
676 | else | 677 | else |
677 | dev_set_name(&op->dev, "%08x", dp->phandle); | 678 | dev_set_name(&op->dev, "%08x", dp->phandle); |
679 | op->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
680 | op->dev.dma_mask = &op->dev.coherent_dma_mask; | ||
678 | 681 | ||
679 | if (of_device_register(op)) { | 682 | if (of_device_register(op)) { |
680 | printk("%s: Could not register of device.\n", | 683 | printk("%s: Could not register of device.\n", |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c5ff296bc5d1..1a0be022f91d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -2843,7 +2843,7 @@ config X86_SYSFB | |||
2843 | This option, if enabled, marks VGA/VBE/EFI framebuffers as generic | 2843 | This option, if enabled, marks VGA/VBE/EFI framebuffers as generic |
2844 | framebuffers so the new generic system-framebuffer drivers can be | 2844 | framebuffers so the new generic system-framebuffer drivers can be |
2845 | used on x86. If the framebuffer is not compatible with the generic | 2845 | used on x86. If the framebuffer is not compatible with the generic |
2846 | modes, it is adverticed as fallback platform framebuffer so legacy | 2846 | modes, it is advertised as fallback platform framebuffer so legacy |
2847 | drivers like efifb, vesafb and uvesafb can pick it up. | 2847 | drivers like efifb, vesafb and uvesafb can pick it up. |
2848 | If this option is not selected, all system framebuffers are always | 2848 | If this option is not selected, all system framebuffers are always |
2849 | marked as fallback platform framebuffers as usual. | 2849 | marked as fallback platform framebuffers as usual. |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94859241bc3e..8f6e7eb8ae9f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER | |||
175 | endif | 175 | endif |
176 | endif | 176 | endif |
177 | 177 | ||
178 | ifndef CC_HAVE_ASM_GOTO | ||
179 | $(error Compiler lacks asm-goto support.) | ||
180 | endif | ||
181 | |||
182 | # | ||
183 | # Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a | ||
184 | # GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way | ||
185 | # to test for this bug at compile-time because the test case needs to execute, | ||
186 | # which is a no-go for cross compilers. So check the GCC version instead. | ||
187 | # | ||
188 | ifdef CONFIG_JUMP_LABEL | ||
189 | ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1) | ||
190 | ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1) | ||
191 | endif | ||
192 | endif | ||
193 | |||
194 | ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) | 178 | ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) |
195 | # This compiler flag is not supported by Clang: | 179 | # This compiler flag is not supported by Clang: |
196 | KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) | 180 | KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) |
@@ -312,6 +296,13 @@ PHONY += vdso_install | |||
312 | vdso_install: | 296 | vdso_install: |
313 | $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ | 297 | $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ |
314 | 298 | ||
299 | archprepare: checkbin | ||
300 | checkbin: | ||
301 | ifndef CC_HAVE_ASM_GOTO | ||
302 | @echo Compiler lacks asm-goto support. | ||
303 | @exit 1 | ||
304 | endif | ||
305 | |||
315 | archclean: | 306 | archclean: |
316 | $(Q)rm -rf $(objtree)/arch/i386 | 307 | $(Q)rm -rf $(objtree)/arch/i386 |
317 | $(Q)rm -rf $(objtree)/arch/x86_64 | 308 | $(Q)rm -rf $(objtree)/arch/x86_64 |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 9bd139569b41..cb2deb61c5d9 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff | |||
223 | pcmpeqd TWOONE(%rip), \TMP2 | 223 | pcmpeqd TWOONE(%rip), \TMP2 |
224 | pand POLY(%rip), \TMP2 | 224 | pand POLY(%rip), \TMP2 |
225 | pxor \TMP2, \TMP3 | 225 | pxor \TMP2, \TMP3 |
226 | movdqa \TMP3, HashKey(%arg2) | 226 | movdqu \TMP3, HashKey(%arg2) |
227 | 227 | ||
228 | movdqa \TMP3, \TMP5 | 228 | movdqa \TMP3, \TMP5 |
229 | pshufd $78, \TMP3, \TMP1 | 229 | pshufd $78, \TMP3, \TMP1 |
230 | pxor \TMP3, \TMP1 | 230 | pxor \TMP3, \TMP1 |
231 | movdqa \TMP1, HashKey_k(%arg2) | 231 | movdqu \TMP1, HashKey_k(%arg2) |
232 | 232 | ||
233 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 233 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
234 | # TMP5 = HashKey^2<<1 (mod poly) | 234 | # TMP5 = HashKey^2<<1 (mod poly) |
235 | movdqa \TMP5, HashKey_2(%arg2) | 235 | movdqu \TMP5, HashKey_2(%arg2) |
236 | # HashKey_2 = HashKey^2<<1 (mod poly) | 236 | # HashKey_2 = HashKey^2<<1 (mod poly) |
237 | pshufd $78, \TMP5, \TMP1 | 237 | pshufd $78, \TMP5, \TMP1 |
238 | pxor \TMP5, \TMP1 | 238 | pxor \TMP5, \TMP1 |
239 | movdqa \TMP1, HashKey_2_k(%arg2) | 239 | movdqu \TMP1, HashKey_2_k(%arg2) |
240 | 240 | ||
241 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 241 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
242 | # TMP5 = HashKey^3<<1 (mod poly) | 242 | # TMP5 = HashKey^3<<1 (mod poly) |
243 | movdqa \TMP5, HashKey_3(%arg2) | 243 | movdqu \TMP5, HashKey_3(%arg2) |
244 | pshufd $78, \TMP5, \TMP1 | 244 | pshufd $78, \TMP5, \TMP1 |
245 | pxor \TMP5, \TMP1 | 245 | pxor \TMP5, \TMP1 |
246 | movdqa \TMP1, HashKey_3_k(%arg2) | 246 | movdqu \TMP1, HashKey_3_k(%arg2) |
247 | 247 | ||
248 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 248 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
249 | # TMP5 = HashKey^3<<1 (mod poly) | 249 | # TMP5 = HashKey^3<<1 (mod poly) |
250 | movdqa \TMP5, HashKey_4(%arg2) | 250 | movdqu \TMP5, HashKey_4(%arg2) |
251 | pshufd $78, \TMP5, \TMP1 | 251 | pshufd $78, \TMP5, \TMP1 |
252 | pxor \TMP5, \TMP1 | 252 | pxor \TMP5, \TMP1 |
253 | movdqa \TMP1, HashKey_4_k(%arg2) | 253 | movdqu \TMP1, HashKey_4_k(%arg2) |
254 | .endm | 254 | .endm |
255 | 255 | ||
256 | # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. | 256 | # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. |
@@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff | |||
271 | movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv | 271 | movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv |
272 | 272 | ||
273 | PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, | 273 | PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, |
274 | movdqa HashKey(%arg2), %xmm13 | 274 | movdqu HashKey(%arg2), %xmm13 |
275 | 275 | ||
276 | CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ | 276 | CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ |
277 | %xmm4, %xmm5, %xmm6 | 277 | %xmm4, %xmm5, %xmm6 |
@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
997 | pshufd $78, \XMM5, \TMP6 | 997 | pshufd $78, \XMM5, \TMP6 |
998 | pxor \XMM5, \TMP6 | 998 | pxor \XMM5, \TMP6 |
999 | paddd ONE(%rip), \XMM0 # INCR CNT | 999 | paddd ONE(%rip), \XMM0 # INCR CNT |
1000 | movdqa HashKey_4(%arg2), \TMP5 | 1000 | movdqu HashKey_4(%arg2), \TMP5 |
1001 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 | 1001 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 |
1002 | movdqa \XMM0, \XMM1 | 1002 | movdqa \XMM0, \XMM1 |
1003 | paddd ONE(%rip), \XMM0 # INCR CNT | 1003 | paddd ONE(%rip), \XMM0 # INCR CNT |
@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1016 | pxor (%arg1), \XMM2 | 1016 | pxor (%arg1), \XMM2 |
1017 | pxor (%arg1), \XMM3 | 1017 | pxor (%arg1), \XMM3 |
1018 | pxor (%arg1), \XMM4 | 1018 | pxor (%arg1), \XMM4 |
1019 | movdqa HashKey_4_k(%arg2), \TMP5 | 1019 | movdqu HashKey_4_k(%arg2), \TMP5 |
1020 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) | 1020 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) |
1021 | movaps 0x10(%arg1), \TMP1 | 1021 | movaps 0x10(%arg1), \TMP1 |
1022 | AESENC \TMP1, \XMM1 # Round 1 | 1022 | AESENC \TMP1, \XMM1 # Round 1 |
@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1031 | movdqa \XMM6, \TMP1 | 1031 | movdqa \XMM6, \TMP1 |
1032 | pshufd $78, \XMM6, \TMP2 | 1032 | pshufd $78, \XMM6, \TMP2 |
1033 | pxor \XMM6, \TMP2 | 1033 | pxor \XMM6, \TMP2 |
1034 | movdqa HashKey_3(%arg2), \TMP5 | 1034 | movdqu HashKey_3(%arg2), \TMP5 |
1035 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 | 1035 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 |
1036 | movaps 0x30(%arg1), \TMP3 | 1036 | movaps 0x30(%arg1), \TMP3 |
1037 | AESENC \TMP3, \XMM1 # Round 3 | 1037 | AESENC \TMP3, \XMM1 # Round 3 |
@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1044 | AESENC \TMP3, \XMM2 | 1044 | AESENC \TMP3, \XMM2 |
1045 | AESENC \TMP3, \XMM3 | 1045 | AESENC \TMP3, \XMM3 |
1046 | AESENC \TMP3, \XMM4 | 1046 | AESENC \TMP3, \XMM4 |
1047 | movdqa HashKey_3_k(%arg2), \TMP5 | 1047 | movdqu HashKey_3_k(%arg2), \TMP5 |
1048 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1048 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1049 | movaps 0x50(%arg1), \TMP3 | 1049 | movaps 0x50(%arg1), \TMP3 |
1050 | AESENC \TMP3, \XMM1 # Round 5 | 1050 | AESENC \TMP3, \XMM1 # Round 5 |
@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1058 | movdqa \XMM7, \TMP1 | 1058 | movdqa \XMM7, \TMP1 |
1059 | pshufd $78, \XMM7, \TMP2 | 1059 | pshufd $78, \XMM7, \TMP2 |
1060 | pxor \XMM7, \TMP2 | 1060 | pxor \XMM7, \TMP2 |
1061 | movdqa HashKey_2(%arg2), \TMP5 | 1061 | movdqu HashKey_2(%arg2), \TMP5 |
1062 | 1062 | ||
1063 | # Multiply TMP5 * HashKey using karatsuba | 1063 | # Multiply TMP5 * HashKey using karatsuba |
1064 | 1064 | ||
@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1074 | AESENC \TMP3, \XMM2 | 1074 | AESENC \TMP3, \XMM2 |
1075 | AESENC \TMP3, \XMM3 | 1075 | AESENC \TMP3, \XMM3 |
1076 | AESENC \TMP3, \XMM4 | 1076 | AESENC \TMP3, \XMM4 |
1077 | movdqa HashKey_2_k(%arg2), \TMP5 | 1077 | movdqu HashKey_2_k(%arg2), \TMP5 |
1078 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1078 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1079 | movaps 0x80(%arg1), \TMP3 | 1079 | movaps 0x80(%arg1), \TMP3 |
1080 | AESENC \TMP3, \XMM1 # Round 8 | 1080 | AESENC \TMP3, \XMM1 # Round 8 |
@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1092 | movdqa \XMM8, \TMP1 | 1092 | movdqa \XMM8, \TMP1 |
1093 | pshufd $78, \XMM8, \TMP2 | 1093 | pshufd $78, \XMM8, \TMP2 |
1094 | pxor \XMM8, \TMP2 | 1094 | pxor \XMM8, \TMP2 |
1095 | movdqa HashKey(%arg2), \TMP5 | 1095 | movdqu HashKey(%arg2), \TMP5 |
1096 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1096 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1097 | movaps 0x90(%arg1), \TMP3 | 1097 | movaps 0x90(%arg1), \TMP3 |
1098 | AESENC \TMP3, \XMM1 # Round 9 | 1098 | AESENC \TMP3, \XMM1 # Round 9 |
@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@: | |||
1121 | AESENCLAST \TMP3, \XMM2 | 1121 | AESENCLAST \TMP3, \XMM2 |
1122 | AESENCLAST \TMP3, \XMM3 | 1122 | AESENCLAST \TMP3, \XMM3 |
1123 | AESENCLAST \TMP3, \XMM4 | 1123 | AESENCLAST \TMP3, \XMM4 |
1124 | movdqa HashKey_k(%arg2), \TMP5 | 1124 | movdqu HashKey_k(%arg2), \TMP5 |
1125 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1125 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1126 | movdqu (%arg4,%r11,1), \TMP3 | 1126 | movdqu (%arg4,%r11,1), \TMP3 |
1127 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK | 1127 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK |
@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1205 | pshufd $78, \XMM5, \TMP6 | 1205 | pshufd $78, \XMM5, \TMP6 |
1206 | pxor \XMM5, \TMP6 | 1206 | pxor \XMM5, \TMP6 |
1207 | paddd ONE(%rip), \XMM0 # INCR CNT | 1207 | paddd ONE(%rip), \XMM0 # INCR CNT |
1208 | movdqa HashKey_4(%arg2), \TMP5 | 1208 | movdqu HashKey_4(%arg2), \TMP5 |
1209 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 | 1209 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 |
1210 | movdqa \XMM0, \XMM1 | 1210 | movdqa \XMM0, \XMM1 |
1211 | paddd ONE(%rip), \XMM0 # INCR CNT | 1211 | paddd ONE(%rip), \XMM0 # INCR CNT |
@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1224 | pxor (%arg1), \XMM2 | 1224 | pxor (%arg1), \XMM2 |
1225 | pxor (%arg1), \XMM3 | 1225 | pxor (%arg1), \XMM3 |
1226 | pxor (%arg1), \XMM4 | 1226 | pxor (%arg1), \XMM4 |
1227 | movdqa HashKey_4_k(%arg2), \TMP5 | 1227 | movdqu HashKey_4_k(%arg2), \TMP5 |
1228 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) | 1228 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) |
1229 | movaps 0x10(%arg1), \TMP1 | 1229 | movaps 0x10(%arg1), \TMP1 |
1230 | AESENC \TMP1, \XMM1 # Round 1 | 1230 | AESENC \TMP1, \XMM1 # Round 1 |
@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1239 | movdqa \XMM6, \TMP1 | 1239 | movdqa \XMM6, \TMP1 |
1240 | pshufd $78, \XMM6, \TMP2 | 1240 | pshufd $78, \XMM6, \TMP2 |
1241 | pxor \XMM6, \TMP2 | 1241 | pxor \XMM6, \TMP2 |
1242 | movdqa HashKey_3(%arg2), \TMP5 | 1242 | movdqu HashKey_3(%arg2), \TMP5 |
1243 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 | 1243 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 |
1244 | movaps 0x30(%arg1), \TMP3 | 1244 | movaps 0x30(%arg1), \TMP3 |
1245 | AESENC \TMP3, \XMM1 # Round 3 | 1245 | AESENC \TMP3, \XMM1 # Round 3 |
@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1252 | AESENC \TMP3, \XMM2 | 1252 | AESENC \TMP3, \XMM2 |
1253 | AESENC \TMP3, \XMM3 | 1253 | AESENC \TMP3, \XMM3 |
1254 | AESENC \TMP3, \XMM4 | 1254 | AESENC \TMP3, \XMM4 |
1255 | movdqa HashKey_3_k(%arg2), \TMP5 | 1255 | movdqu HashKey_3_k(%arg2), \TMP5 |
1256 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1256 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1257 | movaps 0x50(%arg1), \TMP3 | 1257 | movaps 0x50(%arg1), \TMP3 |
1258 | AESENC \TMP3, \XMM1 # Round 5 | 1258 | AESENC \TMP3, \XMM1 # Round 5 |
@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1266 | movdqa \XMM7, \TMP1 | 1266 | movdqa \XMM7, \TMP1 |
1267 | pshufd $78, \XMM7, \TMP2 | 1267 | pshufd $78, \XMM7, \TMP2 |
1268 | pxor \XMM7, \TMP2 | 1268 | pxor \XMM7, \TMP2 |
1269 | movdqa HashKey_2(%arg2), \TMP5 | 1269 | movdqu HashKey_2(%arg2), \TMP5 |
1270 | 1270 | ||
1271 | # Multiply TMP5 * HashKey using karatsuba | 1271 | # Multiply TMP5 * HashKey using karatsuba |
1272 | 1272 | ||
@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1282 | AESENC \TMP3, \XMM2 | 1282 | AESENC \TMP3, \XMM2 |
1283 | AESENC \TMP3, \XMM3 | 1283 | AESENC \TMP3, \XMM3 |
1284 | AESENC \TMP3, \XMM4 | 1284 | AESENC \TMP3, \XMM4 |
1285 | movdqa HashKey_2_k(%arg2), \TMP5 | 1285 | movdqu HashKey_2_k(%arg2), \TMP5 |
1286 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1286 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1287 | movaps 0x80(%arg1), \TMP3 | 1287 | movaps 0x80(%arg1), \TMP3 |
1288 | AESENC \TMP3, \XMM1 # Round 8 | 1288 | AESENC \TMP3, \XMM1 # Round 8 |
@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1300 | movdqa \XMM8, \TMP1 | 1300 | movdqa \XMM8, \TMP1 |
1301 | pshufd $78, \XMM8, \TMP2 | 1301 | pshufd $78, \XMM8, \TMP2 |
1302 | pxor \XMM8, \TMP2 | 1302 | pxor \XMM8, \TMP2 |
1303 | movdqa HashKey(%arg2), \TMP5 | 1303 | movdqu HashKey(%arg2), \TMP5 |
1304 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1304 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1305 | movaps 0x90(%arg1), \TMP3 | 1305 | movaps 0x90(%arg1), \TMP3 |
1306 | AESENC \TMP3, \XMM1 # Round 9 | 1306 | AESENC \TMP3, \XMM1 # Round 9 |
@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@: | |||
1329 | AESENCLAST \TMP3, \XMM2 | 1329 | AESENCLAST \TMP3, \XMM2 |
1330 | AESENCLAST \TMP3, \XMM3 | 1330 | AESENCLAST \TMP3, \XMM3 |
1331 | AESENCLAST \TMP3, \XMM4 | 1331 | AESENCLAST \TMP3, \XMM4 |
1332 | movdqa HashKey_k(%arg2), \TMP5 | 1332 | movdqu HashKey_k(%arg2), \TMP5 |
1333 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1333 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1334 | movdqu (%arg4,%r11,1), \TMP3 | 1334 | movdqu (%arg4,%r11,1), \TMP3 |
1335 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK | 1335 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK |
@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
1405 | movdqa \XMM1, \TMP6 | 1405 | movdqa \XMM1, \TMP6 |
1406 | pshufd $78, \XMM1, \TMP2 | 1406 | pshufd $78, \XMM1, \TMP2 |
1407 | pxor \XMM1, \TMP2 | 1407 | pxor \XMM1, \TMP2 |
1408 | movdqa HashKey_4(%arg2), \TMP5 | 1408 | movdqu HashKey_4(%arg2), \TMP5 |
1409 | PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 | 1409 | PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 |
1410 | PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 | 1410 | PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 |
1411 | movdqa HashKey_4_k(%arg2), \TMP4 | 1411 | movdqu HashKey_4_k(%arg2), \TMP4 |
1412 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1412 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1413 | movdqa \XMM1, \XMMDst | 1413 | movdqa \XMM1, \XMMDst |
1414 | movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 | 1414 | movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 |
@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
1418 | movdqa \XMM2, \TMP1 | 1418 | movdqa \XMM2, \TMP1 |
1419 | pshufd $78, \XMM2, \TMP2 | 1419 | pshufd $78, \XMM2, \TMP2 |
1420 | pxor \XMM2, \TMP2 | 1420 | pxor \XMM2, \TMP2 |
1421 | movdqa HashKey_3(%arg2), \TMP5 | 1421 | movdqu HashKey_3(%arg2), \TMP5 |
1422 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1422 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1423 | PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 | 1423 | PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 |
1424 | movdqa HashKey_3_k(%arg2), \TMP4 | 1424 | movdqu HashKey_3_k(%arg2), \TMP4 |
1425 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1425 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1426 | pxor \TMP1, \TMP6 | 1426 | pxor \TMP1, \TMP6 |
1427 | pxor \XMM2, \XMMDst | 1427 | pxor \XMM2, \XMMDst |
@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
1433 | movdqa \XMM3, \TMP1 | 1433 | movdqa \XMM3, \TMP1 |
1434 | pshufd $78, \XMM3, \TMP2 | 1434 | pshufd $78, \XMM3, \TMP2 |
1435 | pxor \XMM3, \TMP2 | 1435 | pxor \XMM3, \TMP2 |
1436 | movdqa HashKey_2(%arg2), \TMP5 | 1436 | movdqu HashKey_2(%arg2), \TMP5 |
1437 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1437 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1438 | PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 | 1438 | PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 |
1439 | movdqa HashKey_2_k(%arg2), \TMP4 | 1439 | movdqu HashKey_2_k(%arg2), \TMP4 |
1440 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1440 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1441 | pxor \TMP1, \TMP6 | 1441 | pxor \TMP1, \TMP6 |
1442 | pxor \XMM3, \XMMDst | 1442 | pxor \XMM3, \XMMDst |
@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
1446 | movdqa \XMM4, \TMP1 | 1446 | movdqa \XMM4, \TMP1 |
1447 | pshufd $78, \XMM4, \TMP2 | 1447 | pshufd $78, \XMM4, \TMP2 |
1448 | pxor \XMM4, \TMP2 | 1448 | pxor \XMM4, \TMP2 |
1449 | movdqa HashKey(%arg2), \TMP5 | 1449 | movdqu HashKey(%arg2), \TMP5 |
1450 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1450 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1451 | PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 | 1451 | PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 |
1452 | movdqa HashKey_k(%arg2), \TMP4 | 1452 | movdqu HashKey_k(%arg2), \TMP4 |
1453 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1453 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1454 | pxor \TMP1, \TMP6 | 1454 | pxor \TMP1, \TMP6 |
1455 | pxor \XMM4, \XMMDst | 1455 | pxor \XMM4, \XMMDst |
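The movdqa -> movdqu conversions above switch every load and store of the precomputed hash keys in the context pointed to by %arg2 from the aligned to the unaligned SSE move. The hunks themselves do not state the motivation, but the likely reason is that this context is caller-provided memory with no 16-byte alignment guarantee: movdqa raises #GP on a misaligned memory operand, while movdqu accepts any address at essentially no cost on aligned data on current CPUs. A hypothetical userspace illustration with SSE2 intrinsics (not kernel code):

    /* Aligned vs. unaligned 128-bit store. Build with -msse2 on 32-bit x86;
     * SSE2 is implied on x86-64. The address p is deliberately NOT 16-byte
     * aligned, so the movdqa form would fault (typically seen as SIGSEGV),
     * while movdqu works on any address. */
    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            static uint8_t buf[64] __attribute__((aligned(32)));
            uint8_t *p = buf + 8;               /* 8 mod 16: misaligned     */
            __m128i v = _mm_set1_epi8(0x5a);

            _mm_storeu_si128((__m128i *)p, v);  /* movdqu: fine anywhere    */
    #if 0
            _mm_store_si128((__m128i *)p, v);   /* movdqa: faults here      */
    #endif
            printf("unaligned store ok, first byte = %#x\n", p[0]);
            return 0;
    }

Note that the register-to-register movdqa copies in these routines (e.g. movdqa \XMM0, \XMM1) are untouched, since alignment only matters for memory operands.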
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 5f4829f10129..dfb2f7c0d019 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs | |||
2465 | 2465 | ||
2466 | perf_callchain_store(entry, regs->ip); | 2466 | perf_callchain_store(entry, regs->ip); |
2467 | 2467 | ||
2468 | if (!current->mm) | 2468 | if (!nmi_uaccess_okay()) |
2469 | return; | 2469 | return; |
2470 | 2470 | ||
2471 | if (perf_callchain_user32(regs, entry)) | 2471 | if (perf_callchain_user32(regs, entry)) |
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c14f2a74b2be..15450a675031 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void) | |||
33 | return flags; | 33 | return flags; |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void native_restore_fl(unsigned long flags) | 36 | extern inline void native_restore_fl(unsigned long flags); |
37 | extern inline void native_restore_fl(unsigned long flags) | ||
37 | { | 38 | { |
38 | asm volatile("push %0 ; popf" | 39 | asm volatile("push %0 ; popf" |
39 | : /* no output */ | 40 | : /* no output */ |
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index a564084c6141..f8b1ad2c3828 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -2,6 +2,8 @@ | |||
2 | #ifndef _ASM_X86_PGTABLE_3LEVEL_H | 2 | #ifndef _ASM_X86_PGTABLE_3LEVEL_H |
3 | #define _ASM_X86_PGTABLE_3LEVEL_H | 3 | #define _ASM_X86_PGTABLE_3LEVEL_H |
4 | 4 | ||
5 | #include <asm/atomic64_32.h> | ||
6 | |||
5 | /* | 7 | /* |
6 | * Intel Physical Address Extension (PAE) Mode - three-level page | 8 | * Intel Physical Address Extension (PAE) Mode - three-level page |
7 | * tables on PPro+ CPUs. | 9 | * tables on PPro+ CPUs. |
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep) | |||
150 | { | 152 | { |
151 | pte_t res; | 153 | pte_t res; |
152 | 154 | ||
153 | /* xchg acts as a barrier before the setting of the high bits */ | 155 | res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0); |
154 | res.pte_low = xchg(&ptep->pte_low, 0); | ||
155 | res.pte_high = ptep->pte_high; | ||
156 | ptep->pte_high = 0; | ||
157 | 156 | ||
158 | return res; | 157 | return res; |
159 | } | 158 | } |
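The PAE native_ptep_get_and_clear() rewrite replaces the two 32-bit steps (xchg of pte_low, then a plain read-and-clear of pte_high) with a single arch_atomic64_xchg() on the whole entry, which is also why the hunk adds the <asm/atomic64_32.h> include. The motivation is not spelled out here, but the effect is that the old value is fetched and the PTE cleared in one atomic 64-bit exchange, with no transient half-cleared entry ever visible. A hypothetical userspace analogue using the GCC builtin (on 32-bit x86 with -march=i586 or later this compiles to cmpxchg8b; on x86-64 it is a plain xchg):

    /* Fetch-and-clear a 64-bit entry in one atomic exchange rather than as
     * two 32-bit halves. Illustration only, not the kernel helper. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pte = 0x8000000012345067ULL;  /* flags low, pfn above  */
            uint64_t old = __atomic_exchange_n(&pte, 0, __ATOMIC_SEQ_CST);

            printf("old=%#llx now=%#llx\n",
                   (unsigned long long)old, (unsigned long long)pte);
            return 0;
    }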
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c24297268ebc..d53c54b842da 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -132,6 +132,8 @@ struct cpuinfo_x86 { | |||
132 | /* Index into per_cpu list: */ | 132 | /* Index into per_cpu list: */ |
133 | u16 cpu_index; | 133 | u16 cpu_index; |
134 | u32 microcode; | 134 | u32 microcode; |
135 | /* Address space bits used by the cache internally */ | ||
136 | u8 x86_cache_bits; | ||
135 | unsigned initialized : 1; | 137 | unsigned initialized : 1; |
136 | } __randomize_layout; | 138 | } __randomize_layout; |
137 | 139 | ||
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c); | |||
183 | 185 | ||
184 | static inline unsigned long long l1tf_pfn_limit(void) | 186 | static inline unsigned long long l1tf_pfn_limit(void) |
185 | { | 187 | { |
186 | return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT); | 188 | return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); |
187 | } | 189 | } |
188 | 190 | ||
189 | extern void early_cpu_init(void); | 191 | extern void early_cpu_init(void); |
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 5f9012ff52ed..33d3c88a7225 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h | |||
@@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs); | |||
39 | 39 | ||
40 | #define __ARCH_HAS_SA_RESTORER | 40 | #define __ARCH_HAS_SA_RESTORER |
41 | 41 | ||
42 | #include <asm/asm.h> | ||
42 | #include <uapi/asm/sigcontext.h> | 43 | #include <uapi/asm/sigcontext.h> |
43 | 44 | ||
44 | #ifdef __i386__ | 45 | #ifdef __i386__ |
@@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig) | |||
86 | 87 | ||
87 | static inline int __gen_sigismember(sigset_t *set, int _sig) | 88 | static inline int __gen_sigismember(sigset_t *set, int _sig) |
88 | { | 89 | { |
89 | unsigned char ret; | 90 | bool ret; |
90 | asm("btl %2,%1\n\tsetc %0" | 91 | asm("btl %2,%1" CC_SET(c) |
91 | : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); | 92 | : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1)); |
92 | return ret; | 93 | return ret; |
93 | } | 94 | } |
94 | 95 | ||
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index b6dc698f992a..f335aad404a4 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h | |||
@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void) | |||
111 | return (unsigned long)frame; | 111 | return (unsigned long)frame; |
112 | } | 112 | } |
113 | 113 | ||
114 | void show_opcodes(u8 *rip, const char *loglvl); | 114 | void show_opcodes(struct pt_regs *regs, const char *loglvl); |
115 | void show_ip(struct pt_regs *regs, const char *loglvl); | 115 | void show_ip(struct pt_regs *regs, const char *loglvl); |
116 | #endif /* _ASM_X86_STACKTRACE_H */ | 116 | #endif /* _ASM_X86_STACKTRACE_H */ |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 29c9da6c62fc..58ce5288878e 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
@@ -175,8 +175,16 @@ struct tlb_state { | |||
175 | * are on. This means that it may not match current->active_mm, | 175 | * are on. This means that it may not match current->active_mm, |
176 | * which will contain the previous user mm when we're in lazy TLB | 176 | * which will contain the previous user mm when we're in lazy TLB |
177 | * mode even if we've already switched back to swapper_pg_dir. | 177 | * mode even if we've already switched back to swapper_pg_dir. |
178 | * | ||
179 | * During switch_mm_irqs_off(), loaded_mm will be set to | ||
180 | * LOADED_MM_SWITCHING during the brief interrupts-off window | ||
181 | * when CR3 and loaded_mm would otherwise be inconsistent. This | ||
182 | * is for nmi_uaccess_okay()'s benefit. | ||
178 | */ | 183 | */ |
179 | struct mm_struct *loaded_mm; | 184 | struct mm_struct *loaded_mm; |
185 | |||
186 | #define LOADED_MM_SWITCHING ((struct mm_struct *)1) | ||
187 | |||
180 | u16 loaded_mm_asid; | 188 | u16 loaded_mm_asid; |
181 | u16 next_asid; | 189 | u16 next_asid; |
182 | /* last user mm's ctx id */ | 190 | /* last user mm's ctx id */ |
@@ -246,6 +254,38 @@ struct tlb_state { | |||
246 | }; | 254 | }; |
247 | DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); | 255 | DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); |
248 | 256 | ||
257 | /* | ||
258 | * Blindly accessing user memory from NMI context can be dangerous | ||
259 | * if we're in the middle of switching the current user task or | ||
260 | * switching the loaded mm. It can also be dangerous if we | ||
261 | * interrupted some kernel code that was temporarily using a | ||
262 | * different mm. | ||
263 | */ | ||
264 | static inline bool nmi_uaccess_okay(void) | ||
265 | { | ||
266 | struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); | ||
267 | struct mm_struct *current_mm = current->mm; | ||
268 | |||
269 | VM_WARN_ON_ONCE(!loaded_mm); | ||
270 | |||
271 | /* | ||
272 | * The condition we want to check is | ||
273 | * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, | ||
274 | * if we're running in a VM with shadow paging, and nmi_uaccess_okay() | ||
275 | * is supposed to be reasonably fast. | ||
276 | * | ||
277 | * Instead, we check the almost equivalent but somewhat conservative | ||
278 | * condition below, and we rely on the fact that switch_mm_irqs_off() | ||
279 | * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. | ||
280 | */ | ||
281 | if (loaded_mm != current_mm) | ||
282 | return false; | ||
283 | |||
284 | VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); | ||
285 | |||
286 | return true; | ||
287 | } | ||
288 | |||
249 | /* Initialize cr4 shadow for this CPU. */ | 289 | /* Initialize cr4 shadow for this CPU. */ |
250 | static inline void cr4_init_shadow(void) | 290 | static inline void cr4_init_shadow(void) |
251 | { | 291 | { |
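nmi_uaccess_okay() works by comparing the per-CPU loaded_mm against current->mm, and the new LOADED_MM_SWITCHING sentinel is what makes that single comparison safe: switch_mm_irqs_off() (see the arch/x86/mm/tlb.c hunk further down) parks loaded_mm at the sentinel before writing CR3, so an NMI landing inside the window sees a value that can never equal a real mm and simply backs off. A hypothetical userspace analogue of the same publish-a-sentinel pattern, for illustration only:

    /* Before changing state an asynchronous observer might read, park the
     * published pointer at a sentinel so the observer can detect the
     * inconsistent window and back off. */
    #include <stdio.h>

    #define SWITCHING ((void *)1)

    static void * volatile loaded_ctx;  /* stands in for cpu_tlbstate.loaded_mm */
    static void *current_ctx;           /* stands in for current->mm            */

    static int access_okay(void)
    {
            /* The sentinel can never equal a real context, so this one
             * comparison also covers the switching window. */
            return loaded_ctx == current_ctx;
    }

    static void switch_ctx(void *next)
    {
            loaded_ctx = SWITCHING;     /* observers back off from here on   */
            __sync_synchronize();
            current_ctx = next;         /* the change that must not be raced */
            __sync_synchronize();
            loaded_ctx = next;          /* consistent again                  */
    }

    int main(void)
    {
            static int a, b;

            current_ctx = loaded_ctx = &a;
            printf("before switch: okay=%d\n", access_okay());
            switch_ctx(&b);
            printf("after switch:  okay=%d\n", access_okay());
            return 0;
    }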
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index fb856c9f0449..53748541c487 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void) | |||
93 | * | 93 | * |
94 | * If RDPID is available, use it. | 94 | * If RDPID is available, use it. |
95 | */ | 95 | */ |
96 | alternative_io ("lsl %[p],%[seg]", | 96 | alternative_io ("lsl %[seg],%[p]", |
97 | ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ | 97 | ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ |
98 | X86_FEATURE_RDPID, | 98 | X86_FEATURE_RDPID, |
99 | [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); | 99 | [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 014f214da581..b9d5e7c9ef43 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, | |||
684 | * It means the size must be writable atomically and the address must be aligned | 684 | * It means the size must be writable atomically and the address must be aligned |
685 | * in a way that permits an atomic write. It also makes sure we fit on a single | 685 | * in a way that permits an atomic write. It also makes sure we fit on a single |
686 | * page. | 686 | * page. |
687 | * | ||
688 | * Note: Must be called under text_mutex. | ||
689 | */ | 687 | */ |
690 | void *text_poke(void *addr, const void *opcode, size_t len) | 688 | void *text_poke(void *addr, const void *opcode, size_t len) |
691 | { | 689 | { |
@@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len) | |||
700 | */ | 698 | */ |
701 | BUG_ON(!after_bootmem); | 699 | BUG_ON(!after_bootmem); |
702 | 700 | ||
701 | lockdep_assert_held(&text_mutex); | ||
702 | |||
703 | if (!core_kernel_text((unsigned long)addr)) { | 703 | if (!core_kernel_text((unsigned long)addr)) { |
704 | pages[0] = vmalloc_to_page(addr); | 704 | pages[0] = vmalloc_to_page(addr); |
705 | pages[1] = vmalloc_to_page(addr + PAGE_SIZE); | 705 | pages[1] = vmalloc_to_page(addr + PAGE_SIZE); |
@@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs) | |||
782 | * - replace the first byte (int3) by the first byte of | 782 | * - replace the first byte (int3) by the first byte of |
783 | * replacing opcode | 783 | * replacing opcode |
784 | * - sync cores | 784 | * - sync cores |
785 | * | ||
786 | * Note: must be called under text_mutex. | ||
787 | */ | 785 | */ |
788 | void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) | 786 | void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) |
789 | { | 787 | { |
@@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) | |||
792 | bp_int3_handler = handler; | 790 | bp_int3_handler = handler; |
793 | bp_int3_addr = (u8 *)addr + sizeof(int3); | 791 | bp_int3_addr = (u8 *)addr + sizeof(int3); |
794 | bp_patching_in_progress = true; | 792 | bp_patching_in_progress = true; |
793 | |||
794 | lockdep_assert_held(&text_mutex); | ||
795 | |||
795 | /* | 796 | /* |
796 | * Corresponding read barrier in int3 notifier for making sure the | 797 | * Corresponding read barrier in int3 notifier for making sure the |
797 | * in_progress and handler are correctly ordered wrt. patching. | 798 | * in_progress and handler are correctly ordered wrt. patching. |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4c2313d0b9ca..40bdaea97fe7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); | |||
668 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; | 668 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
669 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); | 669 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); |
670 | 670 | ||
671 | /* | ||
672 | * These CPUs all support 44bits physical address space internally in the | ||
673 | * cache but CPUID can report a smaller number of physical address bits. | ||
674 | * | ||
675 | * The L1TF mitigation uses the top most address bit for the inversion of | ||
676 | * non present PTEs. When the installed memory reaches into the top most | ||
677 | * address bit due to memory holes, which has been observed on machines | ||
678 | * which report 36bits physical address bits and have 32G RAM installed, | ||
679 | * then the mitigation range check in l1tf_select_mitigation() triggers. | ||
680 | * This is a false positive because the mitigation is still possible due to | ||
681 | * the fact that the cache uses 44bit internally. Use the cache bits | ||
682 | * instead of the reported physical bits and adjust them on the affected | ||
683 | * machines to 44bit if the reported bits are less than 44. | ||
684 | */ | ||
685 | static void override_cache_bits(struct cpuinfo_x86 *c) | ||
686 | { | ||
687 | if (c->x86 != 6) | ||
688 | return; | ||
689 | |||
690 | switch (c->x86_model) { | ||
691 | case INTEL_FAM6_NEHALEM: | ||
692 | case INTEL_FAM6_WESTMERE: | ||
693 | case INTEL_FAM6_SANDYBRIDGE: | ||
694 | case INTEL_FAM6_IVYBRIDGE: | ||
695 | case INTEL_FAM6_HASWELL_CORE: | ||
696 | case INTEL_FAM6_HASWELL_ULT: | ||
697 | case INTEL_FAM6_HASWELL_GT3E: | ||
698 | case INTEL_FAM6_BROADWELL_CORE: | ||
699 | case INTEL_FAM6_BROADWELL_GT3E: | ||
700 | case INTEL_FAM6_SKYLAKE_MOBILE: | ||
701 | case INTEL_FAM6_SKYLAKE_DESKTOP: | ||
702 | case INTEL_FAM6_KABYLAKE_MOBILE: | ||
703 | case INTEL_FAM6_KABYLAKE_DESKTOP: | ||
704 | if (c->x86_cache_bits < 44) | ||
705 | c->x86_cache_bits = 44; | ||
706 | break; | ||
707 | } | ||
708 | } | ||
709 | |||
671 | static void __init l1tf_select_mitigation(void) | 710 | static void __init l1tf_select_mitigation(void) |
672 | { | 711 | { |
673 | u64 half_pa; | 712 | u64 half_pa; |
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void) | |||
675 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) | 714 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
676 | return; | 715 | return; |
677 | 716 | ||
717 | override_cache_bits(&boot_cpu_data); | ||
718 | |||
678 | switch (l1tf_mitigation) { | 719 | switch (l1tf_mitigation) { |
679 | case L1TF_MITIGATION_OFF: | 720 | case L1TF_MITIGATION_OFF: |
680 | case L1TF_MITIGATION_FLUSH_NOWARN: | 721 | case L1TF_MITIGATION_FLUSH_NOWARN: |
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void) | |||
694 | return; | 735 | return; |
695 | #endif | 736 | #endif |
696 | 737 | ||
697 | /* | ||
698 | * This is extremely unlikely to happen because almost all | ||
699 | * systems have far more MAX_PA/2 than RAM can be fit into | ||
700 | * DIMM slots. | ||
701 | */ | ||
702 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; | 738 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; |
703 | if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { | 739 | if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { |
704 | pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); | 740 | pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); |
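The practical effect of override_cache_bits() shows up in l1tf_pfn_limit() (see the asm/processor.h hunk above), which now derives the limit from x86_cache_bits. With PAGE_SHIFT = 12, a CPU reporting 36 physical bits gives BIT_ULL(36 - 1 - 12) = 2^23 pages, so the "more than MAX_PA/2 memory" warning fires once RAM reaches 32 GiB, whereas the corrected 44 cache bits give 2^31 pages (8 TiB); this matches the false-positive case described in the comment. A small standalone restatement of that arithmetic (illustration only, not the kernel helper):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define BIT_ULL(n) (1ULL << (n))

    /* Mirrors the shape of l1tf_pfn_limit(): highest safe pfn is half of
     * the addressable range, expressed in pages. */
    static uint64_t pfn_limit(unsigned int addr_bits)
    {
            return BIT_ULL(addr_bits - 1 - PAGE_SHIFT);
    }

    int main(void)
    {
            /* 36 reported physical bits: 2^23 pages = 32 GiB */
            printf("36-bit limit: %llu pages\n",
                   (unsigned long long)pfn_limit(36));
            /* 44 cache-internal bits: 2^31 pages = 8 TiB */
            printf("44-bit limit: %llu pages\n",
                   (unsigned long long)pfn_limit(44));
            return 0;
    }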
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 84dee5ab745a..44c4ef3d989b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c) | |||
919 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) | 919 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) |
920 | c->x86_phys_bits = 36; | 920 | c->x86_phys_bits = 36; |
921 | #endif | 921 | #endif |
922 | c->x86_cache_bits = c->x86_phys_bits; | ||
922 | } | 923 | } |
923 | 924 | ||
924 | static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) | 925 | static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 401e8c133108..fc3c07fe7df5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) | |||
150 | if (cpu_has(c, X86_FEATURE_HYPERVISOR)) | 150 | if (cpu_has(c, X86_FEATURE_HYPERVISOR)) |
151 | return false; | 151 | return false; |
152 | 152 | ||
153 | if (c->x86 != 6) | ||
154 | return false; | ||
155 | |||
153 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { | 156 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { |
154 | if (c->x86_model == spectre_bad_microcodes[i].model && | 157 | if (c->x86_model == spectre_bad_microcodes[i].model && |
155 | c->x86_stepping == spectre_bad_microcodes[i].stepping) | 158 | c->x86_stepping == spectre_bad_microcodes[i].stepping) |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 9c8652974f8e..f56895106ccf 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/bug.h> | 17 | #include <linux/bug.h> |
18 | #include <linux/nmi.h> | 18 | #include <linux/nmi.h> |
19 | #include <linux/sysfs.h> | 19 | #include <linux/sysfs.h> |
20 | #include <linux/kasan.h> | ||
20 | 21 | ||
21 | #include <asm/cpu_entry_area.h> | 22 | #include <asm/cpu_entry_area.h> |
22 | #include <asm/stacktrace.h> | 23 | #include <asm/stacktrace.h> |
@@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable, | |||
89 | * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random | 90 | * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random |
90 | * guesstimate in attempt to achieve all of the above. | 91 | * guesstimate in attempt to achieve all of the above. |
91 | */ | 92 | */ |
92 | void show_opcodes(u8 *rip, const char *loglvl) | 93 | void show_opcodes(struct pt_regs *regs, const char *loglvl) |
93 | { | 94 | { |
94 | #define PROLOGUE_SIZE 42 | 95 | #define PROLOGUE_SIZE 42 |
95 | #define EPILOGUE_SIZE 21 | 96 | #define EPILOGUE_SIZE 21 |
96 | #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) | 97 | #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) |
97 | u8 opcodes[OPCODE_BUFSIZE]; | 98 | u8 opcodes[OPCODE_BUFSIZE]; |
99 | unsigned long prologue = regs->ip - PROLOGUE_SIZE; | ||
100 | bool bad_ip; | ||
98 | 101 | ||
99 | if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) { | 102 | /* |
103 | * Make sure userspace isn't trying to trick us into dumping kernel | ||
104 | * memory by pointing the userspace instruction pointer at it. | ||
105 | */ | ||
106 | bad_ip = user_mode(regs) && | ||
107 | __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); | ||
108 | |||
109 | if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue, | ||
110 | OPCODE_BUFSIZE)) { | ||
100 | printk("%sCode: Bad RIP value.\n", loglvl); | 111 | printk("%sCode: Bad RIP value.\n", loglvl); |
101 | } else { | 112 | } else { |
102 | printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" | 113 | printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" |
@@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl) | |||
112 | #else | 123 | #else |
113 | printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); | 124 | printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); |
114 | #endif | 125 | #endif |
115 | show_opcodes((u8 *)regs->ip, loglvl); | 126 | show_opcodes(regs, loglvl); |
116 | } | 127 | } |
117 | 128 | ||
118 | void show_iret_regs(struct pt_regs *regs) | 129 | void show_iret_regs(struct pt_regs *regs) |
@@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) | |||
346 | * We're not going to return, but we might be on an IST stack or | 357 | * We're not going to return, but we might be on an IST stack or |
347 | * have very little stack space left. Rewind the stack and kill | 358 | * have very little stack space left. Rewind the stack and kill |
348 | * the task. | 359 | * the task. |
360 | * Before we rewind the stack, we have to tell KASAN that we're going to | ||
361 | * reuse the task stack and that existing poisons are invalid. | ||
349 | */ | 362 | */ |
363 | kasan_unpoison_task_stack(current); | ||
350 | rewind_stack_do_exit(signr); | 364 | rewind_stack_do_exit(signr); |
351 | } | 365 | } |
352 | NOKPROBE_SYMBOL(oops_end); | 366 | NOKPROBE_SYMBOL(oops_end); |
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index c8c6ad0d58b8..3f435d7fca5e 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
8 | #include <linux/export.h> | 8 | #include <linux/export.h> |
9 | 9 | ||
10 | #include <asm/tlbflush.h> | ||
11 | |||
10 | /* | 12 | /* |
11 | * We rely on the nested NMI work to allow atomic faults from the NMI path; the | 13 | * We rely on the nested NMI work to allow atomic faults from the NMI path; the |
12 | * nested NMI paths are careful to preserve CR2. | 14 | * nested NMI paths are careful to preserve CR2. |
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
19 | if (__range_not_ok(from, n, TASK_SIZE)) | 21 | if (__range_not_ok(from, n, TASK_SIZE)) |
20 | return n; | 22 | return n; |
21 | 23 | ||
24 | if (!nmi_uaccess_okay()) | ||
25 | return n; | ||
26 | |||
22 | /* | 27 | /* |
23 | * Even though this function is typically called from NMI/IRQ context | 28 | * Even though this function is typically called from NMI/IRQ context |
24 | * disable pagefaults so that its behaviour is consistent even when | 29 | * disable pagefaults so that its behaviour is consistent even when |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b9123c497e0a..47bebfe6efa7 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, | |||
837 | 837 | ||
838 | printk(KERN_CONT "\n"); | 838 | printk(KERN_CONT "\n"); |
839 | 839 | ||
840 | show_opcodes((u8 *)regs->ip, loglvl); | 840 | show_opcodes(regs, loglvl); |
841 | } | 841 | } |
842 | 842 | ||
843 | static void | 843 | static void |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 8d6c34fe49be..51a5a69ecac9 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) | |||
1420 | return 0; | 1420 | return 0; |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | /* | ||
1424 | * Machine check recovery code needs to change cache mode of poisoned | ||
1425 | * pages to UC to avoid speculative access logging another error. But | ||
1426 | * passing the address of the 1:1 mapping to set_memory_uc() is a fine | ||
1427 | * way to encourage a speculative access. So we cheat and flip the top | ||
1428 | * bit of the address. This works fine for the code that updates the | ||
1429 | * page tables. But at the end of the process we need to flush the cache | ||
1430 | * and the non-canonical address causes a #GP fault when used by the | ||
1431 | * CLFLUSH instruction. | ||
1432 | * | ||
1433 | * But in the common case we already have a canonical address. This code | ||
1434 | * will fix the top bit if needed and is a no-op otherwise. | ||
1435 | */ | ||
1436 | static inline unsigned long make_addr_canonical_again(unsigned long addr) | ||
1437 | { | ||
1438 | #ifdef CONFIG_X86_64 | ||
1439 | return (long)(addr << 1) >> 1; | ||
1440 | #else | ||
1441 | return addr; | ||
1442 | #endif | ||
1443 | } | ||
1444 | |||
1445 | |||
1423 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, | 1446 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
1424 | pgprot_t mask_set, pgprot_t mask_clr, | 1447 | pgprot_t mask_set, pgprot_t mask_clr, |
1425 | int force_split, int in_flag, | 1448 | int force_split, int in_flag, |
@@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
1465 | * Save address for cache flush. *addr is modified in the call | 1488 | * Save address for cache flush. *addr is modified in the call |
1466 | * to __change_page_attr_set_clr() below. | 1489 | * to __change_page_attr_set_clr() below. |
1467 | */ | 1490 | */ |
1468 | baddr = *addr; | 1491 | baddr = make_addr_canonical_again(*addr); |
1469 | } | 1492 | } |
1470 | 1493 | ||
1471 | /* Must avoid aliasing mappings in the highmem code */ | 1494 | /* Must avoid aliasing mappings in the highmem code */ |
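make_addr_canonical_again() relies on sign extension: shifting the address left by one discards the (possibly flipped) top bit, and the arithmetic right shift of the now-signed value copies the new top bit (bit 62 of the original, which is still set for any kernel address) back into bit 63, restoring a canonical address; for an address that was already canonical the round trip is a no-op. A hypothetical userspace check of that identity (assumes 64-bit long and, like the kernel code, GCC's arithmetic right shift of signed values):

    #include <stdio.h>

    static unsigned long make_canonical(unsigned long addr)
    {
            /* Same trick as above: drop bit 63, then sign-extend bit 62. */
            return (long)(addr << 1) >> 1;
    }

    int main(void)
    {
            unsigned long canon = 0xffff888000000000UL;    /* canonical     */
            unsigned long flipped = canon & ~(1UL << 63);  /* top bit clear */

            printf("%lx -> %lx\n", flipped, make_canonical(flipped));
            printf("%lx -> %lx\n", canon, make_canonical(canon)); /* no-op  */
            return 0;
    }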
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 31341ae7309f..c1fc1ae6b429 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c | |||
@@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) | |||
248 | * | 248 | * |
249 | * Returns a pointer to a PTE on success, or NULL on failure. | 249 | * Returns a pointer to a PTE on success, or NULL on failure. |
250 | */ | 250 | */ |
251 | static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) | 251 | static pte_t *pti_user_pagetable_walk_pte(unsigned long address) |
252 | { | 252 | { |
253 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); | 253 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); |
254 | pmd_t *pmd; | 254 | pmd_t *pmd; |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 9517d1b2a281..e96b99eb800c 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
305 | 305 | ||
306 | choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); | 306 | choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); |
307 | 307 | ||
308 | /* Let nmi_uaccess_okay() know that we're changing CR3. */ | ||
309 | this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); | ||
310 | barrier(); | ||
311 | |||
308 | if (need_flush) { | 312 | if (need_flush) { |
309 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); | 313 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); |
310 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); | 314 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); |
@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
335 | if (next != &init_mm) | 339 | if (next != &init_mm) |
336 | this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); | 340 | this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); |
337 | 341 | ||
342 | /* Make sure we write CR3 before loaded_mm. */ | ||
343 | barrier(); | ||
344 | |||
338 | this_cpu_write(cpu_tlbstate.loaded_mm, next); | 345 | this_cpu_write(cpu_tlbstate.loaded_mm, next); |
339 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); | 346 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); |
340 | } | 347 | } |
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 324b93328b37..05ca14222463 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c | |||
@@ -85,14 +85,10 @@ pgd_t * __init efi_call_phys_prolog(void) | |||
85 | 85 | ||
86 | void __init efi_call_phys_epilog(pgd_t *save_pgd) | 86 | void __init efi_call_phys_epilog(pgd_t *save_pgd) |
87 | { | 87 | { |
88 | struct desc_ptr gdt_descr; | ||
89 | |||
90 | gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0); | ||
91 | gdt_descr.size = GDT_SIZE - 1; | ||
92 | load_gdt(&gdt_descr); | ||
93 | |||
94 | load_cr3(save_pgd); | 88 | load_cr3(save_pgd); |
95 | __flush_tlb_all(); | 89 | __flush_tlb_all(); |
90 | |||
91 | load_fixmap_gdt(0); | ||
96 | } | 92 | } |
97 | 93 | ||
98 | void __init efi_runtime_update_mappings(void) | 94 | void __init efi_runtime_update_mappings(void) |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 45b700ac5fe7..2fe5c9b1816b 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
@@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val) | |||
435 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) | 435 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) |
436 | { | 436 | { |
437 | trace_xen_mmu_set_pte_atomic(ptep, pte); | 437 | trace_xen_mmu_set_pte_atomic(ptep, pte); |
438 | set_64bit((u64 *)ptep, native_pte_val(pte)); | 438 | __xen_set_pte(ptep, pte); |
439 | } | 439 | } |
440 | 440 | ||
441 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 441 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
442 | { | 442 | { |
443 | trace_xen_mmu_pte_clear(mm, addr, ptep); | 443 | trace_xen_mmu_pte_clear(mm, addr, ptep); |
444 | if (!xen_batched_set_pte(ptep, native_make_pte(0))) | 444 | __xen_set_pte(ptep, native_make_pte(0)); |
445 | native_pte_clear(mm, addr, ptep); | ||
446 | } | 445 | } |
447 | 446 | ||
448 | static void xen_pmd_clear(pmd_t *pmdp) | 447 | static void xen_pmd_clear(pmd_t *pmdp) |
@@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) | |||
1570 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | 1569 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & |
1571 | pte_val_ma(pte)); | 1570 | pte_val_ma(pte)); |
1572 | #endif | 1571 | #endif |
1573 | native_set_pte(ptep, pte); | 1572 | __xen_set_pte(ptep, pte); |
1574 | } | 1573 | } |
1575 | 1574 | ||
1576 | /* Early in boot, while setting up the initial pagetable, assume | 1575 | /* Early in boot, while setting up the initial pagetable, assume |
@@ -2061,7 +2060,6 @@ void __init xen_relocate_p2m(void) | |||
2061 | pud_t *pud; | 2060 | pud_t *pud; |
2062 | pgd_t *pgd; | 2061 | pgd_t *pgd; |
2063 | unsigned long *new_p2m; | 2062 | unsigned long *new_p2m; |
2064 | int save_pud; | ||
2065 | 2063 | ||
2066 | size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); | 2064 | size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); |
2067 | n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; | 2065 | n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; |
@@ -2091,7 +2089,6 @@ void __init xen_relocate_p2m(void) | |||
2091 | 2089 | ||
2092 | pgd = __va(read_cr3_pa()); | 2090 | pgd = __va(read_cr3_pa()); |
2093 | new_p2m = (unsigned long *)(2 * PGDIR_SIZE); | 2091 | new_p2m = (unsigned long *)(2 * PGDIR_SIZE); |
2094 | save_pud = n_pud; | ||
2095 | for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { | 2092 | for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { |
2096 | pud = early_memremap(pud_phys, PAGE_SIZE); | 2093 | pud = early_memremap(pud_phys, PAGE_SIZE); |
2097 | clear_page(pud); | 2094 | clear_page(pud); |
diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 84507d3e9a98..8e20a0677dcf 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c | |||
@@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb) | |||
123 | } | 123 | } |
124 | } | 124 | } |
125 | 125 | ||
126 | static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | 126 | static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw, |
127 | enum wbt_flags wb_acct) | ||
127 | { | 128 | { |
128 | struct rq_wb *rwb = RQWB(rqos); | ||
129 | struct rq_wait *rqw; | ||
130 | int inflight, limit; | 129 | int inflight, limit; |
131 | 130 | ||
132 | if (!(wb_acct & WBT_TRACKED)) | ||
133 | return; | ||
134 | |||
135 | rqw = get_rq_wait(rwb, wb_acct); | ||
136 | inflight = atomic_dec_return(&rqw->inflight); | 131 | inflight = atomic_dec_return(&rqw->inflight); |
137 | 132 | ||
138 | /* | 133 | /* |
@@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | |||
166 | int diff = limit - inflight; | 161 | int diff = limit - inflight; |
167 | 162 | ||
168 | if (!inflight || diff >= rwb->wb_background / 2) | 163 | if (!inflight || diff >= rwb->wb_background / 2) |
169 | wake_up(&rqw->wait); | 164 | wake_up_all(&rqw->wait); |
170 | } | 165 | } |
171 | } | 166 | } |
172 | 167 | ||
168 | static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | ||
169 | { | ||
170 | struct rq_wb *rwb = RQWB(rqos); | ||
171 | struct rq_wait *rqw; | ||
172 | |||
173 | if (!(wb_acct & WBT_TRACKED)) | ||
174 | return; | ||
175 | |||
176 | rqw = get_rq_wait(rwb, wb_acct); | ||
177 | wbt_rqw_done(rwb, rqw, wb_acct); | ||
178 | } | ||
179 | |||
173 | /* | 180 | /* |
174 | * Called on completion of a request. Note that it's also called when | 181 | * Called on completion of a request. Note that it's also called when |
175 | * a request is merged, when the request gets freed. | 182 | * a request is merged, when the request gets freed. |
@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw) | |||
481 | return limit; | 488 | return limit; |
482 | } | 489 | } |
483 | 490 | ||
491 | struct wbt_wait_data { | ||
492 | struct wait_queue_entry wq; | ||
493 | struct task_struct *task; | ||
494 | struct rq_wb *rwb; | ||
495 | struct rq_wait *rqw; | ||
496 | unsigned long rw; | ||
497 | bool got_token; | ||
498 | }; | ||
499 | |||
500 | static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode, | ||
501 | int wake_flags, void *key) | ||
502 | { | ||
503 | struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data, | ||
504 | wq); | ||
505 | |||
506 | /* | ||
507 | * If we fail to get a budget, return -1 to interrupt the wake up | ||
508 | * loop in __wake_up_common. | ||
509 | */ | ||
510 | if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw))) | ||
511 | return -1; | ||
512 | |||
513 | data->got_token = true; | ||
514 | list_del_init(&curr->entry); | ||
515 | wake_up_process(data->task); | ||
516 | return 1; | ||
517 | } | ||
518 | |||
484 | /* | 519 | /* |
485 | * Block if we will exceed our limit, or if we are currently waiting for | 520 | * Block if we will exceed our limit, or if we are currently waiting for |
486 | * the timer to kick off queuing again. | 521 | * the timer to kick off queuing again. |
@@ -491,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, | |||
491 | __acquires(lock) | 526 | __acquires(lock) |
492 | { | 527 | { |
493 | struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); | 528 | struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); |
494 | DECLARE_WAITQUEUE(wait, current); | 529 | struct wbt_wait_data data = { |
530 | .wq = { | ||
531 | .func = wbt_wake_function, | ||
532 | .entry = LIST_HEAD_INIT(data.wq.entry), | ||
533 | }, | ||
534 | .task = current, | ||
535 | .rwb = rwb, | ||
536 | .rqw = rqw, | ||
537 | .rw = rw, | ||
538 | }; | ||
495 | bool has_sleeper; | 539 | bool has_sleeper; |
496 | 540 | ||
497 | has_sleeper = wq_has_sleeper(&rqw->wait); | 541 | has_sleeper = wq_has_sleeper(&rqw->wait); |
498 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) | 542 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) |
499 | return; | 543 | return; |
500 | 544 | ||
501 | add_wait_queue_exclusive(&rqw->wait, &wait); | 545 | prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); |
502 | do { | 546 | do { |
503 | set_current_state(TASK_UNINTERRUPTIBLE); | 547 | if (data.got_token) |
548 | break; | ||
504 | 549 | ||
505 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) | 550 | if (!has_sleeper && |
551 | rq_wait_inc_below(rqw, get_limit(rwb, rw))) { | ||
552 | finish_wait(&rqw->wait, &data.wq); | ||
553 | |||
554 | /* | ||
555 | * We raced with wbt_wake_function() getting a token, | ||
556 | * which means we now have two. Put our local token | ||
557 | * and wake anyone else potentially waiting for one. | ||
558 | */ | ||
559 | if (data.got_token) | ||
560 | wbt_rqw_done(rwb, rqw, wb_acct); | ||
506 | break; | 561 | break; |
562 | } | ||
507 | 563 | ||
508 | if (lock) { | 564 | if (lock) { |
509 | spin_unlock_irq(lock); | 565 | spin_unlock_irq(lock); |
@@ -511,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, | |||
511 | spin_lock_irq(lock); | 567 | spin_lock_irq(lock); |
512 | } else | 568 | } else |
513 | io_schedule(); | 569 | io_schedule(); |
570 | |||
514 | has_sleeper = false; | 571 | has_sleeper = false; |
515 | } while (1); | 572 | } while (1); |
516 | 573 | ||
517 | __set_current_state(TASK_RUNNING); | 574 | finish_wait(&rqw->wait, &data.wq); |
518 | remove_wait_queue(&rqw->wait, &wait); | ||
519 | } | 575 | } |
520 | 576 | ||
521 | static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) | 577 | static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) |
@@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) | |||
580 | return; | 636 | return; |
581 | } | 637 | } |
582 | 638 | ||
583 | if (current_is_kswapd()) | ||
584 | flags |= WBT_KSWAPD; | ||
585 | if (bio_op(bio) == REQ_OP_DISCARD) | ||
586 | flags |= WBT_DISCARD; | ||
587 | |||
588 | __wbt_wait(rwb, flags, bio->bi_opf, lock); | 639 | __wbt_wait(rwb, flags, bio->bi_opf, lock); |
589 | 640 | ||
590 | if (!blk_stat_is_active(rwb->cb)) | 641 | if (!blk_stat_is_active(rwb->cb)) |
diff --git a/block/bsg.c b/block/bsg.c index db588add6ba6..9a442c23a715 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
@@ -37,7 +37,7 @@ struct bsg_device { | |||
37 | struct request_queue *queue; | 37 | struct request_queue *queue; |
38 | spinlock_t lock; | 38 | spinlock_t lock; |
39 | struct hlist_node dev_list; | 39 | struct hlist_node dev_list; |
40 | atomic_t ref_count; | 40 | refcount_t ref_count; |
41 | char name[20]; | 41 | char name[20]; |
42 | int max_queue; | 42 | int max_queue; |
43 | }; | 43 | }; |
@@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd) | |||
252 | 252 | ||
253 | mutex_lock(&bsg_mutex); | 253 | mutex_lock(&bsg_mutex); |
254 | 254 | ||
255 | if (!atomic_dec_and_test(&bd->ref_count)) { | 255 | if (!refcount_dec_and_test(&bd->ref_count)) { |
256 | mutex_unlock(&bsg_mutex); | 256 | mutex_unlock(&bsg_mutex); |
257 | return 0; | 257 | return 0; |
258 | } | 258 | } |
@@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode, | |||
290 | 290 | ||
291 | bd->queue = rq; | 291 | bd->queue = rq; |
292 | 292 | ||
293 | atomic_set(&bd->ref_count, 1); | 293 | refcount_set(&bd->ref_count, 1); |
294 | hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); | 294 | hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); |
295 | 295 | ||
296 | strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); | 296 | strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); |
@@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) | |||
308 | 308 | ||
309 | hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { | 309 | hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { |
310 | if (bd->queue == q) { | 310 | if (bd->queue == q) { |
311 | atomic_inc(&bd->ref_count); | 311 | refcount_inc(&bd->ref_count); |
312 | goto found; | 312 | goto found; |
313 | } | 313 | } |
314 | } | 314 | } |
diff --git a/block/elevator.c b/block/elevator.c index 5ea6e7d600e4..6a06b5d040e5 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e) | |||
895 | spin_lock(&elv_list_lock); | 895 | spin_lock(&elv_list_lock); |
896 | if (elevator_find(e->elevator_name, e->uses_mq)) { | 896 | if (elevator_find(e->elevator_name, e->uses_mq)) { |
897 | spin_unlock(&elv_list_lock); | 897 | spin_unlock(&elv_list_lock); |
898 | if (e->icq_cache) | 898 | kmem_cache_destroy(e->icq_cache); |
899 | kmem_cache_destroy(e->icq_cache); | ||
900 | return -EBUSY; | 899 | return -EBUSY; |
901 | } | 900 | } |
902 | list_add_tail(&e->list, &elv_list); | 901 | list_add_tail(&e->list, &elv_list); |
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 5d4b72e21161..569a4a662dcd 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c | |||
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = { | |||
256 | .qc_issue = ftide010_qc_issue, | 256 | .qc_issue = ftide010_qc_issue, |
257 | }; | 257 | }; |
258 | 258 | ||
259 | static struct ata_port_info ftide010_port_info[] = { | 259 | static struct ata_port_info ftide010_port_info = { |
260 | { | 260 | .flags = ATA_FLAG_SLAVE_POSS, |
261 | .flags = ATA_FLAG_SLAVE_POSS, | 261 | .mwdma_mask = ATA_MWDMA2, |
262 | .mwdma_mask = ATA_MWDMA2, | 262 | .udma_mask = ATA_UDMA6, |
263 | .udma_mask = ATA_UDMA6, | 263 | .pio_mask = ATA_PIO4, |
264 | .pio_mask = ATA_PIO4, | 264 | .port_ops = &pata_ftide010_port_ops, |
265 | .port_ops = &pata_ftide010_port_ops, | ||
266 | }, | ||
267 | }; | 265 | }; |
268 | 266 | ||
269 | #if IS_ENABLED(CONFIG_SATA_GEMINI) | 267 | #if IS_ENABLED(CONFIG_SATA_GEMINI) |
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap) | |||
349 | } | 347 | } |
350 | 348 | ||
351 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, | 349 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, |
350 | struct ata_port_info *pi, | ||
352 | bool is_ata1) | 351 | bool is_ata1) |
353 | { | 352 | { |
354 | struct device *dev = ftide->dev; | 353 | struct device *dev = ftide->dev; |
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, | |||
373 | 372 | ||
374 | /* Flag port as SATA-capable */ | 373 | /* Flag port as SATA-capable */ |
375 | if (gemini_sata_bridge_enabled(sg, is_ata1)) | 374 | if (gemini_sata_bridge_enabled(sg, is_ata1)) |
376 | ftide010_port_info[0].flags |= ATA_FLAG_SATA; | 375 | pi->flags |= ATA_FLAG_SATA; |
376 | |||
377 | /* This device has broken DMA, only PIO works */ | ||
378 | if (of_machine_is_compatible("itian,sq201")) { | ||
379 | pi->mwdma_mask = 0; | ||
380 | pi->udma_mask = 0; | ||
381 | } | ||
377 | 382 | ||
378 | /* | 383 | /* |
379 | * We assume that a simple 40-wire cable is used in the PATA mode. | 384 | * We assume that a simple 40-wire cable is used in the PATA mode. |
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, | |||
435 | } | 440 | } |
436 | #else | 441 | #else |
437 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, | 442 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, |
443 | struct ata_port_info *pi, | ||
438 | bool is_ata1) | 444 | bool is_ata1) |
439 | { | 445 | { |
440 | return -ENOTSUPP; | 446 | return -ENOTSUPP; |
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) | |||
446 | { | 452 | { |
447 | struct device *dev = &pdev->dev; | 453 | struct device *dev = &pdev->dev; |
448 | struct device_node *np = dev->of_node; | 454 | struct device_node *np = dev->of_node; |
449 | const struct ata_port_info pi = ftide010_port_info[0]; | 455 | struct ata_port_info pi = ftide010_port_info; |
450 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 456 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
451 | struct ftide010 *ftide; | 457 | struct ftide010 *ftide; |
452 | struct resource *res; | 458 | struct resource *res; |
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) | |||
490 | * are ATA0. This will also set up the cable types. | 496 | * are ATA0. This will also set up the cable types. |
491 | */ | 497 | */ |
492 | ret = pata_ftide010_gemini_init(ftide, | 498 | ret = pata_ftide010_gemini_init(ftide, |
499 | &pi, | ||
493 | (res->start == 0x63400000)); | 500 | (res->start == 0x63400000)); |
494 | if (ret) | 501 | if (ret) |
495 | goto err_dis_clk; | 502 | goto err_dis_clk; |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 8e2e4757adcb..5a42ae4078c2 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); | |||
185 | int of_pm_clk_add_clks(struct device *dev) | 185 | int of_pm_clk_add_clks(struct device *dev) |
186 | { | 186 | { |
187 | struct clk **clks; | 187 | struct clk **clks; |
188 | unsigned int i, count; | 188 | int i, count; |
189 | int ret; | 189 | int ret; |
190 | 190 | ||
191 | if (!dev || !dev->of_node) | 191 | if (!dev || !dev->of_node) |
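The clock_ops.c change matters because the phandle-counting helper can return a negative errno, and storing that in an unsigned variable silently defeats any error check. A small standalone sketch of the pitfall, with count_items() standing in for the OF helper:

#include <stdio.h>

/* Stand-in for an API that returns an item count or a negative errno. */
static int count_items(int fail)
{
    return fail ? -22 /* -EINVAL */ : 3;
}

int main(void)
{
    unsigned int ucount;
    int count;

    ucount = count_items(1);   /* -22 becomes 4294967274 */
    printf("unsigned copy of the error: %u\n", ucount);

    count = count_items(1);
    if (count <= 0)            /* catches both "no items" and errors */
        printf("signed copy: error %d handled\n", count);
    return 0;
}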
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index b55b245e8052..fd1e19f1a49f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants, | |||
84 | "Maximum number of grants to map persistently"); | 84 | "Maximum number of grants to map persistently"); |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * How long a persistent grant is allowed to remain allocated without being in | ||
88 | * use. The time is in seconds, 0 means indefinitely long. | ||
89 | */ | ||
90 | |||
91 | static unsigned int xen_blkif_pgrant_timeout = 60; | ||
92 | module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout, | ||
93 | uint, 0644); | ||
94 | MODULE_PARM_DESC(persistent_grant_unused_seconds, | ||
95 | "Time in seconds an unused persistent grant is allowed to " | ||
96 | "remain allocated. Default is 60, 0 means unlimited."); | ||
97 | |||
98 | /* | ||
87 | * Maximum number of rings/queues blkback supports, allow as many queues as there | 99 | * Maximum number of rings/queues blkback supports, allow as many queues as there |
88 | * are CPUs if user has not specified a value. | 100 | * are CPUs if user has not specified a value. |
89 | */ | 101 | */ |
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644); | |||
123 | /* Number of free pages to remove on each call to gnttab_free_pages */ | 135 | /* Number of free pages to remove on each call to gnttab_free_pages */ |
124 | #define NUM_BATCH_FREE_PAGES 10 | 136 | #define NUM_BATCH_FREE_PAGES 10 |
125 | 137 | ||
138 | static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt) | ||
139 | { | ||
140 | return xen_blkif_pgrant_timeout && | ||
141 | (jiffies - persistent_gnt->last_used >= | ||
142 | HZ * xen_blkif_pgrant_timeout); | ||
143 | } | ||
144 | |||
126 | static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) | 145 | static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) |
127 | { | 146 | { |
128 | unsigned long flags; | 147 | unsigned long flags; |
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring, | |||
236 | } | 255 | } |
237 | } | 256 | } |
238 | 257 | ||
239 | bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); | 258 | persistent_gnt->active = true; |
240 | set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | ||
241 | /* Add new node and rebalance tree. */ | 259 | /* Add new node and rebalance tree. */ |
242 | rb_link_node(&(persistent_gnt->node), parent, new); | 260 | rb_link_node(&(persistent_gnt->node), parent, new); |
243 | rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); | 261 | rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); |
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, | |||
261 | else if (gref > data->gnt) | 279 | else if (gref > data->gnt) |
262 | node = node->rb_right; | 280 | node = node->rb_right; |
263 | else { | 281 | else { |
264 | if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { | 282 | if (data->active) { |
265 | pr_alert_ratelimited("requesting a grant already in use\n"); | 283 | pr_alert_ratelimited("requesting a grant already in use\n"); |
266 | return NULL; | 284 | return NULL; |
267 | } | 285 | } |
268 | set_bit(PERSISTENT_GNT_ACTIVE, data->flags); | 286 | data->active = true; |
269 | atomic_inc(&ring->persistent_gnt_in_use); | 287 | atomic_inc(&ring->persistent_gnt_in_use); |
270 | return data; | 288 | return data; |
271 | } | 289 | } |
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, | |||
276 | static void put_persistent_gnt(struct xen_blkif_ring *ring, | 294 | static void put_persistent_gnt(struct xen_blkif_ring *ring, |
277 | struct persistent_gnt *persistent_gnt) | 295 | struct persistent_gnt *persistent_gnt) |
278 | { | 296 | { |
279 | if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | 297 | if (!persistent_gnt->active) |
280 | pr_alert_ratelimited("freeing a grant already unused\n"); | 298 | pr_alert_ratelimited("freeing a grant already unused\n"); |
281 | set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | 299 | persistent_gnt->last_used = jiffies; |
282 | clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | 300 | persistent_gnt->active = false; |
283 | atomic_dec(&ring->persistent_gnt_in_use); | 301 | atomic_dec(&ring->persistent_gnt_in_use); |
284 | } | 302 | } |
285 | 303 | ||
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) | |||
371 | struct persistent_gnt *persistent_gnt; | 389 | struct persistent_gnt *persistent_gnt; |
372 | struct rb_node *n; | 390 | struct rb_node *n; |
373 | unsigned int num_clean, total; | 391 | unsigned int num_clean, total; |
374 | bool scan_used = false, clean_used = false; | 392 | bool scan_used = false; |
375 | struct rb_root *root; | 393 | struct rb_root *root; |
376 | 394 | ||
377 | if (ring->persistent_gnt_c < xen_blkif_max_pgrants || | ||
378 | (ring->persistent_gnt_c == xen_blkif_max_pgrants && | ||
379 | !ring->blkif->vbd.overflow_max_grants)) { | ||
380 | goto out; | ||
381 | } | ||
382 | |||
383 | if (work_busy(&ring->persistent_purge_work)) { | 395 | if (work_busy(&ring->persistent_purge_work)) { |
384 | pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); | 396 | pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); |
385 | goto out; | 397 | goto out; |
386 | } | 398 | } |
387 | 399 | ||
388 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; | 400 | if (ring->persistent_gnt_c < xen_blkif_max_pgrants || |
389 | num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; | 401 | (ring->persistent_gnt_c == xen_blkif_max_pgrants && |
390 | num_clean = min(ring->persistent_gnt_c, num_clean); | 402 | !ring->blkif->vbd.overflow_max_grants)) { |
391 | if ((num_clean == 0) || | 403 | num_clean = 0; |
392 | (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use)))) | 404 | } else { |
393 | goto out; | 405 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; |
406 | num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + | ||
407 | num_clean; | ||
408 | num_clean = min(ring->persistent_gnt_c, num_clean); | ||
409 | pr_debug("Going to purge at least %u persistent grants\n", | ||
410 | num_clean); | ||
411 | } | ||
394 | 412 | ||
395 | /* | 413 | /* |
396 | * At this point, we can assure that there will be no calls | 414 | * At this point, we can assure that there will be no calls |
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) | |||
401 | * number of grants. | 419 | * number of grants. |
402 | */ | 420 | */ |
403 | 421 | ||
404 | total = num_clean; | 422 | total = 0; |
405 | |||
406 | pr_debug("Going to purge %u persistent grants\n", num_clean); | ||
407 | 423 | ||
408 | BUG_ON(!list_empty(&ring->persistent_purge_list)); | 424 | BUG_ON(!list_empty(&ring->persistent_purge_list)); |
409 | root = &ring->persistent_gnts; | 425 | root = &ring->persistent_gnts; |
@@ -412,46 +428,37 @@ purge_list: | |||
412 | BUG_ON(persistent_gnt->handle == | 428 | BUG_ON(persistent_gnt->handle == |
413 | BLKBACK_INVALID_HANDLE); | 429 | BLKBACK_INVALID_HANDLE); |
414 | 430 | ||
415 | if (clean_used) { | 431 | if (persistent_gnt->active) |
416 | clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | ||
417 | continue; | 432 | continue; |
418 | } | 433 | if (!scan_used && !persistent_gnt_timeout(persistent_gnt)) |
419 | |||
420 | if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | ||
421 | continue; | 434 | continue; |
422 | if (!scan_used && | 435 | if (scan_used && total >= num_clean) |
423 | (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) | ||
424 | continue; | 436 | continue; |
425 | 437 | ||
426 | rb_erase(&persistent_gnt->node, root); | 438 | rb_erase(&persistent_gnt->node, root); |
427 | list_add(&persistent_gnt->remove_node, | 439 | list_add(&persistent_gnt->remove_node, |
428 | &ring->persistent_purge_list); | 440 | &ring->persistent_purge_list); |
429 | if (--num_clean == 0) | 441 | total++; |
430 | goto finished; | ||
431 | } | 442 | } |
432 | /* | 443 | /* |
433 | * If we get here it means we also need to start cleaning | 444 | * Check whether we also need to start cleaning |
434 | * grants that were used since last purge in order to cope | 445 | * grants that were used since last purge in order to cope |
435 | * with the requested num | 446 | * with the requested num |
436 | */ | 447 | */ |
437 | if (!scan_used && !clean_used) { | 448 | if (!scan_used && total < num_clean) { |
438 | pr_debug("Still missing %u purged frames\n", num_clean); | 449 | pr_debug("Still missing %u purged frames\n", num_clean - total); |
439 | scan_used = true; | 450 | scan_used = true; |
440 | goto purge_list; | 451 | goto purge_list; |
441 | } | 452 | } |
442 | finished: | ||
443 | if (!clean_used) { | ||
444 | pr_debug("Finished scanning for grants to clean, removing used flag\n"); | ||
445 | clean_used = true; | ||
446 | goto purge_list; | ||
447 | } | ||
448 | 453 | ||
449 | ring->persistent_gnt_c -= (total - num_clean); | 454 | if (total) { |
450 | ring->blkif->vbd.overflow_max_grants = 0; | 455 | ring->persistent_gnt_c -= total; |
456 | ring->blkif->vbd.overflow_max_grants = 0; | ||
451 | 457 | ||
452 | /* We can defer this work */ | 458 | /* We can defer this work */ |
453 | schedule_work(&ring->persistent_purge_work); | 459 | schedule_work(&ring->persistent_purge_work); |
454 | pr_debug("Purged %u/%u\n", (total - num_clean), total); | 460 | pr_debug("Purged %u/%u\n", num_clean, total); |
461 | } | ||
455 | 462 | ||
456 | out: | 463 | out: |
457 | return; | 464 | return; |
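The blkback hunks replace the two-bit WAS_ACTIVE/ACTIVE scheme with a per-grant last_used timestamp plus a boolean, so a grant is reclaimed once it has sat unused longer than the new persistent_grant_unused_seconds knob. The sketch below mirrors persistent_gnt_timeout() in plain C, using CLOCK_MONOTONIC seconds instead of jiffies; the struct and variable names are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Tunable like persistent_grant_unused_seconds; 0 means "never expire". */
static unsigned int unused_timeout = 2;

struct grant {
    struct timespec last_used;
    bool active;
};

static bool grant_timed_out(const struct grant *g)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return unused_timeout &&
           now.tv_sec - g->last_used.tv_sec >= (time_t)unused_timeout;
}

int main(void)
{
    struct grant g = { .active = false };

    clock_gettime(CLOCK_MONOTONIC, &g.last_used);
    printf("just released: reclaim? %d\n", !g.active && grant_timed_out(&g));
    sleep(3);
    printf("after 3s idle: reclaim? %d\n", !g.active && grant_timed_out(&g));
    return 0;
}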
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index ecb35fe8ca8d..1d3002d773f7 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
@@ -233,16 +233,6 @@ struct xen_vbd { | |||
233 | 233 | ||
234 | struct backend_info; | 234 | struct backend_info; |
235 | 235 | ||
236 | /* Number of available flags */ | ||
237 | #define PERSISTENT_GNT_FLAGS_SIZE 2 | ||
238 | /* This persistent grant is currently in use */ | ||
239 | #define PERSISTENT_GNT_ACTIVE 0 | ||
240 | /* | ||
241 | * This persistent grant has been used, this flag is set when we remove the | ||
242 | * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently. | ||
243 | */ | ||
244 | #define PERSISTENT_GNT_WAS_ACTIVE 1 | ||
245 | |||
246 | /* Number of requests that we can fit in a ring */ | 236 | /* Number of requests that we can fit in a ring */ |
247 | #define XEN_BLKIF_REQS_PER_PAGE 32 | 237 | #define XEN_BLKIF_REQS_PER_PAGE 32 |
248 | 238 | ||
@@ -250,7 +240,8 @@ struct persistent_gnt { | |||
250 | struct page *page; | 240 | struct page *page; |
251 | grant_ref_t gnt; | 241 | grant_ref_t gnt; |
252 | grant_handle_t handle; | 242 | grant_handle_t handle; |
253 | DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); | 243 | unsigned long last_used; |
244 | bool active; | ||
254 | struct rb_node node; | 245 | struct rb_node node; |
255 | struct list_head remove_node; | 246 | struct list_head remove_node; |
256 | }; | 247 | }; |
@@ -278,7 +269,6 @@ struct xen_blkif_ring { | |||
278 | wait_queue_head_t pending_free_wq; | 269 | wait_queue_head_t pending_free_wq; |
279 | 270 | ||
280 | /* Tree to store persistent grants. */ | 271 | /* Tree to store persistent grants. */ |
281 | spinlock_t pers_gnts_lock; | ||
282 | struct rb_root persistent_gnts; | 272 | struct rb_root persistent_gnts; |
283 | unsigned int persistent_gnt_c; | 273 | unsigned int persistent_gnt_c; |
284 | atomic_t persistent_gnt_in_use; | 274 | atomic_t persistent_gnt_in_use; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8986adab9bf5..a71d817e900d 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/scatterlist.h> | 46 | #include <linux/scatterlist.h> |
47 | #include <linux/bitmap.h> | 47 | #include <linux/bitmap.h> |
48 | #include <linux/list.h> | 48 | #include <linux/list.h> |
49 | #include <linux/workqueue.h> | ||
49 | 50 | ||
50 | #include <xen/xen.h> | 51 | #include <xen/xen.h> |
51 | #include <xen/xenbus.h> | 52 | #include <xen/xenbus.h> |
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq) | |||
121 | 122 | ||
122 | static DEFINE_MUTEX(blkfront_mutex); | 123 | static DEFINE_MUTEX(blkfront_mutex); |
123 | static const struct block_device_operations xlvbd_block_fops; | 124 | static const struct block_device_operations xlvbd_block_fops; |
125 | static struct delayed_work blkfront_work; | ||
126 | static LIST_HEAD(info_list); | ||
124 | 127 | ||
125 | /* | 128 | /* |
126 | * Maximum number of segments in indirect requests, the actual value used by | 129 | * Maximum number of segments in indirect requests, the actual value used by |
@@ -216,6 +219,7 @@ struct blkfront_info | |||
216 | /* Save uncomplete reqs and bios for migration. */ | 219 | /* Save uncomplete reqs and bios for migration. */ |
217 | struct list_head requests; | 220 | struct list_head requests; |
218 | struct bio_list bio_list; | 221 | struct bio_list bio_list; |
222 | struct list_head info_list; | ||
219 | }; | 223 | }; |
220 | 224 | ||
221 | static unsigned int nr_minors; | 225 | static unsigned int nr_minors; |
@@ -1759,6 +1763,12 @@ abort_transaction: | |||
1759 | return err; | 1763 | return err; |
1760 | } | 1764 | } |
1761 | 1765 | ||
1766 | static void free_info(struct blkfront_info *info) | ||
1767 | { | ||
1768 | list_del(&info->info_list); | ||
1769 | kfree(info); | ||
1770 | } | ||
1771 | |||
1762 | /* Common code used when first setting up, and when resuming. */ | 1772 | /* Common code used when first setting up, and when resuming. */ |
1763 | static int talk_to_blkback(struct xenbus_device *dev, | 1773 | static int talk_to_blkback(struct xenbus_device *dev, |
1764 | struct blkfront_info *info) | 1774 | struct blkfront_info *info) |
@@ -1880,7 +1890,10 @@ again: | |||
1880 | destroy_blkring: | 1890 | destroy_blkring: |
1881 | blkif_free(info, 0); | 1891 | blkif_free(info, 0); |
1882 | 1892 | ||
1883 | kfree(info); | 1893 | mutex_lock(&blkfront_mutex); |
1894 | free_info(info); | ||
1895 | mutex_unlock(&blkfront_mutex); | ||
1896 | |||
1884 | dev_set_drvdata(&dev->dev, NULL); | 1897 | dev_set_drvdata(&dev->dev, NULL); |
1885 | 1898 | ||
1886 | return err; | 1899 | return err; |
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1991 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); | 2004 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); |
1992 | dev_set_drvdata(&dev->dev, info); | 2005 | dev_set_drvdata(&dev->dev, info); |
1993 | 2006 | ||
2007 | mutex_lock(&blkfront_mutex); | ||
2008 | list_add(&info->info_list, &info_list); | ||
2009 | mutex_unlock(&blkfront_mutex); | ||
2010 | |||
1994 | return 0; | 2011 | return 0; |
1995 | } | 2012 | } |
1996 | 2013 | ||
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) | |||
2301 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) | 2318 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) |
2302 | indirect_segments = 0; | 2319 | indirect_segments = 0; |
2303 | info->max_indirect_segments = indirect_segments; | 2320 | info->max_indirect_segments = indirect_segments; |
2321 | |||
2322 | if (info->feature_persistent) { | ||
2323 | mutex_lock(&blkfront_mutex); | ||
2324 | schedule_delayed_work(&blkfront_work, HZ * 10); | ||
2325 | mutex_unlock(&blkfront_mutex); | ||
2326 | } | ||
2304 | } | 2327 | } |
2305 | 2328 | ||
2306 | /* | 2329 | /* |
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) | |||
2482 | mutex_unlock(&info->mutex); | 2505 | mutex_unlock(&info->mutex); |
2483 | 2506 | ||
2484 | if (!bdev) { | 2507 | if (!bdev) { |
2485 | kfree(info); | 2508 | mutex_lock(&blkfront_mutex); |
2509 | free_info(info); | ||
2510 | mutex_unlock(&blkfront_mutex); | ||
2486 | return 0; | 2511 | return 0; |
2487 | } | 2512 | } |
2488 | 2513 | ||
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) | |||
2502 | if (info && !bdev->bd_openers) { | 2527 | if (info && !bdev->bd_openers) { |
2503 | xlvbd_release_gendisk(info); | 2528 | xlvbd_release_gendisk(info); |
2504 | disk->private_data = NULL; | 2529 | disk->private_data = NULL; |
2505 | kfree(info); | 2530 | mutex_lock(&blkfront_mutex); |
2531 | free_info(info); | ||
2532 | mutex_unlock(&blkfront_mutex); | ||
2506 | } | 2533 | } |
2507 | 2534 | ||
2508 | mutex_unlock(&bdev->bd_mutex); | 2535 | mutex_unlock(&bdev->bd_mutex); |
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) | |||
2585 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); | 2612 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); |
2586 | xlvbd_release_gendisk(info); | 2613 | xlvbd_release_gendisk(info); |
2587 | disk->private_data = NULL; | 2614 | disk->private_data = NULL; |
2588 | kfree(info); | 2615 | free_info(info); |
2589 | } | 2616 | } |
2590 | 2617 | ||
2591 | out: | 2618 | out: |
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = { | |||
2618 | .is_ready = blkfront_is_ready, | 2645 | .is_ready = blkfront_is_ready, |
2619 | }; | 2646 | }; |
2620 | 2647 | ||
2648 | static void purge_persistent_grants(struct blkfront_info *info) | ||
2649 | { | ||
2650 | unsigned int i; | ||
2651 | unsigned long flags; | ||
2652 | |||
2653 | for (i = 0; i < info->nr_rings; i++) { | ||
2654 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; | ||
2655 | struct grant *gnt_list_entry, *tmp; | ||
2656 | |||
2657 | spin_lock_irqsave(&rinfo->ring_lock, flags); | ||
2658 | |||
2659 | if (rinfo->persistent_gnts_c == 0) { | ||
2660 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
2661 | continue; | ||
2662 | } | ||
2663 | |||
2664 | list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, | ||
2665 | node) { | ||
2666 | if (gnt_list_entry->gref == GRANT_INVALID_REF || | ||
2667 | gnttab_query_foreign_access(gnt_list_entry->gref)) | ||
2668 | continue; | ||
2669 | |||
2670 | list_del(&gnt_list_entry->node); | ||
2671 | gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); | ||
2672 | rinfo->persistent_gnts_c--; | ||
2673 | __free_page(gnt_list_entry->page); | ||
2674 | kfree(gnt_list_entry); | ||
2675 | } | ||
2676 | |||
2677 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
2678 | } | ||
2679 | } | ||
2680 | |||
2681 | static void blkfront_delay_work(struct work_struct *work) | ||
2682 | { | ||
2683 | struct blkfront_info *info; | ||
2684 | bool need_schedule_work = false; | ||
2685 | |||
2686 | mutex_lock(&blkfront_mutex); | ||
2687 | |||
2688 | list_for_each_entry(info, &info_list, info_list) { | ||
2689 | if (info->feature_persistent) { | ||
2690 | need_schedule_work = true; | ||
2691 | mutex_lock(&info->mutex); | ||
2692 | purge_persistent_grants(info); | ||
2693 | mutex_unlock(&info->mutex); | ||
2694 | } | ||
2695 | } | ||
2696 | |||
2697 | if (need_schedule_work) | ||
2698 | schedule_delayed_work(&blkfront_work, HZ * 10); | ||
2699 | |||
2700 | mutex_unlock(&blkfront_mutex); | ||
2701 | } | ||
2702 | |||
2621 | static int __init xlblk_init(void) | 2703 | static int __init xlblk_init(void) |
2622 | { | 2704 | { |
2623 | int ret; | 2705 | int ret; |
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void) | |||
2626 | if (!xen_domain()) | 2708 | if (!xen_domain()) |
2627 | return -ENODEV; | 2709 | return -ENODEV; |
2628 | 2710 | ||
2711 | if (!xen_has_pv_disk_devices()) | ||
2712 | return -ENODEV; | ||
2713 | |||
2714 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
2715 | pr_warn("xen_blk: can't get major %d with name %s\n", | ||
2716 | XENVBD_MAJOR, DEV_NAME); | ||
2717 | return -ENODEV; | ||
2718 | } | ||
2719 | |||
2629 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) | 2720 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) |
2630 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; | 2721 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; |
2631 | 2722 | ||
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void) | |||
2641 | xen_blkif_max_queues = nr_cpus; | 2732 | xen_blkif_max_queues = nr_cpus; |
2642 | } | 2733 | } |
2643 | 2734 | ||
2644 | if (!xen_has_pv_disk_devices()) | 2735 | INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work); |
2645 | return -ENODEV; | ||
2646 | |||
2647 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
2648 | printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", | ||
2649 | XENVBD_MAJOR, DEV_NAME); | ||
2650 | return -ENODEV; | ||
2651 | } | ||
2652 | 2736 | ||
2653 | ret = xenbus_register_frontend(&blkfront_driver); | 2737 | ret = xenbus_register_frontend(&blkfront_driver); |
2654 | if (ret) { | 2738 | if (ret) { |
@@ -2663,6 +2747,8 @@ module_init(xlblk_init); | |||
2663 | 2747 | ||
2664 | static void __exit xlblk_exit(void) | 2748 | static void __exit xlblk_exit(void) |
2665 | { | 2749 | { |
2750 | cancel_delayed_work_sync(&blkfront_work); | ||
2751 | |||
2666 | xenbus_unregister_driver(&blkfront_driver); | 2752 | xenbus_unregister_driver(&blkfront_driver); |
2667 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); | 2753 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); |
2668 | kfree(minors); | 2754 | kfree(minors); |
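On the frontend side the patch adds a self-rearming delayed work that walks every blkfront instance and drops cached persistent grants the backend is no longer mapping. The standalone sketch below shows the core of that purge: a safe list walk that unlinks and frees entries matching a predicate while keeping a cached-grant counter in step. The list layout and field names are simplified, and GRANT_INVALID_REF is modelled as -1.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct grant {
    int gref;                    /* -1 plays the role of GRANT_INVALID_REF */
    bool backend_still_mapped;   /* stand-in for gnttab_query_foreign_access() */
    struct grant *prev, *next;
};

static struct grant head = { .prev = &head, .next = &head };
static unsigned int persistent_gnts_c;

static void add_grant(int gref, bool mapped)
{
    struct grant *g = malloc(sizeof(*g));

    if (!g)
        return;
    g->gref = gref;
    g->backend_still_mapped = mapped;
    g->prev = head.prev;
    g->next = &head;
    head.prev->next = g;
    head.prev = g;
    persistent_gnts_c++;
}

/* Walk with a lookahead pointer so unlinking the current node is safe. */
static void purge_unused(void)
{
    struct grant *g = head.next, *next;

    while (g != &head) {
        next = g->next;
        if (g->gref != -1 && !g->backend_still_mapped) {
            g->prev->next = g->next;
            g->next->prev = g->prev;
            persistent_gnts_c--;
            free(g);
        }
        g = next;
    }
}

int main(void)
{
    add_grant(10, true);    /* backend still maps it: keep */
    add_grant(11, false);   /* unused by the backend: reclaim */
    add_grant(-1, false);   /* invalid ref: skip */
    purge_unused();
    printf("grants left cached: %u\n", persistent_gnts_c);
    return 0;
}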
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index c9bac9dc4637..e4fe954e63a9 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata) | |||
498 | 498 | ||
499 | /** | 499 | /** |
500 | * syc_ioremap - ioremap register space for the interconnect target module | 500 | * syc_ioremap - ioremap register space for the interconnect target module |
501 | * @ddata: deviec driver data | 501 | * @ddata: device driver data |
502 | * | 502 | * |
503 | * Note that the interconnect target module registers can be anywhere | 503 | * Note that the interconnect target module registers can be anywhere |
504 | * within the first child device address space. For example, SGX has | 504 | * within the interconnect target module range. For example, SGX has |
505 | * them at offset 0x1fc00 in the 32MB module address space. We just | 505 | * them at offset 0x1fc00 in the 32MB module address space. And cpsw |
506 | * what we need around the interconnect target module registers. | 506 | * has them at offset 0x1200 in the CPSW_WR child. Usually the |
507 | * the interconnect target module registers are at the beginning of | ||
508 | * the module range though. | ||
507 | */ | 509 | */ |
508 | static int sysc_ioremap(struct sysc *ddata) | 510 | static int sysc_ioremap(struct sysc *ddata) |
509 | { | 511 | { |
510 | u32 size = 0; | 512 | int size; |
511 | |||
512 | if (ddata->offsets[SYSC_SYSSTATUS] >= 0) | ||
513 | size = ddata->offsets[SYSC_SYSSTATUS]; | ||
514 | else if (ddata->offsets[SYSC_SYSCONFIG] >= 0) | ||
515 | size = ddata->offsets[SYSC_SYSCONFIG]; | ||
516 | else if (ddata->offsets[SYSC_REVISION] >= 0) | ||
517 | size = ddata->offsets[SYSC_REVISION]; | ||
518 | else | ||
519 | return -EINVAL; | ||
520 | 513 | ||
521 | size &= 0xfff00; | 514 | size = max3(ddata->offsets[SYSC_REVISION], |
522 | size += SZ_256; | 515 | ddata->offsets[SYSC_SYSCONFIG], |
516 | ddata->offsets[SYSC_SYSSTATUS]); | ||
517 | |||
518 | if (size < 0 || (size + sizeof(u32)) > ddata->module_size) | ||
519 | return -EINVAL; | ||
523 | 520 | ||
524 | ddata->module_va = devm_ioremap(ddata->dev, | 521 | ddata->module_va = devm_ioremap(ddata->dev, |
525 | ddata->module_pa, | 522 | ddata->module_pa, |
526 | size); | 523 | size + sizeof(u32)); |
527 | if (!ddata->module_va) | 524 | if (!ddata->module_va) |
528 | return -EIO; | 525 | return -EIO; |
529 | 526 | ||
@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev) | |||
1224 | if (!pm_runtime_status_suspended(dev)) { | 1221 | if (!pm_runtime_status_suspended(dev)) { |
1225 | error = pm_generic_runtime_suspend(dev); | 1222 | error = pm_generic_runtime_suspend(dev); |
1226 | if (error) { | 1223 | if (error) { |
1227 | dev_err(dev, "%s error at %i: %i\n", | 1224 | dev_warn(dev, "%s busy at %i: %i\n", |
1228 | __func__, __LINE__, error); | 1225 | __func__, __LINE__, error); |
1229 | 1226 | ||
1230 | return error; | 1227 | return 0; |
1231 | } | 1228 | } |
1232 | 1229 | ||
1233 | error = sysc_runtime_suspend(ddata->dev); | 1230 | error = sysc_runtime_suspend(ddata->dev); |
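sysc_ioremap() now derives the mapping window from the largest of the three known register offsets, rejects it if it falls outside the module range, and maps just that much plus one register width. A standalone sketch of the same computation; a negative offset marks a register the module does not have, and the offsets and module size here are made up:

#include <stdio.h>

#define MAX3(a, b, c) ((a) > (b) ? ((a) > (c) ? (a) : (c)) \
                                 : ((b) > (c) ? (b) : (c)))

static int offset_revision  = 0x0;
static int offset_sysconfig = 0x10;
static int offset_sysstatus = 0x14;
static unsigned int module_size = 0x1000;

int main(void)
{
    int size = MAX3(offset_revision, offset_sysconfig, offset_sysstatus);
    int len  = size + (int)sizeof(unsigned int);   /* highest reg + width */

    if (size < 0 || len > (int)module_size) {
        fprintf(stderr, "register window outside the module range\n");
        return 1;
    }
    printf("ioremap length: %d bytes\n", len);
    return 0;
}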
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 113fc6edb2b0..a5d5a96479bf 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, | |||
2546 | if (!CDROM_CAN(CDC_SELECT_DISC) || | 2546 | if (!CDROM_CAN(CDC_SELECT_DISC) || |
2547 | (arg == CDSL_CURRENT || arg == CDSL_NONE)) | 2547 | (arg == CDSL_CURRENT || arg == CDSL_NONE)) |
2548 | return cdi->ops->drive_status(cdi, CDSL_CURRENT); | 2548 | return cdi->ops->drive_status(cdi, CDSL_CURRENT); |
2549 | if (((int)arg >= cdi->capacity)) | 2549 | if (arg >= cdi->capacity) |
2550 | return -EINVAL; | 2550 | return -EINVAL; |
2551 | return cdrom_slot_status(cdi, arg); | 2551 | return cdrom_slot_status(cdi, arg); |
2552 | } | 2552 | } |
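The cdrom fix drops an (int) cast that let very large slot numbers slip past the capacity check, because the cast could turn them into small or negative values. A standalone demonstration of the difference, assuming a typical two's-complement 64-bit build:

#include <stdio.h>

static int capacity = 4;   /* number of changer slots */

static int accepted_by_cast_check(unsigned long arg)
{
    return !((int)arg >= capacity);             /* old, broken check */
}

static int accepted_by_unsigned_check(unsigned long arg)
{
    return !(arg >= (unsigned long)capacity);   /* fixed check */
}

int main(void)
{
    unsigned long arg = 0xffffffffUL;   /* (int)arg is -1 on such builds */

    printf("cast check accepts %#lx: %d\n", arg, accepted_by_cast_check(arg));
    printf("unsigned check accepts it: %d\n", accepted_by_unsigned_check(arg));
    return 0;
}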
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index 740af90a9508..c5edf8f2fd19 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c | |||
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) | |||
558 | if (!clk_base) | 558 | if (!clk_base) |
559 | goto npcm7xx_init_error; | 559 | goto npcm7xx_init_error; |
560 | 560 | ||
561 | npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) * | 561 | npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws, |
562 | NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL); | 562 | NPCM7XX_NUM_CLOCKS), GFP_KERNEL); |
563 | if (!npcm7xx_clk_data) | 563 | if (!npcm7xx_clk_data) |
564 | goto npcm7xx_init_np_err; | 564 | goto npcm7xx_init_np_err; |
565 | 565 | ||
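The npcm7xx fix is the classic flexible-array-member sizing bug: the old expression added sizeof(npcm7xx_clk_data) — the size of a pointer — instead of the size of the header struct. A standalone sketch of the corrected computation; STRUCT_SIZE here is a rough userspace stand-in for the kernel's struct_size(), without its overflow checking:

#include <stdio.h>
#include <stdlib.h>

struct clk_hw;   /* opaque in this sketch */

struct clk_hw_onecell_data {
    unsigned int num;
    struct clk_hw *hws[];   /* flexible array member */
};

/* Rough userspace stand-in for struct_size(); no overflow checking here. */
#define STRUCT_SIZE(p, member, n) \
    (sizeof(*(p)) + sizeof((p)->member[0]) * (n))

int main(void)
{
    struct clk_hw_onecell_data *data = NULL;
    size_t n = 50;

    /* Note: sizeof(data) would only be the size of a pointer. */
    data = calloc(1, STRUCT_SIZE(data, hws, n));
    if (!data)
        return 1;
    data->num = (unsigned int)n;
    printf("allocated %zu bytes for %zu clock pointers\n",
           STRUCT_SIZE(data, hws, n), n);
    free(data);
    return 0;
}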
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index fb62f3938008..3a0996f2d556 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c | |||
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev) | |||
46 | clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), | 46 | clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), |
47 | 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); | 47 | 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); |
48 | 48 | ||
49 | clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk); | 49 | clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk); |
50 | 50 | ||
51 | hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", | 51 | hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", |
52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, | 52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 110483f0e3fb..e26a40971b26 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, | |||
379 | if (idx == -1) | 379 | if (idx == -1) |
380 | idx = i; /* first enabled state */ | 380 | idx = i; /* first enabled state */ |
381 | if (s->target_residency > data->predicted_us) { | 381 | if (s->target_residency > data->predicted_us) { |
382 | if (!tick_nohz_tick_stopped()) | 382 | if (data->predicted_us < TICK_USEC) |
383 | break; | 383 | break; |
384 | 384 | ||
385 | if (!tick_nohz_tick_stopped()) { | ||
386 | /* | ||
387 | * If the state selected so far is shallow, | ||
388 | * waking up early won't hurt, so retain the | ||
389 | * tick in that case and let the governor run | ||
390 | * again in the next iteration of the loop. | ||
391 | */ | ||
392 | expected_interval = drv->states[idx].target_residency; | ||
393 | break; | ||
394 | } | ||
395 | |||
385 | /* | 396 | /* |
386 | * If the state selected so far is shallow and this | 397 | * If the state selected so far is shallow and this |
387 | * state's target residency matches the time till the | 398 | * state's target residency matches the time till the |
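The menu governor hunk keeps the periodic tick running when the state picked so far is shallow, instead of always breaking out once a state's target residency exceeds the prediction. The sketch below is a much-reduced rendering of that selection walk, with invented residency values and tick period; the real governor also weighs latency limits and further tick-stopped cases that are elided here:

#include <stdio.h>

#define TICK_USEC 4000   /* illustrative tick period, not the kernel's value */

struct state { unsigned int target_residency; };

static const struct state states[] = { { 1 }, { 100 }, { 1000 }, { 10000 } };

/* Much-simplified version of the menu governor's state walk. */
static int select_state(unsigned int predicted_us, int tick_stopped,
                        unsigned int *expected_interval)
{
    int i, idx = -1;

    for (i = 0; i < 4; i++) {
        if (idx == -1)
            idx = i;   /* first usable state */
        if (states[i].target_residency > predicted_us) {
            if (predicted_us < TICK_USEC)
                break;
            if (!tick_stopped) {
                /*
                 * A shallow state was picked while the tick still runs:
                 * keep the tick so the governor gets another chance soon.
                 */
                *expected_interval = states[idx].target_residency;
                break;
            }
            break;   /* deeper tick-stopped handling elided */
        }
        idx = i;
    }
    return idx;
}

int main(void)
{
    unsigned int interval = 0;
    int idx = select_state(5000, 0, &interval);

    printf("chose state %d, expected interval %u us\n", idx, interval);
    return 0;
}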
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 6e61cc93c2b0..d7aa7d7ff102 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
679 | int ret = 0; | 679 | int ret = 0; |
680 | 680 | ||
681 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { | 681 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { |
682 | crypto_ablkcipher_set_flags(ablkcipher, | ||
683 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
684 | dev_err(jrdev, "key size mismatch\n"); | 682 | dev_err(jrdev, "key size mismatch\n"); |
685 | return -EINVAL; | 683 | goto badkey; |
686 | } | 684 | } |
687 | 685 | ||
688 | ctx->cdata.keylen = keylen; | 686 | ctx->cdata.keylen = keylen; |
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
715 | return ret; | 713 | return ret; |
716 | badkey: | 714 | badkey: |
717 | crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 715 | crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
718 | return 0; | 716 | return -EINVAL; |
719 | } | 717 | } |
720 | 718 | ||
721 | /* | 719 | /* |
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 578ea63a3109..f26d62e5533a 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); | 71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); |
72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); | 72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); |
73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | 73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | 75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | 78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, |
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); | 90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); |
91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); | 91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); |
92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | 92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | 94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
95 | } | 95 | } |
96 | 96 | ||
97 | /* RSA Job Completion handler */ | 97 | /* RSA Job Completion handler */ |
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
417 | goto unmap_p; | 417 | goto unmap_p; |
418 | } | 418 | } |
419 | 419 | ||
420 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | 420 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
421 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | 421 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
422 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | 422 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
423 | goto unmap_q; | 423 | goto unmap_q; |
424 | } | 424 | } |
425 | 425 | ||
426 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | 426 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
427 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | 427 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
428 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | 428 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
429 | goto unmap_tmp1; | 429 | goto unmap_tmp1; |
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
451 | return 0; | 451 | return 0; |
452 | 452 | ||
453 | unmap_tmp1: | 453 | unmap_tmp1: |
454 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 454 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
455 | unmap_q: | 455 | unmap_q: |
456 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | 456 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
457 | unmap_p: | 457 | unmap_p: |
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
504 | goto unmap_dq; | 504 | goto unmap_dq; |
505 | } | 505 | } |
506 | 506 | ||
507 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | 507 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
508 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | 508 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
509 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | 509 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
510 | goto unmap_qinv; | 510 | goto unmap_qinv; |
511 | } | 511 | } |
512 | 512 | ||
513 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | 513 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
514 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | 514 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
515 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | 515 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
516 | goto unmap_tmp1; | 516 | goto unmap_tmp1; |
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
538 | return 0; | 538 | return 0; |
539 | 539 | ||
540 | unmap_tmp1: | 540 | unmap_tmp1: |
541 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 541 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
542 | unmap_qinv: | 542 | unmap_qinv: |
543 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | 543 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
544 | unmap_dq: | 544 | unmap_dq: |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index f4f258075b89..acdd72016ffe 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
190 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); | 190 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); |
191 | 191 | ||
192 | /* Unmap just-run descriptor so we can post-process */ | 192 | /* Unmap just-run descriptor so we can post-process */ |
193 | dma_unmap_single(dev, jrp->outring[hw_idx].desc, | 193 | dma_unmap_single(dev, |
194 | caam_dma_to_cpu(jrp->outring[hw_idx].desc), | ||
194 | jrp->entinfo[sw_idx].desc_size, | 195 | jrp->entinfo[sw_idx].desc_size, |
195 | DMA_TO_DEVICE); | 196 | DMA_TO_DEVICE); |
196 | 197 | ||
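The caam_jr fix converts the descriptor address read back from the output ring before using it, since the hardware stores it in its own byte order. The standalone sketch below shows the same idea with glibc's endian.h helpers; caam_dma_to_cpu() itself is modelled as a plain le64toh():

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* A descriptor address as the device wrote it into the output ring. */
struct outring_entry {
    uint64_t desc;   /* stored little-endian by the hardware in this sketch */
};

static uint64_t dma_to_cpu(uint64_t v)
{
    return le64toh(v);   /* plays the role of caam_dma_to_cpu() */
}

int main(void)
{
    struct outring_entry e = { .desc = htole64(0x1122334455667788ULL) };

    /* Using e.desc directly is only correct on little-endian CPUs. */
    printf("converted address: %#llx\n",
           (unsigned long long)dma_to_cpu(e.desc));
    return 0;
}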
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 9a476bb6d4c7..af596455b420 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h | |||
@@ -35,6 +35,7 @@ struct nitrox_cmdq { | |||
35 | /* requests in backlog queues */ | 35 | /* requests in backlog queues */ |
36 | atomic_t backlog_count; | 36 | atomic_t backlog_count; |
37 | 37 | ||
38 | int write_idx; | ||
38 | /* command size 32B/64B */ | 39 | /* command size 32B/64B */ |
39 | u8 instr_size; | 40 | u8 instr_size; |
40 | u8 qno; | 41 | u8 qno; |
@@ -87,7 +88,7 @@ struct nitrox_bh { | |||
87 | struct bh_data *slc; | 88 | struct bh_data *slc; |
88 | }; | 89 | }; |
89 | 90 | ||
90 | /* NITROX-5 driver state */ | 91 | /* NITROX-V driver state */ |
91 | #define NITROX_UCODE_LOADED 0 | 92 | #define NITROX_UCODE_LOADED 0 |
92 | #define NITROX_READY 1 | 93 | #define NITROX_READY 1 |
93 | 94 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index ebe267379ac9..4d31df07777f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c | |||
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq) | |||
36 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); | 36 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); |
37 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); | 37 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); |
38 | cmdq->qsize = (qsize + PKT_IN_ALIGN); | 38 | cmdq->qsize = (qsize + PKT_IN_ALIGN); |
39 | cmdq->write_idx = 0; | ||
39 | 40 | ||
40 | spin_lock_init(&cmdq->response_lock); | 41 | spin_lock_init(&cmdq->response_lock); |
41 | spin_lock_init(&cmdq->cmdq_lock); | 42 | spin_lock_init(&cmdq->cmdq_lock); |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index deaefd532aaa..4a362fc22f62 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
@@ -42,6 +42,16 @@ | |||
42 | * Invalid flag options in AES-CCM IV. | 42 | * Invalid flag options in AES-CCM IV. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | static inline int incr_index(int index, int count, int max) | ||
46 | { | ||
47 | if ((index + count) >= max) | ||
48 | index = index + count - max; | ||
49 | else | ||
50 | index += count; | ||
51 | |||
52 | return index; | ||
53 | } | ||
54 | |||
45 | /** | 55 | /** |
46 | * dma_free_sglist - unmap and free the sg lists. | 56 | * dma_free_sglist - unmap and free the sg lists. |
47 | * @ndev: N5 device | 57 | * @ndev: N5 device |
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr, | |||
426 | struct nitrox_cmdq *cmdq) | 436 | struct nitrox_cmdq *cmdq) |
427 | { | 437 | { |
428 | struct nitrox_device *ndev = sr->ndev; | 438 | struct nitrox_device *ndev = sr->ndev; |
429 | union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; | 439 | int idx; |
430 | u64 offset; | ||
431 | u8 *ent; | 440 | u8 *ent; |
432 | 441 | ||
433 | spin_lock_bh(&cmdq->cmdq_lock); | 442 | spin_lock_bh(&cmdq->cmdq_lock); |
434 | 443 | ||
435 | /* get the next write offset */ | 444 | idx = cmdq->write_idx; |
436 | offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno); | ||
437 | pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset); | ||
438 | /* copy the instruction */ | 445 | /* copy the instruction */ |
439 | ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; | 446 | ent = cmdq->head + (idx * cmdq->instr_size); |
440 | memcpy(ent, &sr->instr, cmdq->instr_size); | 447 | memcpy(ent, &sr->instr, cmdq->instr_size); |
441 | /* flush the command queue updates */ | ||
442 | dma_wmb(); | ||
443 | 448 | ||
444 | sr->tstamp = jiffies; | ||
445 | atomic_set(&sr->status, REQ_POSTED); | 449 | atomic_set(&sr->status, REQ_POSTED); |
446 | response_list_add(sr, cmdq); | 450 | response_list_add(sr, cmdq); |
451 | sr->tstamp = jiffies; | ||
452 | /* flush the command queue updates */ | ||
453 | dma_wmb(); | ||
447 | 454 | ||
448 | /* Ring doorbell with count 1 */ | 455 | /* Ring doorbell with count 1 */ |
449 | writeq(1, cmdq->dbell_csr_addr); | 456 | writeq(1, cmdq->dbell_csr_addr); |
450 | /* orders the doorbell rings */ | 457 | /* orders the doorbell rings */ |
451 | mmiowb(); | 458 | mmiowb(); |
452 | 459 | ||
460 | cmdq->write_idx = incr_index(idx, 1, ndev->qlen); | ||
461 | |||
453 | spin_unlock_bh(&cmdq->cmdq_lock); | 462 | spin_unlock_bh(&cmdq->cmdq_lock); |
454 | } | 463 | } |
455 | 464 | ||
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | |||
459 | struct nitrox_softreq *sr, *tmp; | 468 | struct nitrox_softreq *sr, *tmp; |
460 | int ret = 0; | 469 | int ret = 0; |
461 | 470 | ||
471 | if (!atomic_read(&cmdq->backlog_count)) | ||
472 | return 0; | ||
473 | |||
462 | spin_lock_bh(&cmdq->backlog_lock); | 474 | spin_lock_bh(&cmdq->backlog_lock); |
463 | 475 | ||
464 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { | 476 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { |
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | |||
466 | 478 | ||
467 | /* submit until space available */ | 479 | /* submit until space available */ |
468 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | 480 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
469 | ret = -EBUSY; | 481 | ret = -ENOSPC; |
470 | break; | 482 | break; |
471 | } | 483 | } |
472 | /* delete from backlog list */ | 484 | /* delete from backlog list */ |
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) | |||
491 | { | 503 | { |
492 | struct nitrox_cmdq *cmdq = sr->cmdq; | 504 | struct nitrox_cmdq *cmdq = sr->cmdq; |
493 | struct nitrox_device *ndev = sr->ndev; | 505 | struct nitrox_device *ndev = sr->ndev; |
494 | int ret = -EBUSY; | 506 | |
507 | /* try to post backlog requests */ | ||
508 | post_backlog_cmds(cmdq); | ||
495 | 509 | ||
496 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | 510 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
497 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 511 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
498 | return -EAGAIN; | 512 | return -ENOSPC; |
499 | 513 | /* add to backlog list */ | |
500 | backlog_list_add(sr, cmdq); | 514 | backlog_list_add(sr, cmdq); |
501 | } else { | 515 | return -EBUSY; |
502 | ret = post_backlog_cmds(cmdq); | ||
503 | if (ret) { | ||
504 | backlog_list_add(sr, cmdq); | ||
505 | return ret; | ||
506 | } | ||
507 | post_se_instr(sr, cmdq); | ||
508 | ret = -EINPROGRESS; | ||
509 | } | 516 | } |
510 | return ret; | 517 | post_se_instr(sr, cmdq); |
518 | |||
519 | return -EINPROGRESS; | ||
511 | } | 520 | } |
512 | 521 | ||
513 | /** | 522 | /** |
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev, | |||
624 | */ | 633 | */ |
625 | sr->instr.fdata[0] = *((u64 *)&req->gph); | 634 | sr->instr.fdata[0] = *((u64 *)&req->gph); |
626 | sr->instr.fdata[1] = 0; | 635 | sr->instr.fdata[1] = 0; |
627 | /* flush the soft_req changes before posting the cmd */ | ||
628 | wmb(); | ||
629 | 636 | ||
630 | ret = nitrox_enqueue_request(sr); | 637 | ret = nitrox_enqueue_request(sr); |
631 | if (ret == -EAGAIN) | 638 | if (ret == -ENOSPC) |
632 | goto send_fail; | 639 | goto send_fail; |
633 | 640 | ||
634 | return ret; | 641 | return ret; |
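The nitrox rework tracks the next free command slot in software (write_idx) and bumps it with a small wrap-around helper instead of re-reading the doorbell CSR for every submission. The helper is simple enough to reproduce in a standalone sketch with an invented queue length:

#include <stdio.h>

#define QLEN 8   /* illustrative queue length */

/* Advance a ring index by count, wrapping at the queue length. */
static int incr_index(int index, int count, int max)
{
    if (index + count >= max)
        return index + count - max;
    return index + count;
}

int main(void)
{
    int idx = 0;

    for (int i = 0; i < 12; i++) {
        printf("write slot %d\n", idx);
        idx = incr_index(idx, 1, QLEN);
    }
    return 0;
}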
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index a53a0e6ba024..7725b6ee14ef 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h | |||
@@ -96,6 +96,10 @@ enum csk_flags { | |||
96 | CSK_CONN_INLINE, /* Connection on HW */ | 96 | CSK_CONN_INLINE, /* Connection on HW */ |
97 | }; | 97 | }; |
98 | 98 | ||
99 | enum chtls_cdev_state { | ||
100 | CHTLS_CDEV_STATE_UP = 1 | ||
101 | }; | ||
102 | |||
99 | struct listen_ctx { | 103 | struct listen_ctx { |
100 | struct sock *lsk; | 104 | struct sock *lsk; |
101 | struct chtls_dev *cdev; | 105 | struct chtls_dev *cdev; |
@@ -146,6 +150,7 @@ struct chtls_dev { | |||
146 | unsigned int send_page_order; | 150 | unsigned int send_page_order; |
147 | int max_host_sndbuf; | 151 | int max_host_sndbuf; |
148 | struct key_map kmap; | 152 | struct key_map kmap; |
153 | unsigned int cdev_state; | ||
149 | }; | 154 | }; |
150 | 155 | ||
151 | struct chtls_hws { | 156 | struct chtls_hws { |
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 9b07f9165658..f59b044ebd25 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c | |||
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev) | |||
160 | tlsdev->hash = chtls_create_hash; | 160 | tlsdev->hash = chtls_create_hash; |
161 | tlsdev->unhash = chtls_destroy_hash; | 161 | tlsdev->unhash = chtls_destroy_hash; |
162 | tls_register_device(&cdev->tlsdev); | 162 | tls_register_device(&cdev->tlsdev); |
163 | cdev->cdev_state = CHTLS_CDEV_STATE_UP; | ||
163 | } | 164 | } |
164 | 165 | ||
165 | static void chtls_unregister_dev(struct chtls_dev *cdev) | 166 | static void chtls_unregister_dev(struct chtls_dev *cdev) |
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void) | |||
281 | struct chtls_dev *cdev, *tmp; | 282 | struct chtls_dev *cdev, *tmp; |
282 | 283 | ||
283 | mutex_lock(&cdev_mutex); | 284 | mutex_lock(&cdev_mutex); |
284 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list) | 285 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list) { |
285 | chtls_free_uld(cdev); | 286 | if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) |
287 | chtls_free_uld(cdev); | ||
288 | } | ||
286 | mutex_unlock(&cdev_mutex); | 289 | mutex_unlock(&cdev_mutex); |
287 | } | 290 | } |
288 | 291 | ||
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 5285ece4f33a..b71895871be3 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, | |||
107 | ret = crypto_skcipher_encrypt(req); | 107 | ret = crypto_skcipher_encrypt(req); |
108 | skcipher_request_zero(req); | 108 | skcipher_request_zero(req); |
109 | } else { | 109 | } else { |
110 | preempt_disable(); | ||
111 | pagefault_disable(); | ||
112 | enable_kernel_vsx(); | ||
113 | |||
114 | blkcipher_walk_init(&walk, dst, src, nbytes); | 110 | blkcipher_walk_init(&walk, dst, src, nbytes); |
115 | ret = blkcipher_walk_virt(desc, &walk); | 111 | ret = blkcipher_walk_virt(desc, &walk); |
116 | while ((nbytes = walk.nbytes)) { | 112 | while ((nbytes = walk.nbytes)) { |
113 | preempt_disable(); | ||
114 | pagefault_disable(); | ||
115 | enable_kernel_vsx(); | ||
117 | aes_p8_cbc_encrypt(walk.src.virt.addr, | 116 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
118 | walk.dst.virt.addr, | 117 | walk.dst.virt.addr, |
119 | nbytes & AES_BLOCK_MASK, | 118 | nbytes & AES_BLOCK_MASK, |
120 | &ctx->enc_key, walk.iv, 1); | 119 | &ctx->enc_key, walk.iv, 1); |
120 | disable_kernel_vsx(); | ||
121 | pagefault_enable(); | ||
122 | preempt_enable(); | ||
123 | |||
121 | nbytes &= AES_BLOCK_SIZE - 1; | 124 | nbytes &= AES_BLOCK_SIZE - 1; |
122 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 125 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
123 | } | 126 | } |
124 | |||
125 | disable_kernel_vsx(); | ||
126 | pagefault_enable(); | ||
127 | preempt_enable(); | ||
128 | } | 127 | } |
129 | 128 | ||
130 | return ret; | 129 | return ret; |
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, | |||
147 | ret = crypto_skcipher_decrypt(req); | 146 | ret = crypto_skcipher_decrypt(req); |
148 | skcipher_request_zero(req); | 147 | skcipher_request_zero(req); |
149 | } else { | 148 | } else { |
150 | preempt_disable(); | ||
151 | pagefault_disable(); | ||
152 | enable_kernel_vsx(); | ||
153 | |||
154 | blkcipher_walk_init(&walk, dst, src, nbytes); | 149 | blkcipher_walk_init(&walk, dst, src, nbytes); |
155 | ret = blkcipher_walk_virt(desc, &walk); | 150 | ret = blkcipher_walk_virt(desc, &walk); |
156 | while ((nbytes = walk.nbytes)) { | 151 | while ((nbytes = walk.nbytes)) { |
152 | preempt_disable(); | ||
153 | pagefault_disable(); | ||
154 | enable_kernel_vsx(); | ||
157 | aes_p8_cbc_encrypt(walk.src.virt.addr, | 155 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
158 | walk.dst.virt.addr, | 156 | walk.dst.virt.addr, |
159 | nbytes & AES_BLOCK_MASK, | 157 | nbytes & AES_BLOCK_MASK, |
160 | &ctx->dec_key, walk.iv, 0); | 158 | &ctx->dec_key, walk.iv, 0); |
159 | disable_kernel_vsx(); | ||
160 | pagefault_enable(); | ||
161 | preempt_enable(); | ||
162 | |||
161 | nbytes &= AES_BLOCK_SIZE - 1; | 163 | nbytes &= AES_BLOCK_SIZE - 1; |
162 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 164 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
163 | } | 165 | } |
164 | |||
165 | disable_kernel_vsx(); | ||
166 | pagefault_enable(); | ||
167 | preempt_enable(); | ||
168 | } | 166 | } |
169 | 167 | ||
170 | return ret; | 168 | return ret; |
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8bd9aff0f55f..e9954a7d4694 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, | |||
116 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); | 116 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); |
117 | skcipher_request_zero(req); | 117 | skcipher_request_zero(req); |
118 | } else { | 118 | } else { |
119 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
120 | |||
121 | ret = blkcipher_walk_virt(desc, &walk); | ||
122 | |||
119 | preempt_disable(); | 123 | preempt_disable(); |
120 | pagefault_disable(); | 124 | pagefault_disable(); |
121 | enable_kernel_vsx(); | 125 | enable_kernel_vsx(); |
122 | 126 | ||
123 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
124 | |||
125 | ret = blkcipher_walk_virt(desc, &walk); | ||
126 | iv = walk.iv; | 127 | iv = walk.iv; |
127 | memset(tweak, 0, AES_BLOCK_SIZE); | 128 | memset(tweak, 0, AES_BLOCK_SIZE); |
128 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); | 129 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); |
129 | 130 | ||
131 | disable_kernel_vsx(); | ||
132 | pagefault_enable(); | ||
133 | preempt_enable(); | ||
134 | |||
130 | while ((nbytes = walk.nbytes)) { | 135 | while ((nbytes = walk.nbytes)) { |
136 | preempt_disable(); | ||
137 | pagefault_disable(); | ||
138 | enable_kernel_vsx(); | ||
131 | if (enc) | 139 | if (enc) |
132 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, | 140 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, |
133 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); | 141 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); |
134 | else | 142 | else |
135 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, | 143 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, |
136 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); | 144 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); |
145 | disable_kernel_vsx(); | ||
146 | pagefault_enable(); | ||
147 | preempt_enable(); | ||
137 | 148 | ||
138 | nbytes &= AES_BLOCK_SIZE - 1; | 149 | nbytes &= AES_BLOCK_SIZE - 1; |
139 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 150 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
140 | } | 151 | } |
141 | |||
142 | disable_kernel_vsx(); | ||
143 | pagefault_enable(); | ||
144 | preempt_enable(); | ||
145 | } | 152 | } |
146 | return ret; | 153 | return ret; |
147 | } | 154 | } |
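Both VMX hunks shrink the window in which preemption and page faults are disabled, so that blkcipher_walk_done(), which may sleep, is no longer called inside it. As a loose userspace analogy, the sketch below holds a lock only around the per-chunk work and releases it before an operation that can block; the mutex merely plays the role of the non-sleepable VSX section and is not how the kernel code synchronises:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t vsx_section = PTHREAD_MUTEX_INITIALIZER;

static void process_chunk(int i)
{
    printf("encrypting chunk %d\n", i);   /* the VSX-accelerated work */
}

static void may_block(void)
{
    usleep(1000);   /* plays the role of blkcipher_walk_done(), which can sleep */
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        /* Enter the non-sleepable section only around the real work. */
        pthread_mutex_lock(&vsx_section);
        process_chunk(i);
        pthread_mutex_unlock(&vsx_section);

        may_block();   /* safe: nothing is held across this */
    }
    return 0;
}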
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 502b94fb116a..b6e9df11115d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
1012 | if (r) | 1012 | if (r) |
1013 | return r; | 1013 | return r; |
1014 | 1014 | ||
1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { | 1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) |
1016 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; | 1016 | parser->job->preamble_status |= |
1017 | if (!parser->ctx->preamble_presented) { | 1017 | AMDGPU_PREAMBLE_IB_PRESENT; |
1018 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
1019 | parser->ctx->preamble_presented = true; | ||
1020 | } | ||
1021 | } | ||
1022 | 1018 | ||
1023 | if (parser->ring && parser->ring != ring) | 1019 | if (parser->ring && parser->ring != ring) |
1024 | return -EINVAL; | 1020 | return -EINVAL; |
@@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1207 | 1203 | ||
1208 | int r; | 1204 | int r; |
1209 | 1205 | ||
1206 | job = p->job; | ||
1207 | p->job = NULL; | ||
1208 | |||
1209 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
1210 | if (r) | ||
1211 | goto error_unlock; | ||
1212 | |||
1213 | /* No memory allocation is allowed while holding the mn lock */ | ||
1210 | amdgpu_mn_lock(p->mn); | 1214 | amdgpu_mn_lock(p->mn); |
1211 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { | 1215 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
1212 | struct amdgpu_bo *bo = e->robj; | 1216 | struct amdgpu_bo *bo = e->robj; |
1213 | 1217 | ||
1214 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { | 1218 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { |
1215 | amdgpu_mn_unlock(p->mn); | 1219 | r = -ERESTARTSYS; |
1216 | return -ERESTARTSYS; | 1220 | goto error_abort; |
1217 | } | 1221 | } |
1218 | } | 1222 | } |
1219 | 1223 | ||
1220 | job = p->job; | ||
1221 | p->job = NULL; | ||
1222 | |||
1223 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
1224 | if (r) { | ||
1225 | amdgpu_job_free(job); | ||
1226 | amdgpu_mn_unlock(p->mn); | ||
1227 | return r; | ||
1228 | } | ||
1229 | |||
1230 | job->owner = p->filp; | 1224 | job->owner = p->filp; |
1231 | p->fence = dma_fence_get(&job->base.s_fence->finished); | 1225 | p->fence = dma_fence_get(&job->base.s_fence->finished); |
1232 | 1226 | ||
@@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1241 | 1235 | ||
1242 | amdgpu_cs_post_dependencies(p); | 1236 | amdgpu_cs_post_dependencies(p); |
1243 | 1237 | ||
1238 | if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && | ||
1239 | !p->ctx->preamble_presented) { | ||
1240 | job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
1241 | p->ctx->preamble_presented = true; | ||
1242 | } | ||
1243 | |||
1244 | cs->out.handle = seq; | 1244 | cs->out.handle = seq; |
1245 | job->uf_sequence = seq; | 1245 | job->uf_sequence = seq; |
1246 | 1246 | ||
@@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1258 | amdgpu_mn_unlock(p->mn); | 1258 | amdgpu_mn_unlock(p->mn); |
1259 | 1259 | ||
1260 | return 0; | 1260 | return 0; |
1261 | |||
1262 | error_abort: | ||
1263 | dma_fence_put(&job->base.s_fence->finished); | ||
1264 | job->base.s_fence = NULL; | ||
1265 | |||
1266 | error_unlock: | ||
1267 | amdgpu_job_free(job); | ||
1268 | amdgpu_mn_unlock(p->mn); | ||
1269 | return r; | ||
1261 | } | 1270 | } |
1262 | 1271 | ||
1263 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 1272 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
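The amdgpu_cs_submit() rework initialises the scheduler job before taking the MMU-notifier lock (no allocations are allowed under it) and unwinds through ordered error labels instead of open-coded cleanup. The standalone sketch below shows that shape; -EINTR stands in for the kernel's -ERESTARTSYS and the labels are illustrative, not the driver's exact ones:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mn_lock = PTHREAD_MUTEX_INITIALIZER;

static int sched_job_init(void)
{
    return 0;   /* stand-in for drm_sched_job_init() */
}

static int submit(int userptr_invalidated)
{
    void *job;
    int ret;

    /* Anything that can allocate happens before the lock is taken. */
    job = malloc(64);
    if (!job)
        return -ENOMEM;

    ret = sched_job_init();
    if (ret)
        goto error_fini;

    pthread_mutex_lock(&mn_lock);

    if (userptr_invalidated) {
        ret = -EINTR;
        goto error_abort;
    }

    /* ... push the job to the scheduler here ... */
    pthread_mutex_unlock(&mn_lock);
    free(job);   /* the sketch frees it; the driver hands it off instead */
    return 0;

error_abort:
    pthread_mutex_unlock(&mn_lock);
error_fini:
    free(job);
    return ret;
}

int main(void)
{
    printf("clean submit: %d\n", submit(0));
    printf("userptr invalidated: %d\n", submit(1));
    return 0;
}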
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 5518e623fed2..51b5e977ca88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
164 | return r; | 164 | return r; |
165 | } | 165 | } |
166 | 166 | ||
167 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
167 | if (ring->funcs->emit_pipeline_sync && job && | 168 | if (ring->funcs->emit_pipeline_sync && job && |
168 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || | 169 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || |
170 | (amdgpu_sriov_vf(adev) && need_ctx_switch) || | ||
169 | amdgpu_vm_need_pipeline_sync(ring, job))) { | 171 | amdgpu_vm_need_pipeline_sync(ring, job))) { |
170 | need_pipe_sync = true; | 172 | need_pipe_sync = true; |
171 | dma_fence_put(tmp); | 173 | dma_fence_put(tmp); |
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
196 | } | 198 | } |
197 | 199 | ||
198 | skip_preamble = ring->current_ctx == fence_ctx; | 200 | skip_preamble = ring->current_ctx == fence_ctx; |
199 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
200 | if (job && ring->funcs->emit_cntxcntl) { | 201 | if (job && ring->funcs->emit_cntxcntl) { |
201 | if (need_ctx_switch) | 202 | if (need_ctx_switch) |
202 | status |= AMDGPU_HAVE_CTX_SWITCH; | 203 | status |= AMDGPU_HAVE_CTX_SWITCH; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8f98629fbe59..7b4e657a95c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
1932 | amdgpu_fence_wait_empty(ring); | 1932 | amdgpu_fence_wait_empty(ring); |
1933 | } | 1933 | } |
1934 | 1934 | ||
1935 | mutex_lock(&adev->pm.mutex); | ||
1936 | /* update battery/ac status */ | ||
1937 | if (power_supply_is_system_supplied() > 0) | ||
1938 | adev->pm.ac_power = true; | ||
1939 | else | ||
1940 | adev->pm.ac_power = false; | ||
1941 | mutex_unlock(&adev->pm.mutex); | ||
1942 | |||
1943 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | 1935 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
1944 | if (!amdgpu_device_has_dc_support(adev)) { | 1936 | if (!amdgpu_device_has_dc_support(adev)) { |
1945 | mutex_lock(&adev->pm.mutex); | 1937 | mutex_lock(&adev->pm.mutex); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..b17771dd5ce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, | |||
172 | * is validated on next vm use to avoid fault. | 172 | * is validated on next vm use to avoid fault. |
173 | * */ | 173 | * */ |
174 | list_move_tail(&base->vm_status, &vm->evicted); | 174 | list_move_tail(&base->vm_status, &vm->evicted); |
175 | base->moved = true; | ||
175 | } | 176 | } |
176 | 177 | ||
177 | /** | 178 | /** |
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
369 | uint64_t addr; | 370 | uint64_t addr; |
370 | int r; | 371 | int r; |
371 | 372 | ||
372 | addr = amdgpu_bo_gpu_offset(bo); | ||
373 | entries = amdgpu_bo_size(bo) / 8; | 373 | entries = amdgpu_bo_size(bo) / 8; |
374 | 374 | ||
375 | if (pte_support_ats) { | 375 | if (pte_support_ats) { |
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
401 | if (r) | 401 | if (r) |
402 | goto error; | 402 | goto error; |
403 | 403 | ||
404 | addr = amdgpu_bo_gpu_offset(bo); | ||
404 | if (ats_entries) { | 405 | if (ats_entries) { |
405 | uint64_t ats_value; | 406 | uint64_t ats_value; |
406 | 407 | ||
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) | |||
2483 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size | 2484 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size |
2484 | * | 2485 | * |
2485 | * @adev: amdgpu_device pointer | 2486 | * @adev: amdgpu_device pointer |
2486 | * @vm_size: the default vm size if it's set auto | 2487 | * @min_vm_size: the minimum vm size in GB if it's set auto |
2487 | * @fragment_size_default: Default PTE fragment size | 2488 | * @fragment_size_default: Default PTE fragment size |
2488 | * @max_level: max VMPT level | 2489 | * @max_level: max VMPT level |
2489 | * @max_bits: max address space size in bits | 2490 | * @max_bits: max address space size in bits |
2490 | * | 2491 | * |
2491 | */ | 2492 | */ |
2492 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 2493 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
2493 | uint32_t fragment_size_default, unsigned max_level, | 2494 | uint32_t fragment_size_default, unsigned max_level, |
2494 | unsigned max_bits) | 2495 | unsigned max_bits) |
2495 | { | 2496 | { |
2497 | unsigned int max_size = 1 << (max_bits - 30); | ||
2498 | unsigned int vm_size; | ||
2496 | uint64_t tmp; | 2499 | uint64_t tmp; |
2497 | 2500 | ||
2498 | /* adjust vm size first */ | 2501 | /* adjust vm size first */ |
2499 | if (amdgpu_vm_size != -1) { | 2502 | if (amdgpu_vm_size != -1) { |
2500 | unsigned max_size = 1 << (max_bits - 30); | ||
2501 | |||
2502 | vm_size = amdgpu_vm_size; | 2503 | vm_size = amdgpu_vm_size; |
2503 | if (vm_size > max_size) { | 2504 | if (vm_size > max_size) { |
2504 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", | 2505 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", |
2505 | amdgpu_vm_size, max_size); | 2506 | amdgpu_vm_size, max_size); |
2506 | vm_size = max_size; | 2507 | vm_size = max_size; |
2507 | } | 2508 | } |
2509 | } else { | ||
2510 | struct sysinfo si; | ||
2511 | unsigned int phys_ram_gb; | ||
2512 | |||
2513 | /* Optimal VM size depends on the amount of physical | ||
2514 | * RAM available. Underlying requirements and | ||
2515 | * assumptions: | ||
2516 | * | ||
2517 | * - Need to map system memory and VRAM from all GPUs | ||
2518 | * - VRAM from other GPUs not known here | ||
2519 | * - Assume VRAM <= system memory | ||
2520 | * - On GFX8 and older, VM space can be segmented for | ||
2521 | * different MTYPEs | ||
2522 | * - Need to allow room for fragmentation, guard pages etc. | ||
2523 | * | ||
2524 | * This adds up to a rough guess of system memory x3. | ||
2525 | * Round up to power of two to maximize the available | ||
2526 | * VM size with the given page table size. | ||
2527 | */ | ||
2528 | si_meminfo(&si); | ||
2529 | phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + | ||
2530 | (1 << 30) - 1) >> 30; | ||
2531 | vm_size = roundup_pow_of_two( | ||
2532 | min(max(phys_ram_gb * 3, min_vm_size), max_size)); | ||
2508 | } | 2533 | } |
2509 | 2534 | ||
2510 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; | 2535 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; |
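The new auto-sizing branch in amdgpu_vm_adjust_size() picks roughly three times physical RAM, rounded up to a power of two and clamped between min_vm_size and what the address space allows. The small user-space sketch below walks that arithmetic for a hypothetical 16 GiB machine with a 48-bit address space; the kernel helpers (roundup_pow_of_two, min/max) are modeled by hand and the numbers are illustrative only.

#include <stdio.h>

/* Model of roundup_pow_of_two() for the values used here. */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;
	while (p < v)
		p <<= 1;
	return p;
}

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	/* Hypothetical system: 16 GiB of RAM, 48-bit GPU address space,
	 * min_vm_size of 32 GB. */
	unsigned long long totalram_bytes = 16ULL << 30;
	unsigned int max_bits = 48;
	unsigned int min_vm_size = 32;

	unsigned int max_size = 1u << (max_bits - 30);                 /* 262144 GB */
	unsigned int phys_ram_gb = (totalram_bytes + (1u << 30) - 1) >> 30;
	unsigned int vm_size = roundup_pow_of_two(
		min_u(max_u(phys_ram_gb * 3, min_vm_size), max_size));

	printf("phys_ram_gb=%u -> vm_size=%u GB\n", phys_ram_gb, vm_size);
	/* 16 GiB RAM -> 3x = 48 -> rounded up to 64 GB of VM space. */
	return 0;
}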
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..9fa9df0c5e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, | |||
321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); | 321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); |
322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | 322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, |
323 | struct amdgpu_bo_va *bo_va); | 323 | struct amdgpu_bo_va *bo_va); |
324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
325 | uint32_t fragment_size_default, unsigned max_level, | 325 | uint32_t fragment_size_default, unsigned max_level, |
326 | unsigned max_bits); | 326 | unsigned max_bits); |
327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..5a9534a82d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
5664 | if (amdgpu_sriov_vf(adev)) | 5664 | if (amdgpu_sriov_vf(adev)) |
5665 | return 0; | 5665 | return 0; |
5666 | 5666 | ||
5667 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | ||
5668 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
5669 | AMD_PG_SUPPORT_CP | | ||
5670 | AMD_PG_SUPPORT_GFX_DMG)) | ||
5671 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | ||
5667 | switch (adev->asic_type) { | 5672 | switch (adev->asic_type) { |
5668 | case CHIP_CARRIZO: | 5673 | case CHIP_CARRIZO: |
5669 | case CHIP_STONEY: | 5674 | case CHIP_STONEY: |
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
5713 | default: | 5718 | default: |
5714 | break; | 5719 | break; |
5715 | } | 5720 | } |
5716 | 5721 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | |
5722 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
5723 | AMD_PG_SUPPORT_CP | | ||
5724 | AMD_PG_SUPPORT_GFX_DMG)) | ||
5725 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | ||
5717 | return 0; | 5726 | return 0; |
5718 | } | 5727 | } |
5719 | 5728 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) | |||
632 | amdgpu_gart_table_vram_unpin(adev); | 632 | amdgpu_gart_table_vram_unpin(adev); |
633 | } | 633 | } |
634 | 634 | ||
635 | static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) | ||
636 | { | ||
637 | amdgpu_gart_table_vram_free(adev); | ||
638 | amdgpu_gart_fini(adev); | ||
639 | } | ||
640 | |||
641 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, | 635 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, |
642 | u32 status, u32 addr, u32 mc_client) | 636 | u32 status, u32 addr, u32 mc_client) |
643 | { | 637 | { |
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) | |||
935 | 929 | ||
936 | amdgpu_gem_force_release(adev); | 930 | amdgpu_gem_force_release(adev); |
937 | amdgpu_vm_manager_fini(adev); | 931 | amdgpu_vm_manager_fini(adev); |
938 | gmc_v6_0_gart_fini(adev); | 932 | amdgpu_gart_table_vram_free(adev); |
939 | amdgpu_bo_fini(adev); | 933 | amdgpu_bo_fini(adev); |
934 | amdgpu_gart_fini(adev); | ||
940 | release_firmware(adev->gmc.fw); | 935 | release_firmware(adev->gmc.fw); |
941 | adev->gmc.fw = NULL; | 936 | adev->gmc.fw = NULL; |
942 | 937 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) | |||
747 | } | 747 | } |
748 | 748 | ||
749 | /** | 749 | /** |
750 | * gmc_v7_0_gart_fini - vm fini callback | ||
751 | * | ||
752 | * @adev: amdgpu_device pointer | ||
753 | * | ||
754 | * Tears down the driver GART/VM setup (CIK). | ||
755 | */ | ||
756 | static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) | ||
757 | { | ||
758 | amdgpu_gart_table_vram_free(adev); | ||
759 | amdgpu_gart_fini(adev); | ||
760 | } | ||
761 | |||
762 | /** | ||
763 | * gmc_v7_0_vm_decode_fault - print human readable fault info | 750 | * gmc_v7_0_vm_decode_fault - print human readable fault info |
764 | * | 751 | * |
765 | * @adev: amdgpu_device pointer | 752 | * @adev: amdgpu_device pointer |
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) | |||
1095 | amdgpu_gem_force_release(adev); | 1082 | amdgpu_gem_force_release(adev); |
1096 | amdgpu_vm_manager_fini(adev); | 1083 | amdgpu_vm_manager_fini(adev); |
1097 | kfree(adev->gmc.vm_fault_info); | 1084 | kfree(adev->gmc.vm_fault_info); |
1098 | gmc_v7_0_gart_fini(adev); | 1085 | amdgpu_gart_table_vram_free(adev); |
1099 | amdgpu_bo_fini(adev); | 1086 | amdgpu_bo_fini(adev); |
1087 | amdgpu_gart_fini(adev); | ||
1100 | release_firmware(adev->gmc.fw); | 1088 | release_firmware(adev->gmc.fw); |
1101 | adev->gmc.fw = NULL; | 1089 | adev->gmc.fw = NULL; |
1102 | 1090 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) | |||
969 | } | 969 | } |
970 | 970 | ||
971 | /** | 971 | /** |
972 | * gmc_v8_0_gart_fini - vm fini callback | ||
973 | * | ||
974 | * @adev: amdgpu_device pointer | ||
975 | * | ||
976 | * Tears down the driver GART/VM setup (CIK). | ||
977 | */ | ||
978 | static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) | ||
979 | { | ||
980 | amdgpu_gart_table_vram_free(adev); | ||
981 | amdgpu_gart_fini(adev); | ||
982 | } | ||
983 | |||
984 | /** | ||
985 | * gmc_v8_0_vm_decode_fault - print human readable fault info | 972 | * gmc_v8_0_vm_decode_fault - print human readable fault info |
986 | * | 973 | * |
987 | * @adev: amdgpu_device pointer | 974 | * @adev: amdgpu_device pointer |
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) | |||
1199 | amdgpu_gem_force_release(adev); | 1186 | amdgpu_gem_force_release(adev); |
1200 | amdgpu_vm_manager_fini(adev); | 1187 | amdgpu_vm_manager_fini(adev); |
1201 | kfree(adev->gmc.vm_fault_info); | 1188 | kfree(adev->gmc.vm_fault_info); |
1202 | gmc_v8_0_gart_fini(adev); | 1189 | amdgpu_gart_table_vram_free(adev); |
1203 | amdgpu_bo_fini(adev); | 1190 | amdgpu_bo_fini(adev); |
1191 | amdgpu_gart_fini(adev); | ||
1204 | release_firmware(adev->gmc.fw); | 1192 | release_firmware(adev->gmc.fw); |
1205 | adev->gmc.fw = NULL; | 1193 | adev->gmc.fw = NULL; |
1206 | 1194 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) | |||
942 | return 0; | 942 | return 0; |
943 | } | 943 | } |
944 | 944 | ||
945 | /** | ||
946 | * gmc_v9_0_gart_fini - vm fini callback | ||
947 | * | ||
948 | * @adev: amdgpu_device pointer | ||
949 | * | ||
950 | * Tears down the driver GART/VM setup (CIK). | ||
951 | */ | ||
952 | static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) | ||
953 | { | ||
954 | amdgpu_gart_table_vram_free(adev); | ||
955 | amdgpu_gart_fini(adev); | ||
956 | } | ||
957 | |||
958 | static int gmc_v9_0_sw_fini(void *handle) | 945 | static int gmc_v9_0_sw_fini(void *handle) |
959 | { | 946 | { |
960 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 947 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
961 | 948 | ||
962 | amdgpu_gem_force_release(adev); | 949 | amdgpu_gem_force_release(adev); |
963 | amdgpu_vm_manager_fini(adev); | 950 | amdgpu_vm_manager_fini(adev); |
964 | gmc_v9_0_gart_fini(adev); | ||
965 | 951 | ||
966 | /* | 952 | /* |
967 | * TODO: | 953 | * TODO: |
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) | |||
974 | */ | 960 | */ |
975 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); | 961 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); |
976 | 962 | ||
963 | amdgpu_gart_table_vram_free(adev); | ||
977 | amdgpu_bo_fini(adev); | 964 | amdgpu_bo_fini(adev); |
965 | amdgpu_gart_fini(adev); | ||
978 | 966 | ||
979 | return 0; | 967 | return 0; |
980 | } | 968 | } |
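The gmc_v6/7/8/9 hunks above all drop the per-generation gart_fini() wrappers and reorder sw_fini so the GART table buffer object is freed before amdgpu_bo_fini(), with amdgpu_gart_fini() moved after it. The sketch below only illustrates the general teardown-ordering concern (release tracked objects before finalizing their manager); it does not restate the kernel patch's specific motivation, and all names are invented.

#include <stdio.h>

/* Toy "buffer object manager": counts live allocations. */
static int live_bos;

static void bo_alloc(void)  { live_bos++; }
static void bo_free(void)   { live_bos--; }
static void bo_mgr_fini(void)
{
	if (live_bos)
		printf("WARN: finalizing BO manager with %d live BOs\n", live_bos);
	else
		printf("BO manager torn down cleanly\n");
}
static void gart_state_fini(void) { printf("GART bookkeeping released\n"); }

int main(void)
{
	bo_alloc();           /* the GART table lives in a BO           */

	/* Ordering used by the patch: free the table BO first, finalize
	 * the BO manager, then drop the remaining GART state. */
	bo_free();            /* like amdgpu_gart_table_vram_free()     */
	bo_mgr_fini();        /* like amdgpu_bo_fini()                  */
	gart_state_fini();    /* like amdgpu_gart_fini()                */
	return 0;
}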
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, | |||
65 | int min_temp, int max_temp); | 65 | int min_temp, int max_temp); |
66 | static int kv_init_fps_limits(struct amdgpu_device *adev); | 66 | static int kv_init_fps_limits(struct amdgpu_device *adev); |
67 | 67 | ||
68 | static void kv_dpm_powergate_uvd(void *handle, bool gate); | ||
69 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); | ||
70 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); | 68 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); |
71 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); | 69 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); |
72 | 70 | ||
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
1354 | return ret; | 1352 | return ret; |
1355 | } | 1353 | } |
1356 | 1354 | ||
1357 | kv_update_current_ps(adev, adev->pm.dpm.boot_ps); | ||
1358 | |||
1359 | if (adev->irq.installed && | 1355 | if (adev->irq.installed && |
1360 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { | 1356 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { |
1361 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); | 1357 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); |
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
1374 | 1370 | ||
1375 | static void kv_dpm_disable(struct amdgpu_device *adev) | 1371 | static void kv_dpm_disable(struct amdgpu_device *adev) |
1376 | { | 1372 | { |
1373 | struct kv_power_info *pi = kv_get_pi(adev); | ||
1374 | |||
1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1375 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
1378 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); | 1376 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
1379 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev) | |||
1387 | /* powerup blocks */ | 1385 | /* powerup blocks */ |
1388 | kv_dpm_powergate_acp(adev, false); | 1386 | kv_dpm_powergate_acp(adev, false); |
1389 | kv_dpm_powergate_samu(adev, false); | 1387 | kv_dpm_powergate_samu(adev, false); |
1390 | kv_dpm_powergate_vce(adev, false); | 1388 | if (pi->caps_vce_pg) /* power on the VCE block */ |
1391 | kv_dpm_powergate_uvd(adev, false); | 1389 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); |
1390 | if (pi->caps_uvd_pg) /* power on the UVD block */ | ||
1391 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); | ||
1392 | 1392 | ||
1393 | kv_enable_smc_cac(adev, false); | 1393 | kv_enable_smc_cac(adev, false); |
1394 | kv_enable_didt(adev, false); | 1394 | kv_enable_didt(adev, false); |
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
1551 | int ret; | 1551 | int ret; |
1552 | 1552 | ||
1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { | 1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { |
1554 | kv_dpm_powergate_vce(adev, false); | ||
1555 | if (pi->caps_stable_p_state) | 1554 | if (pi->caps_stable_p_state) |
1556 | pi->vce_boot_level = table->count - 1; | 1555 | pi->vce_boot_level = table->count - 1; |
1557 | else | 1556 | else |
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
1573 | kv_enable_vce_dpm(adev, true); | 1572 | kv_enable_vce_dpm(adev, true); |
1574 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { | 1573 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { |
1575 | kv_enable_vce_dpm(adev, false); | 1574 | kv_enable_vce_dpm(adev, false); |
1576 | kv_dpm_powergate_vce(adev, true); | ||
1577 | } | 1575 | } |
1578 | 1576 | ||
1579 | return 0; | 1577 | return 0; |
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) | |||
1702 | } | 1700 | } |
1703 | } | 1701 | } |
1704 | 1702 | ||
1705 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) | 1703 | static void kv_dpm_powergate_vce(void *handle, bool gate) |
1706 | { | 1704 | { |
1705 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
1707 | struct kv_power_info *pi = kv_get_pi(adev); | 1706 | struct kv_power_info *pi = kv_get_pi(adev); |
1708 | 1707 | int ret; | |
1709 | if (pi->vce_power_gated == gate) | ||
1710 | return; | ||
1711 | 1708 | ||
1712 | pi->vce_power_gated = gate; | 1709 | pi->vce_power_gated = gate; |
1713 | 1710 | ||
1714 | if (!pi->caps_vce_pg) | 1711 | if (gate) { |
1715 | return; | 1712 | /* stop the VCE block */ |
1716 | 1713 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | |
1717 | if (gate) | 1714 | AMD_PG_STATE_GATE); |
1718 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); | 1715 | kv_enable_vce_dpm(adev, false); |
1719 | else | 1716 | if (pi->caps_vce_pg) /* power off the VCE block */ |
1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | 1717 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); |
1718 | } else { | ||
1719 | if (pi->caps_vce_pg) /* power on the VCE block */ | ||
1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | ||
1721 | kv_enable_vce_dpm(adev, true); | ||
1722 | /* re-init the VCE block */ | ||
1723 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | ||
1724 | AMD_PG_STATE_UNGATE); | ||
1725 | } | ||
1721 | } | 1726 | } |
1722 | 1727 | ||
1728 | |||
1723 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) | 1729 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) |
1724 | { | 1730 | { |
1725 | struct kv_power_info *pi = kv_get_pi(adev); | 1731 | struct kv_power_info *pi = kv_get_pi(adev); |
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) | |||
3061 | else | 3067 | else |
3062 | adev->pm.dpm_enabled = true; | 3068 | adev->pm.dpm_enabled = true; |
3063 | mutex_unlock(&adev->pm.mutex); | 3069 | mutex_unlock(&adev->pm.mutex); |
3064 | 3070 | amdgpu_pm_compute_clocks(adev); | |
3065 | return ret; | 3071 | return ret; |
3066 | } | 3072 | } |
3067 | 3073 | ||
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle, | |||
3313 | case AMD_IP_BLOCK_TYPE_UVD: | 3319 | case AMD_IP_BLOCK_TYPE_UVD: |
3314 | kv_dpm_powergate_uvd(handle, gate); | 3320 | kv_dpm_powergate_uvd(handle, gate); |
3315 | break; | 3321 | break; |
3322 | case AMD_IP_BLOCK_TYPE_VCE: | ||
3323 | kv_dpm_powergate_vce(handle, gate); | ||
3324 | break; | ||
3316 | default: | 3325 | default: |
3317 | break; | 3326 | break; |
3318 | } | 3327 | } |
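In the kv_dpm.c hunks, VCE power gating now flows through kv_set_powergating_by_smu(): gating stops the VCE IP block, disables VCE DPM, then sends the SMU power-off message, and ungating mirrors those steps in reverse. The compact standalone C sketch below shows that symmetric gate/ungate ordering; the function names only model the flow and are not the real driver entry points.

#include <stdbool.h>
#include <stdio.h>

static void set_block_state(bool gated) { printf("VCE block %s\n", gated ? "stopped" : "re-initialized"); }
static void enable_dpm(bool on)         { printf("VCE DPM %s\n", on ? "enabled" : "disabled"); }
static void smu_power(bool on)          { printf("SMU: VCE power %s\n", on ? "ON" : "OFF"); }

static void powergate_vce(bool gate)
{
	if (gate) {
		set_block_state(true);   /* stop the block first      */
		enable_dpm(false);       /* then stop its DPM         */
		smu_power(false);        /* finally cut power via SMU */
	} else {
		smu_power(true);         /* power on first            */
		enable_dpm(true);        /* re-enable DPM             */
		set_block_state(false);  /* then re-init the block    */
	}
}

int main(void)
{
	powergate_vce(true);
	powergate_vce(false);
	return 0;
}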
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) | |||
6887 | 6887 | ||
6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
6889 | si_thermal_start_thermal_controller(adev); | 6889 | si_thermal_start_thermal_controller(adev); |
6890 | ni_update_current_ps(adev, boot_ps); | ||
6891 | 6890 | ||
6892 | return 0; | 6891 | return 0; |
6893 | } | 6892 | } |
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) | |||
7763 | else | 7762 | else |
7764 | adev->pm.dpm_enabled = true; | 7763 | adev->pm.dpm_enabled = true; |
7765 | mutex_unlock(&adev->pm.mutex); | 7764 | mutex_unlock(&adev->pm.mutex); |
7766 | 7765 | amdgpu_pm_compute_clocks(adev); | |
7767 | return ret; | 7766 | return ret; |
7768 | } | 7767 | } |
7769 | 7768 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | |||
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, | |||
480 | { | 480 | { |
481 | struct dc_context *ctx = pp->ctx; | 481 | struct dc_context *ctx = pp->ctx; |
482 | struct amdgpu_device *adev = ctx->driver_context; | 482 | struct amdgpu_device *adev = ctx->driver_context; |
483 | void *pp_handle = adev->powerplay.pp_handle; | ||
483 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | 484 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; |
485 | struct pp_display_clock_request clock = {0}; | ||
484 | 486 | ||
485 | if (!pp_funcs || !pp_funcs->display_configuration_changed) | 487 | if (!pp_funcs || !pp_funcs->display_clock_voltage_request) |
486 | return; | 488 | return; |
487 | 489 | ||
488 | amdgpu_dpm_display_configuration_changed(adev); | 490 | clock.clock_type = amd_pp_dcf_clock; |
491 | clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; | ||
492 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
493 | |||
494 | clock.clock_type = amd_pp_f_clock; | ||
495 | clock.clock_freq_in_khz = req->hard_min_fclk_khz; | ||
496 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
489 | } | 497 | } |
490 | 498 | ||
491 | void pp_rv_set_wm_ranges(struct pp_smu *pp, | 499 | void pp_rv_set_wm_ranges(struct pp_smu *pp, |
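pp_rv_set_display_requirement() now translates the DM's requirement into two explicit display_clock_voltage_request() calls, one for the DCEF clock and one for the F clock hard minimums, instead of the generic configuration-changed notification. The sketch below fills a stand-in request structure twice in the same way; the struct, enum and handler are invented placeholders, not the real powerplay types.

#include <stdio.h>

/* Stand-ins for the powerplay request; not the real definitions. */
enum clk_type { CLK_DCEF, CLK_F };
struct clock_request { enum clk_type type; unsigned int freq_khz; };

static void clock_voltage_request(const struct clock_request *req)
{
	printf("request %s >= %u kHz\n",
	       req->type == CLK_DCEF ? "dcefclk" : "fclk", req->freq_khz);
}

int main(void)
{
	/* Hypothetical hard minimums coming from the display code. */
	unsigned int hard_min_dcefclk_khz = 600000;
	unsigned int hard_min_fclk_khz = 800000;
	struct clock_request clock = { 0 };

	clock.type = CLK_DCEF;
	clock.freq_khz = hard_min_dcefclk_khz;
	clock_voltage_request(&clock);

	clock.type = CLK_F;
	clock.freq_khz = hard_min_fclk_khz;
	clock_voltage_request(&clock);
	return 0;
}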
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..37eaf72ace54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) | |||
754 | * fail-safe mode | 754 | * fail-safe mode |
755 | */ | 755 | */ |
756 | if (dc_is_hdmi_signal(link->connector_signal) || | 756 | if (dc_is_hdmi_signal(link->connector_signal) || |
757 | dc_is_dvi_signal(link->connector_signal)) | 757 | dc_is_dvi_signal(link->connector_signal)) { |
758 | if (prev_sink != NULL) | ||
759 | dc_sink_release(prev_sink); | ||
760 | |||
758 | return false; | 761 | return false; |
762 | } | ||
759 | default: | 763 | default: |
760 | break; | 764 | break; |
761 | } | 765 | } |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..98358b4b36de 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, | |||
199 | vma->flags |= I915_VMA_GGTT; | 199 | vma->flags |= I915_VMA_GGTT; |
200 | list_add(&vma->obj_link, &obj->vma_list); | 200 | list_add(&vma->obj_link, &obj->vma_list); |
201 | } else { | 201 | } else { |
202 | i915_ppgtt_get(i915_vm_to_ppgtt(vm)); | ||
203 | list_add_tail(&vma->obj_link, &obj->vma_list); | 202 | list_add_tail(&vma->obj_link, &obj->vma_list); |
204 | } | 203 | } |
205 | 204 | ||
@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) | |||
807 | if (vma->obj) | 806 | if (vma->obj) |
808 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); | 807 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); |
809 | 808 | ||
810 | if (!i915_vma_is_ggtt(vma)) | ||
811 | i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); | ||
812 | |||
813 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { | 809 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { |
814 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); | 810 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); |
815 | kfree(iter); | 811 | kfree(iter); |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b725835b47ef..769f3f586661 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) | |||
962 | { | 962 | { |
963 | int ret; | 963 | int ret; |
964 | 964 | ||
965 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | ||
966 | return; | ||
967 | |||
968 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); | 965 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); |
969 | if (ret < 0) { | 966 | if (ret < 0) { |
970 | DRM_ERROR("failed to add audio component (%d)\n", ret); | 967 | DRM_ERROR("failed to add audio component (%d)\n", ret); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ed3fa1c8a983..4a3c8ee9a973 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; | 2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; |
2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; | 2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; |
2990 | int dst_x = plane_state->base.dst.x1; | 2990 | int dst_x = plane_state->base.dst.x1; |
2991 | int dst_w = drm_rect_width(&plane_state->base.dst); | ||
2991 | int pipe_src_w = crtc_state->pipe_src_w; | 2992 | int pipe_src_w = crtc_state->pipe_src_w; |
2992 | int max_width = skl_max_plane_width(fb, 0, rotation); | 2993 | int max_width = skl_max_plane_width(fb, 0, rotation); |
2993 | int max_height = 4096; | 2994 | int max_height = 4096; |
@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
3009 | * screen may cause FIFO underflow and display corruption. | 3010 | * screen may cause FIFO underflow and display corruption. |
3010 | */ | 3011 | */ |
3011 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && | 3012 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && |
3012 | (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { | 3013 | (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { |
3013 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", | 3014 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", |
3014 | dst_x + w < 4 ? "end" : "start", | 3015 | dst_x + dst_w < 4 ? "end" : "start", |
3015 | dst_x + w < 4 ? dst_x + w : dst_x, | 3016 | dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, |
3016 | 4, pipe_src_w - 4); | 3017 | 4, pipe_src_w - 4); |
3017 | return -ERANGE; | 3018 | return -ERANGE; |
3018 | } | 3019 | } |
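The intel_display.c hunk validates the plane's X position against the destination width (after scaling) rather than the source width, since the two differ for scaled planes. The arithmetic below, using purely hypothetical plane geometry, shows a downscaled plane near the left edge that the old source-width check would accept but the destination-width check rejects.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical downscaled plane: 8 source pixels shown in 2. */
	int w = 8;            /* source width in pixels */
	int dst_w = 2;        /* destination width      */
	int dst_x = 0;        /* plane starts at x = 0  */
	int pipe_src_w = 1920;

	bool old_reject = (dst_x + w < 4) || (dst_x > pipe_src_w - 4);
	bool new_reject = (dst_x + dst_w < 4) || (dst_x > pipe_src_w - 4);

	printf("old check rejects: %d, new check rejects: %d\n",
	       old_reject, new_reject);
	/* old: 0 + 8 = 8, accepted; new: 0 + 2 = 2 < 4, rejected. */
	return 0;
}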
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a9076402dcb0..192972a7d287 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, | |||
943 | 943 | ||
944 | ret = i2c_transfer(adapter, &msg, 1); | 944 | ret = i2c_transfer(adapter, &msg, 1); |
945 | if (ret == 1) | 945 | if (ret == 1) |
946 | return 0; | 946 | ret = 0; |
947 | return ret >= 0 ? -EIO : ret; | 947 | else if (ret >= 0) |
948 | ret = -EIO; | ||
949 | |||
950 | kfree(write_buf); | ||
951 | return ret; | ||
948 | } | 952 | } |
949 | 953 | ||
950 | static | 954 | static |
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, | |||
74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", | 74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", |
75 | lspcon_mode_name(mode)); | 75 | lspcon_mode_name(mode)); |
76 | 76 | ||
77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); | 77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); |
78 | if (current_mode != mode) | 78 | if (current_mode != mode) |
79 | DRM_ERROR("LSPCON mode hasn't settled\n"); | 79 | DRM_ERROR("LSPCON mode hasn't settled\n"); |
80 | 80 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 978782a77629..28d191192945 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c | |||
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, | |||
132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); | 132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); |
133 | } | 133 | } |
134 | 134 | ||
135 | static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) | ||
136 | { | ||
137 | return 4; | ||
138 | } | ||
139 | |||
135 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) | 140 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) |
136 | { | 141 | { |
137 | unsigned int reg; | 142 | unsigned int reg; |
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) | |||
157 | 162 | ||
158 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) | 163 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) |
159 | { | 164 | { |
165 | /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" | ||
166 | * is defined in the MediaTek HW data sheet. | ||
167 | * The letter order in XXX has no relation to the data | ||
168 | * arrangement in memory. | ||
169 | */ | ||
160 | switch (fmt) { | 170 | switch (fmt) { |
161 | default: | 171 | default: |
162 | case DRM_FORMAT_RGB565: | 172 | case DRM_FORMAT_RGB565: |
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { | |||
221 | .stop = mtk_ovl_stop, | 231 | .stop = mtk_ovl_stop, |
222 | .enable_vblank = mtk_ovl_enable_vblank, | 232 | .enable_vblank = mtk_ovl_enable_vblank, |
223 | .disable_vblank = mtk_ovl_disable_vblank, | 233 | .disable_vblank = mtk_ovl_disable_vblank, |
234 | .layer_nr = mtk_ovl_layer_nr, | ||
224 | .layer_on = mtk_ovl_layer_on, | 235 | .layer_on = mtk_ovl_layer_on, |
225 | .layer_off = mtk_ovl_layer_off, | 236 | .layer_off = mtk_ovl_layer_off, |
226 | .layer_config = mtk_ovl_layer_config, | 237 | .layer_config = mtk_ovl_layer_config, |
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 585943c81e1f..b0a5cffe345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c | |||
@@ -31,14 +31,31 @@ | |||
31 | #define RDMA_REG_UPDATE_INT BIT(0) | 31 | #define RDMA_REG_UPDATE_INT BIT(0) |
32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 | 32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 |
33 | #define RDMA_ENGINE_EN BIT(0) | 33 | #define RDMA_ENGINE_EN BIT(0) |
34 | #define RDMA_MODE_MEMORY BIT(1) | ||
34 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 | 35 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 |
36 | #define RDMA_MATRIX_ENABLE BIT(17) | ||
37 | #define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) | ||
38 | #define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) | ||
35 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 | 39 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 |
36 | #define DISP_REG_RDMA_TARGET_LINE 0x001c | 40 | #define DISP_REG_RDMA_TARGET_LINE 0x001c |
41 | #define DISP_RDMA_MEM_CON 0x0024 | ||
42 | #define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) | ||
43 | #define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) | ||
44 | #define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) | ||
45 | #define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) | ||
46 | #define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) | ||
47 | #define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) | ||
48 | #define MEM_MODE_INPUT_SWAP BIT(8) | ||
49 | #define DISP_RDMA_MEM_SRC_PITCH 0x002c | ||
50 | #define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 | ||
37 | #define DISP_REG_RDMA_FIFO_CON 0x0040 | 51 | #define DISP_REG_RDMA_FIFO_CON 0x0040 |
38 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) | 52 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) |
39 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) | 53 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) |
40 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) | 54 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) |
41 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) | 55 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) |
56 | #define DISP_RDMA_MEM_START_ADDR 0x0f00 | ||
57 | |||
58 | #define RDMA_MEM_GMC 0x40402020 | ||
42 | 59 | ||
43 | struct mtk_disp_rdma_data { | 60 | struct mtk_disp_rdma_data { |
44 | unsigned int fifo_size; | 61 | unsigned int fifo_size; |
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, | |||
138 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); | 155 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); |
139 | } | 156 | } |
140 | 157 | ||
158 | static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, | ||
159 | unsigned int fmt) | ||
160 | { | ||
161 | /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" | ||
162 | * is defined in the MediaTek HW data sheet. | ||
163 | * The letter order in XXX has no relation to the data | ||
164 | * arrangement in memory. | ||
165 | */ | ||
166 | switch (fmt) { | ||
167 | default: | ||
168 | case DRM_FORMAT_RGB565: | ||
169 | return MEM_MODE_INPUT_FORMAT_RGB565; | ||
170 | case DRM_FORMAT_BGR565: | ||
171 | return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; | ||
172 | case DRM_FORMAT_RGB888: | ||
173 | return MEM_MODE_INPUT_FORMAT_RGB888; | ||
174 | case DRM_FORMAT_BGR888: | ||
175 | return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; | ||
176 | case DRM_FORMAT_RGBX8888: | ||
177 | case DRM_FORMAT_RGBA8888: | ||
178 | return MEM_MODE_INPUT_FORMAT_ARGB8888; | ||
179 | case DRM_FORMAT_BGRX8888: | ||
180 | case DRM_FORMAT_BGRA8888: | ||
181 | return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; | ||
182 | case DRM_FORMAT_XRGB8888: | ||
183 | case DRM_FORMAT_ARGB8888: | ||
184 | return MEM_MODE_INPUT_FORMAT_RGBA8888; | ||
185 | case DRM_FORMAT_XBGR8888: | ||
186 | case DRM_FORMAT_ABGR8888: | ||
187 | return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; | ||
188 | case DRM_FORMAT_UYVY: | ||
189 | return MEM_MODE_INPUT_FORMAT_UYVY; | ||
190 | case DRM_FORMAT_YUYV: | ||
191 | return MEM_MODE_INPUT_FORMAT_YUYV; | ||
192 | } | ||
193 | } | ||
194 | |||
195 | static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) | ||
196 | { | ||
197 | return 1; | ||
198 | } | ||
199 | |||
200 | static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, | ||
201 | struct mtk_plane_state *state) | ||
202 | { | ||
203 | struct mtk_disp_rdma *rdma = comp_to_rdma(comp); | ||
204 | struct mtk_plane_pending_state *pending = &state->pending; | ||
205 | unsigned int addr = pending->addr; | ||
206 | unsigned int pitch = pending->pitch & 0xffff; | ||
207 | unsigned int fmt = pending->format; | ||
208 | unsigned int con; | ||
209 | |||
210 | con = rdma_fmt_convert(rdma, fmt); | ||
211 | writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); | ||
212 | |||
213 | if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { | ||
214 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
215 | RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); | ||
216 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
217 | RDMA_MATRIX_INT_MTX_SEL, | ||
218 | RDMA_MATRIX_INT_MTX_BT601_to_RGB); | ||
219 | } else { | ||
220 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
221 | RDMA_MATRIX_ENABLE, 0); | ||
222 | } | ||
223 | |||
224 | writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); | ||
225 | writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); | ||
226 | writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); | ||
227 | rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, | ||
228 | RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); | ||
229 | } | ||
230 | |||
141 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { | 231 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { |
142 | .config = mtk_rdma_config, | 232 | .config = mtk_rdma_config, |
143 | .start = mtk_rdma_start, | 233 | .start = mtk_rdma_start, |
144 | .stop = mtk_rdma_stop, | 234 | .stop = mtk_rdma_stop, |
145 | .enable_vblank = mtk_rdma_enable_vblank, | 235 | .enable_vblank = mtk_rdma_enable_vblank, |
146 | .disable_vblank = mtk_rdma_disable_vblank, | 236 | .disable_vblank = mtk_rdma_disable_vblank, |
237 | .layer_nr = mtk_rdma_layer_nr, | ||
238 | .layer_config = mtk_rdma_layer_config, | ||
147 | }; | 239 | }; |
148 | 240 | ||
149 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, | 241 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, |
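The new mtk_rdma_layer_config() programs the RDMA engine for direct memory scan-out: select the MEM_CON format word, enable the BT.601 YUV-to-RGB matrix only for UYVY/YUYV, then set the start address, pitch, GMC setting and the memory-mode bit. The sketch below walks the same decision sequence against a plain register model; the format IDs, register names and field values are invented, only the flow mirrors the patch.

#include <stdio.h>

/* Invented format IDs and register fields, standing in for the DRM
 * fourccs and the DISP_RDMA_* definitions. */
enum fmt { FMT_RGB565, FMT_XRGB8888, FMT_UYVY, FMT_YUYV };

static unsigned int mem_con;     /* format selection register   */
static unsigned int size_con0;   /* holds the matrix-enable bit */
#define MATRIX_ENABLE (1u << 17)

static unsigned int fmt_to_con(enum fmt f)
{
	switch (f) {
	default:
	case FMT_RGB565:   return 0x0 << 4;
	case FMT_XRGB8888: return 0x2 << 4;
	case FMT_UYVY:     return 0x4 << 4;
	case FMT_YUYV:     return 0x5 << 4;
	}
}

static void layer_config(enum fmt f, unsigned int addr, unsigned int pitch)
{
	mem_con = fmt_to_con(f);

	if (f == FMT_UYVY || f == FMT_YUYV)
		size_con0 |= MATRIX_ENABLE;    /* YUV input: convert to RGB */
	else
		size_con0 &= ~MATRIX_ENABLE;

	printf("fmt con=0x%x matrix=%s addr=0x%x pitch=%u\n",
	       mem_con, (size_con0 & MATRIX_ENABLE) ? "on" : "off",
	       addr, pitch);
}

int main(void)
{
	layer_config(FMT_XRGB8888, 0x40000000, 7680);
	layer_config(FMT_YUYV, 0x50000000, 3840);
	return 0;
}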
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 2d6aa150a9ff..0b976dfd04df 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c | |||
@@ -45,7 +45,8 @@ struct mtk_drm_crtc { | |||
45 | bool pending_needs_vblank; | 45 | bool pending_needs_vblank; |
46 | struct drm_pending_vblank_event *event; | 46 | struct drm_pending_vblank_event *event; |
47 | 47 | ||
48 | struct drm_plane planes[OVL_LAYER_NR]; | 48 | struct drm_plane *planes; |
49 | unsigned int layer_nr; | ||
49 | bool pending_planes; | 50 | bool pending_planes; |
50 | 51 | ||
51 | void __iomem *config_regs; | 52 | void __iomem *config_regs; |
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
171 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | 172 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) |
172 | { | 173 | { |
173 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 174 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
174 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 175 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
175 | 176 | ||
176 | mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); | 177 | mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base); |
177 | 178 | ||
178 | return 0; | 179 | return 0; |
179 | } | 180 | } |
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | |||
181 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) | 182 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) |
182 | { | 183 | { |
183 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 184 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
184 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 185 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
185 | 186 | ||
186 | mtk_ddp_comp_disable_vblank(ovl); | 187 | mtk_ddp_comp_disable_vblank(comp); |
187 | } | 188 | } |
188 | 189 | ||
189 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) | 190 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) |
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) | |||
286 | } | 287 | } |
287 | 288 | ||
288 | /* Initially configure all planes */ | 289 | /* Initially configure all planes */ |
289 | for (i = 0; i < OVL_LAYER_NR; i++) { | 290 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
290 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 291 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
291 | struct mtk_plane_state *plane_state; | 292 | struct mtk_plane_state *plane_state; |
292 | 293 | ||
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
334 | { | 335 | { |
335 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 336 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
336 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); | 337 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); |
337 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 338 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
338 | unsigned int i; | 339 | unsigned int i; |
339 | 340 | ||
340 | /* | 341 | /* |
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
343 | * queue update module registers on vblank. | 344 | * queue update module registers on vblank. |
344 | */ | 345 | */ |
345 | if (state->pending_config) { | 346 | if (state->pending_config) { |
346 | mtk_ddp_comp_config(ovl, state->pending_width, | 347 | mtk_ddp_comp_config(comp, state->pending_width, |
347 | state->pending_height, | 348 | state->pending_height, |
348 | state->pending_vrefresh, 0); | 349 | state->pending_vrefresh, 0); |
349 | 350 | ||
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
351 | } | 352 | } |
352 | 353 | ||
353 | if (mtk_crtc->pending_planes) { | 354 | if (mtk_crtc->pending_planes) { |
354 | for (i = 0; i < OVL_LAYER_NR; i++) { | 355 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
355 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 356 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
356 | struct mtk_plane_state *plane_state; | 357 | struct mtk_plane_state *plane_state; |
357 | 358 | ||
358 | plane_state = to_mtk_plane_state(plane->state); | 359 | plane_state = to_mtk_plane_state(plane->state); |
359 | 360 | ||
360 | if (plane_state->pending.config) { | 361 | if (plane_state->pending.config) { |
361 | mtk_ddp_comp_layer_config(ovl, i, plane_state); | 362 | mtk_ddp_comp_layer_config(comp, i, plane_state); |
362 | plane_state->pending.config = false; | 363 | plane_state->pending.config = false; |
363 | } | 364 | } |
364 | } | 365 | } |
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
370 | struct drm_crtc_state *old_state) | 371 | struct drm_crtc_state *old_state) |
371 | { | 372 | { |
372 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 373 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
373 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 374 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
374 | int ret; | 375 | int ret; |
375 | 376 | ||
376 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 377 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
377 | 378 | ||
378 | ret = mtk_smi_larb_get(ovl->larb_dev); | 379 | ret = mtk_smi_larb_get(comp->larb_dev); |
379 | if (ret) { | 380 | if (ret) { |
380 | DRM_ERROR("Failed to get larb: %d\n", ret); | 381 | DRM_ERROR("Failed to get larb: %d\n", ret); |
381 | return; | 382 | return; |
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
383 | 384 | ||
384 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); | 385 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); |
385 | if (ret) { | 386 | if (ret) { |
386 | mtk_smi_larb_put(ovl->larb_dev); | 387 | mtk_smi_larb_put(comp->larb_dev); |
387 | return; | 388 | return; |
388 | } | 389 | } |
389 | 390 | ||
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
395 | struct drm_crtc_state *old_state) | 396 | struct drm_crtc_state *old_state) |
396 | { | 397 | { |
397 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 398 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
398 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 399 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
399 | int i; | 400 | int i; |
400 | 401 | ||
401 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 402 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
403 | return; | 404 | return; |
404 | 405 | ||
405 | /* Set all pending plane state to disabled */ | 406 | /* Set all pending plane state to disabled */ |
406 | for (i = 0; i < OVL_LAYER_NR; i++) { | 407 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
407 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 408 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
408 | struct mtk_plane_state *plane_state; | 409 | struct mtk_plane_state *plane_state; |
409 | 410 | ||
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
418 | 419 | ||
419 | drm_crtc_vblank_off(crtc); | 420 | drm_crtc_vblank_off(crtc); |
420 | mtk_crtc_ddp_hw_fini(mtk_crtc); | 421 | mtk_crtc_ddp_hw_fini(mtk_crtc); |
421 | mtk_smi_larb_put(ovl->larb_dev); | 422 | mtk_smi_larb_put(comp->larb_dev); |
422 | 423 | ||
423 | mtk_crtc->enabled = false; | 424 | mtk_crtc->enabled = false; |
424 | } | 425 | } |
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, | |||
450 | 451 | ||
451 | if (mtk_crtc->event) | 452 | if (mtk_crtc->event) |
452 | mtk_crtc->pending_needs_vblank = true; | 453 | mtk_crtc->pending_needs_vblank = true; |
453 | for (i = 0; i < OVL_LAYER_NR; i++) { | 454 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
454 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 455 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
455 | struct mtk_plane_state *plane_state; | 456 | struct mtk_plane_state *plane_state; |
456 | 457 | ||
@@ -516,7 +517,7 @@ err_cleanup_crtc: | |||
516 | return ret; | 517 | return ret; |
517 | } | 518 | } |
518 | 519 | ||
519 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) | 520 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) |
520 | { | 521 | { |
521 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 522 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
522 | struct mtk_drm_private *priv = crtc->dev->dev_private; | 523 | struct mtk_drm_private *priv = crtc->dev->dev_private; |
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
598 | mtk_crtc->ddp_comp[i] = comp; | 599 | mtk_crtc->ddp_comp[i] = comp; |
599 | } | 600 | } |
600 | 601 | ||
601 | for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { | 602 | mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); |
603 | mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * | ||
604 | sizeof(struct drm_plane), | ||
605 | GFP_KERNEL); | ||
606 | |||
607 | for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { | ||
602 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : | 608 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : |
603 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : | 609 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : |
604 | DRM_PLANE_TYPE_OVERLAY; | 610 | DRM_PLANE_TYPE_OVERLAY; |
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
609 | } | 615 | } |
610 | 616 | ||
611 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], | 617 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], |
612 | &mtk_crtc->planes[1], pipe); | 618 | mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] : |
619 | NULL, pipe); | ||
613 | if (ret < 0) | 620 | if (ret < 0) |
614 | goto unprepare; | 621 | goto unprepare; |
615 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); | 622 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 9d9410c67ae9..091adb2087eb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h | |||
@@ -18,13 +18,12 @@ | |||
18 | #include "mtk_drm_ddp_comp.h" | 18 | #include "mtk_drm_ddp_comp.h" |
19 | #include "mtk_drm_plane.h" | 19 | #include "mtk_drm_plane.h" |
20 | 20 | ||
21 | #define OVL_LAYER_NR 4 | ||
22 | #define MTK_LUT_SIZE 512 | 21 | #define MTK_LUT_SIZE 512 |
23 | #define MTK_MAX_BPC 10 | 22 | #define MTK_MAX_BPC 10 |
24 | #define MTK_MIN_BPC 3 | 23 | #define MTK_MIN_BPC 3 |
25 | 24 | ||
26 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); | 25 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); |
27 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); | 26 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); |
28 | int mtk_drm_crtc_create(struct drm_device *drm_dev, | 27 | int mtk_drm_crtc_create(struct drm_device *drm_dev, |
29 | const enum mtk_ddp_comp_id *path, | 28 | const enum mtk_ddp_comp_id *path, |
30 | unsigned int path_len); | 29 | unsigned int path_len); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 87e4191c250e..546b3e3b300b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c | |||
@@ -106,6 +106,8 @@ | |||
106 | #define OVL1_MOUT_EN_COLOR1 0x1 | 106 | #define OVL1_MOUT_EN_COLOR1 0x1 |
107 | #define GAMMA_MOUT_EN_RDMA1 0x1 | 107 | #define GAMMA_MOUT_EN_RDMA1 0x1 |
108 | #define RDMA0_SOUT_DPI0 0x2 | 108 | #define RDMA0_SOUT_DPI0 0x2 |
109 | #define RDMA0_SOUT_DPI1 0x3 | ||
110 | #define RDMA0_SOUT_DSI1 0x1 | ||
109 | #define RDMA0_SOUT_DSI2 0x4 | 111 | #define RDMA0_SOUT_DSI2 0x4 |
110 | #define RDMA0_SOUT_DSI3 0x5 | 112 | #define RDMA0_SOUT_DSI3 0x5 |
111 | #define RDMA1_SOUT_DPI0 0x2 | 113 | #define RDMA1_SOUT_DPI0 0x2 |
@@ -122,6 +124,8 @@ | |||
122 | #define DPI0_SEL_IN_RDMA2 0x3 | 124 | #define DPI0_SEL_IN_RDMA2 0x3 |
123 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) | 125 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) |
124 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) | 126 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) |
127 | #define DSI0_SEL_IN_RDMA1 0x1 | ||
128 | #define DSI0_SEL_IN_RDMA2 0x4 | ||
125 | #define DSI1_SEL_IN_RDMA1 0x1 | 129 | #define DSI1_SEL_IN_RDMA1 0x1 |
126 | #define DSI1_SEL_IN_RDMA2 0x4 | 130 | #define DSI1_SEL_IN_RDMA2 0x4 |
127 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) | 131 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) |
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, | |||
224 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { | 228 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { |
225 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 229 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
226 | value = RDMA0_SOUT_DPI0; | 230 | value = RDMA0_SOUT_DPI0; |
231 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { | ||
232 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
233 | value = RDMA0_SOUT_DPI1; | ||
234 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) { | ||
235 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
236 | value = RDMA0_SOUT_DSI1; | ||
227 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { | 237 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { |
228 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 238 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
229 | value = RDMA0_SOUT_DSI2; | 239 | value = RDMA0_SOUT_DSI2; |
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
282 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { | 292 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { |
283 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 293 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
284 | value = DPI1_SEL_IN_RDMA1; | 294 | value = DPI1_SEL_IN_RDMA1; |
295 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) { | ||
296 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | ||
297 | value = DSI0_SEL_IN_RDMA1; | ||
285 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { | 298 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { |
286 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | 299 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; |
287 | value = DSI1_SEL_IN_RDMA1; | 300 | value = DSI1_SEL_IN_RDMA1; |
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
297 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { | 310 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { |
298 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 311 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
299 | value = DPI1_SEL_IN_RDMA2; | 312 | value = DPI1_SEL_IN_RDMA2; |
300 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | 313 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) { |
301 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 314 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
315 | value = DSI0_SEL_IN_RDMA2; | ||
316 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | ||
317 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | ||
302 | value = DSI1_SEL_IN_RDMA2; | 318 | value = DSI1_SEL_IN_RDMA2; |
303 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { | 319 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { |
304 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 320 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 7413ffeb3c9d..8399229e6ad2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | |||
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs { | |||
78 | void (*stop)(struct mtk_ddp_comp *comp); | 78 | void (*stop)(struct mtk_ddp_comp *comp); |
79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); | 79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); |
80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); | 80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); |
81 | unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); | ||
81 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); | 82 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); |
82 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); | 83 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); |
83 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, | 84 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, |
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) | |||
128 | comp->funcs->disable_vblank(comp); | 129 | comp->funcs->disable_vblank(comp); |
129 | } | 130 | } |
130 | 131 | ||
132 | static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) | ||
133 | { | ||
134 | if (comp->funcs && comp->funcs->layer_nr) | ||
135 | return comp->funcs->layer_nr(comp); | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
131 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, | 140 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, |
132 | unsigned int idx) | 141 | unsigned int idx) |
133 | { | 142 | { |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 39721119713b..47ec604289b7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c | |||
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev) | |||
381 | err_deinit: | 381 | err_deinit: |
382 | mtk_drm_kms_deinit(drm); | 382 | mtk_drm_kms_deinit(drm); |
383 | err_free: | 383 | err_free: |
384 | drm_dev_unref(drm); | 384 | drm_dev_put(drm); |
385 | return ret; | 385 | return ret; |
386 | } | 386 | } |
387 | 387 | ||
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev) | |||
390 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 390 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
391 | 391 | ||
392 | drm_dev_unregister(private->drm); | 392 | drm_dev_unregister(private->drm); |
393 | drm_dev_unref(private->drm); | 393 | drm_dev_put(private->drm); |
394 | private->drm = NULL; | 394 | private->drm = NULL; |
395 | } | 395 | } |
396 | 396 | ||
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev) | |||
564 | 564 | ||
565 | drm_dev_unregister(drm); | 565 | drm_dev_unregister(drm); |
566 | mtk_drm_kms_deinit(drm); | 566 | mtk_drm_kms_deinit(drm); |
567 | drm_dev_unref(drm); | 567 | drm_dev_put(drm); |
568 | 568 | ||
569 | component_master_del(&pdev->dev, &mtk_drm_ops); | 569 | component_master_del(&pdev->dev, &mtk_drm_ops); |
570 | pm_runtime_disable(&pdev->dev); | 570 | pm_runtime_disable(&pdev->dev); |
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev) | |||
580 | { | 580 | { |
581 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 581 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
582 | struct drm_device *drm = private->drm; | 582 | struct drm_device *drm = private->drm; |
583 | int ret; | ||
583 | 584 | ||
584 | drm_kms_helper_poll_disable(drm); | 585 | ret = drm_mode_config_helper_suspend(drm); |
585 | |||
586 | private->suspend_state = drm_atomic_helper_suspend(drm); | ||
587 | if (IS_ERR(private->suspend_state)) { | ||
588 | drm_kms_helper_poll_enable(drm); | ||
589 | return PTR_ERR(private->suspend_state); | ||
590 | } | ||
591 | |||
592 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); | 586 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); |
593 | return 0; | 587 | |
588 | return ret; | ||
594 | } | 589 | } |
595 | 590 | ||
596 | static int mtk_drm_sys_resume(struct device *dev) | 591 | static int mtk_drm_sys_resume(struct device *dev) |
597 | { | 592 | { |
598 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 593 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
599 | struct drm_device *drm = private->drm; | 594 | struct drm_device *drm = private->drm; |
595 | int ret; | ||
600 | 596 | ||
601 | drm_atomic_helper_resume(drm, private->suspend_state); | 597 | ret = drm_mode_config_helper_resume(drm); |
602 | drm_kms_helper_poll_enable(drm); | ||
603 | |||
604 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); | 598 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); |
605 | return 0; | 599 | |
600 | return ret; | ||
606 | } | 601 | } |
607 | #endif | 602 | #endif |
608 | 603 | ||
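
The mtk_drm_drv.c hunks above swap the deprecated drm_dev_unref() for drm_dev_put() and replace the open-coded suspend/resume sequence (poll disable, drm_atomic_helper_suspend(), stashed state) with the drm_mode_config_helper_suspend()/drm_mode_config_helper_resume() pair, which keep the suspend state inside struct drm_device and return an errno on failure. A minimal sketch of system PM hooks built on these helpers; the "foo" names and the assumption that drvdata is the drm_device itself are illustrative, only the DRM helpers come from the patch.

#include <linux/device.h>
#include <linux/pm.h>
#include <drm/drm_drv.h>
#include <drm/drm_modeset_helper.h>

static int foo_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);  /* assumption: drvdata is the drm_device */

        /* Disables output polling and suspends the atomic state internally. */
        return drm_mode_config_helper_suspend(drm);
}

static int foo_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        /* Restores the saved atomic state and re-enables output polling. */
        return drm_mode_config_helper_resume(drm);
}

static SIMPLE_DEV_PM_OPS(foo_drm_pm_ops, foo_drm_suspend, foo_drm_resume);
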
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 90837f7c7d0f..f4c7516eb989 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c | |||
@@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) | |||
302 | return clamp_val(reg, 0, 1023) & (0xff << 2); | 302 | return clamp_val(reg, 0, 1023) & (0xff << 2); |
303 | } | 303 | } |
304 | 304 | ||
305 | static u16 adt7475_read_word(struct i2c_client *client, int reg) | 305 | static int adt7475_read_word(struct i2c_client *client, int reg) |
306 | { | 306 | { |
307 | u16 val; | 307 | int val1, val2; |
308 | 308 | ||
309 | val = i2c_smbus_read_byte_data(client, reg); | 309 | val1 = i2c_smbus_read_byte_data(client, reg); |
310 | val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); | 310 | if (val1 < 0) |
311 | return val1; | ||
312 | val2 = i2c_smbus_read_byte_data(client, reg + 1); | ||
313 | if (val2 < 0) | ||
314 | return val2; | ||
311 | 315 | ||
312 | return val; | 316 | return val1 | (val2 << 8); |
313 | } | 317 | } |
314 | 318 | ||
315 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) | 319 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) |
@@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, | |||
962 | { | 966 | { |
963 | struct adt7475_data *data = adt7475_update_device(dev); | 967 | struct adt7475_data *data = adt7475_update_device(dev); |
964 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); | 968 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); |
965 | int i = clamp_val(data->range[sattr->index] & 0xf, 0, | 969 | int idx; |
966 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
967 | 970 | ||
968 | if (IS_ERR(data)) | 971 | if (IS_ERR(data)) |
969 | return PTR_ERR(data); | 972 | return PTR_ERR(data); |
973 | idx = clamp_val(data->range[sattr->index] & 0xf, 0, | ||
974 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
970 | 975 | ||
971 | return sprintf(buf, "%d\n", pwmfreq_table[i]); | 976 | return sprintf(buf, "%d\n", pwmfreq_table[idx]); |
972 | } | 977 | } |
973 | 978 | ||
974 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, | 979 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, |
@@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, | |||
1004 | char *buf) | 1009 | char *buf) |
1005 | { | 1010 | { |
1006 | struct adt7475_data *data = adt7475_update_device(dev); | 1011 | struct adt7475_data *data = adt7475_update_device(dev); |
1012 | |||
1013 | if (IS_ERR(data)) | ||
1014 | return PTR_ERR(data); | ||
1015 | |||
1007 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); | 1016 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); |
1008 | } | 1017 | } |
1009 | 1018 | ||
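
The adt7475 hunks make adt7475_read_word() return int so a negative errno from either i2c_smbus_read_byte_data() call is propagated instead of being OR-ed into a u16, and they move the IS_ERR(data) checks ahead of any use of the update result in the pwmfreq and pwm_use_point2 show paths. A hedged sketch of the same error-propagation pattern for a 16-bit register split across two byte reads; the function name is illustrative.

#include <linux/i2c.h>

static int example_read_word(struct i2c_client *client, int reg)
{
        int lo, hi;

        lo = i2c_smbus_read_byte_data(client, reg);
        if (lo < 0)
                return lo;      /* propagate the bus error */

        hi = i2c_smbus_read_byte_data(client, reg + 1);
        if (hi < 0)
                return hi;

        return lo | (hi << 8); /* non-negative 16-bit value */
}
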
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e6aeabbf84..71d3445ba869 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c | |||
@@ -17,7 +17,7 @@ | |||
17 | * Bi-directional Current/Power Monitor with I2C Interface | 17 | * Bi-directional Current/Power Monitor with I2C Interface |
18 | * Datasheet: http://www.ti.com/product/ina230 | 18 | * Datasheet: http://www.ti.com/product/ina230 |
19 | * | 19 | * |
20 | * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> | 20 | * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> |
21 | * Thanks to Jan Volkering | 21 | * Thanks to Jan Volkering |
22 | * | 22 | * |
23 | * This program is free software; you can redistribute it and/or modify | 23 | * This program is free software; you can redistribute it and/or modify |
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val) | |||
329 | return 0; | 329 | return 0; |
330 | } | 330 | } |
331 | 331 | ||
332 | static ssize_t ina2xx_show_shunt(struct device *dev, | ||
333 | struct device_attribute *da, | ||
334 | char *buf) | ||
335 | { | ||
336 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
337 | |||
338 | return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt); | ||
339 | } | ||
340 | |||
332 | static ssize_t ina2xx_store_shunt(struct device *dev, | 341 | static ssize_t ina2xx_store_shunt(struct device *dev, |
333 | struct device_attribute *da, | 342 | struct device_attribute *da, |
334 | const char *buf, size_t count) | 343 | const char *buf, size_t count) |
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, | |||
403 | 412 | ||
404 | /* shunt resistance */ | 413 | /* shunt resistance */ |
405 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, | 414 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, |
406 | ina2xx_show_value, ina2xx_store_shunt, | 415 | ina2xx_show_shunt, ina2xx_store_shunt, |
407 | INA2XX_CALIBRATION); | 416 | INA2XX_CALIBRATION); |
408 | 417 | ||
409 | /* update interval (ina226 only) */ | 418 | /* update interval (ina226 only) */ |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c6bd61e4695a..944f5b63aecd 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <linux/bitops.h> | 63 | #include <linux/bitops.h> |
64 | #include <linux/dmi.h> | 64 | #include <linux/dmi.h> |
65 | #include <linux/io.h> | 65 | #include <linux/io.h> |
66 | #include <linux/nospec.h> | ||
66 | #include "lm75.h" | 67 | #include "lm75.h" |
67 | 68 | ||
68 | #define USE_ALTERNATE | 69 | #define USE_ALTERNATE |
@@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, | |||
2689 | return err; | 2690 | return err; |
2690 | if (val > NUM_TEMP) | 2691 | if (val > NUM_TEMP) |
2691 | return -EINVAL; | 2692 | return -EINVAL; |
2693 | val = array_index_nospec(val, NUM_TEMP + 1); | ||
2692 | if (val && (!(data->have_temp & BIT(val - 1)) || | 2694 | if (val && (!(data->have_temp & BIT(val - 1)) || |
2693 | !data->temp_src[val - 1])) | 2695 | !data->temp_src[val - 1])) |
2694 | return -EINVAL; | 2696 | return -EINVAL; |
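
The nct6775 hunk clamps the user-supplied temperature-source index with array_index_nospec() right after the bounds check, so the index cannot be used under speculation to read past the table. A minimal sketch of the pattern with a hypothetical lookup table:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/nospec.h>

static const int example_table[16] = { 0 };     /* hypothetical table */

static int example_lookup(unsigned long val)
{
        if (val >= ARRAY_SIZE(example_table))
                return -EINVAL;

        /* Bound the index for speculative execution as well as architecturally. */
        val = array_index_nospec(val, ARRAY_SIZE(example_table));

        return example_table[val];
}
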
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 6ec65adaba49..c33dcfb87993 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
@@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap) | |||
110 | } | 110 | } |
111 | #ifdef DEBUG | 111 | #ifdef DEBUG |
112 | if (jiffies != start && i2c_debug >= 3) | 112 | if (jiffies != start && i2c_debug >= 3) |
113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " | 113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n", |
114 | "high\n", jiffies - start); | 114 | jiffies - start); |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | done: | 117 | done: |
@@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
171 | setsda(adap, sb); | 171 | setsda(adap, sb); |
172 | udelay((adap->udelay + 1) / 2); | 172 | udelay((adap->udelay + 1) / 2); |
173 | if (sclhi(adap) < 0) { /* timed out */ | 173 | if (sclhi(adap) < 0) { /* timed out */ |
174 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 174 | bit_dbg(1, &i2c_adap->dev, |
175 | "timeout at bit #%d\n", (int)c, i); | 175 | "i2c_outb: 0x%02x, timeout at bit #%d\n", |
176 | (int)c, i); | ||
176 | return -ETIMEDOUT; | 177 | return -ETIMEDOUT; |
177 | } | 178 | } |
178 | /* FIXME do arbitration here: | 179 | /* FIXME do arbitration here: |
@@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
185 | } | 186 | } |
186 | sdahi(adap); | 187 | sdahi(adap); |
187 | if (sclhi(adap) < 0) { /* timeout */ | 188 | if (sclhi(adap) < 0) { /* timeout */ |
188 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 189 | bit_dbg(1, &i2c_adap->dev, |
189 | "timeout at ack\n", (int)c); | 190 | "i2c_outb: 0x%02x, timeout at ack\n", (int)c); |
190 | return -ETIMEDOUT; | 191 | return -ETIMEDOUT; |
191 | } | 192 | } |
192 | 193 | ||
@@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) | |||
215 | sdahi(adap); | 216 | sdahi(adap); |
216 | for (i = 0; i < 8; i++) { | 217 | for (i = 0; i < 8; i++) { |
217 | if (sclhi(adap) < 0) { /* timeout */ | 218 | if (sclhi(adap) < 0) { /* timeout */ |
218 | bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " | 219 | bit_dbg(1, &i2c_adap->dev, |
219 | "#%d\n", 7 - i); | 220 | "i2c_inb: timeout at bit #%d\n", |
221 | 7 - i); | ||
220 | return -ETIMEDOUT; | 222 | return -ETIMEDOUT; |
221 | } | 223 | } |
222 | indata *= 2; | 224 | indata *= 2; |
@@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
265 | goto bailout; | 267 | goto bailout; |
266 | } | 268 | } |
267 | if (!scl) { | 269 | if (!scl) { |
268 | printk(KERN_WARNING "%s: SCL unexpected low " | 270 | printk(KERN_WARNING |
269 | "while pulling SDA low!\n", name); | 271 | "%s: SCL unexpected low while pulling SDA low!\n", |
272 | name); | ||
270 | goto bailout; | 273 | goto bailout; |
271 | } | 274 | } |
272 | 275 | ||
@@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
278 | goto bailout; | 281 | goto bailout; |
279 | } | 282 | } |
280 | if (!scl) { | 283 | if (!scl) { |
281 | printk(KERN_WARNING "%s: SCL unexpected low " | 284 | printk(KERN_WARNING |
282 | "while pulling SDA high!\n", name); | 285 | "%s: SCL unexpected low while pulling SDA high!\n", |
286 | name); | ||
283 | goto bailout; | 287 | goto bailout; |
284 | } | 288 | } |
285 | 289 | ||
@@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
291 | goto bailout; | 295 | goto bailout; |
292 | } | 296 | } |
293 | if (!sda) { | 297 | if (!sda) { |
294 | printk(KERN_WARNING "%s: SDA unexpected low " | 298 | printk(KERN_WARNING |
295 | "while pulling SCL low!\n", name); | 299 | "%s: SDA unexpected low while pulling SCL low!\n", |
300 | name); | ||
296 | goto bailout; | 301 | goto bailout; |
297 | } | 302 | } |
298 | 303 | ||
@@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
304 | goto bailout; | 309 | goto bailout; |
305 | } | 310 | } |
306 | if (!sda) { | 311 | if (!sda) { |
307 | printk(KERN_WARNING "%s: SDA unexpected low " | 312 | printk(KERN_WARNING |
308 | "while pulling SCL high!\n", name); | 313 | "%s: SDA unexpected low while pulling SCL high!\n", |
314 | name); | ||
309 | goto bailout; | 315 | goto bailout; |
310 | } | 316 | } |
311 | 317 | ||
@@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap, | |||
352 | i2c_start(adap); | 358 | i2c_start(adap); |
353 | } | 359 | } |
354 | if (i && ret) | 360 | if (i && ret) |
355 | bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at " | 361 | bit_dbg(1, &i2c_adap->dev, |
356 | "0x%02x: %s\n", i + 1, | 362 | "Used %d tries to %s client at 0x%02x: %s\n", i + 1, |
357 | addr & 1 ? "read from" : "write to", addr >> 1, | 363 | addr & 1 ? "read from" : "write to", addr >> 1, |
358 | ret == 1 ? "success" : "failed, timeout?"); | 364 | ret == 1 ? "success" : "failed, timeout?"); |
359 | return ret; | 365 | return ret; |
@@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
442 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { | 448 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { |
443 | if (!(flags & I2C_M_NO_RD_ACK)) | 449 | if (!(flags & I2C_M_NO_RD_ACK)) |
444 | acknak(i2c_adap, 0); | 450 | acknak(i2c_adap, 0); |
445 | dev_err(&i2c_adap->dev, "readbytes: invalid " | 451 | dev_err(&i2c_adap->dev, |
446 | "block length (%d)\n", inval); | 452 | "readbytes: invalid block length (%d)\n", |
453 | inval); | ||
447 | return -EPROTO; | 454 | return -EPROTO; |
448 | } | 455 | } |
449 | /* The original count value accounts for the extra | 456 | /* The original count value accounts for the extra |
@@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
506 | return -ENXIO; | 513 | return -ENXIO; |
507 | } | 514 | } |
508 | if (flags & I2C_M_RD) { | 515 | if (flags & I2C_M_RD) { |
509 | bit_dbg(3, &i2c_adap->dev, "emitting repeated " | 516 | bit_dbg(3, &i2c_adap->dev, |
510 | "start condition\n"); | 517 | "emitting repeated start condition\n"); |
511 | i2c_repstart(adap); | 518 | i2c_repstart(adap); |
512 | /* okay, now switch into reading mode */ | 519 | /* okay, now switch into reading mode */ |
513 | addr |= 0x01; | 520 | addr |= 0x01; |
@@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, | |||
564 | } | 571 | } |
565 | ret = bit_doAddress(i2c_adap, pmsg); | 572 | ret = bit_doAddress(i2c_adap, pmsg); |
566 | if ((ret != 0) && !nak_ok) { | 573 | if ((ret != 0) && !nak_ok) { |
567 | bit_dbg(1, &i2c_adap->dev, "NAK from " | 574 | bit_dbg(1, &i2c_adap->dev, |
568 | "device addr 0x%02x msg #%d\n", | 575 | "NAK from device addr 0x%02x msg #%d\n", |
569 | msgs[i].addr, i); | 576 | msgs[i].addr, i); |
570 | goto bailout; | 577 | goto bailout; |
571 | } | 578 | } |
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e18442b9973a..94d94b4a9a0d 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
@@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) | |||
708 | i2c_set_adapdata(adap, dev); | 708 | i2c_set_adapdata(adap, dev); |
709 | 709 | ||
710 | if (dev->pm_disabled) { | 710 | if (dev->pm_disabled) { |
711 | dev_pm_syscore_device(dev->dev, true); | ||
712 | irq_flags = IRQF_NO_SUSPEND; | 711 | irq_flags = IRQF_NO_SUSPEND; |
713 | } else { | 712 | } else { |
714 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; | 713 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 1a8d2da5b000..b5750fd85125 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev) | |||
434 | { | 434 | { |
435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
436 | 436 | ||
437 | if (i_dev->pm_disabled) | ||
438 | return 0; | ||
439 | |||
437 | i_dev->disable(i_dev); | 440 | i_dev->disable(i_dev); |
438 | i2c_dw_prepare_clk(i_dev, false); | 441 | i2c_dw_prepare_clk(i_dev, false); |
439 | 442 | ||
@@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev) | |||
444 | { | 447 | { |
445 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 448 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
446 | 449 | ||
447 | i2c_dw_prepare_clk(i_dev, true); | 450 | if (!i_dev->pm_disabled) |
451 | i2c_dw_prepare_clk(i_dev, true); | ||
452 | |||
448 | i_dev->init(i_dev); | 453 | i_dev->init(i_dev); |
449 | 454 | ||
450 | return 0; | 455 | return 0; |
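
Rather than marking the controller as a syscore device, the designware hunks make the platform suspend/resume callbacks honour pm_disabled directly: suspend becomes a no-op and resume skips the clock re-enable. A sketch of the same gating, with a hypothetical driver struct and stubbed helpers standing in for the designware ones:

#include <linux/device.h>

struct example_dev {
        bool pm_disabled;       /* set when PM must leave the controller untouched */
};

static void example_disable(struct example_dev *d) { }                 /* stub */
static void example_prepare_clk(struct example_dev *d, bool on) { }    /* stub */
static void example_init(struct example_dev *d) { }                    /* stub */

static int example_plat_suspend(struct device *dev)
{
        struct example_dev *d = dev_get_drvdata(dev);

        if (d->pm_disabled)
                return 0;       /* keep the controller powered and clocked */

        example_disable(d);
        example_prepare_clk(d, false);
        return 0;
}

static int example_plat_resume(struct device *dev)
{
        struct example_dev *d = dev_get_drvdata(dev);

        if (!d->pm_disabled)
                example_prepare_clk(d, true);

        example_init(d);
        return 0;
}
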
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 941c223f6491..04b60a349d7e 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -1415,6 +1415,13 @@ static void i801_add_tco(struct i801_priv *priv) | |||
1415 | } | 1415 | } |
1416 | 1416 | ||
1417 | #ifdef CONFIG_ACPI | 1417 | #ifdef CONFIG_ACPI |
1418 | static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, | ||
1419 | acpi_physical_address address) | ||
1420 | { | ||
1421 | return address >= priv->smba && | ||
1422 | address <= pci_resource_end(priv->pci_dev, SMBBAR); | ||
1423 | } | ||
1424 | |||
1418 | static acpi_status | 1425 | static acpi_status |
1419 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | 1426 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, |
1420 | u64 *value, void *handler_context, void *region_context) | 1427 | u64 *value, void *handler_context, void *region_context) |
@@ -1430,7 +1437,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | |||
1430 | */ | 1437 | */ |
1431 | mutex_lock(&priv->acpi_lock); | 1438 | mutex_lock(&priv->acpi_lock); |
1432 | 1439 | ||
1433 | if (!priv->acpi_reserved) { | 1440 | if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { |
1434 | priv->acpi_reserved = true; | 1441 | priv->acpi_reserved = true; |
1435 | 1442 | ||
1436 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | 1443 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); |
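
The i801 hunk adds i801_acpi_is_smbus_ioport() so the ACPI SystemIO handler only reserves the bus (and warns that the BIOS is touching it) when the accessed address actually falls inside the adapter's SMBus I/O window, instead of on any I/O access routed through the handler. A sketch of such a range predicate, with hypothetical base/end fields standing in for the PCI BAR lookup:

#include <linux/types.h>

struct example_priv {
        unsigned long io_base;  /* first port of the SMBus window (hypothetical field) */
        unsigned long io_end;   /* last port of the window, inclusive */
};

/* True only when the ACPI access targets this adapter's own register window. */
static bool example_is_own_ioport(const struct example_priv *priv,
                                  unsigned long address)
{
        return address >= priv->io_base && address <= priv->io_end;
}
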
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 439e8778f849..818cab14e87c 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
@@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data) | |||
507 | pd->pos = pd->msg->len; | 507 | pd->pos = pd->msg->len; |
508 | pd->stop_after_dma = true; | 508 | pd->stop_after_dma = true; |
509 | 509 | ||
510 | i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf); | ||
511 | |||
512 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); | 510 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); |
513 | } | 511 | } |
514 | 512 | ||
@@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) | |||
602 | dma_async_issue_pending(chan); | 600 | dma_async_issue_pending(chan); |
603 | } | 601 | } |
604 | 602 | ||
605 | static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | 603 | static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, |
606 | bool do_init) | 604 | bool do_init) |
607 | { | 605 | { |
608 | if (do_init) { | 606 | if (do_init) { |
609 | /* Initialize channel registers */ | 607 | /* Initialize channel registers */ |
@@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | |||
627 | 625 | ||
628 | /* Enable all interrupts to begin with */ | 626 | /* Enable all interrupts to begin with */ |
629 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | 627 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); |
630 | return 0; | ||
631 | } | 628 | } |
632 | 629 | ||
633 | static int poll_dte(struct sh_mobile_i2c_data *pd) | 630 | static int poll_dte(struct sh_mobile_i2c_data *pd) |
@@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
698 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; | 695 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; |
699 | pd->stop_after_dma = false; | 696 | pd->stop_after_dma = false; |
700 | 697 | ||
701 | err = start_ch(pd, msg, do_start); | 698 | start_ch(pd, msg, do_start); |
702 | if (err) | ||
703 | break; | ||
704 | 699 | ||
705 | if (do_start) | 700 | if (do_start) |
706 | i2c_op(pd, OP_START, 0); | 701 | i2c_op(pd, OP_START, 0); |
@@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
709 | timeout = wait_event_timeout(pd->wait, | 704 | timeout = wait_event_timeout(pd->wait, |
710 | pd->sr & (ICSR_TACK | SW_DONE), | 705 | pd->sr & (ICSR_TACK | SW_DONE), |
711 | adapter->timeout); | 706 | adapter->timeout); |
707 | |||
708 | /* 'stop_after_dma' tells whether the DMA transfer completed */ | ||
709 | i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); | ||
710 | |||
712 | if (!timeout) { | 711 | if (!timeout) { |
713 | dev_err(pd->dev, "Transfer request timed out\n"); | 712 | dev_err(pd->dev, "Transfer request timed out\n"); |
714 | if (pd->dma_direction != DMA_NONE) | 713 | if (pd->dma_direction != DMA_NONE) |
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f15737763608..9ee9a15e7134 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
@@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) | |||
2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); | 2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); |
2294 | 2294 | ||
2295 | /** | 2295 | /** |
2296 | * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg | 2296 | * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg |
2297 | * @msg: the message to be synced with | ||
2298 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. | 2297 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. |
2298 | * @msg: the message which the buffer corresponds to | ||
2299 | * @xferred: bool saying if the message was transferred | ||
2299 | */ | 2300 | */ |
2300 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf) | 2301 | void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) |
2301 | { | 2302 | { |
2302 | if (!buf || buf == msg->buf) | 2303 | if (!buf || buf == msg->buf) |
2303 | return; | 2304 | return; |
2304 | 2305 | ||
2305 | if (msg->flags & I2C_M_RD) | 2306 | if (xferred && msg->flags & I2C_M_RD) |
2306 | memcpy(msg->buf, buf, msg->len); | 2307 | memcpy(msg->buf, buf, msg->len); |
2307 | 2308 | ||
2308 | kfree(buf); | 2309 | kfree(buf); |
2309 | } | 2310 | } |
2310 | EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf); | 2311 | EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); |
2311 | 2312 | ||
2312 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); | 2313 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); |
2313 | MODULE_DESCRIPTION("I2C-Bus main module"); | 2314 | MODULE_DESCRIPTION("I2C-Bus main module"); |
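
The sh_mobile and i2c-core hunks rename the release helper to i2c_put_dma_safe_msg_buf() and add an explicit 'xferred' argument, so the bounce buffer is copied back into a read message only when the transfer actually completed; the call also moves out of the DMA completion callback into the transfer path, where the outcome is known. A hedged sketch of how a bus driver pairs the get/put helpers (the threshold and the elided DMA step are illustrative):

#include <linux/errno.h>
#include <linux/i2c.h>

static int example_xfer_one(struct i2c_msg *msg)
{
        bool xferred = false;
        u8 *dma_buf;

        /* Returns NULL when the message is below the threshold or allocation fails;
         * a real driver would fall back to PIO in that case. */
        dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
        if (!dma_buf)
                return -ENOMEM;

        /* ... map dma_buf, run the DMA transfer, wait for completion ... */
        xferred = true;         /* set only once the hardware finished the message */

        /* Copies read data back into msg->buf only when 'xferred' is true, then frees
         * the bounce buffer (a no-op if msg->buf was already DMA safe). */
        i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);

        return 0;
}
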
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 648eb6743ed5..6edffeed9953 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, | |||
238 | mmc_exit_request(mq->queue, req); | 238 | mmc_exit_request(mq->queue, req); |
239 | } | 239 | } |
240 | 240 | ||
241 | /* | ||
242 | * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests | ||
243 | * will not be dispatched in parallel. | ||
244 | */ | ||
245 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | 241 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
246 | const struct blk_mq_queue_data *bd) | 242 | const struct blk_mq_queue_data *bd) |
247 | { | 243 | { |
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
264 | 260 | ||
265 | spin_lock_irq(q->queue_lock); | 261 | spin_lock_irq(q->queue_lock); |
266 | 262 | ||
267 | if (mq->recovery_needed) { | 263 | if (mq->recovery_needed || mq->busy) { |
268 | spin_unlock_irq(q->queue_lock); | 264 | spin_unlock_irq(q->queue_lock); |
269 | return BLK_STS_RESOURCE; | 265 | return BLK_STS_RESOURCE; |
270 | } | 266 | } |
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
291 | break; | 287 | break; |
292 | } | 288 | } |
293 | 289 | ||
290 | /* Parallel dispatch of requests is not supported at the moment */ | ||
291 | mq->busy = true; | ||
292 | |||
294 | mq->in_flight[issue_type] += 1; | 293 | mq->in_flight[issue_type] += 1; |
295 | get_card = (mmc_tot_in_flight(mq) == 1); | 294 | get_card = (mmc_tot_in_flight(mq) == 1); |
296 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); | 295 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); |
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
333 | mq->in_flight[issue_type] -= 1; | 332 | mq->in_flight[issue_type] -= 1; |
334 | if (mmc_tot_in_flight(mq) == 0) | 333 | if (mmc_tot_in_flight(mq) == 0) |
335 | put_card = true; | 334 | put_card = true; |
335 | mq->busy = false; | ||
336 | spin_unlock_irq(q->queue_lock); | 336 | spin_unlock_irq(q->queue_lock); |
337 | if (put_card) | 337 | if (put_card) |
338 | mmc_put_card(card, &mq->ctx); | 338 | mmc_put_card(card, &mq->ctx); |
339 | } else { | ||
340 | WRITE_ONCE(mq->busy, false); | ||
339 | } | 341 | } |
340 | 342 | ||
341 | return ret; | 343 | return ret; |
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 17e59d50b496..9bf3c9245075 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h | |||
@@ -81,6 +81,7 @@ struct mmc_queue { | |||
81 | unsigned int cqe_busy; | 81 | unsigned int cqe_busy; |
82 | #define MMC_CQE_DCMD_BUSY BIT(0) | 82 | #define MMC_CQE_DCMD_BUSY BIT(0) |
83 | #define MMC_CQE_QUEUE_FULL BIT(1) | 83 | #define MMC_CQE_QUEUE_FULL BIT(1) |
84 | bool busy; | ||
84 | bool use_cqe; | 85 | bool use_cqe; |
85 | bool recovery_needed; | 86 | bool recovery_needed; |
86 | bool in_recovery; | 87 | bool in_recovery; |
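
The mmc/core hunks replace the comment promising that requests are never dispatched in parallel with an explicit mq->busy flag: mmc_mq_queue_rq() now bails out with BLK_STS_RESOURCE while another request is still being issued, and clears the flag once issuing finishes (or on the error path, under the queue lock). A condensed sketch of that single-dispatch guard; the queue struct and the elided issue step are illustrative:

#include <linux/blk_types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

struct example_queue {
        spinlock_t lock;
        bool busy;              /* only one request may be in the issue path at a time */
};

static blk_status_t example_queue_rq(struct example_queue *mq)
{
        spin_lock_irq(&mq->lock);
        if (mq->busy) {
                spin_unlock_irq(&mq->lock);
                return BLK_STS_RESOURCE;        /* blk-mq will retry the dispatch later */
        }
        mq->busy = true;
        spin_unlock_irq(&mq->lock);

        /* ... hand the request to the host controller ... */

        WRITE_ONCE(mq->busy, false);            /* allow the next dispatch */
        return BLK_STS_OK;
}
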
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 294de177632c..61e4e2a213c9 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c | |||
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, | |||
217 | * We don't really have DMA, so we need | 217 | * We don't really have DMA, so we need |
218 | * to copy from our platform driver buffer | 218 | * to copy from our platform driver buffer |
219 | */ | 219 | */ |
220 | sg_copy_to_buffer(data->sg, 1, host->virt_base, | 220 | sg_copy_from_buffer(data->sg, 1, host->virt_base, |
221 | data->sg->length); | 221 | data->sg->length); |
222 | } | 222 | } |
223 | host->data->bytes_xfered += data->sg->length; | 223 | host->data->bytes_xfered += data->sg->length; |
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, | |||
393 | * We don't really have DMA, so we need to copy to our | 393 | * We don't really have DMA, so we need to copy to our |
394 | * platform driver buffer | 394 | * platform driver buffer |
395 | */ | 395 | */ |
396 | sg_copy_from_buffer(data->sg, 1, host->virt_base, | 396 | sg_copy_to_buffer(data->sg, 1, host->virt_base, |
397 | data->sg->length); | 397 | data->sg->length); |
398 | } | 398 | } |
399 | } | 399 | } |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 5aa2c9404e92..be53044086c7 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
1976 | do { | 1976 | do { |
1977 | value = atmci_readl(host, ATMCI_RDR); | 1977 | value = atmci_readl(host, ATMCI_RDR); |
1978 | if (likely(offset + 4 <= sg->length)) { | 1978 | if (likely(offset + 4 <= sg->length)) { |
1979 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); | 1979 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); |
1980 | 1980 | ||
1981 | offset += 4; | 1981 | offset += 4; |
1982 | nbytes += 4; | 1982 | nbytes += 4; |
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
1993 | } else { | 1993 | } else { |
1994 | unsigned int remaining = sg->length - offset; | 1994 | unsigned int remaining = sg->length - offset; |
1995 | 1995 | ||
1996 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); | 1996 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); |
1997 | nbytes += remaining; | 1997 | nbytes += remaining; |
1998 | 1998 | ||
1999 | flush_dcache_page(sg_page(sg)); | 1999 | flush_dcache_page(sg_page(sg)); |
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
2003 | goto done; | 2003 | goto done; |
2004 | 2004 | ||
2005 | offset = 4 - remaining; | 2005 | offset = 4 - remaining; |
2006 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, | 2006 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, |
2007 | offset, 0); | 2007 | offset, 0); |
2008 | nbytes += offset; | 2008 | nbytes += offset; |
2009 | } | 2009 | } |
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
2042 | 2042 | ||
2043 | do { | 2043 | do { |
2044 | if (likely(offset + 4 <= sg->length)) { | 2044 | if (likely(offset + 4 <= sg->length)) { |
2045 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); | 2045 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); |
2046 | atmci_writel(host, ATMCI_TDR, value); | 2046 | atmci_writel(host, ATMCI_TDR, value); |
2047 | 2047 | ||
2048 | offset += 4; | 2048 | offset += 4; |
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
2059 | unsigned int remaining = sg->length - offset; | 2059 | unsigned int remaining = sg->length - offset; |
2060 | 2060 | ||
2061 | value = 0; | 2061 | value = 0; |
2062 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); | 2062 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); |
2063 | nbytes += remaining; | 2063 | nbytes += remaining; |
2064 | 2064 | ||
2065 | host->sg = sg = sg_next(sg); | 2065 | host->sg = sg = sg_next(sg); |
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
2070 | } | 2070 | } |
2071 | 2071 | ||
2072 | offset = 4 - remaining; | 2072 | offset = 4 - remaining; |
2073 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, | 2073 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, |
2074 | offset, 0); | 2074 | offset, 0); |
2075 | atmci_writel(host, ATMCI_TDR, value); | 2075 | atmci_writel(host, ATMCI_TDR, value); |
2076 | nbytes += offset; | 2076 | nbytes += offset; |
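
The goldfish and atmel-mci hunks fix swapped scatterlist helpers: sg_copy_from_buffer()/sg_pcopy_from_buffer() write a linear buffer *into* the scatterlist (the receive direction), while sg_copy_to_buffer()/sg_pcopy_to_buffer() read *out of* the scatterlist into a linear buffer (the transmit direction). A short sketch of the intended pairing for a PIO-style host; the bounce buffer is illustrative.

#include <linux/scatterlist.h>

/* RX: data landed in bounce[], push it into the request's scatterlist. */
static void example_rx(struct scatterlist *sg, void *bounce, size_t len)
{
        sg_copy_from_buffer(sg, 1, bounce, len);
}

/* TX: pull data out of the scatterlist into bounce[] before writing it to the FIFO. */
static void example_tx(struct scatterlist *sg, void *bounce, size_t len)
{
        sg_copy_to_buffer(sg, 1, bounce, len);
}
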
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 35cc0de6be67..ca0b43973769 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c | |||
@@ -45,14 +45,16 @@ | |||
45 | /* DM_CM_RST */ | 45 | /* DM_CM_RST */ |
46 | #define RST_DTRANRST1 BIT(9) | 46 | #define RST_DTRANRST1 BIT(9) |
47 | #define RST_DTRANRST0 BIT(8) | 47 | #define RST_DTRANRST0 BIT(8) |
48 | #define RST_RESERVED_BITS GENMASK_ULL(32, 0) | 48 | #define RST_RESERVED_BITS GENMASK_ULL(31, 0) |
49 | 49 | ||
50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ | 50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ |
51 | #define INFO1_CLEAR 0 | 51 | #define INFO1_CLEAR 0 |
52 | #define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) | ||
52 | #define INFO1_DTRANEND1 BIT(17) | 53 | #define INFO1_DTRANEND1 BIT(17) |
53 | #define INFO1_DTRANEND0 BIT(16) | 54 | #define INFO1_DTRANEND0 BIT(16) |
54 | 55 | ||
55 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ | 56 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ |
57 | #define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) | ||
56 | #define INFO2_DTRANERR1 BIT(17) | 58 | #define INFO2_DTRANERR1 BIT(17) |
57 | #define INFO2_DTRANERR0 BIT(16) | 59 | #define INFO2_DTRANERR0 BIT(16) |
58 | 60 | ||
@@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, | |||
252 | { | 254 | { |
253 | struct renesas_sdhi *priv = host_to_priv(host); | 255 | struct renesas_sdhi *priv = host_to_priv(host); |
254 | 256 | ||
257 | /* Disable DMAC interrupts, we don't use them */ | ||
258 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, | ||
259 | INFO1_MASK_CLEAR); | ||
260 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, | ||
261 | INFO2_MASK_CLEAR); | ||
262 | |||
255 | /* Each value is set to non-zero to assume "enabling" each DMA */ | 263 | /* Each value is set to non-zero to assume "enabling" each DMA */ |
256 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; | 264 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; |
257 | 265 | ||
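
The renesas_sdhi hunk fixes an off-by-one in the reserved-bits mask: GENMASK_ULL(high, low) is inclusive on both ends, so GENMASK_ULL(32, 0) spans 33 bits, while the intended low 32-bit word is GENMASK_ULL(31, 0); it also masks the DMAC interrupts at request time since the driver does not use them. For reference, the macro's inclusive semantics:

#include <linux/bitops.h>

#define EXAMPLE_LOW_WORD        GENMASK_ULL(31, 0)      /* 0x00000000ffffffff: bits 31..0, 32 bits */
#define EXAMPLE_OFF_BY_ONE      GENMASK_ULL(32, 0)      /* 0x00000001ffffffff: bits 32..0, 33 bits */
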
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index ca18612c4201..67b2065e7a19 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
@@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali) | |||
1338 | 1338 | ||
1339 | denali_enable_irq(denali); | 1339 | denali_enable_irq(denali); |
1340 | denali_reset_banks(denali); | 1340 | denali_reset_banks(denali); |
1341 | if (!denali->max_banks) { | ||
1342 | /* Error out earlier if no chip is found for some reason. */ | ||
1343 | ret = -ENODEV; | ||
1344 | goto disable_irq; | ||
1345 | } | ||
1341 | 1346 | ||
1342 | denali->active_bank = DENALI_INVALID_BANK; | 1347 | denali->active_bank = DENALI_INVALID_BANK; |
1343 | 1348 | ||
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c index a3f04315c05c..427fcbc1b71c 100644 --- a/drivers/mtd/nand/raw/docg4.c +++ b/drivers/mtd/nand/raw/docg4.c | |||
@@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev) | |||
1218 | return 0; | 1218 | return 0; |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | static void __init init_mtd_structs(struct mtd_info *mtd) | 1221 | static void init_mtd_structs(struct mtd_info *mtd) |
1222 | { | 1222 | { |
1223 | /* initialize mtd and nand data structures */ | 1223 | /* initialize mtd and nand data structures */ |
1224 | 1224 | ||
@@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) | |||
1290 | 1290 | ||
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | static int __init read_id_reg(struct mtd_info *mtd) | 1293 | static int read_id_reg(struct mtd_info *mtd) |
1294 | { | 1294 | { |
1295 | struct nand_chip *nand = mtd_to_nand(mtd); | 1295 | struct nand_chip *nand = mtd_to_nand(mtd); |
1296 | struct docg4_priv *doc = nand_get_controller_data(nand); | 1296 | struct docg4_priv *doc = nand_get_controller_data(nand); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8bb1e38b1681..cecbb1d1f587 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) | |||
5913 | return bp->hw_resc.max_cp_rings; | 5913 | return bp->hw_resc.max_cp_rings; |
5914 | } | 5914 | } |
5915 | 5915 | ||
5916 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) | 5916 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
5917 | { | 5917 | { |
5918 | bp->hw_resc.max_cp_rings = max; | 5918 | return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); |
5919 | } | 5919 | } |
5920 | 5920 | ||
5921 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) | 5921 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) |
5922 | { | 5922 | { |
5923 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; | 5923 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
5924 | 5924 | ||
@@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) | |||
6684 | hw_resc->resv_rx_rings = 0; | 6684 | hw_resc->resv_rx_rings = 0; |
6685 | hw_resc->resv_hw_ring_grps = 0; | 6685 | hw_resc->resv_hw_ring_grps = 0; |
6686 | hw_resc->resv_vnics = 0; | 6686 | hw_resc->resv_vnics = 0; |
6687 | bp->tx_nr_rings = 0; | ||
6688 | bp->rx_nr_rings = 0; | ||
6687 | } | 6689 | } |
6688 | return rc; | 6690 | return rc; |
6689 | } | 6691 | } |
@@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, | |||
8629 | 8631 | ||
8630 | *max_tx = hw_resc->max_tx_rings; | 8632 | *max_tx = hw_resc->max_tx_rings; |
8631 | *max_rx = hw_resc->max_rx_rings; | 8633 | *max_rx = hw_resc->max_rx_rings; |
8632 | *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings); | 8634 | *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), |
8635 | hw_resc->max_irqs); | ||
8633 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); | 8636 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); |
8634 | max_ring_grps = hw_resc->max_hw_ring_grps; | 8637 | max_ring_grps = hw_resc->max_hw_ring_grps; |
8635 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { | 8638 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { |
@@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) | |||
8769 | if (bp->tx_nr_rings) | 8772 | if (bp->tx_nr_rings) |
8770 | return 0; | 8773 | return 0; |
8771 | 8774 | ||
8775 | bnxt_ulp_irq_stop(bp); | ||
8776 | bnxt_clear_int_mode(bp); | ||
8772 | rc = bnxt_set_dflt_rings(bp, true); | 8777 | rc = bnxt_set_dflt_rings(bp, true); |
8773 | if (rc) { | 8778 | if (rc) { |
8774 | netdev_err(bp->dev, "Not enough rings available.\n"); | 8779 | netdev_err(bp->dev, "Not enough rings available.\n"); |
8775 | return rc; | 8780 | goto init_dflt_ring_err; |
8776 | } | 8781 | } |
8777 | rc = bnxt_init_int_mode(bp); | 8782 | rc = bnxt_init_int_mode(bp); |
8778 | if (rc) | 8783 | if (rc) |
8779 | return rc; | 8784 | goto init_dflt_ring_err; |
8785 | |||
8780 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; | 8786 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
8781 | if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { | 8787 | if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { |
8782 | bp->flags |= BNXT_FLAG_RFS; | 8788 | bp->flags |= BNXT_FLAG_RFS; |
8783 | bp->dev->features |= NETIF_F_NTUPLE; | 8789 | bp->dev->features |= NETIF_F_NTUPLE; |
8784 | } | 8790 | } |
8785 | return 0; | 8791 | init_dflt_ring_err: |
8792 | bnxt_ulp_irq_restart(bp, rc); | ||
8793 | return rc; | ||
8786 | } | 8794 | } |
8787 | 8795 | ||
8788 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) | 8796 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index fefa011320e0..bde384630a75 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *); | |||
1481 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); | 1481 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); |
1482 | void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); | 1482 | void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); |
1483 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); | 1483 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); |
1484 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); | 1484 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp); |
1485 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); | ||
1486 | int bnxt_get_avail_msix(struct bnxt *bp, int num); | 1485 | int bnxt_get_avail_msix(struct bnxt *bp, int num); |
1487 | int bnxt_reserve_rings(struct bnxt *bp); | 1486 | int bnxt_reserve_rings(struct bnxt *bp); |
1488 | void bnxt_tx_disable(struct bnxt *bp); | 1487 | void bnxt_tx_disable(struct bnxt *bp); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 6d583bcd2a81..fcd085a9853a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) | |||
451 | 451 | ||
452 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); | 452 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); |
453 | 453 | ||
454 | vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; | 454 | vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
455 | vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; | 455 | vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
456 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 456 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
457 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; | 457 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; |
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) | |||
549 | max_stat_ctxs = hw_resc->max_stat_ctxs; | 549 | max_stat_ctxs = hw_resc->max_stat_ctxs; |
550 | 550 | ||
551 | /* Remaining rings are distributed equally among VFs for now */ | 551 | /* Remaining rings are distributed equally among VFs for now */ |
552 | vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; | 552 | vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) - |
553 | bp->cp_nr_rings) / num_vfs; | ||
553 | vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; | 554 | vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; |
554 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 555 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
555 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / | 556 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / |
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
643 | */ | 644 | */ |
644 | vfs_supported = *num_vfs; | 645 | vfs_supported = *num_vfs; |
645 | 646 | ||
646 | avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; | 647 | avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
647 | avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; | 648 | avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
648 | avail_cp = min_t(int, avail_cp, avail_stat); | 649 | avail_cp = min_t(int, avail_cp, avail_stat); |
649 | 650 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index c37b2842f972..beee61292d5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | |||
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, | |||
169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; | 169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; |
170 | } | 170 | } |
171 | bnxt_fill_msix_vecs(bp, ent); | 171 | bnxt_fill_msix_vecs(bp, ent); |
172 | bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); | ||
173 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; | 172 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; |
174 | return avail_msix; | 173 | return avail_msix; |
175 | } | 174 | } |
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
178 | { | 177 | { |
179 | struct net_device *dev = edev->net; | 178 | struct net_device *dev = edev->net; |
180 | struct bnxt *bp = netdev_priv(dev); | 179 | struct bnxt *bp = netdev_priv(dev); |
181 | int max_cp_rings, msix_requested; | ||
182 | 180 | ||
183 | ASSERT_RTNL(); | 181 | ASSERT_RTNL(); |
184 | if (ulp_id != BNXT_ROCE_ULP) | 182 | if (ulp_id != BNXT_ROCE_ULP) |
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
187 | if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) | 185 | if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) |
188 | return 0; | 186 | return 0; |
189 | 187 | ||
190 | max_cp_rings = bnxt_get_max_func_cp_rings(bp); | ||
191 | msix_requested = edev->ulp_tbl[ulp_id].msix_requested; | ||
192 | bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); | ||
193 | edev->ulp_tbl[ulp_id].msix_requested = 0; | 188 | edev->ulp_tbl[ulp_id].msix_requested = 0; |
194 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; | 189 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; |
195 | if (netif_running(dev)) { | 190 | if (netif_running(dev)) { |
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp) | |||
220 | return 0; | 215 | return 0; |
221 | } | 216 | } |
222 | 217 | ||
223 | void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id) | ||
224 | { | ||
225 | ASSERT_RTNL(); | ||
226 | if (bnxt_ulp_registered(bp->edev, ulp_id)) { | ||
227 | struct bnxt_en_dev *edev = bp->edev; | ||
228 | unsigned int msix_req, max; | ||
229 | |||
230 | msix_req = edev->ulp_tbl[ulp_id].msix_requested; | ||
231 | max = bnxt_get_max_func_cp_rings(bp); | ||
232 | bnxt_set_max_func_cp_rings(bp, max - msix_req); | ||
233 | max = bnxt_get_max_func_stat_ctxs(bp); | ||
234 | bnxt_set_max_func_stat_ctxs(bp, max - 1); | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, | 218 | static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, |
239 | struct bnxt_fw_msg *fw_msg) | 219 | struct bnxt_fw_msg *fw_msg) |
240 | { | 220 | { |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index df48ac71729f..d9bea37cd211 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | |||
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id) | |||
90 | 90 | ||
91 | int bnxt_get_ulp_msix_num(struct bnxt *bp); | 91 | int bnxt_get_ulp_msix_num(struct bnxt *bp); |
92 | int bnxt_get_ulp_msix_base(struct bnxt *bp); | 92 | int bnxt_get_ulp_msix_base(struct bnxt *bp); |
93 | void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id); | ||
94 | void bnxt_ulp_stop(struct bnxt *bp); | 93 | void bnxt_ulp_stop(struct bnxt *bp); |
95 | void bnxt_ulp_start(struct bnxt *bp); | 94 | void bnxt_ulp_start(struct bnxt *bp); |
96 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); | 95 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); |
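
Across the bnxt hunks, the stored hw_resc.max_cp_rings is no longer decremented and restored as the RoCE ULP takes or returns MSI-X vectors; the new read-only helper bnxt_get_max_func_cp_rings_for_en() subtracts the ULP vectors on demand, and the SR-IOV sizing code switches to it. A condensed usage sketch of how the VF completion-ring budget is derived under the new accounting, following the bnxt_sriov.c hunks and wrapped in an illustrative helper:

#include "bnxt.h"       /* for struct bnxt and the helpers used below */

/* Completion rings that can be handed to VFs, split equally among them. */
static unsigned int example_vf_cp_rings(struct bnxt *bp, int num_vfs)
{
        unsigned int avail;

        /* PF maximum minus the MSI-X vectors reserved for the RoCE ULP ... */
        avail = bnxt_get_max_func_cp_rings_for_en(bp);
        /* ... minus the completion rings the PF itself is using. */
        avail -= bp->cp_nr_rings;

        return avail / num_vfs;
}
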
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b773bc07edf7..14b49612aa86 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters { | |||
186 | #define UMAC_MAC1 0x010 | 186 | #define UMAC_MAC1 0x010 |
187 | #define UMAC_MAX_FRAME_LEN 0x014 | 187 | #define UMAC_MAX_FRAME_LEN 0x014 |
188 | 188 | ||
189 | #define UMAC_MODE 0x44 | ||
190 | #define MODE_LINK_STATUS (1 << 5) | ||
191 | |||
189 | #define UMAC_EEE_CTRL 0x064 | 192 | #define UMAC_EEE_CTRL 0x064 |
190 | #define EN_LPI_RX_PAUSE (1 << 0) | 193 | #define EN_LPI_RX_PAUSE (1 << 0) |
191 | #define EN_LPI_TX_PFC (1 << 1) | 194 | #define EN_LPI_TX_PFC (1 << 1) |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 5333274a283c..4241ae928d4a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev) | |||
115 | static int bcmgenet_fixed_phy_link_update(struct net_device *dev, | 115 | static int bcmgenet_fixed_phy_link_update(struct net_device *dev, |
116 | struct fixed_phy_status *status) | 116 | struct fixed_phy_status *status) |
117 | { | 117 | { |
118 | if (dev && dev->phydev && status) | 118 | struct bcmgenet_priv *priv; |
119 | status->link = dev->phydev->link; | 119 | u32 reg; |
120 | |||
121 | if (dev && dev->phydev && status) { | ||
122 | priv = netdev_priv(dev); | ||
123 | reg = bcmgenet_umac_readl(priv, UMAC_MODE); | ||
124 | status->link = !!(reg & MODE_LINK_STATUS); | ||
125 | } | ||
120 | 126 | ||
121 | return 0; | 127 | return 0; |
122 | } | 128 | } |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index c6707ea2d751..16e4ef7d7185 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
@@ -649,7 +649,7 @@ static int macb_halt_tx(struct macb *bp) | |||
649 | if (!(status & MACB_BIT(TGO))) | 649 | if (!(status & MACB_BIT(TGO))) |
650 | return 0; | 650 | return 0; |
651 | 651 | ||
652 | usleep_range(10, 250); | 652 | udelay(250); |
653 | } while (time_before(halt_time, timeout)); | 653 | } while (time_before(halt_time, timeout)); |
654 | 654 | ||
655 | return -ETIMEDOUT; | 655 | return -ETIMEDOUT; |
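
The macb hunk swaps usleep_range() for udelay() in the TGO polling loop of macb_halt_tx(); the usual reason for such a change is that the poll can run in atomic context (the halt is driven from the TX error path with the driver lock held), where sleeping primitives are not allowed. A sketch of a bounded, non-sleeping poll of that shape; the register and budget are illustrative.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

/* Poll a 'transmit active' bit without sleeping, so it is safe under a spinlock. */
static int example_halt_tx(void __iomem *status_reg, u32 busy_bit)
{
        unsigned long timeout = jiffies + usecs_to_jiffies(1000);       /* illustrative budget */

        do {
                if (!(readl(status_reg) & busy_bit))
                        return 0;
                udelay(250);    /* busy-wait; usleep_range() may sleep and is invalid here */
        } while (time_before(jiffies, timeout));

        return -ETIMEDOUT;
}
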
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index cad52bd331f7..08a750fb60c4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
@@ -486,6 +486,8 @@ struct hnae_ae_ops { | |||
486 | u8 *auto_neg, u16 *speed, u8 *duplex); | 486 | u8 *auto_neg, u16 *speed, u8 *duplex); |
487 | void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); | 487 | void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); |
488 | void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); | 488 | void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); |
489 | bool (*need_adjust_link)(struct hnae_handle *handle, | ||
490 | int speed, int duplex); | ||
489 | int (*set_loopback)(struct hnae_handle *handle, | 491 | int (*set_loopback)(struct hnae_handle *handle, |
490 | enum hnae_loop loop_mode, int en); | 492 | enum hnae_loop loop_mode, int en); |
491 | void (*get_ring_bdnum_limit)(struct hnae_queue *queue, | 493 | void (*get_ring_bdnum_limit)(struct hnae_queue *queue, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index e6aad30e7e69..b52029e26d15 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | |||
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle) | |||
155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; | 155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; |
156 | } | 156 | } |
157 | 157 | ||
158 | static int hns_ae_wait_flow_down(struct hnae_handle *handle) | ||
159 | { | ||
160 | struct dsaf_device *dsaf_dev; | ||
161 | struct hns_ppe_cb *ppe_cb; | ||
162 | struct hnae_vf_cb *vf_cb; | ||
163 | int ret; | ||
164 | int i; | ||
165 | |||
166 | for (i = 0; i < handle->q_num; i++) { | ||
167 | ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]); | ||
168 | if (ret) | ||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | ppe_cb = hns_get_ppe_cb(handle); | ||
173 | ret = hns_ppe_wait_tx_fifo_clean(ppe_cb); | ||
174 | if (ret) | ||
175 | return ret; | ||
176 | |||
177 | dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); | ||
178 | if (!dsaf_dev) | ||
179 | return -EINVAL; | ||
180 | ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id); | ||
181 | if (ret) | ||
182 | return ret; | ||
183 | |||
184 | vf_cb = hns_ae_get_vf_cb(handle); | ||
185 | ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb); | ||
186 | if (ret) | ||
187 | return ret; | ||
188 | |||
189 | mdelay(10); | ||
190 | return 0; | ||
191 | } | ||
192 | |||
158 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) | 193 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) |
159 | { | 194 | { |
160 | int q_num = handle->q_num; | 195 | int q_num = handle->q_num; |
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle, | |||
399 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); | 434 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); |
400 | } | 435 | } |
401 | 436 | ||
437 | static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed, | ||
438 | int duplex) | ||
439 | { | ||
440 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | ||
441 | |||
442 | return hns_mac_need_adjust_link(mac_cb, speed, duplex); | ||
443 | } | ||
444 | |||
402 | static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, | 445 | static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, |
403 | int duplex) | 446 | int duplex) |
404 | { | 447 | { |
405 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | 448 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); |
406 | 449 | ||
407 | hns_mac_adjust_link(mac_cb, speed, duplex); | 450 | switch (mac_cb->dsaf_dev->dsaf_ver) { |
451 | case AE_VERSION_1: | ||
452 | hns_mac_adjust_link(mac_cb, speed, duplex); | ||
453 | break; | ||
454 | |||
455 | case AE_VERSION_2: | ||
456 | /* chip needs to clear all packets inside */ | ||
457 | hns_mac_disable(mac_cb, MAC_COMM_MODE_RX); | ||
458 | if (hns_ae_wait_flow_down(handle)) { | ||
459 | hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); | ||
460 | break; | ||
461 | } | ||
462 | |||
463 | hns_mac_adjust_link(mac_cb, speed, duplex); | ||
464 | hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); | ||
465 | break; | ||
466 | |||
467 | default: | ||
468 | break; | ||
469 | } | ||
470 | |||
471 | return; | ||
408 | } | 472 | } |
409 | 473 | ||
410 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, | 474 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, |
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { | |||
902 | .get_status = hns_ae_get_link_status, | 966 | .get_status = hns_ae_get_link_status, |
903 | .get_info = hns_ae_get_mac_info, | 967 | .get_info = hns_ae_get_mac_info, |
904 | .adjust_link = hns_ae_adjust_link, | 968 | .adjust_link = hns_ae_adjust_link, |
969 | .need_adjust_link = hns_ae_need_adjust_link, | ||
905 | .set_loopback = hns_ae_config_loopback, | 970 | .set_loopback = hns_ae_config_loopback, |
906 | .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, | 971 | .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, |
907 | .get_pauseparam = hns_ae_get_pauseparam, | 972 | .get_pauseparam = hns_ae_get_pauseparam, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 5488c6e89f21..09e4061d1fa6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en, | |||
257 | *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); | 257 | *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); |
258 | } | 258 | } |
259 | 259 | ||
260 | static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed, | ||
261 | int duplex) | ||
262 | { | ||
263 | struct mac_driver *drv = (struct mac_driver *)mac_drv; | ||
264 | struct hns_mac_cb *mac_cb = drv->mac_cb; | ||
265 | |||
266 | return (mac_cb->speed != speed) || | ||
267 | (mac_cb->half_duplex == duplex); | ||
268 | } | ||
269 | |||
260 | static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, | 270 | static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, |
261 | u32 full_duplex) | 271 | u32 full_duplex) |
262 | { | 272 | { |
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) | |||
309 | hns_gmac_set_uc_match(mac_drv, en); | 319 | hns_gmac_set_uc_match(mac_drv, en); |
310 | } | 320 | } |
311 | 321 | ||
322 | int hns_gmac_wait_fifo_clean(void *mac_drv) | ||
323 | { | ||
324 | struct mac_driver *drv = (struct mac_driver *)mac_drv; | ||
325 | int wait_cnt; | ||
326 | u32 val; | ||
327 | |||
328 | wait_cnt = 0; | ||
329 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
330 | val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG); | ||
331 | /* bits [5:0]: packets not yet completely sent */ | ||
332 | if ((val & 0x3f) == 0) | ||
333 | break; | ||
334 | usleep_range(100, 200); | ||
335 | } | ||
336 | |||
337 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
338 | dev_err(drv->dev, | ||
339 | "hns ge %d fifo was not idle.\n", drv->mac_id); | ||
340 | return -EBUSY; | ||
341 | } | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | |||
312 | static void hns_gmac_init(void *mac_drv) | 346 | static void hns_gmac_init(void *mac_drv) |
313 | { | 347 | { |
314 | u32 port; | 348 | u32 port; |
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) | |||
690 | mac_drv->mac_disable = hns_gmac_disable; | 724 | mac_drv->mac_disable = hns_gmac_disable; |
691 | mac_drv->mac_free = hns_gmac_free; | 725 | mac_drv->mac_free = hns_gmac_free; |
692 | mac_drv->adjust_link = hns_gmac_adjust_link; | 726 | mac_drv->adjust_link = hns_gmac_adjust_link; |
727 | mac_drv->need_adjust_link = hns_gmac_need_adjust_link; | ||
693 | mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; | 728 | mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; |
694 | mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; | 729 | mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; |
695 | mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; | 730 | mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; |
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) | |||
717 | mac_drv->get_strings = hns_gmac_get_strings; | 752 | mac_drv->get_strings = hns_gmac_get_strings; |
718 | mac_drv->update_stats = hns_gmac_update_stats; | 753 | mac_drv->update_stats = hns_gmac_update_stats; |
719 | mac_drv->set_promiscuous = hns_gmac_set_promisc; | 754 | mac_drv->set_promiscuous = hns_gmac_set_promisc; |
755 | mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean; | ||
720 | 756 | ||
721 | return (void *)mac_drv; | 757 | return (void *)mac_drv; |
722 | } | 758 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 6521d8d53745..3613e400e816 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | |||
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, | |||
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
116 | 116 | ||
117 | /** | ||
118 | * hns_mac_need_adjust_link - check whether the MAC speed/duplex registers need updating | ||
119 | * @mac_cb: mac device | ||
120 | * @speed: phy device speed | ||
121 | * @duplex: phy device duplex | ||
122 | * | ||
123 | */ | ||
124 | bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | ||
125 | { | ||
126 | struct mac_driver *mac_ctrl_drv; | ||
127 | |||
128 | mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac); | ||
129 | |||
130 | if (mac_ctrl_drv->need_adjust_link) | ||
131 | return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv, | ||
132 | (enum mac_speed)speed, duplex); | ||
133 | else | ||
134 | return true; | ||
135 | } | ||
136 | |||
117 | void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | 137 | void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) |
118 | { | 138 | { |
119 | int ret; | 139 | int ret; |
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) | |||
430 | return 0; | 450 | return 0; |
431 | } | 451 | } |
432 | 452 | ||
453 | int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb) | ||
454 | { | ||
455 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); | ||
456 | |||
457 | if (drv->wait_fifo_clean) | ||
458 | return drv->wait_fifo_clean(drv); | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
433 | void hns_mac_reset(struct hns_mac_cb *mac_cb) | 463 | void hns_mac_reset(struct hns_mac_cb *mac_cb) |
434 | { | 464 | { |
435 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); | 465 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); |
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev) | |||
998 | return DSAF_MAX_PORT_NUM; | 1028 | return DSAF_MAX_PORT_NUM; |
999 | } | 1029 | } |
1000 | 1030 | ||
1031 | void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) | ||
1032 | { | ||
1033 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | ||
1034 | |||
1035 | mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode); | ||
1036 | } | ||
1037 | |||
1038 | void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) | ||
1039 | { | ||
1040 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | ||
1041 | |||
1042 | mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode); | ||
1043 | } | ||
1044 | |||
1001 | /** | 1045 | /** |
1002 | * hns_mac_init - init mac | 1046 | * hns_mac_init - init mac |
1003 | * @dsaf_dev: dsa fabric device struct pointer | 1047 | * @dsaf_dev: dsa fabric device struct pointer |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index bbc0a98e7ca3..fbc75341bef7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | |||
@@ -356,6 +356,9 @@ struct mac_driver { | |||
356 | /* adjust mac mode of port, including speed and duplex */ | 356 | /* adjust mac mode of port, including speed and duplex */ |
357 | int (*adjust_link)(void *mac_drv, enum mac_speed speed, | 357 | int (*adjust_link)(void *mac_drv, enum mac_speed speed, |
358 | u32 full_duplex); | 358 | u32 full_duplex); |
359 | /* check whether the link needs adjusting */ | ||
360 | bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed, | ||
361 | int duplex); | ||
359 | /* config autonegotiation mode of port */ | 362 | /* config autonegotiation mode of port */ |
360 | void (*set_an_mode)(void *mac_drv, u8 enable); | 363 | void (*set_an_mode)(void *mac_drv, u8 enable); |
361 | /* config loopback mode */ | 364 | /* config loopback mode */ |
@@ -394,6 +397,7 @@ struct mac_driver { | |||
394 | void (*get_info)(void *mac_drv, struct mac_info *mac_info); | 397 | void (*get_info)(void *mac_drv, struct mac_info *mac_info); |
395 | 398 | ||
396 | void (*update_stats)(void *mac_drv); | 399 | void (*update_stats)(void *mac_drv); |
400 | int (*wait_fifo_clean)(void *mac_drv); | ||
397 | 401 | ||
398 | enum mac_mode mac_mode; | 402 | enum mac_mode mac_mode; |
399 | u8 mac_id; | 403 | u8 mac_id; |
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, | |||
427 | 431 | ||
428 | int hns_mac_init(struct dsaf_device *dsaf_dev); | 432 | int hns_mac_init(struct dsaf_device *dsaf_dev); |
429 | void mac_adjust_link(struct net_device *net_dev); | 433 | void mac_adjust_link(struct net_device *net_dev); |
434 | bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); | ||
430 | void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); | 435 | void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); |
431 | int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); | 436 | int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); |
432 | int hns_mac_set_multi(struct hns_mac_cb *mac_cb, | 437 | int hns_mac_set_multi(struct hns_mac_cb *mac_cb, |
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, | |||
463 | int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, | 468 | int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, |
464 | const unsigned char *addr); | 469 | const unsigned char *addr); |
465 | int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); | 470 | int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); |
471 | void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); | ||
472 | void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); | ||
473 | int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb); | ||
466 | 474 | ||
467 | #endif /* _HNS_DSAF_MAC_H */ | 475 | #endif /* _HNS_DSAF_MAC_H */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ca50c2553a9c..e557a4ef5996 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, | |||
2727 | soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; | 2727 | soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; |
2728 | } | 2728 | } |
2729 | 2729 | ||
2730 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) | ||
2731 | { | ||
2732 | u32 val, val_tmp; | ||
2733 | int wait_cnt; | ||
2734 | |||
2735 | if (port >= DSAF_SERVICE_NW_NUM) | ||
2736 | return 0; | ||
2737 | |||
2738 | wait_cnt = 0; | ||
2739 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
2740 | val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG + | ||
2741 | (port + DSAF_XGE_NUM) * 0x40); | ||
2742 | val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG + | ||
2743 | (port + DSAF_XGE_NUM) * 0x40); | ||
2744 | if (val == val_tmp) | ||
2745 | break; | ||
2746 | |||
2747 | usleep_range(100, 200); | ||
2748 | } | ||
2749 | |||
2750 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
2751 | dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n", | ||
2752 | val, val_tmp); | ||
2753 | return -EBUSY; | ||
2754 | } | ||
2755 | |||
2756 | return 0; | ||
2757 | } | ||
2758 | |||
2730 | /** | 2759 | /** |
2731 | * dsaf_probe - probe dsaf dev | 2760 | * dsaf_probe - probe dsaf dev |
2732 | * @pdev: dsaf platform device | 2761 | * @pdev: dsaf platform device |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 4507e8222683..0e1cd99831a6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | |||
@@ -44,6 +44,8 @@ struct hns_mac_cb; | |||
44 | #define DSAF_ROCE_CREDIT_CHN 8 | 44 | #define DSAF_ROCE_CREDIT_CHN 8 |
45 | #define DSAF_ROCE_CHAN_MODE 3 | 45 | #define DSAF_ROCE_CHAN_MODE 3 |
46 | 46 | ||
47 | #define HNS_MAX_WAIT_CNT 10000 | ||
48 | |||
47 | enum dsaf_roce_port_mode { | 49 | enum dsaf_roce_port_mode { |
48 | DSAF_ROCE_6PORT_MODE, | 50 | DSAF_ROCE_6PORT_MODE, |
49 | DSAF_ROCE_4PORT_MODE, | 51 | DSAF_ROCE_4PORT_MODE, |
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr( | |||
463 | 465 | ||
464 | int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, | 466 | int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, |
465 | u8 mac_id, u8 port_num); | 467 | u8 mac_id, u8 port_num); |
468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); | ||
466 | 469 | ||
467 | #endif /* __HNS_DSAF_MAIN_H__ */ | 470 | #endif /* __HNS_DSAF_MAIN_H__ */ |
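All of the *_wait_*_clean() helpers added by this series share one shape: poll a hardware condition, sleeping 100-200 us per iteration, for at most HNS_MAX_WAIT_CNT (10000) iterations, which gives a worst-case budget of roughly one to two seconds. A minimal sketch of that shared pattern, with condition_met() standing in for the per-block register check:

	static int hns_poll_until_clean(bool (*condition_met)(void))
	{
		int wait_cnt = 0;

		while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
			if (condition_met())	/* e.g. FIFO empty, VOQ in == out */
				return 0;
			usleep_range(100, 200);	/* 100-200 us per poll */
		}

		return -EBUSY;			/* ~1-2 s elapsed without success */
	}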
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index d160d8c9e45b..0942e4916d9d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) | |||
275 | dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); | 275 | dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); |
276 | } | 276 | } |
277 | 277 | ||
278 | int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb) | ||
279 | { | ||
280 | int wait_cnt; | ||
281 | u32 val; | ||
282 | |||
283 | wait_cnt = 0; | ||
284 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
285 | val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU; | ||
286 | if (!val) | ||
287 | break; | ||
288 | |||
289 | usleep_range(100, 200); | ||
290 | } | ||
291 | |||
292 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
293 | dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n", | ||
294 | val); | ||
295 | return -EBUSY; | ||
296 | } | ||
297 | |||
298 | return 0; | ||
299 | } | ||
300 | |||
278 | /** | 301 | /** |
279 | * ppe_init_hw - init ppe | 302 | * ppe_init_hw - init ppe |
280 | * @ppe_cb: ppe device | 303 | * @ppe_cb: ppe device |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 9d8e643e8aa6..f670e63a5a01 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | |||
@@ -100,6 +100,7 @@ struct ppe_common_cb { | |||
100 | 100 | ||
101 | }; | 101 | }; |
102 | 102 | ||
103 | int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb); | ||
103 | int hns_ppe_init(struct dsaf_device *dsaf_dev); | 104 | int hns_ppe_init(struct dsaf_device *dsaf_dev); |
104 | 105 | ||
105 | void hns_ppe_uninit(struct dsaf_device *dsaf_dev); | 106 | void hns_ppe_uninit(struct dsaf_device *dsaf_dev); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 9d76e2e54f9d..5d64519b9b1d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag) | |||
66 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); | 66 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); |
67 | } | 67 | } |
68 | 68 | ||
69 | int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs) | ||
70 | { | ||
71 | u32 head, tail; | ||
72 | int wait_cnt; | ||
73 | |||
74 | tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL); | ||
75 | wait_cnt = 0; | ||
76 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
77 | head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD); | ||
78 | if (tail == head) | ||
79 | break; | ||
80 | |||
81 | usleep_range(100, 200); | ||
82 | } | ||
83 | |||
84 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
85 | dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n"); | ||
86 | return -EBUSY; | ||
87 | } | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
69 | /** | 92 | /** |
70 | * hns_rcb_reset_ring_hw - ring reset | 93 | * hns_rcb_reset_ring_hw - ring reset |
71 | * @q: ring struct pointer | 94 | * @q: ring struct pointer |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 602816498c8d..2319b772a271 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | |||
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); | |||
136 | void hns_rcb_init_hw(struct ring_pair_cb *ring); | 136 | void hns_rcb_init_hw(struct ring_pair_cb *ring); |
137 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); | 137 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); |
138 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); | 138 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); |
139 | int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs); | ||
139 | u32 hns_rcb_get_rx_coalesced_frames( | 140 | u32 hns_rcb_get_rx_coalesced_frames( |
140 | struct rcb_common_cb *rcb_common, u32 port_idx); | 141 | struct rcb_common_cb *rcb_common, u32 port_idx); |
141 | u32 hns_rcb_get_tx_coalesced_frames( | 142 | u32 hns_rcb_get_tx_coalesced_frames( |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 886cbbf25761..74d935d82cbc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
@@ -464,6 +464,7 @@ | |||
464 | #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 | 464 | #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 |
465 | #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 | 465 | #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 |
466 | 466 | ||
467 | #define GMAC_FIFO_STATE_REG 0x0000UL | ||
467 | #define GMAC_DUPLEX_TYPE_REG 0x0008UL | 468 | #define GMAC_DUPLEX_TYPE_REG 0x0008UL |
468 | #define GMAC_FD_FC_TYPE_REG 0x000CUL | 469 | #define GMAC_FD_FC_TYPE_REG 0x000CUL |
469 | #define GMAC_TX_WATER_LINE_REG 0x0010UL | 470 | #define GMAC_TX_WATER_LINE_REG 0x0010UL |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 02a0ba20fad5..f56855e63c96 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
@@ -1112,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev) | |||
1112 | struct hnae_handle *h = priv->ae_handle; | 1112 | struct hnae_handle *h = priv->ae_handle; |
1113 | int state = 1; | 1113 | int state = 1; |
1114 | 1114 | ||
1115 | /* If there is no phy, there is no need to adjust the link */ | ||
1115 | if (ndev->phydev) { | 1116 | if (ndev->phydev) { |
1116 | h->dev->ops->adjust_link(h, ndev->phydev->speed, | 1117 | /* When the phy link is down, do nothing */ |
1117 | ndev->phydev->duplex); | 1118 | if (ndev->phydev->link == 0) |
1118 | state = ndev->phydev->link; | 1119 | return; |
1120 | |||
1121 | if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed, | ||
1122 | ndev->phydev->duplex)) { | ||
1123 | /* The Hi161X chip cannot change the gmac speed and | ||
1124 | * duplex while traffic is flowing. Delay 200ms to | ||
1125 | * make sure there is no more data left in the chip FIFO. | ||
1126 | */ | ||
1127 | netif_carrier_off(ndev); | ||
1128 | msleep(200); | ||
1129 | h->dev->ops->adjust_link(h, ndev->phydev->speed, | ||
1130 | ndev->phydev->duplex); | ||
1131 | netif_carrier_on(ndev); | ||
1132 | } | ||
1119 | } | 1133 | } |
1134 | |||
1120 | state = state && h->dev->ops->get_status(h); | 1135 | state = state && h->dev->ops->get_status(h); |
1121 | 1136 | ||
1122 | if (state != priv->link) { | 1137 | if (state != priv->link) { |
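For context, hns_nic_adjust_link() is the phylib link-change handler, so the sequence above only runs when phylib reports a change in speed, duplex or link state. A hedged sketch of how such a handler is wired up at probe time; the exact connect call, node and interface mode used by this driver are assumptions here:

	struct phy_device *phydev;

	/* phy_np and the SGMII mode are illustrative values */
	phydev = of_phy_connect(ndev, phy_np, hns_nic_adjust_link, 0,
				PHY_INTERFACE_MODE_SGMII);
	if (!phydev)
		return -ENODEV;

	/* From here on, phylib calls hns_nic_adjust_link(ndev) whenever the
	 * negotiated parameters change, which is where the
	 * need_adjust_link()/adjust_link() sequence above executes.
	 */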
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 08f3c4743f74..774beda040a1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev, | |||
243 | } | 243 | } |
244 | 244 | ||
245 | if (h->dev->ops->adjust_link) { | 245 | if (h->dev->ops->adjust_link) { |
246 | netif_carrier_off(net_dev); | ||
246 | h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); | 247 | h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); |
248 | netif_carrier_on(net_dev); | ||
247 | return 0; | 249 | return 0; |
248 | } | 250 | } |
249 | 251 | ||
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 354c0982847b..372664686309 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s | |||
494 | case 16384: | 494 | case 16384: |
495 | ret |= EMAC_MR1_RFS_16K; | 495 | ret |= EMAC_MR1_RFS_16K; |
496 | break; | 496 | break; |
497 | case 8192: | ||
498 | ret |= EMAC4_MR1_RFS_8K; | ||
499 | break; | ||
500 | case 4096: | 497 | case 4096: |
501 | ret |= EMAC_MR1_RFS_4K; | 498 | ret |= EMAC_MR1_RFS_4K; |
502 | break; | 499 | break; |
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ | |||
537 | case 16384: | 534 | case 16384: |
538 | ret |= EMAC4_MR1_RFS_16K; | 535 | ret |= EMAC4_MR1_RFS_16K; |
539 | break; | 536 | break; |
537 | case 8192: | ||
538 | ret |= EMAC4_MR1_RFS_8K; | ||
539 | break; | ||
540 | case 4096: | 540 | case 4096: |
541 | ret |= EMAC4_MR1_RFS_4K; | 541 | ret |= EMAC4_MR1_RFS_4K; |
542 | break; | 542 | break; |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index dafdd4ade705..4f0daf67b18d 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1823 | adapter->map_id = 1; | 1823 | adapter->map_id = 1; |
1824 | release_rx_pools(adapter); | 1824 | release_rx_pools(adapter); |
1825 | release_tx_pools(adapter); | 1825 | release_tx_pools(adapter); |
1826 | init_rx_pools(netdev); | 1826 | rc = init_rx_pools(netdev); |
1827 | init_tx_pools(netdev); | 1827 | if (rc) |
1828 | return rc; | ||
1829 | rc = init_tx_pools(netdev); | ||
1830 | if (rc) | ||
1831 | return rc; | ||
1828 | 1832 | ||
1829 | release_napi(adapter); | 1833 | release_napi(adapter); |
1830 | init_napi(adapter); | 1834 | rc = init_napi(adapter); |
1835 | if (rc) | ||
1836 | return rc; | ||
1831 | } else { | 1837 | } else { |
1832 | rc = reset_tx_pools(adapter); | 1838 | rc = reset_tx_pools(adapter); |
1833 | if (rc) | 1839 | if (rc) |
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 32d785b616e1..28500417843e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
@@ -4803,6 +4803,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, | |||
4803 | dev->min_mtu = ETH_MIN_MTU; | 4803 | dev->min_mtu = ETH_MIN_MTU; |
4804 | /* 9704 == 9728 - 20 and rounding to 8 */ | 4804 | /* 9704 == 9728 - 20 and rounding to 8 */ |
4805 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; | 4805 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; |
4806 | dev->dev.of_node = port_node; | ||
4806 | 4807 | ||
4807 | /* Phylink isn't used w/ ACPI as of now */ | 4808 | /* Phylink isn't used w/ ACPI as of now */ |
4808 | if (port_node) { | 4809 | if (port_node) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 86478a6b99c5..c8c315eb5128 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c | |||
@@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, | |||
139 | struct mlx5_wq_ctrl *wq_ctrl) | 139 | struct mlx5_wq_ctrl *wq_ctrl) |
140 | { | 140 | { |
141 | u32 sq_strides_offset; | 141 | u32 sq_strides_offset; |
142 | u32 rq_pg_remainder; | ||
142 | int err; | 143 | int err; |
143 | 144 | ||
144 | mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, | 145 | mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, |
145 | MLX5_GET(qpc, qpc, log_rq_size), | 146 | MLX5_GET(qpc, qpc, log_rq_size), |
146 | &wq->rq.fbc); | 147 | &wq->rq.fbc); |
147 | 148 | ||
148 | sq_strides_offset = | 149 | rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE; |
149 | ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; | 150 | sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB; |
150 | 151 | ||
151 | mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), | 152 | mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), |
152 | MLX5_GET(qpc, qpc, log_sq_size), | 153 | MLX5_GET(qpc, qpc, log_sq_size), |
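The fix derives the SQ stride offset from the total RQ byte size modulo the page size instead of from a single fragment size. A worked example with hypothetical sizes (4 KiB pages, 64-byte send WQE basic blocks):

	u32 rq_bytes          = 6144;		      /* mlx5_wq_cyc_get_byte_size(&wq->rq) */
	u32 rq_pg_remainder   = rq_bytes % 4096;      /* 2048 bytes used in the RQ's last page */
	u32 sq_strides_offset = rq_pg_remainder / 64; /* SQ starts 32 basic blocks into it */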
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a8b9fbab5f73..253bdaef1505 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
@@ -229,29 +229,16 @@ done: | |||
229 | spin_unlock_bh(&nn->reconfig_lock); | 229 | spin_unlock_bh(&nn->reconfig_lock); |
230 | } | 230 | } |
231 | 231 | ||
232 | /** | 232 | static void nfp_net_reconfig_sync_enter(struct nfp_net *nn) |
233 | * nfp_net_reconfig() - Reconfigure the firmware | ||
234 | * @nn: NFP Net device to reconfigure | ||
235 | * @update: The value for the update field in the BAR config | ||
236 | * | ||
237 | * Write the update word to the BAR and ping the reconfig queue. Then | ||
238 | * poll until the firmware has acknowledged the update by zeroing the | ||
239 | * update word. | ||
240 | * | ||
241 | * Return: Negative errno on error, 0 on success | ||
242 | */ | ||
243 | int nfp_net_reconfig(struct nfp_net *nn, u32 update) | ||
244 | { | 233 | { |
245 | bool cancelled_timer = false; | 234 | bool cancelled_timer = false; |
246 | u32 pre_posted_requests; | 235 | u32 pre_posted_requests; |
247 | int ret; | ||
248 | 236 | ||
249 | spin_lock_bh(&nn->reconfig_lock); | 237 | spin_lock_bh(&nn->reconfig_lock); |
250 | 238 | ||
251 | nn->reconfig_sync_present = true; | 239 | nn->reconfig_sync_present = true; |
252 | 240 | ||
253 | if (nn->reconfig_timer_active) { | 241 | if (nn->reconfig_timer_active) { |
254 | del_timer(&nn->reconfig_timer); | ||
255 | nn->reconfig_timer_active = false; | 242 | nn->reconfig_timer_active = false; |
256 | cancelled_timer = true; | 243 | cancelled_timer = true; |
257 | } | 244 | } |
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) | |||
260 | 247 | ||
261 | spin_unlock_bh(&nn->reconfig_lock); | 248 | spin_unlock_bh(&nn->reconfig_lock); |
262 | 249 | ||
263 | if (cancelled_timer) | 250 | if (cancelled_timer) { |
251 | del_timer_sync(&nn->reconfig_timer); | ||
264 | nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); | 252 | nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); |
253 | } | ||
265 | 254 | ||
266 | /* Run the posted reconfigs which were issued before we started */ | 255 | /* Run the posted reconfigs which were issued before we started */ |
267 | if (pre_posted_requests) { | 256 | if (pre_posted_requests) { |
268 | nfp_net_reconfig_start(nn, pre_posted_requests); | 257 | nfp_net_reconfig_start(nn, pre_posted_requests); |
269 | nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); | 258 | nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); |
270 | } | 259 | } |
260 | } | ||
261 | |||
262 | static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) | ||
263 | { | ||
264 | nfp_net_reconfig_sync_enter(nn); | ||
265 | |||
266 | spin_lock_bh(&nn->reconfig_lock); | ||
267 | nn->reconfig_sync_present = false; | ||
268 | spin_unlock_bh(&nn->reconfig_lock); | ||
269 | } | ||
270 | |||
271 | /** | ||
272 | * nfp_net_reconfig() - Reconfigure the firmware | ||
273 | * @nn: NFP Net device to reconfigure | ||
274 | * @update: The value for the update field in the BAR config | ||
275 | * | ||
276 | * Write the update word to the BAR and ping the reconfig queue. Then | ||
277 | * poll until the firmware has acknowledged the update by zeroing the | ||
278 | * update word. | ||
279 | * | ||
280 | * Return: Negative errno on error, 0 on success | ||
281 | */ | ||
282 | int nfp_net_reconfig(struct nfp_net *nn, u32 update) | ||
283 | { | ||
284 | int ret; | ||
285 | |||
286 | nfp_net_reconfig_sync_enter(nn); | ||
271 | 287 | ||
272 | nfp_net_reconfig_start(nn, update); | 288 | nfp_net_reconfig_start(nn, update); |
273 | ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); | 289 | ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); |
@@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, | |||
3633 | */ | 3649 | */ |
3634 | void nfp_net_free(struct nfp_net *nn) | 3650 | void nfp_net_free(struct nfp_net *nn) |
3635 | { | 3651 | { |
3652 | WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); | ||
3636 | if (nn->dp.netdev) | 3653 | if (nn->dp.netdev) |
3637 | free_netdev(nn->dp.netdev); | 3654 | free_netdev(nn->dp.netdev); |
3638 | else | 3655 | else |
@@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn) | |||
3920 | return; | 3937 | return; |
3921 | 3938 | ||
3922 | unregister_netdev(nn->dp.netdev); | 3939 | unregister_netdev(nn->dp.netdev); |
3940 | nfp_net_reconfig_wait_posted(nn); | ||
3923 | } | 3941 | } |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0efa977c422d..b08d51bf7a20 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -218,6 +218,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { | |||
218 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, | 218 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, |
219 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, | 219 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, |
220 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, | 220 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, |
221 | { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, | ||
221 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, | 222 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, |
222 | { PCI_VENDOR_ID_DLINK, 0x4300, | 223 | { PCI_VENDOR_ID_DLINK, 0x4300, |
223 | PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, | 224 | PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, |
@@ -4522,7 +4523,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
4522 | rtl_hw_reset(tp); | 4523 | rtl_hw_reset(tp); |
4523 | } | 4524 | } |
4524 | 4525 | ||
4525 | static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) | 4526 | static void rtl_set_tx_config_registers(struct rtl8169_private *tp) |
4526 | { | 4527 | { |
4527 | /* Set DMA burst size and Interframe Gap Time */ | 4528 | /* Set DMA burst size and Interframe Gap Time */ |
4528 | RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | | 4529 | RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | |
@@ -4633,12 +4634,14 @@ static void rtl_hw_start(struct rtl8169_private *tp) | |||
4633 | 4634 | ||
4634 | rtl_set_rx_max_size(tp); | 4635 | rtl_set_rx_max_size(tp); |
4635 | rtl_set_rx_tx_desc_registers(tp); | 4636 | rtl_set_rx_tx_desc_registers(tp); |
4636 | rtl_set_rx_tx_config_registers(tp); | 4637 | rtl_set_tx_config_registers(tp); |
4637 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); | 4638 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); |
4638 | 4639 | ||
4639 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ | 4640 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ |
4640 | RTL_R8(tp, IntrMask); | 4641 | RTL_R8(tp, IntrMask); |
4641 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); | 4642 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); |
4643 | rtl_init_rxcfg(tp); | ||
4644 | |||
4642 | rtl_set_rx_mode(tp->dev); | 4645 | rtl_set_rx_mode(tp->dev); |
4643 | /* no early-rx interrupts */ | 4646 | /* no early-rx interrupts */ |
4644 | RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); | 4647 | RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index ad4433d59237..f27a0dc8c563 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -798,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = { | |||
798 | .magic = 1, | 798 | .magic = 1, |
799 | .cexcr = 1, | 799 | .cexcr = 1, |
800 | }; | 800 | }; |
801 | |||
802 | /* R7S9210 */ | ||
803 | static struct sh_eth_cpu_data r7s9210_data = { | ||
804 | .soft_reset = sh_eth_soft_reset, | ||
805 | |||
806 | .set_duplex = sh_eth_set_duplex, | ||
807 | .set_rate = sh_eth_set_rate_rcar, | ||
808 | |||
809 | .register_type = SH_ETH_REG_FAST_SH4, | ||
810 | |||
811 | .edtrr_trns = EDTRR_TRNS_ETHER, | ||
812 | .ecsr_value = ECSR_ICD, | ||
813 | .ecsipr_value = ECSIPR_ICDIP, | ||
814 | .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP | | ||
815 | EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP | | ||
816 | EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP | | ||
817 | EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP | | ||
818 | EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | | ||
819 | EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP | | ||
820 | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, | ||
821 | |||
822 | .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, | ||
823 | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | | ||
824 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, | ||
825 | |||
826 | .fdr_value = 0x0000070f, | ||
827 | |||
828 | .apr = 1, | ||
829 | .mpr = 1, | ||
830 | .tpauser = 1, | ||
831 | .hw_swap = 1, | ||
832 | .rpadir = 1, | ||
833 | .no_ade = 1, | ||
834 | .xdfar_rw = 1, | ||
835 | }; | ||
801 | #endif /* CONFIG_OF */ | 836 | #endif /* CONFIG_OF */ |
802 | 837 | ||
803 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) | 838 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) |
@@ -3121,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = { | |||
3121 | { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, | 3156 | { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, |
3122 | { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, | 3157 | { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, |
3123 | { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, | 3158 | { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, |
3159 | { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data }, | ||
3124 | { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, | 3160 | { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, |
3125 | { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, | 3161 | { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, |
3126 | { } | 3162 | { } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index bf4acebb6bcd..324049eebb9b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP | |||
110 | 110 | ||
111 | config DWMAC_SOCFPGA | 111 | config DWMAC_SOCFPGA |
112 | tristate "SOCFPGA dwmac support" | 112 | tristate "SOCFPGA dwmac support" |
113 | default ARCH_SOCFPGA | 113 | default (ARCH_SOCFPGA || ARCH_STRATIX10) |
114 | depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) | 114 | depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) |
115 | select MFD_SYSCON | 115 | select MFD_SYSCON |
116 | help | 116 | help |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 76649adf8fb0..c0a855b7ab3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -112,7 +112,6 @@ struct stmmac_priv { | |||
112 | u32 tx_count_frames; | 112 | u32 tx_count_frames; |
113 | u32 tx_coal_frames; | 113 | u32 tx_coal_frames; |
114 | u32 tx_coal_timer; | 114 | u32 tx_coal_timer; |
115 | bool tx_timer_armed; | ||
116 | 115 | ||
117 | int tx_coalesce; | 116 | int tx_coalesce; |
118 | int hwts_tx_en; | 117 | int hwts_tx_en; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ff1ffb46198a..9f458bb16f2a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3147 | * element in case of no SG. | 3147 | * element in case of no SG. |
3148 | */ | 3148 | */ |
3149 | priv->tx_count_frames += nfrags + 1; | 3149 | priv->tx_count_frames += nfrags + 1; |
3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames) && | 3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { |
3151 | !priv->tx_timer_armed) { | ||
3152 | mod_timer(&priv->txtimer, | 3151 | mod_timer(&priv->txtimer, |
3153 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | 3152 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); |
3154 | priv->tx_timer_armed = true; | ||
3155 | } else { | 3153 | } else { |
3156 | priv->tx_count_frames = 0; | 3154 | priv->tx_count_frames = 0; |
3157 | stmmac_set_tx_ic(priv, desc); | 3155 | stmmac_set_tx_ic(priv, desc); |
3158 | priv->xstats.tx_set_ic_bit++; | 3156 | priv->xstats.tx_set_ic_bit++; |
3159 | priv->tx_timer_armed = false; | ||
3160 | } | 3157 | } |
3161 | 3158 | ||
3162 | skb_tx_timestamp(skb); | 3159 | skb_tx_timestamp(skb); |
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 0c1adad7415d..396e1cd10667 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c | |||
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) | |||
170 | struct device_node *node; | 170 | struct device_node *node; |
171 | struct cpsw_phy_sel_priv *priv; | 171 | struct cpsw_phy_sel_priv *priv; |
172 | 172 | ||
173 | node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); | 173 | node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0); |
174 | if (!node) { | 174 | if (!node) { |
175 | dev_err(dev, "Phy mode driver DT not found\n"); | 175 | node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); |
176 | return; | 176 | if (!node) { |
177 | dev_err(dev, "Phy mode driver DT not found\n"); | ||
178 | return; | ||
179 | } | ||
177 | } | 180 | } |
178 | 181 | ||
179 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 182 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1121a1ec407c..70921bbe0e28 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -2206,6 +2206,16 @@ static int netvsc_probe(struct hv_device *dev, | |||
2206 | 2206 | ||
2207 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); | 2207 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); |
2208 | 2208 | ||
2209 | /* We must get rtnl lock before scheduling nvdev->subchan_work, | ||
2210 | * otherwise netvsc_subchan_work() can get rtnl lock first and wait | ||
2211 | * for all subchannels to show up, but that may not happen because | ||
2212 | * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() | ||
2213 | * -> ... -> device_add() -> ... -> __device_attach() can't get | ||
2214 | * the device lock, so all the subchannels can't be processed -- | ||
2215 | * finally netvsc_subchan_work() hangs forever. | ||
2216 | */ | ||
2217 | rtnl_lock(); | ||
2218 | |||
2209 | if (nvdev->num_chn > 1) | 2219 | if (nvdev->num_chn > 1) |
2210 | schedule_work(&nvdev->subchan_work); | 2220 | schedule_work(&nvdev->subchan_work); |
2211 | 2221 | ||
@@ -2224,7 +2234,6 @@ static int netvsc_probe(struct hv_device *dev, | |||
2224 | else | 2234 | else |
2225 | net->max_mtu = ETH_DATA_LEN; | 2235 | net->max_mtu = ETH_DATA_LEN; |
2226 | 2236 | ||
2227 | rtnl_lock(); | ||
2228 | ret = register_netdevice(net); | 2237 | ret = register_netdevice(net); |
2229 | if (ret != 0) { | 2238 | if (ret != 0) { |
2230 | pr_err("Unable to register netdev.\n"); | 2239 | pr_err("Unable to register netdev.\n"); |
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4637d980310e..52fffb98fde9 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c | |||
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
398 | switch (type) { | 398 | switch (type) { |
399 | case hwmon_temp: | 399 | case hwmon_temp: |
400 | switch (attr) { | 400 | switch (attr) { |
401 | case hwmon_temp_input: | ||
402 | case hwmon_temp_min_alarm: | 401 | case hwmon_temp_min_alarm: |
403 | case hwmon_temp_max_alarm: | 402 | case hwmon_temp_max_alarm: |
404 | case hwmon_temp_lcrit_alarm: | 403 | case hwmon_temp_lcrit_alarm: |
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
407 | case hwmon_temp_max: | 406 | case hwmon_temp_max: |
408 | case hwmon_temp_lcrit: | 407 | case hwmon_temp_lcrit: |
409 | case hwmon_temp_crit: | 408 | case hwmon_temp_crit: |
409 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
410 | return 0; | ||
411 | /* fall through */ | ||
412 | case hwmon_temp_input: | ||
410 | return 0444; | 413 | return 0444; |
411 | default: | 414 | default: |
412 | return 0; | 415 | return 0; |
413 | } | 416 | } |
414 | case hwmon_in: | 417 | case hwmon_in: |
415 | switch (attr) { | 418 | switch (attr) { |
416 | case hwmon_in_input: | ||
417 | case hwmon_in_min_alarm: | 419 | case hwmon_in_min_alarm: |
418 | case hwmon_in_max_alarm: | 420 | case hwmon_in_max_alarm: |
419 | case hwmon_in_lcrit_alarm: | 421 | case hwmon_in_lcrit_alarm: |
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
422 | case hwmon_in_max: | 424 | case hwmon_in_max: |
423 | case hwmon_in_lcrit: | 425 | case hwmon_in_lcrit: |
424 | case hwmon_in_crit: | 426 | case hwmon_in_crit: |
427 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
428 | return 0; | ||
429 | /* fall through */ | ||
430 | case hwmon_in_input: | ||
425 | return 0444; | 431 | return 0444; |
426 | default: | 432 | default: |
427 | return 0; | 433 | return 0; |
428 | } | 434 | } |
429 | case hwmon_curr: | 435 | case hwmon_curr: |
430 | switch (attr) { | 436 | switch (attr) { |
431 | case hwmon_curr_input: | ||
432 | case hwmon_curr_min_alarm: | 437 | case hwmon_curr_min_alarm: |
433 | case hwmon_curr_max_alarm: | 438 | case hwmon_curr_max_alarm: |
434 | case hwmon_curr_lcrit_alarm: | 439 | case hwmon_curr_lcrit_alarm: |
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
437 | case hwmon_curr_max: | 442 | case hwmon_curr_max: |
438 | case hwmon_curr_lcrit: | 443 | case hwmon_curr_lcrit: |
439 | case hwmon_curr_crit: | 444 | case hwmon_curr_crit: |
445 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
446 | return 0; | ||
447 | /* fall through */ | ||
448 | case hwmon_curr_input: | ||
440 | return 0444; | 449 | return 0444; |
441 | default: | 450 | default: |
442 | return 0; | 451 | return 0; |
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
452 | channel == 1) | 461 | channel == 1) |
453 | return 0; | 462 | return 0; |
454 | switch (attr) { | 463 | switch (attr) { |
455 | case hwmon_power_input: | ||
456 | case hwmon_power_min_alarm: | 464 | case hwmon_power_min_alarm: |
457 | case hwmon_power_max_alarm: | 465 | case hwmon_power_max_alarm: |
458 | case hwmon_power_lcrit_alarm: | 466 | case hwmon_power_lcrit_alarm: |
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
461 | case hwmon_power_max: | 469 | case hwmon_power_max: |
462 | case hwmon_power_lcrit: | 470 | case hwmon_power_lcrit: |
463 | case hwmon_power_crit: | 471 | case hwmon_power_crit: |
472 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
473 | return 0; | ||
474 | /* fall through */ | ||
475 | case hwmon_power_input: | ||
464 | return 0444; | 476 | return 0444; |
465 | default: | 477 | default: |
466 | return 0; | 478 | return 0; |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index b4c3a957c102..73969dbeb5c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | |||
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
985 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? | 985 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? |
986 | iwl_ext_nvm_channels : iwl_nvm_channels; | 986 | iwl_ext_nvm_channels : iwl_nvm_channels; |
987 | struct ieee80211_regdomain *regd, *copy_rd; | 987 | struct ieee80211_regdomain *regd, *copy_rd; |
988 | int size_of_regd, regd_to_copy, wmms_to_copy; | 988 | int size_of_regd, regd_to_copy; |
989 | int size_of_wmms = 0; | ||
990 | struct ieee80211_reg_rule *rule; | 989 | struct ieee80211_reg_rule *rule; |
991 | struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; | ||
992 | struct regdb_ptrs *regdb_ptrs; | 990 | struct regdb_ptrs *regdb_ptrs; |
993 | enum nl80211_band band; | 991 | enum nl80211_band band; |
994 | int center_freq, prev_center_freq = 0; | 992 | int center_freq, prev_center_freq = 0; |
995 | int valid_rules = 0, n_wmms = 0; | 993 | int valid_rules = 0; |
996 | int i; | ||
997 | bool new_rule; | 994 | bool new_rule; |
998 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? | 995 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? |
999 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; | 996 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; |
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
1012 | sizeof(struct ieee80211_regdomain) + | 1009 | sizeof(struct ieee80211_regdomain) + |
1013 | num_of_ch * sizeof(struct ieee80211_reg_rule); | 1010 | num_of_ch * sizeof(struct ieee80211_reg_rule); |
1014 | 1011 | ||
1015 | if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) | 1012 | regd = kzalloc(size_of_regd, GFP_KERNEL); |
1016 | size_of_wmms = | ||
1017 | num_of_ch * sizeof(struct ieee80211_wmm_rule); | ||
1018 | |||
1019 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | ||
1020 | if (!regd) | 1013 | if (!regd) |
1021 | return ERR_PTR(-ENOMEM); | 1014 | return ERR_PTR(-ENOMEM); |
1022 | 1015 | ||
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
1030 | regd->alpha2[0] = fw_mcc >> 8; | 1023 | regd->alpha2[0] = fw_mcc >> 8; |
1031 | regd->alpha2[1] = fw_mcc & 0xff; | 1024 | regd->alpha2[1] = fw_mcc & 0xff; |
1032 | 1025 | ||
1033 | wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
1034 | |||
1035 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { | 1026 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { |
1036 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); | 1027 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); |
1037 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? | 1028 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? |
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
1085 | band == NL80211_BAND_2GHZ) | 1076 | band == NL80211_BAND_2GHZ) |
1086 | continue; | 1077 | continue; |
1087 | 1078 | ||
1088 | if (!reg_query_regdb_wmm(regd->alpha2, center_freq, | 1079 | reg_query_regdb_wmm(regd->alpha2, center_freq, rule); |
1089 | ®db_ptrs[n_wmms].token, wmm_rule)) { | ||
1090 | /* Add only new rules */ | ||
1091 | for (i = 0; i < n_wmms; i++) { | ||
1092 | if (regdb_ptrs[i].token == | ||
1093 | regdb_ptrs[n_wmms].token) { | ||
1094 | rule->wmm_rule = regdb_ptrs[i].rule; | ||
1095 | break; | ||
1096 | } | ||
1097 | } | ||
1098 | if (i == n_wmms) { | ||
1099 | rule->wmm_rule = wmm_rule; | ||
1100 | regdb_ptrs[n_wmms++].rule = wmm_rule; | ||
1101 | wmm_rule++; | ||
1102 | } | ||
1103 | } | ||
1104 | } | 1080 | } |
1105 | 1081 | ||
1106 | regd->n_reg_rules = valid_rules; | 1082 | regd->n_reg_rules = valid_rules; |
1107 | regd->n_wmm_rules = n_wmms; | ||
1108 | 1083 | ||
1109 | /* | 1084 | /* |
1110 | * Narrow down regdom for unused regulatory rules to prevent hole | 1085 | * Narrow down regdom for unused regulatory rules to prevent hole |
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
1113 | regd_to_copy = sizeof(struct ieee80211_regdomain) + | 1088 | regd_to_copy = sizeof(struct ieee80211_regdomain) + |
1114 | valid_rules * sizeof(struct ieee80211_reg_rule); | 1089 | valid_rules * sizeof(struct ieee80211_reg_rule); |
1115 | 1090 | ||
1116 | wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; | 1091 | copy_rd = kzalloc(regd_to_copy, GFP_KERNEL); |
1117 | |||
1118 | copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); | ||
1119 | if (!copy_rd) { | 1092 | if (!copy_rd) { |
1120 | copy_rd = ERR_PTR(-ENOMEM); | 1093 | copy_rd = ERR_PTR(-ENOMEM); |
1121 | goto out; | 1094 | goto out; |
1122 | } | 1095 | } |
1123 | 1096 | ||
1124 | memcpy(copy_rd, regd, regd_to_copy); | 1097 | memcpy(copy_rd, regd, regd_to_copy); |
1125 | memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, | ||
1126 | wmms_to_copy); | ||
1127 | |||
1128 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); | ||
1129 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
1130 | |||
1131 | for (i = 0; i < regd->n_reg_rules; i++) { | ||
1132 | if (!regd->reg_rules[i].wmm_rule) | ||
1133 | continue; | ||
1134 | |||
1135 | copy_rd->reg_rules[i].wmm_rule = d_wmm + | ||
1136 | (regd->reg_rules[i].wmm_rule - s_wmm); | ||
1137 | } | ||
1138 | 1098 | ||
1139 | out: | 1099 | out: |
1140 | kfree(regdb_ptrs); | 1100 | kfree(regdb_ptrs); |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 998dfac0fcff..1068757ec42e 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <net/net_namespace.h> | 34 | #include <net/net_namespace.h> |
35 | #include <net/netns/generic.h> | 35 | #include <net/netns/generic.h> |
36 | #include <linux/rhashtable.h> | 36 | #include <linux/rhashtable.h> |
37 | #include <linux/nospec.h> | ||
37 | #include "mac80211_hwsim.h" | 38 | #include "mac80211_hwsim.h" |
38 | 39 | ||
39 | #define WARN_QUEUE 100 | 40 | #define WARN_QUEUE 100 |
@@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
2820 | IEEE80211_VHT_CAP_SHORT_GI_80 | | 2821 | IEEE80211_VHT_CAP_SHORT_GI_80 | |
2821 | IEEE80211_VHT_CAP_SHORT_GI_160 | | 2822 | IEEE80211_VHT_CAP_SHORT_GI_160 | |
2822 | IEEE80211_VHT_CAP_TXSTBC | | 2823 | IEEE80211_VHT_CAP_TXSTBC | |
2823 | IEEE80211_VHT_CAP_RXSTBC_1 | | ||
2824 | IEEE80211_VHT_CAP_RXSTBC_2 | | ||
2825 | IEEE80211_VHT_CAP_RXSTBC_3 | | ||
2826 | IEEE80211_VHT_CAP_RXSTBC_4 | | 2824 | IEEE80211_VHT_CAP_RXSTBC_4 | |
2827 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; | 2825 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; |
2828 | sband->vht_cap.vht_mcs.rx_mcs_map = | 2826 | sband->vht_cap.vht_mcs.rx_mcs_map = |
@@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
3317 | if (info->attrs[HWSIM_ATTR_CHANNELS]) | 3315 | if (info->attrs[HWSIM_ATTR_CHANNELS]) |
3318 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); | 3316 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); |
3319 | 3317 | ||
3318 | if (param.channels < 1) { | ||
3319 | GENL_SET_ERR_MSG(info, "must have at least one channel"); | ||
3320 | return -EINVAL; | ||
3321 | } | ||
3322 | |||
3320 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { | 3323 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { |
3321 | GENL_SET_ERR_MSG(info, "too many channels specified"); | 3324 | GENL_SET_ERR_MSG(info, "too many channels specified"); |
3322 | return -EINVAL; | 3325 | return -EINVAL; |
@@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
3350 | kfree(hwname); | 3353 | kfree(hwname); |
3351 | return -EINVAL; | 3354 | return -EINVAL; |
3352 | } | 3355 | } |
3356 | |||
3357 | idx = array_index_nospec(idx, | ||
3358 | ARRAY_SIZE(hwsim_world_regdom_custom)); | ||
3353 | param.regd = hwsim_world_regdom_custom[idx]; | 3359 | param.regd = hwsim_world_regdom_custom[idx]; |
3354 | } | 3360 | } |
3355 | 3361 | ||
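array_index_nospec() does not replace the bounds check above it; it clamps an already validated index so that a mispredicted branch cannot use an out-of-range value under speculation. A minimal sketch of the pattern, with table and nentries as illustrative names:

	#include <linux/nospec.h>

	static const void *lookup_nospec(const void * const *table, size_t nentries,
					 size_t idx)
	{
		if (idx >= nentries)
			return NULL;			 /* architectural bounds check */
		idx = array_index_nospec(idx, nentries); /* clamp under speculation    */
		return table[idx];
	}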
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1b9951d2067e..d668682f91df 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, | |||
316 | old_value = *dbbuf_db; | 316 | old_value = *dbbuf_db; |
317 | *dbbuf_db = value; | 317 | *dbbuf_db = value; |
318 | 318 | ||
319 | /* | ||
320 | * Ensure that the doorbell is updated before reading the event | ||
321 | * index from memory. The controller needs to provide similar | ||
322 | * ordering to ensure the event index is updated before reading | ||
323 | * the doorbell. | ||
324 | */ | ||
325 | mb(); | ||
326 | |||
319 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) | 327 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) |
320 | return false; | 328 | return false; |
321 | } | 329 | } |
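A minimal sketch of the ordering the new comment describes: the shadow doorbell store must be globally visible before the event index is read back, otherwise driver and controller can each conclude that the other side will act. The names here are illustrative, not the driver's exact code:

	static bool shadow_db_update(volatile u32 *db, const volatile u32 *ei, u32 value)
	{
		u32 old = *db;

		*db = value;	/* 1. publish the new doorbell value       */
		mb();		/* 2. order the store before the next load */
		return nvme_dbbuf_need_event(*ei, value, old);	/* 3. then decide */
	}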
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ebf3e7a6c49e..b5ec96abd048 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -1210,7 +1210,7 @@ static int __init nvmet_init(void) | |||
1210 | 1210 | ||
1211 | error = nvmet_init_discovery(); | 1211 | error = nvmet_init_discovery(); |
1212 | if (error) | 1212 | if (error) |
1213 | goto out; | 1213 | goto out_free_work_queue; |
1214 | 1214 | ||
1215 | error = nvmet_init_configfs(); | 1215 | error = nvmet_init_configfs(); |
1216 | if (error) | 1216 | if (error) |
@@ -1219,6 +1219,8 @@ static int __init nvmet_init(void) | |||
1219 | 1219 | ||
1220 | out_exit_discovery: | 1220 | out_exit_discovery: |
1221 | nvmet_exit_discovery(); | 1221 | nvmet_exit_discovery(); |
1222 | out_free_work_queue: | ||
1223 | destroy_workqueue(buffered_io_wq); | ||
1222 | out: | 1224 | out: |
1223 | return error; | 1225 | return error; |
1224 | } | 1226 | } |
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 34712def81b1..5251689a1d9a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c | |||
@@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work) | |||
311 | struct fcloop_tport *tport = tls_req->tport; | 311 | struct fcloop_tport *tport = tls_req->tport; |
312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; | 312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; |
313 | 313 | ||
314 | if (tport->remoteport) | 314 | if (!tport || tport->remoteport) |
315 | lsreq->done(lsreq, tls_req->status); | 315 | lsreq->done(lsreq, tls_req->status); |
316 | } | 316 | } |
317 | 317 | ||
@@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, | |||
329 | 329 | ||
330 | if (!rport->targetport) { | 330 | if (!rport->targetport) { |
331 | tls_req->status = -ECONNREFUSED; | 331 | tls_req->status = -ECONNREFUSED; |
332 | tls_req->tport = NULL; | ||
332 | schedule_work(&tls_req->work); | 333 | schedule_work(&tls_req->work); |
333 | return ret; | 334 | return ret; |
334 | } | 335 | } |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 466e3c8582f0..9095b8290150 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex); | |||
54 | */ | 54 | */ |
55 | DEFINE_RAW_SPINLOCK(devtree_lock); | 55 | DEFINE_RAW_SPINLOCK(devtree_lock); |
56 | 56 | ||
57 | bool of_node_name_eq(const struct device_node *np, const char *name) | ||
58 | { | ||
59 | const char *node_name; | ||
60 | size_t len; | ||
61 | |||
62 | if (!np) | ||
63 | return false; | ||
64 | |||
65 | node_name = kbasename(np->full_name); | ||
66 | len = strchrnul(node_name, '@') - node_name; | ||
67 | |||
68 | return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); | ||
69 | } | ||
70 | |||
71 | bool of_node_name_prefix(const struct device_node *np, const char *prefix) | ||
72 | { | ||
73 | if (!np) | ||
74 | return false; | ||
75 | |||
76 | return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; | ||
77 | } | ||
78 | |||
57 | int of_n_addr_cells(struct device_node *np) | 79 | int of_n_addr_cells(struct device_node *np) |
58 | { | 80 | { |
59 | u32 cells; | 81 | u32 cells; |
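Both helpers compare against the node name only, that is the part of the unit name before any '@'; of_node_name_prefix() matches against the full basename, including the unit address. A small usage sketch with a hypothetical node name:

	/* For a device node named "serial@e0000000": */
	bool a = of_node_name_eq(np, "serial");		  /* true: name before '@' matches */
	bool b = of_node_name_eq(np, "serial@e0000000");  /* false: unit address excluded  */
	bool c = of_node_name_prefix(np, "ser");	  /* true: prefix of the basename  */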
@@ -720,6 +742,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node, | |||
720 | EXPORT_SYMBOL(of_get_next_available_child); | 742 | EXPORT_SYMBOL(of_get_next_available_child); |
721 | 743 | ||
722 | /** | 744 | /** |
745 | * of_get_compatible_child - Find compatible child node | ||
746 | * @parent: parent node | ||
747 | * @compatible: compatible string | ||
748 | * | ||
749 | * Look up the first child node whose compatible property contains the | ||
750 | * given compatible string. | ||
751 | * | ||
752 | * Returns a node pointer with refcount incremented, use of_node_put() on it | ||
753 | * when done; or NULL if not found. | ||
754 | */ | ||
755 | struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
756 | const char *compatible) | ||
757 | { | ||
758 | struct device_node *child; | ||
759 | |||
760 | for_each_child_of_node(parent, child) { | ||
761 | if (of_device_is_compatible(child, compatible)) | ||
762 | break; | ||
763 | } | ||
764 | |||
765 | return child; | ||
766 | } | ||
767 | EXPORT_SYMBOL(of_get_compatible_child); | ||
768 | |||
769 | /** | ||
723 | * of_get_child_by_name - Find the child node by name for a given parent | 770 | * of_get_child_by_name - Find the child node by name for a given parent |
724 | * @node: parent node | 771 | * @node: parent node |
725 | * @name: child name to look for. | 772 | * @name: child name to look for. |
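of_node_name_eq() compares only the node-name portion of the path (the text before any '@' unit address), and of_get_compatible_child() returns the first child whose compatible list matches, with its refcount held. A hedged usage sketch; the node name and compatible string are placeholders:

#include <linux/of.h>

static void example_find_phy(struct device_node *parent)
{
	/* "vendor,example-phy" is a made-up compatible string */
	struct device_node *child =
		of_get_compatible_child(parent, "vendor,example-phy");

	if (!child)
		return;

	/* matches both "phy" and "phy@<unit-address>" */
	if (of_node_name_eq(child, "phy"))
		pr_info("found %pOF\n", child);

	of_node_put(child);	/* drop the reference taken by the lookup */
}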
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 7ba90c290a42..6c59673933e9 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node, | |||
241 | if (!dev) | 241 | if (!dev) |
242 | goto err_clear_flag; | 242 | goto err_clear_flag; |
243 | 243 | ||
244 | /* AMBA devices only support a single DMA mask */ | ||
245 | dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
246 | dev->dev.dma_mask = &dev->dev.coherent_dma_mask; | ||
247 | |||
244 | /* setup generic device info */ | 248 | /* setup generic device info */ |
245 | dev->dev.of_node = of_node_get(node); | 249 | dev->dev.of_node = of_node_get(node); |
246 | dev->dev.fwnode = &node->fwnode; | 250 | dev->dev.fwnode = &node->fwnode; |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 977a8307fbb1..4f2816559205 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
@@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz, | |||
260 | 260 | ||
261 | mutex_lock(&tz->lock); | 261 | mutex_lock(&tz->lock); |
262 | 262 | ||
263 | if (mode == THERMAL_DEVICE_ENABLED) | 263 | if (mode == THERMAL_DEVICE_ENABLED) { |
264 | tz->polling_delay = data->polling_delay; | 264 | tz->polling_delay = data->polling_delay; |
265 | else | 265 | tz->passive_delay = data->passive_delay; |
266 | } else { | ||
266 | tz->polling_delay = 0; | 267 | tz->polling_delay = 0; |
268 | tz->passive_delay = 0; | ||
269 | } | ||
267 | 270 | ||
268 | mutex_unlock(&tz->lock); | 271 | mutex_unlock(&tz->lock); |
269 | 272 | ||
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index c866cc165960..450ed66edf58 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c | |||
@@ -1,16 +1,6 @@ | |||
1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | * Copyright 2016 Freescale Semiconductor, Inc. | 2 | // |
3 | * | 3 | // Copyright 2016 Freescale Semiconductor, Inc. |
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | */ | ||
14 | 4 | ||
15 | #include <linux/module.h> | 5 | #include <linux/module.h> |
16 | #include <linux/platform_device.h> | 6 | #include <linux/platform_device.h> |
@@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
197 | int ret; | 187 | int ret; |
198 | struct qoriq_tmu_data *data; | 188 | struct qoriq_tmu_data *data; |
199 | struct device_node *np = pdev->dev.of_node; | 189 | struct device_node *np = pdev->dev.of_node; |
200 | u32 site = 0; | 190 | u32 site; |
201 | 191 | ||
202 | if (!np) { | 192 | if (!np) { |
203 | dev_err(&pdev->dev, "Device OF-Node is NULL"); | 193 | dev_err(&pdev->dev, "Device OF-Node is NULL"); |
@@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
233 | if (ret < 0) | 223 | if (ret < 0) |
234 | goto err_tmu; | 224 | goto err_tmu; |
235 | 225 | ||
236 | data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id, | 226 | data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, |
237 | data, &tmu_tz_ops); | 227 | data->sensor_id, |
228 | data, &tmu_tz_ops); | ||
238 | if (IS_ERR(data->tz)) { | 229 | if (IS_ERR(data->tz)) { |
239 | ret = PTR_ERR(data->tz); | 230 | ret = PTR_ERR(data->tz); |
240 | dev_err(&pdev->dev, | 231 | dev_err(&pdev->dev, |
@@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
243 | } | 234 | } |
244 | 235 | ||
245 | /* Enable monitoring */ | 236 | /* Enable monitoring */ |
246 | site |= 0x1 << (15 - data->sensor_id); | 237 | site = 0x1 << (15 - data->sensor_id); |
247 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); | 238 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); |
248 | 239 | ||
249 | return 0; | 240 | return 0; |
@@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev) | |||
261 | { | 252 | { |
262 | struct qoriq_tmu_data *data = platform_get_drvdata(pdev); | 253 | struct qoriq_tmu_data *data = platform_get_drvdata(pdev); |
263 | 254 | ||
264 | thermal_zone_of_sensor_unregister(&pdev->dev, data->tz); | ||
265 | |||
266 | /* Disable monitoring */ | 255 | /* Disable monitoring */ |
267 | tmu_write(data, TMR_DISABLE, &data->regs->tmr); | 256 | tmu_write(data, TMR_DISABLE, &data->regs->tmr); |
268 | 257 | ||
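Switching to devm_thermal_zone_of_sensor_register() ties the sensor's lifetime to the struct device, which is why the explicit thermal_zone_of_sensor_unregister() call in the remove() path can be dropped. A minimal sketch of the devm pattern under assumed driver names (example_tmu, example_tz_ops):

#include <linux/platform_device.h>
#include <linux/thermal.h>

struct example_tmu {
	struct thermal_zone_device *tz;	/* hypothetical driver state */
};

static const struct thermal_zone_of_device_ops example_tz_ops;	/* assumed ops */

static int example_thermal_probe(struct platform_device *pdev)
{
	struct example_tmu *data;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Unregistered automatically on unbind, so remove() needs no
	 * thermal_zone_of_sensor_unregister() call. */
	data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
							data, &example_tz_ops);
	return PTR_ERR_OR_ZERO(data->tz);
}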
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 766521eb7071..7aed5337bdd3 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c | |||
@@ -1,19 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * R-Car Gen3 THS thermal sensor driver | 3 | * R-Car Gen3 THS thermal sensor driver |
3 | * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. | 4 | * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. |
4 | * | 5 | * |
5 | * Copyright (C) 2016 Renesas Electronics Corporation. | 6 | * Copyright (C) 2016 Renesas Electronics Corporation. |
6 | * Copyright (C) 2016 Sang Engineering | 7 | * Copyright (C) 2016 Sang Engineering |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | */ | 8 | */ |
18 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
19 | #include <linux/err.h> | 10 | #include <linux/err.h> |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index e77e63070e99..78f932822d38 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
@@ -1,21 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * R-Car THS/TSC thermal sensor driver | 3 | * R-Car THS/TSC thermal sensor driver |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2012 Renesas Solutions Corp. |
5 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | 6 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; version 2 of the License. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along | ||
17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
19 | */ | 7 | */ |
20 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
21 | #include <linux/err.h> | 9 | #include <linux/err.h> |
@@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = { | |||
660 | }; | 648 | }; |
661 | module_platform_driver(rcar_thermal_driver); | 649 | module_platform_driver(rcar_thermal_driver); |
662 | 650 | ||
663 | MODULE_LICENSE("GPL"); | 651 | MODULE_LICENSE("GPL v2"); |
664 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); | 652 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); |
665 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); | 653 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); |
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index f2088838f690..5b471889d723 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev, | |||
402 | } | 402 | } |
403 | static DEVICE_ATTR_RO(modalias); | 403 | static DEVICE_ATTR_RO(modalias); |
404 | 404 | ||
405 | static ssize_t state_show(struct device *dev, | ||
406 | struct device_attribute *attr, char *buf) | ||
407 | { | ||
408 | return sprintf(buf, "%s\n", | ||
409 | xenbus_strstate(to_xenbus_device(dev)->state)); | ||
410 | } | ||
411 | static DEVICE_ATTR_RO(state); | ||
412 | |||
405 | static struct attribute *xenbus_dev_attrs[] = { | 413 | static struct attribute *xenbus_dev_attrs[] = { |
406 | &dev_attr_nodename.attr, | 414 | &dev_attr_nodename.attr, |
407 | &dev_attr_devtype.attr, | 415 | &dev_attr_devtype.attr, |
408 | &dev_attr_modalias.attr, | 416 | &dev_attr_modalias.attr, |
417 | &dev_attr_state.attr, | ||
409 | NULL, | 418 | NULL, |
410 | }; | 419 | }; |
411 | 420 | ||
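The new read-only 'state' attribute exposes the XenBus connection state string (via xenbus_strstate()) through sysfs. From user space it reads like any other attribute; the path below is only an example, since the device directory name varies per backend:

#include <stdio.h>

int main(void)
{
	char state[32];
	/* example path; adjust the bus and device directory to your system */
	FILE *f = fopen("/sys/bus/xen-backend/devices/vif-1-0/state", "r");

	if (f && fgets(state, sizeof(state), f))
		printf("backend state: %s", state);
	if (f)
		fclose(f);
	return 0;
}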
diff --git a/fs/buffer.c b/fs/buffer.c index 4cc679d5bf58..6f1ae3ac9789 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/buffer_head.h> | 39 | #include <linux/buffer_head.h> |
40 | #include <linux/task_io_accounting_ops.h> | 40 | #include <linux/task_io_accounting_ops.h> |
41 | #include <linux/bio.h> | 41 | #include <linux/bio.h> |
42 | #include <linux/notifier.h> | ||
43 | #include <linux/cpu.h> | 42 | #include <linux/cpu.h> |
44 | #include <linux/bitops.h> | 43 | #include <linux/bitops.h> |
45 | #include <linux/mpage.h> | 44 | #include <linux/mpage.h> |
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index ec3fba7d492f..488a9e7f8f66 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/mpage.h> | 24 | #include <linux/mpage.h> |
25 | #include <linux/user_namespace.h> | 25 | #include <linux/user_namespace.h> |
26 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
27 | #include <linux/blkdev.h> | ||
27 | 28 | ||
28 | #include "isofs.h" | 29 | #include "isofs.h" |
29 | #include "zisofs.h" | 30 | #include "zisofs.h" |
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) | |||
653 | /* | 654 | /* |
654 | * What if bugger tells us to go beyond page size? | 655 | * What if bugger tells us to go beyond page size? |
655 | */ | 656 | */ |
657 | if (bdev_logical_block_size(s->s_bdev) > 2048) { | ||
658 | printk(KERN_WARNING | ||
659 | "ISOFS: unsupported/invalid hardware sector size %d\n", | ||
660 | bdev_logical_block_size(s->s_bdev)); | ||
661 | goto out_freesbi; | ||
662 | } | ||
656 | opt.blocksize = sb_min_blocksize(s, opt.blocksize); | 663 | opt.blocksize = sb_min_blocksize(s, opt.blocksize); |
657 | 664 | ||
658 | sbi->s_high_sierra = 0; /* default is iso9660 */ | 665 | sbi->s_high_sierra = 0; /* default is iso9660 */ |
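ISO 9660 cannot address logical sectors larger than 2048 bytes, so the new check refuses devices whose logical block size exceeds that, instead of failing less clearly later in mount. The same early-validation pattern, written generically with a hypothetical limit and filesystem name:

#include <linux/fs.h>
#include <linux/blkdev.h>

#define EXAMPLE_MAX_BSIZE 2048	/* hypothetical format limit */

static int example_check_bdev(struct super_block *sb)
{
	if (bdev_logical_block_size(sb->s_bdev) > EXAMPLE_MAX_BSIZE) {
		pr_warn("example-fs: unsupported logical block size %d\n",
			bdev_logical_block_size(sb->s_bdev));
		return -EINVAL;
	}
	return 0;
}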
diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 05506d60131c..59cdb27826de 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c | |||
@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) | |||
132 | struct fsnotify_mark *mark; | 132 | struct fsnotify_mark *mark; |
133 | 133 | ||
134 | assert_spin_locked(&conn->lock); | 134 | assert_spin_locked(&conn->lock); |
135 | /* We can get detached connector here when inode is getting unlinked. */ | ||
136 | if (!fsnotify_valid_obj_type(conn->type)) | ||
137 | return; | ||
135 | hlist_for_each_entry(mark, &conn->list, obj_list) { | 138 | hlist_for_each_entry(mark, &conn->list, obj_list) { |
136 | if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) | 139 | if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) |
137 | new_mask |= mark->mask; | 140 | new_mask |= mark->mask; |
138 | } | 141 | } |
139 | if (WARN_ON(!fsnotify_valid_obj_type(conn->type))) | ||
140 | return; | ||
141 | |||
142 | *fsnotify_conn_mask_p(conn) = new_mask; | 142 | *fsnotify_conn_mask_p(conn) = new_mask; |
143 | } | 143 | } |
144 | 144 | ||
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 860bfbe7a07a..f0cbf58ad4da 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/quotaops.h> | 18 | #include <linux/quotaops.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/writeback.h> | 20 | #include <linux/writeback.h> |
21 | #include <linux/nospec.h> | ||
21 | 22 | ||
22 | static int check_quotactl_permission(struct super_block *sb, int type, int cmd, | 23 | static int check_quotactl_permission(struct super_block *sb, int type, int cmd, |
23 | qid_t id) | 24 | qid_t id) |
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr) | |||
120 | struct if_dqinfo uinfo; | 121 | struct if_dqinfo uinfo; |
121 | int ret; | 122 | int ret; |
122 | 123 | ||
123 | /* This checks whether qc_state has enough entries... */ | ||
124 | BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS); | ||
125 | if (!sb->s_qcop->get_state) | 124 | if (!sb->s_qcop->get_state) |
126 | return -ENOSYS; | 125 | return -ENOSYS; |
127 | ret = sb->s_qcop->get_state(sb, &state); | 126 | ret = sb->s_qcop->get_state(sb, &state); |
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs) | |||
354 | * GETXSTATE quotactl has space for just one set of time limits so | 353 | * GETXSTATE quotactl has space for just one set of time limits so |
355 | * report them for the first enabled quota type | 354 | * report them for the first enabled quota type |
356 | */ | 355 | */ |
357 | for (type = 0; type < XQM_MAXQUOTAS; type++) | 356 | for (type = 0; type < MAXQUOTAS; type++) |
358 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) | 357 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) |
359 | break; | 358 | break; |
360 | BUG_ON(type == XQM_MAXQUOTAS); | 359 | BUG_ON(type == MAXQUOTAS); |
361 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; | 360 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; |
362 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; | 361 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; |
363 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; | 362 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; |
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs) | |||
427 | * GETXSTATV quotactl has space for just one set of time limits so | 426 | * GETXSTATV quotactl has space for just one set of time limits so |
428 | * report them for the first enabled quota type | 427 | * report them for the first enabled quota type |
429 | */ | 428 | */ |
430 | for (type = 0; type < XQM_MAXQUOTAS; type++) | 429 | for (type = 0; type < MAXQUOTAS; type++) |
431 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) | 430 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) |
432 | break; | 431 | break; |
433 | BUG_ON(type == XQM_MAXQUOTAS); | 432 | BUG_ON(type == MAXQUOTAS); |
434 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; | 433 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; |
435 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; | 434 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; |
436 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; | 435 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; |
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, | |||
701 | { | 700 | { |
702 | int ret; | 701 | int ret; |
703 | 702 | ||
704 | if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) | 703 | if (type >= MAXQUOTAS) |
705 | return -EINVAL; | 704 | return -EINVAL; |
705 | type = array_index_nospec(type, MAXQUOTAS); | ||
706 | /* | 706 | /* |
707 | * Quota not supported on this fs? Check this before s_quota_types | 707 | * Quota not supported on this fs? Check this before s_quota_types |
708 | * since they needn't be set if quota is not supported at all. | 708 | * since they needn't be set if quota is not supported at all. |
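The quotactl path now clamps the user-supplied type with array_index_nospec() after the bounds check, so a mispredicted branch cannot be used to read past the per-type arrays (Spectre variant 1). The general pattern looks like this; the table and function names are illustrative:

#include <linux/quota.h>
#include <linux/nospec.h>
#include <linux/errno.h>

static int example_type_to_flag(unsigned int type)
{
	static const int flags[MAXQUOTAS];	/* illustrative per-type table */

	if (type >= MAXQUOTAS)
		return -EINVAL;
	/* Clamp the index under speculation as well as architecturally,
	 * so a mispredicted branch cannot index past the table. */
	type = array_index_nospec(type, MAXQUOTAS);
	return flags[type];
}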
diff --git a/fs/udf/super.c b/fs/udf/super.c index 3040dc2a32f6..6f515651a2c2 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb, | |||
764 | struct kernel_lb_addr *root) | 764 | struct kernel_lb_addr *root) |
765 | { | 765 | { |
766 | struct buffer_head *bh = NULL; | 766 | struct buffer_head *bh = NULL; |
767 | long lastblock; | ||
768 | uint16_t ident; | 767 | uint16_t ident; |
769 | struct udf_sb_info *sbi; | ||
770 | 768 | ||
771 | if (fileset->logicalBlockNum != 0xFFFFFFFF || | 769 | if (fileset->logicalBlockNum != 0xFFFFFFFF || |
772 | fileset->partitionReferenceNum != 0xFFFF) { | 770 | fileset->partitionReferenceNum != 0xFFFF) { |
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb, | |||
779 | return 1; | 777 | return 1; |
780 | } | 778 | } |
781 | 779 | ||
782 | } | ||
783 | |||
784 | sbi = UDF_SB(sb); | ||
785 | if (!bh) { | ||
786 | /* Search backwards through the partitions */ | ||
787 | struct kernel_lb_addr newfileset; | ||
788 | |||
789 | /* --> cvg: FIXME - is it reasonable? */ | ||
790 | return 1; | ||
791 | |||
792 | for (newfileset.partitionReferenceNum = sbi->s_partitions - 1; | ||
793 | (newfileset.partitionReferenceNum != 0xFFFF && | ||
794 | fileset->logicalBlockNum == 0xFFFFFFFF && | ||
795 | fileset->partitionReferenceNum == 0xFFFF); | ||
796 | newfileset.partitionReferenceNum--) { | ||
797 | lastblock = sbi->s_partmaps | ||
798 | [newfileset.partitionReferenceNum] | ||
799 | .s_partition_len; | ||
800 | newfileset.logicalBlockNum = 0; | ||
801 | |||
802 | do { | ||
803 | bh = udf_read_ptagged(sb, &newfileset, 0, | ||
804 | &ident); | ||
805 | if (!bh) { | ||
806 | newfileset.logicalBlockNum++; | ||
807 | continue; | ||
808 | } | ||
809 | |||
810 | switch (ident) { | ||
811 | case TAG_IDENT_SBD: | ||
812 | { | ||
813 | struct spaceBitmapDesc *sp; | ||
814 | sp = (struct spaceBitmapDesc *) | ||
815 | bh->b_data; | ||
816 | newfileset.logicalBlockNum += 1 + | ||
817 | ((le32_to_cpu(sp->numOfBytes) + | ||
818 | sizeof(struct spaceBitmapDesc) | ||
819 | - 1) >> sb->s_blocksize_bits); | ||
820 | brelse(bh); | ||
821 | break; | ||
822 | } | ||
823 | case TAG_IDENT_FSD: | ||
824 | *fileset = newfileset; | ||
825 | break; | ||
826 | default: | ||
827 | newfileset.logicalBlockNum++; | ||
828 | brelse(bh); | ||
829 | bh = NULL; | ||
830 | break; | ||
831 | } | ||
832 | } while (newfileset.logicalBlockNum < lastblock && | ||
833 | fileset->logicalBlockNum == 0xFFFFFFFF && | ||
834 | fileset->partitionReferenceNum == 0xFFFF); | ||
835 | } | ||
836 | } | ||
837 | |||
838 | if ((fileset->logicalBlockNum != 0xFFFFFFFF || | ||
839 | fileset->partitionReferenceNum != 0xFFFF) && bh) { | ||
840 | udf_debug("Fileset at block=%u, partition=%u\n", | 780 | udf_debug("Fileset at block=%u, partition=%u\n", |
841 | fileset->logicalBlockNum, | 781 | fileset->logicalBlockNum, |
842 | fileset->partitionReferenceNum); | 782 | fileset->partitionReferenceNum); |
843 | 783 | ||
844 | sbi->s_partition = fileset->partitionReferenceNum; | 784 | UDF_SB(sb)->s_partition = fileset->partitionReferenceNum; |
845 | udf_load_fileset(sb, bh, root); | 785 | udf_load_fileset(sb, bh, root); |
846 | brelse(bh); | 786 | brelse(bh); |
847 | return 0; | 787 | return 0; |
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ | |||
1570 | */ | 1510 | */ |
1571 | #define PART_DESC_ALLOC_STEP 32 | 1511 | #define PART_DESC_ALLOC_STEP 32 |
1572 | 1512 | ||
1513 | struct part_desc_seq_scan_data { | ||
1514 | struct udf_vds_record rec; | ||
1515 | u32 partnum; | ||
1516 | }; | ||
1517 | |||
1573 | struct desc_seq_scan_data { | 1518 | struct desc_seq_scan_data { |
1574 | struct udf_vds_record vds[VDS_POS_LENGTH]; | 1519 | struct udf_vds_record vds[VDS_POS_LENGTH]; |
1575 | unsigned int size_part_descs; | 1520 | unsigned int size_part_descs; |
1576 | struct udf_vds_record *part_descs_loc; | 1521 | unsigned int num_part_descs; |
1522 | struct part_desc_seq_scan_data *part_descs_loc; | ||
1577 | }; | 1523 | }; |
1578 | 1524 | ||
1579 | static struct udf_vds_record *handle_partition_descriptor( | 1525 | static struct udf_vds_record *handle_partition_descriptor( |
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor( | |||
1582 | { | 1528 | { |
1583 | struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; | 1529 | struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; |
1584 | int partnum; | 1530 | int partnum; |
1531 | int i; | ||
1585 | 1532 | ||
1586 | partnum = le16_to_cpu(desc->partitionNumber); | 1533 | partnum = le16_to_cpu(desc->partitionNumber); |
1587 | if (partnum >= data->size_part_descs) { | 1534 | for (i = 0; i < data->num_part_descs; i++) |
1588 | struct udf_vds_record *new_loc; | 1535 | if (partnum == data->part_descs_loc[i].partnum) |
1536 | return &(data->part_descs_loc[i].rec); | ||
1537 | if (data->num_part_descs >= data->size_part_descs) { | ||
1538 | struct part_desc_seq_scan_data *new_loc; | ||
1589 | unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); | 1539 | unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); |
1590 | 1540 | ||
1591 | new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); | 1541 | new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); |
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor( | |||
1597 | data->part_descs_loc = new_loc; | 1547 | data->part_descs_loc = new_loc; |
1598 | data->size_part_descs = new_size; | 1548 | data->size_part_descs = new_size; |
1599 | } | 1549 | } |
1600 | return &(data->part_descs_loc[partnum]); | 1550 | return &(data->part_descs_loc[data->num_part_descs++].rec); |
1601 | } | 1551 | } |
1602 | 1552 | ||
1603 | 1553 | ||
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence( | |||
1647 | 1597 | ||
1648 | memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); | 1598 | memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); |
1649 | data.size_part_descs = PART_DESC_ALLOC_STEP; | 1599 | data.size_part_descs = PART_DESC_ALLOC_STEP; |
1600 | data.num_part_descs = 0; | ||
1650 | data.part_descs_loc = kcalloc(data.size_part_descs, | 1601 | data.part_descs_loc = kcalloc(data.size_part_descs, |
1651 | sizeof(*data.part_descs_loc), | 1602 | sizeof(*data.part_descs_loc), |
1652 | GFP_KERNEL); | 1603 | GFP_KERNEL); |
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence( | |||
1658 | * are in it. | 1609 | * are in it. |
1659 | */ | 1610 | */ |
1660 | for (; (!done && block <= lastblock); block++) { | 1611 | for (; (!done && block <= lastblock); block++) { |
1661 | |||
1662 | bh = udf_read_tagged(sb, block, block, &ident); | 1612 | bh = udf_read_tagged(sb, block, block, &ident); |
1663 | if (!bh) | 1613 | if (!bh) |
1664 | break; | 1614 | break; |
@@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence( | |||
1730 | } | 1680 | } |
1731 | 1681 | ||
1732 | /* Now handle prevailing Partition Descriptors */ | 1682 | /* Now handle prevailing Partition Descriptors */ |
1733 | for (i = 0; i < data.size_part_descs; i++) { | 1683 | for (i = 0; i < data.num_part_descs; i++) { |
1734 | if (data.part_descs_loc[i].block) { | 1684 | ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); |
1735 | ret = udf_load_partdesc(sb, | 1685 | if (ret < 0) |
1736 | data.part_descs_loc[i].block); | 1686 | return ret; |
1737 | if (ret < 0) | ||
1738 | return ret; | ||
1739 | } | ||
1740 | } | 1687 | } |
1741 | 1688 | ||
1742 | return 0; | 1689 | return 0; |
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index ca1d2cc2cdfa..18863d56273c 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h | |||
@@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, | |||
199 | 199 | ||
200 | #define __declare_arg_0(a0, res) \ | 200 | #define __declare_arg_0(a0, res) \ |
201 | struct arm_smccc_res *___res = res; \ | 201 | struct arm_smccc_res *___res = res; \ |
202 | register u32 r0 asm("r0") = a0; \ | 202 | register unsigned long r0 asm("r0") = (u32)a0; \ |
203 | register unsigned long r1 asm("r1"); \ | 203 | register unsigned long r1 asm("r1"); \ |
204 | register unsigned long r2 asm("r2"); \ | 204 | register unsigned long r2 asm("r2"); \ |
205 | register unsigned long r3 asm("r3") | 205 | register unsigned long r3 asm("r3") |
206 | 206 | ||
207 | #define __declare_arg_1(a0, a1, res) \ | 207 | #define __declare_arg_1(a0, a1, res) \ |
208 | typeof(a1) __a1 = a1; \ | ||
208 | struct arm_smccc_res *___res = res; \ | 209 | struct arm_smccc_res *___res = res; \ |
209 | register u32 r0 asm("r0") = a0; \ | 210 | register unsigned long r0 asm("r0") = (u32)a0; \ |
210 | register typeof(a1) r1 asm("r1") = a1; \ | 211 | register unsigned long r1 asm("r1") = __a1; \ |
211 | register unsigned long r2 asm("r2"); \ | 212 | register unsigned long r2 asm("r2"); \ |
212 | register unsigned long r3 asm("r3") | 213 | register unsigned long r3 asm("r3") |
213 | 214 | ||
214 | #define __declare_arg_2(a0, a1, a2, res) \ | 215 | #define __declare_arg_2(a0, a1, a2, res) \ |
216 | typeof(a1) __a1 = a1; \ | ||
217 | typeof(a2) __a2 = a2; \ | ||
215 | struct arm_smccc_res *___res = res; \ | 218 | struct arm_smccc_res *___res = res; \ |
216 | register u32 r0 asm("r0") = a0; \ | 219 | register unsigned long r0 asm("r0") = (u32)a0; \ |
217 | register typeof(a1) r1 asm("r1") = a1; \ | 220 | register unsigned long r1 asm("r1") = __a1; \ |
218 | register typeof(a2) r2 asm("r2") = a2; \ | 221 | register unsigned long r2 asm("r2") = __a2; \ |
219 | register unsigned long r3 asm("r3") | 222 | register unsigned long r3 asm("r3") |
220 | 223 | ||
221 | #define __declare_arg_3(a0, a1, a2, a3, res) \ | 224 | #define __declare_arg_3(a0, a1, a2, a3, res) \ |
225 | typeof(a1) __a1 = a1; \ | ||
226 | typeof(a2) __a2 = a2; \ | ||
227 | typeof(a3) __a3 = a3; \ | ||
222 | struct arm_smccc_res *___res = res; \ | 228 | struct arm_smccc_res *___res = res; \ |
223 | register u32 r0 asm("r0") = a0; \ | 229 | register unsigned long r0 asm("r0") = (u32)a0; \ |
224 | register typeof(a1) r1 asm("r1") = a1; \ | 230 | register unsigned long r1 asm("r1") = __a1; \ |
225 | register typeof(a2) r2 asm("r2") = a2; \ | 231 | register unsigned long r2 asm("r2") = __a2; \ |
226 | register typeof(a3) r3 asm("r3") = a3 | 232 | register unsigned long r3 asm("r3") = __a3 |
227 | 233 | ||
228 | #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ | 234 | #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ |
235 | typeof(a4) __a4 = a4; \ | ||
229 | __declare_arg_3(a0, a1, a2, a3, res); \ | 236 | __declare_arg_3(a0, a1, a2, a3, res); \ |
230 | register typeof(a4) r4 asm("r4") = a4 | 237 | register unsigned long r4 asm("r4") = __a4 |
231 | 238 | ||
232 | #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ | 239 | #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ |
240 | typeof(a5) __a5 = a5; \ | ||
233 | __declare_arg_4(a0, a1, a2, a3, a4, res); \ | 241 | __declare_arg_4(a0, a1, a2, a3, a4, res); \ |
234 | register typeof(a5) r5 asm("r5") = a5 | 242 | register unsigned long r5 asm("r5") = __a5 |
235 | 243 | ||
236 | #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ | 244 | #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ |
245 | typeof(a6) __a6 = a6; \ | ||
237 | __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ | 246 | __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ |
238 | register typeof(a6) r6 asm("r6") = a6 | 247 | register unsigned long r6 asm("r6") = __a6 |
239 | 248 | ||
240 | #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ | 249 | #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ |
250 | typeof(a7) __a7 = a7; \ | ||
241 | __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ | 251 | __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ |
242 | register typeof(a7) r7 asm("r7") = a7 | 252 | register unsigned long r7 asm("r7") = __a7 |
243 | 253 | ||
244 | #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) | 254 | #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) |
245 | #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) | 255 | #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) |
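The __declare_arg_*() macros previously bound each argument to a register variable of typeof(aN); with a narrower type such as u8 or u16 the upper register bits are not guaranteed to be zeroed, which breaks the SMC calling convention. The rework first widens each argument into a local, then loads it into an unsigned long register variable. Call sites are unchanged; a hedged example of a caller with mixed-width arguments (the function ID is made up):

#include <linux/arm-smccc.h>

static void example_smccc_call(u16 small_arg, unsigned long big_arg)
{
	struct arm_smccc_res res;

	/* Hypothetical function ID; both arguments end up zero-extended
	 * into full-width registers by the reworked macros. */
	arm_smccc_1_1_smc(0x82000001, small_arg, big_arg, &res);
	if (res.a0 != 0)
		pr_warn("example SMCCC call failed: %ld\n", (long)res.a0);
}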
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b79387fd57da..65b4eaed1d96 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) | |||
855 | } | 855 | } |
856 | 856 | ||
857 | u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); | 857 | u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); |
858 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); | 858 | void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred); |
859 | 859 | ||
860 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); | 860 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); |
861 | /** | 861 | /** |
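The DMA-safe buffer helper is renamed from i2c_release_dma_safe_msg_buf() to i2c_put_dma_safe_msg_buf(), and its new xferred flag tells the core whether a bounce buffer should be copied back into the message before being freed. A hedged sketch of the intended pairing in a bus driver; the hardware transfer helpers are hypothetical:

#include <linux/i2c.h>

int example_hw_dma_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, u8 *buf);	/* hypothetical */
int example_hw_pio_xfer(struct i2c_adapter *adap, struct i2c_msg *msg);		/* hypothetical */

static int example_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg)
{
	u8 *buf;
	int ret;

	/* Bounce DMA-unsafe buffers; the 8-byte threshold is arbitrary here. */
	buf = i2c_get_dma_safe_msg_buf(msg, 8);
	if (!buf)
		return example_hw_pio_xfer(adap, msg);	/* short or unusable message */

	ret = example_hw_dma_xfer(adap, msg, buf);

	/* Copies a bounce buffer back into msg->buf only when xferred is true. */
	i2c_put_dma_safe_msg_buf(buf, msg, ret == 0);
	return ret;
}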
diff --git a/include/linux/of.h b/include/linux/of.h index 4d25e4f952d9..99b0ebf49632 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) | |||
256 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) | 256 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) |
257 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) | 257 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) |
258 | 258 | ||
259 | extern bool of_node_name_eq(const struct device_node *np, const char *name); | ||
260 | extern bool of_node_name_prefix(const struct device_node *np, const char *prefix); | ||
261 | |||
259 | static inline const char *of_node_full_name(const struct device_node *np) | 262 | static inline const char *of_node_full_name(const struct device_node *np) |
260 | { | 263 | { |
261 | return np ? np->full_name : "<no-node>"; | 264 | return np ? np->full_name : "<no-node>"; |
@@ -290,6 +293,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node, | |||
290 | extern struct device_node *of_get_next_available_child( | 293 | extern struct device_node *of_get_next_available_child( |
291 | const struct device_node *node, struct device_node *prev); | 294 | const struct device_node *node, struct device_node *prev); |
292 | 295 | ||
296 | extern struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
297 | const char *compatible); | ||
293 | extern struct device_node *of_get_child_by_name(const struct device_node *node, | 298 | extern struct device_node *of_get_child_by_name(const struct device_node *node, |
294 | const char *name); | 299 | const char *name); |
295 | 300 | ||
@@ -561,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode) | |||
561 | return NULL; | 566 | return NULL; |
562 | } | 567 | } |
563 | 568 | ||
569 | static inline bool of_node_name_eq(const struct device_node *np, const char *name) | ||
570 | { | ||
571 | return false; | ||
572 | } | ||
573 | |||
574 | static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix) | ||
575 | { | ||
576 | return false; | ||
577 | } | ||
578 | |||
564 | static inline const char* of_node_full_name(const struct device_node *np) | 579 | static inline const char* of_node_full_name(const struct device_node *np) |
565 | { | 580 | { |
566 | return "<no-node>"; | 581 | return "<no-node>"; |
@@ -632,6 +647,12 @@ static inline bool of_have_populated_dt(void) | |||
632 | return false; | 647 | return false; |
633 | } | 648 | } |
634 | 649 | ||
650 | static inline struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
651 | const char *compatible) | ||
652 | { | ||
653 | return NULL; | ||
654 | } | ||
655 | |||
635 | static inline struct device_node *of_get_child_by_name( | 656 | static inline struct device_node *of_get_child_by_name( |
636 | const struct device_node *node, | 657 | const struct device_node *node, |
637 | const char *name) | 658 | const char *name) |
@@ -967,6 +988,18 @@ static inline struct device_node *of_find_matching_node( | |||
967 | return of_find_matching_node_and_match(from, matches, NULL); | 988 | return of_find_matching_node_and_match(from, matches, NULL); |
968 | } | 989 | } |
969 | 990 | ||
991 | static inline const char *of_node_get_device_type(const struct device_node *np) | ||
992 | { | ||
993 | return of_get_property(np, "type", NULL); | ||
994 | } | ||
995 | |||
996 | static inline bool of_node_is_type(const struct device_node *np, const char *type) | ||
997 | { | ||
998 | const char *match = of_node_get_device_type(np); | ||
999 | |||
1000 | return np && match && type && !strcmp(match, type); | ||
1001 | } | ||
1002 | |||
970 | /** | 1003 | /** |
971 | * of_property_count_u8_elems - Count the number of u8 elements in a property | 1004 | * of_property_count_u8_elems - Count the number of u8 elements in a property |
972 | * | 1005 | * |
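of_node_is_type() combines the device-type lookup with a string compare and tolerates NULL nodes, so type-based filtering can be written without open-coding of_get_property(). A short illustrative use; the type string is a placeholder:

#include <linux/of.h>

static void example_list_typed_children(struct device_node *parent)
{
	struct device_node *child;

	/* "example-type" stands in for a real device type string */
	for_each_child_of_node(parent, child) {
		if (of_node_is_type(child, "example-type"))
			pr_info("typed child: %pOF\n", child);
	}
}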
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 99d366cb0e9f..d157983b84cf 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -3084,4 +3084,6 @@ | |||
3084 | 3084 | ||
3085 | #define PCI_VENDOR_ID_OCZ 0x1b85 | 3085 | #define PCI_VENDOR_ID_OCZ 0x1b85 |
3086 | 3086 | ||
3087 | #define PCI_VENDOR_ID_NCUBE 0x10ff | ||
3088 | |||
3087 | #endif /* _LINUX_PCI_IDS_H */ | 3089 | #endif /* _LINUX_PCI_IDS_H */ |
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 9abc0ca7259b..9f0aa1b48c78 100644 --- a/include/linux/platform_data/ina2xx.h +++ b/include/linux/platform_data/ina2xx.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Driver for Texas Instruments INA219, INA226 power monitor chips | 2 | * Driver for Texas Instruments INA219, INA226 power monitor chips |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> | 4 | * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
diff --git a/include/linux/quota.h b/include/linux/quota.h index ca9772c8e48b..f32dd270b8e3 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -408,13 +408,7 @@ struct qc_type_state { | |||
408 | 408 | ||
409 | struct qc_state { | 409 | struct qc_state { |
410 | unsigned int s_incoredqs; /* Number of dquots in core */ | 410 | unsigned int s_incoredqs; /* Number of dquots in core */ |
411 | /* | 411 | struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */ |
412 | * Per quota type information. The array should really have | ||
413 | * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in | ||
414 | * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS | ||
415 | * supports project quotas, this can be changed to MAXQUOTAS | ||
416 | */ | ||
417 | struct qc_type_state s_state[XQM_MAXQUOTAS]; | ||
418 | }; | 412 | }; |
419 | 413 | ||
420 | /* Structure for communicating via ->set_info */ | 414 | /* Structure for communicating via ->set_info */ |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 46c4cbf54903..33c2a1d2a8d2 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -4876,8 +4876,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator); | |||
4876 | * | 4876 | * |
4877 | * Return: 0 on success. -ENODATA. | 4877 | * Return: 0 on success. -ENODATA. |
4878 | */ | 4878 | */ |
4879 | int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr, | 4879 | int reg_query_regdb_wmm(char *alpha2, int freq, |
4880 | struct ieee80211_wmm_rule *rule); | 4880 | struct ieee80211_reg_rule *rule); |
4881 | 4881 | ||
4882 | /* | 4882 | /* |
4883 | * callbacks for asynchronous cfg80211 methods, notification | 4883 | * callbacks for asynchronous cfg80211 methods, notification |
diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 60f8cc86a447..3469750df0f4 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h | |||
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule { | |||
217 | struct ieee80211_reg_rule { | 217 | struct ieee80211_reg_rule { |
218 | struct ieee80211_freq_range freq_range; | 218 | struct ieee80211_freq_range freq_range; |
219 | struct ieee80211_power_rule power_rule; | 219 | struct ieee80211_power_rule power_rule; |
220 | struct ieee80211_wmm_rule *wmm_rule; | 220 | struct ieee80211_wmm_rule wmm_rule; |
221 | u32 flags; | 221 | u32 flags; |
222 | u32 dfs_cac_ms; | 222 | u32 dfs_cac_ms; |
223 | bool has_wmm; | ||
223 | }; | 224 | }; |
224 | 225 | ||
225 | struct ieee80211_regdomain { | 226 | struct ieee80211_regdomain { |
226 | struct rcu_head rcu_head; | 227 | struct rcu_head rcu_head; |
227 | u32 n_reg_rules; | 228 | u32 n_reg_rules; |
228 | u32 n_wmm_rules; | ||
229 | char alpha2[3]; | 229 | char alpha2[3]; |
230 | enum nl80211_dfs_regions dfs_region; | 230 | enum nl80211_dfs_regions dfs_region; |
231 | struct ieee80211_reg_rule reg_rules[]; | 231 | struct ieee80211_reg_rule reg_rules[]; |
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index dc520e1a4123..8b73cb603c5f 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h | |||
@@ -37,6 +37,7 @@ | |||
37 | 37 | ||
38 | #include <linux/types.h> | 38 | #include <linux/types.h> |
39 | #include <linux/socket.h> /* For __kernel_sockaddr_storage. */ | 39 | #include <linux/socket.h> /* For __kernel_sockaddr_storage. */ |
40 | #include <linux/in6.h> /* For struct in6_addr. */ | ||
40 | 41 | ||
41 | #define RDS_IB_ABI_VERSION 0x301 | 42 | #define RDS_IB_ABI_VERSION 0x301 |
42 | 43 | ||
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index b1e22c40c4b6..84c3de89696a 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h | |||
@@ -176,7 +176,7 @@ struct vhost_memory { | |||
176 | #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 | 176 | #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 |
177 | 177 | ||
178 | #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) | 178 | #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) |
179 | #define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64) | 179 | #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) |
180 | 180 | ||
181 | /* VHOST_NET specific defines */ | 181 | /* VHOST_NET specific defines */ |
182 | 182 | ||
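VHOST_GET_BACKEND_FEATURES reads a feature mask out of the kernel, so its ioctl number must be declared with _IOR (data flows to user space) rather than _IOW; the direction bits are part of the computed ioctl value, so the wrong macro changes the number user space passes. Illustrative user-space usage, assuming an already-open vhost fd:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int example_get_backend_features(int vhost_fd, uint64_t *features)
{
	/* _IOR encodes "read": the kernel fills in *features. */
	return ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, features);
}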
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index cf5195c7c331..488ef9663c01 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
@@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk) | |||
236 | } | 236 | } |
237 | 237 | ||
238 | static void smap_release_sock(struct smap_psock *psock, struct sock *sock); | 238 | static void smap_release_sock(struct smap_psock *psock, struct sock *sock); |
239 | static int free_start_sg(struct sock *sk, struct sk_msg_buff *md); | 239 | static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge); |
240 | 240 | ||
241 | static void bpf_tcp_release(struct sock *sk) | 241 | static void bpf_tcp_release(struct sock *sk) |
242 | { | 242 | { |
@@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk) | |||
248 | goto out; | 248 | goto out; |
249 | 249 | ||
250 | if (psock->cork) { | 250 | if (psock->cork) { |
251 | free_start_sg(psock->sock, psock->cork); | 251 | free_start_sg(psock->sock, psock->cork, true); |
252 | kfree(psock->cork); | 252 | kfree(psock->cork); |
253 | psock->cork = NULL; | 253 | psock->cork = NULL; |
254 | } | 254 | } |
@@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
330 | close_fun = psock->save_close; | 330 | close_fun = psock->save_close; |
331 | 331 | ||
332 | if (psock->cork) { | 332 | if (psock->cork) { |
333 | free_start_sg(psock->sock, psock->cork); | 333 | free_start_sg(psock->sock, psock->cork, true); |
334 | kfree(psock->cork); | 334 | kfree(psock->cork); |
335 | psock->cork = NULL; | 335 | psock->cork = NULL; |
336 | } | 336 | } |
337 | 337 | ||
338 | list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { | 338 | list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { |
339 | list_del(&md->list); | 339 | list_del(&md->list); |
340 | free_start_sg(psock->sock, md); | 340 | free_start_sg(psock->sock, md, true); |
341 | kfree(md); | 341 | kfree(md); |
342 | } | 342 | } |
343 | 343 | ||
@@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
369 | /* If another thread deleted this object skip deletion. | 369 | /* If another thread deleted this object skip deletion. |
370 | * The refcnt on psock may or may not be zero. | 370 | * The refcnt on psock may or may not be zero. |
371 | */ | 371 | */ |
372 | if (l) { | 372 | if (l && l == link) { |
373 | hlist_del_rcu(&link->hash_node); | 373 | hlist_del_rcu(&link->hash_node); |
374 | smap_release_sock(psock, link->sk); | 374 | smap_release_sock(psock, link->sk); |
375 | free_htab_elem(htab, link); | 375 | free_htab_elem(htab, link); |
@@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes, | |||
570 | md->sg_start = i; | 570 | md->sg_start = i; |
571 | } | 571 | } |
572 | 572 | ||
573 | static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) | 573 | static int free_sg(struct sock *sk, int start, |
574 | struct sk_msg_buff *md, bool charge) | ||
574 | { | 575 | { |
575 | struct scatterlist *sg = md->sg_data; | 576 | struct scatterlist *sg = md->sg_data; |
576 | int i = start, free = 0; | 577 | int i = start, free = 0; |
577 | 578 | ||
578 | while (sg[i].length) { | 579 | while (sg[i].length) { |
579 | free += sg[i].length; | 580 | free += sg[i].length; |
580 | sk_mem_uncharge(sk, sg[i].length); | 581 | if (charge) |
582 | sk_mem_uncharge(sk, sg[i].length); | ||
581 | if (!md->skb) | 583 | if (!md->skb) |
582 | put_page(sg_page(&sg[i])); | 584 | put_page(sg_page(&sg[i])); |
583 | sg[i].length = 0; | 585 | sg[i].length = 0; |
@@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) | |||
594 | return free; | 596 | return free; |
595 | } | 597 | } |
596 | 598 | ||
597 | static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) | 599 | static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge) |
598 | { | 600 | { |
599 | int free = free_sg(sk, md->sg_start, md); | 601 | int free = free_sg(sk, md->sg_start, md, charge); |
600 | 602 | ||
601 | md->sg_start = md->sg_end; | 603 | md->sg_start = md->sg_end; |
602 | return free; | 604 | return free; |
@@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) | |||
604 | 606 | ||
605 | static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md) | 607 | static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md) |
606 | { | 608 | { |
607 | return free_sg(sk, md->sg_curr, md); | 609 | return free_sg(sk, md->sg_curr, md, true); |
608 | } | 610 | } |
609 | 611 | ||
610 | static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md) | 612 | static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md) |
@@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, | |||
718 | list_add_tail(&r->list, &psock->ingress); | 720 | list_add_tail(&r->list, &psock->ingress); |
719 | sk->sk_data_ready(sk); | 721 | sk->sk_data_ready(sk); |
720 | } else { | 722 | } else { |
721 | free_start_sg(sk, r); | 723 | free_start_sg(sk, r, true); |
722 | kfree(r); | 724 | kfree(r); |
723 | } | 725 | } |
724 | 726 | ||
@@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, | |||
752 | release_sock(sk); | 754 | release_sock(sk); |
753 | } | 755 | } |
754 | smap_release_sock(psock, sk); | 756 | smap_release_sock(psock, sk); |
755 | if (unlikely(err)) | 757 | return err; |
756 | goto out; | ||
757 | return 0; | ||
758 | out_rcu: | 758 | out_rcu: |
759 | rcu_read_unlock(); | 759 | rcu_read_unlock(); |
760 | out: | 760 | return 0; |
761 | free_bytes_sg(NULL, send, md, false); | ||
762 | return err; | ||
763 | } | 761 | } |
764 | 762 | ||
765 | static inline void bpf_md_init(struct smap_psock *psock) | 763 | static inline void bpf_md_init(struct smap_psock *psock) |
@@ -822,7 +820,7 @@ more_data: | |||
822 | case __SK_PASS: | 820 | case __SK_PASS: |
823 | err = bpf_tcp_push(sk, send, m, flags, true); | 821 | err = bpf_tcp_push(sk, send, m, flags, true); |
824 | if (unlikely(err)) { | 822 | if (unlikely(err)) { |
825 | *copied -= free_start_sg(sk, m); | 823 | *copied -= free_start_sg(sk, m, true); |
826 | break; | 824 | break; |
827 | } | 825 | } |
828 | 826 | ||
@@ -845,16 +843,17 @@ more_data: | |||
845 | lock_sock(sk); | 843 | lock_sock(sk); |
846 | 844 | ||
847 | if (unlikely(err < 0)) { | 845 | if (unlikely(err < 0)) { |
848 | free_start_sg(sk, m); | 846 | int free = free_start_sg(sk, m, false); |
847 | |||
849 | psock->sg_size = 0; | 848 | psock->sg_size = 0; |
850 | if (!cork) | 849 | if (!cork) |
851 | *copied -= send; | 850 | *copied -= free; |
852 | } else { | 851 | } else { |
853 | psock->sg_size -= send; | 852 | psock->sg_size -= send; |
854 | } | 853 | } |
855 | 854 | ||
856 | if (cork) { | 855 | if (cork) { |
857 | free_start_sg(sk, m); | 856 | free_start_sg(sk, m, true); |
858 | psock->sg_size = 0; | 857 | psock->sg_size = 0; |
859 | kfree(m); | 858 | kfree(m); |
860 | m = NULL; | 859 | m = NULL; |
@@ -912,6 +911,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
912 | 911 | ||
913 | if (unlikely(flags & MSG_ERRQUEUE)) | 912 | if (unlikely(flags & MSG_ERRQUEUE)) |
914 | return inet_recv_error(sk, msg, len, addr_len); | 913 | return inet_recv_error(sk, msg, len, addr_len); |
914 | if (!skb_queue_empty(&sk->sk_receive_queue)) | ||
915 | return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); | ||
915 | 916 | ||
916 | rcu_read_lock(); | 917 | rcu_read_lock(); |
917 | psock = smap_psock_sk(sk); | 918 | psock = smap_psock_sk(sk); |
@@ -922,9 +923,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
922 | goto out; | 923 | goto out; |
923 | rcu_read_unlock(); | 924 | rcu_read_unlock(); |
924 | 925 | ||
925 | if (!skb_queue_empty(&sk->sk_receive_queue)) | ||
926 | return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); | ||
927 | |||
928 | lock_sock(sk); | 926 | lock_sock(sk); |
929 | bytes_ready: | 927 | bytes_ready: |
930 | while (copied != len) { | 928 | while (copied != len) { |
@@ -1122,7 +1120,7 @@ wait_for_memory: | |||
1122 | err = sk_stream_wait_memory(sk, &timeo); | 1120 | err = sk_stream_wait_memory(sk, &timeo); |
1123 | if (err) { | 1121 | if (err) { |
1124 | if (m && m != psock->cork) | 1122 | if (m && m != psock->cork) |
1125 | free_start_sg(sk, m); | 1123 | free_start_sg(sk, m, true); |
1126 | goto out_err; | 1124 | goto out_err; |
1127 | } | 1125 | } |
1128 | } | 1126 | } |
@@ -1464,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu) | |||
1464 | schedule_work(&psock->gc_work); | 1462 | schedule_work(&psock->gc_work); |
1465 | } | 1463 | } |
1466 | 1464 | ||
1465 | static bool psock_is_smap_sk(struct sock *sk) | ||
1466 | { | ||
1467 | return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops; | ||
1468 | } | ||
1469 | |||
1467 | static void smap_release_sock(struct smap_psock *psock, struct sock *sock) | 1470 | static void smap_release_sock(struct smap_psock *psock, struct sock *sock) |
1468 | { | 1471 | { |
1469 | if (refcount_dec_and_test(&psock->refcnt)) { | 1472 | if (refcount_dec_and_test(&psock->refcnt)) { |
1470 | tcp_cleanup_ulp(sock); | 1473 | if (psock_is_smap_sk(sock)) |
1474 | tcp_cleanup_ulp(sock); | ||
1471 | write_lock_bh(&sock->sk_callback_lock); | 1475 | write_lock_bh(&sock->sk_callback_lock); |
1472 | smap_stop_sock(psock, sock); | 1476 | smap_stop_sock(psock, sock); |
1473 | write_unlock_bh(&sock->sk_callback_lock); | 1477 | write_unlock_bh(&sock->sk_callback_lock); |
@@ -1581,13 +1585,13 @@ static void smap_gc_work(struct work_struct *w) | |||
1581 | bpf_prog_put(psock->bpf_tx_msg); | 1585 | bpf_prog_put(psock->bpf_tx_msg); |
1582 | 1586 | ||
1583 | if (psock->cork) { | 1587 | if (psock->cork) { |
1584 | free_start_sg(psock->sock, psock->cork); | 1588 | free_start_sg(psock->sock, psock->cork, true); |
1585 | kfree(psock->cork); | 1589 | kfree(psock->cork); |
1586 | } | 1590 | } |
1587 | 1591 | ||
1588 | list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { | 1592 | list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { |
1589 | list_del(&md->list); | 1593 | list_del(&md->list); |
1590 | free_start_sg(psock->sock, md); | 1594 | free_start_sg(psock->sock, md, true); |
1591 | kfree(md); | 1595 | kfree(md); |
1592 | } | 1596 | } |
1593 | 1597 | ||
@@ -1894,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
1894 | * doesn't update user data. | 1898 | * doesn't update user data. |
1895 | */ | 1899 | */ |
1896 | if (psock) { | 1900 | if (psock) { |
1901 | if (!psock_is_smap_sk(sock)) { | ||
1902 | err = -EBUSY; | ||
1903 | goto out_progs; | ||
1904 | } | ||
1897 | if (READ_ONCE(psock->bpf_parse) && parse) { | 1905 | if (READ_ONCE(psock->bpf_parse) && parse) { |
1898 | err = -EBUSY; | 1906 | err = -EBUSY; |
1899 | goto out_progs; | 1907 | goto out_progs; |
diff --git a/kernel/cpu.c b/kernel/cpu.c index ed44d7d34c2d..aa7fe85ad62e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { } | |||
102 | * @name: Name of the step | 102 | * @name: Name of the step |
103 | * @startup: Startup function of the step | 103 | * @startup: Startup function of the step |
104 | * @teardown: Teardown function of the step | 104 | * @teardown: Teardown function of the step |
105 | * @skip_onerr: Do not invoke the functions on error rollback | ||
106 | * Will go away once the notifiers are gone | ||
107 | * @cant_stop: Bringup/teardown can't be stopped at this step | 105 | * @cant_stop: Bringup/teardown can't be stopped at this step |
108 | */ | 106 | */ |
109 | struct cpuhp_step { | 107 | struct cpuhp_step { |
@@ -119,7 +117,6 @@ struct cpuhp_step { | |||
119 | struct hlist_node *node); | 117 | struct hlist_node *node); |
120 | } teardown; | 118 | } teardown; |
121 | struct hlist_head list; | 119 | struct hlist_head list; |
122 | bool skip_onerr; | ||
123 | bool cant_stop; | 120 | bool cant_stop; |
124 | bool multi_instance; | 121 | bool multi_instance; |
125 | }; | 122 | }; |
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu) | |||
550 | 547 | ||
551 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) | 548 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) |
552 | { | 549 | { |
553 | for (st->state--; st->state > st->target; st->state--) { | 550 | for (st->state--; st->state > st->target; st->state--) |
554 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 551 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
555 | |||
556 | if (!step->skip_onerr) | ||
557 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); | ||
558 | } | ||
559 | } | 552 | } |
560 | 553 | ||
561 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 554 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
644 | 637 | ||
645 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); | 638 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); |
646 | 639 | ||
647 | if (st->rollback) { | ||
648 | struct cpuhp_step *step = cpuhp_get_step(state); | ||
649 | if (step->skip_onerr) | ||
650 | goto next; | ||
651 | } | ||
652 | |||
653 | if (cpuhp_is_atomic_state(state)) { | 640 | if (cpuhp_is_atomic_state(state)) { |
654 | local_irq_disable(); | 641 | local_irq_disable(); |
655 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); | 642 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); |
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
673 | st->should_run = false; | 660 | st->should_run = false; |
674 | } | 661 | } |
675 | 662 | ||
676 | next: | ||
677 | cpuhp_lock_release(bringup); | 663 | cpuhp_lock_release(bringup); |
678 | 664 | ||
679 | if (!st->should_run) | 665 | if (!st->should_run) |
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void) | |||
916 | 902 | ||
917 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) | 903 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) |
918 | { | 904 | { |
919 | for (st->state++; st->state < st->target; st->state++) { | 905 | for (st->state++; st->state < st->target; st->state++) |
920 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 906 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
921 | |||
922 | if (!step->skip_onerr) | ||
923 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); | ||
924 | } | ||
925 | } | 907 | } |
926 | 908 | ||
927 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 909 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 1c35b7b945d0..de87b0282e74 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c | |||
@@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, | |||
168 | int dma_direct_supported(struct device *dev, u64 mask) | 168 | int dma_direct_supported(struct device *dev, u64 mask) |
169 | { | 169 | { |
170 | #ifdef CONFIG_ZONE_DMA | 170 | #ifdef CONFIG_ZONE_DMA |
171 | if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) | 171 | if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))) |
172 | return 0; | 172 | return 0; |
173 | #else | 173 | #else |
174 | /* | 174 | /* |
@@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask) | |||
177 | * memory, or by providing a ZONE_DMA32. If neither is the case, the | 177 | * memory, or by providing a ZONE_DMA32. If neither is the case, the |
178 | * architecture needs to use an IOMMU instead of the direct mapping. | 178 | * architecture needs to use an IOMMU instead of the direct mapping. |
179 | */ | 179 | */ |
180 | if (mask < DMA_BIT_MASK(32)) | 180 | if (mask < phys_to_dma(dev, DMA_BIT_MASK(32))) |
181 | return 0; | 181 | return 0; |
182 | #endif | 182 | #endif |
183 | /* | 183 | /* |
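dma_direct_supported() used to compare the device mask against a CPU physical-address limit; on platforms with an offset between CPU and bus addresses that comparison happens in the wrong address space. Translating the limit with phys_to_dma() keeps the check consistent with how dma-direct actually programs addresses. A conceptual sketch of the idea, not the exact kernel code:

#include <linux/dma-direct.h>

/* Decide whether a direct-mapped device mask covers a CPU-side limit,
 * comparing in bus-address space rather than CPU physical space. */
static bool example_mask_covers(struct device *dev, u64 mask, phys_addr_t cpu_limit)
{
	/* phys_to_dma() applies the platform's CPU-to-bus translation. */
	return mask >= phys_to_dma(dev, cpu_limit);
}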
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 924e37fb1620..fd6f8ed28e01 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/kmsg_dump.h> | 38 | #include <linux/kmsg_dump.h> |
39 | #include <linux/syslog.h> | 39 | #include <linux/syslog.h> |
40 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
41 | #include <linux/notifier.h> | ||
42 | #include <linux/rculist.h> | 41 | #include <linux/rculist.h> |
43 | #include <linux/poll.h> | 42 | #include <linux/poll.h> |
44 | #include <linux/irq_work.h> | 43 | #include <linux/irq_work.h> |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5470dce212c0..977918d5d350 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -261,7 +261,7 @@ static void __touch_watchdog(void) | |||
261 | * entering idle state. This should only be used for scheduler events. | 261 | * entering idle state. This should only be used for scheduler events. |
262 | * Use touch_softlockup_watchdog() for everything else. | 262 | * Use touch_softlockup_watchdog() for everything else. |
263 | */ | 263 | */ |
264 | void touch_softlockup_watchdog_sched(void) | 264 | notrace void touch_softlockup_watchdog_sched(void) |
265 | { | 265 | { |
266 | /* | 266 | /* |
267 | * Preemption can be enabled. It doesn't matter which CPU's timestamp | 267 | * Preemption can be enabled. It doesn't matter which CPU's timestamp |
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void) | |||
270 | raw_cpu_write(watchdog_touch_ts, 0); | 270 | raw_cpu_write(watchdog_touch_ts, 0); |
271 | } | 271 | } |
272 | 272 | ||
273 | void touch_softlockup_watchdog(void) | 273 | notrace void touch_softlockup_watchdog(void) |
274 | { | 274 | { |
275 | touch_softlockup_watchdog_sched(); | 275 | touch_softlockup_watchdog_sched(); |
276 | wq_watchdog_touch(raw_smp_processor_id()); | 276 | wq_watchdog_touch(raw_smp_processor_id()); |
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 1f7020d65d0a..71381168dede 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c | |||
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask; | |||
29 | static unsigned long hardlockup_allcpu_dumped; | 29 | static unsigned long hardlockup_allcpu_dumped; |
30 | static atomic_t watchdog_cpus = ATOMIC_INIT(0); | 30 | static atomic_t watchdog_cpus = ATOMIC_INIT(0); |
31 | 31 | ||
32 | void arch_touch_nmi_watchdog(void) | 32 | notrace void arch_touch_nmi_watchdog(void) |
33 | { | 33 | { |
34 | /* | 34 | /* |
35 | * Using __raw here because some code paths have | 35 | * Using __raw here because some code paths have |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 60e80198c3df..0280deac392e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) | |||
5574 | mod_timer(&wq_watchdog_timer, jiffies + thresh); | 5574 | mod_timer(&wq_watchdog_timer, jiffies + thresh); |
5575 | } | 5575 | } |
5576 | 5576 | ||
5577 | void wq_watchdog_touch(int cpu) | 5577 | notrace void wq_watchdog_touch(int cpu) |
5578 | { | 5578 | { |
5579 | if (cpu >= 0) | 5579 | if (cpu >= 0) |
5580 | per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; | 5580 | per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; |
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index c72577e472f2..a66595ba5543 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -4,7 +4,6 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/percpu_counter.h> | 6 | #include <linux/percpu_counter.h> |
7 | #include <linux/notifier.h> | ||
8 | #include <linux/mutex.h> | 7 | #include <linux/mutex.h> |
9 | #include <linux/init.h> | 8 | #include <linux/init.h> |
10 | #include <linux/cpu.h> | 9 | #include <linux/cpu.h> |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6551d3b0dc30..84ae9bf5858a 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/mpage.h> | 27 | #include <linux/mpage.h> |
28 | #include <linux/rmap.h> | 28 | #include <linux/rmap.h> |
29 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
30 | #include <linux/notifier.h> | ||
31 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
32 | #include <linux/sysctl.h> | 31 | #include <linux/sysctl.h> |
33 | #include <linux/cpu.h> | 32 | #include <linux/cpu.h> |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e75865d58ba7..05e983f42316 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/ratelimit.h> | 33 | #include <linux/ratelimit.h> |
34 | #include <linux/oom.h> | 34 | #include <linux/oom.h> |
35 | #include <linux/notifier.h> | ||
36 | #include <linux/topology.h> | 35 | #include <linux/topology.h> |
37 | #include <linux/sysctl.h> | 36 | #include <linux/sysctl.h> |
38 | #include <linux/cpu.h> | 37 | #include <linux/cpu.h> |
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include "slab.h" | 20 | #include "slab.h" |
21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/notifier.h> | ||
23 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
24 | #include <linux/kasan.h> | 23 | #include <linux/kasan.h> |
25 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
diff --git a/net/core/dev.c b/net/core/dev.c index 325fc5088370..82114e1111e6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -93,7 +93,6 @@ | |||
93 | #include <linux/netdevice.h> | 93 | #include <linux/netdevice.h> |
94 | #include <linux/etherdevice.h> | 94 | #include <linux/etherdevice.h> |
95 | #include <linux/ethtool.h> | 95 | #include <linux/ethtool.h> |
96 | #include <linux/notifier.h> | ||
97 | #include <linux/skbuff.h> | 96 | #include <linux/skbuff.h> |
98 | #include <linux/bpf.h> | 97 | #include <linux/bpf.h> |
99 | #include <linux/bpf_trace.h> | 98 | #include <linux/bpf_trace.h> |
diff --git a/net/core/filter.c b/net/core/filter.c index feb578506009..d301134bca3a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { | |||
2282 | .arg2_type = ARG_ANYTHING, | 2282 | .arg2_type = ARG_ANYTHING, |
2283 | }; | 2283 | }; |
2284 | 2284 | ||
2285 | #define sk_msg_iter_var(var) \ | ||
2286 | do { \ | ||
2287 | var++; \ | ||
2288 | if (var == MAX_SKB_FRAGS) \ | ||
2289 | var = 0; \ | ||
2290 | } while (0) | ||
2291 | |||
2285 | BPF_CALL_4(bpf_msg_pull_data, | 2292 | BPF_CALL_4(bpf_msg_pull_data, |
2286 | struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) | 2293 | struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) |
2287 | { | 2294 | { |
2288 | unsigned int len = 0, offset = 0, copy = 0; | 2295 | unsigned int len = 0, offset = 0, copy = 0, poffset = 0; |
2296 | int bytes = end - start, bytes_sg_total; | ||
2289 | struct scatterlist *sg = msg->sg_data; | 2297 | struct scatterlist *sg = msg->sg_data; |
2290 | int first_sg, last_sg, i, shift; | 2298 | int first_sg, last_sg, i, shift; |
2291 | unsigned char *p, *to, *from; | 2299 | unsigned char *p, *to, *from; |
2292 | int bytes = end - start; | ||
2293 | struct page *page; | 2300 | struct page *page; |
2294 | 2301 | ||
2295 | if (unlikely(flags || end <= start)) | 2302 | if (unlikely(flags || end <= start)) |
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2299 | i = msg->sg_start; | 2306 | i = msg->sg_start; |
2300 | do { | 2307 | do { |
2301 | len = sg[i].length; | 2308 | len = sg[i].length; |
2302 | offset += len; | ||
2303 | if (start < offset + len) | 2309 | if (start < offset + len) |
2304 | break; | 2310 | break; |
2305 | i++; | 2311 | offset += len; |
2306 | if (i == MAX_SKB_FRAGS) | 2312 | sk_msg_iter_var(i); |
2307 | i = 0; | ||
2308 | } while (i != msg->sg_end); | 2313 | } while (i != msg->sg_end); |
2309 | 2314 | ||
2310 | if (unlikely(start >= offset + len)) | 2315 | if (unlikely(start >= offset + len)) |
2311 | return -EINVAL; | 2316 | return -EINVAL; |
2312 | 2317 | ||
2313 | if (!msg->sg_copy[i] && bytes <= len) | ||
2314 | goto out; | ||
2315 | |||
2316 | first_sg = i; | 2318 | first_sg = i; |
2319 | /* The start may point into the sg element so we need to also | ||
2320 | * account for the headroom. | ||
2321 | */ | ||
2322 | bytes_sg_total = start - offset + bytes; | ||
2323 | if (!msg->sg_copy[i] && bytes_sg_total <= len) | ||
2324 | goto out; | ||
2317 | 2325 | ||
2318 | /* At this point we need to linearize multiple scatterlist | 2326 | /* At this point we need to linearize multiple scatterlist |
2319 | * elements or a single shared page. Either way we need to | 2327 | * elements or a single shared page. Either way we need to |
@@ -2327,37 +2335,32 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2327 | */ | 2335 | */ |
2328 | do { | 2336 | do { |
2329 | copy += sg[i].length; | 2337 | copy += sg[i].length; |
2330 | i++; | 2338 | sk_msg_iter_var(i); |
2331 | if (i == MAX_SKB_FRAGS) | 2339 | if (bytes_sg_total <= copy) |
2332 | i = 0; | ||
2333 | if (bytes < copy) | ||
2334 | break; | 2340 | break; |
2335 | } while (i != msg->sg_end); | 2341 | } while (i != msg->sg_end); |
2336 | last_sg = i; | 2342 | last_sg = i; |
2337 | 2343 | ||
2338 | if (unlikely(copy < end - start)) | 2344 | if (unlikely(bytes_sg_total > copy)) |
2339 | return -EINVAL; | 2345 | return -EINVAL; |
2340 | 2346 | ||
2341 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); | 2347 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); |
2342 | if (unlikely(!page)) | 2348 | if (unlikely(!page)) |
2343 | return -ENOMEM; | 2349 | return -ENOMEM; |
2344 | p = page_address(page); | 2350 | p = page_address(page); |
2345 | offset = 0; | ||
2346 | 2351 | ||
2347 | i = first_sg; | 2352 | i = first_sg; |
2348 | do { | 2353 | do { |
2349 | from = sg_virt(&sg[i]); | 2354 | from = sg_virt(&sg[i]); |
2350 | len = sg[i].length; | 2355 | len = sg[i].length; |
2351 | to = p + offset; | 2356 | to = p + poffset; |
2352 | 2357 | ||
2353 | memcpy(to, from, len); | 2358 | memcpy(to, from, len); |
2354 | offset += len; | 2359 | poffset += len; |
2355 | sg[i].length = 0; | 2360 | sg[i].length = 0; |
2356 | put_page(sg_page(&sg[i])); | 2361 | put_page(sg_page(&sg[i])); |
2357 | 2362 | ||
2358 | i++; | 2363 | sk_msg_iter_var(i); |
2359 | if (i == MAX_SKB_FRAGS) | ||
2360 | i = 0; | ||
2361 | } while (i != last_sg); | 2364 | } while (i != last_sg); |
2362 | 2365 | ||
2363 | sg[first_sg].length = copy; | 2366 | sg[first_sg].length = copy; |
@@ -2367,11 +2370,15 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2367 | * had a single entry though we can just replace it and | 2370 | * had a single entry though we can just replace it and |
2368 | * be done. Otherwise walk the ring and shift the entries. | 2371 | * be done. Otherwise walk the ring and shift the entries. |
2369 | */ | 2372 | */ |
2370 | shift = last_sg - first_sg - 1; | 2373 | WARN_ON_ONCE(last_sg == first_sg); |
2374 | shift = last_sg > first_sg ? | ||
2375 | last_sg - first_sg - 1 : | ||
2376 | MAX_SKB_FRAGS - first_sg + last_sg - 1; | ||
2371 | if (!shift) | 2377 | if (!shift) |
2372 | goto out; | 2378 | goto out; |
2373 | 2379 | ||
2374 | i = first_sg + 1; | 2380 | i = first_sg; |
2381 | sk_msg_iter_var(i); | ||
2375 | do { | 2382 | do { |
2376 | int move_from; | 2383 | int move_from; |
2377 | 2384 | ||
@@ -2388,15 +2395,13 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2388 | sg[move_from].page_link = 0; | 2395 | sg[move_from].page_link = 0; |
2389 | sg[move_from].offset = 0; | 2396 | sg[move_from].offset = 0; |
2390 | 2397 | ||
2391 | i++; | 2398 | sk_msg_iter_var(i); |
2392 | if (i == MAX_SKB_FRAGS) | ||
2393 | i = 0; | ||
2394 | } while (1); | 2399 | } while (1); |
2395 | msg->sg_end -= shift; | 2400 | msg->sg_end -= shift; |
2396 | if (msg->sg_end < 0) | 2401 | if (msg->sg_end < 0) |
2397 | msg->sg_end += MAX_SKB_FRAGS; | 2402 | msg->sg_end += MAX_SKB_FRAGS; |
2398 | out: | 2403 | out: |
2399 | msg->data = sg_virt(&sg[i]) + start - offset; | 2404 | msg->data = sg_virt(&sg[first_sg]) + start - offset; |
2400 | msg->data_end = msg->data + bytes; | 2405 | msg->data_end = msg->data + bytes; |
2401 | 2406 | ||
2402 | return 0; | 2407 | return 0; |
@@ -7298,7 +7303,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, | |||
7298 | break; | 7303 | break; |
7299 | 7304 | ||
7300 | case offsetof(struct sk_reuseport_md, ip_protocol): | 7305 | case offsetof(struct sk_reuseport_md, ip_protocol): |
7301 | BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE); | 7306 | BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); |
7302 | SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, | 7307 | SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, |
7303 | BPF_W, 0); | 7308 | BPF_W, 0); |
7304 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); | 7309 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); |
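
The bpf_msg_pull_data() rework above walks the scatterlist as a ring: the new sk_msg_iter_var() macro wraps the index at MAX_SKB_FRAGS, and bytes_sg_total adds the headroom between the element start and the requested offset before deciding whether linearization is needed. Below is a minimal standalone sketch of that walk; RING_SIZE, the element lengths and the start/end indices are made-up stand-ins for illustration, not kernel values.

#include <stdio.h>

/* Illustrative stand-in for the ring size; the kernel uses MAX_SKB_FRAGS. */
#define RING_SIZE 17

/* Advance a ring index and wrap at the end, as sk_msg_iter_var() does. */
static void ring_iter(unsigned int *i)
{
	(*i)++;
	if (*i == RING_SIZE)
		*i = 0;
}

int main(void)
{
	/* Hypothetical element lengths in a ring that wraps from 15 to 2. */
	unsigned int len[RING_SIZE] = { 0 };
	unsigned int start = 40, bytes = 30, offset = 0, i = 15, end = 2;
	unsigned int first, total;

	len[15] = 32; len[16] = 32; len[0] = 32; len[1] = 32;

	/* Find the element containing 'start'; offset is only advanced when
	 * an element is skipped, so it ends up as the offset of the element
	 * we stop at, mirroring the fixed loop above. */
	do {
		if (start < offset + len[i])
			break;
		offset += len[i];
		ring_iter(&i);
	} while (i != end);

	first = i;
	/* The pull may begin inside the element, so the headroom before
	 * 'start' has to be linearized along with the requested bytes. */
	total = start - offset + bytes;
	printf("first element %u, offset %u, bytes to linearize %u\n",
	       first, offset, total);
	return 0;
}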
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 24431e578310..60c928894a78 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol) | |||
324 | 324 | ||
325 | rtnl_lock(); | 325 | rtnl_lock(); |
326 | tab = rtnl_msg_handlers[protocol]; | 326 | tab = rtnl_msg_handlers[protocol]; |
327 | if (!tab) { | ||
328 | rtnl_unlock(); | ||
329 | return; | ||
330 | } | ||
327 | RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); | 331 | RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); |
328 | for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { | 332 | for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { |
329 | link = tab[msgindex]; | 333 | link = tab[msgindex]; |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index e63c554e0623..9f3209ff7ffd 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -19,12 +19,10 @@ | |||
19 | #include <linux/of_mdio.h> | 19 | #include <linux/of_mdio.h> |
20 | #include <linux/of_platform.h> | 20 | #include <linux/of_platform.h> |
21 | #include <linux/of_net.h> | 21 | #include <linux/of_net.h> |
22 | #include <linux/of_gpio.h> | ||
23 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
24 | #include <linux/sysfs.h> | 23 | #include <linux/sysfs.h> |
25 | #include <linux/phy_fixed.h> | 24 | #include <linux/phy_fixed.h> |
26 | #include <linux/ptp_classify.h> | 25 | #include <linux/ptp_classify.h> |
27 | #include <linux/gpio/consumer.h> | ||
28 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
29 | 27 | ||
30 | #include "dsa_priv.h" | 28 | #include "dsa_priv.h" |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index cf75f8944b05..4da39446da2d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t) | |||
820 | spin_lock(&im->lock); | 820 | spin_lock(&im->lock); |
821 | im->tm_running = 0; | 821 | im->tm_running = 0; |
822 | 822 | ||
823 | if (im->unsolicit_count) { | 823 | if (im->unsolicit_count && --im->unsolicit_count) |
824 | im->unsolicit_count--; | ||
825 | igmp_start_timer(im, unsolicited_report_interval(in_dev)); | 824 | igmp_start_timer(im, unsolicited_report_interval(in_dev)); |
826 | } | 825 | |
827 | im->reporter = 1; | 826 | im->reporter = 1; |
828 | spin_unlock(&im->lock); | 827 | spin_unlock(&im->lock); |
829 | 828 | ||
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im) | |||
1308 | 1307 | ||
1309 | if (in_dev->dead) | 1308 | if (in_dev->dead) |
1310 | return; | 1309 | return; |
1310 | |||
1311 | im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; | ||
1311 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { | 1312 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { |
1312 | spin_lock_bh(&im->lock); | 1313 | spin_lock_bh(&im->lock); |
1313 | igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); | 1314 | igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); |
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, | |||
1391 | unsigned int mode) | 1392 | unsigned int mode) |
1392 | { | 1393 | { |
1393 | struct ip_mc_list *im; | 1394 | struct ip_mc_list *im; |
1394 | #ifdef CONFIG_IP_MULTICAST | ||
1395 | struct net *net = dev_net(in_dev->dev); | ||
1396 | #endif | ||
1397 | 1395 | ||
1398 | ASSERT_RTNL(); | 1396 | ASSERT_RTNL(); |
1399 | 1397 | ||
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, | |||
1420 | spin_lock_init(&im->lock); | 1418 | spin_lock_init(&im->lock); |
1421 | #ifdef CONFIG_IP_MULTICAST | 1419 | #ifdef CONFIG_IP_MULTICAST |
1422 | timer_setup(&im->timer, igmp_timer_expire, 0); | 1420 | timer_setup(&im->timer, igmp_timer_expire, 0); |
1423 | im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; | ||
1424 | #endif | 1421 | #endif |
1425 | 1422 | ||
1426 | im->next_rcu = in_dev->mc_list; | 1423 | im->next_rcu = in_dev->mc_list; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 51a5d06085ac..ae714aecc31c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -1508,11 +1508,14 @@ nla_put_failure: | |||
1508 | 1508 | ||
1509 | static void erspan_setup(struct net_device *dev) | 1509 | static void erspan_setup(struct net_device *dev) |
1510 | { | 1510 | { |
1511 | struct ip_tunnel *t = netdev_priv(dev); | ||
1512 | |||
1511 | ether_setup(dev); | 1513 | ether_setup(dev); |
1512 | dev->netdev_ops = &erspan_netdev_ops; | 1514 | dev->netdev_ops = &erspan_netdev_ops; |
1513 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1515 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
1514 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1516 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
1515 | ip_tunnel_setup(dev, erspan_net_id); | 1517 | ip_tunnel_setup(dev, erspan_net_id); |
1518 | t->erspan_ver = 1; | ||
1516 | } | 1519 | } |
1517 | 1520 | ||
1518 | static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { | 1521 | static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 75ef332a7caf..12affb7864d9 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -184,8 +184,9 @@ kill: | |||
184 | inet_twsk_deschedule_put(tw); | 184 | inet_twsk_deschedule_put(tw); |
185 | return TCP_TW_SUCCESS; | 185 | return TCP_TW_SUCCESS; |
186 | } | 186 | } |
187 | } else { | ||
188 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); | ||
187 | } | 189 | } |
188 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); | ||
189 | 190 | ||
190 | if (tmp_opt.saw_tstamp) { | 191 | if (tmp_opt.saw_tstamp) { |
191 | tcptw->tw_ts_recent = tmp_opt.rcv_tsval; | 192 | tcptw->tw_ts_recent = tmp_opt.rcv_tsval; |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 673bba31eb18..9a4261e50272 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -938,14 +938,14 @@ static int __init inet6_init(void) | |||
938 | 938 | ||
939 | err = proto_register(&pingv6_prot, 1); | 939 | err = proto_register(&pingv6_prot, 1); |
940 | if (err) | 940 | if (err) |
941 | goto out_unregister_ping_proto; | 941 | goto out_unregister_raw_proto; |
942 | 942 | ||
943 | /* We MUST register RAW sockets before we create the ICMP6, | 943 | /* We MUST register RAW sockets before we create the ICMP6, |
944 | * IGMP6, or NDISC control sockets. | 944 | * IGMP6, or NDISC control sockets. |
945 | */ | 945 | */ |
946 | err = rawv6_init(); | 946 | err = rawv6_init(); |
947 | if (err) | 947 | if (err) |
948 | goto out_unregister_raw_proto; | 948 | goto out_unregister_ping_proto; |
949 | 949 | ||
950 | /* Register the family here so that the init calls below will | 950 | /* Register the family here so that the init calls below will |
951 | * be able to create sockets. (?? is this dangerous ??) | 951 | * be able to create sockets. (?? is this dangerous ??) |
@@ -1113,11 +1113,11 @@ netfilter_fail: | |||
1113 | igmp_fail: | 1113 | igmp_fail: |
1114 | ndisc_cleanup(); | 1114 | ndisc_cleanup(); |
1115 | ndisc_fail: | 1115 | ndisc_fail: |
1116 | ip6_mr_cleanup(); | 1116 | icmpv6_cleanup(); |
1117 | icmp_fail: | 1117 | icmp_fail: |
1118 | unregister_pernet_subsys(&inet6_net_ops); | 1118 | ip6_mr_cleanup(); |
1119 | ipmr_fail: | 1119 | ipmr_fail: |
1120 | icmpv6_cleanup(); | 1120 | unregister_pernet_subsys(&inet6_net_ops); |
1121 | register_pernet_fail: | 1121 | register_pernet_fail: |
1122 | sock_unregister(PF_INET6); | 1122 | sock_unregister(PF_INET6); |
1123 | rtnl_unregister_all(PF_INET6); | 1123 | rtnl_unregister_all(PF_INET6); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index c861a6d4671d..5516f55e214b 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -989,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
989 | fib6_clean_expires(iter); | 989 | fib6_clean_expires(iter); |
990 | else | 990 | else |
991 | fib6_set_expires(iter, rt->expires); | 991 | fib6_set_expires(iter, rt->expires); |
992 | fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); | 992 | |
993 | if (rt->fib6_pmtu) | ||
994 | fib6_metric_set(iter, RTAX_MTU, | ||
995 | rt->fib6_pmtu); | ||
993 | return -EEXIST; | 996 | return -EEXIST; |
994 | } | 997 | } |
995 | /* If we have the same destination and the same metric, | 998 | /* If we have the same destination and the same metric, |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 18a3794b0f52..e493b041d4ac 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[], | |||
1778 | if (data[IFLA_GRE_COLLECT_METADATA]) | 1778 | if (data[IFLA_GRE_COLLECT_METADATA]) |
1779 | parms->collect_md = true; | 1779 | parms->collect_md = true; |
1780 | 1780 | ||
1781 | parms->erspan_ver = 1; | ||
1781 | if (data[IFLA_GRE_ERSPAN_VER]) | 1782 | if (data[IFLA_GRE_ERSPAN_VER]) |
1782 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | 1783 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); |
1783 | 1784 | ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 5df2a58d945c..419960b0ba16 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1188,7 +1188,15 @@ route_lookup: | |||
1188 | init_tel_txopt(&opt, encap_limit); | 1188 | init_tel_txopt(&opt, encap_limit); |
1189 | ipv6_push_frag_opts(skb, &opt.ops, &proto); | 1189 | ipv6_push_frag_opts(skb, &opt.ops, &proto); |
1190 | } | 1190 | } |
1191 | hop_limit = hop_limit ? : ip6_dst_hoplimit(dst); | 1191 | |
1192 | if (hop_limit == 0) { | ||
1193 | if (skb->protocol == htons(ETH_P_IP)) | ||
1194 | hop_limit = ip_hdr(skb)->ttl; | ||
1195 | else if (skb->protocol == htons(ETH_P_IPV6)) | ||
1196 | hop_limit = ipv6_hdr(skb)->hop_limit; | ||
1197 | else | ||
1198 | hop_limit = ip6_dst_hoplimit(dst); | ||
1199 | } | ||
1192 | 1200 | ||
1193 | /* Calculate max headroom for all the headers and adjust | 1201 | /* Calculate max headroom for all the headers and adjust |
1194 | * needed_headroom if necessary. | 1202 | * needed_headroom if necessary. |
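
The ip6_tunnel hunk above makes a zero hop limit inherit from the encapsulated packet before falling back to the route default. A standalone sketch of that selection order follows; the constants, struct and helper names are simplified stand-ins chosen for the example, not the kernel's headers or API.

#include <stdio.h>

/* Simplified stand-ins for the kernel's protocol constants and headers. */
#define P_IP   0x0800
#define P_IPV6 0x86DD

struct inner_pkt {
	unsigned short proto;		/* encapsulated protocol */
	unsigned char ipv4_ttl;		/* valid when proto == P_IP */
	unsigned char ipv6_hlim;	/* valid when proto == P_IPV6 */
};

/* Pick the hop limit for the outer IPv6 header: a configured non-zero
 * value wins; otherwise inherit from the inner packet; otherwise use the
 * route's default (a fixed stand-in for ip6_dst_hoplimit() here). */
static unsigned char pick_hop_limit(unsigned char configured,
				    const struct inner_pkt *pkt,
				    unsigned char route_default)
{
	if (configured)
		return configured;
	if (pkt->proto == P_IP)
		return pkt->ipv4_ttl;
	if (pkt->proto == P_IPV6)
		return pkt->ipv6_hlim;
	return route_default;
}

int main(void)
{
	struct inner_pkt v4 = { .proto = P_IP, .ipv4_ttl = 17 };
	struct inner_pkt v6 = { .proto = P_IPV6, .ipv6_hlim = 33 };

	printf("%u %u %u\n",
	       pick_hop_limit(0, &v4, 64),	/* inherits 17 */
	       pick_hop_limit(0, &v6, 64),	/* inherits 33 */
	       pick_hop_limit(9, &v4, 64));	/* configured 9 wins */
	return 0;
}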
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 5095367c7204..eeaf7455d51e 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) | |||
481 | } | 481 | } |
482 | 482 | ||
483 | mtu = dst_mtu(dst); | 483 | mtu = dst_mtu(dst); |
484 | if (!skb->ignore_df && skb->len > mtu) { | 484 | if (skb->len > mtu) { |
485 | skb_dst_update_pmtu(skb, mtu); | 485 | skb_dst_update_pmtu(skb, mtu); |
486 | 486 | ||
487 | if (skb->protocol == htons(ETH_P_IPV6)) { | 487 | if (skb->protocol == htons(ETH_P_IPV6)) { |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b5f385d2b0e9..0fa62acc923c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) | |||
996 | rt->rt6i_src = ort->fib6_src; | 996 | rt->rt6i_src = ort->fib6_src; |
997 | #endif | 997 | #endif |
998 | rt->rt6i_prefsrc = ort->fib6_prefsrc; | 998 | rt->rt6i_prefsrc = ort->fib6_prefsrc; |
999 | rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate); | ||
1000 | } | 999 | } |
1001 | 1000 | ||
1002 | static struct fib6_node* fib6_backtrack(struct fib6_node *fn, | 1001 | static struct fib6_node* fib6_backtrack(struct fib6_node *fn, |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 6449a1c2283b..f0f5fedb8caa 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, | |||
947 | if (len < IEEE80211_DEAUTH_FRAME_LEN) | 947 | if (len < IEEE80211_DEAUTH_FRAME_LEN) |
948 | return; | 948 | return; |
949 | 949 | ||
950 | ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n", | 950 | ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
951 | mgmt->sa, mgmt->da, mgmt->bssid, reason); | 951 | ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason); |
952 | sta_info_destroy_addr(sdata, mgmt->sa); | 952 | sta_info_destroy_addr(sdata, mgmt->sa); |
953 | } | 953 | } |
954 | 954 | ||
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, | |||
966 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); | 966 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); |
967 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); | 967 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); |
968 | 968 | ||
969 | ibss_dbg(sdata, | 969 | ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
970 | "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", | 970 | ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n", |
971 | mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); | 971 | mgmt->bssid, auth_transaction); |
972 | 972 | ||
973 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) | 973 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) |
974 | return; | 974 | return; |
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
1175 | rx_timestamp = drv_get_tsf(local, sdata); | 1175 | rx_timestamp = drv_get_tsf(local, sdata); |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | ibss_dbg(sdata, | 1178 | ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n", |
1179 | "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", | ||
1180 | mgmt->sa, mgmt->bssid, | 1179 | mgmt->sa, mgmt->bssid, |
1181 | (unsigned long long)rx_timestamp, | 1180 | (unsigned long long)rx_timestamp); |
1181 | ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n", | ||
1182 | (unsigned long long)beacon_timestamp, | 1182 | (unsigned long long)beacon_timestamp, |
1183 | (unsigned long long)(rx_timestamp - beacon_timestamp), | 1183 | (unsigned long long)(rx_timestamp - beacon_timestamp), |
1184 | jiffies); | 1184 | jiffies); |
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, | |||
1537 | 1537 | ||
1538 | tx_last_beacon = drv_tx_last_beacon(local); | 1538 | tx_last_beacon = drv_tx_last_beacon(local); |
1539 | 1539 | ||
1540 | ibss_dbg(sdata, | 1540 | ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
1541 | "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n", | 1541 | ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n", |
1542 | mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon); | 1542 | mgmt->bssid, tx_last_beacon); |
1543 | 1543 | ||
1544 | if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) | 1544 | if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) |
1545 | return; | 1545 | return; |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index e1215416a207..2d51eca46aa0 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work) | |||
256 | 256 | ||
257 | flush_work(&local->radar_detected_work); | 257 | flush_work(&local->radar_detected_work); |
258 | rtnl_lock(); | 258 | rtnl_lock(); |
259 | list_for_each_entry(sdata, &local->interfaces, list) | 259 | list_for_each_entry(sdata, &local->interfaces, list) { |
260 | /* | ||
261 | * XXX: there may be more work for other vif types and even | ||
262 | * for station mode: a good thing would be to run most of | ||
263 | * the iface type's dependent _stop (ieee80211_mg_stop, | ||
264 | * ieee80211_ibss_stop) etc... | ||
265 | * For now, fix only the specific bug that was seen: race | ||
266 | * between csa_connection_drop_work and us. | ||
267 | */ | ||
268 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
269 | /* | ||
270 | * This worker is scheduled from the iface worker that | ||
271 | * runs on mac80211's workqueue, so we can't be | ||
272 | * scheduling this worker after the cancel right here. | ||
273 | * The exception is ieee80211_chswitch_done. | ||
274 | * Then we can have a race... | ||
275 | */ | ||
276 | cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work); | ||
277 | } | ||
260 | flush_delayed_work(&sdata->dec_tailroom_needed_wk); | 278 | flush_delayed_work(&sdata->dec_tailroom_needed_wk); |
279 | } | ||
261 | ieee80211_scan_cancel(local); | 280 | ieee80211_scan_cancel(local); |
262 | 281 | ||
263 | /* make sure any new ROC will consider local->in_reconfig */ | 282 | /* make sure any new ROC will consider local->in_reconfig */ |
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { | |||
471 | cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | | 490 | cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | |
472 | IEEE80211_VHT_CAP_SHORT_GI_80 | | 491 | IEEE80211_VHT_CAP_SHORT_GI_80 | |
473 | IEEE80211_VHT_CAP_SHORT_GI_160 | | 492 | IEEE80211_VHT_CAP_SHORT_GI_160 | |
474 | IEEE80211_VHT_CAP_RXSTBC_1 | | 493 | IEEE80211_VHT_CAP_RXSTBC_MASK | |
475 | IEEE80211_VHT_CAP_RXSTBC_2 | | ||
476 | IEEE80211_VHT_CAP_RXSTBC_3 | | ||
477 | IEEE80211_VHT_CAP_RXSTBC_4 | | ||
478 | IEEE80211_VHT_CAP_TXSTBC | | 494 | IEEE80211_VHT_CAP_TXSTBC | |
479 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | | 495 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | |
480 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | | 496 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | |
@@ -1212,6 +1228,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1212 | #if IS_ENABLED(CONFIG_IPV6) | 1228 | #if IS_ENABLED(CONFIG_IPV6) |
1213 | unregister_inet6addr_notifier(&local->ifa6_notifier); | 1229 | unregister_inet6addr_notifier(&local->ifa6_notifier); |
1214 | #endif | 1230 | #endif |
1231 | ieee80211_txq_teardown_flows(local); | ||
1215 | 1232 | ||
1216 | rtnl_lock(); | 1233 | rtnl_lock(); |
1217 | 1234 | ||
@@ -1240,7 +1257,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1240 | skb_queue_purge(&local->skb_queue); | 1257 | skb_queue_purge(&local->skb_queue); |
1241 | skb_queue_purge(&local->skb_queue_unreliable); | 1258 | skb_queue_purge(&local->skb_queue_unreliable); |
1242 | skb_queue_purge(&local->skb_queue_tdls_chsw); | 1259 | skb_queue_purge(&local->skb_queue_tdls_chsw); |
1243 | ieee80211_txq_teardown_flows(local); | ||
1244 | 1260 | ||
1245 | destroy_workqueue(local->workqueue); | 1261 | destroy_workqueue(local->workqueue); |
1246 | wiphy_unregister(local->hw.wiphy); | 1262 | wiphy_unregister(local->hw.wiphy); |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 35ad3983ae4b..daf9db3c8f24 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | |||
572 | forward = false; | 572 | forward = false; |
573 | reply = true; | 573 | reply = true; |
574 | target_metric = 0; | 574 | target_metric = 0; |
575 | |||
576 | if (SN_GT(target_sn, ifmsh->sn)) | ||
577 | ifmsh->sn = target_sn; | ||
578 | |||
575 | if (time_after(jiffies, ifmsh->last_sn_update + | 579 | if (time_after(jiffies, ifmsh->last_sn_update + |
576 | net_traversal_jiffies(sdata)) || | 580 | net_traversal_jiffies(sdata)) || |
577 | time_before(jiffies, ifmsh->last_sn_update)) { | 581 | time_before(jiffies, ifmsh->last_sn_update)) { |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 7fb9957359a3..3dbecae4be73 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1073 | */ | 1073 | */ |
1074 | 1074 | ||
1075 | if (sdata->reserved_chanctx) { | 1075 | if (sdata->reserved_chanctx) { |
1076 | struct ieee80211_supported_band *sband = NULL; | ||
1077 | struct sta_info *mgd_sta = NULL; | ||
1078 | enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20; | ||
1079 | |||
1076 | /* | 1080 | /* |
1077 | * with multi-vif csa driver may call ieee80211_csa_finish() | 1081 | * with multi-vif csa driver may call ieee80211_csa_finish() |
1078 | * many times while waiting for other interfaces to use their | 1082 | * many times while waiting for other interfaces to use their |
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1081 | if (sdata->reserved_ready) | 1085 | if (sdata->reserved_ready) |
1082 | goto out; | 1086 | goto out; |
1083 | 1087 | ||
1088 | if (sdata->vif.bss_conf.chandef.width != | ||
1089 | sdata->csa_chandef.width) { | ||
1090 | /* | ||
1091 | * For managed interface, we need to also update the AP | ||
1092 | * station bandwidth and align the rate scale algorithm | ||
1093 | * on the bandwidth change. Here we only consider the | ||
1094 | * bandwidth of the new channel definition (as channel | ||
1095 | * switch flow does not have the full HT/VHT/HE | ||
1096 | * information), assuming that if additional changes are | ||
1097 | * required they would be done as part of the processing | ||
1098 | * of the next beacon from the AP. | ||
1099 | */ | ||
1100 | switch (sdata->csa_chandef.width) { | ||
1101 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
1102 | case NL80211_CHAN_WIDTH_20: | ||
1103 | default: | ||
1104 | bw = IEEE80211_STA_RX_BW_20; | ||
1105 | break; | ||
1106 | case NL80211_CHAN_WIDTH_40: | ||
1107 | bw = IEEE80211_STA_RX_BW_40; | ||
1108 | break; | ||
1109 | case NL80211_CHAN_WIDTH_80: | ||
1110 | bw = IEEE80211_STA_RX_BW_80; | ||
1111 | break; | ||
1112 | case NL80211_CHAN_WIDTH_80P80: | ||
1113 | case NL80211_CHAN_WIDTH_160: | ||
1114 | bw = IEEE80211_STA_RX_BW_160; | ||
1115 | break; | ||
1116 | } | ||
1117 | |||
1118 | mgd_sta = sta_info_get(sdata, ifmgd->bssid); | ||
1119 | sband = | ||
1120 | local->hw.wiphy->bands[sdata->csa_chandef.chan->band]; | ||
1121 | } | ||
1122 | |||
1123 | if (sdata->vif.bss_conf.chandef.width > | ||
1124 | sdata->csa_chandef.width) { | ||
1125 | mgd_sta->sta.bandwidth = bw; | ||
1126 | rate_control_rate_update(local, sband, mgd_sta, | ||
1127 | IEEE80211_RC_BW_CHANGED); | ||
1128 | } | ||
1129 | |||
1084 | ret = ieee80211_vif_use_reserved_context(sdata); | 1130 | ret = ieee80211_vif_use_reserved_context(sdata); |
1085 | if (ret) { | 1131 | if (ret) { |
1086 | sdata_info(sdata, | 1132 | sdata_info(sdata, |
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1091 | goto out; | 1137 | goto out; |
1092 | } | 1138 | } |
1093 | 1139 | ||
1140 | if (sdata->vif.bss_conf.chandef.width < | ||
1141 | sdata->csa_chandef.width) { | ||
1142 | mgd_sta->sta.bandwidth = bw; | ||
1143 | rate_control_rate_update(local, sband, mgd_sta, | ||
1144 | IEEE80211_RC_BW_CHANGED); | ||
1145 | } | ||
1146 | |||
1094 | goto out; | 1147 | goto out; |
1095 | } | 1148 | } |
1096 | 1149 | ||
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1312 | cbss->beacon_interval)); | 1365 | cbss->beacon_interval)); |
1313 | return; | 1366 | return; |
1314 | drop_connection: | 1367 | drop_connection: |
1368 | /* | ||
1369 | * This is just so that the disconnect flow will know that | ||
1370 | * we were trying to switch channel and failed. In case the | ||
1371 | * mode is 1 (we are not allowed to Tx), we will know not to | ||
1372 | * send a deauthentication frame. Those two fields will be | ||
1373 | * reset when the disconnection worker runs. | ||
1374 | */ | ||
1375 | sdata->vif.csa_active = true; | ||
1376 | sdata->csa_block_tx = csa_ie.mode; | ||
1377 | |||
1315 | ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); | 1378 | ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); |
1316 | mutex_unlock(&local->chanctx_mtx); | 1379 | mutex_unlock(&local->chanctx_mtx); |
1317 | mutex_unlock(&local->mtx); | 1380 | mutex_unlock(&local->mtx); |
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2522 | struct ieee80211_local *local = sdata->local; | 2585 | struct ieee80211_local *local = sdata->local; |
2523 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2586 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2524 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; | 2587 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
2588 | bool tx; | ||
2525 | 2589 | ||
2526 | sdata_lock(sdata); | 2590 | sdata_lock(sdata); |
2527 | if (!ifmgd->associated) { | 2591 | if (!ifmgd->associated) { |
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2529 | return; | 2593 | return; |
2530 | } | 2594 | } |
2531 | 2595 | ||
2596 | tx = !sdata->csa_block_tx; | ||
2597 | |||
2532 | /* AP is probably out of range (or not reachable for another reason) so | 2598 | /* AP is probably out of range (or not reachable for another reason) so |
2533 | * remove the bss struct for that AP. | 2599 | * remove the bss struct for that AP. |
2534 | */ | 2600 | */ |
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2536 | 2602 | ||
2537 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | 2603 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, |
2538 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, | 2604 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, |
2539 | true, frame_buf); | 2605 | tx, frame_buf); |
2540 | mutex_lock(&local->mtx); | 2606 | mutex_lock(&local->mtx); |
2541 | sdata->vif.csa_active = false; | 2607 | sdata->vif.csa_active = false; |
2542 | ifmgd->csa_waiting_bcn = false; | 2608 | ifmgd->csa_waiting_bcn = false; |
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2547 | } | 2613 | } |
2548 | mutex_unlock(&local->mtx); | 2614 | mutex_unlock(&local->mtx); |
2549 | 2615 | ||
2550 | ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, | 2616 | ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, |
2551 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); | 2617 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); |
2552 | 2618 | ||
2553 | sdata_unlock(sdata); | 2619 | sdata_unlock(sdata); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 134bbbc9cd6e..355357cc7dbd 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1728 | */ | 1728 | */ |
1729 | if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && | 1729 | if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && |
1730 | !ieee80211_has_morefrags(hdr->frame_control) && | 1730 | !ieee80211_has_morefrags(hdr->frame_control) && |
1731 | !is_multicast_ether_addr(hdr->addr1) && | ||
1731 | (ieee80211_is_mgmt(hdr->frame_control) || | 1732 | (ieee80211_is_mgmt(hdr->frame_control) || |
1732 | ieee80211_is_data(hdr->frame_control)) && | 1733 | ieee80211_is_data(hdr->frame_control)) && |
1733 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && | 1734 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 093108077edc..58502d29be54 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta) | |||
3078 | } | 3078 | } |
3079 | 3079 | ||
3080 | static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, | 3080 | static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, |
3081 | struct sk_buff *skb, int headroom, | 3081 | struct sk_buff *skb, int headroom) |
3082 | int *subframe_len) | ||
3083 | { | 3082 | { |
3084 | int amsdu_len = *subframe_len + sizeof(struct ethhdr); | 3083 | if (skb_headroom(skb) < headroom) { |
3085 | int padding = (4 - amsdu_len) & 3; | ||
3086 | |||
3087 | if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) { | ||
3088 | I802_DEBUG_INC(local->tx_expand_skb_head); | 3084 | I802_DEBUG_INC(local->tx_expand_skb_head); |
3089 | 3085 | ||
3090 | if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) { | 3086 | if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { |
3091 | wiphy_debug(local->hw.wiphy, | 3087 | wiphy_debug(local->hw.wiphy, |
3092 | "failed to reallocate TX buffer\n"); | 3088 | "failed to reallocate TX buffer\n"); |
3093 | return false; | 3089 | return false; |
3094 | } | 3090 | } |
3095 | } | 3091 | } |
3096 | 3092 | ||
3097 | if (padding) { | ||
3098 | *subframe_len += padding; | ||
3099 | skb_put_zero(skb, padding); | ||
3100 | } | ||
3101 | |||
3102 | return true; | 3093 | return true; |
3103 | } | 3094 | } |
3104 | 3095 | ||
@@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, | |||
3122 | if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) | 3113 | if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) |
3123 | return true; | 3114 | return true; |
3124 | 3115 | ||
3125 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr), | 3116 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr))) |
3126 | &subframe_len)) | ||
3127 | return false; | 3117 | return false; |
3128 | 3118 | ||
3129 | data = skb_push(skb, sizeof(*amsdu_hdr)); | 3119 | data = skb_push(skb, sizeof(*amsdu_hdr)); |
@@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3189 | void *data; | 3179 | void *data; |
3190 | bool ret = false; | 3180 | bool ret = false; |
3191 | unsigned int orig_len; | 3181 | unsigned int orig_len; |
3192 | int n = 1, nfrags; | 3182 | int n = 2, nfrags, pad = 0; |
3183 | u16 hdrlen; | ||
3193 | 3184 | ||
3194 | if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) | 3185 | if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) |
3195 | return false; | 3186 | return false; |
@@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3222 | if (skb->len + head->len > max_amsdu_len) | 3213 | if (skb->len + head->len > max_amsdu_len) |
3223 | goto out; | 3214 | goto out; |
3224 | 3215 | ||
3225 | if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) | ||
3226 | goto out; | ||
3227 | |||
3228 | nfrags = 1 + skb_shinfo(skb)->nr_frags; | 3216 | nfrags = 1 + skb_shinfo(skb)->nr_frags; |
3229 | nfrags += 1 + skb_shinfo(head)->nr_frags; | 3217 | nfrags += 1 + skb_shinfo(head)->nr_frags; |
3230 | frag_tail = &skb_shinfo(head)->frag_list; | 3218 | frag_tail = &skb_shinfo(head)->frag_list; |
@@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3240 | if (max_frags && nfrags > max_frags) | 3228 | if (max_frags && nfrags > max_frags) |
3241 | goto out; | 3229 | goto out; |
3242 | 3230 | ||
3243 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2, | 3231 | if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) |
3244 | &subframe_len)) | ||
3245 | goto out; | 3232 | goto out; |
3246 | 3233 | ||
3234 | /* | ||
3235 | * Pad out the previous subframe to a multiple of 4 by adding the | ||
3236 | * padding to the next one, that's being added. Note that head->len | ||
3237 | * is the length of the full A-MSDU, but that works since each time | ||
3238 | * we add a new subframe we pad out the previous one to a multiple | ||
3239 | * of 4 and thus it no longer matters in the next round. | ||
3240 | */ | ||
3241 | hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header); | ||
3242 | if ((head->len - hdrlen) & 3) | ||
3243 | pad = 4 - ((head->len - hdrlen) & 3); | ||
3244 | |||
3245 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + | ||
3246 | 2 + pad)) | ||
3247 | goto out_recalc; | ||
3248 | |||
3247 | ret = true; | 3249 | ret = true; |
3248 | data = skb_push(skb, ETH_ALEN + 2); | 3250 | data = skb_push(skb, ETH_ALEN + 2); |
3249 | memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); | 3251 | memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); |
@@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3253 | memcpy(data, &len, 2); | 3255 | memcpy(data, &len, 2); |
3254 | memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); | 3256 | memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); |
3255 | 3257 | ||
3258 | memset(skb_push(skb, pad), 0, pad); | ||
3259 | |||
3256 | head->len += skb->len; | 3260 | head->len += skb->len; |
3257 | head->data_len += skb->len; | 3261 | head->data_len += skb->len; |
3258 | *frag_tail = skb; | 3262 | *frag_tail = skb; |
3259 | 3263 | ||
3260 | flow->backlog += head->len - orig_len; | 3264 | out_recalc: |
3261 | tin->backlog_bytes += head->len - orig_len; | 3265 | if (head->len != orig_len) { |
3262 | 3266 | flow->backlog += head->len - orig_len; | |
3263 | fq_recalc_backlog(fq, tin, flow); | 3267 | tin->backlog_bytes += head->len - orig_len; |
3264 | 3268 | ||
3269 | fq_recalc_backlog(fq, tin, flow); | ||
3270 | } | ||
3265 | out: | 3271 | out: |
3266 | spin_unlock_bh(&fq->lock); | 3272 | spin_unlock_bh(&fq->lock); |
3267 | 3273 | ||
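
The A-MSDU change above pads the previous subframe to a 4-byte boundary by pushing the padding onto the front of the next subframe, computing pad = 4 - ((head->len - hdrlen) & 3) only when the remainder is non-zero. The alignment arithmetic is easy to check in isolation; the payload lengths below are made-up examples.

#include <stdio.h>

/* Padding needed to bring 'len' up to a multiple of 4, as computed for the
 * previous A-MSDU subframe before the next one is appended. */
static unsigned int amsdu_pad(unsigned int len)
{
	return (len & 3) ? 4 - (len & 3) : 0;
}

int main(void)
{
	/* Hypothetical subframe lengths (802.11 header already excluded). */
	unsigned int lens[] = { 60, 61, 62, 63, 64 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %u -> pad %u -> padded %u\n",
		       lens[i], amsdu_pad(lens[i]),
		       lens[i] + amsdu_pad(lens[i]));
	return 0;
}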
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d886789ff59e..b51fdcb5adf9 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1235,7 +1235,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
1235 | { | 1235 | { |
1236 | struct ieee80211_chanctx_conf *chanctx_conf; | 1236 | struct ieee80211_chanctx_conf *chanctx_conf; |
1237 | const struct ieee80211_reg_rule *rrule; | 1237 | const struct ieee80211_reg_rule *rrule; |
1238 | struct ieee80211_wmm_ac *wmm_ac; | 1238 | const struct ieee80211_wmm_ac *wmm_ac; |
1239 | u16 center_freq = 0; | 1239 | u16 center_freq = 0; |
1240 | 1240 | ||
1241 | if (sdata->vif.type != NL80211_IFTYPE_AP && | 1241 | if (sdata->vif.type != NL80211_IFTYPE_AP && |
@@ -1254,20 +1254,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
1254 | 1254 | ||
1255 | rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); | 1255 | rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); |
1256 | 1256 | ||
1257 | if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) { | 1257 | if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) { |
1258 | rcu_read_unlock(); | 1258 | rcu_read_unlock(); |
1259 | return; | 1259 | return; |
1260 | } | 1260 | } |
1261 | 1261 | ||
1262 | if (sdata->vif.type == NL80211_IFTYPE_AP) | 1262 | if (sdata->vif.type == NL80211_IFTYPE_AP) |
1263 | wmm_ac = &rrule->wmm_rule->ap[ac]; | 1263 | wmm_ac = &rrule->wmm_rule.ap[ac]; |
1264 | else | 1264 | else |
1265 | wmm_ac = &rrule->wmm_rule->client[ac]; | 1265 | wmm_ac = &rrule->wmm_rule.client[ac]; |
1266 | qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); | 1266 | qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); |
1267 | qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); | 1267 | qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); |
1268 | qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); | 1268 | qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); |
1269 | qparam->txop = !qparam->txop ? wmm_ac->cot / 32 : | 1269 | qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32); |
1270 | min_t(u16, qparam->txop, wmm_ac->cot / 32); | ||
1271 | rcu_read_unlock(); | 1270 | rcu_read_unlock(); |
1272 | } | 1271 | } |
1273 | 1272 | ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5610061e7f2e..75c92a87e7b2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = { | |||
4137 | .close = packet_mm_close, | 4137 | .close = packet_mm_close, |
4138 | }; | 4138 | }; |
4139 | 4139 | ||
4140 | static void free_pg_vec(struct pgv *pg_vec, unsigned int len) | 4140 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
4141 | unsigned int len) | ||
4141 | { | 4142 | { |
4142 | int i; | 4143 | int i; |
4143 | 4144 | ||
4144 | for (i = 0; i < len; i++) { | 4145 | for (i = 0; i < len; i++) { |
4145 | if (likely(pg_vec[i].buffer)) { | 4146 | if (likely(pg_vec[i].buffer)) { |
4146 | kvfree(pg_vec[i].buffer); | 4147 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
4148 | vfree(pg_vec[i].buffer); | ||
4149 | else | ||
4150 | free_pages((unsigned long)pg_vec[i].buffer, | ||
4151 | order); | ||
4147 | pg_vec[i].buffer = NULL; | 4152 | pg_vec[i].buffer = NULL; |
4148 | } | 4153 | } |
4149 | } | 4154 | } |
4150 | kfree(pg_vec); | 4155 | kfree(pg_vec); |
4151 | } | 4156 | } |
4152 | 4157 | ||
4153 | static char *alloc_one_pg_vec_page(unsigned long size) | 4158 | static char *alloc_one_pg_vec_page(unsigned long order) |
4154 | { | 4159 | { |
4155 | char *buffer; | 4160 | char *buffer; |
4161 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | | ||
4162 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; | ||
4156 | 4163 | ||
4157 | buffer = kvzalloc(size, GFP_KERNEL); | 4164 | buffer = (char *) __get_free_pages(gfp_flags, order); |
4158 | if (buffer) | 4165 | if (buffer) |
4159 | return buffer; | 4166 | return buffer; |
4160 | 4167 | ||
4161 | buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); | 4168 | /* __get_free_pages failed, fall back to vmalloc */ |
4169 | buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); | ||
4170 | if (buffer) | ||
4171 | return buffer; | ||
4162 | 4172 | ||
4163 | return buffer; | 4173 | /* vmalloc failed, lets dig into swap here */ |
4174 | gfp_flags &= ~__GFP_NORETRY; | ||
4175 | buffer = (char *) __get_free_pages(gfp_flags, order); | ||
4176 | if (buffer) | ||
4177 | return buffer; | ||
4178 | |||
4179 | /* complete and utter failure */ | ||
4180 | return NULL; | ||
4164 | } | 4181 | } |
4165 | 4182 | ||
4166 | static struct pgv *alloc_pg_vec(struct tpacket_req *req) | 4183 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
4167 | { | 4184 | { |
4168 | unsigned int block_nr = req->tp_block_nr; | 4185 | unsigned int block_nr = req->tp_block_nr; |
4169 | unsigned long size = req->tp_block_size; | ||
4170 | struct pgv *pg_vec; | 4186 | struct pgv *pg_vec; |
4171 | int i; | 4187 | int i; |
4172 | 4188 | ||
@@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req) | |||
4175 | goto out; | 4191 | goto out; |
4176 | 4192 | ||
4177 | for (i = 0; i < block_nr; i++) { | 4193 | for (i = 0; i < block_nr; i++) { |
4178 | pg_vec[i].buffer = alloc_one_pg_vec_page(size); | 4194 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
4179 | if (unlikely(!pg_vec[i].buffer)) | 4195 | if (unlikely(!pg_vec[i].buffer)) |
4180 | goto out_free_pgvec; | 4196 | goto out_free_pgvec; |
4181 | } | 4197 | } |
@@ -4184,7 +4200,7 @@ out: | |||
4184 | return pg_vec; | 4200 | return pg_vec; |
4185 | 4201 | ||
4186 | out_free_pgvec: | 4202 | out_free_pgvec: |
4187 | free_pg_vec(pg_vec, block_nr); | 4203 | free_pg_vec(pg_vec, order, block_nr); |
4188 | pg_vec = NULL; | 4204 | pg_vec = NULL; |
4189 | goto out; | 4205 | goto out; |
4190 | } | 4206 | } |
@@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4194 | { | 4210 | { |
4195 | struct pgv *pg_vec = NULL; | 4211 | struct pgv *pg_vec = NULL; |
4196 | struct packet_sock *po = pkt_sk(sk); | 4212 | struct packet_sock *po = pkt_sk(sk); |
4213 | int was_running, order = 0; | ||
4197 | struct packet_ring_buffer *rb; | 4214 | struct packet_ring_buffer *rb; |
4198 | struct sk_buff_head *rb_queue; | 4215 | struct sk_buff_head *rb_queue; |
4199 | int was_running; | ||
4200 | __be16 num; | 4216 | __be16 num; |
4201 | int err = -EINVAL; | 4217 | int err = -EINVAL; |
4202 | /* Added to avoid minimal code churn */ | 4218 | /* Added to avoid minimal code churn */ |
@@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4258 | goto out; | 4274 | goto out; |
4259 | 4275 | ||
4260 | err = -ENOMEM; | 4276 | err = -ENOMEM; |
4261 | pg_vec = alloc_pg_vec(req); | 4277 | order = get_order(req->tp_block_size); |
4278 | pg_vec = alloc_pg_vec(req, order); | ||
4262 | if (unlikely(!pg_vec)) | 4279 | if (unlikely(!pg_vec)) |
4263 | goto out; | 4280 | goto out; |
4264 | switch (po->tp_version) { | 4281 | switch (po->tp_version) { |
@@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4312 | rb->frame_size = req->tp_frame_size; | 4329 | rb->frame_size = req->tp_frame_size; |
4313 | spin_unlock_bh(&rb_queue->lock); | 4330 | spin_unlock_bh(&rb_queue->lock); |
4314 | 4331 | ||
4332 | swap(rb->pg_vec_order, order); | ||
4315 | swap(rb->pg_vec_len, req->tp_block_nr); | 4333 | swap(rb->pg_vec_len, req->tp_block_nr); |
4316 | 4334 | ||
4317 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; | 4335 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; |
@@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4337 | } | 4355 | } |
4338 | 4356 | ||
4339 | if (pg_vec) | 4357 | if (pg_vec) |
4340 | free_pg_vec(pg_vec, req->tp_block_nr); | 4358 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
4341 | out: | 4359 | out: |
4342 | return err; | 4360 | return err; |
4343 | } | 4361 | } |
diff --git a/net/packet/internal.h b/net/packet/internal.h index 8f50036f62f0..3bb7c5fb3bff 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h | |||
@@ -64,6 +64,7 @@ struct packet_ring_buffer { | |||
64 | unsigned int frame_size; | 64 | unsigned int frame_size; |
65 | unsigned int frame_max; | 65 | unsigned int frame_max; |
66 | 66 | ||
67 | unsigned int pg_vec_order; | ||
67 | unsigned int pg_vec_pages; | 68 | unsigned int pg_vec_pages; |
68 | unsigned int pg_vec_len; | 69 | unsigned int pg_vec_len; |
69 | 70 | ||
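
The af_packet changes above size each ring block by page order (get_order(tp_block_size)), try physically contiguous pages without retry, fall back to vmalloc, and only then retry contiguous pages with reclaim allowed; the free path picks vfree() or free_pages() via is_vmalloc_addr(). The sketch below only illustrates the order computation, assuming a 4 KiB page size; EX_PAGE_SIZE and block_order() are illustrative stand-ins, not the kernel's get_order().

#include <stdio.h>

/* Assumed page size for the example; the kernel derives this from PAGE_SHIFT. */
#define EX_PAGE_SIZE 4096UL

/* Smallest order n such that size fits in (EX_PAGE_SIZE << n), which is the
 * value the ring setup needs before allocating one compound block per entry. */
static unsigned int block_order(unsigned long size)
{
	unsigned int order = 0;

	while ((EX_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 8192, 65536, 65537, 262144 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("tp_block_size %lu -> order %u\n",
		       sizes[i], block_order(sizes[i]));
	return 0;
}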
diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 01b3bd6a3708..b9092111bc45 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | 1 | ||
2 | config RDS | 2 | config RDS |
3 | tristate "The RDS Protocol" | 3 | tristate "The Reliable Datagram Sockets Protocol" |
4 | depends on INET | 4 | depends on INET |
5 | ---help--- | 5 | ---help--- |
6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, | 6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, |
diff --git a/net/rds/ib.c b/net/rds/ib.c index c1d97640c0be..eba75c1ba359 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, | |||
341 | 341 | ||
342 | if (rds_conn_state(conn) == RDS_CONN_UP) { | 342 | if (rds_conn_state(conn) == RDS_CONN_UP) { |
343 | struct rds_ib_device *rds_ibdev; | 343 | struct rds_ib_device *rds_ibdev; |
344 | struct rdma_dev_addr *dev_addr; | ||
345 | 344 | ||
346 | ic = conn->c_transport_data; | 345 | ic = conn->c_transport_data; |
347 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; | 346 | rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, |
348 | rdma_addr_get_sgid(dev_addr, | 347 | (union ib_gid *)&iinfo6->dst_gid); |
349 | (union ib_gid *)&iinfo6->src_gid); | ||
350 | rdma_addr_get_dgid(dev_addr, | ||
351 | (union ib_gid *)&iinfo6->dst_gid); | ||
352 | |||
353 | rds_ibdev = ic->rds_ibdev; | 348 | rds_ibdev = ic->rds_ibdev; |
354 | iinfo6->max_send_wr = ic->i_send_ring.w_nr; | 349 | iinfo6->max_send_wr = ic->i_send_ring.w_nr; |
355 | iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; | 350 | iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; |
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 00192a996be0..0f8465852254 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/mod_devicetable.h> | ||
23 | #include <linux/rfkill.h> | 24 | #include <linux/rfkill.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 398c752ff529..6f118d62c731 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -662,6 +662,13 @@ int tcf_action_destroy(struct tc_action *actions[], int bind) | |||
662 | return ret; | 662 | return ret; |
663 | } | 663 | } |
664 | 664 | ||
665 | static int tcf_action_destroy_1(struct tc_action *a, int bind) | ||
666 | { | ||
667 | struct tc_action *actions[] = { a, NULL }; | ||
668 | |||
669 | return tcf_action_destroy(actions, bind); | ||
670 | } | ||
671 | |||
665 | static int tcf_action_put(struct tc_action *p) | 672 | static int tcf_action_put(struct tc_action *p) |
666 | { | 673 | { |
667 | return __tcf_action_put(p, false); | 674 | return __tcf_action_put(p, false); |
@@ -881,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, | |||
881 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { | 888 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { |
882 | err = tcf_action_goto_chain_init(a, tp); | 889 | err = tcf_action_goto_chain_init(a, tp); |
883 | if (err) { | 890 | if (err) { |
884 | struct tc_action *actions[] = { a, NULL }; | 891 | tcf_action_destroy_1(a, bind); |
885 | |||
886 | tcf_action_destroy(actions, bind); | ||
887 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); | 892 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); |
888 | return ERR_PTR(err); | 893 | return ERR_PTR(err); |
889 | } | 894 | } |
890 | } | 895 | } |
891 | 896 | ||
892 | if (!tcf_action_valid(a->tcfa_action)) { | 897 | if (!tcf_action_valid(a->tcfa_action)) { |
893 | NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead"); | 898 | tcf_action_destroy_1(a, bind); |
894 | a->tcfa_action = TC_ACT_UNSPEC; | 899 | NL_SET_ERR_MSG(extack, "Invalid control action value"); |
900 | return ERR_PTR(-EINVAL); | ||
895 | } | 901 | } |
896 | 902 | ||
897 | return a; | 903 | return a; |
@@ -1175,6 +1181,7 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) | |||
1175 | struct tcf_idrinfo *idrinfo = a->idrinfo; | 1181 | struct tcf_idrinfo *idrinfo = a->idrinfo; |
1176 | u32 act_index = a->tcfa_index; | 1182 | u32 act_index = a->tcfa_index; |
1177 | 1183 | ||
1184 | actions[i] = NULL; | ||
1178 | if (tcf_action_put(a)) { | 1185 | if (tcf_action_put(a)) { |
1179 | /* last reference, action was deleted concurrently */ | 1186 | /* last reference, action was deleted concurrently */ |
1180 | module_put(ops->owner); | 1187 | module_put(ops->owner); |
@@ -1186,7 +1193,6 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) | |||
1186 | if (ret < 0) | 1193 | if (ret < 0) |
1187 | return ret; | 1194 | return ret; |
1188 | } | 1195 | } |
1189 | actions[i] = NULL; | ||
1190 | } | 1196 | } |
1191 | return 0; | 1197 | return 0; |
1192 | } | 1198 | } |
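Two things change in act_api.c above: an invalid control action now fails tcf_action_init_1() with -EINVAL instead of being silently rewritten to TC_ACT_UNSPEC, and tcf_action_delete() clears the array slot before the reference is dropped. A rough sketch of why that ordering matters, using generic stand-in types (obj, obj_put() and obj_delete_by_index() are not the in-tree names):

/* Delete refcounted entries from a NULL-terminated array.  The slot is
 * detached before the final put so a later error-path walk over
 * actions[] can no longer reach an entry that may already be freed.
 */
static void delete_all(struct obj *actions[])
{
	int i;

	for (i = 0; actions[i]; i++) {
		struct obj *o = actions[i];

		actions[i] = NULL;	/* detach first ...            */
		if (obj_put(o))		/* ... then drop our reference */
			continue;	/* last ref: object is gone    */
		obj_delete_by_index(o);	/* still shared: delete by id  */
	}
}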
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 19454146f60d..30b63fa23ee2 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c | |||
@@ -326,6 +326,20 @@ static int __add_metainfo(const struct tcf_meta_ops *ops, | |||
326 | return ret; | 326 | return ret; |
327 | } | 327 | } |
328 | 328 | ||
329 | static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops, | ||
330 | struct tcf_ife_info *ife, u32 metaid, | ||
331 | bool exists) | ||
332 | { | ||
333 | int ret; | ||
334 | |||
335 | if (!try_module_get(ops->owner)) | ||
336 | return -ENOENT; | ||
337 | ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists); | ||
338 | if (ret) | ||
339 | module_put(ops->owner); | ||
340 | return ret; | ||
341 | } | ||
342 | |||
329 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | 343 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, |
330 | int len, bool exists) | 344 | int len, bool exists) |
331 | { | 345 | { |
@@ -349,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists) | |||
349 | 363 | ||
350 | read_lock(&ife_mod_lock); | 364 | read_lock(&ife_mod_lock); |
351 | list_for_each_entry(o, &ifeoplist, list) { | 365 | list_for_each_entry(o, &ifeoplist, list) { |
352 | rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists); | 366 | rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists); |
353 | if (rc == 0) | 367 | if (rc == 0) |
354 | installed += 1; | 368 | installed += 1; |
355 | } | 369 | } |
@@ -400,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a) | |||
400 | struct tcf_meta_info *e, *n; | 414 | struct tcf_meta_info *e, *n; |
401 | 415 | ||
402 | list_for_each_entry_safe(e, n, &ife->metalist, metalist) { | 416 | list_for_each_entry_safe(e, n, &ife->metalist, metalist) { |
403 | module_put(e->ops->owner); | ||
404 | list_del(&e->metalist); | 417 | list_del(&e->metalist); |
405 | if (e->metaval) { | 418 | if (e->metaval) { |
406 | if (e->ops->release) | 419 | if (e->ops->release) |
@@ -408,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a) | |||
408 | else | 421 | else |
409 | kfree(e->metaval); | 422 | kfree(e->metaval); |
410 | } | 423 | } |
424 | module_put(e->ops->owner); | ||
411 | kfree(e); | 425 | kfree(e); |
412 | } | 426 | } |
413 | } | 427 | } |
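The act_ife.c hunks above pin the metadata ops module for as long as a metainfo entry exists: use_all_metadata() now takes the reference through add_metainfo_and_get_ops(), and _tcf_ife_cleanup() drops it only after the last use of e->ops. A hedged sketch of that ownership rule with stand-in types (meta_ops, meta_entry and add_entry() are illustrative):

#include <linux/module.h>

static int install_meta(const struct meta_ops *ops, struct meta_entry *e)
{
	int ret;

	if (!try_module_get(ops->owner))	/* pin the provider module */
		return -ENOENT;

	ret = add_entry(ops, e);		/* hypothetical insert step */
	if (ret)
		module_put(ops->owner);		/* undo the pin on failure  */
	return ret;
}

static void destroy_meta(struct meta_entry *e)
{
	if (e->ops->release)
		e->ops->release(e);		/* still needs the module   */
	module_put(e->ops->owner);		/* safe to unpin only now   */
	kfree(e);
}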
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 6d6a9450e8ad..da3dd0f68cc2 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, | |||
109 | { | 109 | { |
110 | struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); | 110 | struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); |
111 | 111 | ||
112 | if (!keys_start) | ||
113 | goto nla_failure; | ||
112 | for (; n > 0; n--) { | 114 | for (; n > 0; n--) { |
113 | struct nlattr *key_start; | 115 | struct nlattr *key_start; |
114 | 116 | ||
115 | key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); | 117 | key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); |
118 | if (!key_start) | ||
119 | goto nla_failure; | ||
116 | 120 | ||
117 | if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || | 121 | if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || |
118 | nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) { | 122 | nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) |
119 | nlmsg_trim(skb, keys_start); | 123 | goto nla_failure; |
120 | return -EINVAL; | ||
121 | } | ||
122 | 124 | ||
123 | nla_nest_end(skb, key_start); | 125 | nla_nest_end(skb, key_start); |
124 | 126 | ||
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, | |||
128 | nla_nest_end(skb, keys_start); | 130 | nla_nest_end(skb, keys_start); |
129 | 131 | ||
130 | return 0; | 132 | return 0; |
133 | nla_failure: | ||
134 | nla_nest_cancel(skb, keys_start); | ||
135 | return -EINVAL; | ||
131 | } | 136 | } |
132 | 137 | ||
133 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, | 138 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, |
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, | |||
418 | opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; | 423 | opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; |
419 | 424 | ||
420 | if (p->tcfp_keys_ex) { | 425 | if (p->tcfp_keys_ex) { |
421 | tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); | 426 | if (tcf_pedit_key_ex_dump(skb, |
427 | p->tcfp_keys_ex, | ||
428 | p->tcfp_nkeys)) | ||
429 | goto nla_put_failure; | ||
422 | 430 | ||
423 | if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) | 431 | if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) |
424 | goto nla_put_failure; | 432 | goto nla_put_failure; |
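The act_pedit.c dump path above now checks every nla_nest_start() and unwinds through a single nla_nest_cancel() label, and tcf_pedit_dump() finally checks the helper's return value. A minimal sketch of that nest/cancel pattern, assuming placeholder attribute ids (TCA_EX_*) and a placeholder key struct:

static int dump_keys(struct sk_buff *skb, const struct ex_key *keys, int n)
{
	struct nlattr *outer = nla_nest_start(skb, TCA_EX_KEYS);

	if (!outer)
		goto cancel;

	for (; n > 0; n--, keys++) {
		struct nlattr *inner = nla_nest_start(skb, TCA_EX_KEY);

		if (!inner)
			goto cancel;
		if (nla_put_u16(skb, TCA_EX_HTYPE, keys->htype) ||
		    nla_put_u16(skb, TCA_EX_CMD, keys->cmd))
			goto cancel;
		nla_nest_end(skb, inner);
	}

	nla_nest_end(skb, outer);
	return 0;

cancel:
	nla_nest_cancel(skb, outer);	/* trims every attribute added above */
	return -EINVAL;
}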
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 31bd1439cf60..1a67af8a6e8c 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -1252,7 +1252,7 @@ replay: | |||
1252 | } | 1252 | } |
1253 | chain = tcf_chain_get(block, chain_index, true); | 1253 | chain = tcf_chain_get(block, chain_index, true); |
1254 | if (!chain) { | 1254 | if (!chain) { |
1255 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); | 1255 | NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); |
1256 | err = -ENOMEM; | 1256 | err = -ENOMEM; |
1257 | goto errout; | 1257 | goto errout; |
1258 | } | 1258 | } |
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, | |||
1399 | goto errout; | 1399 | goto errout; |
1400 | } | 1400 | } |
1401 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); | 1401 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
1402 | err = -EINVAL; | 1402 | err = -ENOENT; |
1403 | goto errout; | 1403 | goto errout; |
1404 | } | 1404 | } |
1405 | 1405 | ||
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index ef5c9a82d4e8..a644292f9faf 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = { | |||
215 | struct sctp_ht_iter { | 215 | struct sctp_ht_iter { |
216 | struct seq_net_private p; | 216 | struct seq_net_private p; |
217 | struct rhashtable_iter hti; | 217 | struct rhashtable_iter hti; |
218 | int start_fail; | ||
219 | }; | 218 | }; |
220 | 219 | ||
221 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | 220 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) |
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | |||
224 | 223 | ||
225 | sctp_transport_walk_start(&iter->hti); | 224 | sctp_transport_walk_start(&iter->hti); |
226 | 225 | ||
227 | iter->start_fail = 0; | ||
228 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); | 226 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); |
229 | } | 227 | } |
230 | 228 | ||
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v) | |||
232 | { | 230 | { |
233 | struct sctp_ht_iter *iter = seq->private; | 231 | struct sctp_ht_iter *iter = seq->private; |
234 | 232 | ||
235 | if (iter->start_fail) | ||
236 | return; | ||
237 | sctp_transport_walk_stop(&iter->hti); | 233 | sctp_transport_walk_stop(&iter->hti); |
238 | } | 234 | } |
239 | 235 | ||
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
264 | } | 260 | } |
265 | 261 | ||
266 | transport = (struct sctp_transport *)v; | 262 | transport = (struct sctp_transport *)v; |
267 | if (!sctp_transport_hold(transport)) | ||
268 | return 0; | ||
269 | assoc = transport->asoc; | 263 | assoc = transport->asoc; |
270 | epb = &assoc->base; | 264 | epb = &assoc->base; |
271 | sk = epb->sk; | 265 | sk = epb->sk; |
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
322 | } | 316 | } |
323 | 317 | ||
324 | transport = (struct sctp_transport *)v; | 318 | transport = (struct sctp_transport *)v; |
325 | if (!sctp_transport_hold(transport)) | ||
326 | return 0; | ||
327 | assoc = transport->asoc; | 319 | assoc = transport->asoc; |
328 | 320 | ||
329 | list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, | 321 | list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e96b15a66aba..f73e9d38d5ba 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
2658 | } | 2658 | } |
2659 | 2659 | ||
2660 | if (params->spp_flags & SPP_IPV6_FLOWLABEL) { | 2660 | if (params->spp_flags & SPP_IPV6_FLOWLABEL) { |
2661 | if (trans && trans->ipaddr.sa.sa_family == AF_INET6) { | 2661 | if (trans) { |
2662 | trans->flowlabel = params->spp_ipv6_flowlabel & | 2662 | if (trans->ipaddr.sa.sa_family == AF_INET6) { |
2663 | SCTP_FLOWLABEL_VAL_MASK; | ||
2664 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | ||
2665 | } else if (asoc) { | ||
2666 | list_for_each_entry(trans, | ||
2667 | &asoc->peer.transport_addr_list, | ||
2668 | transports) { | ||
2669 | if (trans->ipaddr.sa.sa_family != AF_INET6) | ||
2670 | continue; | ||
2671 | trans->flowlabel = params->spp_ipv6_flowlabel & | 2663 | trans->flowlabel = params->spp_ipv6_flowlabel & |
2672 | SCTP_FLOWLABEL_VAL_MASK; | 2664 | SCTP_FLOWLABEL_VAL_MASK; |
2673 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | 2665 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; |
2674 | } | 2666 | } |
2667 | } else if (asoc) { | ||
2668 | struct sctp_transport *t; | ||
2669 | |||
2670 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | ||
2671 | transports) { | ||
2672 | if (t->ipaddr.sa.sa_family != AF_INET6) | ||
2673 | continue; | ||
2674 | t->flowlabel = params->spp_ipv6_flowlabel & | ||
2675 | SCTP_FLOWLABEL_VAL_MASK; | ||
2676 | t->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | ||
2677 | } | ||
2675 | asoc->flowlabel = params->spp_ipv6_flowlabel & | 2678 | asoc->flowlabel = params->spp_ipv6_flowlabel & |
2676 | SCTP_FLOWLABEL_VAL_MASK; | 2679 | SCTP_FLOWLABEL_VAL_MASK; |
2677 | asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | 2680 | asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; |
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
2687 | trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; | 2690 | trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; |
2688 | trans->dscp |= SCTP_DSCP_SET_MASK; | 2691 | trans->dscp |= SCTP_DSCP_SET_MASK; |
2689 | } else if (asoc) { | 2692 | } else if (asoc) { |
2690 | list_for_each_entry(trans, | 2693 | struct sctp_transport *t; |
2691 | &asoc->peer.transport_addr_list, | 2694 | |
2695 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | ||
2692 | transports) { | 2696 | transports) { |
2693 | trans->dscp = params->spp_dscp & | 2697 | t->dscp = params->spp_dscp & |
2694 | SCTP_DSCP_VAL_MASK; | 2698 | SCTP_DSCP_VAL_MASK; |
2695 | trans->dscp |= SCTP_DSCP_SET_MASK; | 2699 | t->dscp |= SCTP_DSCP_SET_MASK; |
2696 | } | 2700 | } |
2697 | asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; | 2701 | asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; |
2698 | asoc->dscp |= SCTP_DSCP_SET_MASK; | 2702 | asoc->dscp |= SCTP_DSCP_SET_MASK; |
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net, | |||
5005 | break; | 5009 | break; |
5006 | } | 5010 | } |
5007 | 5011 | ||
5012 | if (!sctp_transport_hold(t)) | ||
5013 | continue; | ||
5014 | |||
5008 | if (net_eq(sock_net(t->asoc->base.sk), net) && | 5015 | if (net_eq(sock_net(t->asoc->base.sk), net) && |
5009 | t->asoc->peer.primary_path == t) | 5016 | t->asoc->peer.primary_path == t) |
5010 | break; | 5017 | break; |
5018 | |||
5019 | sctp_transport_put(t); | ||
5011 | } | 5020 | } |
5012 | 5021 | ||
5013 | return t; | 5022 | return t; |
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net, | |||
5017 | struct rhashtable_iter *iter, | 5026 | struct rhashtable_iter *iter, |
5018 | int pos) | 5027 | int pos) |
5019 | { | 5028 | { |
5020 | void *obj = SEQ_START_TOKEN; | 5029 | struct sctp_transport *t; |
5021 | 5030 | ||
5022 | while (pos && (obj = sctp_transport_get_next(net, iter)) && | 5031 | if (!pos) |
5023 | !IS_ERR(obj)) | 5032 | return SEQ_START_TOKEN; |
5024 | pos--; | ||
5025 | 5033 | ||
5026 | return obj; | 5034 | while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) { |
5035 | if (!--pos) | ||
5036 | break; | ||
5037 | sctp_transport_put(t); | ||
5038 | } | ||
5039 | |||
5040 | return t; | ||
5027 | } | 5041 | } |
5028 | 5042 | ||
5029 | int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), | 5043 | int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), |
@@ -5082,8 +5096,6 @@ again: | |||
5082 | 5096 | ||
5083 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); | 5097 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); |
5084 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { | 5098 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { |
5085 | if (!sctp_transport_hold(tsp)) | ||
5086 | continue; | ||
5087 | ret = cb(tsp, p); | 5099 | ret = cb(tsp, p); |
5088 | if (ret) | 5100 | if (ret) |
5089 | break; | 5101 | break; |
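The sctp hunks above move the transport refcounting out of the /proc and diag callbacks and into the iterator: sctp_transport_get_next() only returns a transport it managed to hold, and it drops the reference itself whenever it skips an entry. A generic sketch of that discipline (walk_iter, iter_next(), transport_hold/put() and wanted() are stand-ins):

/* Return the next element with a reference already held, or NULL.
 * Entries that are dying, or that the caller does not want, are
 * released before moving on.
 */
static struct transport *walk_next(struct walk_iter *it)
{
	struct transport *t;

	while ((t = iter_next(it)) != NULL) {
		if (!transport_hold(t))	/* already being freed: skip  */
			continue;
		if (wanted(t))		/* caller's filter            */
			return t;	/* returned with a ref held   */
		transport_put(t);	/* not a match: drop and go   */
	}
	return NULL;
}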
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 9ee6cfea56dd..d8026543bf4c 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link"; | |||
51 | * struct tipc_bc_base - base structure for keeping broadcast send state | 51 | * struct tipc_bc_base - base structure for keeping broadcast send state |
52 | * @link: broadcast send link structure | 52 | * @link: broadcast send link structure |
53 | * @inputq: data input queue; will only carry SOCK_WAKEUP messages | 53 | * @inputq: data input queue; will only carry SOCK_WAKEUP messages |
54 | * @dest: array keeping number of reachable destinations per bearer | 54 | * @dests: array keeping number of reachable destinations per bearer |
55 | * @primary_bearer: a bearer having links to all broadcast destinations, if any | 55 | * @primary_bearer: a bearer having links to all broadcast destinations, if any |
56 | * @bcast_support: indicates if primary bearer, if any, supports broadcast | 56 | * @bcast_support: indicates if primary bearer, if any, supports broadcast |
57 | * @rcast_support: indicates if all peer nodes support replicast | 57 | * @rcast_support: indicates if all peer nodes support replicast |
58 | * @rc_ratio: dest count as percentage of cluster size where send method changes | 58 | * @rc_ratio: dest count as percentage of cluster size where send method changes |
59 | * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast | 59 | * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast |
60 | */ | 60 | */ |
61 | struct tipc_bc_base { | 61 | struct tipc_bc_base { |
62 | struct tipc_link *link; | 62 | struct tipc_link *link; |
diff --git a/net/tipc/diag.c b/net/tipc/diag.c index aaabb0b776dd..73137f4aeb68 100644 --- a/net/tipc/diag.c +++ b/net/tipc/diag.c | |||
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb, | |||
84 | 84 | ||
85 | if (h->nlmsg_flags & NLM_F_DUMP) { | 85 | if (h->nlmsg_flags & NLM_F_DUMP) { |
86 | struct netlink_dump_control c = { | 86 | struct netlink_dump_control c = { |
87 | .start = tipc_dump_start, | ||
87 | .dump = tipc_diag_dump, | 88 | .dump = tipc_diag_dump, |
89 | .done = tipc_dump_done, | ||
88 | }; | 90 | }; |
89 | netlink_dump_start(net->diag_nlsk, skb, h, &c); | 91 | netlink_dump_start(net->diag_nlsk, skb, h, &c); |
90 | return 0; | 92 | return 0; |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 88f027b502f6..66d5b2c5987a 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
980 | 980 | ||
981 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) | 981 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) |
982 | { | 982 | { |
983 | u64 value = (u64)node << 32 | port; | ||
984 | struct tipc_dest *dst; | 983 | struct tipc_dest *dst; |
985 | 984 | ||
986 | list_for_each_entry(dst, l, list) { | 985 | list_for_each_entry(dst, l, list) { |
987 | if (dst->value != value) | 986 | if (dst->node == node && dst->port == port) |
988 | continue; | 987 | return dst; |
989 | return dst; | ||
990 | } | 988 | } |
991 | return NULL; | 989 | return NULL; |
992 | } | 990 | } |
993 | 991 | ||
994 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port) | 992 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port) |
995 | { | 993 | { |
996 | u64 value = (u64)node << 32 | port; | ||
997 | struct tipc_dest *dst; | 994 | struct tipc_dest *dst; |
998 | 995 | ||
999 | if (tipc_dest_find(l, node, port)) | 996 | if (tipc_dest_find(l, node, port)) |
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port) | |||
1002 | dst = kmalloc(sizeof(*dst), GFP_ATOMIC); | 999 | dst = kmalloc(sizeof(*dst), GFP_ATOMIC); |
1003 | if (unlikely(!dst)) | 1000 | if (unlikely(!dst)) |
1004 | return false; | 1001 | return false; |
1005 | dst->value = value; | 1002 | dst->node = node; |
1003 | dst->port = port; | ||
1006 | list_add(&dst->list, l); | 1004 | list_add(&dst->list, l); |
1007 | return true; | 1005 | return true; |
1008 | } | 1006 | } |
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 0febba41da86..892bd750b85f 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h | |||
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net); | |||
133 | 133 | ||
134 | struct tipc_dest { | 134 | struct tipc_dest { |
135 | struct list_head list; | 135 | struct list_head list; |
136 | union { | 136 | u32 port; |
137 | struct { | 137 | u32 node; |
138 | u32 port; | ||
139 | u32 node; | ||
140 | }; | ||
141 | u64 value; | ||
142 | }; | ||
143 | }; | 138 | }; |
144 | 139 | ||
145 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); | 140 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); |
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 6ff2254088f6..99ee419210ba 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = { | |||
167 | }, | 167 | }, |
168 | { | 168 | { |
169 | .cmd = TIPC_NL_SOCK_GET, | 169 | .cmd = TIPC_NL_SOCK_GET, |
170 | .start = tipc_dump_start, | ||
170 | .dumpit = tipc_nl_sk_dump, | 171 | .dumpit = tipc_nl_sk_dump, |
172 | .done = tipc_dump_done, | ||
171 | .policy = tipc_nl_policy, | 173 | .policy = tipc_nl_policy, |
172 | }, | 174 | }, |
173 | { | 175 | { |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index c1e93c9515bc..ab7a2a7178f7 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -2672,6 +2672,8 @@ void tipc_sk_reinit(struct net *net) | |||
2672 | 2672 | ||
2673 | rhashtable_walk_stop(&iter); | 2673 | rhashtable_walk_stop(&iter); |
2674 | } while (tsk == ERR_PTR(-EAGAIN)); | 2674 | } while (tsk == ERR_PTR(-EAGAIN)); |
2675 | |||
2676 | rhashtable_walk_exit(&iter); | ||
2675 | } | 2677 | } |
2676 | 2678 | ||
2677 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) | 2679 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) |
@@ -3227,45 +3229,69 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, | |||
3227 | struct netlink_callback *cb, | 3229 | struct netlink_callback *cb, |
3228 | struct tipc_sock *tsk)) | 3230 | struct tipc_sock *tsk)) |
3229 | { | 3231 | { |
3230 | struct net *net = sock_net(skb->sk); | 3232 | struct rhashtable_iter *iter = (void *)cb->args[0]; |
3231 | struct tipc_net *tn = tipc_net(net); | ||
3232 | const struct bucket_table *tbl; | ||
3233 | u32 prev_portid = cb->args[1]; | ||
3234 | u32 tbl_id = cb->args[0]; | ||
3235 | struct rhash_head *pos; | ||
3236 | struct tipc_sock *tsk; | 3233 | struct tipc_sock *tsk; |
3237 | int err; | 3234 | int err; |
3238 | 3235 | ||
3239 | rcu_read_lock(); | 3236 | rhashtable_walk_start(iter); |
3240 | tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); | 3237 | while ((tsk = rhashtable_walk_next(iter)) != NULL) { |
3241 | for (; tbl_id < tbl->size; tbl_id++) { | 3238 | if (IS_ERR(tsk)) { |
3242 | rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { | 3239 | err = PTR_ERR(tsk); |
3243 | spin_lock_bh(&tsk->sk.sk_lock.slock); | 3240 | if (err == -EAGAIN) { |
3244 | if (prev_portid && prev_portid != tsk->portid) { | 3241 | err = 0; |
3245 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
3246 | continue; | 3242 | continue; |
3247 | } | 3243 | } |
3244 | break; | ||
3245 | } | ||
3248 | 3246 | ||
3249 | err = skb_handler(skb, cb, tsk); | 3247 | sock_hold(&tsk->sk); |
3250 | if (err) { | 3248 | rhashtable_walk_stop(iter); |
3251 | prev_portid = tsk->portid; | 3249 | lock_sock(&tsk->sk); |
3252 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | 3250 | err = skb_handler(skb, cb, tsk); |
3253 | goto out; | 3251 | if (err) { |
3254 | } | 3252 | release_sock(&tsk->sk); |
3255 | 3253 | sock_put(&tsk->sk); | |
3256 | prev_portid = 0; | 3254 | goto out; |
3257 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
3258 | } | 3255 | } |
3256 | release_sock(&tsk->sk); | ||
3257 | rhashtable_walk_start(iter); | ||
3258 | sock_put(&tsk->sk); | ||
3259 | } | 3259 | } |
3260 | rhashtable_walk_stop(iter); | ||
3260 | out: | 3261 | out: |
3261 | rcu_read_unlock(); | ||
3262 | cb->args[0] = tbl_id; | ||
3263 | cb->args[1] = prev_portid; | ||
3264 | |||
3265 | return skb->len; | 3262 | return skb->len; |
3266 | } | 3263 | } |
3267 | EXPORT_SYMBOL(tipc_nl_sk_walk); | 3264 | EXPORT_SYMBOL(tipc_nl_sk_walk); |
3268 | 3265 | ||
3266 | int tipc_dump_start(struct netlink_callback *cb) | ||
3267 | { | ||
3268 | struct rhashtable_iter *iter = (void *)cb->args[0]; | ||
3269 | struct net *net = sock_net(cb->skb->sk); | ||
3270 | struct tipc_net *tn = tipc_net(net); | ||
3271 | |||
3272 | if (!iter) { | ||
3273 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | ||
3274 | if (!iter) | ||
3275 | return -ENOMEM; | ||
3276 | |||
3277 | cb->args[0] = (long)iter; | ||
3278 | } | ||
3279 | |||
3280 | rhashtable_walk_enter(&tn->sk_rht, iter); | ||
3281 | return 0; | ||
3282 | } | ||
3283 | EXPORT_SYMBOL(tipc_dump_start); | ||
3284 | |||
3285 | int tipc_dump_done(struct netlink_callback *cb) | ||
3286 | { | ||
3287 | struct rhashtable_iter *hti = (void *)cb->args[0]; | ||
3288 | |||
3289 | rhashtable_walk_exit(hti); | ||
3290 | kfree(hti); | ||
3291 | return 0; | ||
3292 | } | ||
3293 | EXPORT_SYMBOL(tipc_dump_done); | ||
3294 | |||
3269 | int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, | 3295 | int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, |
3270 | struct tipc_sock *tsk, u32 sk_filter_state, | 3296 | struct tipc_sock *tsk, u32 sk_filter_state, |
3271 | u64 (*tipc_diag_gen_cookie)(struct sock *sk)) | 3297 | u64 (*tipc_diag_gen_cookie)(struct sock *sk)) |
diff --git a/net/tipc/socket.h b/net/tipc/socket.h index aff9b2ae5a1f..d43032e26532 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h | |||
@@ -68,4 +68,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, | |||
68 | int (*skb_handler)(struct sk_buff *skb, | 68 | int (*skb_handler)(struct sk_buff *skb, |
69 | struct netlink_callback *cb, | 69 | struct netlink_callback *cb, |
70 | struct tipc_sock *tsk)); | 70 | struct tipc_sock *tsk)); |
71 | int tipc_dump_start(struct netlink_callback *cb); | ||
72 | int tipc_dump_done(struct netlink_callback *cb); | ||
71 | #endif | 73 | #endif |
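The tipc hunks above replace the open-coded bucket walk in tipc_nl_sk_walk() with a resumable rhashtable iterator whose lifetime is tied to the netlink dump: ->start() allocates it and stashes it in cb->args[0], ->done() tears it down. A hedged sketch of that wiring; my_table, my_dump(), my_policy and MY_CMD_GET are placeholders:

static int my_dump_start(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (void *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;
		cb->args[0] = (long)iter;
	}
	rhashtable_walk_enter(&my_table, iter);	/* pairs with walk_exit() */
	return 0;
}

static int my_dump_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (void *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);
	return 0;
}

/* Registered the same way as the TIPC_NL_SOCK_GET entry above: */
static const struct genl_ops my_ops = {
	.cmd	= MY_CMD_GET,
	.start	= my_dump_start,
	.dumpit	= my_dump,
	.done	= my_dump_done,
	.policy	= my_policy,
};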
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index c782b352d928..d8956f7daac4 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c | |||
@@ -307,8 +307,8 @@ static void tipc_conn_send_work(struct work_struct *work) | |||
307 | conn_put(con); | 307 | conn_put(con); |
308 | } | 308 | } |
309 | 309 | ||
310 | /* tipc_conn_queue_evt() - interrupt level call from a subscription instance | 310 | /* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance |
311 | * The queued work is launched into tipc_send_work()->tipc_send_to_sock() | 311 | * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock() |
312 | */ | 312 | */ |
313 | void tipc_topsrv_queue_evt(struct net *net, int conid, | 313 | void tipc_topsrv_queue_evt(struct net *net, int conid, |
314 | u32 event, struct tipc_event *evt) | 314 | u32 event, struct tipc_event *evt) |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 62e6679de481..fbd0747a5a9d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, | |||
669 | goto nla_put_failure; | 669 | goto nla_put_failure; |
670 | 670 | ||
671 | if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, | 671 | if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, |
672 | rule->wmm_rule->client[j].cw_min) || | 672 | rule->wmm_rule.client[j].cw_min) || |
673 | nla_put_u16(msg, NL80211_WMMR_CW_MAX, | 673 | nla_put_u16(msg, NL80211_WMMR_CW_MAX, |
674 | rule->wmm_rule->client[j].cw_max) || | 674 | rule->wmm_rule.client[j].cw_max) || |
675 | nla_put_u8(msg, NL80211_WMMR_AIFSN, | 675 | nla_put_u8(msg, NL80211_WMMR_AIFSN, |
676 | rule->wmm_rule->client[j].aifsn) || | 676 | rule->wmm_rule.client[j].aifsn) || |
677 | nla_put_u8(msg, NL80211_WMMR_TXOP, | 677 | nla_put_u16(msg, NL80211_WMMR_TXOP, |
678 | rule->wmm_rule->client[j].cot)) | 678 | rule->wmm_rule.client[j].cot)) |
679 | goto nla_put_failure; | 679 | goto nla_put_failure; |
680 | 680 | ||
681 | nla_nest_end(msg, nl_wmm_rule); | 681 | nla_nest_end(msg, nl_wmm_rule); |
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, | |||
766 | 766 | ||
767 | if (large) { | 767 | if (large) { |
768 | const struct ieee80211_reg_rule *rule = | 768 | const struct ieee80211_reg_rule *rule = |
769 | freq_reg_info(wiphy, chan->center_freq); | 769 | freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq)); |
770 | 770 | ||
771 | if (!IS_ERR(rule) && rule->wmm_rule) { | 771 | if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { |
772 | if (nl80211_msg_put_wmm_rules(msg, rule)) | 772 | if (nl80211_msg_put_wmm_rules(msg, rule)) |
773 | goto nla_put_failure; | 773 | goto nla_put_failure; |
774 | } | 774 | } |
@@ -12206,6 +12206,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) | |||
12206 | return -EOPNOTSUPP; | 12206 | return -EOPNOTSUPP; |
12207 | 12207 | ||
12208 | if (!info->attrs[NL80211_ATTR_MDID] || | 12208 | if (!info->attrs[NL80211_ATTR_MDID] || |
12209 | !info->attrs[NL80211_ATTR_IE] || | ||
12209 | !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 12210 | !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
12210 | return -EINVAL; | 12211 | return -EINVAL; |
12211 | 12212 | ||
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 043d43573ca2..7046d922867a 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain * | |||
425 | reg_copy_regd(const struct ieee80211_regdomain *src_regd) | 425 | reg_copy_regd(const struct ieee80211_regdomain *src_regd) |
426 | { | 426 | { |
427 | struct ieee80211_regdomain *regd; | 427 | struct ieee80211_regdomain *regd; |
428 | int size_of_regd, size_of_wmms; | 428 | int size_of_regd; |
429 | unsigned int i; | 429 | unsigned int i; |
430 | struct ieee80211_wmm_rule *d_wmm, *s_wmm; | ||
431 | 430 | ||
432 | size_of_regd = | 431 | size_of_regd = |
433 | sizeof(struct ieee80211_regdomain) + | 432 | sizeof(struct ieee80211_regdomain) + |
434 | src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); | 433 | src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); |
435 | size_of_wmms = src_regd->n_wmm_rules * | ||
436 | sizeof(struct ieee80211_wmm_rule); | ||
437 | 434 | ||
438 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | 435 | regd = kzalloc(size_of_regd, GFP_KERNEL); |
439 | if (!regd) | 436 | if (!regd) |
440 | return ERR_PTR(-ENOMEM); | 437 | return ERR_PTR(-ENOMEM); |
441 | 438 | ||
442 | memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); | 439 | memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); |
443 | 440 | ||
444 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | 441 | for (i = 0; i < src_regd->n_reg_rules; i++) |
445 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd); | ||
446 | memcpy(d_wmm, s_wmm, size_of_wmms); | ||
447 | |||
448 | for (i = 0; i < src_regd->n_reg_rules; i++) { | ||
449 | memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], | 442 | memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], |
450 | sizeof(struct ieee80211_reg_rule)); | 443 | sizeof(struct ieee80211_reg_rule)); |
451 | if (!src_regd->reg_rules[i].wmm_rule) | ||
452 | continue; | ||
453 | 444 | ||
454 | regd->reg_rules[i].wmm_rule = d_wmm + | ||
455 | (src_regd->reg_rules[i].wmm_rule - s_wmm) / | ||
456 | sizeof(struct ieee80211_wmm_rule); | ||
457 | } | ||
458 | return regd; | 445 | return regd; |
459 | } | 446 | } |
460 | 447 | ||
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size) | |||
860 | return true; | 847 | return true; |
861 | } | 848 | } |
862 | 849 | ||
863 | static void set_wmm_rule(struct ieee80211_wmm_rule *rule, | 850 | static void set_wmm_rule(struct ieee80211_reg_rule *rrule, |
864 | struct fwdb_wmm_rule *wmm) | 851 | struct fwdb_wmm_rule *wmm) |
865 | { | 852 | { |
853 | struct ieee80211_wmm_rule *rule = &rrule->wmm_rule; | ||
866 | unsigned int i; | 854 | unsigned int i; |
867 | 855 | ||
868 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { | 856 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule, | |||
876 | rule->ap[i].aifsn = wmm->ap[i].aifsn; | 864 | rule->ap[i].aifsn = wmm->ap[i].aifsn; |
877 | rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); | 865 | rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); |
878 | } | 866 | } |
867 | |||
868 | rrule->has_wmm = true; | ||
879 | } | 869 | } |
880 | 870 | ||
881 | static int __regdb_query_wmm(const struct fwdb_header *db, | 871 | static int __regdb_query_wmm(const struct fwdb_header *db, |
882 | const struct fwdb_country *country, int freq, | 872 | const struct fwdb_country *country, int freq, |
883 | u32 *dbptr, struct ieee80211_wmm_rule *rule) | 873 | struct ieee80211_reg_rule *rule) |
884 | { | 874 | { |
885 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; | 875 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; |
886 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); | 876 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); |
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db, | |||
901 | wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; | 891 | wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; |
902 | wmm = (void *)((u8 *)db + wmm_ptr); | 892 | wmm = (void *)((u8 *)db + wmm_ptr); |
903 | set_wmm_rule(rule, wmm); | 893 | set_wmm_rule(rule, wmm); |
904 | if (dbptr) | ||
905 | *dbptr = wmm_ptr; | ||
906 | return 0; | 894 | return 0; |
907 | } | 895 | } |
908 | } | 896 | } |
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db, | |||
910 | return -ENODATA; | 898 | return -ENODATA; |
911 | } | 899 | } |
912 | 900 | ||
913 | int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | 901 | int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule) |
914 | struct ieee80211_wmm_rule *rule) | ||
915 | { | 902 | { |
916 | const struct fwdb_header *hdr = regdb; | 903 | const struct fwdb_header *hdr = regdb; |
917 | const struct fwdb_country *country; | 904 | const struct fwdb_country *country; |
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
925 | country = &hdr->country[0]; | 912 | country = &hdr->country[0]; |
926 | while (country->coll_ptr) { | 913 | while (country->coll_ptr) { |
927 | if (alpha2_equal(alpha2, country->alpha2)) | 914 | if (alpha2_equal(alpha2, country->alpha2)) |
928 | return __regdb_query_wmm(regdb, country, freq, dbptr, | 915 | return __regdb_query_wmm(regdb, country, freq, rule); |
929 | rule); | ||
930 | 916 | ||
931 | country++; | 917 | country++; |
932 | } | 918 | } |
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
935 | } | 921 | } |
936 | EXPORT_SYMBOL(reg_query_regdb_wmm); | 922 | EXPORT_SYMBOL(reg_query_regdb_wmm); |
937 | 923 | ||
938 | struct wmm_ptrs { | ||
939 | struct ieee80211_wmm_rule *rule; | ||
940 | u32 ptr; | ||
941 | }; | ||
942 | |||
943 | static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs, | ||
944 | u32 wmm_ptr, int n_wmms) | ||
945 | { | ||
946 | int i; | ||
947 | |||
948 | for (i = 0; i < n_wmms; i++) { | ||
949 | if (wmm_ptrs[i].ptr == wmm_ptr) | ||
950 | return wmm_ptrs[i].rule; | ||
951 | } | ||
952 | return NULL; | ||
953 | } | ||
954 | |||
955 | static int regdb_query_country(const struct fwdb_header *db, | 924 | static int regdb_query_country(const struct fwdb_header *db, |
956 | const struct fwdb_country *country) | 925 | const struct fwdb_country *country) |
957 | { | 926 | { |
958 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; | 927 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; |
959 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); | 928 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); |
960 | struct ieee80211_regdomain *regdom; | 929 | struct ieee80211_regdomain *regdom; |
961 | struct ieee80211_regdomain *tmp_rd; | 930 | unsigned int size_of_regd, i; |
962 | unsigned int size_of_regd, i, n_wmms = 0; | ||
963 | struct wmm_ptrs *wmm_ptrs; | ||
964 | 931 | ||
965 | size_of_regd = sizeof(struct ieee80211_regdomain) + | 932 | size_of_regd = sizeof(struct ieee80211_regdomain) + |
966 | coll->n_rules * sizeof(struct ieee80211_reg_rule); | 933 | coll->n_rules * sizeof(struct ieee80211_reg_rule); |
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
969 | if (!regdom) | 936 | if (!regdom) |
970 | return -ENOMEM; | 937 | return -ENOMEM; |
971 | 938 | ||
972 | wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL); | ||
973 | if (!wmm_ptrs) { | ||
974 | kfree(regdom); | ||
975 | return -ENOMEM; | ||
976 | } | ||
977 | |||
978 | regdom->n_reg_rules = coll->n_rules; | 939 | regdom->n_reg_rules = coll->n_rules; |
979 | regdom->alpha2[0] = country->alpha2[0]; | 940 | regdom->alpha2[0] = country->alpha2[0]; |
980 | regdom->alpha2[1] = country->alpha2[1]; | 941 | regdom->alpha2[1] = country->alpha2[1]; |
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
1013 | 1000 * be16_to_cpu(rule->cac_timeout); | 974 | 1000 * be16_to_cpu(rule->cac_timeout); |
1014 | if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { | 975 | if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { |
1015 | u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; | 976 | u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; |
1016 | struct ieee80211_wmm_rule *wmm_pos = | 977 | struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr); |
1017 | find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms); | ||
1018 | struct fwdb_wmm_rule *wmm; | ||
1019 | struct ieee80211_wmm_rule *wmm_rule; | ||
1020 | |||
1021 | if (wmm_pos) { | ||
1022 | rrule->wmm_rule = wmm_pos; | ||
1023 | continue; | ||
1024 | } | ||
1025 | wmm = (void *)((u8 *)db + wmm_ptr); | ||
1026 | tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) * | ||
1027 | sizeof(struct ieee80211_wmm_rule), | ||
1028 | GFP_KERNEL); | ||
1029 | |||
1030 | if (!tmp_rd) { | ||
1031 | kfree(regdom); | ||
1032 | kfree(wmm_ptrs); | ||
1033 | return -ENOMEM; | ||
1034 | } | ||
1035 | regdom = tmp_rd; | ||
1036 | |||
1037 | wmm_rule = (struct ieee80211_wmm_rule *) | ||
1038 | ((u8 *)regdom + size_of_regd + n_wmms * | ||
1039 | sizeof(struct ieee80211_wmm_rule)); | ||
1040 | 978 | ||
1041 | set_wmm_rule(wmm_rule, wmm); | 979 | set_wmm_rule(rrule, wmm); |
1042 | wmm_ptrs[n_wmms].ptr = wmm_ptr; | ||
1043 | wmm_ptrs[n_wmms++].rule = wmm_rule; | ||
1044 | } | 980 | } |
1045 | } | 981 | } |
1046 | kfree(wmm_ptrs); | ||
1047 | 982 | ||
1048 | return reg_schedule_apply(regdom); | 983 | return reg_schedule_apply(regdom); |
1049 | } | 984 | } |
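With wmm_rule embedded in struct ieee80211_reg_rule (and the has_wmm flag set by set_wmm_rule()), the reg.c hunks above can drop the separately sized WMM trailer, the krealloc() growth path and the pointer fix-up in reg_copy_regd(). A sketch of the resulting single flexible-array allocation; alloc_regdom() is illustrative, not an in-tree helper:

static struct ieee80211_regdomain *alloc_regdom(unsigned int n_rules)
{
	size_t size = sizeof(struct ieee80211_regdomain) +
		      n_rules * sizeof(struct ieee80211_reg_rule);
	struct ieee80211_regdomain *regd = kzalloc(size, GFP_KERNEL);

	/* Each reg_rule carries its own wmm_rule, so no extra trailer or
	 * pointer rewriting is needed when the domain is later copied. */
	if (regd)
		regd->n_reg_rules = n_rules;
	return regd;
}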
diff --git a/net/wireless/util.c b/net/wireless/util.c index 2a89db5f2db7..4293f980e9c4 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, | |||
1456 | u8 *op_class) | 1456 | u8 *op_class) |
1457 | { | 1457 | { |
1458 | u8 vht_opclass; | 1458 | u8 vht_opclass; |
1459 | u16 freq = chandef->center_freq1; | 1459 | u32 freq = chandef->center_freq1; |
1460 | 1460 | ||
1461 | if (freq >= 2412 && freq <= 2472) { | 1461 | if (freq >= 2412 && freq <= 2472) { |
1462 | if (chandef->width > NL80211_CHAN_WIDTH_40) | 1462 | if (chandef->width > NL80211_CHAN_WIDTH_40) |
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c75413d05a63..ce53639a864a 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include | |||
@@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \ | |||
153 | # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) | 153 | # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) |
154 | cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) | 154 | cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) |
155 | 155 | ||
156 | # cc-if-fullversion | ||
157 | # Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1) | ||
158 | cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4)) | ||
159 | |||
160 | # cc-ldoption | 156 | # cc-ldoption |
161 | # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) | 157 | # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) |
162 | cc-ldoption = $(call try-run,\ | 158 | cc-ldoption = $(call try-run,\ |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 1c48572223d1..5a2d1c9578a0 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -246,8 +246,6 @@ objtool_args += --no-fp | |||
246 | endif | 246 | endif |
247 | ifdef CONFIG_GCOV_KERNEL | 247 | ifdef CONFIG_GCOV_KERNEL |
248 | objtool_args += --no-unreachable | 248 | objtool_args += --no-unreachable |
249 | else | ||
250 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) | ||
251 | endif | 249 | endif |
252 | ifdef CONFIG_RETPOLINE | 250 | ifdef CONFIG_RETPOLINE |
253 | ifneq ($(RETPOLINE_CFLAGS),) | 251 | ifneq ($(RETPOLINE_CFLAGS),) |
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index df175bc33c5d..9c55077ca5dd 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c | |||
@@ -68,6 +68,7 @@ static const char * const map_type_name[] = { | |||
68 | [BPF_MAP_TYPE_DEVMAP] = "devmap", | 68 | [BPF_MAP_TYPE_DEVMAP] = "devmap", |
69 | [BPF_MAP_TYPE_SOCKMAP] = "sockmap", | 69 | [BPF_MAP_TYPE_SOCKMAP] = "sockmap", |
70 | [BPF_MAP_TYPE_CPUMAP] = "cpumap", | 70 | [BPF_MAP_TYPE_CPUMAP] = "cpumap", |
71 | [BPF_MAP_TYPE_XSKMAP] = "xskmap", | ||
71 | [BPF_MAP_TYPE_SOCKHASH] = "sockhash", | 72 | [BPF_MAP_TYPE_SOCKHASH] = "sockhash", |
72 | [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", | 73 | [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", |
73 | }; | 74 | }; |
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index f8cc38afffa2..32a194e3e07a 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh | |||
@@ -46,6 +46,9 @@ | |||
46 | # Kselftest framework requirement - SKIP code is 4. | 46 | # Kselftest framework requirement - SKIP code is 4. |
47 | ksft_skip=4 | 47 | ksft_skip=4 |
48 | 48 | ||
49 | # Some systems don't have a ping6 binary anymore | ||
50 | which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping) | ||
51 | |||
49 | tests=" | 52 | tests=" |
50 | pmtu_vti6_exception vti6: PMTU exceptions | 53 | pmtu_vti6_exception vti6: PMTU exceptions |
51 | pmtu_vti4_exception vti4: PMTU exceptions | 54 | pmtu_vti4_exception vti4: PMTU exceptions |
@@ -274,7 +277,7 @@ test_pmtu_vti6_exception() { | |||
274 | mtu "${ns_b}" veth_b 4000 | 277 | mtu "${ns_b}" veth_b 4000 |
275 | mtu "${ns_a}" vti6_a 5000 | 278 | mtu "${ns_a}" vti6_a 5000 |
276 | mtu "${ns_b}" vti6_b 5000 | 279 | mtu "${ns_b}" vti6_b 5000 |
277 | ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null | 280 | ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null |
278 | 281 | ||
279 | # Check that exception was created | 282 | # Check that exception was created |
280 | if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then | 283 | if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then |
@@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() { | |||
334 | fail=0 | 337 | fail=0 |
335 | 338 | ||
336 | min=68 | 339 | min=68 |
337 | max=$((65528 - 20)) | 340 | max=$((65535 - 20)) |
338 | # Check invalid values first | 341 | # Check invalid values first |
339 | for v in $((min - 1)) $((max + 1)); do | 342 | for v in $((min - 1)) $((max + 1)); do |
340 | ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null | 343 | ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json index f03763d81617..30f9b54bd666 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json | |||
@@ -313,6 +313,54 @@ | |||
313 | ] | 313 | ] |
314 | }, | 314 | }, |
315 | { | 315 | { |
316 | "id": "6aaf", | ||
317 | "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]", | ||
318 | "category": [ | ||
319 | "actions", | ||
320 | "police" | ||
321 | ], | ||
322 | "setup": [ | ||
323 | [ | ||
324 | "$TC actions flush action police", | ||
325 | 0, | ||
326 | 1, | ||
327 | 255 | ||
328 | ] | ||
329 | ], | ||
330 | "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1", | ||
331 | "expExitCode": "0", | ||
332 | "verifyCmd": "$TC actions get action police index 1", | ||
333 | "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe", | ||
334 | "matchCount": "1", | ||
335 | "teardown": [ | ||
336 | "$TC actions flush action police" | ||
337 | ] | ||
338 | }, | ||
339 | { | ||
340 | "id": "29b1", | ||
341 | "name": "Add police actions with conform-exceed control <invalid>/drop", | ||
342 | "category": [ | ||
343 | "actions", | ||
344 | "police" | ||
345 | ], | ||
346 | "setup": [ | ||
347 | [ | ||
348 | "$TC actions flush action police", | ||
349 | 0, | ||
350 | 1, | ||
351 | 255 | ||
352 | ] | ||
353 | ], | ||
354 | "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1", | ||
355 | "expExitCode": "255", | ||
356 | "verifyCmd": "$TC actions ls action police", | ||
357 | "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ", | ||
358 | "matchCount": "0", | ||
359 | "teardown": [ | ||
360 | "$TC actions flush action police" | ||
361 | ] | ||
362 | }, | ||
363 | { | ||
316 | "id": "c26f", | 364 | "id": "c26f", |
317 | "name": "Add police action with invalid peakrate value", | 365 | "name": "Add police action with invalid peakrate value", |
318 | "category": [ | 366 | "category": [ |