author      Olof Johansson <olof@lixom.net>    2018-09-25 14:30:47 -0400
committer   Olof Johansson <olof@lixom.net>    2018-09-25 14:30:47 -0400
commit      cdddeefc39cc26b2d429f8f7ea3c35bf8cb8c6a1 (patch)
tree        2628d4922d0675df5659439f22f5ebda9c4b302a
parent      bf1da406909b13903873d1f91346f99303fd8cb3 (diff)
parent      11da3a7f84f19c26da6f86af878298694ede0804 (diff)
Merge tag 'v4.19-rc3' into next/drivers
Linux 4.19-rc3
545 files changed, 5260 insertions, 3485 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend index 3d5951c8bf5f..e8b60bd766f7 100644 --- a/Documentation/ABI/stable/sysfs-bus-xen-backend +++ b/Documentation/ABI/stable/sysfs-bus-xen-backend | |||
@@ -73,3 +73,12 @@ KernelVersion: 3.0 | |||
73 | Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 73 | Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
74 | Description: | 74 | Description: |
75 | Number of sectors written by the frontend. | 75 | Number of sectors written by the frontend. |
76 | |||
77 | What: /sys/bus/xen-backend/devices/*/state | ||
78 | Date: August 2018 | ||
79 | KernelVersion: 4.19 | ||
80 | Contact: Joe Jin <joe.jin@oracle.com> | ||
81 | Description: | ||
82 | The state of the device. One of: 'Unknown', | ||
83 | 'Initialising', 'Initialised', 'Connected', 'Closing', | ||
84 | 'Closed', 'Reconfiguring', 'Reconfigured'. | ||
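
For reference, the state attribute documented above is read like any other sysfs file. A minimal userspace sketch in C (the device name vbd-1-51712 is a made-up example):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/bus/xen-backend/devices/vbd-1-51712/state", "r");
        char state[32];

        if (!f) {
            perror("open");
            return 1;
        }
        if (fgets(state, sizeof(state), f))
            printf("backend state: %s", state);  /* e.g. "Connected" */
        fclose(f);
        return 0;
    }
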
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback index 8bb43b66eb55..4e7babb3ba1f 100644 --- a/Documentation/ABI/testing/sysfs-driver-xen-blkback +++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback | |||
@@ -15,3 +15,13 @@ Description: | |||
15 | blkback. If the frontend tries to use more than | 15 | blkback. If the frontend tries to use more than |
16 | max_persistent_grants, the LRU kicks in and starts | 16 | max_persistent_grants, the LRU kicks in and starts |
17 | removing 5% of max_persistent_grants every 100ms. | 17 | removing 5% of max_persistent_grants every 100ms. |
18 | |||
19 | What: /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds | ||
20 | Date: August 2018 | ||
21 | KernelVersion: 4.19 | ||
22 | Contact: Roger Pau Monné <roger.pau@citrix.com> | ||
23 | Description: | ||
24 | How long a persistent grant is allowed to remain | ||
25 | allocated without being in use. The time is in | ||
26 | seconds, 0 means indefinitely long. | ||
27 | The default is 60 seconds. | ||
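
Module parameters such as the one above are exposed under /sys/module/; a hedged C sketch that shortens the timeout to 30 seconds (assumes root and that the parameter is writable on the running kernel):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/module/xen_blkback/parameters/"
                        "persistent_grant_unused_seconds", "w");

        if (!f) {
            perror("open");
            return 1;
        }
        fprintf(f, "30\n");   /* 0 would mean: never expire unused grants */
        return fclose(f) ? 1 : 0;
    }
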
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9871e649ffef..64a3bf54b974 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -3523,6 +3523,12 @@ | |||
3523 | ramdisk_size= [RAM] Sizes of RAM disks in kilobytes | 3523 | ramdisk_size= [RAM] Sizes of RAM disks in kilobytes |
3524 | See Documentation/blockdev/ramdisk.txt. | 3524 | See Documentation/blockdev/ramdisk.txt. |
3525 | 3525 | ||
3526 | random.trust_cpu={on,off} | ||
3527 | [KNL] Enable or disable trusting the use of the | ||
3528 | CPU's random number generator (if available) to | ||
3529 | fully seed the kernel's CRNG. Default is controlled | ||
3530 | by CONFIG_RANDOM_TRUST_CPU. | ||
3531 | |||
3526 | ras=option[,option,...] [KNL] RAS-specific options | 3532 | ras=option[,option,...] [KNL] RAS-specific options |
3527 | 3533 | ||
3528 | cec_disable [X86] | 3534 | cec_disable [X86] |
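
random.trust_cpu only changes how early the CRNG counts as fully seeded; userspace can observe that state with getrandom(2). A small C sketch, useful for seeing the parameter's effect at boot:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/random.h>

    int main(void)
    {
        unsigned char byte;

        if (getrandom(&byte, sizeof(byte), GRND_NONBLOCK) == 1)
            printf("CRNG is seeded\n");
        else if (errno == EAGAIN)
            printf("CRNG not seeded yet\n");  /* would block without GRND_NONBLOCK */
        else
            perror("getrandom");
        return 0;
    }
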
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt index f128f736b4a5..7169a0ec41d8 100644 --- a/Documentation/arm64/sve.txt +++ b/Documentation/arm64/sve.txt | |||
@@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg) | |||
200 | thread. | 200 | thread. |
201 | 201 | ||
202 | * Changing the vector length causes all of P0..P15, FFR and all bits of | 202 | * Changing the vector length causes all of P0..P15, FFR and all bits of |
203 | Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become | 203 | Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become |
204 | unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current | 204 | unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current |
205 | vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC | 205 | vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC |
206 | flag, does not constitute a change to the vector length for this purpose. | 206 | flag, does not constitute a change to the vector length for this purpose. |
@@ -500,7 +500,7 @@ References | |||
500 | [2] arch/arm64/include/uapi/asm/ptrace.h | 500 | [2] arch/arm64/include/uapi/asm/ptrace.h |
501 | AArch64 Linux ptrace ABI definitions | 501 | AArch64 Linux ptrace ABI definitions |
502 | 502 | ||
503 | [3] linux/Documentation/arm64/cpu-feature-registers.txt | 503 | [3] Documentation/arm64/cpu-feature-registers.txt |
504 | 504 | ||
505 | [4] ARM IHI0055C | 505 | [4] ARM IHI0055C |
506 | http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf | 506 | http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf |
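
The sve.txt hunk above concerns PR_SVE_SET_VL semantics; for orientation, a hedged C sketch that requests a 32-byte (256-bit) vector length and reads back what the kernel granted. It needs an SVE-capable arm64 kernel whose headers provide the PR_SVE_* constants; the requested size is illustrative:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
        if (prctl(PR_SVE_SET_VL, 32) < 0) {   /* vl in bytes, optionally OR'ed with flags */
            perror("PR_SVE_SET_VL");
            return 1;
        }
        int ret = prctl(PR_SVE_GET_VL);
        printf("granted VL: %d bytes\n", ret & PR_SVE_VL_LEN_MASK);
        return 0;
    }
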
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt index 00e4365d7206..091c8dfd3229 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt | |||
@@ -3,7 +3,6 @@ | |||
3 | Required properties: | 3 | Required properties: |
4 | - compatible : | 4 | - compatible : |
5 | - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc | 5 | - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc |
6 | - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc | ||
7 | - reg : address and length of the lpi2c master registers | 6 | - reg : address and length of the lpi2c master registers |
8 | - interrupts : lpi2c interrupt | 7 | - interrupts : lpi2c interrupt |
9 | - clocks : lpi2c clock specifier | 8 | - clocks : lpi2c clock specifier |
@@ -11,7 +10,7 @@ Required properties: | |||
11 | Examples: | 10 | Examples: |
12 | 11 | ||
13 | lpi2c7: lpi2c7@40a50000 { | 12 | lpi2c7: lpi2c7@40a50000 { |
14 | compatible = "fsl,imx8dv-lpi2c"; | 13 | compatible = "fsl,imx7ulp-lpi2c"; |
15 | reg = <0x40A50000 0x10000>; | 14 | reg = <0x40A50000 0x10000>; |
16 | interrupt-parent = <&intc>; | 15 | interrupt-parent = <&intc>; |
17 | interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; | 16 | interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt index b0a8af51c388..265b223cd978 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt | |||
@@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are | |||
11 | attached to every HLIC: software interrupts, the timer interrupt, and external | 11 | attached to every HLIC: software interrupts, the timer interrupt, and external |
12 | interrupts. Software interrupts are used to send IPIs between cores. The | 12 | interrupts. Software interrupts are used to send IPIs between cores. The |
13 | timer interrupt comes from an architecturally mandated real-time timer that is | 13 | timer interrupt comes from an architecturally mandated real-time timer that is |
14 | controller via Supervisor Binary Interface (SBI) calls and CSR reads. External | 14 | controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External |
15 | interrupts connect all other device interrupts to the HLIC, which are routed | 15 | interrupts connect all other device interrupts to the HLIC, which are routed |
16 | via the platform-level interrupt controller (PLIC). | 16 | via the platform-level interrupt controller (PLIC). |
17 | 17 | ||
@@ -25,7 +25,15 @@ in the system. | |||
25 | 25 | ||
26 | Required properties: | 26 | Required properties: |
27 | - compatible : "riscv,cpu-intc" | 27 | - compatible : "riscv,cpu-intc" |
28 | - #interrupt-cells : should be <1> | 28 | - #interrupt-cells : should be <1>. The interrupt sources are defined by the |
29 | RISC-V supervisor ISA manual, with only the following three interrupts being | ||
30 | defined for supervisor mode: | ||
31 | - Source 1 is the supervisor software interrupt, which can be sent by an SBI | ||
32 | call and is reserved for use by software. | ||
33 | - Source 5 is the supervisor timer interrupt, which can be configured by | ||
34 | SBI calls and implements a one-shot timer. | ||
35 | - Source 9 is the supervisor external interrupt, which chains to all other | ||
36 | device interrupts. | ||
29 | - interrupt-controller : Identifies the node as an interrupt controller | 37 | - interrupt-controller : Identifies the node as an interrupt controller |
30 | 38 | ||
31 | Furthermore, this interrupt-controller MUST be embedded inside the cpu | 39 | Furthermore, this interrupt-controller MUST be embedded inside the cpu |
@@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below. | |||
38 | ... | 46 | ... |
39 | cpu1-intc: interrupt-controller { | 47 | cpu1-intc: interrupt-controller { |
40 | #interrupt-cells = <1>; | 48 | #interrupt-cells = <1>; |
41 | compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc"; | 49 | compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc"; |
42 | interrupt-controller; | 50 | interrupt-controller; |
43 | }; | 51 | }; |
44 | }; | 52 | }; |
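
The three source numbers spelled out in the binding above map directly onto the supervisor interrupt numbers used in interrupts-extended specifiers. A small C reference (the enum names are local to this example, not a kernel API):

    /* Supervisor-mode interrupt sources per the RISC-V supervisor ISA manual. */
    enum riscv_sup_irq {
        SUP_IRQ_SOFT  = 1,   /* software interrupt, raised via SBI, used for IPIs */
        SUP_IRQ_TIMER = 5,   /* one-shot timer interrupt, programmed via SBI */
        SUP_IRQ_EXT   = 9,   /* external interrupt, chains to the PLIC */
    };

    static const char *sup_irq_name(unsigned int hwirq)
    {
        switch (hwirq) {
        case SUP_IRQ_SOFT:  return "software";
        case SUP_IRQ_TIMER: return "timer";
        case SUP_IRQ_EXT:   return "external";
        default:            return "unknown";
        }
    }
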
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt index 41089369f891..b3acebe08eb0 100644 --- a/Documentation/devicetree/bindings/net/cpsw.txt +++ b/Documentation/devicetree/bindings/net/cpsw.txt | |||
@@ -19,6 +19,10 @@ Required properties: | |||
19 | - slaves : Specifies number for slaves | 19 | - slaves : Specifies number for slaves |
20 | - active_slave : Specifies the slave to use for time stamping, | 20 | - active_slave : Specifies the slave to use for time stamping, |
21 | ethtool and SIOCGMIIPHY | 21 | ethtool and SIOCGMIIPHY |
22 | - cpsw-phy-sel : Specifies the phandle to the CPSW phy mode selection | ||
23 | device. See also cpsw-phy-sel.txt for its binding. | ||
24 | Note that in legacy cases cpsw-phy-sel may be | ||
25 | a child device instead of a phandle. | ||
22 | 26 | ||
23 | Optional properties: | 27 | Optional properties: |
24 | - ti,hwmods : Must be "cpgmac0" | 28 | - ti,hwmods : Must be "cpgmac0" |
@@ -75,6 +79,7 @@ Examples: | |||
75 | cpts_clock_mult = <0x80000000>; | 79 | cpts_clock_mult = <0x80000000>; |
76 | cpts_clock_shift = <29>; | 80 | cpts_clock_shift = <29>; |
77 | syscon = <&cm>; | 81 | syscon = <&cm>; |
82 | cpsw-phy-sel = <&phy_sel>; | ||
78 | cpsw_emac0: slave@0 { | 83 | cpsw_emac0: slave@0 { |
79 | phy_id = <&davinci_mdio>, <0>; | 84 | phy_id = <&davinci_mdio>, <0>; |
80 | phy-mode = "rgmii-txid"; | 85 | phy-mode = "rgmii-txid"; |
@@ -103,6 +108,7 @@ Examples: | |||
103 | cpts_clock_mult = <0x80000000>; | 108 | cpts_clock_mult = <0x80000000>; |
104 | cpts_clock_shift = <29>; | 109 | cpts_clock_shift = <29>; |
105 | syscon = <&cm>; | 110 | syscon = <&cm>; |
111 | cpsw-phy-sel = <&phy_sel>; | ||
106 | cpsw_emac0: slave@0 { | 112 | cpsw_emac0: slave@0 { |
107 | phy_id = <&davinci_mdio>, <0>; | 113 | phy_id = <&davinci_mdio>, <0>; |
108 | phy-mode = "rgmii-txid"; | 114 | phy-mode = "rgmii-txid"; |
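
The new cpsw-phy-sel property above is a phandle, with legacy DTs still carrying the selector as a child node. A driver-side lookup that honours both forms could look roughly like this (hedged sketch, not the literal cpsw driver code):

    #include <linux/of.h>

    static struct device_node *find_phy_sel(struct device_node *cpsw_np)
    {
        struct device_node *np;

        np = of_parse_phandle(cpsw_np, "cpsw-phy-sel", 0);
        if (!np)        /* legacy layout: selector is a child of the cpsw node */
            np = of_get_child_by_name(cpsw_np, "cpsw-phy-sel");

        return np;      /* caller must of_node_put() the result */
    }
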
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt index 76db9f13ad96..abc36274227c 100644 --- a/Documentation/devicetree/bindings/net/sh_eth.txt +++ b/Documentation/devicetree/bindings/net/sh_eth.txt | |||
@@ -16,6 +16,7 @@ Required properties: | |||
16 | "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC. | 16 | "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC. |
17 | "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC. | 17 | "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC. |
18 | "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC. | 18 | "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC. |
19 | "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC. | ||
19 | "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device. | 20 | "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device. |
20 | "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1 | 21 | "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1 |
21 | device. | 22 | device. |
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt index 5d47a262474c..9407212a85a8 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt | |||
@@ -7,6 +7,7 @@ Required properties: | |||
7 | Examples with soctypes are: | 7 | Examples with soctypes are: |
8 | - "renesas,r8a7743-wdt" (RZ/G1M) | 8 | - "renesas,r8a7743-wdt" (RZ/G1M) |
9 | - "renesas,r8a7745-wdt" (RZ/G1E) | 9 | - "renesas,r8a7745-wdt" (RZ/G1E) |
10 | - "renesas,r8a774a1-wdt" (RZ/G2M) | ||
10 | - "renesas,r8a7790-wdt" (R-Car H2) | 11 | - "renesas,r8a7790-wdt" (R-Car H2) |
11 | - "renesas,r8a7791-wdt" (R-Car M2-W) | 12 | - "renesas,r8a7791-wdt" (R-Car M2-W) |
12 | - "renesas,r8a7792-wdt" (R-Car V2H) | 13 | - "renesas,r8a7792-wdt" (R-Car V2H) |
@@ -21,8 +22,8 @@ Required properties: | |||
21 | - "renesas,r7s72100-wdt" (RZ/A1) | 22 | - "renesas,r7s72100-wdt" (RZ/A1) |
22 | The generic compatible string must be: | 23 | The generic compatible string must be: |
23 | - "renesas,rza-wdt" for RZ/A | 24 | - "renesas,rza-wdt" for RZ/A |
24 | - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G | 25 | - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1 |
25 | - "renesas,rcar-gen3-wdt" for R-Car Gen3 | 26 | - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2 |
26 | 27 | ||
27 | - reg : Should contain WDT registers location and length | 28 | - reg : Should contain WDT registers location and length |
28 | - clocks : the clock feeding the watchdog timer. | 29 | - clocks : the clock feeding the watchdog timer. |
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index 72d16f08e431..b8df81f6d6bc 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx | |||
@@ -32,7 +32,7 @@ Supported chips: | |||
32 | Datasheet: Publicly available at the Texas Instruments website | 32 | Datasheet: Publicly available at the Texas Instruments website |
33 | http://www.ti.com/ | 33 | http://www.ti.com/ |
34 | 34 | ||
35 | Author: Lothar Felten <l-felten@ti.com> | 35 | Author: Lothar Felten <lothar.felten@gmail.com> |
36 | 36 | ||
37 | Description | 37 | Description |
38 | ----------- | 38 | ----------- |
diff --git a/Documentation/i2c/DMA-considerations b/Documentation/i2c/DMA-considerations index 966610aa4620..203002054120 100644 --- a/Documentation/i2c/DMA-considerations +++ b/Documentation/i2c/DMA-considerations | |||
@@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the | |||
50 | returned buffer. If NULL is returned, the threshold was not met or a bounce | 50 | returned buffer. If NULL is returned, the threshold was not met or a bounce |
51 | buffer could not be allocated. Fall back to PIO in that case. | 51 | buffer could not be allocated. Fall back to PIO in that case. |
52 | 52 | ||
53 | In any case, a buffer obtained from above needs to be released. It ensures data | 53 | In any case, a buffer obtained from above needs to be released. Another helper |
54 | is copied back to the message and a potentially used bounce buffer is freed:: | 54 | function ensures a potentially used bounce buffer is freed:: |
55 | 55 | ||
56 | i2c_release_dma_safe_msg_buf(msg, dma_buf); | 56 | i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred); |
57 | |||
58 | The last argument 'xferred' controls if the buffer is synced back to the | ||
59 | message or not. No syncing is needed in cases setting up DMA had an error and | ||
60 | there was no data transferred. | ||
57 | 61 | ||
58 | The bounce buffer handling from the core is generic and simple. It will always | 62 | The bounce buffer handling from the core is generic and simple. It will always |
59 | allocate a new bounce buffer. If you want a more sophisticated handling (e.g. | 63 | allocate a new bounce buffer. If you want a more sophisticated handling (e.g. |
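
Put together, the documented pattern for a bus driver is obtain, transfer, release. A hedged sketch (do_dma_transfer() and do_pio_transfer() are placeholders for the driver's own paths, not kernel APIs):

    #include <linux/i2c.h>

    static int do_pio_transfer(struct i2c_msg *msg) { return -EOPNOTSUPP; } /* placeholder */
    static int do_dma_transfer(struct i2c_msg *msg, u8 *buf) { return 0; }  /* placeholder */

    static int xfer_one_msg(struct i2c_msg *msg)
    {
        u8 *dma_buf;
        int ret;

        dma_buf = i2c_get_dma_safe_msg_buf(msg, 8 /* threshold in bytes */);
        if (!dma_buf)
            return do_pio_transfer(msg);    /* threshold not met or no memory */

        ret = do_dma_transfer(msg, dma_buf);

        /* xferred == true only when data really moved, so the bounce buffer
         * is copied back into the message just in that case. */
        i2c_put_dma_safe_msg_buf(dma_buf, msg, ret == 0);
        return ret;
    }
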
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 61f918b10a0c..d1bf143b446f 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst | |||
@@ -86,7 +86,7 @@ pkg-config | |||
86 | 86 | ||
87 | The build system, as of 4.18, requires pkg-config to check for installed | 87 | The build system, as of 4.18, requires pkg-config to check for installed |
88 | kconfig tools and to determine flags settings for use in | 88 | kconfig tools and to determine flags settings for use in |
89 | 'make {menu,n,g,x}config'. Previously pkg-config was being used but not | 89 | 'make {g,x}config'. Previously pkg-config was being used but not |
90 | verified or documented. | 90 | verified or documented. |
91 | 91 | ||
92 | Flex | 92 | Flex |
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt index 25a4b4cf04a6..92999d4e0cb8 100644 --- a/Documentation/scsi/scsi-parameters.txt +++ b/Documentation/scsi/scsi-parameters.txt | |||
@@ -97,6 +97,11 @@ parameters may be changed at runtime by the command | |||
97 | allowing boot to proceed. none ignores them, expecting | 97 | allowing boot to proceed. none ignores them, expecting |
98 | user space to do the scan. | 98 | user space to do the scan. |
99 | 99 | ||
100 | scsi_mod.use_blk_mq= | ||
101 | [SCSI] use blk-mq I/O path by default | ||
102 | See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig. | ||
103 | Format: <y/n> | ||
104 | |||
100 | sim710= [SCSI,HW] | 105 | sim710= [SCSI,HW] |
101 | See header of drivers/scsi/sim710.c. | 106 | See header of drivers/scsi/sim710.c. |
102 | 107 | ||
diff --git a/MAINTAINERS b/MAINTAINERS index a5b256b25905..d870cb57c887 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2311,6 +2311,7 @@ F: drivers/clocksource/cadence_ttc_timer.c | |||
2311 | F: drivers/i2c/busses/i2c-cadence.c | 2311 | F: drivers/i2c/busses/i2c-cadence.c |
2312 | F: drivers/mmc/host/sdhci-of-arasan.c | 2312 | F: drivers/mmc/host/sdhci-of-arasan.c |
2313 | F: drivers/edac/synopsys_edac.c | 2313 | F: drivers/edac/synopsys_edac.c |
2314 | F: drivers/i2c/busses/i2c-xiic.c | ||
2314 | 2315 | ||
2315 | ARM64 PORT (AARCH64 ARCHITECTURE) | 2316 | ARM64 PORT (AARCH64 ARCHITECTURE) |
2316 | M: Catalin Marinas <catalin.marinas@arm.com> | 2317 | M: Catalin Marinas <catalin.marinas@arm.com> |
@@ -8255,9 +8256,9 @@ F: drivers/ata/pata_arasan_cf.c | |||
8255 | 8256 | ||
8256 | LIBATA PATA DRIVERS | 8257 | LIBATA PATA DRIVERS |
8257 | M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | 8258 | M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> |
8258 | M: Jens Axboe <kernel.dk> | 8259 | M: Jens Axboe <axboe@kernel.dk> |
8259 | L: linux-ide@vger.kernel.org | 8260 | L: linux-ide@vger.kernel.org |
8260 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git | 8261 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git |
8261 | S: Maintained | 8262 | S: Maintained |
8262 | F: drivers/ata/pata_*.c | 8263 | F: drivers/ata/pata_*.c |
8263 | F: drivers/ata/ata_generic.c | 8264 | F: drivers/ata/ata_generic.c |
@@ -8275,7 +8276,7 @@ LIBATA SATA AHCI PLATFORM devices support | |||
8275 | M: Hans de Goede <hdegoede@redhat.com> | 8276 | M: Hans de Goede <hdegoede@redhat.com> |
8276 | M: Jens Axboe <axboe@kernel.dk> | 8277 | M: Jens Axboe <axboe@kernel.dk> |
8277 | L: linux-ide@vger.kernel.org | 8278 | L: linux-ide@vger.kernel.org |
8278 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git | 8279 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git |
8279 | S: Maintained | 8280 | S: Maintained |
8280 | F: drivers/ata/ahci_platform.c | 8281 | F: drivers/ata/ahci_platform.c |
8281 | F: drivers/ata/libahci_platform.c | 8282 | F: drivers/ata/libahci_platform.c |
@@ -8291,7 +8292,7 @@ F: drivers/ata/sata_promise.* | |||
8291 | LIBATA SUBSYSTEM (Serial and Parallel ATA drivers) | 8292 | LIBATA SUBSYSTEM (Serial and Parallel ATA drivers) |
8292 | M: Jens Axboe <axboe@kernel.dk> | 8293 | M: Jens Axboe <axboe@kernel.dk> |
8293 | L: linux-ide@vger.kernel.org | 8294 | L: linux-ide@vger.kernel.org |
8294 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git | 8295 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git |
8295 | S: Maintained | 8296 | S: Maintained |
8296 | F: drivers/ata/ | 8297 | F: drivers/ata/ |
8297 | F: include/linux/ata.h | 8298 | F: include/linux/ata.h |
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 4 | 2 | VERSION = 4 |
3 | PATCHLEVEL = 19 | 3 | PATCHLEVEL = 19 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc1 | 5 | EXTRAVERSION = -rc3 |
6 | NAME = Merciless Moray | 6 | NAME = Merciless Moray |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
@@ -807,6 +807,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) | |||
807 | # disable pointer signed / unsigned warnings in gcc 4.0 | 807 | # disable pointer signed / unsigned warnings in gcc 4.0 |
808 | KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) | 808 | KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) |
809 | 809 | ||
810 | # disable stringop warnings in gcc 8+ | ||
811 | KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) | ||
812 | |||
810 | # disable invalid "can't wrap" optimizations for signed / pointers | 813 | # disable invalid "can't wrap" optimizations for signed / pointers |
811 | KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) | 814 | KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) |
812 | 815 | ||
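
The new cc-disable-warning line mutes gcc 8's -Wstringop-truncation across the tree; the class of code that warning targets is intentional fixed-width copies, roughly like this standalone C example (not taken from the kernel):

    #include <string.h>

    struct task { char comm[16]; };

    void set_comm(struct task *t, const char *name)
    {
        /* Bound equal to the destination size: gcc 8 warns the result may not
         * be NUL-terminated, even where the truncation is intentional. */
        strncpy(t->comm, name, sizeof(t->comm));
        t->comm[sizeof(t->comm) - 1] = '\0';
    }
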
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 6d5eb8267e42..b4441b0764d7 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
@@ -9,6 +9,7 @@ | |||
9 | config ARC | 9 | config ARC |
10 | def_bool y | 10 | def_bool y |
11 | select ARC_TIMERS | 11 | select ARC_TIMERS |
12 | select ARCH_HAS_PTE_SPECIAL | ||
12 | select ARCH_HAS_SYNC_DMA_FOR_CPU | 13 | select ARCH_HAS_SYNC_DMA_FOR_CPU |
13 | select ARCH_HAS_SYNC_DMA_FOR_DEVICE | 14 | select ARCH_HAS_SYNC_DMA_FOR_DEVICE |
14 | select ARCH_HAS_SG_CHAIN | 15 | select ARCH_HAS_SG_CHAIN |
@@ -28,8 +29,12 @@ config ARC | |||
28 | select GENERIC_SMP_IDLE_THREAD | 29 | select GENERIC_SMP_IDLE_THREAD |
29 | select HAVE_ARCH_KGDB | 30 | select HAVE_ARCH_KGDB |
30 | select HAVE_ARCH_TRACEHOOK | 31 | select HAVE_ARCH_TRACEHOOK |
32 | select HAVE_DEBUG_STACKOVERFLOW | ||
31 | select HAVE_FUTEX_CMPXCHG if FUTEX | 33 | select HAVE_FUTEX_CMPXCHG if FUTEX |
34 | select HAVE_GENERIC_DMA_COHERENT | ||
32 | select HAVE_IOREMAP_PROT | 35 | select HAVE_IOREMAP_PROT |
36 | select HAVE_KERNEL_GZIP | ||
37 | select HAVE_KERNEL_LZMA | ||
33 | select HAVE_KPROBES | 38 | select HAVE_KPROBES |
34 | select HAVE_KRETPROBES | 39 | select HAVE_KRETPROBES |
35 | select HAVE_MEMBLOCK | 40 | select HAVE_MEMBLOCK |
@@ -44,11 +49,6 @@ config ARC | |||
44 | select OF_EARLY_FLATTREE | 49 | select OF_EARLY_FLATTREE |
45 | select OF_RESERVED_MEM | 50 | select OF_RESERVED_MEM |
46 | select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING | 51 | select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING |
47 | select HAVE_DEBUG_STACKOVERFLOW | ||
48 | select HAVE_GENERIC_DMA_COHERENT | ||
49 | select HAVE_KERNEL_GZIP | ||
50 | select HAVE_KERNEL_LZMA | ||
51 | select ARCH_HAS_PTE_SPECIAL | ||
52 | 52 | ||
53 | config ARCH_HAS_CACHE_LINE_SIZE | 53 | config ARCH_HAS_CACHE_LINE_SIZE |
54 | def_bool y | 54 | def_bool y |
diff --git a/arch/arc/Makefile b/arch/arc/Makefile index fb026196aaab..99cce77ab98f 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile | |||
@@ -43,10 +43,7 @@ ifdef CONFIG_ARC_CURR_IN_REG | |||
43 | LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h | 43 | LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h |
44 | endif | 44 | endif |
45 | 45 | ||
46 | upto_gcc44 := $(call cc-ifversion, -le, 0404, y) | 46 | cflags-y += -fsection-anchors |
47 | atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y) | ||
48 | |||
49 | cflags-$(atleast_gcc44) += -fsection-anchors | ||
50 | 47 | ||
51 | cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock | 48 | cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock |
52 | cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape | 49 | cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape |
@@ -82,11 +79,6 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp | |||
82 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian | 79 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian |
83 | ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB | 80 | ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB |
84 | 81 | ||
85 | # STAR 9000518362: (fixed with binutils shipping with gcc 4.8) | ||
86 | # arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept | ||
87 | # --build-id w/o "-marclinux". Default arc-elf32-ld is OK | ||
88 | ldflags-$(upto_gcc44) += -marclinux | ||
89 | |||
90 | LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) | 82 | LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) |
91 | 83 | ||
92 | # Modules with short calls might break for calls into builtin-kernel | 84 | # Modules with short calls might break for calls into builtin-kernel |
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index dc91c663bcc0..d75d65ddf8e3 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi | |||
@@ -94,6 +94,32 @@ | |||
94 | }; | 94 | }; |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Mark DMA peripherals connected via IOC port as dma-coherent. We do | ||
98 | * it via overlay because peripherals defined in axs10x_mb.dtsi are | ||
99 | * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so | ||
100 | * only AXS103 board has HW-coherent DMA peripherals) | ||
101 | * We don't need to mark pgu@17000 as dma-coherent because it uses | ||
102 | * external DMA buffer located outside of IOC aperture. | ||
103 | */ | ||
104 | axs10x_mb { | ||
105 | ethernet@0x18000 { | ||
106 | dma-coherent; | ||
107 | }; | ||
108 | |||
109 | ehci@0x40000 { | ||
110 | dma-coherent; | ||
111 | }; | ||
112 | |||
113 | ohci@0x60000 { | ||
114 | dma-coherent; | ||
115 | }; | ||
116 | |||
117 | mmc@0x15000 { | ||
118 | dma-coherent; | ||
119 | }; | ||
120 | }; | ||
121 | |||
122 | /* | ||
97 | * The DW APB ICTL intc on MB is connected to CPU intc via a | 123 | * The DW APB ICTL intc on MB is connected to CPU intc via a |
98 | * DT "invisible" DW APB GPIO block, configured to simply pass thru | 124 | * DT "invisible" DW APB GPIO block, configured to simply pass thru |
99 | * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c) | 125 | * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c) |
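
These dma-coherent overlay entries are what the per-device I/O coherency plumbing (see the arch/arc/mm changes further down) keys off; from kernel code the property is visible through of_dma_is_coherent(). A hedged sketch, with the wrapper function purely illustrative:

    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/device.h>

    static void report_dma_coherence(struct device *dev)
    {
        if (dev->of_node && of_dma_is_coherent(dev->of_node))
            dev_info(dev, "DT marks device HW DMA-coherent (IOC port)\n");
        else
            dev_info(dev, "device needs explicit DMA cache maintenance\n");
    }
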
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 69ff4895f2ba..a05bb737ea63 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi | |||
@@ -101,6 +101,32 @@ | |||
101 | }; | 101 | }; |
102 | 102 | ||
103 | /* | 103 | /* |
104 | * Mark DMA peripherals connected via IOC port as dma-coherent. We do | ||
105 | * it via overlay because peripherals defined in axs10x_mb.dtsi are | ||
106 | * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so | ||
107 | * only AXS103 board has HW-coherent DMA peripherals) | ||
108 | * We don't need to mark pgu@17000 as dma-coherent because it uses | ||
109 | * external DMA buffer located outside of IOC aperture. | ||
110 | */ | ||
111 | axs10x_mb { | ||
112 | ethernet@0x18000 { | ||
113 | dma-coherent; | ||
114 | }; | ||
115 | |||
116 | ehci@0x40000 { | ||
117 | dma-coherent; | ||
118 | }; | ||
119 | |||
120 | ohci@0x60000 { | ||
121 | dma-coherent; | ||
122 | }; | ||
123 | |||
124 | mmc@0x15000 { | ||
125 | dma-coherent; | ||
126 | }; | ||
127 | }; | ||
128 | |||
129 | /* | ||
104 | * This INTC is actually connected to DW APB GPIO | 130 | * This INTC is actually connected to DW APB GPIO |
105 | * which acts as a wire between MB INTC and CPU INTC. | 131 | * which acts as a wire between MB INTC and CPU INTC. |
106 | * GPIO INTC is configured in platform init code | 132 | * GPIO INTC is configured in platform init code |
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi index 47b74fbc403c..37bafd44e36d 100644 --- a/arch/arc/boot/dts/axs10x_mb.dtsi +++ b/arch/arc/boot/dts/axs10x_mb.dtsi | |||
@@ -9,6 +9,10 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | / { | 11 | / { |
12 | aliases { | ||
13 | ethernet = &gmac; | ||
14 | }; | ||
15 | |||
12 | axs10x_mb { | 16 | axs10x_mb { |
13 | compatible = "simple-bus"; | 17 | compatible = "simple-bus"; |
14 | #address-cells = <1>; | 18 | #address-cells = <1>; |
@@ -68,7 +72,7 @@ | |||
68 | }; | 72 | }; |
69 | }; | 73 | }; |
70 | 74 | ||
71 | ethernet@0x18000 { | 75 | gmac: ethernet@0x18000 { |
72 | #interrupt-cells = <1>; | 76 | #interrupt-cells = <1>; |
73 | compatible = "snps,dwmac"; | 77 | compatible = "snps,dwmac"; |
74 | reg = < 0x18000 0x2000 >; | 78 | reg = < 0x18000 0x2000 >; |
@@ -81,6 +85,7 @@ | |||
81 | max-speed = <100>; | 85 | max-speed = <100>; |
82 | resets = <&creg_rst 5>; | 86 | resets = <&creg_rst 5>; |
83 | reset-names = "stmmaceth"; | 87 | reset-names = "stmmaceth"; |
88 | mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ | ||
84 | }; | 89 | }; |
85 | 90 | ||
86 | ehci@0x40000 { | 91 | ehci@0x40000 { |
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index 006aa3de5348..ef149f59929a 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts | |||
@@ -25,6 +25,10 @@ | |||
25 | bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1"; | 25 | bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1"; |
26 | }; | 26 | }; |
27 | 27 | ||
28 | aliases { | ||
29 | ethernet = &gmac; | ||
30 | }; | ||
31 | |||
28 | cpus { | 32 | cpus { |
29 | #address-cells = <1>; | 33 | #address-cells = <1>; |
30 | #size-cells = <0>; | 34 | #size-cells = <0>; |
@@ -163,7 +167,7 @@ | |||
163 | #clock-cells = <0>; | 167 | #clock-cells = <0>; |
164 | }; | 168 | }; |
165 | 169 | ||
166 | ethernet@8000 { | 170 | gmac: ethernet@8000 { |
167 | #interrupt-cells = <1>; | 171 | #interrupt-cells = <1>; |
168 | compatible = "snps,dwmac"; | 172 | compatible = "snps,dwmac"; |
169 | reg = <0x8000 0x2000>; | 173 | reg = <0x8000 0x2000>; |
@@ -176,6 +180,8 @@ | |||
176 | phy-handle = <&phy0>; | 180 | phy-handle = <&phy0>; |
177 | resets = <&cgu_rst HSDK_ETH_RESET>; | 181 | resets = <&cgu_rst HSDK_ETH_RESET>; |
178 | reset-names = "stmmaceth"; | 182 | reset-names = "stmmaceth"; |
183 | mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ | ||
184 | dma-coherent; | ||
179 | 185 | ||
180 | mdio { | 186 | mdio { |
181 | #address-cells = <1>; | 187 | #address-cells = <1>; |
@@ -194,12 +200,14 @@ | |||
194 | compatible = "snps,hsdk-v1.0-ohci", "generic-ohci"; | 200 | compatible = "snps,hsdk-v1.0-ohci", "generic-ohci"; |
195 | reg = <0x60000 0x100>; | 201 | reg = <0x60000 0x100>; |
196 | interrupts = <15>; | 202 | interrupts = <15>; |
203 | dma-coherent; | ||
197 | }; | 204 | }; |
198 | 205 | ||
199 | ehci@40000 { | 206 | ehci@40000 { |
200 | compatible = "snps,hsdk-v1.0-ehci", "generic-ehci"; | 207 | compatible = "snps,hsdk-v1.0-ehci", "generic-ehci"; |
201 | reg = <0x40000 0x100>; | 208 | reg = <0x40000 0x100>; |
202 | interrupts = <15>; | 209 | interrupts = <15>; |
210 | dma-coherent; | ||
203 | }; | 211 | }; |
204 | 212 | ||
205 | mmc@a000 { | 213 | mmc@a000 { |
@@ -212,6 +220,7 @@ | |||
212 | clock-names = "biu", "ciu"; | 220 | clock-names = "biu", "ciu"; |
213 | interrupts = <12>; | 221 | interrupts = <12>; |
214 | bus-width = <4>; | 222 | bus-width = <4>; |
223 | dma-coherent; | ||
215 | }; | 224 | }; |
216 | }; | 225 | }; |
217 | 226 | ||
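
Besides dma-coherent, the hsdk.dts and axs10x hunks add an ethernet alias and a zeroed mac-address placeholder for U-Boot to patch. A driver would typically consume it along these lines (hedged sketch, written against the 4.19-era of_get_mac_address() return convention):

    #include <linux/of.h>
    #include <linux/of_net.h>
    #include <linux/etherdevice.h>

    static void fetch_mac(struct device_node *np, u8 addr[ETH_ALEN])
    {
        const void *mac = of_get_mac_address(np);

        if (mac && is_valid_ether_addr(mac))
            ether_addr_copy(addr, mac);     /* U-Boot filled the property in */
        else
            eth_random_addr(addr);          /* placeholder was left all-zero */
    }
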
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index a635ea972304..41bc08be6a3b 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig | |||
@@ -1,5 +1,3 @@ | |||
1 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
2 | # CONFIG_SWAP is not set | ||
3 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
4 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
5 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 3 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
@@ -63,7 +61,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y | |||
63 | CONFIG_MOUSE_SERIAL=y | 61 | CONFIG_MOUSE_SERIAL=y |
64 | CONFIG_MOUSE_SYNAPTICS_USB=y | 62 | CONFIG_MOUSE_SYNAPTICS_USB=y |
65 | # CONFIG_LEGACY_PTYS is not set | 63 | # CONFIG_LEGACY_PTYS is not set |
66 | # CONFIG_DEVKMEM is not set | ||
67 | CONFIG_SERIAL_8250=y | 64 | CONFIG_SERIAL_8250=y |
68 | CONFIG_SERIAL_8250_CONSOLE=y | 65 | CONFIG_SERIAL_8250_CONSOLE=y |
69 | CONFIG_SERIAL_8250_DW=y | 66 | CONFIG_SERIAL_8250_DW=y |
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index aa507e423075..1e1c4a8011b5 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig | |||
@@ -1,5 +1,3 @@ | |||
1 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
2 | # CONFIG_SWAP is not set | ||
3 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
4 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
5 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 3 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
@@ -64,7 +62,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y | |||
64 | CONFIG_MOUSE_SERIAL=y | 62 | CONFIG_MOUSE_SERIAL=y |
65 | CONFIG_MOUSE_SYNAPTICS_USB=y | 63 | CONFIG_MOUSE_SYNAPTICS_USB=y |
66 | # CONFIG_LEGACY_PTYS is not set | 64 | # CONFIG_LEGACY_PTYS is not set |
67 | # CONFIG_DEVKMEM is not set | ||
68 | CONFIG_SERIAL_8250=y | 65 | CONFIG_SERIAL_8250=y |
69 | CONFIG_SERIAL_8250_CONSOLE=y | 66 | CONFIG_SERIAL_8250_CONSOLE=y |
70 | CONFIG_SERIAL_8250_DW=y | 67 | CONFIG_SERIAL_8250_DW=y |
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index eba07f468654..6b0c0cfd5c30 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig | |||
@@ -1,5 +1,3 @@ | |||
1 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
2 | # CONFIG_SWAP is not set | ||
3 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
4 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
5 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 3 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
@@ -65,7 +63,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y | |||
65 | CONFIG_MOUSE_SERIAL=y | 63 | CONFIG_MOUSE_SERIAL=y |
66 | CONFIG_MOUSE_SYNAPTICS_USB=y | 64 | CONFIG_MOUSE_SYNAPTICS_USB=y |
67 | # CONFIG_LEGACY_PTYS is not set | 65 | # CONFIG_LEGACY_PTYS is not set |
68 | # CONFIG_DEVKMEM is not set | ||
69 | CONFIG_SERIAL_8250=y | 66 | CONFIG_SERIAL_8250=y |
70 | CONFIG_SERIAL_8250_CONSOLE=y | 67 | CONFIG_SERIAL_8250_CONSOLE=y |
71 | CONFIG_SERIAL_8250_DW=y | 68 | CONFIG_SERIAL_8250_DW=y |
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index 098b19fbaa51..240dd2cd5148 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig | |||
@@ -1,4 +1,3 @@ | |||
1 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
2 | # CONFIG_SWAP is not set | 1 | # CONFIG_SWAP is not set |
3 | CONFIG_SYSVIPC=y | 2 | CONFIG_SYSVIPC=y |
4 | CONFIG_POSIX_MQUEUE=y | 3 | CONFIG_POSIX_MQUEUE=y |
@@ -57,7 +56,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y | |||
57 | # CONFIG_SERIO_SERPORT is not set | 56 | # CONFIG_SERIO_SERPORT is not set |
58 | CONFIG_SERIO_ARC_PS2=y | 57 | CONFIG_SERIO_ARC_PS2=y |
59 | # CONFIG_LEGACY_PTYS is not set | 58 | # CONFIG_LEGACY_PTYS is not set |
60 | # CONFIG_DEVKMEM is not set | ||
61 | CONFIG_SERIAL_8250=y | 59 | CONFIG_SERIAL_8250=y |
62 | CONFIG_SERIAL_8250_CONSOLE=y | 60 | CONFIG_SERIAL_8250_CONSOLE=y |
63 | CONFIG_SERIAL_8250_NR_UARTS=1 | 61 | CONFIG_SERIAL_8250_NR_UARTS=1 |
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 0104c404d897..14ae7e5acc7c 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig | |||
@@ -1,4 +1,3 @@ | |||
1 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
2 | # CONFIG_SWAP is not set | 1 | # CONFIG_SWAP is not set |
3 | CONFIG_SYSVIPC=y | 2 | CONFIG_SYSVIPC=y |
4 | CONFIG_POSIX_MQUEUE=y | 3 | CONFIG_POSIX_MQUEUE=y |
@@ -60,7 +59,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y | |||
60 | # CONFIG_SERIO_SERPORT is not set | 59 | # CONFIG_SERIO_SERPORT is not set |
61 | CONFIG_SERIO_ARC_PS2=y | 60 | CONFIG_SERIO_ARC_PS2=y |
62 | # CONFIG_LEGACY_PTYS is not set | 61 | # CONFIG_LEGACY_PTYS is not set |
63 | # CONFIG_DEVKMEM is not set | ||
64 | CONFIG_SERIAL_8250=y | 62 | CONFIG_SERIAL_8250=y |
65 | CONFIG_SERIAL_8250_CONSOLE=y | 63 | CONFIG_SERIAL_8250_CONSOLE=y |
66 | CONFIG_SERIAL_8250_NR_UARTS=1 | 64 | CONFIG_SERIAL_8250_NR_UARTS=1 |
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 6491be0ddbc9..1dec2b4bc5e6 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig | |||
@@ -1,4 +1,3 @@ | |||
1 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
2 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
3 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 2 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
4 | CONFIG_NO_HZ_IDLE=y | 3 | CONFIG_NO_HZ_IDLE=y |
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 7c9c706ae7f6..31ba224bbfb4 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig | |||
@@ -59,7 +59,6 @@ CONFIG_NETCONSOLE=y | |||
59 | # CONFIG_INPUT_MOUSE is not set | 59 | # CONFIG_INPUT_MOUSE is not set |
60 | # CONFIG_SERIO is not set | 60 | # CONFIG_SERIO is not set |
61 | # CONFIG_LEGACY_PTYS is not set | 61 | # CONFIG_LEGACY_PTYS is not set |
62 | # CONFIG_DEVKMEM is not set | ||
63 | CONFIG_SERIAL_8250=y | 62 | CONFIG_SERIAL_8250=y |
64 | CONFIG_SERIAL_8250_CONSOLE=y | 63 | CONFIG_SERIAL_8250_CONSOLE=y |
65 | CONFIG_SERIAL_8250_NR_UARTS=1 | 64 | CONFIG_SERIAL_8250_NR_UARTS=1 |
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 99e05cf63fca..8e0b8b134cd9 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig | |||
@@ -1,5 +1,4 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | 1 | # CONFIG_LOCALVERSION_AUTO is not set |
2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
3 | # CONFIG_SWAP is not set | 2 | # CONFIG_SWAP is not set |
4 | CONFIG_SYSVIPC=y | 3 | CONFIG_SYSVIPC=y |
5 | CONFIG_POSIX_MQUEUE=y | 4 | CONFIG_POSIX_MQUEUE=y |
@@ -44,7 +43,6 @@ CONFIG_LXT_PHY=y | |||
44 | # CONFIG_INPUT_MOUSE is not set | 43 | # CONFIG_INPUT_MOUSE is not set |
45 | # CONFIG_SERIO is not set | 44 | # CONFIG_SERIO is not set |
46 | # CONFIG_LEGACY_PTYS is not set | 45 | # CONFIG_LEGACY_PTYS is not set |
47 | # CONFIG_DEVKMEM is not set | ||
48 | CONFIG_SERIAL_ARC=y | 46 | CONFIG_SERIAL_ARC=y |
49 | CONFIG_SERIAL_ARC_CONSOLE=y | 47 | CONFIG_SERIAL_ARC_CONSOLE=y |
50 | # CONFIG_HW_RANDOM is not set | 48 | # CONFIG_HW_RANDOM is not set |
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index 0dc4f9b737e7..739b90e5e893 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig | |||
@@ -1,5 +1,4 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | 1 | # CONFIG_LOCALVERSION_AUTO is not set |
2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
3 | # CONFIG_SWAP is not set | 2 | # CONFIG_SWAP is not set |
4 | CONFIG_SYSVIPC=y | 3 | CONFIG_SYSVIPC=y |
5 | CONFIG_POSIX_MQUEUE=y | 4 | CONFIG_POSIX_MQUEUE=y |
@@ -45,7 +44,6 @@ CONFIG_DEVTMPFS=y | |||
45 | # CONFIG_INPUT_MOUSE is not set | 44 | # CONFIG_INPUT_MOUSE is not set |
46 | # CONFIG_SERIO is not set | 45 | # CONFIG_SERIO is not set |
47 | # CONFIG_LEGACY_PTYS is not set | 46 | # CONFIG_LEGACY_PTYS is not set |
48 | # CONFIG_DEVKMEM is not set | ||
49 | CONFIG_SERIAL_ARC=y | 47 | CONFIG_SERIAL_ARC=y |
50 | CONFIG_SERIAL_ARC_CONSOLE=y | 48 | CONFIG_SERIAL_ARC_CONSOLE=y |
51 | # CONFIG_HW_RANDOM is not set | 49 | # CONFIG_HW_RANDOM is not set |
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index be3c30a15e54..b5895bdf3a93 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig | |||
@@ -1,5 +1,4 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | 1 | # CONFIG_LOCALVERSION_AUTO is not set |
2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
3 | # CONFIG_SWAP is not set | 2 | # CONFIG_SWAP is not set |
4 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 3 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
5 | CONFIG_HIGH_RES_TIMERS=y | 4 | CONFIG_HIGH_RES_TIMERS=y |
@@ -44,7 +43,6 @@ CONFIG_DEVTMPFS=y | |||
44 | # CONFIG_INPUT_MOUSE is not set | 43 | # CONFIG_INPUT_MOUSE is not set |
45 | # CONFIG_SERIO is not set | 44 | # CONFIG_SERIO is not set |
46 | # CONFIG_LEGACY_PTYS is not set | 45 | # CONFIG_LEGACY_PTYS is not set |
47 | # CONFIG_DEVKMEM is not set | ||
48 | CONFIG_SERIAL_ARC=y | 46 | CONFIG_SERIAL_ARC=y |
49 | CONFIG_SERIAL_ARC_CONSOLE=y | 47 | CONFIG_SERIAL_ARC_CONSOLE=y |
50 | # CONFIG_HW_RANDOM is not set | 48 | # CONFIG_HW_RANDOM is not set |
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 3a74b9b21772..f14eeff7d308 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig | |||
@@ -1,5 +1,4 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | 1 | # CONFIG_LOCALVERSION_AUTO is not set |
2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
3 | # CONFIG_SWAP is not set | 2 | # CONFIG_SWAP is not set |
4 | CONFIG_SYSVIPC=y | 3 | CONFIG_SYSVIPC=y |
5 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 4 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
@@ -48,7 +47,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y | |||
48 | # CONFIG_SERIO_SERPORT is not set | 47 | # CONFIG_SERIO_SERPORT is not set |
49 | CONFIG_SERIO_ARC_PS2=y | 48 | CONFIG_SERIO_ARC_PS2=y |
50 | # CONFIG_LEGACY_PTYS is not set | 49 | # CONFIG_LEGACY_PTYS is not set |
51 | # CONFIG_DEVKMEM is not set | ||
52 | CONFIG_SERIAL_8250=y | 50 | CONFIG_SERIAL_8250=y |
53 | CONFIG_SERIAL_8250_CONSOLE=y | 51 | CONFIG_SERIAL_8250_CONSOLE=y |
54 | CONFIG_SERIAL_8250_NR_UARTS=1 | 52 | CONFIG_SERIAL_8250_NR_UARTS=1 |
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index ea2834b4dc1d..025298a48305 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig | |||
@@ -1,5 +1,4 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | 1 | # CONFIG_LOCALVERSION_AUTO is not set |
2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
3 | # CONFIG_SWAP is not set | 2 | # CONFIG_SWAP is not set |
4 | CONFIG_SYSVIPC=y | 3 | CONFIG_SYSVIPC=y |
5 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 4 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
@@ -47,7 +46,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y | |||
47 | # CONFIG_SERIO_SERPORT is not set | 46 | # CONFIG_SERIO_SERPORT is not set |
48 | CONFIG_SERIO_ARC_PS2=y | 47 | CONFIG_SERIO_ARC_PS2=y |
49 | # CONFIG_LEGACY_PTYS is not set | 48 | # CONFIG_LEGACY_PTYS is not set |
50 | # CONFIG_DEVKMEM is not set | ||
51 | CONFIG_SERIAL_8250=y | 49 | CONFIG_SERIAL_8250=y |
52 | CONFIG_SERIAL_8250_CONSOLE=y | 50 | CONFIG_SERIAL_8250_CONSOLE=y |
53 | CONFIG_SERIAL_8250_NR_UARTS=1 | 51 | CONFIG_SERIAL_8250_NR_UARTS=1 |
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index 80a5a1b4924b..df7b77b13b82 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig | |||
@@ -1,4 +1,3 @@ | |||
1 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
2 | # CONFIG_SWAP is not set | 1 | # CONFIG_SWAP is not set |
3 | CONFIG_SYSVIPC=y | 2 | CONFIG_SYSVIPC=y |
4 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 3 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
@@ -58,7 +57,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y | |||
58 | # CONFIG_SERIO_SERPORT is not set | 57 | # CONFIG_SERIO_SERPORT is not set |
59 | CONFIG_SERIO_ARC_PS2=y | 58 | CONFIG_SERIO_ARC_PS2=y |
60 | # CONFIG_LEGACY_PTYS is not set | 59 | # CONFIG_LEGACY_PTYS is not set |
61 | # CONFIG_DEVKMEM is not set | ||
62 | CONFIG_SERIAL_8250=y | 60 | CONFIG_SERIAL_8250=y |
63 | CONFIG_SERIAL_8250_CONSOLE=y | 61 | CONFIG_SERIAL_8250_CONSOLE=y |
64 | CONFIG_SERIAL_8250_NR_UARTS=1 | 62 | CONFIG_SERIAL_8250_NR_UARTS=1 |
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 2cc87f909747..a7f65313f84a 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig | |||
@@ -57,7 +57,6 @@ CONFIG_STMMAC_ETH=y | |||
57 | # CONFIG_SERIO is not set | 57 | # CONFIG_SERIO is not set |
58 | # CONFIG_VT is not set | 58 | # CONFIG_VT is not set |
59 | # CONFIG_LEGACY_PTYS is not set | 59 | # CONFIG_LEGACY_PTYS is not set |
60 | # CONFIG_DEVKMEM is not set | ||
61 | CONFIG_SERIAL_8250=y | 60 | CONFIG_SERIAL_8250=y |
62 | CONFIG_SERIAL_8250_CONSOLE=y | 61 | CONFIG_SERIAL_8250_CONSOLE=y |
63 | CONFIG_SERIAL_8250_NR_UARTS=1 | 62 | CONFIG_SERIAL_8250_NR_UARTS=1 |
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index f629493929ea..db47c3541f15 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig | |||
@@ -1,5 +1,4 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | 1 | # CONFIG_LOCALVERSION_AUTO is not set |
2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
3 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 2 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
4 | CONFIG_HIGH_RES_TIMERS=y | 3 | CONFIG_HIGH_RES_TIMERS=y |
5 | CONFIG_IKCONFIG=y | 4 | CONFIG_IKCONFIG=y |
@@ -53,7 +52,6 @@ CONFIG_NATIONAL_PHY=y | |||
53 | CONFIG_MOUSE_PS2_TOUCHKIT=y | 52 | CONFIG_MOUSE_PS2_TOUCHKIT=y |
54 | CONFIG_SERIO_ARC_PS2=y | 53 | CONFIG_SERIO_ARC_PS2=y |
55 | # CONFIG_LEGACY_PTYS is not set | 54 | # CONFIG_LEGACY_PTYS is not set |
56 | # CONFIG_DEVKMEM is not set | ||
57 | CONFIG_SERIAL_8250=y | 55 | CONFIG_SERIAL_8250=y |
58 | CONFIG_SERIAL_8250_CONSOLE=y | 56 | CONFIG_SERIAL_8250_CONSOLE=y |
59 | CONFIG_SERIAL_8250_DW=y | 57 | CONFIG_SERIAL_8250_DW=y |
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index 21f0ca26a05d..a8ac5e917d9a 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig | |||
@@ -1,5 +1,4 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | 1 | # CONFIG_LOCALVERSION_AUTO is not set |
2 | CONFIG_DEFAULT_HOSTNAME="ARCLinux" | ||
3 | # CONFIG_CROSS_MEMORY_ATTACH is not set | 2 | # CONFIG_CROSS_MEMORY_ATTACH is not set |
4 | CONFIG_HIGH_RES_TIMERS=y | 3 | CONFIG_HIGH_RES_TIMERS=y |
5 | CONFIG_IKCONFIG=y | 4 | CONFIG_IKCONFIG=y |
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 4e0072730241..158af079838d 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h | |||
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ | |||
84 | "1: llock %[orig], [%[ctr]] \n" \ | 84 | "1: llock %[orig], [%[ctr]] \n" \ |
85 | " " #asm_op " %[val], %[orig], %[i] \n" \ | 85 | " " #asm_op " %[val], %[orig], %[i] \n" \ |
86 | " scond %[val], [%[ctr]] \n" \ | 86 | " scond %[val], [%[ctr]] \n" \ |
87 | " \n" \ | 87 | " bnz 1b \n" \ |
88 | : [val] "=&r" (val), \ | 88 | : [val] "=&r" (val), \ |
89 | [orig] "=&r" (orig) \ | 89 | [orig] "=&r" (orig) \ |
90 | : [ctr] "r" (&v->counter), \ | 90 | : [ctr] "r" (&v->counter), \ |
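
The one-line atomic.h fix above restores the branch that retries the llock/scond sequence whenever the conditional store fails. The same retry shape, written portably as a C11 compare-exchange loop (illustration only, not the ARC implementation):

    #include <stdatomic.h>

    int fetch_add_example(atomic_int *v, int i)
    {
        int old = atomic_load_explicit(v, memory_order_relaxed);

        /* A failed exchange reloads 'old' and loops -- the case the missing
         * "bnz 1b" used to fall straight through on. */
        while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
            ;
        return old;
    }
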
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h new file mode 100644 index 000000000000..c946c0a83e76 --- /dev/null +++ b/arch/arc/include/asm/dma-mapping.h | |||
@@ -0,0 +1,13 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // (C) 2018 Synopsys, Inc. (www.synopsys.com) | ||
3 | |||
4 | #ifndef ASM_ARC_DMA_MAPPING_H | ||
5 | #define ASM_ARC_DMA_MAPPING_H | ||
6 | |||
7 | #include <asm-generic/dma-mapping.h> | ||
8 | |||
9 | void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | ||
10 | const struct iommu_ops *iommu, bool coherent); | ||
11 | #define arch_setup_dma_ops arch_setup_dma_ops | ||
12 | |||
13 | #endif | ||
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 783b20354f8b..e8d9fb452346 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c | |||
@@ -83,9 +83,6 @@ done: | |||
83 | static void show_faulting_vma(unsigned long address, char *buf) | 83 | static void show_faulting_vma(unsigned long address, char *buf) |
84 | { | 84 | { |
85 | struct vm_area_struct *vma; | 85 | struct vm_area_struct *vma; |
86 | struct inode *inode; | ||
87 | unsigned long ino = 0; | ||
88 | dev_t dev = 0; | ||
89 | char *nm = buf; | 86 | char *nm = buf; |
90 | struct mm_struct *active_mm = current->active_mm; | 87 | struct mm_struct *active_mm = current->active_mm; |
91 | 88 | ||
@@ -99,12 +96,10 @@ static void show_faulting_vma(unsigned long address, char *buf) | |||
99 | * if the container VMA is not found | 96 | * if the container VMA is not found |
100 | */ | 97 | */ |
101 | if (vma && (vma->vm_start <= address)) { | 98 | if (vma && (vma->vm_start <= address)) { |
102 | struct file *file = vma->vm_file; | 99 | if (vma->vm_file) { |
103 | if (file) { | 100 | nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); |
104 | nm = file_path(file, buf, PAGE_SIZE - 1); | 101 | if (IS_ERR(nm)) |
105 | inode = file_inode(vma->vm_file); | 102 | nm = "?"; |
106 | dev = inode->i_sb->s_dev; | ||
107 | ino = inode->i_ino; | ||
108 | } | 103 | } |
109 | pr_info(" @off 0x%lx in [%s]\n" | 104 | pr_info(" @off 0x%lx in [%s]\n" |
110 | " VMA: 0x%08lx to 0x%08lx\n", | 105 | " VMA: 0x%08lx to 0x%08lx\n", |
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 25c631942500..f2701c13a66b 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c | |||
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len) | |||
65 | 65 | ||
66 | n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", | 66 | n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", |
67 | perip_base, | 67 | perip_base, |
68 | IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency ")); | 68 | IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) ")); |
69 | 69 | ||
70 | return buf; | 70 | return buf; |
71 | } | 71 | } |
@@ -897,15 +897,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz) | |||
897 | } | 897 | } |
898 | 898 | ||
899 | /* | 899 | /* |
900 | * DMA ops for systems with IOC | ||
901 | * IOC hardware snoops all DMA traffic keeping the caches consistent with | ||
902 | * memory - eliding need for any explicit cache maintenance of DMA buffers | ||
903 | */ | ||
904 | static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {} | ||
905 | static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {} | ||
906 | static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {} | ||
907 | |||
908 | /* | ||
909 | * Exported DMA API | 900 | * Exported DMA API |
910 | */ | 901 | */ |
911 | void dma_cache_wback_inv(phys_addr_t start, unsigned long sz) | 902 | void dma_cache_wback_inv(phys_addr_t start, unsigned long sz) |
@@ -1153,6 +1144,19 @@ noinline void __init arc_ioc_setup(void) | |||
1153 | { | 1144 | { |
1154 | unsigned int ioc_base, mem_sz; | 1145 | unsigned int ioc_base, mem_sz; |
1155 | 1146 | ||
1147 | /* | ||
1148 | * As for today we don't support both IOC and ZONE_HIGHMEM enabled | ||
1149 | * simultaneously. This happens because as of today IOC aperture covers | ||
1150 | * only ZONE_NORMAL (low mem) and any dma transactions outside this | ||
1151 | * region won't be HW coherent. | ||
1152 | * If we want to use both IOC and ZONE_HIGHMEM we can use | ||
1153 | * bounce_buffer to handle dma transactions to HIGHMEM. | ||
1154 | * Also it is possible to modify dma_direct cache ops or increase IOC | ||
1155 | * aperture size if we are planning to use HIGHMEM without PAE. | ||
1156 | */ | ||
1157 | if (IS_ENABLED(CONFIG_HIGHMEM)) | ||
1158 | panic("IOC and HIGHMEM can't be used simultaneously"); | ||
1159 | |||
1156 | /* Flush + invalidate + disable L1 dcache */ | 1160 | /* Flush + invalidate + disable L1 dcache */ |
1157 | __dc_disable(); | 1161 | __dc_disable(); |
1158 | 1162 | ||
@@ -1264,11 +1268,7 @@ void __init arc_cache_init_master(void) | |||
1264 | if (is_isa_arcv2() && ioc_enable) | 1268 | if (is_isa_arcv2() && ioc_enable) |
1265 | arc_ioc_setup(); | 1269 | arc_ioc_setup(); |
1266 | 1270 | ||
1267 | if (is_isa_arcv2() && ioc_enable) { | 1271 | if (is_isa_arcv2() && l2_line_sz && slc_enable) { |
1268 | __dma_cache_wback_inv = __dma_cache_wback_inv_ioc; | ||
1269 | __dma_cache_inv = __dma_cache_inv_ioc; | ||
1270 | __dma_cache_wback = __dma_cache_wback_ioc; | ||
1271 | } else if (is_isa_arcv2() && l2_line_sz && slc_enable) { | ||
1272 | __dma_cache_wback_inv = __dma_cache_wback_inv_slc; | 1272 | __dma_cache_wback_inv = __dma_cache_wback_inv_slc; |
1273 | __dma_cache_inv = __dma_cache_inv_slc; | 1273 | __dma_cache_inv = __dma_cache_inv_slc; |
1274 | __dma_cache_wback = __dma_cache_wback_slc; | 1274 | __dma_cache_wback = __dma_cache_wback_slc; |
@@ -1277,6 +1277,12 @@ void __init arc_cache_init_master(void) | |||
1277 | __dma_cache_inv = __dma_cache_inv_l1; | 1277 | __dma_cache_inv = __dma_cache_inv_l1; |
1278 | __dma_cache_wback = __dma_cache_wback_l1; | 1278 | __dma_cache_wback = __dma_cache_wback_l1; |
1279 | } | 1279 | } |
1280 | /* | ||
1281 | * In case of IOC (say IOC+SLC case), pointers above could still be set | ||
1282 | * but end up not being relevant as the first function in chain is not | ||
1283 | * called at all for @dma_direct_ops | ||
1284 | * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() | ||
1285 | */ | ||
1280 | } | 1286 | } |
1281 | 1287 | ||
1282 | void __ref arc_cache_init(void) | 1288 | void __ref arc_cache_init(void) |
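
The new comment in arc_cache_init_master() describes the chain arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*(); that chain is driven by ordinary streaming-DMA use in drivers. A hedged sketch of such a caller, where dev, buf and len are assumed to come from the driver:

    #include <linux/dma-mapping.h>

    static int rx_one_buffer(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, handle))
            return -ENOMEM;

        /* ... program the device and wait for the transfer ... */

        /* On non-IOC ARC this is where arch_sync_dma_for_cpu() and hence
         * the __dma_cache_*() callbacks set up above get exercised. */
        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
        return 0;
    }
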
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index ec47e6079f5d..c75d5c3470e3 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c | |||
@@ -6,20 +6,17 @@ | |||
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* | ||
10 | * DMA Coherent API Notes | ||
11 | * | ||
12 | * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is | ||
13 | * implemented by accessing it using a kernel virtual address, with | ||
14 | * Cache bit off in the TLB entry. | ||
15 | * | ||
16 | * The default DMA address == Phy address which is 0x8000_0000 based. | ||
17 | */ | ||
18 | |||
19 | #include <linux/dma-noncoherent.h> | 9 | #include <linux/dma-noncoherent.h> |
20 | #include <asm/cache.h> | 10 | #include <asm/cache.h> |
21 | #include <asm/cacheflush.h> | 11 | #include <asm/cacheflush.h> |
22 | 12 | ||
13 | /* | ||
14 | * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c) | ||
15 | * - hardware IOC not available (or "dma-coherent" not set for device in DT) | ||
16 | * - But still handle both coherent and non-coherent requests from caller | ||
17 | * | ||
18 | * For DMA coherent hardware (IOC) generic code suffices | ||
19 | */ | ||
23 | void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, | 20 | void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, |
24 | gfp_t gfp, unsigned long attrs) | 21 | gfp_t gfp, unsigned long attrs) |
25 | { | 22 | { |
@@ -27,42 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
27 | struct page *page; | 24 | struct page *page; |
28 | phys_addr_t paddr; | 25 | phys_addr_t paddr; |
29 | void *kvaddr; | 26 | void *kvaddr; |
30 | int need_coh = 1, need_kvaddr = 0; | 27 | bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT); |
31 | |||
32 | page = alloc_pages(gfp, order); | ||
33 | if (!page) | ||
34 | return NULL; | ||
35 | 28 | ||
36 | /* | 29 | /* |
37 | * IOC relies on all data (even coherent DMA data) being in cache | 30 | * __GFP_HIGHMEM flag is cleared by upper layer functions |
38 | * Thus allocate normal cached memory | 31 | * (in include/linux/dma-mapping.h) so we should never get a |
39 | * | 32 | * __GFP_HIGHMEM here. |
40 | * The gains with IOC are two pronged: | ||
41 | * -For streaming data, elides need for cache maintenance, saving | ||
42 | * cycles in flush code, and bus bandwidth as all the lines of a | ||
43 | * buffer need to be flushed out to memory | ||
44 | * -For coherent data, Read/Write to buffers terminate early in cache | ||
45 | * (vs. always going to memory - thus are faster) | ||
46 | */ | 33 | */ |
47 | if ((is_isa_arcv2() && ioc_enable) || | 34 | BUG_ON(gfp & __GFP_HIGHMEM); |
48 | (attrs & DMA_ATTR_NON_CONSISTENT)) | ||
49 | need_coh = 0; | ||
50 | 35 | ||
51 | /* | 36 | page = alloc_pages(gfp, order); |
52 | * - A coherent buffer needs MMU mapping to enforce non-cachability | 37 | if (!page) |
53 | * - A highmem page needs a virtual handle (hence MMU mapping) | 38 | return NULL; |
54 | * independent of cachability | ||
55 | */ | ||
56 | if (PageHighMem(page) || need_coh) | ||
57 | need_kvaddr = 1; | ||
58 | 39 | ||
59 | /* This is linear addr (0x8000_0000 based) */ | 40 | /* This is linear addr (0x8000_0000 based) */ |
60 | paddr = page_to_phys(page); | 41 | paddr = page_to_phys(page); |
61 | 42 | ||
62 | *dma_handle = paddr; | 43 | *dma_handle = paddr; |
63 | 44 | ||
64 | /* This is kernel Virtual address (0x7000_0000 based) */ | 45 | /* |
65 | if (need_kvaddr) { | 46 | * A coherent buffer needs MMU mapping to enforce non-cachability. |
47 | * kvaddr is kernel Virtual address (0x7000_0000 based). | ||
48 | */ | ||
49 | if (need_coh) { | ||
66 | kvaddr = ioremap_nocache(paddr, size); | 50 | kvaddr = ioremap_nocache(paddr, size); |
67 | if (kvaddr == NULL) { | 51 | if (kvaddr == NULL) { |
68 | __free_pages(page, order); | 52 | __free_pages(page, order); |
@@ -93,12 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, | |||
93 | { | 77 | { |
94 | phys_addr_t paddr = dma_handle; | 78 | phys_addr_t paddr = dma_handle; |
95 | struct page *page = virt_to_page(paddr); | 79 | struct page *page = virt_to_page(paddr); |
96 | int is_non_coh = 1; | ||
97 | |||
98 | is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || | ||
99 | (is_isa_arcv2() && ioc_enable); | ||
100 | 80 | ||
101 | if (PageHighMem(page) || !is_non_coh) | 81 | if (!(attrs & DMA_ATTR_NON_CONSISTENT)) |
102 | iounmap((void __force __iomem *)vaddr); | 82 | iounmap((void __force __iomem *)vaddr); |
103 | 83 | ||
104 | __free_pages(page, get_order(size)); | 84 | __free_pages(page, get_order(size)); |
@@ -185,3 +165,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, | |||
185 | break; | 165 | break; |
186 | } | 166 | } |
187 | } | 167 | } |
168 | |||
169 | /* | ||
170 | * Plug in coherent or noncoherent dma ops | ||
171 | */ | ||
172 | void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | ||
173 | const struct iommu_ops *iommu, bool coherent) | ||
174 | { | ||
175 | /* | ||
176 | * IOC hardware snoops all DMA traffic keeping the caches consistent | ||
177 | * with memory - eliding need for any explicit cache maintenance of | ||
178 | * DMA buffers - so we can use dma_direct cache ops. | ||
179 | */ | ||
180 | if (is_isa_arcv2() && ioc_enable && coherent) { | ||
181 | set_dma_ops(dev, &dma_direct_ops); | ||
182 | dev_info(dev, "use dma_direct_ops cache ops\n"); | ||
183 | } else { | ||
184 | set_dma_ops(dev, &dma_noncoherent_ops); | ||
185 | dev_info(dev, "use dma_noncoherent_ops cache ops\n"); | ||
186 | } | ||
187 | } | ||
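
The two arc/mm hunks above move the IOC decision out of the per-call cache routines and into arch_setup_dma_ops(): when ARCv2 IOC is enabled and the device is marked coherent, the generic dma_direct ops are installed and the __dma_cache_*() pointers are never reached. A minimal userspace sketch of that selection logic follows (illustrative only, not part of the patch; the names mirror the kernel's but the types are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct dma_map_ops { const char *name; };

static const struct dma_map_ops dma_direct_ops      = { "dma_direct_ops" };
static const struct dma_map_ops dma_noncoherent_ops = { "dma_noncoherent_ops" };

/* Mirrors the check in arch_setup_dma_ops() above: IOC snoops all DMA
 * traffic, so a coherent device on ARCv2 with IOC enabled can use the
 * direct ops; everything else gets the noncoherent ops, whose
 * arch_sync_dma_for_*() hooks do explicit cache maintenance. */
static const struct dma_map_ops *pick_dma_ops(bool is_isa_arcv2,
					      bool ioc_enable, bool coherent)
{
	if (is_isa_arcv2 && ioc_enable && coherent)
		return &dma_direct_ops;
	return &dma_noncoherent_ops;
}

int main(void)
{
	printf("%s\n", pick_dma_ops(true, true, true)->name);   /* dma_direct_ops */
	printf("%s\n", pick_dma_ops(true, true, false)->name);  /* dma_noncoherent_ops */
	return 0;
}
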
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts index 4d969013f99a..4d969013f99a 100755..100644 --- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts +++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts | |||
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index f0cbd86312dc..d4b7c59eec68 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi | |||
@@ -469,6 +469,7 @@ | |||
469 | ti,hwmods = "rtc"; | 469 | ti,hwmods = "rtc"; |
470 | clocks = <&clk_32768_ck>; | 470 | clocks = <&clk_32768_ck>; |
471 | clock-names = "int-clk"; | 471 | clock-names = "int-clk"; |
472 | system-power-controller; | ||
472 | status = "disabled"; | 473 | status = "disabled"; |
473 | }; | 474 | }; |
474 | 475 | ||
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index 9fb47724b9c1..ad2ae25b7b4d 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts | |||
@@ -13,6 +13,43 @@ | |||
13 | reg = <0x40000000 0x08000000>; | 13 | reg = <0x40000000 0x08000000>; |
14 | }; | 14 | }; |
15 | 15 | ||
16 | reg_vddio_sd0: regulator-vddio-sd0 { | ||
17 | compatible = "regulator-fixed"; | ||
18 | regulator-name = "vddio-sd0"; | ||
19 | regulator-min-microvolt = <3300000>; | ||
20 | regulator-max-microvolt = <3300000>; | ||
21 | gpio = <&gpio1 29 0>; | ||
22 | }; | ||
23 | |||
24 | reg_lcd_3v3: regulator-lcd-3v3 { | ||
25 | compatible = "regulator-fixed"; | ||
26 | regulator-name = "lcd-3v3"; | ||
27 | regulator-min-microvolt = <3300000>; | ||
28 | regulator-max-microvolt = <3300000>; | ||
29 | gpio = <&gpio1 18 0>; | ||
30 | enable-active-high; | ||
31 | }; | ||
32 | |||
33 | reg_lcd_5v: regulator-lcd-5v { | ||
34 | compatible = "regulator-fixed"; | ||
35 | regulator-name = "lcd-5v"; | ||
36 | regulator-min-microvolt = <5000000>; | ||
37 | regulator-max-microvolt = <5000000>; | ||
38 | }; | ||
39 | |||
40 | panel { | ||
41 | compatible = "sii,43wvf1g"; | ||
42 | backlight = <&backlight_display>; | ||
43 | dvdd-supply = <®_lcd_3v3>; | ||
44 | avdd-supply = <®_lcd_5v>; | ||
45 | |||
46 | port { | ||
47 | panel_in: endpoint { | ||
48 | remote-endpoint = <&display_out>; | ||
49 | }; | ||
50 | }; | ||
51 | }; | ||
52 | |||
16 | apb@80000000 { | 53 | apb@80000000 { |
17 | apbh@80000000 { | 54 | apbh@80000000 { |
18 | gpmi-nand@8000c000 { | 55 | gpmi-nand@8000c000 { |
@@ -52,31 +89,11 @@ | |||
52 | lcdif@80030000 { | 89 | lcdif@80030000 { |
53 | pinctrl-names = "default"; | 90 | pinctrl-names = "default"; |
54 | pinctrl-0 = <&lcdif_24bit_pins_a>; | 91 | pinctrl-0 = <&lcdif_24bit_pins_a>; |
55 | lcd-supply = <®_lcd_3v3>; | ||
56 | display = <&display0>; | ||
57 | status = "okay"; | 92 | status = "okay"; |
58 | 93 | ||
59 | display0: display0 { | 94 | port { |
60 | bits-per-pixel = <32>; | 95 | display_out: endpoint { |
61 | bus-width = <24>; | 96 | remote-endpoint = <&panel_in>; |
62 | |||
63 | display-timings { | ||
64 | native-mode = <&timing0>; | ||
65 | timing0: timing0 { | ||
66 | clock-frequency = <9200000>; | ||
67 | hactive = <480>; | ||
68 | vactive = <272>; | ||
69 | hback-porch = <15>; | ||
70 | hfront-porch = <8>; | ||
71 | vback-porch = <12>; | ||
72 | vfront-porch = <4>; | ||
73 | hsync-len = <1>; | ||
74 | vsync-len = <1>; | ||
75 | hsync-active = <0>; | ||
76 | vsync-active = <0>; | ||
77 | de-active = <1>; | ||
78 | pixelclk-active = <0>; | ||
79 | }; | ||
80 | }; | 97 | }; |
81 | }; | 98 | }; |
82 | }; | 99 | }; |
@@ -118,32 +135,7 @@ | |||
118 | }; | 135 | }; |
119 | }; | 136 | }; |
120 | 137 | ||
121 | regulators { | 138 | backlight_display: backlight { |
122 | compatible = "simple-bus"; | ||
123 | #address-cells = <1>; | ||
124 | #size-cells = <0>; | ||
125 | |||
126 | reg_vddio_sd0: regulator@0 { | ||
127 | compatible = "regulator-fixed"; | ||
128 | reg = <0>; | ||
129 | regulator-name = "vddio-sd0"; | ||
130 | regulator-min-microvolt = <3300000>; | ||
131 | regulator-max-microvolt = <3300000>; | ||
132 | gpio = <&gpio1 29 0>; | ||
133 | }; | ||
134 | |||
135 | reg_lcd_3v3: regulator@1 { | ||
136 | compatible = "regulator-fixed"; | ||
137 | reg = <1>; | ||
138 | regulator-name = "lcd-3v3"; | ||
139 | regulator-min-microvolt = <3300000>; | ||
140 | regulator-max-microvolt = <3300000>; | ||
141 | gpio = <&gpio1 18 0>; | ||
142 | enable-active-high; | ||
143 | }; | ||
144 | }; | ||
145 | |||
146 | backlight { | ||
147 | compatible = "pwm-backlight"; | 139 | compatible = "pwm-backlight"; |
148 | pwms = <&pwm 2 5000000>; | 140 | pwms = <&pwm 2 5000000>; |
149 | brightness-levels = <0 4 8 16 32 64 128 255>; | 141 | brightness-levels = <0 4 8 16 32 64 128 255>; |
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 6b0ae667640f..93ab5bdfe068 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts | |||
@@ -13,6 +13,87 @@ | |||
13 | reg = <0x40000000 0x08000000>; | 13 | reg = <0x40000000 0x08000000>; |
14 | }; | 14 | }; |
15 | 15 | ||
16 | |||
17 | reg_3p3v: regulator-3p3v { | ||
18 | compatible = "regulator-fixed"; | ||
19 | regulator-name = "3P3V"; | ||
20 | regulator-min-microvolt = <3300000>; | ||
21 | regulator-max-microvolt = <3300000>; | ||
22 | regulator-always-on; | ||
23 | }; | ||
24 | |||
25 | reg_vddio_sd0: regulator-vddio-sd0 { | ||
26 | compatible = "regulator-fixed"; | ||
27 | regulator-name = "vddio-sd0"; | ||
28 | regulator-min-microvolt = <3300000>; | ||
29 | regulator-max-microvolt = <3300000>; | ||
30 | gpio = <&gpio3 28 0>; | ||
31 | }; | ||
32 | |||
33 | reg_fec_3v3: regulator-fec-3v3 { | ||
34 | compatible = "regulator-fixed"; | ||
35 | regulator-name = "fec-3v3"; | ||
36 | regulator-min-microvolt = <3300000>; | ||
37 | regulator-max-microvolt = <3300000>; | ||
38 | gpio = <&gpio2 15 0>; | ||
39 | }; | ||
40 | |||
41 | reg_usb0_vbus: regulator-usb0-vbus { | ||
42 | compatible = "regulator-fixed"; | ||
43 | regulator-name = "usb0_vbus"; | ||
44 | regulator-min-microvolt = <5000000>; | ||
45 | regulator-max-microvolt = <5000000>; | ||
46 | gpio = <&gpio3 9 0>; | ||
47 | enable-active-high; | ||
48 | }; | ||
49 | |||
50 | reg_usb1_vbus: regulator-usb1-vbus { | ||
51 | compatible = "regulator-fixed"; | ||
52 | regulator-name = "usb1_vbus"; | ||
53 | regulator-min-microvolt = <5000000>; | ||
54 | regulator-max-microvolt = <5000000>; | ||
55 | gpio = <&gpio3 8 0>; | ||
56 | enable-active-high; | ||
57 | }; | ||
58 | |||
59 | reg_lcd_3v3: regulator-lcd-3v3 { | ||
60 | compatible = "regulator-fixed"; | ||
61 | regulator-name = "lcd-3v3"; | ||
62 | regulator-min-microvolt = <3300000>; | ||
63 | regulator-max-microvolt = <3300000>; | ||
64 | gpio = <&gpio3 30 0>; | ||
65 | enable-active-high; | ||
66 | }; | ||
67 | |||
68 | reg_can_3v3: regulator-can-3v3 { | ||
69 | compatible = "regulator-fixed"; | ||
70 | regulator-name = "can-3v3"; | ||
71 | regulator-min-microvolt = <3300000>; | ||
72 | regulator-max-microvolt = <3300000>; | ||
73 | gpio = <&gpio2 13 0>; | ||
74 | enable-active-high; | ||
75 | }; | ||
76 | |||
77 | reg_lcd_5v: regulator-lcd-5v { | ||
78 | compatible = "regulator-fixed"; | ||
79 | regulator-name = "lcd-5v"; | ||
80 | regulator-min-microvolt = <5000000>; | ||
81 | regulator-max-microvolt = <5000000>; | ||
82 | }; | ||
83 | |||
84 | panel { | ||
85 | compatible = "sii,43wvf1g"; | ||
86 | backlight = <&backlight_display>; | ||
87 | dvdd-supply = <®_lcd_3v3>; | ||
88 | avdd-supply = <®_lcd_5v>; | ||
89 | |||
90 | port { | ||
91 | panel_in: endpoint { | ||
92 | remote-endpoint = <&display_out>; | ||
93 | }; | ||
94 | }; | ||
95 | }; | ||
96 | |||
16 | apb@80000000 { | 97 | apb@80000000 { |
17 | apbh@80000000 { | 98 | apbh@80000000 { |
18 | gpmi-nand@8000c000 { | 99 | gpmi-nand@8000c000 { |
@@ -116,31 +197,11 @@ | |||
116 | pinctrl-names = "default"; | 197 | pinctrl-names = "default"; |
117 | pinctrl-0 = <&lcdif_24bit_pins_a | 198 | pinctrl-0 = <&lcdif_24bit_pins_a |
118 | &lcdif_pins_evk>; | 199 | &lcdif_pins_evk>; |
119 | lcd-supply = <®_lcd_3v3>; | ||
120 | display = <&display0>; | ||
121 | status = "okay"; | 200 | status = "okay"; |
122 | 201 | ||
123 | display0: display0 { | 202 | port { |
124 | bits-per-pixel = <32>; | 203 | display_out: endpoint { |
125 | bus-width = <24>; | 204 | remote-endpoint = <&panel_in>; |
126 | |||
127 | display-timings { | ||
128 | native-mode = <&timing0>; | ||
129 | timing0: timing0 { | ||
130 | clock-frequency = <33500000>; | ||
131 | hactive = <800>; | ||
132 | vactive = <480>; | ||
133 | hback-porch = <89>; | ||
134 | hfront-porch = <164>; | ||
135 | vback-porch = <23>; | ||
136 | vfront-porch = <10>; | ||
137 | hsync-len = <10>; | ||
138 | vsync-len = <10>; | ||
139 | hsync-active = <0>; | ||
140 | vsync-active = <0>; | ||
141 | de-active = <1>; | ||
142 | pixelclk-active = <0>; | ||
143 | }; | ||
144 | }; | 205 | }; |
145 | }; | 206 | }; |
146 | }; | 207 | }; |
@@ -269,80 +330,6 @@ | |||
269 | }; | 330 | }; |
270 | }; | 331 | }; |
271 | 332 | ||
272 | regulators { | ||
273 | compatible = "simple-bus"; | ||
274 | #address-cells = <1>; | ||
275 | #size-cells = <0>; | ||
276 | |||
277 | reg_3p3v: regulator@0 { | ||
278 | compatible = "regulator-fixed"; | ||
279 | reg = <0>; | ||
280 | regulator-name = "3P3V"; | ||
281 | regulator-min-microvolt = <3300000>; | ||
282 | regulator-max-microvolt = <3300000>; | ||
283 | regulator-always-on; | ||
284 | }; | ||
285 | |||
286 | reg_vddio_sd0: regulator@1 { | ||
287 | compatible = "regulator-fixed"; | ||
288 | reg = <1>; | ||
289 | regulator-name = "vddio-sd0"; | ||
290 | regulator-min-microvolt = <3300000>; | ||
291 | regulator-max-microvolt = <3300000>; | ||
292 | gpio = <&gpio3 28 0>; | ||
293 | }; | ||
294 | |||
295 | reg_fec_3v3: regulator@2 { | ||
296 | compatible = "regulator-fixed"; | ||
297 | reg = <2>; | ||
298 | regulator-name = "fec-3v3"; | ||
299 | regulator-min-microvolt = <3300000>; | ||
300 | regulator-max-microvolt = <3300000>; | ||
301 | gpio = <&gpio2 15 0>; | ||
302 | }; | ||
303 | |||
304 | reg_usb0_vbus: regulator@3 { | ||
305 | compatible = "regulator-fixed"; | ||
306 | reg = <3>; | ||
307 | regulator-name = "usb0_vbus"; | ||
308 | regulator-min-microvolt = <5000000>; | ||
309 | regulator-max-microvolt = <5000000>; | ||
310 | gpio = <&gpio3 9 0>; | ||
311 | enable-active-high; | ||
312 | }; | ||
313 | |||
314 | reg_usb1_vbus: regulator@4 { | ||
315 | compatible = "regulator-fixed"; | ||
316 | reg = <4>; | ||
317 | regulator-name = "usb1_vbus"; | ||
318 | regulator-min-microvolt = <5000000>; | ||
319 | regulator-max-microvolt = <5000000>; | ||
320 | gpio = <&gpio3 8 0>; | ||
321 | enable-active-high; | ||
322 | }; | ||
323 | |||
324 | reg_lcd_3v3: regulator@5 { | ||
325 | compatible = "regulator-fixed"; | ||
326 | reg = <5>; | ||
327 | regulator-name = "lcd-3v3"; | ||
328 | regulator-min-microvolt = <3300000>; | ||
329 | regulator-max-microvolt = <3300000>; | ||
330 | gpio = <&gpio3 30 0>; | ||
331 | enable-active-high; | ||
332 | }; | ||
333 | |||
334 | reg_can_3v3: regulator@6 { | ||
335 | compatible = "regulator-fixed"; | ||
336 | reg = <6>; | ||
337 | regulator-name = "can-3v3"; | ||
338 | regulator-min-microvolt = <3300000>; | ||
339 | regulator-max-microvolt = <3300000>; | ||
340 | gpio = <&gpio2 13 0>; | ||
341 | enable-active-high; | ||
342 | }; | ||
343 | |||
344 | }; | ||
345 | |||
346 | sound { | 333 | sound { |
347 | compatible = "fsl,imx28-evk-sgtl5000", | 334 | compatible = "fsl,imx28-evk-sgtl5000", |
348 | "fsl,mxs-audio-sgtl5000"; | 335 | "fsl,mxs-audio-sgtl5000"; |
@@ -363,7 +350,7 @@ | |||
363 | }; | 350 | }; |
364 | }; | 351 | }; |
365 | 352 | ||
366 | backlight { | 353 | backlight_display: backlight { |
367 | compatible = "pwm-backlight"; | 354 | compatible = "pwm-backlight"; |
368 | pwms = <&pwm 2 5000000>; | 355 | pwms = <&pwm 2 5000000>; |
369 | brightness-levels = <0 4 8 16 32 64 128 255>; | 356 | brightness-levels = <0 4 8 16 32 64 128 255>; |
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index 7cbc2ffa4b3a..7234e8330a57 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi | |||
@@ -126,10 +126,14 @@ | |||
126 | interrupt-names = "msi"; | 126 | interrupt-names = "msi"; |
127 | #interrupt-cells = <1>; | 127 | #interrupt-cells = <1>; |
128 | interrupt-map-mask = <0 0 0 0x7>; | 128 | interrupt-map-mask = <0 0 0 0x7>; |
129 | interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, | 129 | /* |
130 | <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, | 130 | * Reference manual lists pci irqs incorrectly |
131 | <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, | 131 | * Real hardware ordering is same as imx6: D+MSI, C, B, A |
132 | <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; | 132 | */ |
133 | interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, | ||
134 | <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, | ||
135 | <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, | ||
136 | <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; | ||
133 | clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, | 137 | clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, |
134 | <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, | 138 | <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, |
135 | <&clks IMX7D_PCIE_PHY_ROOT_CLK>; | 139 | <&clks IMX7D_PCIE_PHY_ROOT_CLK>; |
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 12d6822f0057..04758a2a87f0 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts | |||
@@ -354,7 +354,7 @@ | |||
354 | &mmc2 { | 354 | &mmc2 { |
355 | vmmc-supply = <&vsdio>; | 355 | vmmc-supply = <&vsdio>; |
356 | bus-width = <8>; | 356 | bus-width = <8>; |
357 | non-removable; | 357 | ti,non-removable; |
358 | }; | 358 | }; |
359 | 359 | ||
360 | &mmc3 { | 360 | &mmc3 { |
@@ -621,15 +621,6 @@ | |||
621 | OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */ | 621 | OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */ |
622 | >; | 622 | >; |
623 | }; | 623 | }; |
624 | }; | ||
625 | |||
626 | &omap4_pmx_wkup { | ||
627 | usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { | ||
628 | /* gpio_wk0 */ | ||
629 | pinctrl-single,pins = < | ||
630 | OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) | ||
631 | >; | ||
632 | }; | ||
633 | 624 | ||
634 | vibrator_direction_pin: pinmux_vibrator_direction_pin { | 625 | vibrator_direction_pin: pinmux_vibrator_direction_pin { |
635 | pinctrl-single,pins = < | 626 | pinctrl-single,pins = < |
@@ -644,6 +635,15 @@ | |||
644 | }; | 635 | }; |
645 | }; | 636 | }; |
646 | 637 | ||
638 | &omap4_pmx_wkup { | ||
639 | usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { | ||
640 | /* gpio_wk0 */ | ||
641 | pinctrl-single,pins = < | ||
642 | OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) | ||
643 | >; | ||
644 | }; | ||
645 | }; | ||
646 | |||
647 | /* | 647 | /* |
648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for | 648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for |
649 | * uart1 wakeirq. | 649 | * uart1 wakeirq. |
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index e2c127608bcc..7eca43ff69bb 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig | |||
@@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y | |||
257 | CONFIG_DRM=y | 257 | CONFIG_DRM=y |
258 | CONFIG_DRM_PANEL_LVDS=y | 258 | CONFIG_DRM_PANEL_LVDS=y |
259 | CONFIG_DRM_PANEL_SIMPLE=y | 259 | CONFIG_DRM_PANEL_SIMPLE=y |
260 | CONFIG_DRM_PANEL_SEIKO_43WVF1G=y | ||
260 | CONFIG_DRM_DW_HDMI_AHB_AUDIO=m | 261 | CONFIG_DRM_DW_HDMI_AHB_AUDIO=m |
261 | CONFIG_DRM_DW_HDMI_CEC=y | 262 | CONFIG_DRM_DW_HDMI_CEC=y |
262 | CONFIG_DRM_IMX=y | 263 | CONFIG_DRM_IMX=y |
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index 148226e36152..7b8212857535 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig | |||
@@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y | |||
95 | CONFIG_REGULATOR=y | 95 | CONFIG_REGULATOR=y |
96 | CONFIG_REGULATOR_FIXED_VOLTAGE=y | 96 | CONFIG_REGULATOR_FIXED_VOLTAGE=y |
97 | CONFIG_DRM=y | 97 | CONFIG_DRM=y |
98 | CONFIG_DRM_PANEL_SEIKO_43WVF1G=y | ||
98 | CONFIG_DRM_MXSFB=y | 99 | CONFIG_DRM_MXSFB=y |
99 | CONFIG_FB_MODE_HELPERS=y | 100 | CONFIG_FB_MODE_HELPERS=y |
100 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 101 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index df68dc4056e5..5282324c7cef 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig | |||
@@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | CONFIG_SLAB=y | 7 | CONFIG_SLAB=y |
8 | CONFIG_MODULES=y | ||
9 | CONFIG_MODULE_UNLOAD=y | ||
10 | CONFIG_PARTITION_ADVANCED=y | ||
11 | # CONFIG_ARCH_MULTI_V7 is not set | 8 | # CONFIG_ARCH_MULTI_V7 is not set |
12 | CONFIG_ARCH_VERSATILE=y | 9 | CONFIG_ARCH_VERSATILE=y |
13 | CONFIG_AEABI=y | 10 | CONFIG_AEABI=y |
14 | CONFIG_OABI_COMPAT=y | 11 | CONFIG_OABI_COMPAT=y |
15 | CONFIG_CMA=y | ||
16 | CONFIG_ZBOOT_ROM_TEXT=0x0 | 12 | CONFIG_ZBOOT_ROM_TEXT=0x0 |
17 | CONFIG_ZBOOT_ROM_BSS=0x0 | 13 | CONFIG_ZBOOT_ROM_BSS=0x0 |
18 | CONFIG_CMDLINE="root=1f03 mem=32M" | 14 | CONFIG_CMDLINE="root=1f03 mem=32M" |
19 | CONFIG_FPE_NWFPE=y | 15 | CONFIG_FPE_NWFPE=y |
20 | CONFIG_VFP=y | 16 | CONFIG_VFP=y |
17 | CONFIG_MODULES=y | ||
18 | CONFIG_MODULE_UNLOAD=y | ||
19 | CONFIG_PARTITION_ADVANCED=y | ||
20 | CONFIG_CMA=y | ||
21 | CONFIG_NET=y | 21 | CONFIG_NET=y |
22 | CONFIG_PACKET=y | 22 | CONFIG_PACKET=y |
23 | CONFIG_UNIX=y | 23 | CONFIG_UNIX=y |
@@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y | |||
59 | CONFIG_DRM=y | 59 | CONFIG_DRM=y |
60 | CONFIG_DRM_PANEL_ARM_VERSATILE=y | 60 | CONFIG_DRM_PANEL_ARM_VERSATILE=y |
61 | CONFIG_DRM_PANEL_SIMPLE=y | 61 | CONFIG_DRM_PANEL_SIMPLE=y |
62 | CONFIG_DRM_DUMB_VGA_DAC=y | ||
62 | CONFIG_DRM_PL111=y | 63 | CONFIG_DRM_PL111=y |
63 | CONFIG_FB_MODE_HELPERS=y | 64 | CONFIG_FB_MODE_HELPERS=y |
64 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 65 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
@@ -89,9 +90,10 @@ CONFIG_NFSD=y | |||
89 | CONFIG_NFSD_V3=y | 90 | CONFIG_NFSD_V3=y |
90 | CONFIG_NLS_CODEPAGE_850=m | 91 | CONFIG_NLS_CODEPAGE_850=m |
91 | CONFIG_NLS_ISO8859_1=m | 92 | CONFIG_NLS_ISO8859_1=m |
93 | CONFIG_FONTS=y | ||
94 | CONFIG_FONT_ACORN_8x8=y | ||
95 | CONFIG_DEBUG_FS=y | ||
92 | CONFIG_MAGIC_SYSRQ=y | 96 | CONFIG_MAGIC_SYSRQ=y |
93 | CONFIG_DEBUG_KERNEL=y | 97 | CONFIG_DEBUG_KERNEL=y |
94 | CONFIG_DEBUG_USER=y | 98 | CONFIG_DEBUG_USER=y |
95 | CONFIG_DEBUG_LL=y | 99 | CONFIG_DEBUG_LL=y |
96 | CONFIG_FONTS=y | ||
97 | CONFIG_FONT_ACORN_8x8=y | ||
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 79906cecb091..3ad482d2f1eb 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, | |||
223 | struct kvm_vcpu_events *events); | 223 | struct kvm_vcpu_events *events); |
224 | 224 | ||
225 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 225 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
226 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | ||
227 | int kvm_unmap_hva_range(struct kvm *kvm, | 226 | int kvm_unmap_hva_range(struct kvm *kvm, |
228 | unsigned long start, unsigned long end); | 227 | unsigned long start, unsigned long end); |
229 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 228 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 2ceffd85dd3d..cd65ea4e9c54 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -2161,6 +2161,37 @@ static int of_dev_hwmod_lookup(struct device_node *np, | |||
2161 | } | 2161 | } |
2162 | 2162 | ||
2163 | /** | 2163 | /** |
2164 | * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets | ||
2165 | * | ||
2166 | * @oh: struct omap_hwmod * | ||
2167 | * @np: struct device_node * | ||
2168 | * | ||
2169 | * Fix up module register offsets for modules with mpu_rt_idx. | ||
2170 | * Only needed for cpsw with interconnect target module defined | ||
2171 | * in device tree while still using legacy hwmod platform data | ||
2172 | * for rev, sysc and syss registers. | ||
2173 | * | ||
2174 | * Can be removed when all cpsw hwmod platform data has been | ||
2175 | * dropped. | ||
2176 | */ | ||
2177 | static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh, | ||
2178 | struct device_node *np, | ||
2179 | struct resource *res) | ||
2180 | { | ||
2181 | struct device_node *child = NULL; | ||
2182 | int error; | ||
2183 | |||
2184 | child = of_get_next_child(np, child); | ||
2185 | if (!child) | ||
2186 | return; | ||
2187 | |||
2188 | error = of_address_to_resource(child, oh->mpu_rt_idx, res); | ||
2189 | if (error) | ||
2190 | pr_err("%s: error mapping mpu_rt_idx: %i\n", | ||
2191 | __func__, error); | ||
2192 | } | ||
2193 | |||
2194 | /** | ||
2164 | * omap_hwmod_parse_module_range - map module IO range from device tree | 2195 | * omap_hwmod_parse_module_range - map module IO range from device tree |
2165 | * @oh: struct omap_hwmod * | 2196 | * @oh: struct omap_hwmod * |
2166 | * @np: struct device_node * | 2197 | * @np: struct device_node * |
@@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh, | |||
2220 | size = be32_to_cpup(ranges); | 2251 | size = be32_to_cpup(ranges); |
2221 | 2252 | ||
2222 | pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", | 2253 | pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", |
2223 | oh->name, np->name, base, size); | 2254 | oh ? oh->name : "", np->name, base, size); |
2255 | |||
2256 | if (oh && oh->mpu_rt_idx) { | ||
2257 | omap_hwmod_fix_mpu_rt_idx(oh, np, res); | ||
2258 | |||
2259 | return 0; | ||
2260 | } | ||
2224 | 2261 | ||
2225 | res->start = base; | 2262 | res->start = base; |
2226 | res->end = base + size - 1; | 2263 | res->end = base + size - 1; |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 29e75b47becd..1b1a0e95c751 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK | |||
763 | 763 | ||
764 | config HOLES_IN_ZONE | 764 | config HOLES_IN_ZONE |
765 | def_bool y | 765 | def_bool y |
766 | depends on NUMA | ||
767 | 766 | ||
768 | source kernel/Kconfig.hz | 767 | source kernel/Kconfig.hz |
769 | 768 | ||
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts index ceffc40810ee..48daec7f78ba 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts | |||
@@ -46,6 +46,7 @@ | |||
46 | pinctrl-0 = <&mmc0_pins>; | 46 | pinctrl-0 = <&mmc0_pins>; |
47 | vmmc-supply = <®_cldo1>; | 47 | vmmc-supply = <®_cldo1>; |
48 | cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; | 48 | cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; |
49 | bus-width = <4>; | ||
49 | status = "okay"; | 50 | status = "okay"; |
50 | }; | 51 | }; |
51 | 52 | ||
@@ -56,6 +57,7 @@ | |||
56 | vqmmc-supply = <®_bldo2>; | 57 | vqmmc-supply = <®_bldo2>; |
57 | non-removable; | 58 | non-removable; |
58 | cap-mmc-hw-reset; | 59 | cap-mmc-hw-reset; |
60 | bus-width = <8>; | ||
59 | status = "okay"; | 61 | status = "okay"; |
60 | }; | 62 | }; |
61 | 63 | ||
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f67e8d5e93ad..db8d364f8476 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
@@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y | |||
38 | CONFIG_ARCH_BERLIN=y | 38 | CONFIG_ARCH_BERLIN=y |
39 | CONFIG_ARCH_BRCMSTB=y | 39 | CONFIG_ARCH_BRCMSTB=y |
40 | CONFIG_ARCH_EXYNOS=y | 40 | CONFIG_ARCH_EXYNOS=y |
41 | CONFIG_ARCH_K3=y | ||
41 | CONFIG_ARCH_LAYERSCAPE=y | 42 | CONFIG_ARCH_LAYERSCAPE=y |
42 | CONFIG_ARCH_LG1K=y | 43 | CONFIG_ARCH_LG1K=y |
43 | CONFIG_ARCH_HISI=y | 44 | CONFIG_ARCH_HISI=y |
@@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y | |||
605 | CONFIG_ARCH_TEGRA_210_SOC=y | 606 | CONFIG_ARCH_TEGRA_210_SOC=y |
606 | CONFIG_ARCH_TEGRA_186_SOC=y | 607 | CONFIG_ARCH_TEGRA_186_SOC=y |
607 | CONFIG_ARCH_TEGRA_194_SOC=y | 608 | CONFIG_ARCH_TEGRA_194_SOC=y |
609 | CONFIG_ARCH_K3_AM6_SOC=y | ||
610 | CONFIG_SOC_TI=y | ||
608 | CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y | 611 | CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y |
609 | CONFIG_EXTCON_USB_GPIO=y | 612 | CONFIG_EXTCON_USB_GPIO=y |
610 | CONFIG_EXTCON_USBC_CROS_EC=y | 613 | CONFIG_EXTCON_USBC_CROS_EC=y |
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 6e9f33d14930..067d8937d5af 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c | |||
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req) | |||
417 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); | 417 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); |
418 | put_unaligned_be32(2, iv + GCM_IV_SIZE); | 418 | put_unaligned_be32(2, iv + GCM_IV_SIZE); |
419 | 419 | ||
420 | while (walk.nbytes >= AES_BLOCK_SIZE) { | 420 | while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { |
421 | int blocks = walk.nbytes / AES_BLOCK_SIZE; | 421 | int blocks = walk.nbytes / AES_BLOCK_SIZE; |
422 | u8 *dst = walk.dst.virt.addr; | 422 | u8 *dst = walk.dst.virt.addr; |
423 | u8 *src = walk.src.virt.addr; | 423 | u8 *src = walk.src.virt.addr; |
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req) | |||
437 | NULL); | 437 | NULL); |
438 | 438 | ||
439 | err = skcipher_walk_done(&walk, | 439 | err = skcipher_walk_done(&walk, |
440 | walk.nbytes % AES_BLOCK_SIZE); | 440 | walk.nbytes % (2 * AES_BLOCK_SIZE)); |
441 | } | 441 | } |
442 | if (walk.nbytes) | 442 | if (walk.nbytes) { |
443 | __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, | 443 | __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, |
444 | nrounds); | 444 | nrounds); |
445 | if (walk.nbytes > AES_BLOCK_SIZE) { | ||
446 | crypto_inc(iv, AES_BLOCK_SIZE); | ||
447 | __aes_arm64_encrypt(ctx->aes_key.key_enc, | ||
448 | ks + AES_BLOCK_SIZE, iv, | ||
449 | nrounds); | ||
450 | } | ||
451 | } | ||
445 | } | 452 | } |
446 | 453 | ||
447 | /* handle the tail */ | 454 | /* handle the tail */ |
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req) | |||
545 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); | 552 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); |
546 | put_unaligned_be32(2, iv + GCM_IV_SIZE); | 553 | put_unaligned_be32(2, iv + GCM_IV_SIZE); |
547 | 554 | ||
548 | while (walk.nbytes >= AES_BLOCK_SIZE) { | 555 | while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { |
549 | int blocks = walk.nbytes / AES_BLOCK_SIZE; | 556 | int blocks = walk.nbytes / AES_BLOCK_SIZE; |
550 | u8 *dst = walk.dst.virt.addr; | 557 | u8 *dst = walk.dst.virt.addr; |
551 | u8 *src = walk.src.virt.addr; | 558 | u8 *src = walk.src.virt.addr; |
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req) | |||
564 | } while (--blocks > 0); | 571 | } while (--blocks > 0); |
565 | 572 | ||
566 | err = skcipher_walk_done(&walk, | 573 | err = skcipher_walk_done(&walk, |
567 | walk.nbytes % AES_BLOCK_SIZE); | 574 | walk.nbytes % (2 * AES_BLOCK_SIZE)); |
568 | } | 575 | } |
569 | if (walk.nbytes) | 576 | if (walk.nbytes) { |
577 | if (walk.nbytes > AES_BLOCK_SIZE) { | ||
578 | u8 *iv2 = iv + AES_BLOCK_SIZE; | ||
579 | |||
580 | memcpy(iv2, iv, AES_BLOCK_SIZE); | ||
581 | crypto_inc(iv2, AES_BLOCK_SIZE); | ||
582 | |||
583 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2, | ||
584 | iv2, nrounds); | ||
585 | } | ||
570 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, | 586 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, |
571 | nrounds); | 587 | nrounds); |
588 | } | ||
572 | } | 589 | } |
573 | 590 | ||
574 | /* handle the tail */ | 591 | /* handle the tail */ |
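
For context on the gcm_encrypt()/gcm_decrypt() change above: the bulk loop now only consumes input while at least two full AES blocks remain, so the tail handled after the loop can be up to two blocks long and may need a second keystream/counter block, which is what the added walk.nbytes > AES_BLOCK_SIZE branches compute. A small arithmetic sketch of how many keystream blocks that tail needs (illustrative only, not from the patch):

#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* After a loop that only runs while nbytes >= 2 * AES_BLOCK_SIZE, the
 * remaining tail is 0..(2 * AES_BLOCK_SIZE - 1) bytes and needs
 * ceil(tail / 16) keystream blocks, i.e. 1 or 2 when non-empty. */
static unsigned int tail_keystream_blocks(unsigned int tail_bytes)
{
	return (tail_bytes + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;
}

int main(void)
{
	printf("%u\n", tail_keystream_blocks(12));  /* 1 */
	printf("%u\n", tail_keystream_blocks(16));  /* 1 */
	printf("%u\n", tail_keystream_blocks(17));  /* 2 */
	printf("%u\n", tail_keystream_blocks(31));  /* 2 */
	return 0;
}
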
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index b7fb5274b250..0c4fc223f225 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c | |||
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void) | |||
69 | crypto_unregister_alg(&sm4_ce_alg); | 69 | crypto_unregister_alg(&sm4_ce_alg); |
70 | } | 70 | } |
71 | 71 | ||
72 | module_cpu_feature_match(SM3, sm4_ce_mod_init); | 72 | module_cpu_feature_match(SM4, sm4_ce_mod_init); |
73 | module_exit(sm4_ce_mod_fini); | 73 | module_exit(sm4_ce_mod_fini); |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f26055f2306e..3d6d7336f871 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -61,8 +61,7 @@ struct kvm_arch { | |||
61 | u64 vmid_gen; | 61 | u64 vmid_gen; |
62 | u32 vmid; | 62 | u32 vmid; |
63 | 63 | ||
64 | /* 1-level 2nd stage table and lock */ | 64 | /* 1-level 2nd stage table, protected by kvm->mmu_lock */ |
65 | spinlock_t pgd_lock; | ||
66 | pgd_t *pgd; | 65 | pgd_t *pgd; |
67 | 66 | ||
68 | /* VTTBR value associated with above pgd and vmid */ | 67 | /* VTTBR value associated with above pgd and vmid */ |
@@ -357,7 +356,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, | |||
357 | struct kvm_vcpu_events *events); | 356 | struct kvm_vcpu_events *events); |
358 | 357 | ||
359 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 358 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
360 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | ||
361 | int kvm_unmap_hva_range(struct kvm *kvm, | 359 | int kvm_unmap_hva_range(struct kvm *kvm, |
362 | unsigned long start, unsigned long end); | 360 | unsigned long start, unsigned long end); |
363 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 361 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index d496ef579859..ca46153d7915 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) | |||
98 | val = read_sysreg(cpacr_el1); | 98 | val = read_sysreg(cpacr_el1); |
99 | val |= CPACR_EL1_TTA; | 99 | val |= CPACR_EL1_TTA; |
100 | val &= ~CPACR_EL1_ZEN; | 100 | val &= ~CPACR_EL1_ZEN; |
101 | if (!update_fp_enabled(vcpu)) | 101 | if (!update_fp_enabled(vcpu)) { |
102 | val &= ~CPACR_EL1_FPEN; | 102 | val &= ~CPACR_EL1_FPEN; |
103 | __activate_traps_fpsimd32(vcpu); | ||
104 | } | ||
103 | 105 | ||
104 | write_sysreg(val, cpacr_el1); | 106 | write_sysreg(val, cpacr_el1); |
105 | 107 | ||
@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) | |||
114 | 116 | ||
115 | val = CPTR_EL2_DEFAULT; | 117 | val = CPTR_EL2_DEFAULT; |
116 | val |= CPTR_EL2_TTA | CPTR_EL2_TZ; | 118 | val |= CPTR_EL2_TTA | CPTR_EL2_TZ; |
117 | if (!update_fp_enabled(vcpu)) | 119 | if (!update_fp_enabled(vcpu)) { |
118 | val |= CPTR_EL2_TFP; | 120 | val |= CPTR_EL2_TFP; |
121 | __activate_traps_fpsimd32(vcpu); | ||
122 | } | ||
119 | 123 | ||
120 | write_sysreg(val, cptr_el2); | 124 | write_sysreg(val, cptr_el2); |
121 | } | 125 | } |
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) | |||
129 | if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) | 133 | if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) |
130 | write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); | 134 | write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); |
131 | 135 | ||
132 | __activate_traps_fpsimd32(vcpu); | ||
133 | if (has_vhe()) | 136 | if (has_vhe()) |
134 | activate_traps_vhe(vcpu); | 137 | activate_traps_vhe(vcpu); |
135 | else | 138 | else |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 65f86271f02b..8080c9f489c3 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -985,8 +985,9 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) | |||
985 | 985 | ||
986 | pmd = READ_ONCE(*pmdp); | 986 | pmd = READ_ONCE(*pmdp); |
987 | 987 | ||
988 | /* No-op for empty entry and WARN_ON for valid entry */ | 988 | if (!pmd_present(pmd)) |
989 | if (!pmd_present(pmd) || !pmd_table(pmd)) { | 989 | return 1; |
990 | if (!pmd_table(pmd)) { | ||
990 | VM_WARN_ON(!pmd_table(pmd)); | 991 | VM_WARN_ON(!pmd_table(pmd)); |
991 | return 1; | 992 | return 1; |
992 | } | 993 | } |
@@ -1007,8 +1008,9 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) | |||
1007 | 1008 | ||
1008 | pud = READ_ONCE(*pudp); | 1009 | pud = READ_ONCE(*pudp); |
1009 | 1010 | ||
1010 | /* No-op for empty entry and WARN_ON for valid entry */ | 1011 | if (!pud_present(pud)) |
1011 | if (!pud_present(pud) || !pud_table(pud)) { | 1012 | return 1; |
1013 | if (!pud_table(pud)) { | ||
1012 | VM_WARN_ON(!pud_table(pud)); | 1014 | VM_WARN_ON(!pud_table(pud)); |
1013 | return 1; | 1015 | return 1; |
1014 | } | 1016 | } |
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 3534aa6a4dc2..1b083c500b9a 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c | |||
@@ -98,11 +98,10 @@ static time64_t pmu_read_time(void) | |||
98 | 98 | ||
99 | if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) | 99 | if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) |
100 | return 0; | 100 | return 0; |
101 | while (!req.complete) | 101 | pmu_wait_complete(&req); |
102 | pmu_poll(); | ||
103 | 102 | ||
104 | time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) | | 103 | time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) | |
105 | (req.reply[3] << 8) | req.reply[4]); | 104 | (req.reply[2] << 8) | req.reply[3]); |
106 | 105 | ||
107 | return time - RTC_OFFSET; | 106 | return time - RTC_OFFSET; |
108 | } | 107 | } |
@@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time) | |||
116 | (data >> 24) & 0xFF, (data >> 16) & 0xFF, | 115 | (data >> 24) & 0xFF, (data >> 16) & 0xFF, |
117 | (data >> 8) & 0xFF, data & 0xFF) < 0) | 116 | (data >> 8) & 0xFF, data & 0xFF) < 0) |
118 | return; | 117 | return; |
119 | while (!req.complete) | 118 | pmu_wait_complete(&req); |
120 | pmu_poll(); | ||
121 | } | 119 | } |
122 | 120 | ||
123 | static __u8 pmu_read_pram(int offset) | 121 | static __u8 pmu_read_pram(int offset) |
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 70dde040779b..f5453d944ff5 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c | |||
@@ -172,7 +172,7 @@ void __init cf_bootmem_alloc(void) | |||
172 | high_memory = (void *)_ramend; | 172 | high_memory = (void *)_ramend; |
173 | 173 | ||
174 | /* Reserve kernel text/data/bss */ | 174 | /* Reserve kernel text/data/bss */ |
175 | memblock_reserve(memstart, memstart - _rambase); | 175 | memblock_reserve(_rambase, memstart - _rambase); |
176 | 176 | ||
177 | m68k_virt_to_node_shift = fls(_ramend - 1) - 6; | 177 | m68k_virt_to_node_shift = fls(_ramend - 1) - 6; |
178 | module_fixup(NULL, __start_fixup, __stop_fixup); | 178 | module_fixup(NULL, __start_fixup, __stop_fixup); |
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index a9af1d2dcd69..2c1c53d12179 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, | |||
931 | bool write); | 931 | bool write); |
932 | 932 | ||
933 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 933 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
934 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | ||
935 | int kvm_unmap_hva_range(struct kvm *kvm, | 934 | int kvm_unmap_hva_range(struct kvm *kvm, |
936 | unsigned long start, unsigned long end); | 935 | unsigned long start, unsigned long end); |
937 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 936 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 019035d7225c..8f845f6e5f42 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/ioport.h> | 15 | #include <linux/ioport.h> |
16 | #include <linux/kernel.h> | ||
16 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
17 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
18 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
@@ -20,6 +21,7 @@ | |||
20 | 21 | ||
21 | #include <asm/abi.h> | 22 | #include <asm/abi.h> |
22 | #include <asm/mips-cps.h> | 23 | #include <asm/mips-cps.h> |
24 | #include <asm/page.h> | ||
23 | #include <asm/vdso.h> | 25 | #include <asm/vdso.h> |
24 | 26 | ||
25 | /* Kernel-provided data used by the VDSO. */ | 27 | /* Kernel-provided data used by the VDSO. */ |
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
128 | vvar_size = gic_size + PAGE_SIZE; | 130 | vvar_size = gic_size + PAGE_SIZE; |
129 | size = vvar_size + image->size; | 131 | size = vvar_size + image->size; |
130 | 132 | ||
133 | /* | ||
134 | * Find a region that's large enough for us to perform the | ||
135 | * colour-matching alignment below. | ||
136 | */ | ||
137 | if (cpu_has_dc_aliases) | ||
138 | size += shm_align_mask + 1; | ||
139 | |||
131 | base = get_unmapped_area(NULL, 0, size, 0, 0); | 140 | base = get_unmapped_area(NULL, 0, size, 0, 0); |
132 | if (IS_ERR_VALUE(base)) { | 141 | if (IS_ERR_VALUE(base)) { |
133 | ret = base; | 142 | ret = base; |
134 | goto out; | 143 | goto out; |
135 | } | 144 | } |
136 | 145 | ||
146 | /* | ||
147 | * If we suffer from dcache aliasing, ensure that the VDSO data page | ||
148 | * mapping is coloured the same as the kernel's mapping of that memory. | ||
149 | * This ensures that when the kernel updates the VDSO data userland | ||
150 | * will observe it without requiring cache invalidations. | ||
151 | */ | ||
152 | if (cpu_has_dc_aliases) { | ||
153 | base = __ALIGN_MASK(base, shm_align_mask); | ||
154 | base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask; | ||
155 | } | ||
156 | |||
137 | data_addr = base + gic_size; | 157 | data_addr = base + gic_size; |
138 | vdso_addr = data_addr + PAGE_SIZE; | 158 | vdso_addr = data_addr + PAGE_SIZE; |
139 | 159 | ||
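
A worked example of the colour-matching added in the vdso.c hunk above (illustrative only; the mask and addresses are made up): aligning the user base down to a colour boundary and then adding the colour bits of the kernel's vdso_data address leaves the user data page and the kernel mapping with identical aliasing bits, so kernel stores to the VDSO data are visible to userland without extra cache invalidation.

#include <stdio.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

int main(void)
{
	/* Example values only: a 32 KiB aliasing window, a MIPS32-style
	 * KSEG0 address for the kernel's vdso_data, and a base picked by
	 * get_unmapped_area() with the extra slack added above. */
	unsigned long shm_align_mask = 0x7fff;
	unsigned long vdso_data_addr = 0x801d2000UL;
	unsigned long gic_size = 0x1000;
	unsigned long base = 0x77fd3000UL;

	base = __ALIGN_MASK(base, shm_align_mask);
	base += (vdso_data_addr - gic_size) & shm_align_mask;

	/* data_addr = base + gic_size, as in the function above. */
	printf("user colour:   0x%04lx\n", (base + gic_size) & shm_align_mask);
	printf("kernel colour: 0x%04lx\n", vdso_data_addr & shm_align_mask);
	return 0;
}

Both lines print 0x2000 here: the data page mapped into userland shares the kernel mapping's cache colour.
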
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index ee64db032793..d8dcdb350405 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c | |||
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | |||
512 | return 1; | 512 | return 1; |
513 | } | 513 | } |
514 | 514 | ||
515 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | ||
516 | { | ||
517 | unsigned long end = hva + PAGE_SIZE; | ||
518 | |||
519 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); | ||
520 | |||
521 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) | 515 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) |
526 | { | 516 | { |
527 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | 517 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); |
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 1d4248fa55e9..7068f341133d 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig | |||
@@ -40,6 +40,10 @@ config NDS32 | |||
40 | select NO_IOPORT_MAP | 40 | select NO_IOPORT_MAP |
41 | select RTC_LIB | 41 | select RTC_LIB |
42 | select THREAD_INFO_IN_TASK | 42 | select THREAD_INFO_IN_TASK |
43 | select HAVE_FUNCTION_TRACER | ||
44 | select HAVE_FUNCTION_GRAPH_TRACER | ||
45 | select HAVE_FTRACE_MCOUNT_RECORD | ||
46 | select HAVE_DYNAMIC_FTRACE | ||
43 | help | 47 | help |
44 | Andes(nds32) Linux support. | 48 | Andes(nds32) Linux support. |
45 | 49 | ||
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 63f4f173e5f4..3509fac10491 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile | |||
@@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := defconfig | |||
5 | 5 | ||
6 | comma = , | 6 | comma = , |
7 | 7 | ||
8 | ifdef CONFIG_FUNCTION_TRACER | ||
9 | arch-y += -malways-save-lp -mno-relax | ||
10 | endif | ||
11 | |||
8 | KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog) | 12 | KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog) |
9 | KBUILD_CFLAGS += -mcmodel=large | 13 | KBUILD_CFLAGS += -mcmodel=large |
10 | 14 | ||
diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h index 56c479058802..f5f9cf7e0544 100644 --- a/arch/nds32/include/asm/elf.h +++ b/arch/nds32/include/asm/elf.h | |||
@@ -121,9 +121,9 @@ struct elf32_hdr; | |||
121 | */ | 121 | */ |
122 | #define ELF_CLASS ELFCLASS32 | 122 | #define ELF_CLASS ELFCLASS32 |
123 | #ifdef __NDS32_EB__ | 123 | #ifdef __NDS32_EB__ |
124 | #define ELF_DATA ELFDATA2MSB; | 124 | #define ELF_DATA ELFDATA2MSB |
125 | #else | 125 | #else |
126 | #define ELF_DATA ELFDATA2LSB; | 126 | #define ELF_DATA ELFDATA2LSB |
127 | #endif | 127 | #endif |
128 | #define ELF_ARCH EM_NDS32 | 128 | #define ELF_ARCH EM_NDS32 |
129 | #define USE_ELF_CORE_DUMP | 129 | #define USE_ELF_CORE_DUMP |
diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h new file mode 100644 index 000000000000..2f96cc96aa35 --- /dev/null +++ b/arch/nds32/include/asm/ftrace.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifndef __ASM_NDS32_FTRACE_H | ||
4 | #define __ASM_NDS32_FTRACE_H | ||
5 | |||
6 | #ifdef CONFIG_FUNCTION_TRACER | ||
7 | |||
8 | #define HAVE_FUNCTION_GRAPH_FP_TEST | ||
9 | |||
10 | #define MCOUNT_ADDR ((unsigned long)(_mcount)) | ||
11 | /* mcount call is composed of three instructions: | ||
12 | * sethi + ori + jral | ||
13 | */ | ||
14 | #define MCOUNT_INSN_SIZE 12 | ||
15 | |||
16 | extern void _mcount(unsigned long parent_ip); | ||
17 | |||
18 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
19 | |||
20 | #define FTRACE_ADDR ((unsigned long)_ftrace_caller) | ||
21 | |||
22 | #ifdef __NDS32_EL__ | ||
23 | #define INSN_NOP 0x09000040 | ||
24 | #define INSN_SIZE(insn) (((insn & 0x00000080) == 0) ? 4 : 2) | ||
25 | #define IS_SETHI(insn) ((insn & 0x000000fe) == 0x00000046) | ||
26 | #define ENDIAN_CONVERT(insn) be32_to_cpu(insn) | ||
27 | #else /* __NDS32_EB__ */ | ||
28 | #define INSN_NOP 0x40000009 | ||
29 | #define INSN_SIZE(insn) (((insn & 0x80000000) == 0) ? 4 : 2) | ||
30 | #define IS_SETHI(insn) ((insn & 0xfe000000) == 0x46000000) | ||
31 | #define ENDIAN_CONVERT(insn) (insn) | ||
32 | #endif | ||
33 | |||
34 | extern void _ftrace_caller(unsigned long parent_ip); | ||
35 | static inline unsigned long ftrace_call_adjust(unsigned long addr) | ||
36 | { | ||
37 | return addr; | ||
38 | } | ||
39 | struct dyn_arch_ftrace { | ||
40 | }; | ||
41 | |||
42 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
43 | |||
44 | #endif /* CONFIG_FUNCTION_TRACER */ | ||
45 | |||
46 | #endif /* __ASM_NDS32_FTRACE_H */ | ||
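
The new ftrace.h above encodes the mcount call site as sethi + ori + jral (12 bytes) and provides helpers to recognise and size instructions when patching. A quick sketch exercising the __NDS32_EB__ (big-endian layout) variants of those helpers (illustrative only; the instruction words are made-up examples):

#include <stdint.h>
#include <stdio.h>

/* Copies of the __NDS32_EB__ definitions from the header above. */
#define INSN_NOP	0x40000009
#define INSN_SIZE(insn)	((((insn) & 0x80000000) == 0) ? 4 : 2)
#define IS_SETHI(insn)	(((insn) & 0xfe000000) == 0x46000000)

int main(void)
{
	uint32_t sethi_example = 0x46000123;	/* hypothetical sethi word */
	uint32_t insn16_example = 0xc0000000;	/* hypothetical 16-bit insn */

	printf("sethi? %d, size %d\n", IS_SETHI(sethi_example),
	       INSN_SIZE(sethi_example));	/* sethi? 1, size 4 */
	printf("sethi? %d, size %d\n", IS_SETHI(insn16_example),
	       INSN_SIZE(insn16_example));	/* sethi? 0, size 2 */
	printf("nop word: 0x%08x\n", INSN_NOP);
	return 0;
}
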
diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h index 19b19394a936..68c38151c3e4 100644 --- a/arch/nds32/include/asm/nds32.h +++ b/arch/nds32/include/asm/nds32.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #else | 17 | #else |
18 | #define FP_OFFSET (-2) | 18 | #define FP_OFFSET (-2) |
19 | #endif | 19 | #endif |
20 | #define LP_OFFSET (-1) | ||
20 | 21 | ||
21 | extern void __init early_trap_init(void); | 22 | extern void __init early_trap_init(void); |
22 | static inline void GIE_ENABLE(void) | 23 | static inline void GIE_ENABLE(void) |
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 18a009f3804d..362a32d9bd16 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h | |||
@@ -38,7 +38,7 @@ struct exception_table_entry { | |||
38 | extern int fixup_exception(struct pt_regs *regs); | 38 | extern int fixup_exception(struct pt_regs *regs); |
39 | 39 | ||
40 | #define KERNEL_DS ((mm_segment_t) { ~0UL }) | 40 | #define KERNEL_DS ((mm_segment_t) { ~0UL }) |
41 | #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) | 41 | #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) |
42 | 42 | ||
43 | #define get_ds() (KERNEL_DS) | 43 | #define get_ds() (KERNEL_DS) |
44 | #define get_fs() (current_thread_info()->addr_limit) | 44 | #define get_fs() (current_thread_info()->addr_limit) |
@@ -49,11 +49,11 @@ static inline void set_fs(mm_segment_t fs) | |||
49 | current_thread_info()->addr_limit = fs; | 49 | current_thread_info()->addr_limit = fs; |
50 | } | 50 | } |
51 | 51 | ||
52 | #define segment_eq(a, b) ((a) == (b)) | 52 | #define segment_eq(a, b) ((a) == (b)) |
53 | 53 | ||
54 | #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) | 54 | #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) |
55 | 55 | ||
56 | #define access_ok(type, addr, size) \ | 56 | #define access_ok(type, addr, size) \ |
57 | __range_ok((unsigned long)addr, (unsigned long)size) | 57 | __range_ok((unsigned long)addr, (unsigned long)size) |
58 | /* | 58 | /* |
59 | * Single-value transfer routines. They automatically use the right | 59 | * Single-value transfer routines. They automatically use the right |
@@ -75,70 +75,73 @@ static inline void set_fs(mm_segment_t fs) | |||
75 | * versions are void (ie, don't return a value as such). | 75 | * versions are void (ie, don't return a value as such). |
76 | */ | 76 | */ |
77 | 77 | ||
78 | #define get_user(x,p) \ | 78 | #define get_user __get_user \ |
79 | ({ \ | 79 | |
80 | long __e = -EFAULT; \ | 80 | #define __get_user(x, ptr) \ |
81 | if(likely(access_ok(VERIFY_READ, p, sizeof(*p)))) { \ | ||
82 | __e = __get_user(x,p); \ | ||
83 | } else \ | ||
84 | x = 0; \ | ||
85 | __e; \ | ||
86 | }) | ||
87 | #define __get_user(x,ptr) \ | ||
88 | ({ \ | 81 | ({ \ |
89 | long __gu_err = 0; \ | 82 | long __gu_err = 0; \ |
90 | __get_user_err((x),(ptr),__gu_err); \ | 83 | __get_user_check((x), (ptr), __gu_err); \ |
91 | __gu_err; \ | 84 | __gu_err; \ |
92 | }) | 85 | }) |
93 | 86 | ||
94 | #define __get_user_error(x,ptr,err) \ | 87 | #define __get_user_error(x, ptr, err) \ |
95 | ({ \ | 88 | ({ \ |
96 | __get_user_err((x),(ptr),err); \ | 89 | __get_user_check((x), (ptr), (err)); \ |
97 | (void) 0; \ | 90 | (void)0; \ |
98 | }) | 91 | }) |
99 | 92 | ||
100 | #define __get_user_err(x,ptr,err) \ | 93 | #define __get_user_check(x, ptr, err) \ |
94 | ({ \ | ||
95 | const __typeof__(*(ptr)) __user *__p = (ptr); \ | ||
96 | might_fault(); \ | ||
97 | if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ | ||
98 | __get_user_err((x), __p, (err)); \ | ||
99 | } else { \ | ||
100 | (x) = 0; (err) = -EFAULT; \ | ||
101 | } \ | ||
102 | }) | ||
103 | |||
104 | #define __get_user_err(x, ptr, err) \ | ||
101 | do { \ | 105 | do { \ |
102 | unsigned long __gu_addr = (unsigned long)(ptr); \ | ||
103 | unsigned long __gu_val; \ | 106 | unsigned long __gu_val; \ |
104 | __chk_user_ptr(ptr); \ | 107 | __chk_user_ptr(ptr); \ |
105 | switch (sizeof(*(ptr))) { \ | 108 | switch (sizeof(*(ptr))) { \ |
106 | case 1: \ | 109 | case 1: \ |
107 | __get_user_asm("lbi",__gu_val,__gu_addr,err); \ | 110 | __get_user_asm("lbi", __gu_val, (ptr), (err)); \ |
108 | break; \ | 111 | break; \ |
109 | case 2: \ | 112 | case 2: \ |
110 | __get_user_asm("lhi",__gu_val,__gu_addr,err); \ | 113 | __get_user_asm("lhi", __gu_val, (ptr), (err)); \ |
111 | break; \ | 114 | break; \ |
112 | case 4: \ | 115 | case 4: \ |
113 | __get_user_asm("lwi",__gu_val,__gu_addr,err); \ | 116 | __get_user_asm("lwi", __gu_val, (ptr), (err)); \ |
114 | break; \ | 117 | break; \ |
115 | case 8: \ | 118 | case 8: \ |
116 | __get_user_asm_dword(__gu_val,__gu_addr,err); \ | 119 | __get_user_asm_dword(__gu_val, (ptr), (err)); \ |
117 | break; \ | 120 | break; \ |
118 | default: \ | 121 | default: \ |
119 | BUILD_BUG(); \ | 122 | BUILD_BUG(); \ |
120 | break; \ | 123 | break; \ |
121 | } \ | 124 | } \ |
122 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 125 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
123 | } while (0) | 126 | } while (0) |
124 | 127 | ||
125 | #define __get_user_asm(inst,x,addr,err) \ | 128 | #define __get_user_asm(inst, x, addr, err) \ |
126 | asm volatile( \ | 129 | __asm__ __volatile__ ( \ |
127 | "1: "inst" %1,[%2]\n" \ | 130 | "1: "inst" %1,[%2]\n" \ |
128 | "2:\n" \ | 131 | "2:\n" \ |
129 | " .section .fixup,\"ax\"\n" \ | 132 | " .section .fixup,\"ax\"\n" \ |
130 | " .align 2\n" \ | 133 | " .align 2\n" \ |
131 | "3: move %0, %3\n" \ | 134 | "3: move %0, %3\n" \ |
132 | " move %1, #0\n" \ | 135 | " move %1, #0\n" \ |
133 | " b 2b\n" \ | 136 | " b 2b\n" \ |
134 | " .previous\n" \ | 137 | " .previous\n" \ |
135 | " .section __ex_table,\"a\"\n" \ | 138 | " .section __ex_table,\"a\"\n" \ |
136 | " .align 3\n" \ | 139 | " .align 3\n" \ |
137 | " .long 1b, 3b\n" \ | 140 | " .long 1b, 3b\n" \ |
138 | " .previous" \ | 141 | " .previous" \ |
139 | : "+r" (err), "=&r" (x) \ | 142 | : "+r" (err), "=&r" (x) \ |
140 | : "r" (addr), "i" (-EFAULT) \ | 143 | : "r" (addr), "i" (-EFAULT) \ |
141 | : "cc") | 144 | : "cc") |
142 | 145 | ||
143 | #ifdef __NDS32_EB__ | 146 | #ifdef __NDS32_EB__ |
144 | #define __gu_reg_oper0 "%H1" | 147 | #define __gu_reg_oper0 "%H1" |
@@ -149,61 +152,66 @@ do { \ | |||
149 | #endif | 152 | #endif |
150 | 153 | ||
151 | #define __get_user_asm_dword(x, addr, err) \ | 154 | #define __get_user_asm_dword(x, addr, err) \ |
152 | asm volatile( \ | 155 | __asm__ __volatile__ ( \ |
153 | "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ | 156 | "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ |
154 | "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ | 157 | "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ |
155 | "3:\n" \ | 158 | "3:\n" \ |
156 | " .section .fixup,\"ax\"\n" \ | 159 | " .section .fixup,\"ax\"\n" \ |
157 | " .align 2\n" \ | 160 | " .align 2\n" \ |
158 | "4: move %0, %3\n" \ | 161 | "4: move %0, %3\n" \ |
159 | " b 3b\n" \ | 162 | " b 3b\n" \ |
160 | " .previous\n" \ | 163 | " .previous\n" \ |
161 | " .section __ex_table,\"a\"\n" \ | 164 | " .section __ex_table,\"a\"\n" \ |
162 | " .align 3\n" \ | 165 | " .align 3\n" \ |
163 | " .long 1b, 4b\n" \ | 166 | " .long 1b, 4b\n" \ |
164 | " .long 2b, 4b\n" \ | 167 | " .long 2b, 4b\n" \ |
165 | " .previous" \ | 168 | " .previous" \ |
166 | : "+r"(err), "=&r"(x) \ | 169 | : "+r"(err), "=&r"(x) \ |
167 | : "r"(addr), "i"(-EFAULT) \ | 170 | : "r"(addr), "i"(-EFAULT) \ |
168 | : "cc") | 171 | : "cc") |
169 | #define put_user(x,p) \ | 172 | |
170 | ({ \ | 173 | #define put_user __put_user \ |
171 | long __e = -EFAULT; \ | 174 | |
172 | if(likely(access_ok(VERIFY_WRITE, p, sizeof(*p)))) { \ | 175 | #define __put_user(x, ptr) \ |
173 | __e = __put_user(x,p); \ | ||
174 | } \ | ||
175 | __e; \ | ||
176 | }) | ||
177 | #define __put_user(x,ptr) \ | ||
178 | ({ \ | 176 | ({ \ |
179 | long __pu_err = 0; \ | 177 | long __pu_err = 0; \ |
180 | __put_user_err((x),(ptr),__pu_err); \ | 178 | __put_user_err((x), (ptr), __pu_err); \ |
181 | __pu_err; \ | 179 | __pu_err; \ |
182 | }) | 180 | }) |
183 | 181 | ||
184 | #define __put_user_error(x,ptr,err) \ | 182 | #define __put_user_error(x, ptr, err) \ |
183 | ({ \ | ||
184 | __put_user_err((x), (ptr), (err)); \ | ||
185 | (void)0; \ | ||
186 | }) | ||
187 | |||
188 | #define __put_user_check(x, ptr, err) \ | ||
185 | ({ \ | 189 | ({ \ |
186 | __put_user_err((x),(ptr),err); \ | 190 | __typeof__(*(ptr)) __user *__p = (ptr); \ |
187 | (void) 0; \ | 191 | might_fault(); \ |
192 | if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ | ||
193 | __put_user_err((x), __p, (err)); \ | ||
194 | } else { \ | ||
195 | (err) = -EFAULT; \ | ||
196 | } \ | ||
188 | }) | 197 | }) |
189 | 198 | ||
190 | #define __put_user_err(x,ptr,err) \ | 199 | #define __put_user_err(x, ptr, err) \ |
191 | do { \ | 200 | do { \ |
192 | unsigned long __pu_addr = (unsigned long)(ptr); \ | ||
193 | __typeof__(*(ptr)) __pu_val = (x); \ | 201 | __typeof__(*(ptr)) __pu_val = (x); \ |
194 | __chk_user_ptr(ptr); \ | 202 | __chk_user_ptr(ptr); \ |
195 | switch (sizeof(*(ptr))) { \ | 203 | switch (sizeof(*(ptr))) { \ |
196 | case 1: \ | 204 | case 1: \ |
197 | __put_user_asm("sbi",__pu_val,__pu_addr,err); \ | 205 | __put_user_asm("sbi", __pu_val, (ptr), (err)); \ |
198 | break; \ | 206 | break; \ |
199 | case 2: \ | 207 | case 2: \ |
200 | __put_user_asm("shi",__pu_val,__pu_addr,err); \ | 208 | __put_user_asm("shi", __pu_val, (ptr), (err)); \ |
201 | break; \ | 209 | break; \ |
202 | case 4: \ | 210 | case 4: \ |
203 | __put_user_asm("swi",__pu_val,__pu_addr,err); \ | 211 | __put_user_asm("swi", __pu_val, (ptr), (err)); \ |
204 | break; \ | 212 | break; \ |
205 | case 8: \ | 213 | case 8: \ |
206 | __put_user_asm_dword(__pu_val,__pu_addr,err); \ | 214 | __put_user_asm_dword(__pu_val, (ptr), (err)); \ |
207 | break; \ | 215 | break; \ |
208 | default: \ | 216 | default: \ |
209 | BUILD_BUG(); \ | 217 | BUILD_BUG(); \ |
@@ -211,22 +219,22 @@ do { \ | |||
211 | } \ | 219 | } \ |
212 | } while (0) | 220 | } while (0) |
213 | 221 | ||
214 | #define __put_user_asm(inst,x,addr,err) \ | 222 | #define __put_user_asm(inst, x, addr, err) \ |
215 | asm volatile( \ | 223 | __asm__ __volatile__ ( \ |
216 | "1: "inst" %1,[%2]\n" \ | 224 | "1: "inst" %1,[%2]\n" \ |
217 | "2:\n" \ | 225 | "2:\n" \ |
218 | " .section .fixup,\"ax\"\n" \ | 226 | " .section .fixup,\"ax\"\n" \ |
219 | " .align 2\n" \ | 227 | " .align 2\n" \ |
220 | "3: move %0, %3\n" \ | 228 | "3: move %0, %3\n" \ |
221 | " b 2b\n" \ | 229 | " b 2b\n" \ |
222 | " .previous\n" \ | 230 | " .previous\n" \ |
223 | " .section __ex_table,\"a\"\n" \ | 231 | " .section __ex_table,\"a\"\n" \ |
224 | " .align 3\n" \ | 232 | " .align 3\n" \ |
225 | " .long 1b, 3b\n" \ | 233 | " .long 1b, 3b\n" \ |
226 | " .previous" \ | 234 | " .previous" \ |
227 | : "+r" (err) \ | 235 | : "+r" (err) \ |
228 | : "r" (x), "r" (addr), "i" (-EFAULT) \ | 236 | : "r" (x), "r" (addr), "i" (-EFAULT) \ |
229 | : "cc") | 237 | : "cc") |
230 | 238 | ||
231 | #ifdef __NDS32_EB__ | 239 | #ifdef __NDS32_EB__ |
232 | #define __pu_reg_oper0 "%H2" | 240 | #define __pu_reg_oper0 "%H2" |
@@ -237,23 +245,24 @@ do { \ | |||
237 | #endif | 245 | #endif |
238 | 246 | ||
239 | #define __put_user_asm_dword(x, addr, err) \ | 247 | #define __put_user_asm_dword(x, addr, err) \ |
240 | asm volatile( \ | 248 | __asm__ __volatile__ ( \ |
241 | "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ | 249 | "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ |
242 | "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ | 250 | "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ |
243 | "3:\n" \ | 251 | "3:\n" \ |
244 | " .section .fixup,\"ax\"\n" \ | 252 | " .section .fixup,\"ax\"\n" \ |
245 | " .align 2\n" \ | 253 | " .align 2\n" \ |
246 | "4: move %0, %3\n" \ | 254 | "4: move %0, %3\n" \ |
247 | " b 3b\n" \ | 255 | " b 3b\n" \ |
248 | " .previous\n" \ | 256 | " .previous\n" \ |
249 | " .section __ex_table,\"a\"\n" \ | 257 | " .section __ex_table,\"a\"\n" \ |
250 | " .align 3\n" \ | 258 | " .align 3\n" \ |
251 | " .long 1b, 4b\n" \ | 259 | " .long 1b, 4b\n" \ |
252 | " .long 2b, 4b\n" \ | 260 | " .long 2b, 4b\n" \ |
253 | " .previous" \ | 261 | " .previous" \ |
254 | : "+r"(err) \ | 262 | : "+r"(err) \ |
255 | : "r"(addr), "r"(x), "i"(-EFAULT) \ | 263 | : "r"(addr), "r"(x), "i"(-EFAULT) \ |
256 | : "cc") | 264 | : "cc") |
265 | |||
257 | extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); | 266 | extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); |
258 | extern long strncpy_from_user(char *dest, const char __user * src, long count); | 267 | extern long strncpy_from_user(char *dest, const char __user * src, long count); |
259 | extern __must_check long strlen_user(const char __user * str); | 268 | extern __must_check long strlen_user(const char __user * str); |
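The reworked put_user path above makes __put_user_check read its pointer argument into a local __p exactly once before the access_ok() check and the __put_user_err() call. A standalone sketch, not kernel code, of why single evaluation matters when the pointer argument has side effects (all names here are made up):

```c
#include <stdio.h>

static int calls;
static int *next_slot(int *base) { calls++; return base; }

/* Double-evaluating macro: expands its pointer argument twice. */
#define BAD_PUT(x, p)   (*(p) = (x), (void)(p))
/* Single-evaluating macro: copies the pointer once, the way the
 * reworked __put_user_check above does with "__p = (ptr)". */
#define GOOD_PUT(x, p)  do { __typeof__(p) _p = (p); *_p = (x); } while (0)

int main(void)
{
	int buf[1] = { 0 };

	calls = 0; BAD_PUT(7, next_slot(buf));  printf("BAD_PUT:  %d evaluations\n", calls);
	calls = 0; GOOD_PUT(7, next_slot(buf)); printf("GOOD_PUT: %d evaluations\n", calls);
	return 0;
}
```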
diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile index 42792743e8b9..27cded39fa66 100644 --- a/arch/nds32/kernel/Makefile +++ b/arch/nds32/kernel/Makefile | |||
@@ -21,3 +21,9 @@ extra-y := head.o vmlinux.lds | |||
21 | 21 | ||
22 | 22 | ||
23 | obj-y += vdso/ | 23 | obj-y += vdso/ |
24 | |||
25 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o | ||
26 | |||
27 | ifdef CONFIG_FUNCTION_TRACER | ||
28 | CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) | ||
29 | endif | ||
diff --git a/arch/nds32/kernel/atl2c.c b/arch/nds32/kernel/atl2c.c index 0c6d031a1c4a..0c5386e72098 100644 --- a/arch/nds32/kernel/atl2c.c +++ b/arch/nds32/kernel/atl2c.c | |||
@@ -9,7 +9,8 @@ | |||
9 | 9 | ||
10 | void __iomem *atl2c_base; | 10 | void __iomem *atl2c_base; |
11 | static const struct of_device_id atl2c_ids[] __initconst = { | 11 | static const struct of_device_id atl2c_ids[] __initconst = { |
12 | {.compatible = "andestech,atl2c",} | 12 | {.compatible = "andestech,atl2c",}, |
13 | {} | ||
13 | }; | 14 | }; |
14 | 15 | ||
15 | static int __init atl2c_of_init(void) | 16 | static int __init atl2c_of_init(void) |
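The atl2c fix above appends an empty entry to the of_device_id table; OF matching walks such tables until it reaches that sentinel. A standalone sketch using a simplified stand-in structure (not the kernel's definition) of the walk the sentinel terminates:

```c
#include <stdio.h>
#include <string.h>

struct of_device_id { char compatible[32]; };	/* simplified stand-in */

static const struct of_device_id ids[] = {
	{ .compatible = "andestech,atl2c" },
	{ }				/* sentinel: the entry the fix adds */
};

/* Mimics how the matching code walks the table: stop at the empty entry.
 * Without the sentinel the loop would run off the end of the array. */
static const struct of_device_id *match(const char *compat)
{
	const struct of_device_id *id;

	for (id = ids; id->compatible[0] != '\0'; id++)
		if (strcmp(id->compatible, compat) == 0)
			return id;
	return NULL;
}

int main(void)
{
	printf("%s\n", match("andestech,atl2c") ? "matched" : "no match");
	return 0;
}
```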
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S index b8ae4e9a6b93..21a144071566 100644 --- a/arch/nds32/kernel/ex-entry.S +++ b/arch/nds32/kernel/ex-entry.S | |||
@@ -118,7 +118,7 @@ common_exception_handler: | |||
118 | /* interrupt */ | 118 | /* interrupt */ |
119 | 2: | 119 | 2: |
120 | #ifdef CONFIG_TRACE_IRQFLAGS | 120 | #ifdef CONFIG_TRACE_IRQFLAGS |
121 | jal trace_hardirqs_off | 121 | jal __trace_hardirqs_off |
122 | #endif | 122 | #endif |
123 | move $r0, $sp | 123 | move $r0, $sp |
124 | sethi $lp, hi20(ret_from_intr) | 124 | sethi $lp, hi20(ret_from_intr) |
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S index 03e4f7788a18..f00af92f7e22 100644 --- a/arch/nds32/kernel/ex-exit.S +++ b/arch/nds32/kernel/ex-exit.S | |||
@@ -138,8 +138,8 @@ no_work_pending: | |||
138 | #ifdef CONFIG_TRACE_IRQFLAGS | 138 | #ifdef CONFIG_TRACE_IRQFLAGS |
139 | lwi $p0, [$sp+(#IPSW_OFFSET)] | 139 | lwi $p0, [$sp+(#IPSW_OFFSET)] |
140 | andi $p0, $p0, #0x1 | 140 | andi $p0, $p0, #0x1 |
141 | la $r10, trace_hardirqs_off | 141 | la $r10, __trace_hardirqs_off |
142 | la $r9, trace_hardirqs_on | 142 | la $r9, __trace_hardirqs_on |
143 | cmovz $r9, $p0, $r10 | 143 | cmovz $r9, $p0, $r10 |
144 | jral $r9 | 144 | jral $r9 |
145 | #endif | 145 | #endif |
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c new file mode 100644 index 000000000000..a0a9679ad5de --- /dev/null +++ b/arch/nds32/kernel/ftrace.c | |||
@@ -0,0 +1,309 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | #include <linux/ftrace.h> | ||
4 | #include <linux/uaccess.h> | ||
5 | #include <asm/cacheflush.h> | ||
6 | |||
7 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
8 | extern void (*ftrace_trace_function)(unsigned long, unsigned long, | ||
9 | struct ftrace_ops*, struct pt_regs*); | ||
10 | extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); | ||
11 | extern void ftrace_graph_caller(void); | ||
12 | |||
13 | noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, | ||
14 | struct ftrace_ops *op, struct pt_regs *regs) | ||
15 | { | ||
16 | __asm__ (""); /* keep this from being optimized as a pure function */ | ||
17 | } | ||
18 | |||
19 | noinline void _mcount(unsigned long parent_ip) | ||
20 | { | ||
21 | /* all state is saved by the compiler-generated prologue */ | ||
22 | |||
23 | unsigned long ip = (unsigned long)__builtin_return_address(0); | ||
24 | |||
25 | if (ftrace_trace_function != ftrace_stub) | ||
26 | ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip, | ||
27 | NULL, NULL); | ||
28 | |||
29 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
30 | if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub | ||
31 | || ftrace_graph_entry != ftrace_graph_entry_stub) | ||
32 | ftrace_graph_caller(); | ||
33 | #endif | ||
34 | |||
35 | /* all state is restored by the compiler-generated epilogue */ | ||
36 | } | ||
37 | EXPORT_SYMBOL(_mcount); | ||
38 | |||
39 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
40 | |||
41 | noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, | ||
42 | struct ftrace_ops *op, struct pt_regs *regs) | ||
43 | { | ||
44 | __asm__ (""); /* keep this from being optimized as a pure function */ | ||
45 | } | ||
46 | |||
47 | noinline void __naked _mcount(unsigned long parent_ip) | ||
48 | { | ||
49 | __asm__ (""); /* keep this from being optimized as a pure function */ | ||
50 | } | ||
51 | EXPORT_SYMBOL(_mcount); | ||
52 | |||
53 | #define XSTR(s) STR(s) | ||
54 | #define STR(s) #s | ||
55 | void _ftrace_caller(unsigned long parent_ip) | ||
56 | { | ||
57 | /* save all state needed by the compiler prologue */ | ||
58 | |||
59 | /* | ||
60 | * prepare arguments for real tracing function | ||
61 | * first arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE | ||
62 | * second arg : parent_ip | ||
63 | */ | ||
64 | __asm__ __volatile__ ( | ||
65 | "move $r1, %0 \n\t" | ||
66 | "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t" | ||
67 | : | ||
68 | : "r" (parent_ip), "r" (__builtin_return_address(0))); | ||
69 | |||
70 | /* a placeholder for the call to a real tracing function */ | ||
71 | __asm__ __volatile__ ( | ||
72 | "ftrace_call: \n\t" | ||
73 | "nop \n\t" | ||
74 | "nop \n\t" | ||
75 | "nop \n\t"); | ||
76 | |||
77 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
78 | /* a placeholder for the call to ftrace_graph_caller */ | ||
79 | __asm__ __volatile__ ( | ||
80 | "ftrace_graph_call: \n\t" | ||
81 | "nop \n\t" | ||
82 | "nop \n\t" | ||
83 | "nop \n\t"); | ||
84 | #endif | ||
85 | /* restore all state needed by the compiler epilogue */ | ||
86 | } | ||
87 | |||
88 | int __init ftrace_dyn_arch_init(void) | ||
89 | { | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | int ftrace_arch_code_modify_prepare(void) | ||
94 | { | ||
95 | set_all_modules_text_rw(); | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | int ftrace_arch_code_modify_post_process(void) | ||
100 | { | ||
101 | set_all_modules_text_ro(); | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static unsigned long gen_sethi_insn(unsigned long addr) | ||
106 | { | ||
107 | unsigned long opcode = 0x46000000; | ||
108 | unsigned long imm = addr >> 12; | ||
109 | unsigned long rt_num = 0xf << 20; | ||
110 | |||
111 | return ENDIAN_CONVERT(opcode | rt_num | imm); | ||
112 | } | ||
113 | |||
114 | static unsigned long gen_ori_insn(unsigned long addr) | ||
115 | { | ||
116 | unsigned long opcode = 0x58000000; | ||
117 | unsigned long imm = addr & 0x0000fff; | ||
118 | unsigned long rt_num = 0xf << 20; | ||
119 | unsigned long ra_num = 0xf << 15; | ||
120 | |||
121 | return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm); | ||
122 | } | ||
123 | |||
124 | static unsigned long gen_jral_insn(unsigned long addr) | ||
125 | { | ||
126 | unsigned long opcode = 0x4a000001; | ||
127 | unsigned long rt_num = 0x1e << 20; | ||
128 | unsigned long rb_num = 0xf << 10; | ||
129 | |||
130 | return ENDIAN_CONVERT(opcode | rt_num | rb_num); | ||
131 | } | ||
132 | |||
133 | static void ftrace_gen_call_insn(unsigned long *call_insns, | ||
134 | unsigned long addr) | ||
135 | { | ||
136 | call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u */ | ||
137 | call_insns[1] = gen_ori_insn(addr); /* ori $r15, $r15, imm15u */ | ||
138 | call_insns[2] = gen_jral_insn(addr); /* jral $lp, $r15 */ | ||
139 | } | ||
140 | |||
141 | static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn, | ||
142 | unsigned long *new_insn, bool validate) | ||
143 | { | ||
144 | unsigned long orig_insn[3]; | ||
145 | |||
146 | if (validate) { | ||
147 | if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE)) | ||
148 | return -EFAULT; | ||
149 | if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE)) | ||
150 | return -EINVAL; | ||
151 | } | ||
152 | |||
153 | if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE)) | ||
154 | return -EPERM; | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn, | ||
160 | unsigned long *new_insn, bool validate) | ||
161 | { | ||
162 | int ret; | ||
163 | |||
164 | ret = __ftrace_modify_code(pc, old_insn, new_insn, validate); | ||
165 | if (ret) | ||
166 | return ret; | ||
167 | |||
168 | flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); | ||
169 | |||
170 | return ret; | ||
171 | } | ||
172 | |||
173 | int ftrace_update_ftrace_func(ftrace_func_t func) | ||
174 | { | ||
175 | unsigned long pc = (unsigned long)&ftrace_call; | ||
176 | unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; | ||
177 | unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; | ||
178 | |||
179 | if (func != ftrace_stub) | ||
180 | ftrace_gen_call_insn(new_insn, (unsigned long)func); | ||
181 | |||
182 | return ftrace_modify_code(pc, old_insn, new_insn, false); | ||
183 | } | ||
184 | |||
185 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
186 | { | ||
187 | unsigned long pc = rec->ip; | ||
188 | unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; | ||
189 | unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; | ||
190 | |||
191 | ftrace_gen_call_insn(call_insn, addr); | ||
192 | |||
193 | return ftrace_modify_code(pc, nop_insn, call_insn, true); | ||
194 | } | ||
195 | |||
196 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | ||
197 | unsigned long addr) | ||
198 | { | ||
199 | unsigned long pc = rec->ip; | ||
200 | unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; | ||
201 | unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; | ||
202 | |||
203 | ftrace_gen_call_insn(call_insn, addr); | ||
204 | |||
205 | return ftrace_modify_code(pc, call_insn, nop_insn, true); | ||
206 | } | ||
207 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
208 | |||
209 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
210 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | ||
211 | unsigned long frame_pointer) | ||
212 | { | ||
213 | unsigned long return_hooker = (unsigned long)&return_to_handler; | ||
214 | struct ftrace_graph_ent trace; | ||
215 | unsigned long old; | ||
216 | int err; | ||
217 | |||
218 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | ||
219 | return; | ||
220 | |||
221 | old = *parent; | ||
222 | |||
223 | trace.func = self_addr; | ||
224 | trace.depth = current->curr_ret_stack + 1; | ||
225 | |||
226 | /* Only trace if the calling function expects to */ | ||
227 | if (!ftrace_graph_entry(&trace)) | ||
228 | return; | ||
229 | |||
230 | err = ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
231 | frame_pointer, NULL); | ||
232 | |||
233 | if (err == -EBUSY) | ||
234 | return; | ||
235 | |||
236 | *parent = return_hooker; | ||
237 | } | ||
238 | |||
239 | noinline void ftrace_graph_caller(void) | ||
240 | { | ||
241 | unsigned long *parent_ip = | ||
242 | (unsigned long *)(__builtin_frame_address(2) - 4); | ||
243 | |||
244 | unsigned long selfpc = | ||
245 | (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE); | ||
246 | |||
247 | unsigned long frame_pointer = | ||
248 | (unsigned long)__builtin_frame_address(3); | ||
249 | |||
250 | prepare_ftrace_return(parent_ip, selfpc, frame_pointer); | ||
251 | } | ||
252 | |||
253 | extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer); | ||
254 | void __naked return_to_handler(void) | ||
255 | { | ||
256 | __asm__ __volatile__ ( | ||
257 | /* save state needed by the ABI */ | ||
258 | "smw.adm $r0,[$sp],$r1,#0x0 \n\t" | ||
259 | |||
260 | /* get original return address */ | ||
261 | "move $r0, $fp \n\t" | ||
262 | "bal ftrace_return_to_handler\n\t" | ||
263 | "move $lp, $r0 \n\t" | ||
264 | |||
265 | /* restore state needed by the ABI */ | ||
266 | "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); | ||
267 | } | ||
268 | |||
269 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
270 | extern unsigned long ftrace_graph_call; | ||
271 | |||
272 | static int ftrace_modify_graph_caller(bool enable) | ||
273 | { | ||
274 | unsigned long pc = (unsigned long)&ftrace_graph_call; | ||
275 | unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; | ||
276 | unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; | ||
277 | |||
278 | ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller); | ||
279 | |||
280 | if (enable) | ||
281 | return ftrace_modify_code(pc, nop_insn, call_insn, true); | ||
282 | else | ||
283 | return ftrace_modify_code(pc, call_insn, nop_insn, true); | ||
284 | } | ||
285 | |||
286 | int ftrace_enable_ftrace_graph_caller(void) | ||
287 | { | ||
288 | return ftrace_modify_graph_caller(true); | ||
289 | } | ||
290 | |||
291 | int ftrace_disable_ftrace_graph_caller(void) | ||
292 | { | ||
293 | return ftrace_modify_graph_caller(false); | ||
294 | } | ||
295 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
296 | |||
297 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
298 | |||
299 | |||
300 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
301 | noinline void __trace_hardirqs_off(void) | ||
302 | { | ||
303 | trace_hardirqs_off(); | ||
304 | } | ||
305 | noinline void __trace_hardirqs_on(void) | ||
306 | { | ||
307 | trace_hardirqs_on(); | ||
308 | } | ||
309 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
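ftrace_gen_call_insn() above builds the three-instruction call sequence its comments describe: sethi places the upper 20 bits of the target in $r15, ori fills in the low 12 bits, and jral $lp, $r15 performs the call. A quick standalone check (the address is made up) that the sethi/ori split reassembles the original 32-bit address:

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical target address for the generated call sequence. */
	unsigned long addr = 0x9033a5c4UL;

	unsigned long hi = addr >> 12;		/* sethi $r15, imm20u   */
	unsigned long lo = addr & 0xfffUL;	/* ori   $r15, $r15, lo */

	/* The two immediates reassemble the full address. */
	assert(((hi << 12) | lo) == addr);
	printf("sethi imm20u = 0x%05lx, ori imm = 0x%03lx\n", hi, lo);
	return 0;
}
```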
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c index 4167283d8293..1e31829cbc2a 100644 --- a/arch/nds32/kernel/module.c +++ b/arch/nds32/kernel/module.c | |||
@@ -40,7 +40,7 @@ void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask, | |||
40 | 40 | ||
41 | tmp2 = tmp & loc_mask; | 41 | tmp2 = tmp & loc_mask; |
42 | if (partial_in_place) { | 42 | if (partial_in_place) { |
43 | tmp &= (!loc_mask); | 43 | tmp &= (~loc_mask); |
44 | tmp = | 44 | tmp = |
45 | tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); | 45 | tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); |
46 | } else { | 46 | } else { |
@@ -70,7 +70,7 @@ void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask, | |||
70 | 70 | ||
71 | tmp2 = tmp & loc_mask; | 71 | tmp2 = tmp & loc_mask; |
72 | if (partial_in_place) { | 72 | if (partial_in_place) { |
73 | tmp &= (!loc_mask); | 73 | tmp &= (~loc_mask); |
74 | tmp = | 74 | tmp = |
75 | tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); | 75 | tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); |
76 | } else { | 76 | } else { |
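The module.c fix above replaces a logical NOT with a bitwise NOT when clearing the in-place relocation field: !loc_mask evaluates to 0 for any non-zero mask, so the old AND discarded the whole word instead of just the field. A standalone check with made-up values:

```c
#include <stdio.h>

int main(void)
{
	unsigned int tmp      = 0xdeadbeef;	/* example relocation word */
	unsigned int loc_mask = 0x000fffff;	/* example in-place field mask */

	/* Old code: logical NOT turns any non-zero mask into 0,
	 * so the AND wipes out every bit of tmp. */
	printf("tmp & !loc_mask = 0x%08x\n", tmp & !loc_mask);	/* 0x00000000 */

	/* Fixed code: bitwise NOT keeps the bits outside the field. */
	printf("tmp & ~loc_mask = 0x%08x\n", tmp & ~loc_mask);	/* 0xdea00000 */
	return 0;
}
```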
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c index 8b231e910ea6..d974c0c1c65f 100644 --- a/arch/nds32/kernel/stacktrace.c +++ b/arch/nds32/kernel/stacktrace.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/sched/debug.h> | 4 | #include <linux/sched/debug.h> |
5 | #include <linux/sched/task_stack.h> | 5 | #include <linux/sched/task_stack.h> |
6 | #include <linux/stacktrace.h> | 6 | #include <linux/stacktrace.h> |
7 | #include <linux/ftrace.h> | ||
7 | 8 | ||
8 | void save_stack_trace(struct stack_trace *trace) | 9 | void save_stack_trace(struct stack_trace *trace) |
9 | { | 10 | { |
@@ -16,6 +17,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
16 | unsigned long *fpn; | 17 | unsigned long *fpn; |
17 | int skip = trace->skip; | 18 | int skip = trace->skip; |
18 | int savesched; | 19 | int savesched; |
20 | int graph_idx = 0; | ||
19 | 21 | ||
20 | if (tsk == current) { | 22 | if (tsk == current) { |
21 | __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); | 23 | __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); |
@@ -29,10 +31,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
29 | && (fpn >= (unsigned long *)TASK_SIZE)) { | 31 | && (fpn >= (unsigned long *)TASK_SIZE)) { |
30 | unsigned long lpp, fpp; | 32 | unsigned long lpp, fpp; |
31 | 33 | ||
32 | lpp = fpn[-1]; | 34 | lpp = fpn[LP_OFFSET]; |
33 | fpp = fpn[FP_OFFSET]; | 35 | fpp = fpn[FP_OFFSET]; |
34 | if (!__kernel_text_address(lpp)) | 36 | if (!__kernel_text_address(lpp)) |
35 | break; | 37 | break; |
38 | else | ||
39 | lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL); | ||
36 | 40 | ||
37 | if (savesched || !in_sched_functions(lpp)) { | 41 | if (savesched || !in_sched_functions(lpp)) { |
38 | if (skip) { | 42 | if (skip) { |
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index a6205fd4db52..1496aab48998 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/kdebug.h> | 8 | #include <linux/kdebug.h> |
9 | #include <linux/sched/task_stack.h> | 9 | #include <linux/sched/task_stack.h> |
10 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
11 | #include <linux/ftrace.h> | ||
11 | 12 | ||
12 | #include <asm/proc-fns.h> | 13 | #include <asm/proc-fns.h> |
13 | #include <asm/unistd.h> | 14 | #include <asm/unistd.h> |
@@ -94,28 +95,6 @@ static void dump_instr(struct pt_regs *regs) | |||
94 | set_fs(fs); | 95 | set_fs(fs); |
95 | } | 96 | } |
96 | 97 | ||
97 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
98 | #include <linux/ftrace.h> | ||
99 | static void | ||
100 | get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) | ||
101 | { | ||
102 | if (*addr == (unsigned long)return_to_handler) { | ||
103 | int index = tsk->curr_ret_stack; | ||
104 | |||
105 | if (tsk->ret_stack && index >= *graph) { | ||
106 | index -= *graph; | ||
107 | *addr = tsk->ret_stack[index].ret; | ||
108 | (*graph)++; | ||
109 | } | ||
110 | } | ||
111 | } | ||
112 | #else | ||
113 | static inline void | ||
114 | get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) | ||
115 | { | ||
116 | } | ||
117 | #endif | ||
118 | |||
119 | #define LOOP_TIMES (100) | 98 | #define LOOP_TIMES (100) |
120 | static void __dump(struct task_struct *tsk, unsigned long *base_reg) | 99 | static void __dump(struct task_struct *tsk, unsigned long *base_reg) |
121 | { | 100 | { |
@@ -126,7 +105,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) | |||
126 | while (!kstack_end(base_reg)) { | 105 | while (!kstack_end(base_reg)) { |
127 | ret_addr = *base_reg++; | 106 | ret_addr = *base_reg++; |
128 | if (__kernel_text_address(ret_addr)) { | 107 | if (__kernel_text_address(ret_addr)) { |
129 | get_real_ret_addr(&ret_addr, tsk, &graph); | 108 | ret_addr = ftrace_graph_ret_addr( |
109 | tsk, &graph, ret_addr, NULL); | ||
130 | print_ip_sym(ret_addr); | 110 | print_ip_sym(ret_addr); |
131 | } | 111 | } |
132 | if (--cnt < 0) | 112 | if (--cnt < 0) |
@@ -137,15 +117,12 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) | |||
137 | !((unsigned long)base_reg & 0x3) && | 117 | !((unsigned long)base_reg & 0x3) && |
138 | ((unsigned long)base_reg >= TASK_SIZE)) { | 118 | ((unsigned long)base_reg >= TASK_SIZE)) { |
139 | unsigned long next_fp; | 119 | unsigned long next_fp; |
140 | #if !defined(NDS32_ABI_2) | 120 | ret_addr = base_reg[LP_OFFSET]; |
141 | ret_addr = base_reg[0]; | ||
142 | next_fp = base_reg[1]; | ||
143 | #else | ||
144 | ret_addr = base_reg[-1]; | ||
145 | next_fp = base_reg[FP_OFFSET]; | 121 | next_fp = base_reg[FP_OFFSET]; |
146 | #endif | ||
147 | if (__kernel_text_address(ret_addr)) { | 122 | if (__kernel_text_address(ret_addr)) { |
148 | get_real_ret_addr(&ret_addr, tsk, &graph); | 123 | |
124 | ret_addr = ftrace_graph_ret_addr( | ||
125 | tsk, &graph, ret_addr, NULL); | ||
149 | print_ip_sym(ret_addr); | 126 | print_ip_sym(ret_addr); |
150 | } | 127 | } |
151 | if (--cnt < 0) | 128 | if (--cnt < 0) |
@@ -196,11 +173,10 @@ void die(const char *str, struct pt_regs *regs, int err) | |||
196 | pr_emerg("CPU: %i\n", smp_processor_id()); | 173 | pr_emerg("CPU: %i\n", smp_processor_id()); |
197 | show_regs(regs); | 174 | show_regs(regs); |
198 | pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n", | 175 | pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n", |
199 | tsk->comm, tsk->pid, task_thread_info(tsk) + 1); | 176 | tsk->comm, tsk->pid, end_of_stack(tsk)); |
200 | 177 | ||
201 | if (!user_mode(regs) || in_interrupt()) { | 178 | if (!user_mode(regs) || in_interrupt()) { |
202 | dump_mem("Stack: ", regs->sp, | 179 | dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK); |
203 | THREAD_SIZE + (unsigned long)task_thread_info(tsk)); | ||
204 | dump_instr(regs); | 180 | dump_instr(regs); |
205 | dump_stack(); | 181 | dump_stack(); |
206 | } | 182 | } |
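The die() change above limits the stack dump to the page holding regs->sp; (regs->sp + PAGE_SIZE) & PAGE_MASK is the end of that page. A standalone check with a made-up stack pointer and a 4 KiB page:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long sp  = 0x7fffc123UL;			/* made-up stack pointer */
	unsigned long end = (sp + PAGE_SIZE) & PAGE_MASK;	/* end of sp's page */

	printf("sp = 0x%lx, dump end = 0x%lx\n", sp, end);	/* end = 0x7fffd000 */
	return 0;
}
```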
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S index 288313b886ef..9e90f30a181d 100644 --- a/arch/nds32/kernel/vmlinux.lds.S +++ b/arch/nds32/kernel/vmlinux.lds.S | |||
@@ -13,14 +13,26 @@ OUTPUT_ARCH(nds32) | |||
13 | ENTRY(_stext_lma) | 13 | ENTRY(_stext_lma) |
14 | jiffies = jiffies_64; | 14 | jiffies = jiffies_64; |
15 | 15 | ||
16 | #if defined(CONFIG_GCOV_KERNEL) | ||
17 | #define NDS32_EXIT_KEEP(x) x | ||
18 | #else | ||
19 | #define NDS32_EXIT_KEEP(x) | ||
20 | #endif | ||
21 | |||
16 | SECTIONS | 22 | SECTIONS |
17 | { | 23 | { |
18 | _stext_lma = TEXTADDR - LOAD_OFFSET; | 24 | _stext_lma = TEXTADDR - LOAD_OFFSET; |
19 | . = TEXTADDR; | 25 | . = TEXTADDR; |
20 | __init_begin = .; | 26 | __init_begin = .; |
21 | HEAD_TEXT_SECTION | 27 | HEAD_TEXT_SECTION |
28 | .exit.text : { | ||
29 | NDS32_EXIT_KEEP(EXIT_TEXT) | ||
30 | } | ||
22 | INIT_TEXT_SECTION(PAGE_SIZE) | 31 | INIT_TEXT_SECTION(PAGE_SIZE) |
23 | INIT_DATA_SECTION(16) | 32 | INIT_DATA_SECTION(16) |
33 | .exit.data : { | ||
34 | NDS32_EXIT_KEEP(EXIT_DATA) | ||
35 | } | ||
24 | PERCPU_SECTION(L1_CACHE_BYTES) | 36 | PERCPU_SECTION(L1_CACHE_BYTES) |
25 | __init_end = .; | 37 | __init_end = .; |
26 | 38 | ||
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug index 7a49f0d28d14..f1da8a7b17ff 100644 --- a/arch/nios2/Kconfig.debug +++ b/arch/nios2/Kconfig.debug | |||
@@ -3,15 +3,6 @@ | |||
3 | config TRACE_IRQFLAGS_SUPPORT | 3 | config TRACE_IRQFLAGS_SUPPORT |
4 | def_bool y | 4 | def_bool y |
5 | 5 | ||
6 | config DEBUG_STACK_USAGE | ||
7 | bool "Enable stack utilization instrumentation" | ||
8 | depends on DEBUG_KERNEL | ||
9 | help | ||
10 | Enables the display of the minimum amount of free stack which each | ||
11 | task has ever had available in the sysrq-T and sysrq-P debug output. | ||
12 | |||
13 | This option will slow down process creation somewhat. | ||
14 | |||
15 | config EARLY_PRINTK | 6 | config EARLY_PRINTK |
16 | bool "Activate early kernel debugging" | 7 | bool "Activate early kernel debugging" |
17 | default y | 8 | default y |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index db0b6eebbfa5..a80669209155 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -177,7 +177,6 @@ config PPC | |||
177 | select HAVE_ARCH_KGDB | 177 | select HAVE_ARCH_KGDB |
178 | select HAVE_ARCH_MMAP_RND_BITS | 178 | select HAVE_ARCH_MMAP_RND_BITS |
179 | select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT | 179 | select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT |
180 | select HAVE_ARCH_PREL32_RELOCATIONS | ||
181 | select HAVE_ARCH_SECCOMP_FILTER | 180 | select HAVE_ARCH_SECCOMP_FILTER |
182 | select HAVE_ARCH_TRACEHOOK | 181 | select HAVE_ARCH_TRACEHOOK |
183 | select HAVE_CBPF_JIT if !PPC64 | 182 | select HAVE_CBPF_JIT if !PPC64 |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 3c0e8fb2b773..68e14afecac8 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -358,7 +358,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
358 | unsigned long pp, key; | 358 | unsigned long pp, key; |
359 | unsigned long v, orig_v, gr; | 359 | unsigned long v, orig_v, gr; |
360 | __be64 *hptep; | 360 | __be64 *hptep; |
361 | int index; | 361 | long int index; |
362 | int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); | 362 | int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); |
363 | 363 | ||
364 | if (kvm_is_radix(vcpu->kvm)) | 364 | if (kvm_is_radix(vcpu->kvm)) |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 0af1c0aea1fe..fd6e8c13685f 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c | |||
@@ -725,10 +725,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
725 | gpa, shift); | 725 | gpa, shift); |
726 | kvmppc_radix_tlbie_page(kvm, gpa, shift); | 726 | kvmppc_radix_tlbie_page(kvm, gpa, shift); |
727 | if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { | 727 | if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { |
728 | unsigned long npages = 1; | 728 | unsigned long psize = PAGE_SIZE; |
729 | if (shift) | 729 | if (shift) |
730 | npages = 1ul << (shift - PAGE_SHIFT); | 730 | psize = 1ul << shift; |
731 | kvmppc_update_dirty_map(memslot, gfn, npages); | 731 | kvmppc_update_dirty_map(memslot, gfn, psize); |
732 | } | 732 | } |
733 | } | 733 | } |
734 | return 0; | 734 | return 0; |
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index c229509288ea..439dc7072e05 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h | |||
@@ -14,6 +14,10 @@ | |||
14 | #ifndef _ASM_RISCV_TLB_H | 14 | #ifndef _ASM_RISCV_TLB_H |
15 | #define _ASM_RISCV_TLB_H | 15 | #define _ASM_RISCV_TLB_H |
16 | 16 | ||
17 | struct mmu_gather; | ||
18 | |||
19 | static void tlb_flush(struct mmu_gather *tlb); | ||
20 | |||
17 | #include <asm-generic/tlb.h> | 21 | #include <asm-generic/tlb.h> |
18 | 22 | ||
19 | static inline void tlb_flush(struct mmu_gather *tlb) | 23 | static inline void tlb_flush(struct mmu_gather *tlb) |
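The riscv tlb.h hunk above forward-declares struct mmu_gather and tlb_flush() before pulling in <asm-generic/tlb.h>, presumably so the generic header sees a declaration before it references tlb_flush(). A standalone sketch of the same ordering, with a stubbed stand-in for the generic header:

```c
#include <stdio.h>

/* What the arch header now does: declare the type and the function
 * before the generic header (stubbed below) uses them. */
struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);

/* --- stand-in for <asm-generic/tlb.h>, which calls tlb_flush() --- */
struct mmu_gather { const char *name; };
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
}
/* --- end of stand-in --- */

static void tlb_flush(struct mmu_gather *tlb)
{
	printf("flushing %s\n", tlb->name);
}

int main(void)
{
	struct mmu_gather g = { .name = "demo" };
	tlb_flush_mmu(&g);
	return 0;
}
```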
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 568026ccf6e8..fb03a4482ad6 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c | |||
@@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, | |||
65 | SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, | 65 | SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, |
66 | uintptr_t, flags) | 66 | uintptr_t, flags) |
67 | { | 67 | { |
68 | #ifdef CONFIG_SMP | ||
69 | struct mm_struct *mm = current->mm; | ||
70 | bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; | ||
71 | #endif | ||
72 | |||
73 | /* Check the reserved flags. */ | 68 | /* Check the reserved flags. */ |
74 | if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) | 69 | if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) |
75 | return -EINVAL; | 70 | return -EINVAL; |
76 | 71 | ||
77 | /* | 72 | flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL); |
78 | * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(), | ||
79 | * which generates unused variable warnings all over this function. | ||
80 | */ | ||
81 | #ifdef CONFIG_SMP | ||
82 | flush_icache_mm(mm, local); | ||
83 | #else | ||
84 | flush_icache_all(); | ||
85 | #endif | ||
86 | 73 | ||
87 | return 0; | 74 | return 0; |
88 | } | 75 | } |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index f31a15044c24..a8418e1379eb 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
@@ -16,7 +16,13 @@ typedef struct { | |||
16 | unsigned long asce; | 16 | unsigned long asce; |
17 | unsigned long asce_limit; | 17 | unsigned long asce_limit; |
18 | unsigned long vdso_base; | 18 | unsigned long vdso_base; |
19 | /* The mmu context allocates 4K page tables. */ | 19 | /* |
20 | * The following bitfields need a down_write on the mm | ||
21 | * semaphore when they are written to. As they are only | ||
22 | * written once, they can be read without a lock. | ||
23 | * | ||
24 | * The mmu context allocates 4K page tables. | ||
25 | */ | ||
20 | unsigned int alloc_pgste:1; | 26 | unsigned int alloc_pgste:1; |
21 | /* The mmu context uses extended page tables. */ | 27 | /* The mmu context uses extended page tables. */ |
22 | unsigned int has_pgste:1; | 28 | unsigned int has_pgste:1; |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 91ad4a9425c0..f69333fd2fa3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -695,7 +695,9 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
695 | r = -EINVAL; | 695 | r = -EINVAL; |
696 | else { | 696 | else { |
697 | r = 0; | 697 | r = 0; |
698 | down_write(&kvm->mm->mmap_sem); | ||
698 | kvm->mm->context.allow_gmap_hpage_1m = 1; | 699 | kvm->mm->context.allow_gmap_hpage_1m = 1; |
700 | up_write(&kvm->mm->mmap_sem); | ||
699 | /* | 701 | /* |
700 | * We might have to create fake 4k page | 702 | * We might have to create fake 4k page |
701 | * tables. To avoid that the hardware works on | 703 | * tables. To avoid that the hardware works on |
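The kvm-s390.c hunk above takes mmap_sem for writing around the one-time write of allow_gmap_hpage_1m, matching the new mmu.h comment that these bitfields are written once under the write lock and then read locklessly. A loose userspace analogy with a pthread rwlock (illustrative only, link with -lpthread):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;	/* stand-in */
static int allow_gmap_hpage_1m;		/* written once, under the write lock */

static void enable_hpage_1m(void)
{
	pthread_rwlock_wrlock(&mmap_sem);	/* mirrors down_write(&mm->mmap_sem) */
	allow_gmap_hpage_1m = 1;
	pthread_rwlock_unlock(&mmap_sem);	/* mirrors up_write() */
}

int main(void)
{
	enable_hpage_1m();
	/* A reader that runs after the single write may look at the flag
	 * without taking the lock, as the mmu.h comment above describes. */
	printf("allow_gmap_hpage_1m = %d\n", allow_gmap_hpage_1m);
	return 0;
}
```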
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d68f10441a16..8679bd74d337 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -280,9 +280,11 @@ retry: | |||
280 | goto retry; | 280 | goto retry; |
281 | } | 281 | } |
282 | } | 282 | } |
283 | if (rc) | ||
284 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
285 | up_read(¤t->mm->mmap_sem); | 283 | up_read(¤t->mm->mmap_sem); |
284 | if (rc == -EFAULT) | ||
285 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
286 | if (rc < 0) | ||
287 | return rc; | ||
286 | vcpu->run->s.regs.gprs[reg1] &= ~0xff; | 288 | vcpu->run->s.regs.gprs[reg1] &= ~0xff; |
287 | vcpu->run->s.regs.gprs[reg1] |= key; | 289 | vcpu->run->s.regs.gprs[reg1] |= key; |
288 | return 0; | 290 | return 0; |
@@ -324,9 +326,11 @@ retry: | |||
324 | goto retry; | 326 | goto retry; |
325 | } | 327 | } |
326 | } | 328 | } |
327 | if (rc < 0) | ||
328 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
329 | up_read(¤t->mm->mmap_sem); | 329 | up_read(¤t->mm->mmap_sem); |
330 | if (rc == -EFAULT) | ||
331 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
332 | if (rc < 0) | ||
333 | return rc; | ||
330 | kvm_s390_set_psw_cc(vcpu, rc); | 334 | kvm_s390_set_psw_cc(vcpu, rc); |
331 | return 0; | 335 | return 0; |
332 | } | 336 | } |
@@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu) | |||
390 | FAULT_FLAG_WRITE, &unlocked); | 394 | FAULT_FLAG_WRITE, &unlocked); |
391 | rc = !rc ? -EAGAIN : rc; | 395 | rc = !rc ? -EAGAIN : rc; |
392 | } | 396 | } |
397 | up_read(¤t->mm->mmap_sem); | ||
393 | if (rc == -EFAULT) | 398 | if (rc == -EFAULT) |
394 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 399 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
395 | 400 | if (rc < 0) | |
396 | up_read(¤t->mm->mmap_sem); | 401 | return rc; |
397 | if (rc >= 0) | 402 | start += PAGE_SIZE; |
398 | start += PAGE_SIZE; | ||
399 | } | 403 | } |
400 | 404 | ||
401 | if (m3 & (SSKE_MC | SSKE_MR)) { | 405 | if (m3 & (SSKE_MC | SSKE_MR)) { |
@@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
1002 | FAULT_FLAG_WRITE, &unlocked); | 1006 | FAULT_FLAG_WRITE, &unlocked); |
1003 | rc = !rc ? -EAGAIN : rc; | 1007 | rc = !rc ? -EAGAIN : rc; |
1004 | } | 1008 | } |
1009 | up_read(¤t->mm->mmap_sem); | ||
1005 | if (rc == -EFAULT) | 1010 | if (rc == -EFAULT) |
1006 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 1011 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
1007 | 1012 | if (rc == -EAGAIN) | |
1008 | up_read(¤t->mm->mmap_sem); | 1013 | continue; |
1009 | if (rc >= 0) | 1014 | if (rc < 0) |
1010 | start += PAGE_SIZE; | 1015 | return rc; |
1011 | } | 1016 | } |
1017 | start += PAGE_SIZE; | ||
1012 | } | 1018 | } |
1013 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { | 1019 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { |
1014 | if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { | 1020 | if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { |
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 63844b95c22c..a2b28cd1e3fe 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c | |||
@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
173 | return set_validity_icpt(scb_s, 0x0039U); | 173 | return set_validity_icpt(scb_s, 0x0039U); |
174 | 174 | ||
175 | /* copy only the wrapping keys */ | 175 | /* copy only the wrapping keys */ |
176 | if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56)) | 176 | if (read_guest_real(vcpu, crycb_addr + 72, |
177 | vsie_page->crycb.dea_wrapping_key_mask, 56)) | ||
177 | return set_validity_icpt(scb_s, 0x0035U); | 178 | return set_validity_icpt(scb_s, 0x0035U); |
178 | 179 | ||
179 | scb_s->ecb3 |= ecb3_flags; | 180 | scb_s->ecb3 |= ecb3_flags; |
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 3641a294ed54..e4abe9b8f97a 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/of_device.h> | 10 | #include <linux/of_device.h> |
11 | #include <linux/of_platform.h> | 11 | #include <linux/of_platform.h> |
12 | #include <linux/dma-mapping.h> | ||
12 | #include <asm/leon.h> | 13 | #include <asm/leon.h> |
13 | #include <asm/leon_amba.h> | 14 | #include <asm/leon_amba.h> |
14 | 15 | ||
@@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, | |||
381 | else | 382 | else |
382 | dev_set_name(&op->dev, "%08x", dp->phandle); | 383 | dev_set_name(&op->dev, "%08x", dp->phandle); |
383 | 384 | ||
385 | op->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
386 | op->dev.dma_mask = &op->dev.coherent_dma_mask; | ||
387 | |||
384 | if (of_device_register(op)) { | 388 | if (of_device_register(op)) { |
385 | printk("%s: Could not register of device.\n", | 389 | printk("%s: Could not register of device.\n", |
386 | dp->full_name); | 390 | dp->full_name); |
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 44e4d4435bed..6df6086968c6 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/string.h> | 2 | #include <linux/string.h> |
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/of.h> | 4 | #include <linux/of.h> |
5 | #include <linux/dma-mapping.h> | ||
5 | #include <linux/init.h> | 6 | #include <linux/init.h> |
6 | #include <linux/export.h> | 7 | #include <linux/export.h> |
7 | #include <linux/mod_devicetable.h> | 8 | #include <linux/mod_devicetable.h> |
@@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, | |||
675 | dev_set_name(&op->dev, "root"); | 676 | dev_set_name(&op->dev, "root"); |
676 | else | 677 | else |
677 | dev_set_name(&op->dev, "%08x", dp->phandle); | 678 | dev_set_name(&op->dev, "%08x", dp->phandle); |
679 | op->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
680 | op->dev.dma_mask = &op->dev.coherent_dma_mask; | ||
678 | 681 | ||
679 | if (of_device_register(op)) { | 682 | if (of_device_register(op)) { |
680 | printk("%s: Could not register of device.\n", | 683 | printk("%s: Could not register of device.\n", |
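Both sparc hunks above give newly scanned OF devices a default 32-bit coherent DMA mask. DMA_BIT_MASK(n) is a mask with the low n bits set; a tiny standalone program restating that macro locally just to print the value being assigned:

```c
#include <stdio.h>

/* Local restatement of the kernel's DMA_BIT_MASK() so the value the
 * sparc code assigns as the default mask can be printed here. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("DMA_BIT_MASK(32) = 0x%llx\n", DMA_BIT_MASK(32));	/* 0xffffffff */
	return 0;
}
```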
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c5ff296bc5d1..1a0be022f91d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -2843,7 +2843,7 @@ config X86_SYSFB | |||
2843 | This option, if enabled, marks VGA/VBE/EFI framebuffers as generic | 2843 | This option, if enabled, marks VGA/VBE/EFI framebuffers as generic |
2844 | framebuffers so the new generic system-framebuffer drivers can be | 2844 | framebuffers so the new generic system-framebuffer drivers can be |
2845 | used on x86. If the framebuffer is not compatible with the generic | 2845 | used on x86. If the framebuffer is not compatible with the generic |
2846 | modes, it is adverticed as fallback platform framebuffer so legacy | 2846 | modes, it is advertised as fallback platform framebuffer so legacy |
2847 | drivers like efifb, vesafb and uvesafb can pick it up. | 2847 | drivers like efifb, vesafb and uvesafb can pick it up. |
2848 | If this option is not selected, all system framebuffers are always | 2848 | If this option is not selected, all system framebuffers are always |
2849 | marked as fallback platform framebuffers as usual. | 2849 | marked as fallback platform framebuffers as usual. |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94859241bc3e..8f6e7eb8ae9f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER | |||
175 | endif | 175 | endif |
176 | endif | 176 | endif |
177 | 177 | ||
178 | ifndef CC_HAVE_ASM_GOTO | ||
179 | $(error Compiler lacks asm-goto support.) | ||
180 | endif | ||
181 | |||
182 | # | ||
183 | # Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a | ||
184 | # GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way | ||
185 | # to test for this bug at compile-time because the test case needs to execute, | ||
186 | # which is a no-go for cross compilers. So check the GCC version instead. | ||
187 | # | ||
188 | ifdef CONFIG_JUMP_LABEL | ||
189 | ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1) | ||
190 | ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1) | ||
191 | endif | ||
192 | endif | ||
193 | |||
194 | ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) | 178 | ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) |
195 | # This compiler flag is not supported by Clang: | 179 | # This compiler flag is not supported by Clang: |
196 | KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) | 180 | KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) |
@@ -312,6 +296,13 @@ PHONY += vdso_install | |||
312 | vdso_install: | 296 | vdso_install: |
313 | $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ | 297 | $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ |
314 | 298 | ||
299 | archprepare: checkbin | ||
300 | checkbin: | ||
301 | ifndef CC_HAVE_ASM_GOTO | ||
302 | @echo Compiler lacks asm-goto support. | ||
303 | @exit 1 | ||
304 | endif | ||
305 | |||
315 | archclean: | 306 | archclean: |
316 | $(Q)rm -rf $(objtree)/arch/i386 | 307 | $(Q)rm -rf $(objtree)/arch/i386 |
317 | $(Q)rm -rf $(objtree)/arch/x86_64 | 308 | $(Q)rm -rf $(objtree)/arch/x86_64 |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 9bd139569b41..cb2deb61c5d9 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff | |||
223 | pcmpeqd TWOONE(%rip), \TMP2 | 223 | pcmpeqd TWOONE(%rip), \TMP2 |
224 | pand POLY(%rip), \TMP2 | 224 | pand POLY(%rip), \TMP2 |
225 | pxor \TMP2, \TMP3 | 225 | pxor \TMP2, \TMP3 |
226 | movdqa \TMP3, HashKey(%arg2) | 226 | movdqu \TMP3, HashKey(%arg2) |
227 | 227 | ||
228 | movdqa \TMP3, \TMP5 | 228 | movdqa \TMP3, \TMP5 |
229 | pshufd $78, \TMP3, \TMP1 | 229 | pshufd $78, \TMP3, \TMP1 |
230 | pxor \TMP3, \TMP1 | 230 | pxor \TMP3, \TMP1 |
231 | movdqa \TMP1, HashKey_k(%arg2) | 231 | movdqu \TMP1, HashKey_k(%arg2) |
232 | 232 | ||
233 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 233 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
234 | # TMP5 = HashKey^2<<1 (mod poly) | 234 | # TMP5 = HashKey^2<<1 (mod poly) |
235 | movdqa \TMP5, HashKey_2(%arg2) | 235 | movdqu \TMP5, HashKey_2(%arg2) |
236 | # HashKey_2 = HashKey^2<<1 (mod poly) | 236 | # HashKey_2 = HashKey^2<<1 (mod poly) |
237 | pshufd $78, \TMP5, \TMP1 | 237 | pshufd $78, \TMP5, \TMP1 |
238 | pxor \TMP5, \TMP1 | 238 | pxor \TMP5, \TMP1 |
239 | movdqa \TMP1, HashKey_2_k(%arg2) | 239 | movdqu \TMP1, HashKey_2_k(%arg2) |
240 | 240 | ||
241 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 241 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
242 | # TMP5 = HashKey^3<<1 (mod poly) | 242 | # TMP5 = HashKey^3<<1 (mod poly) |
243 | movdqa \TMP5, HashKey_3(%arg2) | 243 | movdqu \TMP5, HashKey_3(%arg2) |
244 | pshufd $78, \TMP5, \TMP1 | 244 | pshufd $78, \TMP5, \TMP1 |
245 | pxor \TMP5, \TMP1 | 245 | pxor \TMP5, \TMP1 |
246 | movdqa \TMP1, HashKey_3_k(%arg2) | 246 | movdqu \TMP1, HashKey_3_k(%arg2) |
247 | 247 | ||
248 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 248 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
249 | # TMP5 = HashKey^3<<1 (mod poly) | 249 | # TMP5 = HashKey^3<<1 (mod poly) |
250 | movdqa \TMP5, HashKey_4(%arg2) | 250 | movdqu \TMP5, HashKey_4(%arg2) |
251 | pshufd $78, \TMP5, \TMP1 | 251 | pshufd $78, \TMP5, \TMP1 |
252 | pxor \TMP5, \TMP1 | 252 | pxor \TMP5, \TMP1 |
253 | movdqa \TMP1, HashKey_4_k(%arg2) | 253 | movdqu \TMP1, HashKey_4_k(%arg2) |
254 | .endm | 254 | .endm |
255 | 255 | ||
256 | # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. | 256 | # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. |
@@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff | |||
271 | movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv | 271 | movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv |
272 | 272 | ||
273 | PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, | 273 | PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, |
274 | movdqa HashKey(%arg2), %xmm13 | 274 | movdqu HashKey(%arg2), %xmm13 |
275 | 275 | ||
276 | CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ | 276 | CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ |
277 | %xmm4, %xmm5, %xmm6 | 277 | %xmm4, %xmm5, %xmm6 |
@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
997 | pshufd $78, \XMM5, \TMP6 | 997 | pshufd $78, \XMM5, \TMP6 |
998 | pxor \XMM5, \TMP6 | 998 | pxor \XMM5, \TMP6 |
999 | paddd ONE(%rip), \XMM0 # INCR CNT | 999 | paddd ONE(%rip), \XMM0 # INCR CNT |
1000 | movdqa HashKey_4(%arg2), \TMP5 | 1000 | movdqu HashKey_4(%arg2), \TMP5 |
1001 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 | 1001 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 |
1002 | movdqa \XMM0, \XMM1 | 1002 | movdqa \XMM0, \XMM1 |
1003 | paddd ONE(%rip), \XMM0 # INCR CNT | 1003 | paddd ONE(%rip), \XMM0 # INCR CNT |
@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1016 | pxor (%arg1), \XMM2 | 1016 | pxor (%arg1), \XMM2 |
1017 | pxor (%arg1), \XMM3 | 1017 | pxor (%arg1), \XMM3 |
1018 | pxor (%arg1), \XMM4 | 1018 | pxor (%arg1), \XMM4 |
1019 | movdqa HashKey_4_k(%arg2), \TMP5 | 1019 | movdqu HashKey_4_k(%arg2), \TMP5 |
1020 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) | 1020 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) |
1021 | movaps 0x10(%arg1), \TMP1 | 1021 | movaps 0x10(%arg1), \TMP1 |
1022 | AESENC \TMP1, \XMM1 # Round 1 | 1022 | AESENC \TMP1, \XMM1 # Round 1 |
@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1031 | movdqa \XMM6, \TMP1 | 1031 | movdqa \XMM6, \TMP1 |
1032 | pshufd $78, \XMM6, \TMP2 | 1032 | pshufd $78, \XMM6, \TMP2 |
1033 | pxor \XMM6, \TMP2 | 1033 | pxor \XMM6, \TMP2 |
1034 | movdqa HashKey_3(%arg2), \TMP5 | 1034 | movdqu HashKey_3(%arg2), \TMP5 |
1035 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 | 1035 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 |
1036 | movaps 0x30(%arg1), \TMP3 | 1036 | movaps 0x30(%arg1), \TMP3 |
1037 | AESENC \TMP3, \XMM1 # Round 3 | 1037 | AESENC \TMP3, \XMM1 # Round 3 |
@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1044 | AESENC \TMP3, \XMM2 | 1044 | AESENC \TMP3, \XMM2 |
1045 | AESENC \TMP3, \XMM3 | 1045 | AESENC \TMP3, \XMM3 |
1046 | AESENC \TMP3, \XMM4 | 1046 | AESENC \TMP3, \XMM4 |
1047 | movdqa HashKey_3_k(%arg2), \TMP5 | 1047 | movdqu HashKey_3_k(%arg2), \TMP5 |
1048 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1048 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1049 | movaps 0x50(%arg1), \TMP3 | 1049 | movaps 0x50(%arg1), \TMP3 |
1050 | AESENC \TMP3, \XMM1 # Round 5 | 1050 | AESENC \TMP3, \XMM1 # Round 5 |
@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1058 | movdqa \XMM7, \TMP1 | 1058 | movdqa \XMM7, \TMP1 |
1059 | pshufd $78, \XMM7, \TMP2 | 1059 | pshufd $78, \XMM7, \TMP2 |
1060 | pxor \XMM7, \TMP2 | 1060 | pxor \XMM7, \TMP2 |
1061 | movdqa HashKey_2(%arg2), \TMP5 | 1061 | movdqu HashKey_2(%arg2), \TMP5 |
1062 | 1062 | ||
1063 | # Multiply TMP5 * HashKey using karatsuba | 1063 | # Multiply TMP5 * HashKey using karatsuba |
1064 | 1064 | ||
@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1074 | AESENC \TMP3, \XMM2 | 1074 | AESENC \TMP3, \XMM2 |
1075 | AESENC \TMP3, \XMM3 | 1075 | AESENC \TMP3, \XMM3 |
1076 | AESENC \TMP3, \XMM4 | 1076 | AESENC \TMP3, \XMM4 |
1077 | movdqa HashKey_2_k(%arg2), \TMP5 | 1077 | movdqu HashKey_2_k(%arg2), \TMP5 |
1078 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1078 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1079 | movaps 0x80(%arg1), \TMP3 | 1079 | movaps 0x80(%arg1), \TMP3 |
1080 | AESENC \TMP3, \XMM1 # Round 8 | 1080 | AESENC \TMP3, \XMM1 # Round 8 |
@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1092 | movdqa \XMM8, \TMP1 | 1092 | movdqa \XMM8, \TMP1 |
1093 | pshufd $78, \XMM8, \TMP2 | 1093 | pshufd $78, \XMM8, \TMP2 |
1094 | pxor \XMM8, \TMP2 | 1094 | pxor \XMM8, \TMP2 |
1095 | movdqa HashKey(%arg2), \TMP5 | 1095 | movdqu HashKey(%arg2), \TMP5 |
1096 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1096 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1097 | movaps 0x90(%arg1), \TMP3 | 1097 | movaps 0x90(%arg1), \TMP3 |
1098 | AESENC \TMP3, \XMM1 # Round 9 | 1098 | AESENC \TMP3, \XMM1 # Round 9 |
@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@: | |||
1121 | AESENCLAST \TMP3, \XMM2 | 1121 | AESENCLAST \TMP3, \XMM2 |
1122 | AESENCLAST \TMP3, \XMM3 | 1122 | AESENCLAST \TMP3, \XMM3 |
1123 | AESENCLAST \TMP3, \XMM4 | 1123 | AESENCLAST \TMP3, \XMM4 |
1124 | movdqa HashKey_k(%arg2), \TMP5 | 1124 | movdqu HashKey_k(%arg2), \TMP5 |
1125 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1125 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1126 | movdqu (%arg4,%r11,1), \TMP3 | 1126 | movdqu (%arg4,%r11,1), \TMP3 |
1127 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK | 1127 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK |
@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1205 | pshufd $78, \XMM5, \TMP6 | 1205 | pshufd $78, \XMM5, \TMP6 |
1206 | pxor \XMM5, \TMP6 | 1206 | pxor \XMM5, \TMP6 |
1207 | paddd ONE(%rip), \XMM0 # INCR CNT | 1207 | paddd ONE(%rip), \XMM0 # INCR CNT |
1208 | movdqa HashKey_4(%arg2), \TMP5 | 1208 | movdqu HashKey_4(%arg2), \TMP5 |
1209 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 | 1209 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 |
1210 | movdqa \XMM0, \XMM1 | 1210 | movdqa \XMM0, \XMM1 |
1211 | paddd ONE(%rip), \XMM0 # INCR CNT | 1211 | paddd ONE(%rip), \XMM0 # INCR CNT |
@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1224 | pxor (%arg1), \XMM2 | 1224 | pxor (%arg1), \XMM2 |
1225 | pxor (%arg1), \XMM3 | 1225 | pxor (%arg1), \XMM3 |
1226 | pxor (%arg1), \XMM4 | 1226 | pxor (%arg1), \XMM4 |
1227 | movdqa HashKey_4_k(%arg2), \TMP5 | 1227 | movdqu HashKey_4_k(%arg2), \TMP5 |
1228 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) | 1228 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) |
1229 | movaps 0x10(%arg1), \TMP1 | 1229 | movaps 0x10(%arg1), \TMP1 |
1230 | AESENC \TMP1, \XMM1 # Round 1 | 1230 | AESENC \TMP1, \XMM1 # Round 1 |
@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1239 | movdqa \XMM6, \TMP1 | 1239 | movdqa \XMM6, \TMP1 |
1240 | pshufd $78, \XMM6, \TMP2 | 1240 | pshufd $78, \XMM6, \TMP2 |
1241 | pxor \XMM6, \TMP2 | 1241 | pxor \XMM6, \TMP2 |
1242 | movdqa HashKey_3(%arg2), \TMP5 | 1242 | movdqu HashKey_3(%arg2), \TMP5 |
1243 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 | 1243 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 |
1244 | movaps 0x30(%arg1), \TMP3 | 1244 | movaps 0x30(%arg1), \TMP3 |
1245 | AESENC \TMP3, \XMM1 # Round 3 | 1245 | AESENC \TMP3, \XMM1 # Round 3 |
@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1252 | AESENC \TMP3, \XMM2 | 1252 | AESENC \TMP3, \XMM2 |
1253 | AESENC \TMP3, \XMM3 | 1253 | AESENC \TMP3, \XMM3 |
1254 | AESENC \TMP3, \XMM4 | 1254 | AESENC \TMP3, \XMM4 |
1255 | movdqa HashKey_3_k(%arg2), \TMP5 | 1255 | movdqu HashKey_3_k(%arg2), \TMP5 |
1256 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1256 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1257 | movaps 0x50(%arg1), \TMP3 | 1257 | movaps 0x50(%arg1), \TMP3 |
1258 | AESENC \TMP3, \XMM1 # Round 5 | 1258 | AESENC \TMP3, \XMM1 # Round 5 |
@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1266 | movdqa \XMM7, \TMP1 | 1266 | movdqa \XMM7, \TMP1 |
1267 | pshufd $78, \XMM7, \TMP2 | 1267 | pshufd $78, \XMM7, \TMP2 |
1268 | pxor \XMM7, \TMP2 | 1268 | pxor \XMM7, \TMP2 |
1269 | movdqa HashKey_2(%arg2), \TMP5 | 1269 | movdqu HashKey_2(%arg2), \TMP5 |
1270 | 1270 | ||
1271 | # Multiply TMP5 * HashKey using karatsuba | 1271 | # Multiply TMP5 * HashKey using karatsuba |
1272 | 1272 | ||
@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1282 | AESENC \TMP3, \XMM2 | 1282 | AESENC \TMP3, \XMM2 |
1283 | AESENC \TMP3, \XMM3 | 1283 | AESENC \TMP3, \XMM3 |
1284 | AESENC \TMP3, \XMM4 | 1284 | AESENC \TMP3, \XMM4 |
1285 | movdqa HashKey_2_k(%arg2), \TMP5 | 1285 | movdqu HashKey_2_k(%arg2), \TMP5 |
1286 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1286 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1287 | movaps 0x80(%arg1), \TMP3 | 1287 | movaps 0x80(%arg1), \TMP3 |
1288 | AESENC \TMP3, \XMM1 # Round 8 | 1288 | AESENC \TMP3, \XMM1 # Round 8 |
@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
1300 | movdqa \XMM8, \TMP1 | 1300 | movdqa \XMM8, \TMP1 |
1301 | pshufd $78, \XMM8, \TMP2 | 1301 | pshufd $78, \XMM8, \TMP2 |
1302 | pxor \XMM8, \TMP2 | 1302 | pxor \XMM8, \TMP2 |
1303 | movdqa HashKey(%arg2), \TMP5 | 1303 | movdqu HashKey(%arg2), \TMP5 |
1304 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1304 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1305 | movaps 0x90(%arg1), \TMP3 | 1305 | movaps 0x90(%arg1), \TMP3 |
1306 | AESENC \TMP3, \XMM1 # Round 9 | 1306 | AESENC \TMP3, \XMM1 # Round 9 |
@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@: | |||
1329 | AESENCLAST \TMP3, \XMM2 | 1329 | AESENCLAST \TMP3, \XMM2 |
1330 | AESENCLAST \TMP3, \XMM3 | 1330 | AESENCLAST \TMP3, \XMM3 |
1331 | AESENCLAST \TMP3, \XMM4 | 1331 | AESENCLAST \TMP3, \XMM4 |
1332 | movdqa HashKey_k(%arg2), \TMP5 | 1332 | movdqu HashKey_k(%arg2), \TMP5 |
1333 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1333 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1334 | movdqu (%arg4,%r11,1), \TMP3 | 1334 | movdqu (%arg4,%r11,1), \TMP3 |
1335 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK | 1335 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK |
@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
1405 | movdqa \XMM1, \TMP6 | 1405 | movdqa \XMM1, \TMP6 |
1406 | pshufd $78, \XMM1, \TMP2 | 1406 | pshufd $78, \XMM1, \TMP2 |
1407 | pxor \XMM1, \TMP2 | 1407 | pxor \XMM1, \TMP2 |
1408 | movdqa HashKey_4(%arg2), \TMP5 | 1408 | movdqu HashKey_4(%arg2), \TMP5 |
1409 | PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 | 1409 | PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 |
1410 | PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 | 1410 | PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 |
1411 | movdqa HashKey_4_k(%arg2), \TMP4 | 1411 | movdqu HashKey_4_k(%arg2), \TMP4 |
1412 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1412 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1413 | movdqa \XMM1, \XMMDst | 1413 | movdqa \XMM1, \XMMDst |
1414 | movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 | 1414 | movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 |
@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
1418 | movdqa \XMM2, \TMP1 | 1418 | movdqa \XMM2, \TMP1 |
1419 | pshufd $78, \XMM2, \TMP2 | 1419 | pshufd $78, \XMM2, \TMP2 |
1420 | pxor \XMM2, \TMP2 | 1420 | pxor \XMM2, \TMP2 |
1421 | movdqa HashKey_3(%arg2), \TMP5 | 1421 | movdqu HashKey_3(%arg2), \TMP5 |
1422 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1422 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1423 | PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 | 1423 | PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 |
1424 | movdqa HashKey_3_k(%arg2), \TMP4 | 1424 | movdqu HashKey_3_k(%arg2), \TMP4 |
1425 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1425 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1426 | pxor \TMP1, \TMP6 | 1426 | pxor \TMP1, \TMP6 |
1427 | pxor \XMM2, \XMMDst | 1427 | pxor \XMM2, \XMMDst |
@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
1433 | movdqa \XMM3, \TMP1 | 1433 | movdqa \XMM3, \TMP1 |
1434 | pshufd $78, \XMM3, \TMP2 | 1434 | pshufd $78, \XMM3, \TMP2 |
1435 | pxor \XMM3, \TMP2 | 1435 | pxor \XMM3, \TMP2 |
1436 | movdqa HashKey_2(%arg2), \TMP5 | 1436 | movdqu HashKey_2(%arg2), \TMP5 |
1437 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1437 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1438 | PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 | 1438 | PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 |
1439 | movdqa HashKey_2_k(%arg2), \TMP4 | 1439 | movdqu HashKey_2_k(%arg2), \TMP4 |
1440 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1440 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1441 | pxor \TMP1, \TMP6 | 1441 | pxor \TMP1, \TMP6 |
1442 | pxor \XMM3, \XMMDst | 1442 | pxor \XMM3, \XMMDst |
@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
1446 | movdqa \XMM4, \TMP1 | 1446 | movdqa \XMM4, \TMP1 |
1447 | pshufd $78, \XMM4, \TMP2 | 1447 | pshufd $78, \XMM4, \TMP2 |
1448 | pxor \XMM4, \TMP2 | 1448 | pxor \XMM4, \TMP2 |
1449 | movdqa HashKey(%arg2), \TMP5 | 1449 | movdqu HashKey(%arg2), \TMP5 |
1450 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1450 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
1451 | PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 | 1451 | PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 |
1452 | movdqa HashKey_k(%arg2), \TMP4 | 1452 | movdqu HashKey_k(%arg2), \TMP4 |
1453 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1453 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
1454 | pxor \TMP1, \TMP6 | 1454 | pxor \TMP1, \TMP6 |
1455 | pxor \XMM4, \XMMDst | 1455 | pxor \XMM4, \XMMDst |
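The aesni change above switches every HashKey load and store from movdqa to movdqu, presumably because the gcm context holding the precomputed keys is not guaranteed to be 16-byte aligned. A userspace intrinsics sketch of the distinction (SSE2 is baseline on x86-64):

```c
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
	unsigned char buf[32] = { 0 };
	unsigned char *p = buf + 1;		/* deliberately not 16-byte aligned */

	/* movdqu / _mm_loadu_si128 tolerate any alignment; the movdqa forms
	 * (_mm_load_si128 / _mm_store_si128) fault on a pointer like this one. */
	__m128i v = _mm_loadu_si128((const __m128i *)p);
	_mm_storeu_si128((__m128i *)p, v);

	printf("unaligned 16-byte load/store at %p succeeded\n", (void *)p);
	return 0;
}
```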
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 5f4829f10129..dfb2f7c0d019 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs | |||
2465 | 2465 | ||
2466 | perf_callchain_store(entry, regs->ip); | 2466 | perf_callchain_store(entry, regs->ip); |
2467 | 2467 | ||
2468 | if (!current->mm) | 2468 | if (!nmi_uaccess_okay()) |
2469 | return; | 2469 | return; |
2470 | 2470 | ||
2471 | if (perf_callchain_user32(regs, entry)) | 2471 | if (perf_callchain_user32(regs, entry)) |
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index b143717b92b3..ce84388e540c 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h | |||
@@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v) | |||
80 | * true if the result is zero, or false for all | 80 | * true if the result is zero, or false for all |
81 | * other cases. | 81 | * other cases. |
82 | */ | 82 | */ |
83 | #define arch_atomic_sub_and_test arch_atomic_sub_and_test | ||
84 | static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) | 83 | static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) |
85 | { | 84 | { |
86 | GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); | 85 | GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); |
87 | } | 86 | } |
87 | #define arch_atomic_sub_and_test arch_atomic_sub_and_test | ||
88 | 88 | ||
89 | /** | 89 | /** |
90 | * arch_atomic_inc - increment atomic variable | 90 | * arch_atomic_inc - increment atomic variable |
@@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) | |||
92 | * | 92 | * |
93 | * Atomically increments @v by 1. | 93 | * Atomically increments @v by 1. |
94 | */ | 94 | */ |
95 | #define arch_atomic_inc arch_atomic_inc | ||
96 | static __always_inline void arch_atomic_inc(atomic_t *v) | 95 | static __always_inline void arch_atomic_inc(atomic_t *v) |
97 | { | 96 | { |
98 | asm volatile(LOCK_PREFIX "incl %0" | 97 | asm volatile(LOCK_PREFIX "incl %0" |
99 | : "+m" (v->counter)); | 98 | : "+m" (v->counter)); |
100 | } | 99 | } |
100 | #define arch_atomic_inc arch_atomic_inc | ||
101 | 101 | ||
102 | /** | 102 | /** |
103 | * arch_atomic_dec - decrement atomic variable | 103 | * arch_atomic_dec - decrement atomic variable |
@@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v) | |||
105 | * | 105 | * |
106 | * Atomically decrements @v by 1. | 106 | * Atomically decrements @v by 1. |
107 | */ | 107 | */ |
108 | #define arch_atomic_dec arch_atomic_dec | ||
109 | static __always_inline void arch_atomic_dec(atomic_t *v) | 108 | static __always_inline void arch_atomic_dec(atomic_t *v) |
110 | { | 109 | { |
111 | asm volatile(LOCK_PREFIX "decl %0" | 110 | asm volatile(LOCK_PREFIX "decl %0" |
112 | : "+m" (v->counter)); | 111 | : "+m" (v->counter)); |
113 | } | 112 | } |
113 | #define arch_atomic_dec arch_atomic_dec | ||
114 | 114 | ||
115 | /** | 115 | /** |
116 | * arch_atomic_dec_and_test - decrement and test | 116 | * arch_atomic_dec_and_test - decrement and test |
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v) | |||
120 | * returns true if the result is 0, or false for all other | 120 | * returns true if the result is 0, or false for all other |
121 | * cases. | 121 | * cases. |
122 | */ | 122 | */ |
123 | #define arch_atomic_dec_and_test arch_atomic_dec_and_test | ||
124 | static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) | 123 | static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) |
125 | { | 124 | { |
126 | GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); | 125 | GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); |
127 | } | 126 | } |
127 | #define arch_atomic_dec_and_test arch_atomic_dec_and_test | ||
128 | 128 | ||
129 | /** | 129 | /** |
130 | * arch_atomic_inc_and_test - increment and test | 130 | * arch_atomic_inc_and_test - increment and test |
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) | |||
134 | * and returns true if the result is zero, or false for all | 134 | * and returns true if the result is zero, or false for all |
135 | * other cases. | 135 | * other cases. |
136 | */ | 136 | */ |
137 | #define arch_atomic_inc_and_test arch_atomic_inc_and_test | ||
138 | static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) | 137 | static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) |
139 | { | 138 | { |
140 | GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); | 139 | GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); |
141 | } | 140 | } |
141 | #define arch_atomic_inc_and_test arch_atomic_inc_and_test | ||
142 | 142 | ||
143 | /** | 143 | /** |
144 | * arch_atomic_add_negative - add and test if negative | 144 | * arch_atomic_add_negative - add and test if negative |
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) | |||
149 | * if the result is negative, or false when | 149 | * if the result is negative, or false when |
150 | * result is greater than or equal to zero. | 150 | * result is greater than or equal to zero. |
151 | */ | 151 | */ |
152 | #define arch_atomic_add_negative arch_atomic_add_negative | ||
153 | static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) | 152 | static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) |
154 | { | 153 | { |
155 | GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); | 154 | GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); |
156 | } | 155 | } |
156 | #define arch_atomic_add_negative arch_atomic_add_negative | ||
157 | 157 | ||
158 | /** | 158 | /** |
159 | * arch_atomic_add_return - add integer and return | 159 | * arch_atomic_add_return - add integer and return |
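The atomic.h hunks above (and the atomic64_32.h/atomic64_64.h hunks that follow) only move each self-referential #define from before the function it names to after it. That define is what the generic atomic headers test to decide whether the architecture supplies its own implementation or needs the generic fallback; a stripped-down sketch of the detection pattern, with invented names:

    /* "arch" header: implement the operation, then advertise it. */
    static inline int arch_op_inc(int v)
    {
            return v + 1;
    }
    #define arch_op_inc arch_op_inc

    /* "generic" header: emit a fallback only when the arch did not
     * advertise an implementation above. */
    #ifndef arch_op_inc
    static inline int arch_op_inc(int v)
    {
            return v + 1;
    }
    #endif
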
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index ef959f02d070..6a5b0ec460da 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h | |||
@@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v) | |||
205 | * | 205 | * |
206 | * Atomically increments @v by 1. | 206 | * Atomically increments @v by 1. |
207 | */ | 207 | */ |
208 | #define arch_atomic64_inc arch_atomic64_inc | ||
209 | static inline void arch_atomic64_inc(atomic64_t *v) | 208 | static inline void arch_atomic64_inc(atomic64_t *v) |
210 | { | 209 | { |
211 | __alternative_atomic64(inc, inc_return, /* no output */, | 210 | __alternative_atomic64(inc, inc_return, /* no output */, |
212 | "S" (v) : "memory", "eax", "ecx", "edx"); | 211 | "S" (v) : "memory", "eax", "ecx", "edx"); |
213 | } | 212 | } |
213 | #define arch_atomic64_inc arch_atomic64_inc | ||
214 | 214 | ||
215 | /** | 215 | /** |
216 | * arch_atomic64_dec - decrement atomic64 variable | 216 | * arch_atomic64_dec - decrement atomic64 variable |
@@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v) | |||
218 | * | 218 | * |
219 | * Atomically decrements @v by 1. | 219 | * Atomically decrements @v by 1. |
220 | */ | 220 | */ |
221 | #define arch_atomic64_dec arch_atomic64_dec | ||
222 | static inline void arch_atomic64_dec(atomic64_t *v) | 221 | static inline void arch_atomic64_dec(atomic64_t *v) |
223 | { | 222 | { |
224 | __alternative_atomic64(dec, dec_return, /* no output */, | 223 | __alternative_atomic64(dec, dec_return, /* no output */, |
225 | "S" (v) : "memory", "eax", "ecx", "edx"); | 224 | "S" (v) : "memory", "eax", "ecx", "edx"); |
226 | } | 225 | } |
226 | #define arch_atomic64_dec arch_atomic64_dec | ||
227 | 227 | ||
228 | /** | 228 | /** |
229 | * arch_atomic64_add_unless - add unless the number is a given value | 229 | * arch_atomic64_add_unless - add unless the number is a given value |
@@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a, | |||
245 | return (int)a; | 245 | return (int)a; |
246 | } | 246 | } |
247 | 247 | ||
248 | #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero | ||
249 | static inline int arch_atomic64_inc_not_zero(atomic64_t *v) | 248 | static inline int arch_atomic64_inc_not_zero(atomic64_t *v) |
250 | { | 249 | { |
251 | int r; | 250 | int r; |
@@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v) | |||
253 | "S" (v) : "ecx", "edx", "memory"); | 252 | "S" (v) : "ecx", "edx", "memory"); |
254 | return r; | 253 | return r; |
255 | } | 254 | } |
255 | #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero | ||
256 | 256 | ||
257 | #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive | ||
258 | static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) | 257 | static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) |
259 | { | 258 | { |
260 | long long r; | 259 | long long r; |
@@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) | |||
262 | "S" (v) : "ecx", "memory"); | 261 | "S" (v) : "ecx", "memory"); |
263 | return r; | 262 | return r; |
264 | } | 263 | } |
264 | #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive | ||
265 | 265 | ||
266 | #undef alternative_atomic64 | 266 | #undef alternative_atomic64 |
267 | #undef __alternative_atomic64 | 267 | #undef __alternative_atomic64 |
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 4343d9b4f30e..5f851d92eecd 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h | |||
@@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v) | |||
71 | * true if the result is zero, or false for all | 71 | * true if the result is zero, or false for all |
72 | * other cases. | 72 | * other cases. |
73 | */ | 73 | */ |
74 | #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test | ||
75 | static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) | 74 | static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) |
76 | { | 75 | { |
77 | GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); | 76 | GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); |
78 | } | 77 | } |
78 | #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test | ||
79 | 79 | ||
80 | /** | 80 | /** |
81 | * arch_atomic64_inc - increment atomic64 variable | 81 | * arch_atomic64_inc - increment atomic64 variable |
@@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) | |||
83 | * | 83 | * |
84 | * Atomically increments @v by 1. | 84 | * Atomically increments @v by 1. |
85 | */ | 85 | */ |
86 | #define arch_atomic64_inc arch_atomic64_inc | ||
87 | static __always_inline void arch_atomic64_inc(atomic64_t *v) | 86 | static __always_inline void arch_atomic64_inc(atomic64_t *v) |
88 | { | 87 | { |
89 | asm volatile(LOCK_PREFIX "incq %0" | 88 | asm volatile(LOCK_PREFIX "incq %0" |
90 | : "=m" (v->counter) | 89 | : "=m" (v->counter) |
91 | : "m" (v->counter)); | 90 | : "m" (v->counter)); |
92 | } | 91 | } |
92 | #define arch_atomic64_inc arch_atomic64_inc | ||
93 | 93 | ||
94 | /** | 94 | /** |
95 | * arch_atomic64_dec - decrement atomic64 variable | 95 | * arch_atomic64_dec - decrement atomic64 variable |
@@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v) | |||
97 | * | 97 | * |
98 | * Atomically decrements @v by 1. | 98 | * Atomically decrements @v by 1. |
99 | */ | 99 | */ |
100 | #define arch_atomic64_dec arch_atomic64_dec | ||
101 | static __always_inline void arch_atomic64_dec(atomic64_t *v) | 100 | static __always_inline void arch_atomic64_dec(atomic64_t *v) |
102 | { | 101 | { |
103 | asm volatile(LOCK_PREFIX "decq %0" | 102 | asm volatile(LOCK_PREFIX "decq %0" |
104 | : "=m" (v->counter) | 103 | : "=m" (v->counter) |
105 | : "m" (v->counter)); | 104 | : "m" (v->counter)); |
106 | } | 105 | } |
106 | #define arch_atomic64_dec arch_atomic64_dec | ||
107 | 107 | ||
108 | /** | 108 | /** |
109 | * arch_atomic64_dec_and_test - decrement and test | 109 | * arch_atomic64_dec_and_test - decrement and test |
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v) | |||
113 | * returns true if the result is 0, or false for all other | 113 | * returns true if the result is 0, or false for all other |
114 | * cases. | 114 | * cases. |
115 | */ | 115 | */ |
116 | #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test | ||
117 | static inline bool arch_atomic64_dec_and_test(atomic64_t *v) | 116 | static inline bool arch_atomic64_dec_and_test(atomic64_t *v) |
118 | { | 117 | { |
119 | GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); | 118 | GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); |
120 | } | 119 | } |
120 | #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test | ||
121 | 121 | ||
122 | /** | 122 | /** |
123 | * arch_atomic64_inc_and_test - increment and test | 123 | * arch_atomic64_inc_and_test - increment and test |
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v) | |||
127 | * and returns true if the result is zero, or false for all | 127 | * and returns true if the result is zero, or false for all |
128 | * other cases. | 128 | * other cases. |
129 | */ | 129 | */ |
130 | #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test | ||
131 | static inline bool arch_atomic64_inc_and_test(atomic64_t *v) | 130 | static inline bool arch_atomic64_inc_and_test(atomic64_t *v) |
132 | { | 131 | { |
133 | GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); | 132 | GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); |
134 | } | 133 | } |
134 | #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test | ||
135 | 135 | ||
136 | /** | 136 | /** |
137 | * arch_atomic64_add_negative - add and test if negative | 137 | * arch_atomic64_add_negative - add and test if negative |
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v) | |||
142 | * if the result is negative, or false when | 142 | * if the result is negative, or false when |
143 | * result is greater than or equal to zero. | 143 | * result is greater than or equal to zero. |
144 | */ | 144 | */ |
145 | #define arch_atomic64_add_negative arch_atomic64_add_negative | ||
146 | static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) | 145 | static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) |
147 | { | 146 | { |
148 | GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); | 147 | GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); |
149 | } | 148 | } |
149 | #define arch_atomic64_add_negative arch_atomic64_add_negative | ||
150 | 150 | ||
151 | /** | 151 | /** |
152 | * arch_atomic64_add_return - add and return | 152 | * arch_atomic64_add_return - add and return |
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c14f2a74b2be..15450a675031 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void) | |||
33 | return flags; | 33 | return flags; |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void native_restore_fl(unsigned long flags) | 36 | extern inline void native_restore_fl(unsigned long flags); |
37 | extern inline void native_restore_fl(unsigned long flags) | ||
37 | { | 38 | { |
38 | asm volatile("push %0 ; popf" | 39 | asm volatile("push %0 ; popf" |
39 | : /* no output */ | 40 | : /* no output */ |
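native_restore_fl() above goes from static inline to extern inline, with a matching declaration, mirroring the treatment of native_save_fl() earlier in the same header. Under the gnu_inline semantics the kernel builds with, an extern inline definition is only used for inlining and emits no symbol of its own, so an out-of-line copy has to come from a separate object. A hedged sketch of those semantics in plain GCC C (this shows the general mechanism, not where the kernel's out-of-line copy lives):

    /* Header-style definition: callers may inline the body, but this
     * definition does not emit an external symbol by itself. */
    __attribute__((gnu_inline)) extern inline unsigned long twice(unsigned long x)
    {
            return 2 * x;
    }

    /* Exactly one translation unit must supply the out-of-line copy:
     *
     *     unsigned long twice(unsigned long x) { return 2 * x; }
     */
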
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index 395c9631e000..75f1e35e7c15 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h | |||
@@ -22,10 +22,20 @@ enum die_val { | |||
22 | DIE_NMIUNKNOWN, | 22 | DIE_NMIUNKNOWN, |
23 | }; | 23 | }; |
24 | 24 | ||
25 | enum show_regs_mode { | ||
26 | SHOW_REGS_SHORT, | ||
27 | /* | ||
28 | * For when userspace crashed, but we don't think it's our fault, and | ||
29 | * therefore don't print kernel registers. | ||
30 | */ | ||
31 | SHOW_REGS_USER, | ||
32 | SHOW_REGS_ALL | ||
33 | }; | ||
34 | |||
25 | extern void die(const char *, struct pt_regs *,long); | 35 | extern void die(const char *, struct pt_regs *,long); |
26 | extern int __must_check __die(const char *, struct pt_regs *, long); | 36 | extern int __must_check __die(const char *, struct pt_regs *, long); |
27 | extern void show_stack_regs(struct pt_regs *regs); | 37 | extern void show_stack_regs(struct pt_regs *regs); |
28 | extern void __show_regs(struct pt_regs *regs, int all); | 38 | extern void __show_regs(struct pt_regs *regs, enum show_regs_mode); |
29 | extern void show_iret_regs(struct pt_regs *regs); | 39 | extern void show_iret_regs(struct pt_regs *regs); |
30 | extern unsigned long oops_begin(void); | 40 | extern unsigned long oops_begin(void); |
31 | extern void oops_end(unsigned long, struct pt_regs *, int signr); | 41 | extern void oops_end(unsigned long, struct pt_regs *, int signr); |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 00ddb0c9e612..8e90488c3d56 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -1237,19 +1237,12 @@ enum emulation_result { | |||
1237 | #define EMULTYPE_NO_DECODE (1 << 0) | 1237 | #define EMULTYPE_NO_DECODE (1 << 0) |
1238 | #define EMULTYPE_TRAP_UD (1 << 1) | 1238 | #define EMULTYPE_TRAP_UD (1 << 1) |
1239 | #define EMULTYPE_SKIP (1 << 2) | 1239 | #define EMULTYPE_SKIP (1 << 2) |
1240 | #define EMULTYPE_RETRY (1 << 3) | 1240 | #define EMULTYPE_ALLOW_RETRY (1 << 3) |
1241 | #define EMULTYPE_NO_REEXECUTE (1 << 4) | 1241 | #define EMULTYPE_NO_UD_ON_FAIL (1 << 4) |
1242 | #define EMULTYPE_NO_UD_ON_FAIL (1 << 5) | 1242 | #define EMULTYPE_VMWARE (1 << 5) |
1243 | #define EMULTYPE_VMWARE (1 << 6) | 1243 | int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); |
1244 | int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, | 1244 | int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, |
1245 | int emulation_type, void *insn, int insn_len); | 1245 | void *insn, int insn_len); |
1246 | |||
1247 | static inline int emulate_instruction(struct kvm_vcpu *vcpu, | ||
1248 | int emulation_type) | ||
1249 | { | ||
1250 | return x86_emulate_instruction(vcpu, 0, | ||
1251 | emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); | ||
1252 | } | ||
1253 | 1246 | ||
1254 | void kvm_enable_efer_bits(u64); | 1247 | void kvm_enable_efer_bits(u64); |
1255 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); | 1248 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); |
@@ -1450,7 +1443,6 @@ asmlinkage void kvm_spurious_fault(void); | |||
1450 | ____kvm_handle_fault_on_reboot(insn, "") | 1443 | ____kvm_handle_fault_on_reboot(insn, "") |
1451 | 1444 | ||
1452 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 1445 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
1453 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | ||
1454 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); | 1446 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); |
1455 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); | 1447 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); |
1456 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | 1448 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); |
@@ -1463,7 +1455,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); | |||
1463 | void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); | 1455 | void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); |
1464 | 1456 | ||
1465 | int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, | 1457 | int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, |
1466 | unsigned long ipi_bitmap_high, int min, | 1458 | unsigned long ipi_bitmap_high, u32 min, |
1467 | unsigned long icr, int op_64_bit); | 1459 | unsigned long icr, int op_64_bit); |
1468 | 1460 | ||
1469 | u64 kvm_get_arch_capabilities(void); | 1461 | u64 kvm_get_arch_capabilities(void); |
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index a564084c6141..f8b1ad2c3828 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -2,6 +2,8 @@ | |||
2 | #ifndef _ASM_X86_PGTABLE_3LEVEL_H | 2 | #ifndef _ASM_X86_PGTABLE_3LEVEL_H |
3 | #define _ASM_X86_PGTABLE_3LEVEL_H | 3 | #define _ASM_X86_PGTABLE_3LEVEL_H |
4 | 4 | ||
5 | #include <asm/atomic64_32.h> | ||
6 | |||
5 | /* | 7 | /* |
6 | * Intel Physical Address Extension (PAE) Mode - three-level page | 8 | * Intel Physical Address Extension (PAE) Mode - three-level page |
7 | * tables on PPro+ CPUs. | 9 | * tables on PPro+ CPUs. |
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep) | |||
150 | { | 152 | { |
151 | pte_t res; | 153 | pte_t res; |
152 | 154 | ||
153 | /* xchg acts as a barrier before the setting of the high bits */ | 155 | res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0); |
154 | res.pte_low = xchg(&ptep->pte_low, 0); | ||
155 | res.pte_high = ptep->pte_high; | ||
156 | ptep->pte_high = 0; | ||
157 | 156 | ||
158 | return res; | 157 | return res; |
159 | } | 158 | } |
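The rewritten native_ptep_get_and_clear() above replaces the old sequence (xchg of pte_low, then a plain read and clear of pte_high) with a single 64-bit arch_atomic64_xchg(), so the returned value and the cleared entry come from one atomic operation with no window for a torn PTE. A userspace C11 sketch of the same read-and-clear-in-one-step idea (names and types are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t entry;

    /* The old value is captured and the slot cleared in one atomic
     * exchange; no other thread can observe a half-cleared entry. */
    static uint64_t get_and_clear(void)
    {
            return atomic_exchange(&entry, 0);
    }
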
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index e4ffa565a69f..690c0307afed 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, | |||
1195 | return xchg(pmdp, pmd); | 1195 | return xchg(pmdp, pmd); |
1196 | } else { | 1196 | } else { |
1197 | pmd_t old = *pmdp; | 1197 | pmd_t old = *pmdp; |
1198 | *pmdp = pmd; | 1198 | WRITE_ONCE(*pmdp, pmd); |
1199 | return old; | 1199 | return old; |
1200 | } | 1200 | } |
1201 | } | 1201 | } |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index f773d5e6c8cc..ce2b59047cb8 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -55,15 +55,15 @@ struct mm_struct; | |||
55 | void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte); | 55 | void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte); |
56 | void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); | 56 | void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); |
57 | 57 | ||
58 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, | 58 | static inline void native_set_pte(pte_t *ptep, pte_t pte) |
59 | pte_t *ptep) | ||
60 | { | 59 | { |
61 | *ptep = native_make_pte(0); | 60 | WRITE_ONCE(*ptep, pte); |
62 | } | 61 | } |
63 | 62 | ||
64 | static inline void native_set_pte(pte_t *ptep, pte_t pte) | 63 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, |
64 | pte_t *ptep) | ||
65 | { | 65 | { |
66 | *ptep = pte; | 66 | native_set_pte(ptep, native_make_pte(0)); |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | 69 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
@@ -73,7 +73,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | |||
73 | 73 | ||
74 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) | 74 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) |
75 | { | 75 | { |
76 | *pmdp = pmd; | 76 | WRITE_ONCE(*pmdp, pmd); |
77 | } | 77 | } |
78 | 78 | ||
79 | static inline void native_pmd_clear(pmd_t *pmd) | 79 | static inline void native_pmd_clear(pmd_t *pmd) |
@@ -109,7 +109,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
109 | 109 | ||
110 | static inline void native_set_pud(pud_t *pudp, pud_t pud) | 110 | static inline void native_set_pud(pud_t *pudp, pud_t pud) |
111 | { | 111 | { |
112 | *pudp = pud; | 112 | WRITE_ONCE(*pudp, pud); |
113 | } | 113 | } |
114 | 114 | ||
115 | static inline void native_pud_clear(pud_t *pud) | 115 | static inline void native_pud_clear(pud_t *pud) |
@@ -137,13 +137,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) | |||
137 | pgd_t pgd; | 137 | pgd_t pgd; |
138 | 138 | ||
139 | if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { | 139 | if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { |
140 | *p4dp = p4d; | 140 | WRITE_ONCE(*p4dp, p4d); |
141 | return; | 141 | return; |
142 | } | 142 | } |
143 | 143 | ||
144 | pgd = native_make_pgd(native_p4d_val(p4d)); | 144 | pgd = native_make_pgd(native_p4d_val(p4d)); |
145 | pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd); | 145 | pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd); |
146 | *p4dp = native_make_p4d(native_pgd_val(pgd)); | 146 | WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd))); |
147 | } | 147 | } |
148 | 148 | ||
149 | static inline void native_p4d_clear(p4d_t *p4d) | 149 | static inline void native_p4d_clear(p4d_t *p4d) |
@@ -153,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d) | |||
153 | 153 | ||
154 | static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) | 154 | static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) |
155 | { | 155 | { |
156 | *pgdp = pti_set_user_pgtbl(pgdp, pgd); | 156 | WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd)); |
157 | } | 157 | } |
158 | 158 | ||
159 | static inline void native_pgd_clear(pgd_t *pgd) | 159 | static inline void native_pgd_clear(pgd_t *pgd) |
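The pgtable.h and pgtable_64.h hunks above convert plain assignments to live page-table entries into WRITE_ONCE() stores, which keeps the compiler from tearing the write or re-issuing it while the hardware walker or another CPU may be reading the entry. A rough userspace analogue of the volatile-store idea (the kernel macro is more elaborate than this sketch):

    #include <stdint.h>

    /* Sketch only: force one ordinary-width store of the whole value. */
    #define WRITE_ONCE_SKETCH(x, val) \
            (*(volatile __typeof__(x) *)&(x) = (val))

    static uint64_t pte_slot;

    static void set_entry(uint64_t v)
    {
            WRITE_ONCE_SKETCH(pte_slot, v);
    }
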
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c24297268ebc..d53c54b842da 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -132,6 +132,8 @@ struct cpuinfo_x86 { | |||
132 | /* Index into per_cpu list: */ | 132 | /* Index into per_cpu list: */ |
133 | u16 cpu_index; | 133 | u16 cpu_index; |
134 | u32 microcode; | 134 | u32 microcode; |
135 | /* Address space bits used by the cache internally */ | ||
136 | u8 x86_cache_bits; | ||
135 | unsigned initialized : 1; | 137 | unsigned initialized : 1; |
136 | } __randomize_layout; | 138 | } __randomize_layout; |
137 | 139 | ||
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c); | |||
183 | 185 | ||
184 | static inline unsigned long long l1tf_pfn_limit(void) | 186 | static inline unsigned long long l1tf_pfn_limit(void) |
185 | { | 187 | { |
186 | return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT); | 188 | return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); |
187 | } | 189 | } |
188 | 190 | ||
189 | extern void early_cpu_init(void); | 191 | extern void early_cpu_init(void); |
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 5f9012ff52ed..33d3c88a7225 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h | |||
@@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs); | |||
39 | 39 | ||
40 | #define __ARCH_HAS_SA_RESTORER | 40 | #define __ARCH_HAS_SA_RESTORER |
41 | 41 | ||
42 | #include <asm/asm.h> | ||
42 | #include <uapi/asm/sigcontext.h> | 43 | #include <uapi/asm/sigcontext.h> |
43 | 44 | ||
44 | #ifdef __i386__ | 45 | #ifdef __i386__ |
@@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig) | |||
86 | 87 | ||
87 | static inline int __gen_sigismember(sigset_t *set, int _sig) | 88 | static inline int __gen_sigismember(sigset_t *set, int _sig) |
88 | { | 89 | { |
89 | unsigned char ret; | 90 | bool ret; |
90 | asm("btl %2,%1\n\tsetc %0" | 91 | asm("btl %2,%1" CC_SET(c) |
91 | : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); | 92 | : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1)); |
92 | return ret; | 93 | return ret; |
93 | } | 94 | } |
94 | 95 | ||
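__gen_sigismember() above now returns the carry flag through the CC_SET()/CC_OUT() helpers (hence the new asm/asm.h include) instead of materialising it with setc into a byte register, and the explicit "cc" clobber goes away because the flag becomes an output. A userspace sketch of what this boils down to on compilers with asm flag-output support (assumes GCC or clang on x86; the function name is invented):

    #include <stdbool.h>

    static inline bool test_bit_sketch(const unsigned long *word, int bit)
    {
            bool ret;

            /* "=@ccc" hands the carry flag set by btl straight back to
             * the compiler. */
            asm("btl %2, %1" : "=@ccc" (ret) : "m" (*word), "Ir" (bit));
            return ret;
    }
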
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index b6dc698f992a..f335aad404a4 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h | |||
@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void) | |||
111 | return (unsigned long)frame; | 111 | return (unsigned long)frame; |
112 | } | 112 | } |
113 | 113 | ||
114 | void show_opcodes(u8 *rip, const char *loglvl); | 114 | void show_opcodes(struct pt_regs *regs, const char *loglvl); |
115 | void show_ip(struct pt_regs *regs, const char *loglvl); | 115 | void show_ip(struct pt_regs *regs, const char *loglvl); |
116 | #endif /* _ASM_X86_STACKTRACE_H */ | 116 | #endif /* _ASM_X86_STACKTRACE_H */ |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 29c9da6c62fc..58ce5288878e 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
@@ -175,8 +175,16 @@ struct tlb_state { | |||
175 | * are on. This means that it may not match current->active_mm, | 175 | * are on. This means that it may not match current->active_mm, |
176 | * which will contain the previous user mm when we're in lazy TLB | 176 | * which will contain the previous user mm when we're in lazy TLB |
177 | * mode even if we've already switched back to swapper_pg_dir. | 177 | * mode even if we've already switched back to swapper_pg_dir. |
178 | * | ||
179 | * During switch_mm_irqs_off(), loaded_mm will be set to | ||
180 | * LOADED_MM_SWITCHING during the brief interrupts-off window | ||
181 | * when CR3 and loaded_mm would otherwise be inconsistent. This | ||
182 | * is for nmi_uaccess_okay()'s benefit. | ||
178 | */ | 183 | */ |
179 | struct mm_struct *loaded_mm; | 184 | struct mm_struct *loaded_mm; |
185 | |||
186 | #define LOADED_MM_SWITCHING ((struct mm_struct *)1) | ||
187 | |||
180 | u16 loaded_mm_asid; | 188 | u16 loaded_mm_asid; |
181 | u16 next_asid; | 189 | u16 next_asid; |
182 | /* last user mm's ctx id */ | 190 | /* last user mm's ctx id */ |
@@ -246,6 +254,38 @@ struct tlb_state { | |||
246 | }; | 254 | }; |
247 | DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); | 255 | DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); |
248 | 256 | ||
257 | /* | ||
258 | * Blindly accessing user memory from NMI context can be dangerous | ||
259 | * if we're in the middle of switching the current user task or | ||
260 | * switching the loaded mm. It can also be dangerous if we | ||
261 | * interrupted some kernel code that was temporarily using a | ||
262 | * different mm. | ||
263 | */ | ||
264 | static inline bool nmi_uaccess_okay(void) | ||
265 | { | ||
266 | struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); | ||
267 | struct mm_struct *current_mm = current->mm; | ||
268 | |||
269 | VM_WARN_ON_ONCE(!loaded_mm); | ||
270 | |||
271 | /* | ||
272 | * The condition we want to check is | ||
273 | * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, | ||
274 | * if we're running in a VM with shadow paging, and nmi_uaccess_okay() | ||
275 | * is supposed to be reasonably fast. | ||
276 | * | ||
277 | * Instead, we check the almost equivalent but somewhat conservative | ||
278 | * condition below, and we rely on the fact that switch_mm_irqs_off() | ||
279 | * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. | ||
280 | */ | ||
281 | if (loaded_mm != current_mm) | ||
282 | return false; | ||
283 | |||
284 | VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); | ||
285 | |||
286 | return true; | ||
287 | } | ||
288 | |||
249 | /* Initialize cr4 shadow for this CPU. */ | 289 | /* Initialize cr4 shadow for this CPU. */ |
250 | static inline void cr4_init_shadow(void) | 290 | static inline void cr4_init_shadow(void) |
251 | { | 291 | { |
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index fb856c9f0449..53748541c487 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void) | |||
93 | * | 93 | * |
94 | * If RDPID is available, use it. | 94 | * If RDPID is available, use it. |
95 | */ | 95 | */ |
96 | alternative_io ("lsl %[p],%[seg]", | 96 | alternative_io ("lsl %[seg],%[p]", |
97 | ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ | 97 | ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ |
98 | X86_FEATURE_RDPID, | 98 | X86_FEATURE_RDPID, |
99 | [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); | 99 | [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 014f214da581..b9d5e7c9ef43 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, | |||
684 | * It means the size must be writable atomically and the address must be aligned | 684 | * It means the size must be writable atomically and the address must be aligned |
685 | * in a way that permits an atomic write. It also makes sure we fit on a single | 685 | * in a way that permits an atomic write. It also makes sure we fit on a single |
686 | * page. | 686 | * page. |
687 | * | ||
688 | * Note: Must be called under text_mutex. | ||
689 | */ | 687 | */ |
690 | void *text_poke(void *addr, const void *opcode, size_t len) | 688 | void *text_poke(void *addr, const void *opcode, size_t len) |
691 | { | 689 | { |
@@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len) | |||
700 | */ | 698 | */ |
701 | BUG_ON(!after_bootmem); | 699 | BUG_ON(!after_bootmem); |
702 | 700 | ||
701 | lockdep_assert_held(&text_mutex); | ||
702 | |||
703 | if (!core_kernel_text((unsigned long)addr)) { | 703 | if (!core_kernel_text((unsigned long)addr)) { |
704 | pages[0] = vmalloc_to_page(addr); | 704 | pages[0] = vmalloc_to_page(addr); |
705 | pages[1] = vmalloc_to_page(addr + PAGE_SIZE); | 705 | pages[1] = vmalloc_to_page(addr + PAGE_SIZE); |
@@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs) | |||
782 | * - replace the first byte (int3) by the first byte of | 782 | * - replace the first byte (int3) by the first byte of |
783 | * replacing opcode | 783 | * replacing opcode |
784 | * - sync cores | 784 | * - sync cores |
785 | * | ||
786 | * Note: must be called under text_mutex. | ||
787 | */ | 785 | */ |
788 | void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) | 786 | void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) |
789 | { | 787 | { |
@@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) | |||
792 | bp_int3_handler = handler; | 790 | bp_int3_handler = handler; |
793 | bp_int3_addr = (u8 *)addr + sizeof(int3); | 791 | bp_int3_addr = (u8 *)addr + sizeof(int3); |
794 | bp_patching_in_progress = true; | 792 | bp_patching_in_progress = true; |
793 | |||
794 | lockdep_assert_held(&text_mutex); | ||
795 | |||
795 | /* | 796 | /* |
796 | * Corresponding read barrier in int3 notifier for making sure the | 797 | * Corresponding read barrier in int3 notifier for making sure the |
797 | * in_progress and handler are correctly ordered wrt. patching. | 798 | * in_progress and handler are correctly ordered wrt. patching. |
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 9f148e3d45b4..7654febd5102 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd) | |||
413 | if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) { | 413 | if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) { |
414 | /* Something in the core code broke! Survive gracefully */ | 414 | /* Something in the core code broke! Survive gracefully */ |
415 | pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq); | 415 | pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq); |
416 | return EINVAL; | 416 | return -EINVAL; |
417 | } | 417 | } |
418 | 418 | ||
419 | ret = assign_managed_vector(irqd, vector_searchmask); | 419 | ret = assign_managed_vector(irqd, vector_searchmask); |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4c2313d0b9ca..40bdaea97fe7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); | |||
668 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; | 668 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
669 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); | 669 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); |
670 | 670 | ||
671 | /* | ||
672 | * These CPUs all support 44bits physical address space internally in the | ||
673 | * cache but CPUID can report a smaller number of physical address bits. | ||
674 | * | ||
675 | * The L1TF mitigation uses the top most address bit for the inversion of | ||
676 | * non present PTEs. When the installed memory reaches into the top most | ||
677 | * address bit due to memory holes, which has been observed on machines | ||
678 | * which report 36bits physical address bits and have 32G RAM installed, | ||
679 | * then the mitigation range check in l1tf_select_mitigation() triggers. | ||
680 | * This is a false positive because the mitigation is still possible due to | ||
681 | * the fact that the cache uses 44bit internally. Use the cache bits | ||
682 | * instead of the reported physical bits and adjust them on the affected | ||
683 | * machines to 44bit if the reported bits are less than 44. | ||
684 | */ | ||
685 | static void override_cache_bits(struct cpuinfo_x86 *c) | ||
686 | { | ||
687 | if (c->x86 != 6) | ||
688 | return; | ||
689 | |||
690 | switch (c->x86_model) { | ||
691 | case INTEL_FAM6_NEHALEM: | ||
692 | case INTEL_FAM6_WESTMERE: | ||
693 | case INTEL_FAM6_SANDYBRIDGE: | ||
694 | case INTEL_FAM6_IVYBRIDGE: | ||
695 | case INTEL_FAM6_HASWELL_CORE: | ||
696 | case INTEL_FAM6_HASWELL_ULT: | ||
697 | case INTEL_FAM6_HASWELL_GT3E: | ||
698 | case INTEL_FAM6_BROADWELL_CORE: | ||
699 | case INTEL_FAM6_BROADWELL_GT3E: | ||
700 | case INTEL_FAM6_SKYLAKE_MOBILE: | ||
701 | case INTEL_FAM6_SKYLAKE_DESKTOP: | ||
702 | case INTEL_FAM6_KABYLAKE_MOBILE: | ||
703 | case INTEL_FAM6_KABYLAKE_DESKTOP: | ||
704 | if (c->x86_cache_bits < 44) | ||
705 | c->x86_cache_bits = 44; | ||
706 | break; | ||
707 | } | ||
708 | } | ||
709 | |||
671 | static void __init l1tf_select_mitigation(void) | 710 | static void __init l1tf_select_mitigation(void) |
672 | { | 711 | { |
673 | u64 half_pa; | 712 | u64 half_pa; |
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void) | |||
675 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) | 714 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
676 | return; | 715 | return; |
677 | 716 | ||
717 | override_cache_bits(&boot_cpu_data); | ||
718 | |||
678 | switch (l1tf_mitigation) { | 719 | switch (l1tf_mitigation) { |
679 | case L1TF_MITIGATION_OFF: | 720 | case L1TF_MITIGATION_OFF: |
680 | case L1TF_MITIGATION_FLUSH_NOWARN: | 721 | case L1TF_MITIGATION_FLUSH_NOWARN: |
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void) | |||
694 | return; | 735 | return; |
695 | #endif | 736 | #endif |
696 | 737 | ||
697 | /* | ||
698 | * This is extremely unlikely to happen because almost all | ||
699 | * systems have far more MAX_PA/2 than RAM can be fit into | ||
700 | * DIMM slots. | ||
701 | */ | ||
702 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; | 738 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; |
703 | if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { | 739 | if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { |
704 | pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); | 740 | pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); |
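The comment block above explains why the listed models get x86_cache_bits raised to 44: the L1TF inversion uses the top address bit the cache actually implements, not the smaller width CPUID may report. With 44 usable bits and 4 KiB pages, the cut-off checked against the e820 map works out to half of a 16 TiB physical space; a compilable sketch of the arithmetic (PAGE_SHIFT assumed to be 12):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int cache_bits = 44, page_shift = 12;
            uint64_t pfn_limit = 1ULL << (cache_bits - 1 - page_shift);
            uint64_t half_pa = pfn_limit << page_shift;

            /* Prints 2147483648 pfns and 8192 GiB (i.e. 8 TiB). */
            printf("pfn limit: %llu, half_pa: %llu GiB\n",
                   (unsigned long long)pfn_limit,
                   (unsigned long long)(half_pa >> 30));
            return 0;
    }
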
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 84dee5ab745a..44c4ef3d989b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c) | |||
919 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) | 919 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) |
920 | c->x86_phys_bits = 36; | 920 | c->x86_phys_bits = 36; |
921 | #endif | 921 | #endif |
922 | c->x86_cache_bits = c->x86_phys_bits; | ||
922 | } | 923 | } |
923 | 924 | ||
924 | static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) | 925 | static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 401e8c133108..fc3c07fe7df5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) | |||
150 | if (cpu_has(c, X86_FEATURE_HYPERVISOR)) | 150 | if (cpu_has(c, X86_FEATURE_HYPERVISOR)) |
151 | return false; | 151 | return false; |
152 | 152 | ||
153 | if (c->x86 != 6) | ||
154 | return false; | ||
155 | |||
153 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { | 156 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { |
154 | if (c->x86_model == spectre_bad_microcodes[i].model && | 157 | if (c->x86_model == spectre_bad_microcodes[i].model && |
155 | c->x86_stepping == spectre_bad_microcodes[i].stepping) | 158 | c->x86_stepping == spectre_bad_microcodes[i].stepping) |
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 0624957aa068..07b5fc00b188 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu) | |||
504 | struct microcode_amd *mc_amd; | 504 | struct microcode_amd *mc_amd; |
505 | struct ucode_cpu_info *uci; | 505 | struct ucode_cpu_info *uci; |
506 | struct ucode_patch *p; | 506 | struct ucode_patch *p; |
507 | enum ucode_state ret; | ||
507 | u32 rev, dummy; | 508 | u32 rev, dummy; |
508 | 509 | ||
509 | BUG_ON(raw_smp_processor_id() != cpu); | 510 | BUG_ON(raw_smp_processor_id() != cpu); |
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu) | |||
521 | 522 | ||
522 | /* need to apply patch? */ | 523 | /* need to apply patch? */ |
523 | if (rev >= mc_amd->hdr.patch_id) { | 524 | if (rev >= mc_amd->hdr.patch_id) { |
524 | c->microcode = rev; | 525 | ret = UCODE_OK; |
525 | uci->cpu_sig.rev = rev; | 526 | goto out; |
526 | return UCODE_OK; | ||
527 | } | 527 | } |
528 | 528 | ||
529 | if (__apply_microcode_amd(mc_amd)) { | 529 | if (__apply_microcode_amd(mc_amd)) { |
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu) | |||
531 | cpu, mc_amd->hdr.patch_id); | 531 | cpu, mc_amd->hdr.patch_id); |
532 | return UCODE_ERROR; | 532 | return UCODE_ERROR; |
533 | } | 533 | } |
534 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, | ||
535 | mc_amd->hdr.patch_id); | ||
536 | 534 | ||
537 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; | 535 | rev = mc_amd->hdr.patch_id; |
538 | c->microcode = mc_amd->hdr.patch_id; | 536 | ret = UCODE_UPDATED; |
537 | |||
538 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); | ||
539 | 539 | ||
540 | return UCODE_UPDATED; | 540 | out: |
541 | uci->cpu_sig.rev = rev; | ||
542 | c->microcode = rev; | ||
543 | |||
544 | /* Update boot_cpu_data's revision too, if we're on the BSP: */ | ||
545 | if (c->cpu_index == boot_cpu_data.cpu_index) | ||
546 | boot_cpu_data.microcode = rev; | ||
547 | |||
548 | return ret; | ||
541 | } | 549 | } |
542 | 550 | ||
543 | static int install_equiv_cpu_table(const u8 *buf) | 551 | static int install_equiv_cpu_table(const u8 *buf) |
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 97ccf4c3b45b..16936a24795c 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu) | |||
795 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 795 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
796 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 796 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
797 | struct microcode_intel *mc; | 797 | struct microcode_intel *mc; |
798 | enum ucode_state ret; | ||
798 | static int prev_rev; | 799 | static int prev_rev; |
799 | u32 rev; | 800 | u32 rev; |
800 | 801 | ||
@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu) | |||
817 | */ | 818 | */ |
818 | rev = intel_get_microcode_revision(); | 819 | rev = intel_get_microcode_revision(); |
819 | if (rev >= mc->hdr.rev) { | 820 | if (rev >= mc->hdr.rev) { |
820 | uci->cpu_sig.rev = rev; | 821 | ret = UCODE_OK; |
821 | c->microcode = rev; | 822 | goto out; |
822 | return UCODE_OK; | ||
823 | } | 823 | } |
824 | 824 | ||
825 | /* | 825 | /* |
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu) | |||
848 | prev_rev = rev; | 848 | prev_rev = rev; |
849 | } | 849 | } |
850 | 850 | ||
851 | ret = UCODE_UPDATED; | ||
852 | |||
853 | out: | ||
851 | uci->cpu_sig.rev = rev; | 854 | uci->cpu_sig.rev = rev; |
852 | c->microcode = rev; | 855 | c->microcode = rev; |
856 | |||
857 | /* Update boot_cpu_data's revision too, if we're on the BSP: */ | ||
858 | if (c->cpu_index == boot_cpu_data.cpu_index) | ||
859 | boot_cpu_data.microcode = rev; | ||
853 | 860 | ||
854 | return UCODE_UPDATED; | 861 | return ret; |
855 | } | 862 | } |
856 | 863 | ||
857 | static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | 864 | static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 9c8652974f8e..2b5886401e5f 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/bug.h> | 17 | #include <linux/bug.h> |
18 | #include <linux/nmi.h> | 18 | #include <linux/nmi.h> |
19 | #include <linux/sysfs.h> | 19 | #include <linux/sysfs.h> |
20 | #include <linux/kasan.h> | ||
20 | 21 | ||
21 | #include <asm/cpu_entry_area.h> | 22 | #include <asm/cpu_entry_area.h> |
22 | #include <asm/stacktrace.h> | 23 | #include <asm/stacktrace.h> |
@@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable, | |||
89 | * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random | 90 | * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random |
90 | * guesstimate in attempt to achieve all of the above. | 91 | * guesstimate in attempt to achieve all of the above. |
91 | */ | 92 | */ |
92 | void show_opcodes(u8 *rip, const char *loglvl) | 93 | void show_opcodes(struct pt_regs *regs, const char *loglvl) |
93 | { | 94 | { |
94 | #define PROLOGUE_SIZE 42 | 95 | #define PROLOGUE_SIZE 42 |
95 | #define EPILOGUE_SIZE 21 | 96 | #define EPILOGUE_SIZE 21 |
96 | #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) | 97 | #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) |
97 | u8 opcodes[OPCODE_BUFSIZE]; | 98 | u8 opcodes[OPCODE_BUFSIZE]; |
99 | unsigned long prologue = regs->ip - PROLOGUE_SIZE; | ||
100 | bool bad_ip; | ||
98 | 101 | ||
99 | if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) { | 102 | /* |
103 | * Make sure userspace isn't trying to trick us into dumping kernel | ||
104 | * memory by pointing the userspace instruction pointer at it. | ||
105 | */ | ||
106 | bad_ip = user_mode(regs) && | ||
107 | __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); | ||
108 | |||
109 | if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue, | ||
110 | OPCODE_BUFSIZE)) { | ||
100 | printk("%sCode: Bad RIP value.\n", loglvl); | 111 | printk("%sCode: Bad RIP value.\n", loglvl); |
101 | } else { | 112 | } else { |
102 | printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" | 113 | printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" |
@@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl) | |||
112 | #else | 123 | #else |
113 | printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); | 124 | printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); |
114 | #endif | 125 | #endif |
115 | show_opcodes((u8 *)regs->ip, loglvl); | 126 | show_opcodes(regs, loglvl); |
116 | } | 127 | } |
117 | 128 | ||
118 | void show_iret_regs(struct pt_regs *regs) | 129 | void show_iret_regs(struct pt_regs *regs) |
@@ -135,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs, | |||
135 | * they can be printed in the right context. | 146 | * they can be printed in the right context. |
136 | */ | 147 | */ |
137 | if (!partial && on_stack(info, regs, sizeof(*regs))) { | 148 | if (!partial && on_stack(info, regs, sizeof(*regs))) { |
138 | __show_regs(regs, 0); | 149 | __show_regs(regs, SHOW_REGS_SHORT); |
139 | 150 | ||
140 | } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, | 151 | } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, |
141 | IRET_FRAME_SIZE)) { | 152 | IRET_FRAME_SIZE)) { |
@@ -333,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) | |||
333 | oops_exit(); | 344 | oops_exit(); |
334 | 345 | ||
335 | /* Executive summary in case the oops scrolled away */ | 346 | /* Executive summary in case the oops scrolled away */ |
336 | __show_regs(&exec_summary_regs, true); | 347 | __show_regs(&exec_summary_regs, SHOW_REGS_ALL); |
337 | 348 | ||
338 | if (!signr) | 349 | if (!signr) |
339 | return; | 350 | return; |
@@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) | |||
346 | * We're not going to return, but we might be on an IST stack or | 357 | * We're not going to return, but we might be on an IST stack or |
347 | * have very little stack space left. Rewind the stack and kill | 358 | * have very little stack space left. Rewind the stack and kill |
348 | * the task. | 359 | * the task. |
360 | * Before we rewind the stack, we have to tell KASAN that we're going to | ||
361 | * reuse the task stack and that existing poisons are invalid. | ||
349 | */ | 362 | */ |
363 | kasan_unpoison_task_stack(current); | ||
350 | rewind_stack_do_exit(signr); | 364 | rewind_stack_do_exit(signr); |
351 | } | 365 | } |
352 | NOKPROBE_SYMBOL(oops_end); | 366 | NOKPROBE_SYMBOL(oops_end); |
@@ -393,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err) | |||
393 | 407 | ||
394 | void show_regs(struct pt_regs *regs) | 408 | void show_regs(struct pt_regs *regs) |
395 | { | 409 | { |
396 | bool all = true; | ||
397 | |||
398 | show_regs_print_info(KERN_DEFAULT); | 410 | show_regs_print_info(KERN_DEFAULT); |
399 | 411 | ||
400 | if (IS_ENABLED(CONFIG_X86_32)) | 412 | __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL); |
401 | all = !user_mode(regs); | ||
402 | |||
403 | __show_regs(regs, all); | ||
404 | 413 | ||
405 | /* | 414 | /* |
406 | * When in-kernel, we also print out the stack at the time of the fault.. | 415 | * When in-kernel, we also print out the stack at the time of the fault.. |
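show_opcodes() above now takes the pt_regs and refuses to dump the bytes around a user-mode RIP unless the whole opcode window lies inside the user address range, which stops userspace from leaking kernel memory through the "Code:" line by pointing its RIP at a kernel address. The guard reduces to a bounds check along these lines (a sketch; the limit stands in for TASK_SIZE_MAX):

    #include <stdbool.h>
    #include <stdint.h>

    static bool range_within_limit(uint64_t start, uint64_t len, uint64_t limit)
    {
            /* Rejects wrap-around as well as ranges crossing the limit. */
            return start <= limit && len <= limit - start;
    }
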
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 2924fd447e61..5046a3c9dec2 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -59,7 +59,7 @@ | |||
59 | #include <asm/intel_rdt_sched.h> | 59 | #include <asm/intel_rdt_sched.h> |
60 | #include <asm/proto.h> | 60 | #include <asm/proto.h> |
61 | 61 | ||
62 | void __show_regs(struct pt_regs *regs, int all) | 62 | void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) |
63 | { | 63 | { |
64 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; | 64 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; |
65 | unsigned long d0, d1, d2, d3, d6, d7; | 65 | unsigned long d0, d1, d2, d3, d6, d7; |
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all) | |||
85 | printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n", | 85 | printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n", |
86 | (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags); | 86 | (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags); |
87 | 87 | ||
88 | if (!all) | 88 | if (mode != SHOW_REGS_ALL) |
89 | return; | 89 | return; |
90 | 90 | ||
91 | cr0 = read_cr0(); | 91 | cr0 = read_cr0(); |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index a451bc374b9b..ea5ea850348d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -62,7 +62,7 @@ | |||
62 | __visible DEFINE_PER_CPU(unsigned long, rsp_scratch); | 62 | __visible DEFINE_PER_CPU(unsigned long, rsp_scratch); |
63 | 63 | ||
64 | /* Prints also some state that isn't saved in the pt_regs */ | 64 | /* Prints also some state that isn't saved in the pt_regs */ |
65 | void __show_regs(struct pt_regs *regs, int all) | 65 | void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) |
66 | { | 66 | { |
67 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; | 67 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; |
68 | unsigned long d0, d1, d2, d3, d6, d7; | 68 | unsigned long d0, d1, d2, d3, d6, d7; |
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all) | |||
87 | printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", | 87 | printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", |
88 | regs->r13, regs->r14, regs->r15); | 88 | regs->r13, regs->r14, regs->r15); |
89 | 89 | ||
90 | if (!all) | 90 | if (mode == SHOW_REGS_SHORT) |
91 | return; | 91 | return; |
92 | 92 | ||
93 | if (mode == SHOW_REGS_USER) { | ||
94 | rdmsrl(MSR_FS_BASE, fs); | ||
95 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); | ||
96 | printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n", | ||
97 | fs, shadowgs); | ||
98 | return; | ||
99 | } | ||
100 | |||
93 | asm("movl %%ds,%0" : "=r" (ds)); | 101 | asm("movl %%ds,%0" : "=r" (ds)); |
94 | asm("movl %%cs,%0" : "=r" (cs)); | 102 | asm("movl %%cs,%0" : "=r" (cs)); |
95 | asm("movl %%es,%0" : "=r" (es)); | 103 | asm("movl %%es,%0" : "=r" (es)); |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 1463468ba9a0..6490f618e096 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -1415,7 +1415,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early) | |||
1415 | 1415 | ||
1416 | static unsigned long __init get_loops_per_jiffy(void) | 1416 | static unsigned long __init get_loops_per_jiffy(void) |
1417 | { | 1417 | { |
1418 | unsigned long lpj = tsc_khz * KHZ; | 1418 | u64 lpj = (u64)tsc_khz * KHZ; |
1419 | 1419 | ||
1420 | do_div(lpj, HZ); | 1420 | do_div(lpj, HZ); |
1421 | return lpj; | 1421 | return lpj; |
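get_loops_per_jiffy() above switches the product to u64 because on 32-bit kernels unsigned long is 32 bits and tsc_khz * KHZ already wraps for TSC frequencies above roughly 4.29 GHz, and do_div() wants a 64-bit dividend in any case. A small worked example of the wrap (frequency and HZ values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t tsc_khz = 4400000;       /* a 4.4 GHz part */
            uint32_t khz = 1000, hz = 250;

            uint32_t lpj32 = tsc_khz * khz;              /* wraps mod 2^32 */
            uint64_t lpj64 = (uint64_t)tsc_khz * khz;    /* exact */

            printf("32-bit: %u, 64-bit: %llu, per jiffy: %llu\n",
                   lpj32, (unsigned long long)lpj64,
                   (unsigned long long)(lpj64 / hz));
            return 0;
    }
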
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 0cefba28c864..17c0472c5b34 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, | |||
548 | } | 548 | } |
549 | 549 | ||
550 | int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, | 550 | int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, |
551 | unsigned long ipi_bitmap_high, int min, | 551 | unsigned long ipi_bitmap_high, u32 min, |
552 | unsigned long icr, int op_64_bit) | 552 | unsigned long icr, int op_64_bit) |
553 | { | 553 | { |
554 | int i; | 554 | int i; |
@@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, | |||
571 | rcu_read_lock(); | 571 | rcu_read_lock(); |
572 | map = rcu_dereference(kvm->arch.apic_map); | 572 | map = rcu_dereference(kvm->arch.apic_map); |
573 | 573 | ||
574 | if (min > map->max_apic_id) | ||
575 | goto out; | ||
574 | /* Bits above cluster_size are masked in the caller. */ | 576 | /* Bits above cluster_size are masked in the caller. */ |
575 | for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) { | 577 | for_each_set_bit(i, &ipi_bitmap_low, |
576 | vcpu = map->phys_map[min + i]->vcpu; | 578 | min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { |
577 | count += kvm_apic_set_irq(vcpu, &irq, NULL); | 579 | if (map->phys_map[min + i]) { |
580 | vcpu = map->phys_map[min + i]->vcpu; | ||
581 | count += kvm_apic_set_irq(vcpu, &irq, NULL); | ||
582 | } | ||
578 | } | 583 | } |
579 | 584 | ||
580 | min += cluster_size; | 585 | min += cluster_size; |
581 | for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) { | 586 | |
582 | vcpu = map->phys_map[min + i]->vcpu; | 587 | if (min > map->max_apic_id) |
583 | count += kvm_apic_set_irq(vcpu, &irq, NULL); | 588 | goto out; |
589 | |||
590 | for_each_set_bit(i, &ipi_bitmap_high, | ||
591 | min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { | ||
592 | if (map->phys_map[min + i]) { | ||
593 | vcpu = map->phys_map[min + i]->vcpu; | ||
594 | count += kvm_apic_set_irq(vcpu, &irq, NULL); | ||
595 | } | ||
584 | } | 596 | } |
585 | 597 | ||
598 | out: | ||
586 | rcu_read_unlock(); | 599 | rcu_read_unlock(); |
587 | return count; | 600 | return count; |
588 | } | 601 | } |
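
The lapic.c hunk above bounds the PV IPI walk because the guest supplies both min and the bitmaps: without the clamp and the NULL check, min + i can index past max_apic_id or dereference an unpopulated phys_map slot. A simplified standalone sketch of the same pattern (plain C; the map size, bitmap and min values are invented and this is not the KVM data layout):

#include <stdio.h>
#include <string.h>

#define MAP_SLOTS 16    /* stand-in for max_apic_id + 1 */

/* Walk only the bits that stay inside the map, and skip unpopulated slots. */
static int deliver(const int *phys_map, unsigned int max_id,
		   unsigned long bitmap, unsigned int min)
{
	unsigned int i, limit;
	int count = 0;

	if (min > max_id)
		return 0;

	limit = max_id - min + 1;             /* clamp so min + i <= max_id */
	if (limit > sizeof(bitmap) * 8)
		limit = sizeof(bitmap) * 8;

	for (i = 0; i < limit; i++) {
		if (!(bitmap & (1UL << i)))
			continue;
		if (phys_map[min + i])        /* NULL check in the real code */
			count++;
	}
	return count;
}

int main(void)
{
	int map[MAP_SLOTS];

	memset(map, 0, sizeof(map));
	map[3] = map[5] = 1;

	/* bits 1 and 3 with min = 2 target APIC IDs 3 and 5 */
	printf("delivered: %d\n", deliver(map, MAP_SLOTS - 1, 0xa, 2));
	return 0;
}
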
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a282321329b5..e24ea7067373 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1853,11 +1853,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, | |||
1853 | return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); | 1853 | return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); |
1854 | } | 1854 | } |
1855 | 1855 | ||
1856 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | ||
1857 | { | ||
1858 | return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); | ||
1859 | } | ||
1860 | |||
1861 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) | 1856 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) |
1862 | { | 1857 | { |
1863 | return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); | 1858 | return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); |
@@ -5217,7 +5212,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) | |||
5217 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, | 5212 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, |
5218 | void *insn, int insn_len) | 5213 | void *insn, int insn_len) |
5219 | { | 5214 | { |
5220 | int r, emulation_type = EMULTYPE_RETRY; | 5215 | int r, emulation_type = 0; |
5221 | enum emulation_result er; | 5216 | enum emulation_result er; |
5222 | bool direct = vcpu->arch.mmu.direct_map; | 5217 | bool direct = vcpu->arch.mmu.direct_map; |
5223 | 5218 | ||
@@ -5230,10 +5225,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, | |||
5230 | r = RET_PF_INVALID; | 5225 | r = RET_PF_INVALID; |
5231 | if (unlikely(error_code & PFERR_RSVD_MASK)) { | 5226 | if (unlikely(error_code & PFERR_RSVD_MASK)) { |
5232 | r = handle_mmio_page_fault(vcpu, cr2, direct); | 5227 | r = handle_mmio_page_fault(vcpu, cr2, direct); |
5233 | if (r == RET_PF_EMULATE) { | 5228 | if (r == RET_PF_EMULATE) |
5234 | emulation_type = 0; | ||
5235 | goto emulate; | 5229 | goto emulate; |
5236 | } | ||
5237 | } | 5230 | } |
5238 | 5231 | ||
5239 | if (r == RET_PF_INVALID) { | 5232 | if (r == RET_PF_INVALID) { |
@@ -5260,8 +5253,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, | |||
5260 | return 1; | 5253 | return 1; |
5261 | } | 5254 | } |
5262 | 5255 | ||
5263 | if (mmio_info_in_cache(vcpu, cr2, direct)) | 5256 | /* |
5264 | emulation_type = 0; | 5257 | * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still |
5258 | * optimistically try to just unprotect the page and let the processor | ||
5259 | * re-execute the instruction that caused the page fault. Do not allow | ||
5260 | * retrying MMIO emulation, as it's not only pointless but could also | ||
5261 | * cause us to enter an infinite loop because the processor will keep | ||
5262 | * faulting on the non-existent MMIO address. Retrying an instruction | ||
5263 | * from a nested guest is also pointless and dangerous as we are only | ||
5264 | * explicitly shadowing L1's page tables, i.e. unprotecting something | ||
5265 | * for L1 isn't going to magically fix whatever issue caused L2 to fail. | ||
5266 | */ | ||
5267 | if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu)) | ||
5268 | emulation_type = EMULTYPE_ALLOW_RETRY; | ||
5265 | emulate: | 5269 | emulate: |
5266 | /* | 5270 | /* |
5267 | * On AMD platforms, under certain conditions insn_len may be zero on #NPF. | 5271 | * On AMD platforms, under certain conditions insn_len may be zero on #NPF. |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 6276140044d0..89c4c5aa15f1 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
776 | } | 776 | } |
777 | 777 | ||
778 | if (!svm->next_rip) { | 778 | if (!svm->next_rip) { |
779 | if (emulate_instruction(vcpu, EMULTYPE_SKIP) != | 779 | if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) != |
780 | EMULATE_DONE) | 780 | EMULATE_DONE) |
781 | printk(KERN_DEBUG "%s: NOP\n", __func__); | 781 | printk(KERN_DEBUG "%s: NOP\n", __func__); |
782 | return; | 782 | return; |
@@ -2715,7 +2715,7 @@ static int gp_interception(struct vcpu_svm *svm) | |||
2715 | 2715 | ||
2716 | WARN_ON_ONCE(!enable_vmware_backdoor); | 2716 | WARN_ON_ONCE(!enable_vmware_backdoor); |
2717 | 2717 | ||
2718 | er = emulate_instruction(vcpu, | 2718 | er = kvm_emulate_instruction(vcpu, |
2719 | EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); | 2719 | EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); |
2720 | if (er == EMULATE_USER_EXIT) | 2720 | if (er == EMULATE_USER_EXIT) |
2721 | return 0; | 2721 | return 0; |
@@ -2819,7 +2819,7 @@ static int io_interception(struct vcpu_svm *svm) | |||
2819 | string = (io_info & SVM_IOIO_STR_MASK) != 0; | 2819 | string = (io_info & SVM_IOIO_STR_MASK) != 0; |
2820 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; | 2820 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; |
2821 | if (string) | 2821 | if (string) |
2822 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 2822 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
2823 | 2823 | ||
2824 | port = io_info >> 16; | 2824 | port = io_info >> 16; |
2825 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; | 2825 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; |
@@ -3861,7 +3861,7 @@ static int iret_interception(struct vcpu_svm *svm) | |||
3861 | static int invlpg_interception(struct vcpu_svm *svm) | 3861 | static int invlpg_interception(struct vcpu_svm *svm) |
3862 | { | 3862 | { |
3863 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) | 3863 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
3864 | return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; | 3864 | return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; |
3865 | 3865 | ||
3866 | kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); | 3866 | kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); |
3867 | return kvm_skip_emulated_instruction(&svm->vcpu); | 3867 | return kvm_skip_emulated_instruction(&svm->vcpu); |
@@ -3869,13 +3869,13 @@ static int invlpg_interception(struct vcpu_svm *svm) | |||
3869 | 3869 | ||
3870 | static int emulate_on_interception(struct vcpu_svm *svm) | 3870 | static int emulate_on_interception(struct vcpu_svm *svm) |
3871 | { | 3871 | { |
3872 | return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; | 3872 | return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; |
3873 | } | 3873 | } |
3874 | 3874 | ||
3875 | static int rsm_interception(struct vcpu_svm *svm) | 3875 | static int rsm_interception(struct vcpu_svm *svm) |
3876 | { | 3876 | { |
3877 | return x86_emulate_instruction(&svm->vcpu, 0, 0, | 3877 | return kvm_emulate_instruction_from_buffer(&svm->vcpu, |
3878 | rsm_ins_bytes, 2) == EMULATE_DONE; | 3878 | rsm_ins_bytes, 2) == EMULATE_DONE; |
3879 | } | 3879 | } |
3880 | 3880 | ||
3881 | static int rdpmc_interception(struct vcpu_svm *svm) | 3881 | static int rdpmc_interception(struct vcpu_svm *svm) |
@@ -4700,7 +4700,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm) | |||
4700 | ret = avic_unaccel_trap_write(svm); | 4700 | ret = avic_unaccel_trap_write(svm); |
4701 | } else { | 4701 | } else { |
4702 | /* Handling Fault */ | 4702 | /* Handling Fault */ |
4703 | ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); | 4703 | ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); |
4704 | } | 4704 | } |
4705 | 4705 | ||
4706 | return ret; | 4706 | return ret; |
@@ -6747,7 +6747,7 @@ e_free: | |||
6747 | static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | 6747 | static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) |
6748 | { | 6748 | { |
6749 | unsigned long vaddr, vaddr_end, next_vaddr; | 6749 | unsigned long vaddr, vaddr_end, next_vaddr; |
6750 | unsigned long dst_vaddr, dst_vaddr_end; | 6750 | unsigned long dst_vaddr; |
6751 | struct page **src_p, **dst_p; | 6751 | struct page **src_p, **dst_p; |
6752 | struct kvm_sev_dbg debug; | 6752 | struct kvm_sev_dbg debug; |
6753 | unsigned long n; | 6753 | unsigned long n; |
@@ -6763,7 +6763,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | |||
6763 | size = debug.len; | 6763 | size = debug.len; |
6764 | vaddr_end = vaddr + size; | 6764 | vaddr_end = vaddr + size; |
6765 | dst_vaddr = debug.dst_uaddr; | 6765 | dst_vaddr = debug.dst_uaddr; |
6766 | dst_vaddr_end = dst_vaddr + size; | ||
6767 | 6766 | ||
6768 | for (; vaddr < vaddr_end; vaddr = next_vaddr) { | 6767 | for (; vaddr < vaddr_end; vaddr = next_vaddr) { |
6769 | int len, s_off, d_off; | 6768 | int len, s_off, d_off; |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1d26f3c4985b..533a327372c8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, | |||
6983 | * Cause the #SS fault with 0 error code in VM86 mode. | 6983 | * Cause the #SS fault with 0 error code in VM86 mode. |
6984 | */ | 6984 | */ |
6985 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { | 6985 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { |
6986 | if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { | 6986 | if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) { |
6987 | if (vcpu->arch.halt_request) { | 6987 | if (vcpu->arch.halt_request) { |
6988 | vcpu->arch.halt_request = 0; | 6988 | vcpu->arch.halt_request = 0; |
6989 | return kvm_vcpu_halt(vcpu); | 6989 | return kvm_vcpu_halt(vcpu); |
@@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
7054 | 7054 | ||
7055 | if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { | 7055 | if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { |
7056 | WARN_ON_ONCE(!enable_vmware_backdoor); | 7056 | WARN_ON_ONCE(!enable_vmware_backdoor); |
7057 | er = emulate_instruction(vcpu, | 7057 | er = kvm_emulate_instruction(vcpu, |
7058 | EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); | 7058 | EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); |
7059 | if (er == EMULATE_USER_EXIT) | 7059 | if (er == EMULATE_USER_EXIT) |
7060 | return 0; | 7060 | return 0; |
@@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu) | |||
7157 | ++vcpu->stat.io_exits; | 7157 | ++vcpu->stat.io_exits; |
7158 | 7158 | ||
7159 | if (string) | 7159 | if (string) |
7160 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 7160 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
7161 | 7161 | ||
7162 | port = exit_qualification >> 16; | 7162 | port = exit_qualification >> 16; |
7163 | size = (exit_qualification & 7) + 1; | 7163 | size = (exit_qualification & 7) + 1; |
@@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) | |||
7231 | static int handle_desc(struct kvm_vcpu *vcpu) | 7231 | static int handle_desc(struct kvm_vcpu *vcpu) |
7232 | { | 7232 | { |
7233 | WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); | 7233 | WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); |
7234 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 7234 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
7235 | } | 7235 | } |
7236 | 7236 | ||
7237 | static int handle_cr(struct kvm_vcpu *vcpu) | 7237 | static int handle_cr(struct kvm_vcpu *vcpu) |
@@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu) | |||
7480 | 7480 | ||
7481 | static int handle_invd(struct kvm_vcpu *vcpu) | 7481 | static int handle_invd(struct kvm_vcpu *vcpu) |
7482 | { | 7482 | { |
7483 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 7483 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
7484 | } | 7484 | } |
7485 | 7485 | ||
7486 | static int handle_invlpg(struct kvm_vcpu *vcpu) | 7486 | static int handle_invlpg(struct kvm_vcpu *vcpu) |
@@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu) | |||
7547 | return kvm_skip_emulated_instruction(vcpu); | 7547 | return kvm_skip_emulated_instruction(vcpu); |
7548 | } | 7548 | } |
7549 | } | 7549 | } |
7550 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 7550 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
7551 | } | 7551 | } |
7552 | 7552 | ||
7553 | static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) | 7553 | static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) |
@@ -7704,8 +7704,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) | |||
7704 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) | 7704 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) |
7705 | return kvm_skip_emulated_instruction(vcpu); | 7705 | return kvm_skip_emulated_instruction(vcpu); |
7706 | else | 7706 | else |
7707 | return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP, | 7707 | return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) == |
7708 | NULL, 0) == EMULATE_DONE; | 7708 | EMULATE_DONE; |
7709 | } | 7709 | } |
7710 | 7710 | ||
7711 | return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); | 7711 | return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); |
@@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) | |||
7748 | if (kvm_test_request(KVM_REQ_EVENT, vcpu)) | 7748 | if (kvm_test_request(KVM_REQ_EVENT, vcpu)) |
7749 | return 1; | 7749 | return 1; |
7750 | 7750 | ||
7751 | err = emulate_instruction(vcpu, 0); | 7751 | err = kvm_emulate_instruction(vcpu, 0); |
7752 | 7752 | ||
7753 | if (err == EMULATE_USER_EXIT) { | 7753 | if (err == EMULATE_USER_EXIT) { |
7754 | ++vcpu->stat.mmio_exits; | 7754 | ++vcpu->stat.mmio_exits; |
@@ -12537,8 +12537,11 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) | |||
12537 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | 12537 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
12538 | bool from_vmentry = !!exit_qual; | 12538 | bool from_vmentry = !!exit_qual; |
12539 | u32 dummy_exit_qual; | 12539 | u32 dummy_exit_qual; |
12540 | u32 vmcs01_cpu_exec_ctrl; | ||
12540 | int r = 0; | 12541 | int r = 0; |
12541 | 12542 | ||
12543 | vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
12544 | |||
12542 | enter_guest_mode(vcpu); | 12545 | enter_guest_mode(vcpu); |
12543 | 12546 | ||
12544 | if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) | 12547 | if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) |
@@ -12575,6 +12578,25 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) | |||
12575 | } | 12578 | } |
12576 | 12579 | ||
12577 | /* | 12580 | /* |
12581 | * If L1 had a pending IRQ/NMI until it executed | ||
12582 | * VMLAUNCH/VMRESUME which wasn't delivered because it was | ||
12583 | * disallowed (e.g. interrupts disabled), L0 needs to | ||
12584 | * evaluate if this pending event should cause an exit from L2 | ||
12585 | * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't | ||
12586 | * intercept EXTERNAL_INTERRUPT). | ||
12587 | * | ||
12588 | * Usually this would be handled by L0 requesting an | ||
12589 | * IRQ/NMI window by setting VMCS accordingly. However, | ||
12590 | * this setting was done on VMCS01 and now VMCS02 is active | ||
12591 | * instead. Thus, we force L0 to perform pending event | ||
12592 | * evaluation by requesting a KVM_REQ_EVENT. | ||
12593 | */ | ||
12594 | if (vmcs01_cpu_exec_ctrl & | ||
12595 | (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) { | ||
12596 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
12597 | } | ||
12598 | |||
12599 | /* | ||
12578 | * Note no nested_vmx_succeed or nested_vmx_fail here. At this point | 12600 | * Note no nested_vmx_succeed or nested_vmx_fail here. At this point |
12579 | * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet | 12601 | * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet |
12580 | * returned as far as L1 is concerned. It will only return (and set | 12602 | * returned as far as L1 is concerned. It will only return (and set |
@@ -13988,9 +14010,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, | |||
13988 | check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) | 14010 | check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) |
13989 | return -EINVAL; | 14011 | return -EINVAL; |
13990 | 14012 | ||
13991 | if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING) | ||
13992 | vmx->nested.nested_run_pending = 1; | ||
13993 | |||
13994 | vmx->nested.dirty_vmcs12 = true; | 14013 | vmx->nested.dirty_vmcs12 = true; |
13995 | ret = enter_vmx_non_root_mode(vcpu, NULL); | 14014 | ret = enter_vmx_non_root_mode(vcpu, NULL); |
13996 | if (ret) | 14015 | if (ret) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 506bd2b4b8bb..542f6315444d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -4987,7 +4987,7 @@ int handle_ud(struct kvm_vcpu *vcpu) | |||
4987 | emul_type = 0; | 4987 | emul_type = 0; |
4988 | } | 4988 | } |
4989 | 4989 | ||
4990 | er = emulate_instruction(vcpu, emul_type); | 4990 | er = kvm_emulate_instruction(vcpu, emul_type); |
4991 | if (er == EMULATE_USER_EXIT) | 4991 | if (er == EMULATE_USER_EXIT) |
4992 | return 0; | 4992 | return 0; |
4993 | if (er != EMULATE_DONE) | 4993 | if (er != EMULATE_DONE) |
@@ -5870,7 +5870,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, | |||
5870 | gpa_t gpa = cr2; | 5870 | gpa_t gpa = cr2; |
5871 | kvm_pfn_t pfn; | 5871 | kvm_pfn_t pfn; |
5872 | 5872 | ||
5873 | if (emulation_type & EMULTYPE_NO_REEXECUTE) | 5873 | if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) |
5874 | return false; | ||
5875 | |||
5876 | if (WARN_ON_ONCE(is_guest_mode(vcpu))) | ||
5874 | return false; | 5877 | return false; |
5875 | 5878 | ||
5876 | if (!vcpu->arch.mmu.direct_map) { | 5879 | if (!vcpu->arch.mmu.direct_map) { |
@@ -5958,7 +5961,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, | |||
5958 | */ | 5961 | */ |
5959 | vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; | 5962 | vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; |
5960 | 5963 | ||
5961 | if (!(emulation_type & EMULTYPE_RETRY)) | 5964 | if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) |
5965 | return false; | ||
5966 | |||
5967 | if (WARN_ON_ONCE(is_guest_mode(vcpu))) | ||
5962 | return false; | 5968 | return false; |
5963 | 5969 | ||
5964 | if (x86_page_table_writing_insn(ctxt)) | 5970 | if (x86_page_table_writing_insn(ctxt)) |
@@ -6276,7 +6282,19 @@ restart: | |||
6276 | 6282 | ||
6277 | return r; | 6283 | return r; |
6278 | } | 6284 | } |
6279 | EXPORT_SYMBOL_GPL(x86_emulate_instruction); | 6285 | |
6286 | int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) | ||
6287 | { | ||
6288 | return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); | ||
6289 | } | ||
6290 | EXPORT_SYMBOL_GPL(kvm_emulate_instruction); | ||
6291 | |||
6292 | int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, | ||
6293 | void *insn, int insn_len) | ||
6294 | { | ||
6295 | return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); | ||
6296 | } | ||
6297 | EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); | ||
6280 | 6298 | ||
6281 | static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, | 6299 | static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, |
6282 | unsigned short port) | 6300 | unsigned short port) |
@@ -7734,7 +7752,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu) | |||
7734 | { | 7752 | { |
7735 | int r; | 7753 | int r; |
7736 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | 7754 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
7737 | r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); | 7755 | r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); |
7738 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | 7756 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); |
7739 | if (r != EMULATE_DONE) | 7757 | if (r != EMULATE_DONE) |
7740 | return 0; | 7758 | return 0; |
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 257f27620bc2..67b9568613f3 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
@@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | |||
274 | bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, | 274 | bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, |
275 | int page_num); | 275 | int page_num); |
276 | bool kvm_vector_hashing_enabled(void); | 276 | bool kvm_vector_hashing_enabled(void); |
277 | int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, | ||
278 | int emulation_type, void *insn, int insn_len); | ||
277 | 279 | ||
278 | #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ | 280 | #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ |
279 | | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ | 281 | | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ |
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index c8c6ad0d58b8..3f435d7fca5e 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
8 | #include <linux/export.h> | 8 | #include <linux/export.h> |
9 | 9 | ||
10 | #include <asm/tlbflush.h> | ||
11 | |||
10 | /* | 12 | /* |
11 | * We rely on the nested NMI work to allow atomic faults from the NMI path; the | 13 | * We rely on the nested NMI work to allow atomic faults from the NMI path; the |
12 | * nested NMI paths are careful to preserve CR2. | 14 | * nested NMI paths are careful to preserve CR2. |
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
19 | if (__range_not_ok(from, n, TASK_SIZE)) | 21 | if (__range_not_ok(from, n, TASK_SIZE)) |
20 | return n; | 22 | return n; |
21 | 23 | ||
24 | if (!nmi_uaccess_okay()) | ||
25 | return n; | ||
26 | |||
22 | /* | 27 | /* |
23 | * Even though this function is typically called from NMI/IRQ context | 28 | * Even though this function is typically called from NMI/IRQ context |
24 | * disable pagefaults so that its behaviour is consistent even when | 29 | * disable pagefaults so that its behaviour is consistent even when |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b9123c497e0a..47bebfe6efa7 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, | |||
837 | 837 | ||
838 | printk(KERN_CONT "\n"); | 838 | printk(KERN_CONT "\n"); |
839 | 839 | ||
840 | show_opcodes((u8 *)regs->ip, loglvl); | 840 | show_opcodes(regs, loglvl); |
841 | } | 841 | } |
842 | 842 | ||
843 | static void | 843 | static void |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 8d6c34fe49be..51a5a69ecac9 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) | |||
1420 | return 0; | 1420 | return 0; |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | /* | ||
1424 | * Machine check recovery code needs to change cache mode of poisoned | ||
1425 | * pages to UC to avoid speculative access logging another error. But | ||
1426 | * passing the address of the 1:1 mapping to set_memory_uc() is a fine | ||
1427 | * way to encourage a speculative access. So we cheat and flip the top | ||
1428 | * bit of the address. This works fine for the code that updates the | ||
1429 | * page tables. But at the end of the process we need to flush the cache | ||
1430 | * and the non-canonical address causes a #GP fault when used by the | ||
1431 | * CLFLUSH instruction. | ||
1432 | * | ||
1433 | * But in the common case we already have a canonical address. This code | ||
1434 | * will fix the top bit if needed and is a no-op otherwise. | ||
1435 | */ | ||
1436 | static inline unsigned long make_addr_canonical_again(unsigned long addr) | ||
1437 | { | ||
1438 | #ifdef CONFIG_X86_64 | ||
1439 | return (long)(addr << 1) >> 1; | ||
1440 | #else | ||
1441 | return addr; | ||
1442 | #endif | ||
1443 | } | ||
1444 | |||
1445 | |||
1423 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, | 1446 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
1424 | pgprot_t mask_set, pgprot_t mask_clr, | 1447 | pgprot_t mask_set, pgprot_t mask_clr, |
1425 | int force_split, int in_flag, | 1448 | int force_split, int in_flag, |
@@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
1465 | * Save address for cache flush. *addr is modified in the call | 1488 | * Save address for cache flush. *addr is modified in the call |
1466 | * to __change_page_attr_set_clr() below. | 1489 | * to __change_page_attr_set_clr() below. |
1467 | */ | 1490 | */ |
1468 | baddr = *addr; | 1491 | baddr = make_addr_canonical_again(*addr); |
1469 | } | 1492 | } |
1470 | 1493 | ||
1471 | /* Must avoid aliasing mappings in the highmem code */ | 1494 | /* Must avoid aliasing mappings in the highmem code */ |
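
make_addr_canonical_again() relies on shifting the address left by one and arithmetic-shifting it back, which copies bit 62 into bit 63: the 1:1-mapping alias with its top bit flipped becomes canonical again, while an already-canonical address passes through unchanged. A small standalone demonstration (assumes a 64-bit build and an arithmetic right shift of signed values, as the kernel does; the address is hypothetical):

#include <stdio.h>

/* Same trick as make_addr_canonical_again(): sign-extend bit 62 into bit 63. */
static unsigned long canonical(unsigned long addr)
{
	return (long)(addr << 1) >> 1;
}

int main(void)
{
	unsigned long kaddr = 0xffff888012345000UL;   /* hypothetical 1:1 map address */
	unsigned long alias = kaddr & ~(1UL << 63);   /* top bit flipped, non-canonical */

	printf("alias     : %#lx\n", alias);
	printf("canonical : %#lx\n", canonical(alias));   /* back to kaddr */
	printf("unchanged : %#lx\n", canonical(kaddr));   /* no-op for canonical input */
	return 0;
}
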
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index e848a4811785..ae394552fb94 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -269,7 +269,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp) | |||
269 | if (pgd_val(pgd) != 0) { | 269 | if (pgd_val(pgd) != 0) { |
270 | pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); | 270 | pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); |
271 | 271 | ||
272 | *pgdp = native_make_pgd(0); | 272 | pgd_clear(pgdp); |
273 | 273 | ||
274 | paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); | 274 | paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); |
275 | pmd_free(mm, pmd); | 275 | pmd_free(mm, pmd); |
@@ -494,7 +494,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, | |||
494 | int changed = !pte_same(*ptep, entry); | 494 | int changed = !pte_same(*ptep, entry); |
495 | 495 | ||
496 | if (changed && dirty) | 496 | if (changed && dirty) |
497 | *ptep = entry; | 497 | set_pte(ptep, entry); |
498 | 498 | ||
499 | return changed; | 499 | return changed; |
500 | } | 500 | } |
@@ -509,7 +509,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, | |||
509 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); | 509 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
510 | 510 | ||
511 | if (changed && dirty) { | 511 | if (changed && dirty) { |
512 | *pmdp = entry; | 512 | set_pmd(pmdp, entry); |
513 | /* | 513 | /* |
514 | * We had a write-protection fault here and changed the pmd | 514 | * We had a write-protection fault here and changed the pmd |
515 | * to be more permissive. No need to flush the TLB for that, | 515 | * to be more permissive. No need to flush the TLB for that, |
@@ -529,7 +529,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, | |||
529 | VM_BUG_ON(address & ~HPAGE_PUD_MASK); | 529 | VM_BUG_ON(address & ~HPAGE_PUD_MASK); |
530 | 530 | ||
531 | if (changed && dirty) { | 531 | if (changed && dirty) { |
532 | *pudp = entry; | 532 | set_pud(pudp, entry); |
533 | /* | 533 | /* |
534 | * We had a write-protection fault here and changed the pud | 534 | * We had a write-protection fault here and changed the pud |
535 | * to be more permissive. No need to flush the TLB for that, | 535 | * to be more permissive. No need to flush the TLB for that, |
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 31341ae7309f..c1fc1ae6b429 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c | |||
@@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) | |||
248 | * | 248 | * |
249 | * Returns a pointer to a PTE on success, or NULL on failure. | 249 | * Returns a pointer to a PTE on success, or NULL on failure. |
250 | */ | 250 | */ |
251 | static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) | 251 | static pte_t *pti_user_pagetable_walk_pte(unsigned long address) |
252 | { | 252 | { |
253 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); | 253 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); |
254 | pmd_t *pmd; | 254 | pmd_t *pmd; |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 9517d1b2a281..e96b99eb800c 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
305 | 305 | ||
306 | choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); | 306 | choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); |
307 | 307 | ||
308 | /* Let nmi_uaccess_okay() know that we're changing CR3. */ | ||
309 | this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); | ||
310 | barrier(); | ||
311 | |||
308 | if (need_flush) { | 312 | if (need_flush) { |
309 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); | 313 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); |
310 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); | 314 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); |
@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
335 | if (next != &init_mm) | 339 | if (next != &init_mm) |
336 | this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); | 340 | this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); |
337 | 341 | ||
342 | /* Make sure we write CR3 before loaded_mm. */ | ||
343 | barrier(); | ||
344 | |||
338 | this_cpu_write(cpu_tlbstate.loaded_mm, next); | 345 | this_cpu_write(cpu_tlbstate.loaded_mm, next); |
339 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); | 346 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); |
340 | } | 347 | } |
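
The two barriers added to switch_mm_irqs_off() bracket the CR3 write so that an NMI landing mid-switch observes loaded_mm == LOADED_MM_SWITCHING and backs off, which is what the new nmi_uaccess_okay() check in copy_from_user_nmi() consumes. A single-threaded standalone analogue of that handshake (plain C; the struct and helpers are invented stand-ins, and real compiler/CPU ordering is exactly what the kernel's barrier() calls provide):

#include <stdio.h>

struct mm { int id; };

/* Stand-in for the sentinel stored in cpu_tlbstate.loaded_mm. */
#define LOADED_MM_SWITCHING ((struct mm *)1UL)

static struct mm *loaded_mm;    /* per-CPU cpu_tlbstate.loaded_mm in the kernel */
static struct mm *current_mm;   /* stands in for whatever CR3 points at */

/* Consumer side (copy_from_user_nmi): refuse user accesses mid-switch. */
static int nmi_uaccess_ok(void)
{
	return loaded_mm != LOADED_MM_SWITCHING && loaded_mm == current_mm;
}

/* Producer side, mirroring switch_mm_irqs_off(): mark, switch, then publish. */
static void switch_to_mm(struct mm *next)
{
	loaded_mm = LOADED_MM_SWITCHING;   /* barrier() follows in the kernel */
	current_mm = next;                 /* the CR3 write */
	                                   /* barrier() precedes this in the kernel */
	loaded_mm = next;
}

int main(void)
{
	struct mm a = { 1 }, b = { 2 };

	loaded_mm = current_mm = &a;
	printf("before switch: %d\n", nmi_uaccess_ok());   /* 1 */

	loaded_mm = LOADED_MM_SWITCHING;                   /* snapshot of a switch in flight */
	printf("mid switch   : %d\n", nmi_uaccess_ok());   /* 0 */

	switch_to_mm(&b);
	printf("after switch : %d\n", nmi_uaccess_ok());   /* 1 */
	return 0;
}
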
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 324b93328b37..05ca14222463 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c | |||
@@ -85,14 +85,10 @@ pgd_t * __init efi_call_phys_prolog(void) | |||
85 | 85 | ||
86 | void __init efi_call_phys_epilog(pgd_t *save_pgd) | 86 | void __init efi_call_phys_epilog(pgd_t *save_pgd) |
87 | { | 87 | { |
88 | struct desc_ptr gdt_descr; | ||
89 | |||
90 | gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0); | ||
91 | gdt_descr.size = GDT_SIZE - 1; | ||
92 | load_gdt(&gdt_descr); | ||
93 | |||
94 | load_cr3(save_pgd); | 88 | load_cr3(save_pgd); |
95 | __flush_tlb_all(); | 89 | __flush_tlb_all(); |
90 | |||
91 | load_fixmap_gdt(0); | ||
96 | } | 92 | } |
97 | 93 | ||
98 | void __init efi_runtime_update_mappings(void) | 94 | void __init efi_runtime_update_mappings(void) |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 45b700ac5fe7..2fe5c9b1816b 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
@@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val) | |||
435 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) | 435 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) |
436 | { | 436 | { |
437 | trace_xen_mmu_set_pte_atomic(ptep, pte); | 437 | trace_xen_mmu_set_pte_atomic(ptep, pte); |
438 | set_64bit((u64 *)ptep, native_pte_val(pte)); | 438 | __xen_set_pte(ptep, pte); |
439 | } | 439 | } |
440 | 440 | ||
441 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 441 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
442 | { | 442 | { |
443 | trace_xen_mmu_pte_clear(mm, addr, ptep); | 443 | trace_xen_mmu_pte_clear(mm, addr, ptep); |
444 | if (!xen_batched_set_pte(ptep, native_make_pte(0))) | 444 | __xen_set_pte(ptep, native_make_pte(0)); |
445 | native_pte_clear(mm, addr, ptep); | ||
446 | } | 445 | } |
447 | 446 | ||
448 | static void xen_pmd_clear(pmd_t *pmdp) | 447 | static void xen_pmd_clear(pmd_t *pmdp) |
@@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) | |||
1570 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | 1569 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & |
1571 | pte_val_ma(pte)); | 1570 | pte_val_ma(pte)); |
1572 | #endif | 1571 | #endif |
1573 | native_set_pte(ptep, pte); | 1572 | __xen_set_pte(ptep, pte); |
1574 | } | 1573 | } |
1575 | 1574 | ||
1576 | /* Early in boot, while setting up the initial pagetable, assume | 1575 | /* Early in boot, while setting up the initial pagetable, assume |
@@ -2061,7 +2060,6 @@ void __init xen_relocate_p2m(void) | |||
2061 | pud_t *pud; | 2060 | pud_t *pud; |
2062 | pgd_t *pgd; | 2061 | pgd_t *pgd; |
2063 | unsigned long *new_p2m; | 2062 | unsigned long *new_p2m; |
2064 | int save_pud; | ||
2065 | 2063 | ||
2066 | size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); | 2064 | size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); |
2067 | n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; | 2065 | n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; |
@@ -2091,7 +2089,6 @@ void __init xen_relocate_p2m(void) | |||
2091 | 2089 | ||
2092 | pgd = __va(read_cr3_pa()); | 2090 | pgd = __va(read_cr3_pa()); |
2093 | new_p2m = (unsigned long *)(2 * PGDIR_SIZE); | 2091 | new_p2m = (unsigned long *)(2 * PGDIR_SIZE); |
2094 | save_pud = n_pud; | ||
2095 | for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { | 2092 | for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { |
2096 | pud = early_memremap(pud_phys, PAGE_SIZE); | 2093 | pud = early_memremap(pud_phys, PAGE_SIZE); |
2097 | clear_page(pud); | 2094 | clear_page(pud); |
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 58c6efa9f9a9..9fe5952d117d 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c | |||
@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg) | |||
275 | 275 | ||
276 | void bfqg_and_blkg_put(struct bfq_group *bfqg) | 276 | void bfqg_and_blkg_put(struct bfq_group *bfqg) |
277 | { | 277 | { |
278 | bfqg_put(bfqg); | ||
279 | |||
280 | blkg_put(bfqg_to_blkg(bfqg)); | 278 | blkg_put(bfqg_to_blkg(bfqg)); |
279 | |||
280 | bfqg_put(bfqg); | ||
281 | } | 281 | } |
282 | 282 | ||
283 | /* @stats = 0 */ | 283 | /* @stats = 0 */ |
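
The bfq-cgroup.c reordering is a put-ordering fix: bfqg_put() may drop the last reference and free the bfqg, after which bfqg_to_blkg(bfqg) would read freed memory, so the blkg reference has to be dropped first, while the bfqg is still safely held. A stripped-down standalone analogue (plain C with toy refcounts; these are not the blk-cgroup types):

#include <stdio.h>
#include <stdlib.h>

struct toy_blkg { int refs; };
struct toy_bfqg { int refs; struct toy_blkg *blkg; };

static void blkg_put(struct toy_blkg *b) { if (--b->refs == 0) free(b); }
static void bfqg_put(struct toy_bfqg *g) { if (--g->refs == 0) free(g); }

/*
 * Safe ordering: dereference g (to find its blkg) and drop the blkg
 * reference while our own reference still keeps g alive. Putting g
 * first could free it, turning the g->blkg load into a use-after-free.
 */
static void bfqg_and_blkg_put(struct toy_bfqg *g)
{
	blkg_put(g->blkg);
	bfqg_put(g);
}

int main(void)
{
	struct toy_blkg *b = calloc(1, sizeof(*b));
	struct toy_bfqg *g = calloc(1, sizeof(*g));

	b->refs = 1;
	g->refs = 1;
	g->blkg = b;

	bfqg_and_blkg_put(g);    /* releases both objects with no dangling read */
	printf("released\n");
	return 0;
}
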
diff --git a/block/bio.c b/block/bio.c index b12966e415d3..8c680a776171 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -2015,7 +2015,8 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) | |||
2015 | { | 2015 | { |
2016 | if (unlikely(bio->bi_blkg)) | 2016 | if (unlikely(bio->bi_blkg)) |
2017 | return -EBUSY; | 2017 | return -EBUSY; |
2018 | blkg_get(blkg); | 2018 | if (!blkg_try_get(blkg)) |
2019 | return -ENODEV; | ||
2019 | bio->bi_blkg = blkg; | 2020 | bio->bi_blkg = blkg; |
2020 | return 0; | 2021 | return 0; |
2021 | } | 2022 | } |
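
bio_associate_blkg() now uses blkg_try_get(), so the reference is taken only if the blkg's refcount has not already hit zero and a bio can no longer pin a group that is mid-teardown; callers see -ENODEV instead, and blk-throttle falls back to the root blkg later in this series. A toy standalone version of the try-get pattern (plain C; the types are invented, only the error values mirror the kernel's):

#include <stdio.h>

struct toy_blkg { int refcnt; };

/* Take a reference only if the group is still live (refcount not zero). */
static struct toy_blkg *try_get(struct toy_blkg *blkg)
{
	if (blkg->refcnt == 0)       /* refcount_inc_not_zero() in the kernel */
		return NULL;
	blkg->refcnt++;
	return blkg;
}

static int associate(struct toy_blkg **owner, struct toy_blkg *blkg)
{
	if (*owner)
		return -16;          /* -EBUSY: bio already has a blkg */
	if (!try_get(blkg))
		return -19;          /* -ENODEV: blkg is already being torn down */
	*owner = blkg;
	return 0;
}

int main(void)
{
	struct toy_blkg live = { .refcnt = 1 }, dying = { .refcnt = 0 };
	struct toy_blkg *owner = NULL;

	printf("live blkg : %d\n", associate(&owner, &live));    /* 0 */
	owner = NULL;
	printf("dying blkg: %d\n", associate(&owner, &dying));   /* -19 */
	return 0;
}
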
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 694595b29b8f..c19f9078da1e 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -310,28 +310,11 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, | |||
310 | } | 310 | } |
311 | } | 311 | } |
312 | 312 | ||
313 | static void blkg_pd_offline(struct blkcg_gq *blkg) | ||
314 | { | ||
315 | int i; | ||
316 | |||
317 | lockdep_assert_held(blkg->q->queue_lock); | ||
318 | lockdep_assert_held(&blkg->blkcg->lock); | ||
319 | |||
320 | for (i = 0; i < BLKCG_MAX_POLS; i++) { | ||
321 | struct blkcg_policy *pol = blkcg_policy[i]; | ||
322 | |||
323 | if (blkg->pd[i] && !blkg->pd[i]->offline && | ||
324 | pol->pd_offline_fn) { | ||
325 | pol->pd_offline_fn(blkg->pd[i]); | ||
326 | blkg->pd[i]->offline = true; | ||
327 | } | ||
328 | } | ||
329 | } | ||
330 | |||
331 | static void blkg_destroy(struct blkcg_gq *blkg) | 313 | static void blkg_destroy(struct blkcg_gq *blkg) |
332 | { | 314 | { |
333 | struct blkcg *blkcg = blkg->blkcg; | 315 | struct blkcg *blkcg = blkg->blkcg; |
334 | struct blkcg_gq *parent = blkg->parent; | 316 | struct blkcg_gq *parent = blkg->parent; |
317 | int i; | ||
335 | 318 | ||
336 | lockdep_assert_held(blkg->q->queue_lock); | 319 | lockdep_assert_held(blkg->q->queue_lock); |
337 | lockdep_assert_held(&blkcg->lock); | 320 | lockdep_assert_held(&blkcg->lock); |
@@ -340,6 +323,13 @@ static void blkg_destroy(struct blkcg_gq *blkg) | |||
340 | WARN_ON_ONCE(list_empty(&blkg->q_node)); | 323 | WARN_ON_ONCE(list_empty(&blkg->q_node)); |
341 | WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); | 324 | WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); |
342 | 325 | ||
326 | for (i = 0; i < BLKCG_MAX_POLS; i++) { | ||
327 | struct blkcg_policy *pol = blkcg_policy[i]; | ||
328 | |||
329 | if (blkg->pd[i] && pol->pd_offline_fn) | ||
330 | pol->pd_offline_fn(blkg->pd[i]); | ||
331 | } | ||
332 | |||
343 | if (parent) { | 333 | if (parent) { |
344 | blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes); | 334 | blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes); |
345 | blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios); | 335 | blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios); |
@@ -382,7 +372,6 @@ static void blkg_destroy_all(struct request_queue *q) | |||
382 | struct blkcg *blkcg = blkg->blkcg; | 372 | struct blkcg *blkcg = blkg->blkcg; |
383 | 373 | ||
384 | spin_lock(&blkcg->lock); | 374 | spin_lock(&blkcg->lock); |
385 | blkg_pd_offline(blkg); | ||
386 | blkg_destroy(blkg); | 375 | blkg_destroy(blkg); |
387 | spin_unlock(&blkcg->lock); | 376 | spin_unlock(&blkcg->lock); |
388 | } | 377 | } |
@@ -1053,59 +1042,64 @@ static struct cftype blkcg_legacy_files[] = { | |||
1053 | { } /* terminate */ | 1042 | { } /* terminate */ |
1054 | }; | 1043 | }; |
1055 | 1044 | ||
1045 | /* | ||
1046 | * blkcg destruction is a three-stage process. | ||
1047 | * | ||
1048 | * 1. Destruction starts. The blkcg_css_offline() callback is invoked | ||
1049 | * which offlines writeback. Here we tie the next stage of blkg destruction | ||
1050 | * to the completion of writeback associated with the blkcg. This lets us | ||
1051 | * avoid punting potentially large amounts of outstanding writeback to root | ||
1052 | * while maintaining any ongoing policies. The next stage is triggered when | ||
1053 | * the nr_cgwbs count goes to zero. | ||
1054 | * | ||
1055 | * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called | ||
1056 | * and handles the destruction of blkgs. Here the css reference held by | ||
1057 | * the blkg is put back eventually allowing blkcg_css_free() to be called. | ||
1058 | * This work may occur in cgwb_release_workfn() on the cgwb_release | ||
1059 | * workqueue. Any submitted ios that fail to get the blkg ref will be | ||
1060 | * punted to the root_blkg. | ||
1061 | * | ||
1062 | * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called. | ||
1063 | * This finally frees the blkcg. | ||
1064 | */ | ||
1065 | |||
1056 | /** | 1066 | /** |
1057 | * blkcg_css_offline - cgroup css_offline callback | 1067 | * blkcg_css_offline - cgroup css_offline callback |
1058 | * @css: css of interest | 1068 | * @css: css of interest |
1059 | * | 1069 | * |
1060 | * This function is called when @css is about to go away and responsible | 1070 | * This function is called when @css is about to go away. Here the cgwbs are |
1061 | * for offlining all blkgs pd and killing all wbs associated with @css. | 1071 | * offlined first and only once writeback associated with the blkcg has |
1062 | * blkgs pd offline should be done while holding both q and blkcg locks. | 1072 | * finished do we start step 2 (see above). |
1063 | * As blkcg lock is nested inside q lock, this function performs reverse | ||
1064 | * double lock dancing. | ||
1065 | * | ||
1066 | * This is the blkcg counterpart of ioc_release_fn(). | ||
1067 | */ | 1073 | */ |
1068 | static void blkcg_css_offline(struct cgroup_subsys_state *css) | 1074 | static void blkcg_css_offline(struct cgroup_subsys_state *css) |
1069 | { | 1075 | { |
1070 | struct blkcg *blkcg = css_to_blkcg(css); | 1076 | struct blkcg *blkcg = css_to_blkcg(css); |
1071 | struct blkcg_gq *blkg; | ||
1072 | |||
1073 | spin_lock_irq(&blkcg->lock); | ||
1074 | |||
1075 | hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { | ||
1076 | struct request_queue *q = blkg->q; | ||
1077 | |||
1078 | if (spin_trylock(q->queue_lock)) { | ||
1079 | blkg_pd_offline(blkg); | ||
1080 | spin_unlock(q->queue_lock); | ||
1081 | } else { | ||
1082 | spin_unlock_irq(&blkcg->lock); | ||
1083 | cpu_relax(); | ||
1084 | spin_lock_irq(&blkcg->lock); | ||
1085 | } | ||
1086 | } | ||
1087 | |||
1088 | spin_unlock_irq(&blkcg->lock); | ||
1089 | 1077 | ||
1078 | /* this prevents anyone from attaching or migrating to this blkcg */ | ||
1090 | wb_blkcg_offline(blkcg); | 1079 | wb_blkcg_offline(blkcg); |
1080 | |||
1081 | /* put the base cgwb reference allowing step 2 to be triggered */ | ||
1082 | blkcg_cgwb_put(blkcg); | ||
1091 | } | 1083 | } |
1092 | 1084 | ||
1093 | /** | 1085 | /** |
1094 | * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg | 1086 | * blkcg_destroy_blkgs - responsible for shooting down blkgs |
1095 | * @blkcg: blkcg of interest | 1087 | * @blkcg: blkcg of interest |
1096 | * | 1088 | * |
1097 | * This function is called when blkcg css is about to free and responsible for | 1089 | * blkgs should be removed while holding both q and blkcg locks. As blkcg lock |
1098 | * destroying all blkgs associated with @blkcg. | ||
1099 | * blkgs should be removed while holding both q and blkcg locks. As blkcg lock | ||
1100 | * is nested inside q lock, this function performs reverse double lock dancing. | 1090 | * is nested inside q lock, this function performs reverse double lock dancing. |
1091 | * Destroying the blkgs releases the reference held on the blkcg's css allowing | ||
1092 | * blkcg_css_free to eventually be called. | ||
1093 | * | ||
1094 | * This is the blkcg counterpart of ioc_release_fn(). | ||
1101 | */ | 1095 | */ |
1102 | static void blkcg_destroy_all_blkgs(struct blkcg *blkcg) | 1096 | void blkcg_destroy_blkgs(struct blkcg *blkcg) |
1103 | { | 1097 | { |
1104 | spin_lock_irq(&blkcg->lock); | 1098 | spin_lock_irq(&blkcg->lock); |
1099 | |||
1105 | while (!hlist_empty(&blkcg->blkg_list)) { | 1100 | while (!hlist_empty(&blkcg->blkg_list)) { |
1106 | struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, | 1101 | struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, |
1107 | struct blkcg_gq, | 1102 | struct blkcg_gq, blkcg_node); |
1108 | blkcg_node); | ||
1109 | struct request_queue *q = blkg->q; | 1103 | struct request_queue *q = blkg->q; |
1110 | 1104 | ||
1111 | if (spin_trylock(q->queue_lock)) { | 1105 | if (spin_trylock(q->queue_lock)) { |
@@ -1117,6 +1111,7 @@ static void blkcg_destroy_all_blkgs(struct blkcg *blkcg) | |||
1117 | spin_lock_irq(&blkcg->lock); | 1111 | spin_lock_irq(&blkcg->lock); |
1118 | } | 1112 | } |
1119 | } | 1113 | } |
1114 | |||
1120 | spin_unlock_irq(&blkcg->lock); | 1115 | spin_unlock_irq(&blkcg->lock); |
1121 | } | 1116 | } |
1122 | 1117 | ||
@@ -1125,8 +1120,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css) | |||
1125 | struct blkcg *blkcg = css_to_blkcg(css); | 1120 | struct blkcg *blkcg = css_to_blkcg(css); |
1126 | int i; | 1121 | int i; |
1127 | 1122 | ||
1128 | blkcg_destroy_all_blkgs(blkcg); | ||
1129 | |||
1130 | mutex_lock(&blkcg_pol_mutex); | 1123 | mutex_lock(&blkcg_pol_mutex); |
1131 | 1124 | ||
1132 | list_del(&blkcg->all_blkcgs_node); | 1125 | list_del(&blkcg->all_blkcgs_node); |
@@ -1189,6 +1182,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) | |||
1189 | INIT_HLIST_HEAD(&blkcg->blkg_list); | 1182 | INIT_HLIST_HEAD(&blkcg->blkg_list); |
1190 | #ifdef CONFIG_CGROUP_WRITEBACK | 1183 | #ifdef CONFIG_CGROUP_WRITEBACK |
1191 | INIT_LIST_HEAD(&blkcg->cgwb_list); | 1184 | INIT_LIST_HEAD(&blkcg->cgwb_list); |
1185 | refcount_set(&blkcg->cgwb_refcnt, 1); | ||
1192 | #endif | 1186 | #endif |
1193 | list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); | 1187 | list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); |
1194 | 1188 | ||
@@ -1480,11 +1474,8 @@ void blkcg_deactivate_policy(struct request_queue *q, | |||
1480 | 1474 | ||
1481 | list_for_each_entry(blkg, &q->blkg_list, q_node) { | 1475 | list_for_each_entry(blkg, &q->blkg_list, q_node) { |
1482 | if (blkg->pd[pol->plid]) { | 1476 | if (blkg->pd[pol->plid]) { |
1483 | if (!blkg->pd[pol->plid]->offline && | 1477 | if (pol->pd_offline_fn) |
1484 | pol->pd_offline_fn) { | ||
1485 | pol->pd_offline_fn(blkg->pd[pol->plid]); | 1478 | pol->pd_offline_fn(blkg->pd[pol->plid]); |
1486 | blkg->pd[pol->plid]->offline = true; | ||
1487 | } | ||
1488 | pol->pd_free_fn(blkg->pd[pol->plid]); | 1479 | pol->pd_free_fn(blkg->pd[pol->plid]); |
1489 | blkg->pd[pol->plid] = NULL; | 1480 | blkg->pd[pol->plid] = NULL; |
1490 | } | 1481 | } |
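
The new comment block above describes a staged teardown: css_offline() only drops the base cgwb reference, and the blkgs are destroyed once the last cgwb reference is gone, presumably via blkcg_cgwb_put() calling blkcg_destroy_blkgs() (that helper is not shown in this hunk). A standalone analogue of that gating (plain C; an ordinary counter stands in for the refcount_t, and the struct is invented):

#include <stdio.h>

struct toy_blkcg {
	int cgwb_refcnt;        /* refcount_t cgwb_refcnt in the kernel */
	int blkgs_destroyed;
};

static void destroy_blkgs(struct toy_blkcg *blkcg)
{
	blkcg->blkgs_destroyed = 1;     /* stage 2: blkcg_destroy_blkgs() */
}

static void cgwb_put(struct toy_blkcg *blkcg)
{
	if (--blkcg->cgwb_refcnt == 0)  /* last writeback reference gone */
		destroy_blkgs(blkcg);
}

static void css_offline(struct toy_blkcg *blkcg)
{
	/* stage 1: writeback is offlined, then the base reference is dropped */
	cgwb_put(blkcg);
}

int main(void)
{
	struct toy_blkcg blkcg = { .cgwb_refcnt = 2 };  /* base ref + one active cgwb */

	css_offline(&blkcg);
	printf("after offline  : %d\n", blkcg.blkgs_destroyed);  /* 0: writeback still running */

	cgwb_put(&blkcg);        /* the last cgwb finishes its writeback */
	printf("after last cgwb: %d\n", blkcg.blkgs_destroyed);  /* 1: blkgs torn down */
	return 0;
}
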
diff --git a/block/blk-core.c b/block/blk-core.c index dee56c282efb..4dbc93f43b38 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -2163,9 +2163,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) | |||
2163 | { | 2163 | { |
2164 | const int op = bio_op(bio); | 2164 | const int op = bio_op(bio); |
2165 | 2165 | ||
2166 | if (part->policy && (op_is_write(op) && !op_is_flush(op))) { | 2166 | if (part->policy && op_is_write(op)) { |
2167 | char b[BDEVNAME_SIZE]; | 2167 | char b[BDEVNAME_SIZE]; |
2168 | 2168 | ||
2169 | if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) | ||
2170 | return false; | ||
2171 | |||
2169 | WARN_ONCE(1, | 2172 | WARN_ONCE(1, |
2170 | "generic_make_request: Trying to write " | 2173 | "generic_make_request: Trying to write " |
2171 | "to read-only block-device %s (partno %d)\n", | 2174 | "to read-only block-device %s (partno %d)\n", |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a3eede00d302..01d0620a4e4a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -2129,8 +2129,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td) | |||
2129 | static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) | 2129 | static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) |
2130 | { | 2130 | { |
2131 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW | 2131 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
2132 | if (bio->bi_css) | 2132 | /* fallback to root_blkg if we fail to get a blkg ref */ |
2133 | bio_associate_blkg(bio, tg_to_blkg(tg)); | 2133 | if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV)) |
2134 | bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg); | ||
2134 | bio_issue_init(&bio->bi_issue, bio_sectors(bio)); | 2135 | bio_issue_init(&bio->bi_issue, bio_sectors(bio)); |
2135 | #endif | 2136 | #endif |
2136 | } | 2137 | } |
diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 84507d3e9a98..8e20a0677dcf 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c | |||
@@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb) | |||
123 | } | 123 | } |
124 | } | 124 | } |
125 | 125 | ||
126 | static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | 126 | static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw, |
127 | enum wbt_flags wb_acct) | ||
127 | { | 128 | { |
128 | struct rq_wb *rwb = RQWB(rqos); | ||
129 | struct rq_wait *rqw; | ||
130 | int inflight, limit; | 129 | int inflight, limit; |
131 | 130 | ||
132 | if (!(wb_acct & WBT_TRACKED)) | ||
133 | return; | ||
134 | |||
135 | rqw = get_rq_wait(rwb, wb_acct); | ||
136 | inflight = atomic_dec_return(&rqw->inflight); | 131 | inflight = atomic_dec_return(&rqw->inflight); |
137 | 132 | ||
138 | /* | 133 | /* |
@@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | |||
166 | int diff = limit - inflight; | 161 | int diff = limit - inflight; |
167 | 162 | ||
168 | if (!inflight || diff >= rwb->wb_background / 2) | 163 | if (!inflight || diff >= rwb->wb_background / 2) |
169 | wake_up(&rqw->wait); | 164 | wake_up_all(&rqw->wait); |
170 | } | 165 | } |
171 | } | 166 | } |
172 | 167 | ||
168 | static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | ||
169 | { | ||
170 | struct rq_wb *rwb = RQWB(rqos); | ||
171 | struct rq_wait *rqw; | ||
172 | |||
173 | if (!(wb_acct & WBT_TRACKED)) | ||
174 | return; | ||
175 | |||
176 | rqw = get_rq_wait(rwb, wb_acct); | ||
177 | wbt_rqw_done(rwb, rqw, wb_acct); | ||
178 | } | ||
179 | |||
173 | /* | 180 | /* |
174 | * Called on completion of a request. Note that it's also called when | 181 | * Called on completion of a request. Note that it's also called when |
175 | * a request is merged, when the request gets freed. | 182 | * a request is merged, when the request gets freed. |
@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw) | |||
481 | return limit; | 488 | return limit; |
482 | } | 489 | } |
483 | 490 | ||
491 | struct wbt_wait_data { | ||
492 | struct wait_queue_entry wq; | ||
493 | struct task_struct *task; | ||
494 | struct rq_wb *rwb; | ||
495 | struct rq_wait *rqw; | ||
496 | unsigned long rw; | ||
497 | bool got_token; | ||
498 | }; | ||
499 | |||
500 | static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode, | ||
501 | int wake_flags, void *key) | ||
502 | { | ||
503 | struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data, | ||
504 | wq); | ||
505 | |||
506 | /* | ||
507 | * If we fail to get a budget, return -1 to interrupt the wake up | ||
508 | * loop in __wake_up_common. | ||
509 | */ | ||
510 | if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw))) | ||
511 | return -1; | ||
512 | |||
513 | data->got_token = true; | ||
514 | list_del_init(&curr->entry); | ||
515 | wake_up_process(data->task); | ||
516 | return 1; | ||
517 | } | ||
518 | |||
484 | /* | 519 | /* |
485 | * Block if we will exceed our limit, or if we are currently waiting for | 520 | * Block if we will exceed our limit, or if we are currently waiting for |
486 | * the timer to kick off queuing again. | 521 | * the timer to kick off queuing again. |
@@ -491,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, | |||
491 | __acquires(lock) | 526 | __acquires(lock) |
492 | { | 527 | { |
493 | struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); | 528 | struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); |
494 | DECLARE_WAITQUEUE(wait, current); | 529 | struct wbt_wait_data data = { |
530 | .wq = { | ||
531 | .func = wbt_wake_function, | ||
532 | .entry = LIST_HEAD_INIT(data.wq.entry), | ||
533 | }, | ||
534 | .task = current, | ||
535 | .rwb = rwb, | ||
536 | .rqw = rqw, | ||
537 | .rw = rw, | ||
538 | }; | ||
495 | bool has_sleeper; | 539 | bool has_sleeper; |
496 | 540 | ||
497 | has_sleeper = wq_has_sleeper(&rqw->wait); | 541 | has_sleeper = wq_has_sleeper(&rqw->wait); |
498 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) | 542 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) |
499 | return; | 543 | return; |
500 | 544 | ||
501 | add_wait_queue_exclusive(&rqw->wait, &wait); | 545 | prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); |
502 | do { | 546 | do { |
503 | set_current_state(TASK_UNINTERRUPTIBLE); | 547 | if (data.got_token) |
548 | break; | ||
504 | 549 | ||
505 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) | 550 | if (!has_sleeper && |
551 | rq_wait_inc_below(rqw, get_limit(rwb, rw))) { | ||
552 | finish_wait(&rqw->wait, &data.wq); | ||
553 | |||
554 | /* | ||
555 | * We raced with wbt_wake_function() getting a token, | ||
556 | * which means we now have two. Put our local token | ||
557 | * and wake anyone else potentially waiting for one. | ||
558 | */ | ||
559 | if (data.got_token) | ||
560 | wbt_rqw_done(rwb, rqw, wb_acct); | ||
506 | break; | 561 | break; |
562 | } | ||
507 | 563 | ||
508 | if (lock) { | 564 | if (lock) { |
509 | spin_unlock_irq(lock); | 565 | spin_unlock_irq(lock); |
@@ -511,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, | |||
511 | spin_lock_irq(lock); | 567 | spin_lock_irq(lock); |
512 | } else | 568 | } else |
513 | io_schedule(); | 569 | io_schedule(); |
570 | |||
514 | has_sleeper = false; | 571 | has_sleeper = false; |
515 | } while (1); | 572 | } while (1); |
516 | 573 | ||
517 | __set_current_state(TASK_RUNNING); | 574 | finish_wait(&rqw->wait, &data.wq); |
518 | remove_wait_queue(&rqw->wait, &wait); | ||
519 | } | 575 | } |
520 | 576 | ||
521 | static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) | 577 | static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) |
@@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) | |||
580 | return; | 636 | return; |
581 | } | 637 | } |
582 | 638 | ||
583 | if (current_is_kswapd()) | ||
584 | flags |= WBT_KSWAPD; | ||
585 | if (bio_op(bio) == REQ_OP_DISCARD) | ||
586 | flags |= WBT_DISCARD; | ||
587 | |||
588 | __wbt_wait(rwb, flags, bio->bi_opf, lock); | 639 | __wbt_wait(rwb, flags, bio->bi_opf, lock); |
589 | 640 | ||
590 | if (!blk_stat_is_active(rwb->cb)) | 641 | if (!blk_stat_is_active(rwb->cb)) |
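
The blk-wbt rework replaces the wake-everyone-and-retry loop with a custom wake function: each waiter is granted an inflight token while the waitqueue lock is held, and returning -1 stops the wake-up walk in __wake_up_common() as soon as the budget runs out, so waiters are no longer woken just to go back to sleep. A standalone analogue of that token hand-out (plain C over a simple array; the waiter struct and budget counter are invented stand-ins for wait_queue_entry and rq_wait_inc_below()):

#include <stdio.h>

#define NWAITERS 4

struct toy_waiter { int got_token; };

static int budget = 2;    /* what rq_wait_inc_below() guards in the kernel */

static int try_get_budget(void)
{
	if (budget == 0)
		return 0;
	budget--;
	return 1;
}

/* Analogue of wbt_wake_function(): hand out a token or stop the walk. */
static int wake_function(struct toy_waiter *w)
{
	if (!try_get_budget())
		return -1;        /* interrupt the wake-up loop */
	w->got_token = 1;         /* this waiter now owns an inflight slot */
	return 1;
}

int main(void)
{
	struct toy_waiter waiters[NWAITERS] = { { 0 } };
	int i;

	for (i = 0; i < NWAITERS; i++)
		if (wake_function(&waiters[i]) < 0)
			break;            /* __wake_up_common() stops its walk here */

	for (i = 0; i < NWAITERS; i++)
		printf("waiter %d: %s\n", i,
		       waiters[i].got_token ? "got a token" : "still queued");
	return 0;
}
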
diff --git a/block/bsg.c b/block/bsg.c index db588add6ba6..9a442c23a715 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
@@ -37,7 +37,7 @@ struct bsg_device { | |||
37 | struct request_queue *queue; | 37 | struct request_queue *queue; |
38 | spinlock_t lock; | 38 | spinlock_t lock; |
39 | struct hlist_node dev_list; | 39 | struct hlist_node dev_list; |
40 | atomic_t ref_count; | 40 | refcount_t ref_count; |
41 | char name[20]; | 41 | char name[20]; |
42 | int max_queue; | 42 | int max_queue; |
43 | }; | 43 | }; |
@@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd) | |||
252 | 252 | ||
253 | mutex_lock(&bsg_mutex); | 253 | mutex_lock(&bsg_mutex); |
254 | 254 | ||
255 | if (!atomic_dec_and_test(&bd->ref_count)) { | 255 | if (!refcount_dec_and_test(&bd->ref_count)) { |
256 | mutex_unlock(&bsg_mutex); | 256 | mutex_unlock(&bsg_mutex); |
257 | return 0; | 257 | return 0; |
258 | } | 258 | } |
@@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode, | |||
290 | 290 | ||
291 | bd->queue = rq; | 291 | bd->queue = rq; |
292 | 292 | ||
293 | atomic_set(&bd->ref_count, 1); | 293 | refcount_set(&bd->ref_count, 1); |
294 | hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); | 294 | hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); |
295 | 295 | ||
296 | strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); | 296 | strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); |
@@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) | |||
308 | 308 | ||
309 | hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { | 309 | hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { |
310 | if (bd->queue == q) { | 310 | if (bd->queue == q) { |
311 | atomic_inc(&bd->ref_count); | 311 | refcount_inc(&bd->ref_count); |
312 | goto found; | 312 | goto found; |
313 | } | 313 | } |
314 | } | 314 | } |
diff --git a/block/elevator.c b/block/elevator.c index 5ea6e7d600e4..6a06b5d040e5 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e) | |||
895 | spin_lock(&elv_list_lock); | 895 | spin_lock(&elv_list_lock); |
896 | if (elevator_find(e->elevator_name, e->uses_mq)) { | 896 | if (elevator_find(e->elevator_name, e->uses_mq)) { |
897 | spin_unlock(&elv_list_lock); | 897 | spin_unlock(&elv_list_lock); |
898 | if (e->icq_cache) | 898 | kmem_cache_destroy(e->icq_cache); |
899 | kmem_cache_destroy(e->icq_cache); | ||
900 | return -EBUSY; | 899 | return -EBUSY; |
901 | } | 900 | } |
902 | list_add_tail(&e->list, &elv_list); | 901 | list_add_tail(&e->list, &elv_list); |
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 9706613eecf9..bf64cfa30feb 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev) | |||
879 | #define LPSS_GPIODEF0_DMA_LLP BIT(13) | 879 | #define LPSS_GPIODEF0_DMA_LLP BIT(13) |
880 | 880 | ||
881 | static DEFINE_MUTEX(lpss_iosf_mutex); | 881 | static DEFINE_MUTEX(lpss_iosf_mutex); |
882 | static bool lpss_iosf_d3_entered; | 882 | static bool lpss_iosf_d3_entered = true; |
883 | 883 | ||
884 | static void lpss_iosf_enter_d3_state(void) | 884 | static void lpss_iosf_enter_d3_state(void) |
885 | { | 885 | { |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 292088fcc624..d2e29a19890d 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -35,11 +35,11 @@ | |||
35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
36 | #ifdef CONFIG_X86 | 36 | #ifdef CONFIG_X86 |
37 | #include <asm/mpspec.h> | 37 | #include <asm/mpspec.h> |
38 | #include <linux/dmi.h> | ||
38 | #endif | 39 | #endif |
39 | #include <linux/acpi_iort.h> | 40 | #include <linux/acpi_iort.h> |
40 | #include <linux/pci.h> | 41 | #include <linux/pci.h> |
41 | #include <acpi/apei.h> | 42 | #include <acpi/apei.h> |
42 | #include <linux/dmi.h> | ||
43 | #include <linux/suspend.h> | 43 | #include <linux/suspend.h> |
44 | 44 | ||
45 | #include "internal.h" | 45 | #include "internal.h" |
@@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = { | |||
82 | }, | 82 | }, |
83 | {} | 83 | {} |
84 | }; | 84 | }; |
85 | #else | ||
86 | static const struct dmi_system_id dsdt_dmi_table[] __initconst = { | ||
87 | {} | ||
88 | }; | ||
89 | #endif | 85 | #endif |
90 | 86 | ||
91 | /* -------------------------------------------------------------------------- | 87 | /* -------------------------------------------------------------------------- |
@@ -1033,11 +1029,16 @@ void __init acpi_early_init(void) | |||
1033 | 1029 | ||
1034 | acpi_permanent_mmap = true; | 1030 | acpi_permanent_mmap = true; |
1035 | 1031 | ||
1032 | #ifdef CONFIG_X86 | ||
1036 | /* | 1033 | /* |
1037 | * If the machine falls into the DMI check table, | 1034 | * If the machine falls into the DMI check table, |
1038 | * DSDT will be copied to memory | 1035 | * DSDT will be copied to memory. |
1036 | * Note that calling dmi_check_system() here on other architectures | ||
1037 | * would not be OK because only x86 initializes dmi early enough. | ||
1038 | * Thankfully only x86 systems need such quirks for now. | ||
1039 | */ | 1039 | */ |
1040 | dmi_check_system(dsdt_dmi_table); | 1040 | dmi_check_system(dsdt_dmi_table); |
1041 | #endif | ||
1041 | 1042 | ||
1042 | status = acpi_reallocate_root_table(); | 1043 | status = acpi_reallocate_root_table(); |
1043 | if (ACPI_FAILURE(status)) { | 1044 | if (ACPI_FAILURE(status)) { |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 172e32840256..599e01bcdef2 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -7394,4 +7394,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown); | |||
7394 | EXPORT_SYMBOL_GPL(ata_cable_ignore); | 7394 | EXPORT_SYMBOL_GPL(ata_cable_ignore); |
7395 | EXPORT_SYMBOL_GPL(ata_cable_sata); | 7395 | EXPORT_SYMBOL_GPL(ata_cable_sata); |
7396 | EXPORT_SYMBOL_GPL(ata_host_get); | 7396 | EXPORT_SYMBOL_GPL(ata_host_get); |
7397 | EXPORT_SYMBOL_GPL(ata_host_put); \ No newline at end of file | 7397 | EXPORT_SYMBOL_GPL(ata_host_put); |
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 5d4b72e21161..569a4a662dcd 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c | |||
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = { | |||
256 | .qc_issue = ftide010_qc_issue, | 256 | .qc_issue = ftide010_qc_issue, |
257 | }; | 257 | }; |
258 | 258 | ||
259 | static struct ata_port_info ftide010_port_info[] = { | 259 | static struct ata_port_info ftide010_port_info = { |
260 | { | 260 | .flags = ATA_FLAG_SLAVE_POSS, |
261 | .flags = ATA_FLAG_SLAVE_POSS, | 261 | .mwdma_mask = ATA_MWDMA2, |
262 | .mwdma_mask = ATA_MWDMA2, | 262 | .udma_mask = ATA_UDMA6, |
263 | .udma_mask = ATA_UDMA6, | 263 | .pio_mask = ATA_PIO4, |
264 | .pio_mask = ATA_PIO4, | 264 | .port_ops = &pata_ftide010_port_ops, |
265 | .port_ops = &pata_ftide010_port_ops, | ||
266 | }, | ||
267 | }; | 265 | }; |
268 | 266 | ||
269 | #if IS_ENABLED(CONFIG_SATA_GEMINI) | 267 | #if IS_ENABLED(CONFIG_SATA_GEMINI) |
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap) | |||
349 | } | 347 | } |
350 | 348 | ||
351 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, | 349 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, |
350 | struct ata_port_info *pi, | ||
352 | bool is_ata1) | 351 | bool is_ata1) |
353 | { | 352 | { |
354 | struct device *dev = ftide->dev; | 353 | struct device *dev = ftide->dev; |
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, | |||
373 | 372 | ||
374 | /* Flag port as SATA-capable */ | 373 | /* Flag port as SATA-capable */ |
375 | if (gemini_sata_bridge_enabled(sg, is_ata1)) | 374 | if (gemini_sata_bridge_enabled(sg, is_ata1)) |
376 | ftide010_port_info[0].flags |= ATA_FLAG_SATA; | 375 | pi->flags |= ATA_FLAG_SATA; |
376 | |||
377 | /* This device has broken DMA, only PIO works */ | ||
378 | if (of_machine_is_compatible("itian,sq201")) { | ||
379 | pi->mwdma_mask = 0; | ||
380 | pi->udma_mask = 0; | ||
381 | } | ||
377 | 382 | ||
378 | /* | 383 | /* |
379 | * We assume that a simple 40-wire cable is used in the PATA mode. | 384 | * We assume that a simple 40-wire cable is used in the PATA mode. |
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, | |||
435 | } | 440 | } |
436 | #else | 441 | #else |
437 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, | 442 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, |
443 | struct ata_port_info *pi, | ||
438 | bool is_ata1) | 444 | bool is_ata1) |
439 | { | 445 | { |
440 | return -ENOTSUPP; | 446 | return -ENOTSUPP; |
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) | |||
446 | { | 452 | { |
447 | struct device *dev = &pdev->dev; | 453 | struct device *dev = &pdev->dev; |
448 | struct device_node *np = dev->of_node; | 454 | struct device_node *np = dev->of_node; |
449 | const struct ata_port_info pi = ftide010_port_info[0]; | 455 | struct ata_port_info pi = ftide010_port_info; |
450 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 456 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
451 | struct ftide010 *ftide; | 457 | struct ftide010 *ftide; |
452 | struct resource *res; | 458 | struct resource *res; |
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) | |||
490 | * are ATA0. This will also set up the cable types. | 496 | * are ATA0. This will also set up the cable types. |
491 | */ | 497 | */ |
492 | ret = pata_ftide010_gemini_init(ftide, | 498 | ret = pata_ftide010_gemini_init(ftide, |
499 | &pi, | ||
493 | (res->start == 0x63400000)); | 500 | (res->start == 0x63400000)); |
494 | if (ret) | 501 | if (ret) |
495 | goto err_dis_clk; | 502 | goto err_dis_clk; |
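The pata_ftide010 change copies the port info onto the probe stack and lets the Gemini init helper patch that copy, including a DT-keyed quirk that disables DMA on the SQ201 board. A hedged sketch of just that quirk (the compatible string comes from the hunk; the helper itself is illustrative):

#include <linux/of.h>
#include <linux/libata.h>

static void ftide010_apply_board_quirks(struct ata_port_info *pi)
{
        /* the Itian Square One SQ201 has broken DMA; fall back to PIO only */
        if (of_machine_is_compatible("itian,sq201")) {
                pi->mwdma_mask = 0;
                pi->udma_mask = 0;
        }
}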
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c8a1cb0b6136..817320c7c4c1 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -417,25 +417,23 @@ static ssize_t show_valid_zones(struct device *dev, | |||
417 | int nid; | 417 | int nid; |
418 | 418 | ||
419 | /* | 419 | /* |
420 | * The block contains more than one zone can not be offlined. | ||
421 | * This can happen e.g. for ZONE_DMA and ZONE_DMA32 | ||
422 | */ | ||
423 | if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn)) | ||
424 | return sprintf(buf, "none\n"); | ||
425 | |||
426 | start_pfn = valid_start_pfn; | ||
427 | nr_pages = valid_end_pfn - start_pfn; | ||
428 | |||
429 | /* | ||
430 | * Check the existing zone. Make sure that we do that only on the | 420 | * Check the existing zone. Make sure that we do that only on the |
431 | * online nodes otherwise the page_zone is not reliable | 421 | * online nodes otherwise the page_zone is not reliable |
432 | */ | 422 | */ |
433 | if (mem->state == MEM_ONLINE) { | 423 | if (mem->state == MEM_ONLINE) { |
424 | /* | ||
425 | * The block contains more than one zone can not be offlined. | ||
426 | * This can happen e.g. for ZONE_DMA and ZONE_DMA32 | ||
427 | */ | ||
428 | if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, | ||
429 | &valid_start_pfn, &valid_end_pfn)) | ||
430 | return sprintf(buf, "none\n"); | ||
431 | start_pfn = valid_start_pfn; | ||
434 | strcat(buf, page_zone(pfn_to_page(start_pfn))->name); | 432 | strcat(buf, page_zone(pfn_to_page(start_pfn))->name); |
435 | goto out; | 433 | goto out; |
436 | } | 434 | } |
437 | 435 | ||
438 | nid = pfn_to_nid(start_pfn); | 436 | nid = mem->nid; |
439 | default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); | 437 | default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); |
440 | strcat(buf, default_zone->name); | 438 | strcat(buf, default_zone->name); |
441 | 439 | ||
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 8e2e4757adcb..5a42ae4078c2 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); | |||
185 | int of_pm_clk_add_clks(struct device *dev) | 185 | int of_pm_clk_add_clks(struct device *dev) |
186 | { | 186 | { |
187 | struct clk **clks; | 187 | struct clk **clks; |
188 | unsigned int i, count; | 188 | int i, count; |
189 | int ret; | 189 | int ret; |
190 | 190 | ||
191 | if (!dev || !dev->of_node) | 191 | if (!dev || !dev->of_node) |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 3863c00372bb..14a51254c3db 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
1239 | case NBD_SET_SOCK: | 1239 | case NBD_SET_SOCK: |
1240 | return nbd_add_socket(nbd, arg, false); | 1240 | return nbd_add_socket(nbd, arg, false); |
1241 | case NBD_SET_BLKSIZE: | 1241 | case NBD_SET_BLKSIZE: |
1242 | if (!arg || !is_power_of_2(arg) || arg < 512 || | ||
1243 | arg > PAGE_SIZE) | ||
1244 | return -EINVAL; | ||
1242 | nbd_size_set(nbd, arg, | 1245 | nbd_size_set(nbd, arg, |
1243 | div_s64(config->bytesize, arg)); | 1246 | div_s64(config->bytesize, arg)); |
1244 | return 0; | 1247 | return 0; |
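The nbd hunk rejects NBD_SET_BLKSIZE values that are zero, not a power of two, smaller than 512 bytes, or larger than PAGE_SIZE before they can reach nbd_size_set(). The same check as a standalone helper, a sketch with an invented name:

#include <linux/log2.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int nbd_validate_blksize(unsigned long arg)
{
        /* block size must be a power of two in [512, PAGE_SIZE] */
        if (!arg || !is_power_of_2(arg) || arg < 512 || arg > PAGE_SIZE)
                return -EINVAL;
        return 0;
}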
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 7915f3b03736..73ed5f3a862d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev, | |||
4207 | 4207 | ||
4208 | count += sprintf(&buf[count], "%s" | 4208 | count += sprintf(&buf[count], "%s" |
4209 | "pool_id %llu\npool_name %s\n" | 4209 | "pool_id %llu\npool_name %s\n" |
4210 | "pool_ns %s\n" | ||
4210 | "image_id %s\nimage_name %s\n" | 4211 | "image_id %s\nimage_name %s\n" |
4211 | "snap_id %llu\nsnap_name %s\n" | 4212 | "snap_id %llu\nsnap_name %s\n" |
4212 | "overlap %llu\n", | 4213 | "overlap %llu\n", |
4213 | !count ? "" : "\n", /* first? */ | 4214 | !count ? "" : "\n", /* first? */ |
4214 | spec->pool_id, spec->pool_name, | 4215 | spec->pool_id, spec->pool_name, |
4216 | spec->pool_ns ?: "", | ||
4215 | spec->image_id, spec->image_name ?: "(unknown)", | 4217 | spec->image_id, spec->image_name ?: "(unknown)", |
4216 | spec->snap_id, spec->snap_name, | 4218 | spec->snap_id, spec->snap_name, |
4217 | rbd_dev->parent_overlap); | 4219 | rbd_dev->parent_overlap); |
@@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev) | |||
4584 | &rbd_dev->header.features); | 4586 | &rbd_dev->header.features); |
4585 | } | 4587 | } |
4586 | 4588 | ||
4589 | struct parent_image_info { | ||
4590 | u64 pool_id; | ||
4591 | const char *pool_ns; | ||
4592 | const char *image_id; | ||
4593 | u64 snap_id; | ||
4594 | |||
4595 | bool has_overlap; | ||
4596 | u64 overlap; | ||
4597 | }; | ||
4598 | |||
4599 | /* | ||
4600 | * The caller is responsible for @pii. | ||
4601 | */ | ||
4602 | static int decode_parent_image_spec(void **p, void *end, | ||
4603 | struct parent_image_info *pii) | ||
4604 | { | ||
4605 | u8 struct_v; | ||
4606 | u32 struct_len; | ||
4607 | int ret; | ||
4608 | |||
4609 | ret = ceph_start_decoding(p, end, 1, "ParentImageSpec", | ||
4610 | &struct_v, &struct_len); | ||
4611 | if (ret) | ||
4612 | return ret; | ||
4613 | |||
4614 | ceph_decode_64_safe(p, end, pii->pool_id, e_inval); | ||
4615 | pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); | ||
4616 | if (IS_ERR(pii->pool_ns)) { | ||
4617 | ret = PTR_ERR(pii->pool_ns); | ||
4618 | pii->pool_ns = NULL; | ||
4619 | return ret; | ||
4620 | } | ||
4621 | pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); | ||
4622 | if (IS_ERR(pii->image_id)) { | ||
4623 | ret = PTR_ERR(pii->image_id); | ||
4624 | pii->image_id = NULL; | ||
4625 | return ret; | ||
4626 | } | ||
4627 | ceph_decode_64_safe(p, end, pii->snap_id, e_inval); | ||
4628 | return 0; | ||
4629 | |||
4630 | e_inval: | ||
4631 | return -EINVAL; | ||
4632 | } | ||
4633 | |||
4634 | static int __get_parent_info(struct rbd_device *rbd_dev, | ||
4635 | struct page *req_page, | ||
4636 | struct page *reply_page, | ||
4637 | struct parent_image_info *pii) | ||
4638 | { | ||
4639 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | ||
4640 | size_t reply_len = PAGE_SIZE; | ||
4641 | void *p, *end; | ||
4642 | int ret; | ||
4643 | |||
4644 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
4645 | "rbd", "parent_get", CEPH_OSD_FLAG_READ, | ||
4646 | req_page, sizeof(u64), reply_page, &reply_len); | ||
4647 | if (ret) | ||
4648 | return ret == -EOPNOTSUPP ? 1 : ret; | ||
4649 | |||
4650 | p = page_address(reply_page); | ||
4651 | end = p + reply_len; | ||
4652 | ret = decode_parent_image_spec(&p, end, pii); | ||
4653 | if (ret) | ||
4654 | return ret; | ||
4655 | |||
4656 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
4657 | "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ, | ||
4658 | req_page, sizeof(u64), reply_page, &reply_len); | ||
4659 | if (ret) | ||
4660 | return ret; | ||
4661 | |||
4662 | p = page_address(reply_page); | ||
4663 | end = p + reply_len; | ||
4664 | ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval); | ||
4665 | if (pii->has_overlap) | ||
4666 | ceph_decode_64_safe(&p, end, pii->overlap, e_inval); | ||
4667 | |||
4668 | return 0; | ||
4669 | |||
4670 | e_inval: | ||
4671 | return -EINVAL; | ||
4672 | } | ||
4673 | |||
4674 | /* | ||
4675 | * The caller is responsible for @pii. | ||
4676 | */ | ||
4677 | static int __get_parent_info_legacy(struct rbd_device *rbd_dev, | ||
4678 | struct page *req_page, | ||
4679 | struct page *reply_page, | ||
4680 | struct parent_image_info *pii) | ||
4681 | { | ||
4682 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | ||
4683 | size_t reply_len = PAGE_SIZE; | ||
4684 | void *p, *end; | ||
4685 | int ret; | ||
4686 | |||
4687 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
4688 | "rbd", "get_parent", CEPH_OSD_FLAG_READ, | ||
4689 | req_page, sizeof(u64), reply_page, &reply_len); | ||
4690 | if (ret) | ||
4691 | return ret; | ||
4692 | |||
4693 | p = page_address(reply_page); | ||
4694 | end = p + reply_len; | ||
4695 | ceph_decode_64_safe(&p, end, pii->pool_id, e_inval); | ||
4696 | pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); | ||
4697 | if (IS_ERR(pii->image_id)) { | ||
4698 | ret = PTR_ERR(pii->image_id); | ||
4699 | pii->image_id = NULL; | ||
4700 | return ret; | ||
4701 | } | ||
4702 | ceph_decode_64_safe(&p, end, pii->snap_id, e_inval); | ||
4703 | pii->has_overlap = true; | ||
4704 | ceph_decode_64_safe(&p, end, pii->overlap, e_inval); | ||
4705 | |||
4706 | return 0; | ||
4707 | |||
4708 | e_inval: | ||
4709 | return -EINVAL; | ||
4710 | } | ||
4711 | |||
4712 | static int get_parent_info(struct rbd_device *rbd_dev, | ||
4713 | struct parent_image_info *pii) | ||
4714 | { | ||
4715 | struct page *req_page, *reply_page; | ||
4716 | void *p; | ||
4717 | int ret; | ||
4718 | |||
4719 | req_page = alloc_page(GFP_KERNEL); | ||
4720 | if (!req_page) | ||
4721 | return -ENOMEM; | ||
4722 | |||
4723 | reply_page = alloc_page(GFP_KERNEL); | ||
4724 | if (!reply_page) { | ||
4725 | __free_page(req_page); | ||
4726 | return -ENOMEM; | ||
4727 | } | ||
4728 | |||
4729 | p = page_address(req_page); | ||
4730 | ceph_encode_64(&p, rbd_dev->spec->snap_id); | ||
4731 | ret = __get_parent_info(rbd_dev, req_page, reply_page, pii); | ||
4732 | if (ret > 0) | ||
4733 | ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, | ||
4734 | pii); | ||
4735 | |||
4736 | __free_page(req_page); | ||
4737 | __free_page(reply_page); | ||
4738 | return ret; | ||
4739 | } | ||
4740 | |||
4587 | static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | 4741 | static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) |
4588 | { | 4742 | { |
4589 | struct rbd_spec *parent_spec; | 4743 | struct rbd_spec *parent_spec; |
4590 | size_t size; | 4744 | struct parent_image_info pii = { 0 }; |
4591 | void *reply_buf = NULL; | ||
4592 | __le64 snapid; | ||
4593 | void *p; | ||
4594 | void *end; | ||
4595 | u64 pool_id; | ||
4596 | char *image_id; | ||
4597 | u64 snap_id; | ||
4598 | u64 overlap; | ||
4599 | int ret; | 4745 | int ret; |
4600 | 4746 | ||
4601 | parent_spec = rbd_spec_alloc(); | 4747 | parent_spec = rbd_spec_alloc(); |
4602 | if (!parent_spec) | 4748 | if (!parent_spec) |
4603 | return -ENOMEM; | 4749 | return -ENOMEM; |
4604 | 4750 | ||
4605 | size = sizeof (__le64) + /* pool_id */ | 4751 | ret = get_parent_info(rbd_dev, &pii); |
4606 | sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */ | 4752 | if (ret) |
4607 | sizeof (__le64) + /* snap_id */ | ||
4608 | sizeof (__le64); /* overlap */ | ||
4609 | reply_buf = kmalloc(size, GFP_KERNEL); | ||
4610 | if (!reply_buf) { | ||
4611 | ret = -ENOMEM; | ||
4612 | goto out_err; | 4753 | goto out_err; |
4613 | } | ||
4614 | 4754 | ||
4615 | snapid = cpu_to_le64(rbd_dev->spec->snap_id); | 4755 | dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", |
4616 | ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, | 4756 | __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, |
4617 | &rbd_dev->header_oloc, "get_parent", | 4757 | pii.has_overlap, pii.overlap); |
4618 | &snapid, sizeof(snapid), reply_buf, size); | ||
4619 | dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); | ||
4620 | if (ret < 0) | ||
4621 | goto out_err; | ||
4622 | 4758 | ||
4623 | p = reply_buf; | 4759 | if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { |
4624 | end = reply_buf + ret; | ||
4625 | ret = -ERANGE; | ||
4626 | ceph_decode_64_safe(&p, end, pool_id, out_err); | ||
4627 | if (pool_id == CEPH_NOPOOL) { | ||
4628 | /* | 4760 | /* |
4629 | * Either the parent never existed, or we have | 4761 | * Either the parent never existed, or we have |
4630 | * record of it but the image got flattened so it no | 4762 | * record of it but the image got flattened so it no |
@@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
4633 | * overlap to 0. The effect of this is that all new | 4765 | * overlap to 0. The effect of this is that all new |
4634 | * requests will be treated as if the image had no | 4766 | * requests will be treated as if the image had no |
4635 | * parent. | 4767 | * parent. |
4768 | * | ||
4769 | * If !pii.has_overlap, the parent image spec is not | ||
4770 | * applicable. It's there to avoid duplication in each | ||
4771 | * snapshot record. | ||
4636 | */ | 4772 | */ |
4637 | if (rbd_dev->parent_overlap) { | 4773 | if (rbd_dev->parent_overlap) { |
4638 | rbd_dev->parent_overlap = 0; | 4774 | rbd_dev->parent_overlap = 0; |
@@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
4647 | /* The ceph file layout needs to fit pool id in 32 bits */ | 4783 | /* The ceph file layout needs to fit pool id in 32 bits */ |
4648 | 4784 | ||
4649 | ret = -EIO; | 4785 | ret = -EIO; |
4650 | if (pool_id > (u64)U32_MAX) { | 4786 | if (pii.pool_id > (u64)U32_MAX) { |
4651 | rbd_warn(NULL, "parent pool id too large (%llu > %u)", | 4787 | rbd_warn(NULL, "parent pool id too large (%llu > %u)", |
4652 | (unsigned long long)pool_id, U32_MAX); | 4788 | (unsigned long long)pii.pool_id, U32_MAX); |
4653 | goto out_err; | 4789 | goto out_err; |
4654 | } | 4790 | } |
4655 | 4791 | ||
4656 | image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); | ||
4657 | if (IS_ERR(image_id)) { | ||
4658 | ret = PTR_ERR(image_id); | ||
4659 | goto out_err; | ||
4660 | } | ||
4661 | ceph_decode_64_safe(&p, end, snap_id, out_err); | ||
4662 | ceph_decode_64_safe(&p, end, overlap, out_err); | ||
4663 | |||
4664 | /* | 4792 | /* |
4665 | * The parent won't change (except when the clone is | 4793 | * The parent won't change (except when the clone is |
4666 | * flattened, already handled that). So we only need to | 4794 | * flattened, already handled that). So we only need to |
4667 | * record the parent spec we have not already done so. | 4795 | * record the parent spec we have not already done so. |
4668 | */ | 4796 | */ |
4669 | if (!rbd_dev->parent_spec) { | 4797 | if (!rbd_dev->parent_spec) { |
4670 | parent_spec->pool_id = pool_id; | 4798 | parent_spec->pool_id = pii.pool_id; |
4671 | parent_spec->image_id = image_id; | 4799 | if (pii.pool_ns && *pii.pool_ns) { |
4672 | parent_spec->snap_id = snap_id; | 4800 | parent_spec->pool_ns = pii.pool_ns; |
4673 | 4801 | pii.pool_ns = NULL; | |
4674 | /* TODO: support cloning across namespaces */ | ||
4675 | if (rbd_dev->spec->pool_ns) { | ||
4676 | parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns, | ||
4677 | GFP_KERNEL); | ||
4678 | if (!parent_spec->pool_ns) { | ||
4679 | ret = -ENOMEM; | ||
4680 | goto out_err; | ||
4681 | } | ||
4682 | } | 4802 | } |
4803 | parent_spec->image_id = pii.image_id; | ||
4804 | pii.image_id = NULL; | ||
4805 | parent_spec->snap_id = pii.snap_id; | ||
4683 | 4806 | ||
4684 | rbd_dev->parent_spec = parent_spec; | 4807 | rbd_dev->parent_spec = parent_spec; |
4685 | parent_spec = NULL; /* rbd_dev now owns this */ | 4808 | parent_spec = NULL; /* rbd_dev now owns this */ |
4686 | } else { | ||
4687 | kfree(image_id); | ||
4688 | } | 4809 | } |
4689 | 4810 | ||
4690 | /* | 4811 | /* |
4691 | * We always update the parent overlap. If it's zero we issue | 4812 | * We always update the parent overlap. If it's zero we issue |
4692 | * a warning, as we will proceed as if there was no parent. | 4813 | * a warning, as we will proceed as if there was no parent. |
4693 | */ | 4814 | */ |
4694 | if (!overlap) { | 4815 | if (!pii.overlap) { |
4695 | if (parent_spec) { | 4816 | if (parent_spec) { |
4696 | /* refresh, careful to warn just once */ | 4817 | /* refresh, careful to warn just once */ |
4697 | if (rbd_dev->parent_overlap) | 4818 | if (rbd_dev->parent_overlap) |
@@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
4702 | rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); | 4823 | rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); |
4703 | } | 4824 | } |
4704 | } | 4825 | } |
4705 | rbd_dev->parent_overlap = overlap; | 4826 | rbd_dev->parent_overlap = pii.overlap; |
4706 | 4827 | ||
4707 | out: | 4828 | out: |
4708 | ret = 0; | 4829 | ret = 0; |
4709 | out_err: | 4830 | out_err: |
4710 | kfree(reply_buf); | 4831 | kfree(pii.pool_ns); |
4832 | kfree(pii.image_id); | ||
4711 | rbd_spec_put(parent_spec); | 4833 | rbd_spec_put(parent_spec); |
4712 | |||
4713 | return ret; | 4834 | return ret; |
4714 | } | 4835 | } |
4715 | 4836 | ||
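The rbd rework first issues the newer "parent_get"/"parent_overlap_get" class methods and only falls back to the legacy "get_parent" call when the OSD reports -EOPNOTSUPP, which __get_parent_info() translates into a positive return value. A hedged sketch of that dispatch shape in isolation, with the two probes passed in as callbacks (everything here is illustrative, not the rbd helpers themselves):

static int get_info_with_fallback(void *ctx,
                                  int (*try_new)(void *ctx),
                                  int (*try_legacy)(void *ctx))
{
        int ret = try_new(ctx);

        if (ret > 0)            /* positive return: new method unsupported, use legacy */
                ret = try_legacy(ctx);
        return ret;             /* 0 on success, negative errno on failure */
}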
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index b55b245e8052..fd1e19f1a49f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants, | |||
84 | "Maximum number of grants to map persistently"); | 84 | "Maximum number of grants to map persistently"); |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * How long a persistent grant is allowed to remain allocated without being in | ||
88 | * use. The time is in seconds, 0 means indefinitely long. | ||
89 | */ | ||
90 | |||
91 | static unsigned int xen_blkif_pgrant_timeout = 60; | ||
92 | module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout, | ||
93 | uint, 0644); | ||
94 | MODULE_PARM_DESC(persistent_grant_unused_seconds, | ||
95 | "Time in seconds an unused persistent grant is allowed to " | ||
96 | "remain allocated. Default is 60, 0 means unlimited."); | ||
97 | |||
98 | /* | ||
87 | * Maximum number of rings/queues blkback supports, allow as many queues as there | 99 | * Maximum number of rings/queues blkback supports, allow as many queues as there |
88 | * are CPUs if user has not specified a value. | 100 | * are CPUs if user has not specified a value. |
89 | */ | 101 | */ |
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644); | |||
123 | /* Number of free pages to remove on each call to gnttab_free_pages */ | 135 | /* Number of free pages to remove on each call to gnttab_free_pages */ |
124 | #define NUM_BATCH_FREE_PAGES 10 | 136 | #define NUM_BATCH_FREE_PAGES 10 |
125 | 137 | ||
138 | static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt) | ||
139 | { | ||
140 | return xen_blkif_pgrant_timeout && | ||
141 | (jiffies - persistent_gnt->last_used >= | ||
142 | HZ * xen_blkif_pgrant_timeout); | ||
143 | } | ||
144 | |||
126 | static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) | 145 | static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) |
127 | { | 146 | { |
128 | unsigned long flags; | 147 | unsigned long flags; |
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring, | |||
236 | } | 255 | } |
237 | } | 256 | } |
238 | 257 | ||
239 | bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); | 258 | persistent_gnt->active = true; |
240 | set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | ||
241 | /* Add new node and rebalance tree. */ | 259 | /* Add new node and rebalance tree. */ |
242 | rb_link_node(&(persistent_gnt->node), parent, new); | 260 | rb_link_node(&(persistent_gnt->node), parent, new); |
243 | rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); | 261 | rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); |
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, | |||
261 | else if (gref > data->gnt) | 279 | else if (gref > data->gnt) |
262 | node = node->rb_right; | 280 | node = node->rb_right; |
263 | else { | 281 | else { |
264 | if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { | 282 | if (data->active) { |
265 | pr_alert_ratelimited("requesting a grant already in use\n"); | 283 | pr_alert_ratelimited("requesting a grant already in use\n"); |
266 | return NULL; | 284 | return NULL; |
267 | } | 285 | } |
268 | set_bit(PERSISTENT_GNT_ACTIVE, data->flags); | 286 | data->active = true; |
269 | atomic_inc(&ring->persistent_gnt_in_use); | 287 | atomic_inc(&ring->persistent_gnt_in_use); |
270 | return data; | 288 | return data; |
271 | } | 289 | } |
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, | |||
276 | static void put_persistent_gnt(struct xen_blkif_ring *ring, | 294 | static void put_persistent_gnt(struct xen_blkif_ring *ring, |
277 | struct persistent_gnt *persistent_gnt) | 295 | struct persistent_gnt *persistent_gnt) |
278 | { | 296 | { |
279 | if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | 297 | if (!persistent_gnt->active) |
280 | pr_alert_ratelimited("freeing a grant already unused\n"); | 298 | pr_alert_ratelimited("freeing a grant already unused\n"); |
281 | set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | 299 | persistent_gnt->last_used = jiffies; |
282 | clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | 300 | persistent_gnt->active = false; |
283 | atomic_dec(&ring->persistent_gnt_in_use); | 301 | atomic_dec(&ring->persistent_gnt_in_use); |
284 | } | 302 | } |
285 | 303 | ||
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) | |||
371 | struct persistent_gnt *persistent_gnt; | 389 | struct persistent_gnt *persistent_gnt; |
372 | struct rb_node *n; | 390 | struct rb_node *n; |
373 | unsigned int num_clean, total; | 391 | unsigned int num_clean, total; |
374 | bool scan_used = false, clean_used = false; | 392 | bool scan_used = false; |
375 | struct rb_root *root; | 393 | struct rb_root *root; |
376 | 394 | ||
377 | if (ring->persistent_gnt_c < xen_blkif_max_pgrants || | ||
378 | (ring->persistent_gnt_c == xen_blkif_max_pgrants && | ||
379 | !ring->blkif->vbd.overflow_max_grants)) { | ||
380 | goto out; | ||
381 | } | ||
382 | |||
383 | if (work_busy(&ring->persistent_purge_work)) { | 395 | if (work_busy(&ring->persistent_purge_work)) { |
384 | pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); | 396 | pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); |
385 | goto out; | 397 | goto out; |
386 | } | 398 | } |
387 | 399 | ||
388 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; | 400 | if (ring->persistent_gnt_c < xen_blkif_max_pgrants || |
389 | num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; | 401 | (ring->persistent_gnt_c == xen_blkif_max_pgrants && |
390 | num_clean = min(ring->persistent_gnt_c, num_clean); | 402 | !ring->blkif->vbd.overflow_max_grants)) { |
391 | if ((num_clean == 0) || | 403 | num_clean = 0; |
392 | (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use)))) | 404 | } else { |
393 | goto out; | 405 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; |
406 | num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + | ||
407 | num_clean; | ||
408 | num_clean = min(ring->persistent_gnt_c, num_clean); | ||
409 | pr_debug("Going to purge at least %u persistent grants\n", | ||
410 | num_clean); | ||
411 | } | ||
394 | 412 | ||
395 | /* | 413 | /* |
396 | * At this point, we can assure that there will be no calls | 414 | * At this point, we can assure that there will be no calls |
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) | |||
401 | * number of grants. | 419 | * number of grants. |
402 | */ | 420 | */ |
403 | 421 | ||
404 | total = num_clean; | 422 | total = 0; |
405 | |||
406 | pr_debug("Going to purge %u persistent grants\n", num_clean); | ||
407 | 423 | ||
408 | BUG_ON(!list_empty(&ring->persistent_purge_list)); | 424 | BUG_ON(!list_empty(&ring->persistent_purge_list)); |
409 | root = &ring->persistent_gnts; | 425 | root = &ring->persistent_gnts; |
@@ -412,46 +428,37 @@ purge_list: | |||
412 | BUG_ON(persistent_gnt->handle == | 428 | BUG_ON(persistent_gnt->handle == |
413 | BLKBACK_INVALID_HANDLE); | 429 | BLKBACK_INVALID_HANDLE); |
414 | 430 | ||
415 | if (clean_used) { | 431 | if (persistent_gnt->active) |
416 | clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | ||
417 | continue; | 432 | continue; |
418 | } | 433 | if (!scan_used && !persistent_gnt_timeout(persistent_gnt)) |
419 | |||
420 | if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | ||
421 | continue; | 434 | continue; |
422 | if (!scan_used && | 435 | if (scan_used && total >= num_clean) |
423 | (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) | ||
424 | continue; | 436 | continue; |
425 | 437 | ||
426 | rb_erase(&persistent_gnt->node, root); | 438 | rb_erase(&persistent_gnt->node, root); |
427 | list_add(&persistent_gnt->remove_node, | 439 | list_add(&persistent_gnt->remove_node, |
428 | &ring->persistent_purge_list); | 440 | &ring->persistent_purge_list); |
429 | if (--num_clean == 0) | 441 | total++; |
430 | goto finished; | ||
431 | } | 442 | } |
432 | /* | 443 | /* |
433 | * If we get here it means we also need to start cleaning | 444 | * Check whether we also need to start cleaning |
434 | * grants that were used since last purge in order to cope | 445 | * grants that were used since last purge in order to cope |
435 | * with the requested num | 446 | * with the requested num |
436 | */ | 447 | */ |
437 | if (!scan_used && !clean_used) { | 448 | if (!scan_used && total < num_clean) { |
438 | pr_debug("Still missing %u purged frames\n", num_clean); | 449 | pr_debug("Still missing %u purged frames\n", num_clean - total); |
439 | scan_used = true; | 450 | scan_used = true; |
440 | goto purge_list; | 451 | goto purge_list; |
441 | } | 452 | } |
442 | finished: | ||
443 | if (!clean_used) { | ||
444 | pr_debug("Finished scanning for grants to clean, removing used flag\n"); | ||
445 | clean_used = true; | ||
446 | goto purge_list; | ||
447 | } | ||
448 | 453 | ||
449 | ring->persistent_gnt_c -= (total - num_clean); | 454 | if (total) { |
450 | ring->blkif->vbd.overflow_max_grants = 0; | 455 | ring->persistent_gnt_c -= total; |
456 | ring->blkif->vbd.overflow_max_grants = 0; | ||
451 | 457 | ||
452 | /* We can defer this work */ | 458 | /* We can defer this work */ |
453 | schedule_work(&ring->persistent_purge_work); | 459 | schedule_work(&ring->persistent_purge_work); |
454 | pr_debug("Purged %u/%u\n", (total - num_clean), total); | 460 | pr_debug("Purged %u/%u\n", num_clean, total); |
461 | } | ||
455 | 462 | ||
456 | out: | 463 | out: |
457 | return; | 464 | return; |
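On the blkback side, the WAS_ACTIVE flag bitmap is replaced by a last_used jiffies stamp plus an active bool, and a grant becomes a purge candidate once it has sat idle for persistent_grant_unused_seconds. A minimal sketch of that idle test with simplified names (the module parameter plumbing mirrors the hunk; the struct is illustrative):

#include <linux/jiffies.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

static unsigned int pgrant_timeout = 60;        /* seconds, 0 = never expire */
module_param(pgrant_timeout, uint, 0644);

struct grant_entry {
        unsigned long last_used;                /* jiffies when the grant was put back */
        bool active;
};

static bool grant_timed_out(const struct grant_entry *g)
{
        /* only idle grants can expire; a timeout of 0 disables purging by age */
        return !g->active && pgrant_timeout &&
               time_after(jiffies, g->last_used + (unsigned long)pgrant_timeout * HZ);
}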
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index ecb35fe8ca8d..1d3002d773f7 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
@@ -233,16 +233,6 @@ struct xen_vbd { | |||
233 | 233 | ||
234 | struct backend_info; | 234 | struct backend_info; |
235 | 235 | ||
236 | /* Number of available flags */ | ||
237 | #define PERSISTENT_GNT_FLAGS_SIZE 2 | ||
238 | /* This persistent grant is currently in use */ | ||
239 | #define PERSISTENT_GNT_ACTIVE 0 | ||
240 | /* | ||
241 | * This persistent grant has been used, this flag is set when we remove the | ||
242 | * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently. | ||
243 | */ | ||
244 | #define PERSISTENT_GNT_WAS_ACTIVE 1 | ||
245 | |||
246 | /* Number of requests that we can fit in a ring */ | 236 | /* Number of requests that we can fit in a ring */ |
247 | #define XEN_BLKIF_REQS_PER_PAGE 32 | 237 | #define XEN_BLKIF_REQS_PER_PAGE 32 |
248 | 238 | ||
@@ -250,7 +240,8 @@ struct persistent_gnt { | |||
250 | struct page *page; | 240 | struct page *page; |
251 | grant_ref_t gnt; | 241 | grant_ref_t gnt; |
252 | grant_handle_t handle; | 242 | grant_handle_t handle; |
253 | DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); | 243 | unsigned long last_used; |
244 | bool active; | ||
254 | struct rb_node node; | 245 | struct rb_node node; |
255 | struct list_head remove_node; | 246 | struct list_head remove_node; |
256 | }; | 247 | }; |
@@ -278,7 +269,6 @@ struct xen_blkif_ring { | |||
278 | wait_queue_head_t pending_free_wq; | 269 | wait_queue_head_t pending_free_wq; |
279 | 270 | ||
280 | /* Tree to store persistent grants. */ | 271 | /* Tree to store persistent grants. */ |
281 | spinlock_t pers_gnts_lock; | ||
282 | struct rb_root persistent_gnts; | 272 | struct rb_root persistent_gnts; |
283 | unsigned int persistent_gnt_c; | 273 | unsigned int persistent_gnt_c; |
284 | atomic_t persistent_gnt_in_use; | 274 | atomic_t persistent_gnt_in_use; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8986adab9bf5..a71d817e900d 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/scatterlist.h> | 46 | #include <linux/scatterlist.h> |
47 | #include <linux/bitmap.h> | 47 | #include <linux/bitmap.h> |
48 | #include <linux/list.h> | 48 | #include <linux/list.h> |
49 | #include <linux/workqueue.h> | ||
49 | 50 | ||
50 | #include <xen/xen.h> | 51 | #include <xen/xen.h> |
51 | #include <xen/xenbus.h> | 52 | #include <xen/xenbus.h> |
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq) | |||
121 | 122 | ||
122 | static DEFINE_MUTEX(blkfront_mutex); | 123 | static DEFINE_MUTEX(blkfront_mutex); |
123 | static const struct block_device_operations xlvbd_block_fops; | 124 | static const struct block_device_operations xlvbd_block_fops; |
125 | static struct delayed_work blkfront_work; | ||
126 | static LIST_HEAD(info_list); | ||
124 | 127 | ||
125 | /* | 128 | /* |
126 | * Maximum number of segments in indirect requests, the actual value used by | 129 | * Maximum number of segments in indirect requests, the actual value used by |
@@ -216,6 +219,7 @@ struct blkfront_info | |||
216 | /* Save uncomplete reqs and bios for migration. */ | 219 | /* Save uncomplete reqs and bios for migration. */ |
217 | struct list_head requests; | 220 | struct list_head requests; |
218 | struct bio_list bio_list; | 221 | struct bio_list bio_list; |
222 | struct list_head info_list; | ||
219 | }; | 223 | }; |
220 | 224 | ||
221 | static unsigned int nr_minors; | 225 | static unsigned int nr_minors; |
@@ -1759,6 +1763,12 @@ abort_transaction: | |||
1759 | return err; | 1763 | return err; |
1760 | } | 1764 | } |
1761 | 1765 | ||
1766 | static void free_info(struct blkfront_info *info) | ||
1767 | { | ||
1768 | list_del(&info->info_list); | ||
1769 | kfree(info); | ||
1770 | } | ||
1771 | |||
1762 | /* Common code used when first setting up, and when resuming. */ | 1772 | /* Common code used when first setting up, and when resuming. */ |
1763 | static int talk_to_blkback(struct xenbus_device *dev, | 1773 | static int talk_to_blkback(struct xenbus_device *dev, |
1764 | struct blkfront_info *info) | 1774 | struct blkfront_info *info) |
@@ -1880,7 +1890,10 @@ again: | |||
1880 | destroy_blkring: | 1890 | destroy_blkring: |
1881 | blkif_free(info, 0); | 1891 | blkif_free(info, 0); |
1882 | 1892 | ||
1883 | kfree(info); | 1893 | mutex_lock(&blkfront_mutex); |
1894 | free_info(info); | ||
1895 | mutex_unlock(&blkfront_mutex); | ||
1896 | |||
1884 | dev_set_drvdata(&dev->dev, NULL); | 1897 | dev_set_drvdata(&dev->dev, NULL); |
1885 | 1898 | ||
1886 | return err; | 1899 | return err; |
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1991 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); | 2004 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); |
1992 | dev_set_drvdata(&dev->dev, info); | 2005 | dev_set_drvdata(&dev->dev, info); |
1993 | 2006 | ||
2007 | mutex_lock(&blkfront_mutex); | ||
2008 | list_add(&info->info_list, &info_list); | ||
2009 | mutex_unlock(&blkfront_mutex); | ||
2010 | |||
1994 | return 0; | 2011 | return 0; |
1995 | } | 2012 | } |
1996 | 2013 | ||
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) | |||
2301 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) | 2318 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) |
2302 | indirect_segments = 0; | 2319 | indirect_segments = 0; |
2303 | info->max_indirect_segments = indirect_segments; | 2320 | info->max_indirect_segments = indirect_segments; |
2321 | |||
2322 | if (info->feature_persistent) { | ||
2323 | mutex_lock(&blkfront_mutex); | ||
2324 | schedule_delayed_work(&blkfront_work, HZ * 10); | ||
2325 | mutex_unlock(&blkfront_mutex); | ||
2326 | } | ||
2304 | } | 2327 | } |
2305 | 2328 | ||
2306 | /* | 2329 | /* |
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) | |||
2482 | mutex_unlock(&info->mutex); | 2505 | mutex_unlock(&info->mutex); |
2483 | 2506 | ||
2484 | if (!bdev) { | 2507 | if (!bdev) { |
2485 | kfree(info); | 2508 | mutex_lock(&blkfront_mutex); |
2509 | free_info(info); | ||
2510 | mutex_unlock(&blkfront_mutex); | ||
2486 | return 0; | 2511 | return 0; |
2487 | } | 2512 | } |
2488 | 2513 | ||
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) | |||
2502 | if (info && !bdev->bd_openers) { | 2527 | if (info && !bdev->bd_openers) { |
2503 | xlvbd_release_gendisk(info); | 2528 | xlvbd_release_gendisk(info); |
2504 | disk->private_data = NULL; | 2529 | disk->private_data = NULL; |
2505 | kfree(info); | 2530 | mutex_lock(&blkfront_mutex); |
2531 | free_info(info); | ||
2532 | mutex_unlock(&blkfront_mutex); | ||
2506 | } | 2533 | } |
2507 | 2534 | ||
2508 | mutex_unlock(&bdev->bd_mutex); | 2535 | mutex_unlock(&bdev->bd_mutex); |
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) | |||
2585 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); | 2612 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); |
2586 | xlvbd_release_gendisk(info); | 2613 | xlvbd_release_gendisk(info); |
2587 | disk->private_data = NULL; | 2614 | disk->private_data = NULL; |
2588 | kfree(info); | 2615 | free_info(info); |
2589 | } | 2616 | } |
2590 | 2617 | ||
2591 | out: | 2618 | out: |
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = { | |||
2618 | .is_ready = blkfront_is_ready, | 2645 | .is_ready = blkfront_is_ready, |
2619 | }; | 2646 | }; |
2620 | 2647 | ||
2648 | static void purge_persistent_grants(struct blkfront_info *info) | ||
2649 | { | ||
2650 | unsigned int i; | ||
2651 | unsigned long flags; | ||
2652 | |||
2653 | for (i = 0; i < info->nr_rings; i++) { | ||
2654 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; | ||
2655 | struct grant *gnt_list_entry, *tmp; | ||
2656 | |||
2657 | spin_lock_irqsave(&rinfo->ring_lock, flags); | ||
2658 | |||
2659 | if (rinfo->persistent_gnts_c == 0) { | ||
2660 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
2661 | continue; | ||
2662 | } | ||
2663 | |||
2664 | list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, | ||
2665 | node) { | ||
2666 | if (gnt_list_entry->gref == GRANT_INVALID_REF || | ||
2667 | gnttab_query_foreign_access(gnt_list_entry->gref)) | ||
2668 | continue; | ||
2669 | |||
2670 | list_del(&gnt_list_entry->node); | ||
2671 | gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); | ||
2672 | rinfo->persistent_gnts_c--; | ||
2673 | __free_page(gnt_list_entry->page); | ||
2674 | kfree(gnt_list_entry); | ||
2675 | } | ||
2676 | |||
2677 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
2678 | } | ||
2679 | } | ||
2680 | |||
2681 | static void blkfront_delay_work(struct work_struct *work) | ||
2682 | { | ||
2683 | struct blkfront_info *info; | ||
2684 | bool need_schedule_work = false; | ||
2685 | |||
2686 | mutex_lock(&blkfront_mutex); | ||
2687 | |||
2688 | list_for_each_entry(info, &info_list, info_list) { | ||
2689 | if (info->feature_persistent) { | ||
2690 | need_schedule_work = true; | ||
2691 | mutex_lock(&info->mutex); | ||
2692 | purge_persistent_grants(info); | ||
2693 | mutex_unlock(&info->mutex); | ||
2694 | } | ||
2695 | } | ||
2696 | |||
2697 | if (need_schedule_work) | ||
2698 | schedule_delayed_work(&blkfront_work, HZ * 10); | ||
2699 | |||
2700 | mutex_unlock(&blkfront_mutex); | ||
2701 | } | ||
2702 | |||
2621 | static int __init xlblk_init(void) | 2703 | static int __init xlblk_init(void) |
2622 | { | 2704 | { |
2623 | int ret; | 2705 | int ret; |
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void) | |||
2626 | if (!xen_domain()) | 2708 | if (!xen_domain()) |
2627 | return -ENODEV; | 2709 | return -ENODEV; |
2628 | 2710 | ||
2711 | if (!xen_has_pv_disk_devices()) | ||
2712 | return -ENODEV; | ||
2713 | |||
2714 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
2715 | pr_warn("xen_blk: can't get major %d with name %s\n", | ||
2716 | XENVBD_MAJOR, DEV_NAME); | ||
2717 | return -ENODEV; | ||
2718 | } | ||
2719 | |||
2629 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) | 2720 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) |
2630 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; | 2721 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; |
2631 | 2722 | ||
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void) | |||
2641 | xen_blkif_max_queues = nr_cpus; | 2732 | xen_blkif_max_queues = nr_cpus; |
2642 | } | 2733 | } |
2643 | 2734 | ||
2644 | if (!xen_has_pv_disk_devices()) | 2735 | INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work); |
2645 | return -ENODEV; | ||
2646 | |||
2647 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
2648 | printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", | ||
2649 | XENVBD_MAJOR, DEV_NAME); | ||
2650 | return -ENODEV; | ||
2651 | } | ||
2652 | 2736 | ||
2653 | ret = xenbus_register_frontend(&blkfront_driver); | 2737 | ret = xenbus_register_frontend(&blkfront_driver); |
2654 | if (ret) { | 2738 | if (ret) { |
@@ -2663,6 +2747,8 @@ module_init(xlblk_init); | |||
2663 | 2747 | ||
2664 | static void __exit xlblk_exit(void) | 2748 | static void __exit xlblk_exit(void) |
2665 | { | 2749 | { |
2750 | cancel_delayed_work_sync(&blkfront_work); | ||
2751 | |||
2666 | xenbus_unregister_driver(&blkfront_driver); | 2752 | xenbus_unregister_driver(&blkfront_driver); |
2667 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); | 2753 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); |
2668 | kfree(minors); | 2754 | kfree(minors); |
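blkfront now keeps one module-wide delayed work item that walks info_list every 10 seconds while any device negotiated persistent grants, re-arms itself, and is cancelled with cancel_delayed_work_sync() on module exit. A hedged sketch of that self-rearming pattern with a placeholder worker body:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work periodic_work;
static bool keep_running = true;        /* stands in for "any device still needs this" */

static void periodic_fn(struct work_struct *work)
{
        /* ... per-period cleanup would go here ... */

        if (keep_running)
                schedule_delayed_work(&periodic_work, HZ * 10);   /* re-arm in 10s */
}

static void periodic_start(void)
{
        INIT_DELAYED_WORK(&periodic_work, periodic_fn);
        schedule_delayed_work(&periodic_work, HZ * 10);
}

static void periodic_stop(void)
{
        cancel_delayed_work_sync(&periodic_work);  /* also waits for a running instance */
}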
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 2df11cc08a46..845b0314ce3a 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig | |||
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL | |||
200 | depends on BT_HCIUART | 200 | depends on BT_HCIUART |
201 | depends on BT_HCIUART_SERDEV | 201 | depends on BT_HCIUART_SERDEV |
202 | depends on GPIOLIB | 202 | depends on GPIOLIB |
203 | depends on ACPI | ||
203 | select BT_HCIUART_3WIRE | 204 | select BT_HCIUART_3WIRE |
204 | select BT_RTL | 205 | select BT_RTL |
205 | help | 206 | help |
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index ed2a5c7cb77f..4593baff2bc9 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c | |||
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev) | |||
144 | fw_size = fw->size; | 144 | fw_size = fw->size; |
145 | 145 | ||
146 | /* The size of the patch header is 30 bytes and should be skipped */ | 146 | /* The size of the patch header is 30 bytes and should be skipped */ |
147 | if (fw_size < 30) | 147 | if (fw_size < 30) { |
148 | return -EINVAL; | 148 | err = -EINVAL; |
149 | goto free_fw; | ||
150 | } | ||
149 | 151 | ||
150 | fw_size -= 30; | 152 | fw_size -= 30; |
151 | fw_ptr += 30; | 153 | fw_ptr += 30; |
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev) | |||
172 | fw_ptr += dlen; | 174 | fw_ptr += dlen; |
173 | } | 175 | } |
174 | 176 | ||
177 | free_fw: | ||
175 | release_firmware(fw); | 178 | release_firmware(fw); |
176 | |||
177 | return err; | 179 | return err; |
178 | } | 180 | } |
179 | 181 | ||
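The btmtkuart fix routes the too-small-firmware case through a new free_fw label so the blob from request_firmware() is released on every exit path instead of leaking. A short sketch of that cleanup shape (the 30-byte header size mirrors the hunk; the rest is illustrative):

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/errno.h>

static int load_patch(struct device *dev, const char *name)
{
        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, name, dev);
        if (err)
                return err;

        if (fw->size < 30) {    /* patch header is 30 bytes; anything shorter is bogus */
                err = -EINVAL;
                goto free_fw;
        }

        /* ... consume fw->data + 30 for fw->size - 30 bytes ... */
        err = 0;

free_fw:
        release_firmware(fw);   /* runs on both success and error */
        return err;
}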
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index c9bac9dc4637..e4fe954e63a9 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata) | |||
498 | 498 | ||
499 | /** | 499 | /** |
500 | * syc_ioremap - ioremap register space for the interconnect target module | 500 | * syc_ioremap - ioremap register space for the interconnect target module |
501 | * @ddata: deviec driver data | 501 | * @ddata: device driver data |
502 | * | 502 | * |
503 | * Note that the interconnect target module registers can be anywhere | 503 | * Note that the interconnect target module registers can be anywhere |
504 | * within the first child device address space. For example, SGX has | 504 | * within the interconnect target module range. For example, SGX has |
505 | * them at offset 0x1fc00 in the 32MB module address space. We just | 505 | * them at offset 0x1fc00 in the 32MB module address space. And cpsw |
506 | * what we need around the interconnect target module registers. | 506 | * has them at offset 0x1200 in the CPSW_WR child. Usually the |
507 | * the interconnect target module registers are at the beginning of | ||
508 | * the module range though. | ||
507 | */ | 509 | */ |
508 | static int sysc_ioremap(struct sysc *ddata) | 510 | static int sysc_ioremap(struct sysc *ddata) |
509 | { | 511 | { |
510 | u32 size = 0; | 512 | int size; |
511 | |||
512 | if (ddata->offsets[SYSC_SYSSTATUS] >= 0) | ||
513 | size = ddata->offsets[SYSC_SYSSTATUS]; | ||
514 | else if (ddata->offsets[SYSC_SYSCONFIG] >= 0) | ||
515 | size = ddata->offsets[SYSC_SYSCONFIG]; | ||
516 | else if (ddata->offsets[SYSC_REVISION] >= 0) | ||
517 | size = ddata->offsets[SYSC_REVISION]; | ||
518 | else | ||
519 | return -EINVAL; | ||
520 | 513 | ||
521 | size &= 0xfff00; | 514 | size = max3(ddata->offsets[SYSC_REVISION], |
522 | size += SZ_256; | 515 | ddata->offsets[SYSC_SYSCONFIG], |
516 | ddata->offsets[SYSC_SYSSTATUS]); | ||
517 | |||
518 | if (size < 0 || (size + sizeof(u32)) > ddata->module_size) | ||
519 | return -EINVAL; | ||
523 | 520 | ||
524 | ddata->module_va = devm_ioremap(ddata->dev, | 521 | ddata->module_va = devm_ioremap(ddata->dev, |
525 | ddata->module_pa, | 522 | ddata->module_pa, |
526 | size); | 523 | size + sizeof(u32)); |
527 | if (!ddata->module_va) | 524 | if (!ddata->module_va) |
528 | return -EIO; | 525 | return -EIO; |
529 | 526 | ||
@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev) | |||
1224 | if (!pm_runtime_status_suspended(dev)) { | 1221 | if (!pm_runtime_status_suspended(dev)) { |
1225 | error = pm_generic_runtime_suspend(dev); | 1222 | error = pm_generic_runtime_suspend(dev); |
1226 | if (error) { | 1223 | if (error) { |
1227 | dev_err(dev, "%s error at %i: %i\n", | 1224 | dev_warn(dev, "%s busy at %i: %i\n", |
1228 | __func__, __LINE__, error); | 1225 | __func__, __LINE__, error); |
1229 | 1226 | ||
1230 | return error; | 1227 | return 0; |
1231 | } | 1228 | } |
1232 | 1229 | ||
1233 | error = sysc_runtime_suspend(ddata->dev); | 1230 | error = sysc_runtime_suspend(ddata->dev); |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 113fc6edb2b0..a5d5a96479bf 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, | |||
2546 | if (!CDROM_CAN(CDC_SELECT_DISC) || | 2546 | if (!CDROM_CAN(CDC_SELECT_DISC) || |
2547 | (arg == CDSL_CURRENT || arg == CDSL_NONE)) | 2547 | (arg == CDSL_CURRENT || arg == CDSL_NONE)) |
2548 | return cdi->ops->drive_status(cdi, CDSL_CURRENT); | 2548 | return cdi->ops->drive_status(cdi, CDSL_CURRENT); |
2549 | if (((int)arg >= cdi->capacity)) | 2549 | if (arg >= cdi->capacity) |
2550 | return -EINVAL; | 2550 | return -EINVAL; |
2551 | return cdrom_slot_status(cdi, arg); | 2551 | return cdrom_slot_status(cdi, arg); |
2552 | } | 2552 | } |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ce277ee0a28a..40728491f37b 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU | |||
566 | that CPU manufacturer (perhaps with the insistence or mandate | 566 | that CPU manufacturer (perhaps with the insistence or mandate |
567 | of a Nation State's intelligence or law enforcement agencies) | 567 | of a Nation State's intelligence or law enforcement agencies) |
568 | has not installed a hidden back door to compromise the CPU's | 568 | has not installed a hidden back door to compromise the CPU's |
569 | random number generation facilities. | 569 | random number generation facilities. This can also be configured |
570 | 570 | at boot with "random.trust_cpu=on/off". | |
diff --git a/drivers/char/random.c b/drivers/char/random.c index bf5f99fc36f1..c75b6cdf0053 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly; | |||
779 | 779 | ||
780 | static void invalidate_batched_entropy(void); | 780 | static void invalidate_batched_entropy(void); |
781 | 781 | ||
782 | static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); | ||
783 | static int __init parse_trust_cpu(char *arg) | ||
784 | { | ||
785 | return kstrtobool(arg, &trust_cpu); | ||
786 | } | ||
787 | early_param("random.trust_cpu", parse_trust_cpu); | ||
788 | |||
782 | static void crng_initialize(struct crng_state *crng) | 789 | static void crng_initialize(struct crng_state *crng) |
783 | { | 790 | { |
784 | int i; | 791 | int i; |
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng) | |||
799 | } | 806 | } |
800 | crng->state[i] ^= rv; | 807 | crng->state[i] ^= rv; |
801 | } | 808 | } |
802 | #ifdef CONFIG_RANDOM_TRUST_CPU | 809 | if (trust_cpu && arch_init) { |
803 | if (arch_init) { | ||
804 | crng_init = 2; | 810 | crng_init = 2; |
805 | pr_notice("random: crng done (trusting CPU's manufacturer)\n"); | 811 | pr_notice("random: crng done (trusting CPU's manufacturer)\n"); |
806 | } | 812 | } |
807 | #endif | ||
808 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; | 813 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; |
809 | } | 814 | } |
810 | 815 | ||
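The random.c change turns the compile-time CONFIG_RANDOM_TRUST_CPU decision into a boot-time default that "random.trust_cpu=on/off" can flip through early_param(). A hedged sketch of the same plumbing for a generic flag; the parameter name and Kconfig symbol below are made up:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/types.h>

/* IS_ENABLED() of a hypothetical Kconfig symbol picks the built-in default */
static bool my_feature __ro_after_init = IS_ENABLED(CONFIG_MY_FEATURE_DEFAULT);

static int __init parse_my_feature(char *arg)
{
        /* kstrtobool() accepts on/off, yes/no, y/n, 1/0 */
        return kstrtobool(arg, &my_feature);
}
early_param("subsys.my_feature", parse_my_feature);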
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index 740af90a9508..c5edf8f2fd19 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c | |||
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) | |||
558 | if (!clk_base) | 558 | if (!clk_base) |
559 | goto npcm7xx_init_error; | 559 | goto npcm7xx_init_error; |
560 | 560 | ||
561 | npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) * | 561 | npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws, |
562 | NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL); | 562 | NPCM7XX_NUM_CLOCKS), GFP_KERNEL); |
563 | if (!npcm7xx_clk_data) | 563 | if (!npcm7xx_clk_data) |
564 | goto npcm7xx_init_np_err; | 564 | goto npcm7xx_init_np_err; |
565 | 565 | ||
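The npcm7xx fix replaces a hand-rolled size computation (which took sizeof of the pointer rather than the structure) with struct_size(), which computes sizeof(*p) plus count elements of the trailing flexible array and saturates on overflow. A minimal sketch with an illustrative structure:

#include <linux/overflow.h>
#include <linux/slab.h>

struct item_table {
        unsigned int count;
        int ids[];                              /* flexible array member */
};

static struct item_table *alloc_table(unsigned int n)
{
        struct item_table *t;

        /* sizeof(*t) + n * sizeof(t->ids[0]), overflow-checked */
        t = kzalloc(struct_size(t, ids, n), GFP_KERNEL);
        if (t)
                t->count = n;
        return t;
}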
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index fb62f3938008..3a0996f2d556 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c | |||
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev) | |||
46 | clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), | 46 | clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), |
47 | 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); | 47 | 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); |
48 | 48 | ||
49 | clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk); | 49 | clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk); |
50 | 50 | ||
51 | hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", | 51 | hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", |
52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, | 52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 110483f0e3fb..e26a40971b26 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, | |||
379 | if (idx == -1) | 379 | if (idx == -1) |
380 | idx = i; /* first enabled state */ | 380 | idx = i; /* first enabled state */ |
381 | if (s->target_residency > data->predicted_us) { | 381 | if (s->target_residency > data->predicted_us) { |
382 | if (!tick_nohz_tick_stopped()) | 382 | if (data->predicted_us < TICK_USEC) |
383 | break; | 383 | break; |
384 | 384 | ||
385 | if (!tick_nohz_tick_stopped()) { | ||
386 | /* | ||
387 | * If the state selected so far is shallow, | ||
388 | * waking up early won't hurt, so retain the | ||
389 | * tick in that case and let the governor run | ||
390 | * again in the next iteration of the loop. | ||
391 | */ | ||
392 | expected_interval = drv->states[idx].target_residency; | ||
393 | break; | ||
394 | } | ||
395 | |||
385 | /* | 396 | /* |
386 | * If the state selected so far is shallow and this | 397 | * If the state selected so far is shallow and this |
387 | * state's target residency matches the time till the | 398 | * state's target residency matches the time till the |
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 6e61cc93c2b0..d7aa7d7ff102 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
679 | int ret = 0; | 679 | int ret = 0; |
680 | 680 | ||
681 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { | 681 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { |
682 | crypto_ablkcipher_set_flags(ablkcipher, | ||
683 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
684 | dev_err(jrdev, "key size mismatch\n"); | 682 | dev_err(jrdev, "key size mismatch\n"); |
685 | return -EINVAL; | 683 | goto badkey; |
686 | } | 684 | } |
687 | 685 | ||
688 | ctx->cdata.keylen = keylen; | 686 | ctx->cdata.keylen = keylen; |
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
715 | return ret; | 713 | return ret; |
716 | badkey: | 714 | badkey: |
717 | crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 715 | crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
718 | return 0; | 716 | return -EINVAL; |
719 | } | 717 | } |
720 | 718 | ||
721 | /* | 719 | /* |
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 578ea63a3109..f26d62e5533a 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); | 71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); |
72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); | 72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); |
73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | 73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | 75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | 78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, |
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); | 90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); |
91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); | 91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); |
92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | 92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | 94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
95 | } | 95 | } |
96 | 96 | ||
97 | /* RSA Job Completion handler */ | 97 | /* RSA Job Completion handler */ |
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
417 | goto unmap_p; | 417 | goto unmap_p; |
418 | } | 418 | } |
419 | 419 | ||
420 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | 420 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
421 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | 421 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
422 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | 422 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
423 | goto unmap_q; | 423 | goto unmap_q; |
424 | } | 424 | } |
425 | 425 | ||
426 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | 426 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
427 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | 427 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
428 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | 428 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
429 | goto unmap_tmp1; | 429 | goto unmap_tmp1; |
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
451 | return 0; | 451 | return 0; |
452 | 452 | ||
453 | unmap_tmp1: | 453 | unmap_tmp1: |
454 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 454 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
455 | unmap_q: | 455 | unmap_q: |
456 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | 456 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
457 | unmap_p: | 457 | unmap_p: |
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
504 | goto unmap_dq; | 504 | goto unmap_dq; |
505 | } | 505 | } |
506 | 506 | ||
507 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | 507 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
508 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | 508 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
509 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | 509 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
510 | goto unmap_qinv; | 510 | goto unmap_qinv; |
511 | } | 511 | } |
512 | 512 | ||
513 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | 513 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
514 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | 514 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
515 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | 515 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
516 | goto unmap_tmp1; | 516 | goto unmap_tmp1; |
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
538 | return 0; | 538 | return 0; |
539 | 539 | ||
540 | unmap_tmp1: | 540 | unmap_tmp1: |
541 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 541 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
542 | unmap_qinv: | 542 | unmap_qinv: |
543 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | 543 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
544 | unmap_dq: | 544 | unmap_dq: |
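The caampkc.c hunks switch tmp1/tmp2 from DMA_TO_DEVICE to DMA_BIDIRECTIONAL because the CAAM engine writes intermediate CRT results into those scratch buffers; a to-device-only mapping means the CPU may never observe what the device wrote back. The direction passed to dma_unmap_single() must also match the one used at map time, which is why the map calls, the unwind labels and the completion-path unmaps all change together. A minimal userspace model of those two invariants, using a made-up bookkeeping struct rather than the kernel DMA API:

#include <assert.h>
#include <stdio.h>

enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

struct mapping { enum dma_dir dir; int device_writes; };

/* Record a mapping; device_writes says whether the hardware will write
 * into the buffer (true for the tmp1/tmp2 scratch buffers here). */
static struct mapping map_buf(enum dma_dir dir, int device_writes)
{
    struct mapping m = { dir, device_writes };
    /* A buffer the device writes must not be mapped to-device only. */
    assert(!device_writes || dir != DMA_TO_DEVICE);
    return m;
}

static void unmap_buf(struct mapping m, enum dma_dir dir)
{
    /* Unmap direction must match the direction used when mapping. */
    assert(m.dir == dir);
}

int main(void)
{
    struct mapping tmp1 = map_buf(DMA_BIDIRECTIONAL, 1); /* the fixed mapping */
    unmap_buf(tmp1, DMA_BIDIRECTIONAL);
    printf("directions consistent\n");
    return 0;
}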
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index f4f258075b89..acdd72016ffe 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
190 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); | 190 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); |
191 | 191 | ||
192 | /* Unmap just-run descriptor so we can post-process */ | 192 | /* Unmap just-run descriptor so we can post-process */ |
193 | dma_unmap_single(dev, jrp->outring[hw_idx].desc, | 193 | dma_unmap_single(dev, |
194 | caam_dma_to_cpu(jrp->outring[hw_idx].desc), | ||
194 | jrp->entinfo[sw_idx].desc_size, | 195 | jrp->entinfo[sw_idx].desc_size, |
195 | DMA_TO_DEVICE); | 196 | DMA_TO_DEVICE); |
196 | 197 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 9a476bb6d4c7..af596455b420 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h | |||
@@ -35,6 +35,7 @@ struct nitrox_cmdq { | |||
35 | /* requests in backlog queues */ | 35 | /* requests in backlog queues */ |
36 | atomic_t backlog_count; | 36 | atomic_t backlog_count; |
37 | 37 | ||
38 | int write_idx; | ||
38 | /* command size 32B/64B */ | 39 | /* command size 32B/64B */ |
39 | u8 instr_size; | 40 | u8 instr_size; |
40 | u8 qno; | 41 | u8 qno; |
@@ -87,7 +88,7 @@ struct nitrox_bh { | |||
87 | struct bh_data *slc; | 88 | struct bh_data *slc; |
88 | }; | 89 | }; |
89 | 90 | ||
90 | /* NITROX-5 driver state */ | 91 | /* NITROX-V driver state */ |
91 | #define NITROX_UCODE_LOADED 0 | 92 | #define NITROX_UCODE_LOADED 0 |
92 | #define NITROX_READY 1 | 93 | #define NITROX_READY 1 |
93 | 94 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index ebe267379ac9..4d31df07777f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c | |||
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq) | |||
36 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); | 36 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); |
37 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); | 37 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); |
38 | cmdq->qsize = (qsize + PKT_IN_ALIGN); | 38 | cmdq->qsize = (qsize + PKT_IN_ALIGN); |
39 | cmdq->write_idx = 0; | ||
39 | 40 | ||
40 | spin_lock_init(&cmdq->response_lock); | 41 | spin_lock_init(&cmdq->response_lock); |
41 | spin_lock_init(&cmdq->cmdq_lock); | 42 | spin_lock_init(&cmdq->cmdq_lock); |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index deaefd532aaa..4a362fc22f62 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
@@ -42,6 +42,16 @@ | |||
42 | * Invalid flag options in AES-CCM IV. | 42 | * Invalid flag options in AES-CCM IV. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | static inline int incr_index(int index, int count, int max) | ||
46 | { | ||
47 | if ((index + count) >= max) | ||
48 | index = index + count - max; | ||
49 | else | ||
50 | index += count; | ||
51 | |||
52 | return index; | ||
53 | } | ||
54 | |||
45 | /** | 55 | /** |
46 | * dma_free_sglist - unmap and free the sg lists. | 56 | * dma_free_sglist - unmap and free the sg lists. |
47 | * @ndev: N5 device | 57 | * @ndev: N5 device |
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr, | |||
426 | struct nitrox_cmdq *cmdq) | 436 | struct nitrox_cmdq *cmdq) |
427 | { | 437 | { |
428 | struct nitrox_device *ndev = sr->ndev; | 438 | struct nitrox_device *ndev = sr->ndev; |
429 | union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; | 439 | int idx; |
430 | u64 offset; | ||
431 | u8 *ent; | 440 | u8 *ent; |
432 | 441 | ||
433 | spin_lock_bh(&cmdq->cmdq_lock); | 442 | spin_lock_bh(&cmdq->cmdq_lock); |
434 | 443 | ||
435 | /* get the next write offset */ | 444 | idx = cmdq->write_idx; |
436 | offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno); | ||
437 | pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset); | ||
438 | /* copy the instruction */ | 445 | /* copy the instruction */ |
439 | ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; | 446 | ent = cmdq->head + (idx * cmdq->instr_size); |
440 | memcpy(ent, &sr->instr, cmdq->instr_size); | 447 | memcpy(ent, &sr->instr, cmdq->instr_size); |
441 | /* flush the command queue updates */ | ||
442 | dma_wmb(); | ||
443 | 448 | ||
444 | sr->tstamp = jiffies; | ||
445 | atomic_set(&sr->status, REQ_POSTED); | 449 | atomic_set(&sr->status, REQ_POSTED); |
446 | response_list_add(sr, cmdq); | 450 | response_list_add(sr, cmdq); |
451 | sr->tstamp = jiffies; | ||
452 | /* flush the command queue updates */ | ||
453 | dma_wmb(); | ||
447 | 454 | ||
448 | /* Ring doorbell with count 1 */ | 455 | /* Ring doorbell with count 1 */ |
449 | writeq(1, cmdq->dbell_csr_addr); | 456 | writeq(1, cmdq->dbell_csr_addr); |
450 | /* orders the doorbell rings */ | 457 | /* orders the doorbell rings */ |
451 | mmiowb(); | 458 | mmiowb(); |
452 | 459 | ||
460 | cmdq->write_idx = incr_index(idx, 1, ndev->qlen); | ||
461 | |||
453 | spin_unlock_bh(&cmdq->cmdq_lock); | 462 | spin_unlock_bh(&cmdq->cmdq_lock); |
454 | } | 463 | } |
455 | 464 | ||
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | |||
459 | struct nitrox_softreq *sr, *tmp; | 468 | struct nitrox_softreq *sr, *tmp; |
460 | int ret = 0; | 469 | int ret = 0; |
461 | 470 | ||
471 | if (!atomic_read(&cmdq->backlog_count)) | ||
472 | return 0; | ||
473 | |||
462 | spin_lock_bh(&cmdq->backlog_lock); | 474 | spin_lock_bh(&cmdq->backlog_lock); |
463 | 475 | ||
464 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { | 476 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { |
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | |||
466 | 478 | ||
467 | /* submit until space available */ | 479 | /* submit until space available */ |
468 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | 480 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
469 | ret = -EBUSY; | 481 | ret = -ENOSPC; |
470 | break; | 482 | break; |
471 | } | 483 | } |
472 | /* delete from backlog list */ | 484 | /* delete from backlog list */ |
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) | |||
491 | { | 503 | { |
492 | struct nitrox_cmdq *cmdq = sr->cmdq; | 504 | struct nitrox_cmdq *cmdq = sr->cmdq; |
493 | struct nitrox_device *ndev = sr->ndev; | 505 | struct nitrox_device *ndev = sr->ndev; |
494 | int ret = -EBUSY; | 506 | |
507 | /* try to post backlog requests */ | ||
508 | post_backlog_cmds(cmdq); | ||
495 | 509 | ||
496 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | 510 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
497 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 511 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
498 | return -EAGAIN; | 512 | return -ENOSPC; |
499 | 513 | /* add to backlog list */ | |
500 | backlog_list_add(sr, cmdq); | 514 | backlog_list_add(sr, cmdq); |
501 | } else { | 515 | return -EBUSY; |
502 | ret = post_backlog_cmds(cmdq); | ||
503 | if (ret) { | ||
504 | backlog_list_add(sr, cmdq); | ||
505 | return ret; | ||
506 | } | ||
507 | post_se_instr(sr, cmdq); | ||
508 | ret = -EINPROGRESS; | ||
509 | } | 516 | } |
510 | return ret; | 517 | post_se_instr(sr, cmdq); |
518 | |||
519 | return -EINPROGRESS; | ||
511 | } | 520 | } |
512 | 521 | ||
513 | /** | 522 | /** |
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev, | |||
624 | */ | 633 | */ |
625 | sr->instr.fdata[0] = *((u64 *)&req->gph); | 634 | sr->instr.fdata[0] = *((u64 *)&req->gph); |
626 | sr->instr.fdata[1] = 0; | 635 | sr->instr.fdata[1] = 0; |
627 | /* flush the soft_req changes before posting the cmd */ | ||
628 | wmb(); | ||
629 | 636 | ||
630 | ret = nitrox_enqueue_request(sr); | 637 | ret = nitrox_enqueue_request(sr); |
631 | if (ret == -EAGAIN) | 638 | if (ret == -ENOSPC) |
632 | goto send_fail; | 639 | goto send_fail; |
633 | 640 | ||
634 | return ret; | 641 | return ret; |
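The nitrox rework stops reading the doorbell CSR to locate the next slot and instead keeps a per-queue software write_idx, advanced under cmdq_lock with a small wrap-around helper; the enqueue path now flushes the backlog first and distinguishes -ENOSPC (queue full and the caller may not backlog), -EBUSY (parked on the backlog) and -EINPROGRESS (posted). The wrap-around helper is worth spelling out on its own; a standalone sketch with an illustrative queue length (it assumes count <= max, which holds in the driver where count is 1):

#include <stdio.h>

/* Advance a ring index by count slots, wrapping at max
 * (mirrors the incr_index() helper added in nitrox_reqmgr.c). */
static int incr_index(int index, int count, int max)
{
    if (index + count >= max)
        return index + count - max;
    return index + count;
}

int main(void)
{
    int qlen = 8, idx = 0;

    for (int i = 0; i < 10; i++) {
        printf("%d ", idx);
        idx = incr_index(idx, 1, qlen);   /* prints 0 1 2 ... 7 0 1 */
    }
    printf("\n");
    return 0;
}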
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index a53a0e6ba024..7725b6ee14ef 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h | |||
@@ -96,6 +96,10 @@ enum csk_flags { | |||
96 | CSK_CONN_INLINE, /* Connection on HW */ | 96 | CSK_CONN_INLINE, /* Connection on HW */ |
97 | }; | 97 | }; |
98 | 98 | ||
99 | enum chtls_cdev_state { | ||
100 | CHTLS_CDEV_STATE_UP = 1 | ||
101 | }; | ||
102 | |||
99 | struct listen_ctx { | 103 | struct listen_ctx { |
100 | struct sock *lsk; | 104 | struct sock *lsk; |
101 | struct chtls_dev *cdev; | 105 | struct chtls_dev *cdev; |
@@ -146,6 +150,7 @@ struct chtls_dev { | |||
146 | unsigned int send_page_order; | 150 | unsigned int send_page_order; |
147 | int max_host_sndbuf; | 151 | int max_host_sndbuf; |
148 | struct key_map kmap; | 152 | struct key_map kmap; |
153 | unsigned int cdev_state; | ||
149 | }; | 154 | }; |
150 | 155 | ||
151 | struct chtls_hws { | 156 | struct chtls_hws { |
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 9b07f9165658..f59b044ebd25 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c | |||
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev) | |||
160 | tlsdev->hash = chtls_create_hash; | 160 | tlsdev->hash = chtls_create_hash; |
161 | tlsdev->unhash = chtls_destroy_hash; | 161 | tlsdev->unhash = chtls_destroy_hash; |
162 | tls_register_device(&cdev->tlsdev); | 162 | tls_register_device(&cdev->tlsdev); |
163 | cdev->cdev_state = CHTLS_CDEV_STATE_UP; | ||
163 | } | 164 | } |
164 | 165 | ||
165 | static void chtls_unregister_dev(struct chtls_dev *cdev) | 166 | static void chtls_unregister_dev(struct chtls_dev *cdev) |
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void) | |||
281 | struct chtls_dev *cdev, *tmp; | 282 | struct chtls_dev *cdev, *tmp; |
282 | 283 | ||
283 | mutex_lock(&cdev_mutex); | 284 | mutex_lock(&cdev_mutex); |
284 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list) | 285 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list) { |
285 | chtls_free_uld(cdev); | 286 | if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) |
287 | chtls_free_uld(cdev); | ||
288 | } | ||
286 | mutex_unlock(&cdev_mutex); | 289 | mutex_unlock(&cdev_mutex); |
287 | } | 290 | } |
288 | 291 | ||
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 5285ece4f33a..b71895871be3 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, | |||
107 | ret = crypto_skcipher_encrypt(req); | 107 | ret = crypto_skcipher_encrypt(req); |
108 | skcipher_request_zero(req); | 108 | skcipher_request_zero(req); |
109 | } else { | 109 | } else { |
110 | preempt_disable(); | ||
111 | pagefault_disable(); | ||
112 | enable_kernel_vsx(); | ||
113 | |||
114 | blkcipher_walk_init(&walk, dst, src, nbytes); | 110 | blkcipher_walk_init(&walk, dst, src, nbytes); |
115 | ret = blkcipher_walk_virt(desc, &walk); | 111 | ret = blkcipher_walk_virt(desc, &walk); |
116 | while ((nbytes = walk.nbytes)) { | 112 | while ((nbytes = walk.nbytes)) { |
113 | preempt_disable(); | ||
114 | pagefault_disable(); | ||
115 | enable_kernel_vsx(); | ||
117 | aes_p8_cbc_encrypt(walk.src.virt.addr, | 116 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
118 | walk.dst.virt.addr, | 117 | walk.dst.virt.addr, |
119 | nbytes & AES_BLOCK_MASK, | 118 | nbytes & AES_BLOCK_MASK, |
120 | &ctx->enc_key, walk.iv, 1); | 119 | &ctx->enc_key, walk.iv, 1); |
120 | disable_kernel_vsx(); | ||
121 | pagefault_enable(); | ||
122 | preempt_enable(); | ||
123 | |||
121 | nbytes &= AES_BLOCK_SIZE - 1; | 124 | nbytes &= AES_BLOCK_SIZE - 1; |
122 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 125 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
123 | } | 126 | } |
124 | |||
125 | disable_kernel_vsx(); | ||
126 | pagefault_enable(); | ||
127 | preempt_enable(); | ||
128 | } | 127 | } |
129 | 128 | ||
130 | return ret; | 129 | return ret; |
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, | |||
147 | ret = crypto_skcipher_decrypt(req); | 146 | ret = crypto_skcipher_decrypt(req); |
148 | skcipher_request_zero(req); | 147 | skcipher_request_zero(req); |
149 | } else { | 148 | } else { |
150 | preempt_disable(); | ||
151 | pagefault_disable(); | ||
152 | enable_kernel_vsx(); | ||
153 | |||
154 | blkcipher_walk_init(&walk, dst, src, nbytes); | 149 | blkcipher_walk_init(&walk, dst, src, nbytes); |
155 | ret = blkcipher_walk_virt(desc, &walk); | 150 | ret = blkcipher_walk_virt(desc, &walk); |
156 | while ((nbytes = walk.nbytes)) { | 151 | while ((nbytes = walk.nbytes)) { |
152 | preempt_disable(); | ||
153 | pagefault_disable(); | ||
154 | enable_kernel_vsx(); | ||
157 | aes_p8_cbc_encrypt(walk.src.virt.addr, | 155 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
158 | walk.dst.virt.addr, | 156 | walk.dst.virt.addr, |
159 | nbytes & AES_BLOCK_MASK, | 157 | nbytes & AES_BLOCK_MASK, |
160 | &ctx->dec_key, walk.iv, 0); | 158 | &ctx->dec_key, walk.iv, 0); |
159 | disable_kernel_vsx(); | ||
160 | pagefault_enable(); | ||
161 | preempt_enable(); | ||
162 | |||
161 | nbytes &= AES_BLOCK_SIZE - 1; | 163 | nbytes &= AES_BLOCK_SIZE - 1; |
162 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 164 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
163 | } | 165 | } |
164 | |||
165 | disable_kernel_vsx(); | ||
166 | pagefault_enable(); | ||
167 | preempt_enable(); | ||
168 | } | 166 | } |
169 | 167 | ||
170 | return ret; | 168 | return ret; |
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8bd9aff0f55f..e9954a7d4694 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, | |||
116 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); | 116 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); |
117 | skcipher_request_zero(req); | 117 | skcipher_request_zero(req); |
118 | } else { | 118 | } else { |
119 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
120 | |||
121 | ret = blkcipher_walk_virt(desc, &walk); | ||
122 | |||
119 | preempt_disable(); | 123 | preempt_disable(); |
120 | pagefault_disable(); | 124 | pagefault_disable(); |
121 | enable_kernel_vsx(); | 125 | enable_kernel_vsx(); |
122 | 126 | ||
123 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
124 | |||
125 | ret = blkcipher_walk_virt(desc, &walk); | ||
126 | iv = walk.iv; | 127 | iv = walk.iv; |
127 | memset(tweak, 0, AES_BLOCK_SIZE); | 128 | memset(tweak, 0, AES_BLOCK_SIZE); |
128 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); | 129 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); |
129 | 130 | ||
131 | disable_kernel_vsx(); | ||
132 | pagefault_enable(); | ||
133 | preempt_enable(); | ||
134 | |||
130 | while ((nbytes = walk.nbytes)) { | 135 | while ((nbytes = walk.nbytes)) { |
136 | preempt_disable(); | ||
137 | pagefault_disable(); | ||
138 | enable_kernel_vsx(); | ||
131 | if (enc) | 139 | if (enc) |
132 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, | 140 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, |
133 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); | 141 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); |
134 | else | 142 | else |
135 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, | 143 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, |
136 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); | 144 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); |
145 | disable_kernel_vsx(); | ||
146 | pagefault_enable(); | ||
147 | preempt_enable(); | ||
137 | 148 | ||
138 | nbytes &= AES_BLOCK_SIZE - 1; | 149 | nbytes &= AES_BLOCK_SIZE - 1; |
139 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 150 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
140 | } | 151 | } |
141 | |||
142 | disable_kernel_vsx(); | ||
143 | pagefault_enable(); | ||
144 | preempt_enable(); | ||
145 | } | 152 | } |
146 | return ret; | 153 | return ret; |
147 | } | 154 | } |
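Both VMX hunks (aes_cbc.c and aes_xts.c) shrink the region where preemption and page faults are disabled: enable_kernel_vsx()/disable_kernel_vsx() now bracket only the assembler call inside each walk iteration instead of the whole blkcipher walk, because blkcipher_walk_done() can allocate or fault and so must not run in that atomic section. A toy, userspace-only model of why the narrower bracket matters; the no_sleep_*() and might_sleep() helpers below are invented for the illustration, not kernel APIs:

#include <assert.h>
#include <stdio.h>

static int no_sleep_depth;                 /* models preempt/pagefault disable */

static void no_sleep_begin(void) { no_sleep_depth++; }
static void no_sleep_end(void)   { no_sleep_depth--; }
static void might_sleep(void)    { assert(no_sleep_depth == 0); }

static void crypt_block(int i)   { (void)i; /* the VSX work; atomic is fine */ }
static void walk_done(void)      { might_sleep(); /* may allocate or fault */ }

int main(void)
{
    /* Fixed structure: enter and leave the atomic section per iteration.
     * Wrapping the whole loop in no_sleep_begin()/no_sleep_end(), as the
     * old code effectively did, would trip the assert inside walk_done(). */
    for (int i = 0; i < 4; i++) {
        no_sleep_begin();
        crypt_block(i);
        no_sleep_end();
        walk_done();               /* safe: sleeping is allowed here */
    }
    printf("ok\n");
    return 0;
}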
diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 6fd46083e629..bbe4d72ca105 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c | |||
@@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, | |||
392 | { | 392 | { |
393 | struct file *filp = vmf->vma->vm_file; | 393 | struct file *filp = vmf->vma->vm_file; |
394 | unsigned long fault_size; | 394 | unsigned long fault_size; |
395 | int rc, id; | 395 | vm_fault_t rc = VM_FAULT_SIGBUS; |
396 | int id; | ||
396 | pfn_t pfn; | 397 | pfn_t pfn; |
397 | struct dev_dax *dev_dax = filp->private_data; | 398 | struct dev_dax *dev_dax = filp->private_data; |
398 | 399 | ||
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 721e6c57beae..64342944d917 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c | |||
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain, | |||
166 | le32_to_cpu(attr->sustained_freq_khz); | 166 | le32_to_cpu(attr->sustained_freq_khz); |
167 | dom_info->sustained_perf_level = | 167 | dom_info->sustained_perf_level = |
168 | le32_to_cpu(attr->sustained_perf_level); | 168 | le32_to_cpu(attr->sustained_perf_level); |
169 | dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / | 169 | if (!dom_info->sustained_freq_khz || |
170 | !dom_info->sustained_perf_level) | ||
171 | /* CPUFreq converts to kHz, hence default 1000 */ | ||
172 | dom_info->mult_factor = 1000; | ||
173 | else | ||
174 | dom_info->mult_factor = | ||
175 | (dom_info->sustained_freq_khz * 1000) / | ||
170 | dom_info->sustained_perf_level; | 176 | dom_info->sustained_perf_level; |
171 | memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); | 177 | memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); |
172 | } | 178 | } |
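The SCMI perf fix guards the mult_factor computation: if the platform reports a zero sustained_freq_khz or sustained_perf_level, the old expression divided by zero or yielded a useless factor, so the driver now falls back to 1000, the plain kHz-to-Hz scaling cpufreq expects. A quick standalone check of the arithmetic, with made-up firmware values:

#include <stdint.h>
#include <stdio.h>

static uint32_t mult_factor(uint32_t sustained_freq_khz,
                            uint32_t sustained_perf_level)
{
    if (!sustained_freq_khz || !sustained_perf_level)
        return 1000;    /* CPUFreq converts to kHz, hence the default */
    return (sustained_freq_khz * 1000) / sustained_perf_level;
}

int main(void)
{
    /* 2000 MHz sustained frequency at performance level 2000:
     * frequency in Hz = level * mult_factor = 2000 * 1000000 = 2 GHz */
    printf("%u\n", mult_factor(2000000, 2000)); /* 1000000 */
    printf("%u\n", mult_factor(0, 0));          /* 1000 fallback */
    return 0;
}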
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index 3530ccd17e04..da9781a2ef4a 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c | |||
@@ -41,6 +41,8 @@ struct adp5588_gpio { | |||
41 | uint8_t int_en[3]; | 41 | uint8_t int_en[3]; |
42 | uint8_t irq_mask[3]; | 42 | uint8_t irq_mask[3]; |
43 | uint8_t irq_stat[3]; | 43 | uint8_t irq_stat[3]; |
44 | uint8_t int_input_en[3]; | ||
45 | uint8_t int_lvl_cached[3]; | ||
44 | }; | 46 | }; |
45 | 47 | ||
46 | static int adp5588_gpio_read(struct i2c_client *client, u8 reg) | 48 | static int adp5588_gpio_read(struct i2c_client *client, u8 reg) |
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d) | |||
173 | struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); | 175 | struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); |
174 | int i; | 176 | int i; |
175 | 177 | ||
176 | for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) | 178 | for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { |
179 | if (dev->int_input_en[i]) { | ||
180 | mutex_lock(&dev->lock); | ||
181 | dev->dir[i] &= ~dev->int_input_en[i]; | ||
182 | dev->int_input_en[i] = 0; | ||
183 | adp5588_gpio_write(dev->client, GPIO_DIR1 + i, | ||
184 | dev->dir[i]); | ||
185 | mutex_unlock(&dev->lock); | ||
186 | } | ||
187 | |||
188 | if (dev->int_lvl_cached[i] != dev->int_lvl[i]) { | ||
189 | dev->int_lvl_cached[i] = dev->int_lvl[i]; | ||
190 | adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i, | ||
191 | dev->int_lvl[i]); | ||
192 | } | ||
193 | |||
177 | if (dev->int_en[i] ^ dev->irq_mask[i]) { | 194 | if (dev->int_en[i] ^ dev->irq_mask[i]) { |
178 | dev->int_en[i] = dev->irq_mask[i]; | 195 | dev->int_en[i] = dev->irq_mask[i]; |
179 | adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, | 196 | adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, |
180 | dev->int_en[i]); | 197 | dev->int_en[i]); |
181 | } | 198 | } |
199 | } | ||
182 | 200 | ||
183 | mutex_unlock(&dev->irq_lock); | 201 | mutex_unlock(&dev->irq_lock); |
184 | } | 202 | } |
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type) | |||
221 | else | 239 | else |
222 | return -EINVAL; | 240 | return -EINVAL; |
223 | 241 | ||
224 | adp5588_gpio_direction_input(&dev->gpio_chip, gpio); | 242 | dev->int_input_en[bank] |= bit; |
225 | adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank, | ||
226 | dev->int_lvl[bank]); | ||
227 | 243 | ||
228 | return 0; | 244 | return 0; |
229 | } | 245 | } |
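The adp5588 change moves the register work out of the atomic irq_set_type() path: set_type() now only marks the pin in int_input_en[], and adp5588_irq_bus_sync_unlock(), which is allowed to perform sleeping I2C transfers, switches those pins to input and rewrites GPIO_INT_LVL only when the cached level differs from the requested one. A minimal sketch of that record-now, flush-on-unlock pattern; bus_write() and the register names are stand-ins, not the driver's API:

#include <stdint.h>
#include <stdio.h>

#define NBANKS 3

struct chip {
    uint8_t dir[NBANKS];            /* 1 = output */
    uint8_t int_lvl[NBANKS];        /* requested trigger-level bits */
    uint8_t int_lvl_cached[NBANKS]; /* what the hardware currently holds */
    uint8_t int_input_en[NBANKS];   /* pins that must be switched to input */
};

static void bus_write(const char *reg, int bank, uint8_t val)
{
    printf("write %s%d = 0x%02x\n", reg, bank, (unsigned)val); /* fake I2C */
}

/* Called from the atomic irq_set_type path: just record the request. */
static void set_type(struct chip *c, int bank, uint8_t bit, uint8_t level_bits)
{
    c->int_input_en[bank] |= bit;
    c->int_lvl[bank] = level_bits;
}

/* Called from irq_bus_sync_unlock, where sleeping bus I/O is legal. */
static void bus_sync_unlock(struct chip *c)
{
    for (int i = 0; i < NBANKS; i++) {
        if (c->int_input_en[i]) {
            c->dir[i] &= ~c->int_input_en[i];
            c->int_input_en[i] = 0;
            bus_write("DIR", i, c->dir[i]);
        }
        if (c->int_lvl_cached[i] != c->int_lvl[i]) {
            c->int_lvl_cached[i] = c->int_lvl[i];
            bus_write("INT_LVL", i, c->int_lvl[i]);
        }
    }
}

int main(void)
{
    struct chip c = { { 0xff, 0xff, 0xff }, { 0 }, { 0 }, { 0 } };

    set_type(&c, 0, 0x04, 0x04);
    bus_sync_unlock(&c);    /* writes DIR0 = 0xfb and INT_LVL0 = 0x04 */
    bus_sync_unlock(&c);    /* nothing pending: no duplicate writes */
    return 0;
}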
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 28da700f5f52..044888fd96a1 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c | |||
@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev) | |||
728 | out_unregister: | 728 | out_unregister: |
729 | dwapb_gpio_unregister(gpio); | 729 | dwapb_gpio_unregister(gpio); |
730 | dwapb_irq_teardown(gpio); | 730 | dwapb_irq_teardown(gpio); |
731 | clk_disable_unprepare(gpio->clk); | ||
731 | 732 | ||
732 | return err; | 733 | return err; |
733 | } | 734 | } |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c48ed9d89ff5..8b9d7e42c600 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -25,7 +25,6 @@ | |||
25 | 25 | ||
26 | struct acpi_gpio_event { | 26 | struct acpi_gpio_event { |
27 | struct list_head node; | 27 | struct list_head node; |
28 | struct list_head initial_sync_list; | ||
29 | acpi_handle handle; | 28 | acpi_handle handle; |
30 | unsigned int pin; | 29 | unsigned int pin; |
31 | unsigned int irq; | 30 | unsigned int irq; |
@@ -49,10 +48,19 @@ struct acpi_gpio_chip { | |||
49 | struct mutex conn_lock; | 48 | struct mutex conn_lock; |
50 | struct gpio_chip *chip; | 49 | struct gpio_chip *chip; |
51 | struct list_head events; | 50 | struct list_head events; |
51 | struct list_head deferred_req_irqs_list_entry; | ||
52 | }; | 52 | }; |
53 | 53 | ||
54 | static LIST_HEAD(acpi_gpio_initial_sync_list); | 54 | /* |
55 | static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); | 55 | * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init |
56 | * (so builtin drivers) we register the ACPI GpioInt event handlers from a | ||
57 | * late_initcall_sync handler, so that other builtin drivers can register their | ||
58 | * OpRegions before the event handlers can run. This list contains gpiochips | ||
59 | * for which the acpi_gpiochip_request_interrupts() has been deferred. | ||
60 | */ | ||
61 | static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); | ||
62 | static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); | ||
63 | static bool acpi_gpio_deferred_req_irqs_done; | ||
56 | 64 | ||
57 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) | 65 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) |
58 | { | 66 | { |
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) | |||
89 | return gpiochip_get_desc(chip, pin); | 97 | return gpiochip_get_desc(chip, pin); |
90 | } | 98 | } |
91 | 99 | ||
92 | static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) | ||
93 | { | ||
94 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | ||
95 | list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); | ||
96 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | ||
97 | } | ||
98 | |||
99 | static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event) | ||
100 | { | ||
101 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | ||
102 | if (!list_empty(&event->initial_sync_list)) | ||
103 | list_del_init(&event->initial_sync_list); | ||
104 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | ||
105 | } | ||
106 | |||
107 | static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) | 100 | static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) |
108 | { | 101 | { |
109 | struct acpi_gpio_event *event = data; | 102 | struct acpi_gpio_event *event = data; |
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
186 | 179 | ||
187 | gpiod_direction_input(desc); | 180 | gpiod_direction_input(desc); |
188 | 181 | ||
189 | value = gpiod_get_value(desc); | 182 | value = gpiod_get_value_cansleep(desc); |
190 | 183 | ||
191 | ret = gpiochip_lock_as_irq(chip, pin); | 184 | ret = gpiochip_lock_as_irq(chip, pin); |
192 | if (ret) { | 185 | if (ret) { |
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
229 | event->irq = irq; | 222 | event->irq = irq; |
230 | event->pin = pin; | 223 | event->pin = pin; |
231 | event->desc = desc; | 224 | event->desc = desc; |
232 | INIT_LIST_HEAD(&event->initial_sync_list); | ||
233 | 225 | ||
234 | ret = request_threaded_irq(event->irq, NULL, handler, irqflags, | 226 | ret = request_threaded_irq(event->irq, NULL, handler, irqflags, |
235 | "ACPI:Event", event); | 227 | "ACPI:Event", event); |
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
251 | * may refer to OperationRegions from other (builtin) drivers which | 243 | * may refer to OperationRegions from other (builtin) drivers which |
252 | * may be probed after us. | 244 | * may be probed after us. |
253 | */ | 245 | */ |
254 | if (handler == acpi_gpio_irq_handler && | 246 | if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || |
255 | (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || | 247 | ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)) |
256 | ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) | 248 | handler(event->irq, event); |
257 | acpi_gpio_add_to_initial_sync_list(event); | ||
258 | 249 | ||
259 | return AE_OK; | 250 | return AE_OK; |
260 | 251 | ||
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) | |||
283 | struct acpi_gpio_chip *acpi_gpio; | 274 | struct acpi_gpio_chip *acpi_gpio; |
284 | acpi_handle handle; | 275 | acpi_handle handle; |
285 | acpi_status status; | 276 | acpi_status status; |
277 | bool defer; | ||
286 | 278 | ||
287 | if (!chip->parent || !chip->to_irq) | 279 | if (!chip->parent || !chip->to_irq) |
288 | return; | 280 | return; |
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) | |||
295 | if (ACPI_FAILURE(status)) | 287 | if (ACPI_FAILURE(status)) |
296 | return; | 288 | return; |
297 | 289 | ||
290 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
291 | defer = !acpi_gpio_deferred_req_irqs_done; | ||
292 | if (defer) | ||
293 | list_add(&acpi_gpio->deferred_req_irqs_list_entry, | ||
294 | &acpi_gpio_deferred_req_irqs_list); | ||
295 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
296 | |||
297 | if (defer) | ||
298 | return; | ||
299 | |||
298 | acpi_walk_resources(handle, "_AEI", | 300 | acpi_walk_resources(handle, "_AEI", |
299 | acpi_gpiochip_request_interrupt, acpi_gpio); | 301 | acpi_gpiochip_request_interrupt, acpi_gpio); |
300 | } | 302 | } |
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) | |||
325 | if (ACPI_FAILURE(status)) | 327 | if (ACPI_FAILURE(status)) |
326 | return; | 328 | return; |
327 | 329 | ||
330 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
331 | if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry)) | ||
332 | list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); | ||
333 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
334 | |||
328 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { | 335 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { |
329 | struct gpio_desc *desc; | 336 | struct gpio_desc *desc; |
330 | 337 | ||
331 | acpi_gpio_del_from_initial_sync_list(event); | ||
332 | |||
333 | if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) | 338 | if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) |
334 | disable_irq_wake(event->irq); | 339 | disable_irq_wake(event->irq); |
335 | 340 | ||
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) | |||
1052 | 1057 | ||
1053 | acpi_gpio->chip = chip; | 1058 | acpi_gpio->chip = chip; |
1054 | INIT_LIST_HEAD(&acpi_gpio->events); | 1059 | INIT_LIST_HEAD(&acpi_gpio->events); |
1060 | INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry); | ||
1055 | 1061 | ||
1056 | status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); | 1062 | status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); |
1057 | if (ACPI_FAILURE(status)) { | 1063 | if (ACPI_FAILURE(status)) { |
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) | |||
1198 | return con_id == NULL; | 1204 | return con_id == NULL; |
1199 | } | 1205 | } |
1200 | 1206 | ||
1201 | /* Sync the initial state of handlers after all builtin drivers have probed */ | 1207 | /* Run deferred acpi_gpiochip_request_interrupts() */ |
1202 | static int acpi_gpio_initial_sync(void) | 1208 | static int acpi_gpio_handle_deferred_request_interrupts(void) |
1203 | { | 1209 | { |
1204 | struct acpi_gpio_event *event, *ep; | 1210 | struct acpi_gpio_chip *acpi_gpio, *tmp; |
1211 | |||
1212 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
1213 | list_for_each_entry_safe(acpi_gpio, tmp, | ||
1214 | &acpi_gpio_deferred_req_irqs_list, | ||
1215 | deferred_req_irqs_list_entry) { | ||
1216 | acpi_handle handle; | ||
1205 | 1217 | ||
1206 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | 1218 | handle = ACPI_HANDLE(acpi_gpio->chip->parent); |
1207 | list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, | 1219 | acpi_walk_resources(handle, "_AEI", |
1208 | initial_sync_list) { | 1220 | acpi_gpiochip_request_interrupt, acpi_gpio); |
1209 | acpi_evaluate_object(event->handle, NULL, NULL, NULL); | 1221 | |
1210 | list_del_init(&event->initial_sync_list); | 1222 | list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); |
1211 | } | 1223 | } |
1212 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | 1224 | |
1225 | acpi_gpio_deferred_req_irqs_done = true; | ||
1226 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
1213 | 1227 | ||
1214 | return 0; | 1228 | return 0; |
1215 | } | 1229 | } |
1216 | /* We must use _sync so that this runs after the first deferred_probe run */ | 1230 | /* We must use _sync so that this runs after the first deferred_probe run */ |
1217 | late_initcall_sync(acpi_gpio_initial_sync); | 1231 | late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts); |
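The gpiolib-acpi rework replaces the per-event initial-sync list with deferral of the whole acpi_gpiochip_request_interrupts() call: gpiochips registered before late_initcall_sync are parked on acpi_gpio_deferred_req_irqs_list, and the _AEI walk runs from the late initcall once every builtin driver has had a chance to register its OpRegions; chips that show up afterwards are handled immediately. A compact standalone model of that defer-until-one-time-flush pattern (the request_irqs() body is a placeholder and the mutex protecting the real list is omitted):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEFERRED 8

static const char *deferred[MAX_DEFERRED];
static int ndeferred;
static bool flush_done;                /* mirrors acpi_gpio_deferred_req_irqs_done */

static void request_irqs(const char *chip)
{
    printf("requesting ACPI GpioInt handlers for %s\n", chip);
}

static void chip_added(const char *chip)
{
    if (!flush_done) {                 /* too early: park it on the list */
        deferred[ndeferred++] = chip;
        return;
    }
    request_irqs(chip);                /* late enough: handle it right away */
}

static void late_initcall_sync_handler(void)
{
    for (int i = 0; i < ndeferred; i++)
        request_irqs(deferred[i]);
    ndeferred = 0;
    flush_done = true;
}

int main(void)
{
    chip_added("gpiochip0");           /* builtin driver: deferred */
    late_initcall_sync_handler();      /* flushes the deferred request */
    chip_added("gpiochip1");           /* later chip: handled immediately */
    return 0;
}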
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a4f1157d6aa0..d4e7a09598fa 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data) | |||
31 | struct of_phandle_args *gpiospec = data; | 31 | struct of_phandle_args *gpiospec = data; |
32 | 32 | ||
33 | return chip->gpiodev->dev.of_node == gpiospec->np && | 33 | return chip->gpiodev->dev.of_node == gpiospec->np && |
34 | chip->of_xlate && | ||
34 | chip->of_xlate(chip, gpiospec, NULL) >= 0; | 35 | chip->of_xlate(chip, gpiospec, NULL) >= 0; |
35 | } | 36 | } |
36 | 37 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 502b94fb116a..b6e9df11115d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
1012 | if (r) | 1012 | if (r) |
1013 | return r; | 1013 | return r; |
1014 | 1014 | ||
1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { | 1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) |
1016 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; | 1016 | parser->job->preamble_status |= |
1017 | if (!parser->ctx->preamble_presented) { | 1017 | AMDGPU_PREAMBLE_IB_PRESENT; |
1018 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
1019 | parser->ctx->preamble_presented = true; | ||
1020 | } | ||
1021 | } | ||
1022 | 1018 | ||
1023 | if (parser->ring && parser->ring != ring) | 1019 | if (parser->ring && parser->ring != ring) |
1024 | return -EINVAL; | 1020 | return -EINVAL; |
@@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1207 | 1203 | ||
1208 | int r; | 1204 | int r; |
1209 | 1205 | ||
1206 | job = p->job; | ||
1207 | p->job = NULL; | ||
1208 | |||
1209 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
1210 | if (r) | ||
1211 | goto error_unlock; | ||
1212 | |||
1213 | /* No memory allocation is allowed while holding the mn lock */ | ||
1210 | amdgpu_mn_lock(p->mn); | 1214 | amdgpu_mn_lock(p->mn); |
1211 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { | 1215 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
1212 | struct amdgpu_bo *bo = e->robj; | 1216 | struct amdgpu_bo *bo = e->robj; |
1213 | 1217 | ||
1214 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { | 1218 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { |
1215 | amdgpu_mn_unlock(p->mn); | 1219 | r = -ERESTARTSYS; |
1216 | return -ERESTARTSYS; | 1220 | goto error_abort; |
1217 | } | 1221 | } |
1218 | } | 1222 | } |
1219 | 1223 | ||
1220 | job = p->job; | ||
1221 | p->job = NULL; | ||
1222 | |||
1223 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
1224 | if (r) { | ||
1225 | amdgpu_job_free(job); | ||
1226 | amdgpu_mn_unlock(p->mn); | ||
1227 | return r; | ||
1228 | } | ||
1229 | |||
1230 | job->owner = p->filp; | 1224 | job->owner = p->filp; |
1231 | p->fence = dma_fence_get(&job->base.s_fence->finished); | 1225 | p->fence = dma_fence_get(&job->base.s_fence->finished); |
1232 | 1226 | ||
@@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1241 | 1235 | ||
1242 | amdgpu_cs_post_dependencies(p); | 1236 | amdgpu_cs_post_dependencies(p); |
1243 | 1237 | ||
1238 | if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && | ||
1239 | !p->ctx->preamble_presented) { | ||
1240 | job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
1241 | p->ctx->preamble_presented = true; | ||
1242 | } | ||
1243 | |||
1244 | cs->out.handle = seq; | 1244 | cs->out.handle = seq; |
1245 | job->uf_sequence = seq; | 1245 | job->uf_sequence = seq; |
1246 | 1246 | ||
@@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1258 | amdgpu_mn_unlock(p->mn); | 1258 | amdgpu_mn_unlock(p->mn); |
1259 | 1259 | ||
1260 | return 0; | 1260 | return 0; |
1261 | |||
1262 | error_abort: | ||
1263 | dma_fence_put(&job->base.s_fence->finished); | ||
1264 | job->base.s_fence = NULL; | ||
1265 | |||
1266 | error_unlock: | ||
1267 | amdgpu_job_free(job); | ||
1268 | amdgpu_mn_unlock(p->mn); | ||
1269 | return r; | ||
1261 | } | 1270 | } |
1262 | 1271 | ||
1263 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 1272 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 5518e623fed2..51b5e977ca88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
164 | return r; | 164 | return r; |
165 | } | 165 | } |
166 | 166 | ||
167 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
167 | if (ring->funcs->emit_pipeline_sync && job && | 168 | if (ring->funcs->emit_pipeline_sync && job && |
168 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || | 169 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || |
170 | (amdgpu_sriov_vf(adev) && need_ctx_switch) || | ||
169 | amdgpu_vm_need_pipeline_sync(ring, job))) { | 171 | amdgpu_vm_need_pipeline_sync(ring, job))) { |
170 | need_pipe_sync = true; | 172 | need_pipe_sync = true; |
171 | dma_fence_put(tmp); | 173 | dma_fence_put(tmp); |
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
196 | } | 198 | } |
197 | 199 | ||
198 | skip_preamble = ring->current_ctx == fence_ctx; | 200 | skip_preamble = ring->current_ctx == fence_ctx; |
199 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
200 | if (job && ring->funcs->emit_cntxcntl) { | 201 | if (job && ring->funcs->emit_cntxcntl) { |
201 | if (need_ctx_switch) | 202 | if (need_ctx_switch) |
202 | status |= AMDGPU_HAVE_CTX_SWITCH; | 203 | status |= AMDGPU_HAVE_CTX_SWITCH; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8f98629fbe59..7b4e657a95c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
1932 | amdgpu_fence_wait_empty(ring); | 1932 | amdgpu_fence_wait_empty(ring); |
1933 | } | 1933 | } |
1934 | 1934 | ||
1935 | mutex_lock(&adev->pm.mutex); | ||
1936 | /* update battery/ac status */ | ||
1937 | if (power_supply_is_system_supplied() > 0) | ||
1938 | adev->pm.ac_power = true; | ||
1939 | else | ||
1940 | adev->pm.ac_power = false; | ||
1941 | mutex_unlock(&adev->pm.mutex); | ||
1942 | |||
1943 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | 1935 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
1944 | if (!amdgpu_device_has_dc_support(adev)) { | 1936 | if (!amdgpu_device_has_dc_support(adev)) { |
1945 | mutex_lock(&adev->pm.mutex); | 1937 | mutex_lock(&adev->pm.mutex); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..b17771dd5ce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, | |||
172 | * is validated on next vm use to avoid fault. | 172 | * is validated on next vm use to avoid fault. |
173 | * */ | 173 | * */ |
174 | list_move_tail(&base->vm_status, &vm->evicted); | 174 | list_move_tail(&base->vm_status, &vm->evicted); |
175 | base->moved = true; | ||
175 | } | 176 | } |
176 | 177 | ||
177 | /** | 178 | /** |
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
369 | uint64_t addr; | 370 | uint64_t addr; |
370 | int r; | 371 | int r; |
371 | 372 | ||
372 | addr = amdgpu_bo_gpu_offset(bo); | ||
373 | entries = amdgpu_bo_size(bo) / 8; | 373 | entries = amdgpu_bo_size(bo) / 8; |
374 | 374 | ||
375 | if (pte_support_ats) { | 375 | if (pte_support_ats) { |
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
401 | if (r) | 401 | if (r) |
402 | goto error; | 402 | goto error; |
403 | 403 | ||
404 | addr = amdgpu_bo_gpu_offset(bo); | ||
404 | if (ats_entries) { | 405 | if (ats_entries) { |
405 | uint64_t ats_value; | 406 | uint64_t ats_value; |
406 | 407 | ||
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) | |||
2483 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size | 2484 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size |
2484 | * | 2485 | * |
2485 | * @adev: amdgpu_device pointer | 2486 | * @adev: amdgpu_device pointer |
2486 | * @vm_size: the default vm size if it's set auto | 2487 | * @min_vm_size: the minimum vm size in GB if it's set auto |
2487 | * @fragment_size_default: Default PTE fragment size | 2488 | * @fragment_size_default: Default PTE fragment size |
2488 | * @max_level: max VMPT level | 2489 | * @max_level: max VMPT level |
2489 | * @max_bits: max address space size in bits | 2490 | * @max_bits: max address space size in bits |
2490 | * | 2491 | * |
2491 | */ | 2492 | */ |
2492 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 2493 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
2493 | uint32_t fragment_size_default, unsigned max_level, | 2494 | uint32_t fragment_size_default, unsigned max_level, |
2494 | unsigned max_bits) | 2495 | unsigned max_bits) |
2495 | { | 2496 | { |
2497 | unsigned int max_size = 1 << (max_bits - 30); | ||
2498 | unsigned int vm_size; | ||
2496 | uint64_t tmp; | 2499 | uint64_t tmp; |
2497 | 2500 | ||
2498 | /* adjust vm size first */ | 2501 | /* adjust vm size first */ |
2499 | if (amdgpu_vm_size != -1) { | 2502 | if (amdgpu_vm_size != -1) { |
2500 | unsigned max_size = 1 << (max_bits - 30); | ||
2501 | |||
2502 | vm_size = amdgpu_vm_size; | 2503 | vm_size = amdgpu_vm_size; |
2503 | if (vm_size > max_size) { | 2504 | if (vm_size > max_size) { |
2504 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", | 2505 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", |
2505 | amdgpu_vm_size, max_size); | 2506 | amdgpu_vm_size, max_size); |
2506 | vm_size = max_size; | 2507 | vm_size = max_size; |
2507 | } | 2508 | } |
2509 | } else { | ||
2510 | struct sysinfo si; | ||
2511 | unsigned int phys_ram_gb; | ||
2512 | |||
2513 | /* Optimal VM size depends on the amount of physical | ||
2514 | * RAM available. Underlying requirements and | ||
2515 | * assumptions: | ||
2516 | * | ||
2517 | * - Need to map system memory and VRAM from all GPUs | ||
2518 | * - VRAM from other GPUs not known here | ||
2519 | * - Assume VRAM <= system memory | ||
2520 | * - On GFX8 and older, VM space can be segmented for | ||
2521 | * different MTYPEs | ||
2522 | * - Need to allow room for fragmentation, guard pages etc. | ||
2523 | * | ||
2524 | * This adds up to a rough guess of system memory x3. | ||
2525 | * Round up to power of two to maximize the available | ||
2526 | * VM size with the given page table size. | ||
2527 | */ | ||
2528 | si_meminfo(&si); | ||
2529 | phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + | ||
2530 | (1 << 30) - 1) >> 30; | ||
2531 | vm_size = roundup_pow_of_two( | ||
2532 | min(max(phys_ram_gb * 3, min_vm_size), max_size)); | ||
2508 | } | 2533 | } |
2509 | 2534 | ||
2510 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; | 2535 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; |
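With amdgpu_vm_size left at auto, the new amdgpu_vm_adjust_size() derives the VM size from physical RAM: it rounds si_meminfo()'s totalram * mem_unit up to whole GB, multiplies by three as a rough allowance for VRAM from all GPUs plus fragmentation and guard pages, clamps between the renamed min_vm_size parameter and the 1 << (max_bits - 30) address-space limit, and rounds up to a power of two. A standalone version of just that arithmetic, using plain C and example inputs instead of si_meminfo():

#include <stdint.h>
#include <stdio.h>

static unsigned int roundup_pow_of_two_u32(unsigned int v)
{
    unsigned int p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

static unsigned int auto_vm_size_gb(uint64_t totalram, uint64_t mem_unit,
                                    unsigned int min_vm_size,
                                    unsigned int max_bits)
{
    unsigned int max_size = 1u << (max_bits - 30);        /* GB */
    unsigned int phys_ram_gb =
        (unsigned int)((totalram * mem_unit + (1ull << 30) - 1) >> 30);
    unsigned int want = phys_ram_gb * 3;                  /* RAM + VRAM + slack */

    if (want < min_vm_size)
        want = min_vm_size;
    if (want > max_size)
        want = max_size;
    return roundup_pow_of_two_u32(want);
}

int main(void)
{
    /* 16 GiB of RAM (bytes, mem_unit == 1): 16 * 3 = 48, rounded up to 64 GB */
    printf("%u GB\n", auto_vm_size_gb(16ull << 30, 1, 8, 40));
    /* 256 GiB of RAM but only a 36-bit address space: capped at 64 GB */
    printf("%u GB\n", auto_vm_size_gb(256ull << 30, 1, 8, 36));
    return 0;
}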
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..9fa9df0c5e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, | |||
321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); | 321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); |
322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | 322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, |
323 | struct amdgpu_bo_va *bo_va); | 323 | struct amdgpu_bo_va *bo_va); |
324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
325 | uint32_t fragment_size_default, unsigned max_level, | 325 | uint32_t fragment_size_default, unsigned max_level, |
326 | unsigned max_bits); | 326 | unsigned max_bits); |
327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..5a9534a82d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
5664 | if (amdgpu_sriov_vf(adev)) | 5664 | if (amdgpu_sriov_vf(adev)) |
5665 | return 0; | 5665 | return 0; |
5666 | 5666 | ||
5667 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | ||
5668 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
5669 | AMD_PG_SUPPORT_CP | | ||
5670 | AMD_PG_SUPPORT_GFX_DMG)) | ||
5671 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | ||
5667 | switch (adev->asic_type) { | 5672 | switch (adev->asic_type) { |
5668 | case CHIP_CARRIZO: | 5673 | case CHIP_CARRIZO: |
5669 | case CHIP_STONEY: | 5674 | case CHIP_STONEY: |
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
5713 | default: | 5718 | default: |
5714 | break; | 5719 | break; |
5715 | } | 5720 | } |
5716 | 5721 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | |
5722 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
5723 | AMD_PG_SUPPORT_CP | | ||
5724 | AMD_PG_SUPPORT_GFX_DMG)) | ||
5725 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | ||
5717 | return 0; | 5726 | return 0; |
5718 | } | 5727 | } |
5719 | 5728 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) | |||
632 | amdgpu_gart_table_vram_unpin(adev); | 632 | amdgpu_gart_table_vram_unpin(adev); |
633 | } | 633 | } |
634 | 634 | ||
635 | static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) | ||
636 | { | ||
637 | amdgpu_gart_table_vram_free(adev); | ||
638 | amdgpu_gart_fini(adev); | ||
639 | } | ||
640 | |||
641 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, | 635 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, |
642 | u32 status, u32 addr, u32 mc_client) | 636 | u32 status, u32 addr, u32 mc_client) |
643 | { | 637 | { |
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) | |||
935 | 929 | ||
936 | amdgpu_gem_force_release(adev); | 930 | amdgpu_gem_force_release(adev); |
937 | amdgpu_vm_manager_fini(adev); | 931 | amdgpu_vm_manager_fini(adev); |
938 | gmc_v6_0_gart_fini(adev); | 932 | amdgpu_gart_table_vram_free(adev); |
939 | amdgpu_bo_fini(adev); | 933 | amdgpu_bo_fini(adev); |
934 | amdgpu_gart_fini(adev); | ||
940 | release_firmware(adev->gmc.fw); | 935 | release_firmware(adev->gmc.fw); |
941 | adev->gmc.fw = NULL; | 936 | adev->gmc.fw = NULL; |
942 | 937 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) | |||
747 | } | 747 | } |
748 | 748 | ||
749 | /** | 749 | /** |
750 | * gmc_v7_0_gart_fini - vm fini callback | ||
751 | * | ||
752 | * @adev: amdgpu_device pointer | ||
753 | * | ||
754 | * Tears down the driver GART/VM setup (CIK). | ||
755 | */ | ||
756 | static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) | ||
757 | { | ||
758 | amdgpu_gart_table_vram_free(adev); | ||
759 | amdgpu_gart_fini(adev); | ||
760 | } | ||
761 | |||
762 | /** | ||
763 | * gmc_v7_0_vm_decode_fault - print human readable fault info | 750 | * gmc_v7_0_vm_decode_fault - print human readable fault info |
764 | * | 751 | * |
765 | * @adev: amdgpu_device pointer | 752 | * @adev: amdgpu_device pointer |
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) | |||
1095 | amdgpu_gem_force_release(adev); | 1082 | amdgpu_gem_force_release(adev); |
1096 | amdgpu_vm_manager_fini(adev); | 1083 | amdgpu_vm_manager_fini(adev); |
1097 | kfree(adev->gmc.vm_fault_info); | 1084 | kfree(adev->gmc.vm_fault_info); |
1098 | gmc_v7_0_gart_fini(adev); | 1085 | amdgpu_gart_table_vram_free(adev); |
1099 | amdgpu_bo_fini(adev); | 1086 | amdgpu_bo_fini(adev); |
1087 | amdgpu_gart_fini(adev); | ||
1100 | release_firmware(adev->gmc.fw); | 1088 | release_firmware(adev->gmc.fw); |
1101 | adev->gmc.fw = NULL; | 1089 | adev->gmc.fw = NULL; |
1102 | 1090 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) | |||
969 | } | 969 | } |
970 | 970 | ||
971 | /** | 971 | /** |
972 | * gmc_v8_0_gart_fini - vm fini callback | ||
973 | * | ||
974 | * @adev: amdgpu_device pointer | ||
975 | * | ||
976 | * Tears down the driver GART/VM setup (CIK). | ||
977 | */ | ||
978 | static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) | ||
979 | { | ||
980 | amdgpu_gart_table_vram_free(adev); | ||
981 | amdgpu_gart_fini(adev); | ||
982 | } | ||
983 | |||
984 | /** | ||
985 | * gmc_v8_0_vm_decode_fault - print human readable fault info | 972 | * gmc_v8_0_vm_decode_fault - print human readable fault info |
986 | * | 973 | * |
987 | * @adev: amdgpu_device pointer | 974 | * @adev: amdgpu_device pointer |
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) | |||
1199 | amdgpu_gem_force_release(adev); | 1186 | amdgpu_gem_force_release(adev); |
1200 | amdgpu_vm_manager_fini(adev); | 1187 | amdgpu_vm_manager_fini(adev); |
1201 | kfree(adev->gmc.vm_fault_info); | 1188 | kfree(adev->gmc.vm_fault_info); |
1202 | gmc_v8_0_gart_fini(adev); | 1189 | amdgpu_gart_table_vram_free(adev); |
1203 | amdgpu_bo_fini(adev); | 1190 | amdgpu_bo_fini(adev); |
1191 | amdgpu_gart_fini(adev); | ||
1204 | release_firmware(adev->gmc.fw); | 1192 | release_firmware(adev->gmc.fw); |
1205 | adev->gmc.fw = NULL; | 1193 | adev->gmc.fw = NULL; |
1206 | 1194 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) | |||
942 | return 0; | 942 | return 0; |
943 | } | 943 | } |
944 | 944 | ||
945 | /** | ||
946 | * gmc_v9_0_gart_fini - vm fini callback | ||
947 | * | ||
948 | * @adev: amdgpu_device pointer | ||
949 | * | ||
950 | * Tears down the driver GART/VM setup (CIK). | ||
951 | */ | ||
952 | static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) | ||
953 | { | ||
954 | amdgpu_gart_table_vram_free(adev); | ||
955 | amdgpu_gart_fini(adev); | ||
956 | } | ||
957 | |||
958 | static int gmc_v9_0_sw_fini(void *handle) | 945 | static int gmc_v9_0_sw_fini(void *handle) |
959 | { | 946 | { |
960 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 947 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
961 | 948 | ||
962 | amdgpu_gem_force_release(adev); | 949 | amdgpu_gem_force_release(adev); |
963 | amdgpu_vm_manager_fini(adev); | 950 | amdgpu_vm_manager_fini(adev); |
964 | gmc_v9_0_gart_fini(adev); | ||
965 | 951 | ||
966 | /* | 952 | /* |
967 | * TODO: | 953 | * TODO: |
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) | |||
974 | */ | 960 | */ |
975 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); | 961 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); |
976 | 962 | ||
963 | amdgpu_gart_table_vram_free(adev); | ||
977 | amdgpu_bo_fini(adev); | 964 | amdgpu_bo_fini(adev); |
965 | amdgpu_gart_fini(adev); | ||
978 | 966 | ||
979 | return 0; | 967 | return 0; |
980 | } | 968 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, | |||
65 | int min_temp, int max_temp); | 65 | int min_temp, int max_temp); |
66 | static int kv_init_fps_limits(struct amdgpu_device *adev); | 66 | static int kv_init_fps_limits(struct amdgpu_device *adev); |
67 | 67 | ||
68 | static void kv_dpm_powergate_uvd(void *handle, bool gate); | ||
69 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); | ||
70 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); | 68 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); |
71 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); | 69 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); |
72 | 70 | ||
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
1354 | return ret; | 1352 | return ret; |
1355 | } | 1353 | } |
1356 | 1354 | ||
1357 | kv_update_current_ps(adev, adev->pm.dpm.boot_ps); | ||
1358 | |||
1359 | if (adev->irq.installed && | 1355 | if (adev->irq.installed && |
1360 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { | 1356 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { |
1361 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); | 1357 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); |
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
1374 | 1370 | ||
1375 | static void kv_dpm_disable(struct amdgpu_device *adev) | 1371 | static void kv_dpm_disable(struct amdgpu_device *adev) |
1376 | { | 1372 | { |
1373 | struct kv_power_info *pi = kv_get_pi(adev); | ||
1374 | |||
1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1375 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
1378 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); | 1376 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
1379 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev) | |||
1387 | /* powerup blocks */ | 1385 | /* powerup blocks */ |
1388 | kv_dpm_powergate_acp(adev, false); | 1386 | kv_dpm_powergate_acp(adev, false); |
1389 | kv_dpm_powergate_samu(adev, false); | 1387 | kv_dpm_powergate_samu(adev, false); |
1390 | kv_dpm_powergate_vce(adev, false); | 1388 | if (pi->caps_vce_pg) /* power on the VCE block */ |
1391 | kv_dpm_powergate_uvd(adev, false); | 1389 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); |
1390 | if (pi->caps_uvd_pg) /* power on the UVD block */ | ||
1391 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); | ||
1392 | 1392 | ||
1393 | kv_enable_smc_cac(adev, false); | 1393 | kv_enable_smc_cac(adev, false); |
1394 | kv_enable_didt(adev, false); | 1394 | kv_enable_didt(adev, false); |
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
1551 | int ret; | 1551 | int ret; |
1552 | 1552 | ||
1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { | 1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { |
1554 | kv_dpm_powergate_vce(adev, false); | ||
1555 | if (pi->caps_stable_p_state) | 1554 | if (pi->caps_stable_p_state) |
1556 | pi->vce_boot_level = table->count - 1; | 1555 | pi->vce_boot_level = table->count - 1; |
1557 | else | 1556 | else |
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
1573 | kv_enable_vce_dpm(adev, true); | 1572 | kv_enable_vce_dpm(adev, true); |
1574 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { | 1573 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { |
1575 | kv_enable_vce_dpm(adev, false); | 1574 | kv_enable_vce_dpm(adev, false); |
1576 | kv_dpm_powergate_vce(adev, true); | ||
1577 | } | 1575 | } |
1578 | 1576 | ||
1579 | return 0; | 1577 | return 0; |
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) | |||
1702 | } | 1700 | } |
1703 | } | 1701 | } |
1704 | 1702 | ||
1705 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) | 1703 | static void kv_dpm_powergate_vce(void *handle, bool gate) |
1706 | { | 1704 | { |
1705 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
1707 | struct kv_power_info *pi = kv_get_pi(adev); | 1706 | struct kv_power_info *pi = kv_get_pi(adev); |
1708 | 1707 | int ret; | |
1709 | if (pi->vce_power_gated == gate) | ||
1710 | return; | ||
1711 | 1708 | ||
1712 | pi->vce_power_gated = gate; | 1709 | pi->vce_power_gated = gate; |
1713 | 1710 | ||
1714 | if (!pi->caps_vce_pg) | 1711 | if (gate) { |
1715 | return; | 1712 | /* stop the VCE block */ |
1716 | 1713 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | |
1717 | if (gate) | 1714 | AMD_PG_STATE_GATE); |
1718 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); | 1715 | kv_enable_vce_dpm(adev, false); |
1719 | else | 1716 | if (pi->caps_vce_pg) /* power off the VCE block */ |
1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | 1717 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); |
1718 | } else { | ||
1719 | if (pi->caps_vce_pg) /* power on the VCE block */ | ||
1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | ||
1721 | kv_enable_vce_dpm(adev, true); | ||
1722 | /* re-init the VCE block */ | ||
1723 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | ||
1724 | AMD_PG_STATE_UNGATE); | ||
1725 | } | ||
1721 | } | 1726 | } |
1722 | 1727 | ||
1728 | |||
1723 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) | 1729 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) |
1724 | { | 1730 | { |
1725 | struct kv_power_info *pi = kv_get_pi(adev); | 1731 | struct kv_power_info *pi = kv_get_pi(adev); |
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) | |||
3061 | else | 3067 | else |
3062 | adev->pm.dpm_enabled = true; | 3068 | adev->pm.dpm_enabled = true; |
3063 | mutex_unlock(&adev->pm.mutex); | 3069 | mutex_unlock(&adev->pm.mutex); |
3064 | 3070 | amdgpu_pm_compute_clocks(adev); | |
3065 | return ret; | 3071 | return ret; |
3066 | } | 3072 | } |
3067 | 3073 | ||
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle, | |||
3313 | case AMD_IP_BLOCK_TYPE_UVD: | 3319 | case AMD_IP_BLOCK_TYPE_UVD: |
3314 | kv_dpm_powergate_uvd(handle, gate); | 3320 | kv_dpm_powergate_uvd(handle, gate); |
3315 | break; | 3321 | break; |
3322 | case AMD_IP_BLOCK_TYPE_VCE: | ||
3323 | kv_dpm_powergate_vce(handle, gate); | ||
3324 | break; | ||
3316 | default: | 3325 | default: |
3317 | break; | 3326 | break; |
3318 | } | 3327 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) | |||
6887 | 6887 | ||
6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
6889 | si_thermal_start_thermal_controller(adev); | 6889 | si_thermal_start_thermal_controller(adev); |
6890 | ni_update_current_ps(adev, boot_ps); | ||
6891 | 6890 | ||
6892 | return 0; | 6891 | return 0; |
6893 | } | 6892 | } |
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) | |||
7763 | else | 7762 | else |
7764 | adev->pm.dpm_enabled = true; | 7763 | adev->pm.dpm_enabled = true; |
7765 | mutex_unlock(&adev->pm.mutex); | 7764 | mutex_unlock(&adev->pm.mutex); |
7766 | 7765 | amdgpu_pm_compute_clocks(adev); | |
7767 | return ret; | 7766 | return ret; |
7768 | } | 7767 | } |
7769 | 7768 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | |||
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, | |||
480 | { | 480 | { |
481 | struct dc_context *ctx = pp->ctx; | 481 | struct dc_context *ctx = pp->ctx; |
482 | struct amdgpu_device *adev = ctx->driver_context; | 482 | struct amdgpu_device *adev = ctx->driver_context; |
483 | void *pp_handle = adev->powerplay.pp_handle; | ||
483 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | 484 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; |
485 | struct pp_display_clock_request clock = {0}; | ||
484 | 486 | ||
485 | if (!pp_funcs || !pp_funcs->display_configuration_changed) | 487 | if (!pp_funcs || !pp_funcs->display_clock_voltage_request) |
486 | return; | 488 | return; |
487 | 489 | ||
488 | amdgpu_dpm_display_configuration_changed(adev); | 490 | clock.clock_type = amd_pp_dcf_clock; |
491 | clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; | ||
492 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
493 | |||
494 | clock.clock_type = amd_pp_f_clock; | ||
495 | clock.clock_freq_in_khz = req->hard_min_fclk_khz; | ||
496 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
489 | } | 497 | } |
490 | 498 | ||
491 | void pp_rv_set_wm_ranges(struct pp_smu *pp, | 499 | void pp_rv_set_wm_ranges(struct pp_smu *pp, |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..37eaf72ace54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) | |||
754 | * fail-safe mode | 754 | * fail-safe mode |
755 | */ | 755 | */ |
756 | if (dc_is_hdmi_signal(link->connector_signal) || | 756 | if (dc_is_hdmi_signal(link->connector_signal) || |
757 | dc_is_dvi_signal(link->connector_signal)) | 757 | dc_is_dvi_signal(link->connector_signal)) { |
758 | if (prev_sink != NULL) | ||
759 | dc_sink_release(prev_sink); | ||
760 | |||
758 | return false; | 761 | return false; |
762 | } | ||
759 | default: | 763 | default: |
760 | break; | 764 | break; |
761 | } | 765 | } |
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 6e3f56684f4e..51ed99a37803 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c | |||
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, | |||
170 | unsigned int tiling_mode = 0; | 170 | unsigned int tiling_mode = 0; |
171 | unsigned int stride = 0; | 171 | unsigned int stride = 0; |
172 | 172 | ||
173 | switch (info->drm_format_mod << 10) { | 173 | switch (info->drm_format_mod) { |
174 | case PLANE_CTL_TILED_LINEAR: | 174 | case DRM_FORMAT_MOD_LINEAR: |
175 | tiling_mode = I915_TILING_NONE; | 175 | tiling_mode = I915_TILING_NONE; |
176 | break; | 176 | break; |
177 | case PLANE_CTL_TILED_X: | 177 | case I915_FORMAT_MOD_X_TILED: |
178 | tiling_mode = I915_TILING_X; | 178 | tiling_mode = I915_TILING_X; |
179 | stride = info->stride; | 179 | stride = info->stride; |
180 | break; | 180 | break; |
181 | case PLANE_CTL_TILED_Y: | 181 | case I915_FORMAT_MOD_Y_TILED: |
182 | case I915_FORMAT_MOD_Yf_TILED: | ||
182 | tiling_mode = I915_TILING_Y; | 183 | tiling_mode = I915_TILING_Y; |
183 | stride = info->stride; | 184 | stride = info->stride; |
184 | break; | 185 | break; |
185 | default: | 186 | default: |
186 | gvt_dbg_core("not supported tiling mode\n"); | 187 | gvt_dbg_core("invalid drm_format_mod %llx for tiling\n", |
188 | info->drm_format_mod); | ||
187 | } | 189 | } |
188 | obj->tiling_and_stride = tiling_mode | stride; | 190 | obj->tiling_and_stride = tiling_mode | stride; |
189 | } else { | 191 | } else { |
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev, | |||
222 | info->height = p.height; | 224 | info->height = p.height; |
223 | info->stride = p.stride; | 225 | info->stride = p.stride; |
224 | info->drm_format = p.drm_format; | 226 | info->drm_format = p.drm_format; |
225 | info->drm_format_mod = p.tiled; | 227 | |
228 | switch (p.tiled) { | ||
229 | case PLANE_CTL_TILED_LINEAR: | ||
230 | info->drm_format_mod = DRM_FORMAT_MOD_LINEAR; | ||
231 | break; | ||
232 | case PLANE_CTL_TILED_X: | ||
233 | info->drm_format_mod = I915_FORMAT_MOD_X_TILED; | ||
234 | break; | ||
235 | case PLANE_CTL_TILED_Y: | ||
236 | info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; | ||
237 | break; | ||
238 | case PLANE_CTL_TILED_YF: | ||
239 | info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; | ||
240 | break; | ||
241 | default: | ||
242 | gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); | ||
243 | } | ||
244 | |||
226 | info->size = (((p.stride * p.height * p.bpp) / 8) + | 245 | info->size = (((p.stride * p.height * p.bpp) / 8) + |
227 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 246 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
228 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { | 247 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { |
229 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); | 248 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); |
230 | if (ret) | 249 | if (ret) |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index face664be3e8..481896fb712a 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c | |||
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
220 | if (IS_SKYLAKE(dev_priv) | 220 | if (IS_SKYLAKE(dev_priv) |
221 | || IS_KABYLAKE(dev_priv) | 221 | || IS_KABYLAKE(dev_priv) |
222 | || IS_BROXTON(dev_priv)) { | 222 | || IS_BROXTON(dev_priv)) { |
223 | plane->tiled = (val & PLANE_CTL_TILED_MASK) >> | 223 | plane->tiled = val & PLANE_CTL_TILED_MASK; |
224 | _PLANE_CTL_TILED_SHIFT; | ||
225 | fmt = skl_format_to_drm( | 224 | fmt = skl_format_to_drm( |
226 | val & PLANE_CTL_FORMAT_MASK, | 225 | val & PLANE_CTL_FORMAT_MASK, |
227 | val & PLANE_CTL_ORDER_RGBX, | 226 | val & PLANE_CTL_ORDER_RGBX, |
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
260 | return -EINVAL; | 259 | return -EINVAL; |
261 | } | 260 | } |
262 | 261 | ||
263 | plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), | 262 | plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled, |
264 | (IS_SKYLAKE(dev_priv) | 263 | (IS_SKYLAKE(dev_priv) |
265 | || IS_KABYLAKE(dev_priv) | 264 | || IS_KABYLAKE(dev_priv) |
266 | || IS_BROXTON(dev_priv)) ? | 265 | || IS_BROXTON(dev_priv)) ? |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index cb055f3c81a2..60c155085029 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h | |||
@@ -101,7 +101,7 @@ struct intel_gvt; | |||
101 | /* color space conversion and gamma correction are not included */ | 101 | /* color space conversion and gamma correction are not included */ |
102 | struct intel_vgpu_primary_plane_format { | 102 | struct intel_vgpu_primary_plane_format { |
103 | u8 enabled; /* plane is enabled */ | 103 | u8 enabled; /* plane is enabled */ |
104 | u8 tiled; /* X-tiled */ | 104 | u32 tiled; /* tiling mode: linear, X-tiled, Y-tiled, etc. */ |
105 | u8 bpp; /* bits per pixel */ | 105 | u8 bpp; /* bits per pixel */ |
106 | u32 hw_format; /* format field in the PRI_CTL register */ | 106 | u32 hw_format; /* format field in the PRI_CTL register */ |
107 | u32 drm_format; /* format in DRM definition */ | 107 | u32 drm_format; /* format in DRM definition */ |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 7a58ca555197..72afa518edd9 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, | |||
1296 | return 0; | 1296 | return 0; |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu, | ||
1300 | unsigned int offset, void *p_data, unsigned int bytes) | ||
1301 | { | ||
1302 | write_vreg(vgpu, offset, p_data, bytes); | ||
1303 | |||
1304 | if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST) | ||
1305 | vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE; | ||
1306 | else | ||
1307 | vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE; | ||
1308 | |||
1309 | return 0; | ||
1310 | } | ||
1311 | |||
1299 | static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, | 1312 | static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, |
1300 | unsigned int offset, void *p_data, unsigned int bytes) | 1313 | unsigned int offset, void *p_data, unsigned int bytes) |
1301 | { | 1314 | { |
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, | |||
1525 | u32 v = *(u32 *)p_data; | 1538 | u32 v = *(u32 *)p_data; |
1526 | u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; | 1539 | u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; |
1527 | 1540 | ||
1528 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; | 1541 | switch (offset) { |
1529 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; | 1542 | case _PHY_CTL_FAMILY_EDP: |
1530 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; | 1543 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; |
1544 | break; | ||
1545 | case _PHY_CTL_FAMILY_DDI: | ||
1546 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; | ||
1547 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; | ||
1548 | break; | ||
1549 | } | ||
1531 | 1550 | ||
1532 | vgpu_vreg(vgpu, offset) = v; | 1551 | vgpu_vreg(vgpu, offset) = v; |
1533 | 1552 | ||
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
2812 | MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, | 2831 | MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, |
2813 | skl_power_well_ctl_write); | 2832 | skl_power_well_ctl_write); |
2814 | 2833 | ||
2834 | MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); | ||
2835 | |||
2815 | MMIO_D(_MMIO(0xa210), D_SKL_PLUS); | 2836 | MMIO_D(_MMIO(0xa210), D_SKL_PLUS); |
2816 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2837 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
2817 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2838 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
2987 | NULL, gen9_trtte_write); | 3008 | NULL, gen9_trtte_write); |
2988 | MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); | 3009 | MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); |
2989 | 3010 | ||
2990 | MMIO_D(_MMIO(0x45008), D_SKL_PLUS); | ||
2991 | |||
2992 | MMIO_D(_MMIO(0x46430), D_SKL_PLUS); | 3011 | MMIO_D(_MMIO(0x46430), D_SKL_PLUS); |
2993 | 3012 | ||
2994 | MMIO_D(_MMIO(0x46520), D_SKL_PLUS); | 3013 | MMIO_D(_MMIO(0x46520), D_SKL_PLUS); |
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
3025 | MMIO_D(_MMIO(0x44500), D_SKL_PLUS); | 3044 | MMIO_D(_MMIO(0x44500), D_SKL_PLUS); |
3026 | MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); | 3045 | MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
3027 | MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, | 3046 | MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
3028 | NULL, NULL); | 3047 | NULL, NULL); |
3048 | MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, | ||
3049 | NULL, NULL); | ||
3029 | 3050 | ||
3030 | MMIO_D(_MMIO(0x4ab8), D_KBL); | 3051 | MMIO_D(_MMIO(0x4ab8), D_KBL); |
3031 | MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); | 3052 | MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); |
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 42e1e6bdcc2c..e872f4847fbe 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c | |||
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, | |||
562 | * performance for batch mmio read/write, so we need | 562 | * performance for batch mmio read/write, so we need |
563 | * to handle forcewake manually. | 563 | * to handle forcewake manually. |
564 | */ | 564 | */ |
565 | intel_runtime_pm_get(dev_priv); | ||
566 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 565 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
567 | switch_mmio(pre, next, ring_id); | 566 | switch_mmio(pre, next, ring_id); |
568 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 567 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
569 | intel_runtime_pm_put(dev_priv); | ||
570 | } | 568 | } |
571 | 569 | ||
572 | /** | 570 | /** |
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 09d7bb72b4ff..c32e7d5e8629 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) | |||
47 | return false; | 47 | return false; |
48 | } | 48 | } |
49 | 49 | ||
50 | /* Give a starting vGPU higher priority for 2 seconds */ | ||
51 | #define GVT_SCHED_VGPU_PRI_TIME 2 | ||
52 | |||
50 | struct vgpu_sched_data { | 53 | struct vgpu_sched_data { |
51 | struct list_head lru_list; | 54 | struct list_head lru_list; |
52 | struct intel_vgpu *vgpu; | 55 | struct intel_vgpu *vgpu; |
53 | bool active; | 56 | bool active; |
54 | 57 | bool pri_sched; | |
58 | ktime_t pri_time; | ||
55 | ktime_t sched_in_time; | 59 | ktime_t sched_in_time; |
56 | ktime_t sched_time; | 60 | ktime_t sched_time; |
57 | ktime_t left_ts; | 61 | ktime_t left_ts; |
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) | |||
183 | if (!vgpu_has_pending_workload(vgpu_data->vgpu)) | 187 | if (!vgpu_has_pending_workload(vgpu_data->vgpu)) |
184 | continue; | 188 | continue; |
185 | 189 | ||
190 | if (vgpu_data->pri_sched) { | ||
191 | if (ktime_before(ktime_get(), vgpu_data->pri_time)) { | ||
192 | vgpu = vgpu_data->vgpu; | ||
193 | break; | ||
194 | } else | ||
195 | vgpu_data->pri_sched = false; | ||
196 | } | ||
197 | |||
186 | /* Return the vGPU only if it has time slice left */ | 198 | /* Return the vGPU only if it has time slice left */ |
187 | if (vgpu_data->left_ts > 0) { | 199 | if (vgpu_data->left_ts > 0) { |
188 | vgpu = vgpu_data->vgpu; | 200 | vgpu = vgpu_data->vgpu; |
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) | |||
202 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 214 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
203 | struct vgpu_sched_data *vgpu_data; | 215 | struct vgpu_sched_data *vgpu_data; |
204 | struct intel_vgpu *vgpu = NULL; | 216 | struct intel_vgpu *vgpu = NULL; |
217 | |||
205 | /* no active vgpu or has already had a target */ | 218 | /* no active vgpu or has already had a target */ |
206 | if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) | 219 | if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) |
207 | goto out; | 220 | goto out; |
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) | |||
209 | vgpu = find_busy_vgpu(sched_data); | 222 | vgpu = find_busy_vgpu(sched_data); |
210 | if (vgpu) { | 223 | if (vgpu) { |
211 | scheduler->next_vgpu = vgpu; | 224 | scheduler->next_vgpu = vgpu; |
212 | |||
213 | /* Move the last used vGPU to the tail of lru_list */ | ||
214 | vgpu_data = vgpu->sched_data; | 225 | vgpu_data = vgpu->sched_data; |
215 | list_del_init(&vgpu_data->lru_list); | 226 | if (!vgpu_data->pri_sched) { |
216 | list_add_tail(&vgpu_data->lru_list, | 227 | /* Move the last used vGPU to the tail of lru_list */ |
217 | &sched_data->lru_runq_head); | 228 | list_del_init(&vgpu_data->lru_list); |
229 | list_add_tail(&vgpu_data->lru_list, | ||
230 | &sched_data->lru_runq_head); | ||
231 | } | ||
218 | } else { | 232 | } else { |
219 | scheduler->next_vgpu = gvt->idle_vgpu; | 233 | scheduler->next_vgpu = gvt->idle_vgpu; |
220 | } | 234 | } |
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) | |||
328 | { | 342 | { |
329 | struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; | 343 | struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; |
330 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; | 344 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; |
345 | ktime_t now; | ||
331 | 346 | ||
332 | if (!list_empty(&vgpu_data->lru_list)) | 347 | if (!list_empty(&vgpu_data->lru_list)) |
333 | return; | 348 | return; |
334 | 349 | ||
335 | list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); | 350 | now = ktime_get(); |
351 | vgpu_data->pri_time = ktime_add(now, | ||
352 | ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0)); | ||
353 | vgpu_data->pri_sched = true; | ||
354 | |||
355 | list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head); | ||
336 | 356 | ||
337 | if (!hrtimer_active(&sched_data->timer)) | 357 | if (!hrtimer_active(&sched_data->timer)) |
338 | hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), | 358 | hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), |
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
426 | &vgpu->gvt->scheduler; | 446 | &vgpu->gvt->scheduler; |
427 | int ring_id; | 447 | int ring_id; |
428 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; | 448 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; |
449 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
429 | 450 | ||
430 | if (!vgpu_data->active) | 451 | if (!vgpu_data->active) |
431 | return; | 452 | return; |
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
444 | scheduler->current_vgpu = NULL; | 465 | scheduler->current_vgpu = NULL; |
445 | } | 466 | } |
446 | 467 | ||
468 | intel_runtime_pm_get(dev_priv); | ||
447 | spin_lock_bh(&scheduler->mmio_context_lock); | 469 | spin_lock_bh(&scheduler->mmio_context_lock); |
448 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | 470 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { |
449 | if (scheduler->engine_owner[ring_id] == vgpu) { | 471 | if (scheduler->engine_owner[ring_id] == vgpu) { |
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
452 | } | 474 | } |
453 | } | 475 | } |
454 | spin_unlock_bh(&scheduler->mmio_context_lock); | 476 | spin_unlock_bh(&scheduler->mmio_context_lock); |
477 | intel_runtime_pm_put(dev_priv); | ||
455 | mutex_unlock(&vgpu->gvt->sched_lock); | 478 | mutex_unlock(&vgpu->gvt->sched_lock); |
456 | } | 479 | } |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 08ec7446282e..9e63cd47b60f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -10422,7 +10422,7 @@ enum skl_power_gate { | |||
10422 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ | 10422 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ |
10423 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) | 10423 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) |
10424 | #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | 10424 | #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ |
10425 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ | 10425 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ |
10426 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) | 10426 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) |
10427 | #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) | 10427 | #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) |
10428 | #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) | 10428 | #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) |
@@ -10437,7 +10437,7 @@ enum skl_power_gate { | |||
10437 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ | 10437 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ |
10438 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) | 10438 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) |
10439 | #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | 10439 | #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ |
10440 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ | 10440 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ |
10441 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) | 10441 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) |
10442 | #define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) | 10442 | #define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) |
10443 | #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) | 10443 | #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..98358b4b36de 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, | |||
199 | vma->flags |= I915_VMA_GGTT; | 199 | vma->flags |= I915_VMA_GGTT; |
200 | list_add(&vma->obj_link, &obj->vma_list); | 200 | list_add(&vma->obj_link, &obj->vma_list); |
201 | } else { | 201 | } else { |
202 | i915_ppgtt_get(i915_vm_to_ppgtt(vm)); | ||
203 | list_add_tail(&vma->obj_link, &obj->vma_list); | 202 | list_add_tail(&vma->obj_link, &obj->vma_list); |
204 | } | 203 | } |
205 | 204 | ||
@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) | |||
807 | if (vma->obj) | 806 | if (vma->obj) |
808 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); | 807 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); |
809 | 808 | ||
810 | if (!i915_vma_is_ggtt(vma)) | ||
811 | i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); | ||
812 | |||
813 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { | 809 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { |
814 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); | 810 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); |
815 | kfree(iter); | 811 | kfree(iter); |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b725835b47ef..769f3f586661 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) | |||
962 | { | 962 | { |
963 | int ret; | 963 | int ret; |
964 | 964 | ||
965 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | ||
966 | return; | ||
967 | |||
968 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); | 965 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); |
969 | if (ret < 0) { | 966 | if (ret < 0) { |
970 | DRM_ERROR("failed to add audio component (%d)\n", ret); | 967 | DRM_ERROR("failed to add audio component (%d)\n", ret); |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 8761513f3532..c9af34861d9e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | |||
2708 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) | 2708 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) |
2709 | intel_dp_stop_link_train(intel_dp); | 2709 | intel_dp_stop_link_train(intel_dp); |
2710 | 2710 | ||
2711 | intel_ddi_enable_pipe_clock(crtc_state); | 2711 | if (!is_mst) |
2712 | intel_ddi_enable_pipe_clock(crtc_state); | ||
2712 | } | 2713 | } |
2713 | 2714 | ||
2714 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, | 2715 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, |
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, | |||
2810 | bool is_mst = intel_crtc_has_type(old_crtc_state, | 2811 | bool is_mst = intel_crtc_has_type(old_crtc_state, |
2811 | INTEL_OUTPUT_DP_MST); | 2812 | INTEL_OUTPUT_DP_MST); |
2812 | 2813 | ||
2813 | intel_ddi_disable_pipe_clock(old_crtc_state); | 2814 | if (!is_mst) { |
2814 | 2815 | intel_ddi_disable_pipe_clock(old_crtc_state); | |
2815 | /* | 2816 | /* |
2816 | * Power down sink before disabling the port, otherwise we end | 2817 | * Power down sink before disabling the port, otherwise we end |
2817 | * up getting interrupts from the sink on detecting link loss. | 2818 | * up getting interrupts from the sink on detecting link loss. |
2818 | */ | 2819 | */ |
2819 | if (!is_mst) | ||
2820 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 2820 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
2821 | } | ||
2821 | 2822 | ||
2822 | intel_disable_ddi_buf(encoder); | 2823 | intel_disable_ddi_buf(encoder); |
2823 | 2824 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ed3fa1c8a983..4a3c8ee9a973 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; | 2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; |
2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; | 2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; |
2990 | int dst_x = plane_state->base.dst.x1; | 2990 | int dst_x = plane_state->base.dst.x1; |
2991 | int dst_w = drm_rect_width(&plane_state->base.dst); | ||
2991 | int pipe_src_w = crtc_state->pipe_src_w; | 2992 | int pipe_src_w = crtc_state->pipe_src_w; |
2992 | int max_width = skl_max_plane_width(fb, 0, rotation); | 2993 | int max_width = skl_max_plane_width(fb, 0, rotation); |
2993 | int max_height = 4096; | 2994 | int max_height = 4096; |
@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
3009 | * screen may cause FIFO underflow and display corruption. | 3010 | * screen may cause FIFO underflow and display corruption. |
3010 | */ | 3011 | */ |
3011 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && | 3012 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && |
3012 | (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { | 3013 | (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { |
3013 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", | 3014 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", |
3014 | dst_x + w < 4 ? "end" : "start", | 3015 | dst_x + dst_w < 4 ? "end" : "start", |
3015 | dst_x + w < 4 ? dst_x + w : dst_x, | 3016 | dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, |
3016 | 4, pipe_src_w - 4); | 3017 | 4, pipe_src_w - 4); |
3017 | return -ERANGE; | 3018 | return -ERANGE; |
3018 | } | 3019 | } |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cd0f649b57a5..1193202766a2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) | |||
4160 | return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); | 4160 | return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); |
4161 | } | 4161 | } |
4162 | 4162 | ||
4163 | /* | ||
4164 | * If display is now connected check links status, | ||
4165 | * there has been known issues of link loss triggering | ||
4166 | * long pulse. | ||
4167 | * | ||
4168 | * Some sinks (eg. ASUS PB287Q) seem to perform some | ||
4169 | * weird HPD ping pong during modesets. So we can apparently | ||
4170 | * end up with HPD going low during a modeset, and then | ||
4171 | * going back up soon after. And once that happens we must | ||
4172 | * retrain the link to get a picture. That's in case no | ||
4173 | * userspace component reacted to intermittent HPD dip. | ||
4174 | */ | ||
4175 | int intel_dp_retrain_link(struct intel_encoder *encoder, | 4163 | int intel_dp_retrain_link(struct intel_encoder *encoder, |
4176 | struct drm_modeset_acquire_ctx *ctx) | 4164 | struct drm_modeset_acquire_ctx *ctx) |
4177 | { | 4165 | { |
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) | |||
4661 | } | 4649 | } |
4662 | 4650 | ||
4663 | static int | 4651 | static int |
4664 | intel_dp_long_pulse(struct intel_connector *connector) | 4652 | intel_dp_long_pulse(struct intel_connector *connector, |
4653 | struct drm_modeset_acquire_ctx *ctx) | ||
4665 | { | 4654 | { |
4666 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | 4655 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
4667 | struct intel_dp *intel_dp = intel_attached_dp(&connector->base); | 4656 | struct intel_dp *intel_dp = intel_attached_dp(&connector->base); |
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector) | |||
4720 | */ | 4709 | */ |
4721 | status = connector_status_disconnected; | 4710 | status = connector_status_disconnected; |
4722 | goto out; | 4711 | goto out; |
4712 | } else { | ||
4713 | /* | ||
4714 | * If the display is now connected, check the link status; | ||
4715 | * there have been known issues of link loss triggering a | ||
4716 | * long pulse. | ||
4717 | * | ||
4718 | * Some sinks (e.g. ASUS PB287Q) seem to perform some | ||
4719 | * weird HPD ping pong during modesets. So we can apparently | ||
4720 | * end up with HPD going low during a modeset, and then | ||
4721 | * going back up soon after. And once that happens we must | ||
4722 | * retrain the link to get a picture. That's in case no | ||
4723 | * userspace component reacted to intermittent HPD dip. | ||
4724 | */ | ||
4725 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; | ||
4726 | |||
4727 | intel_dp_retrain_link(encoder, ctx); | ||
4723 | } | 4728 | } |
4724 | 4729 | ||
4725 | /* | 4730 | /* |
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector, | |||
4781 | return ret; | 4786 | return ret; |
4782 | } | 4787 | } |
4783 | 4788 | ||
4784 | status = intel_dp_long_pulse(intel_dp->attached_connector); | 4789 | status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); |
4785 | } | 4790 | } |
4786 | 4791 | ||
4787 | intel_dp->detect_done = false; | 4792 | intel_dp->detect_done = false; |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7e3e01607643..4ecd65375603 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, | |||
166 | struct intel_connector *connector = | 166 | struct intel_connector *connector = |
167 | to_intel_connector(old_conn_state->connector); | 167 | to_intel_connector(old_conn_state->connector); |
168 | 168 | ||
169 | intel_ddi_disable_pipe_clock(old_crtc_state); | ||
170 | |||
169 | /* this can fail */ | 171 | /* this can fail */ |
170 | drm_dp_check_act_status(&intel_dp->mst_mgr); | 172 | drm_dp_check_act_status(&intel_dp->mst_mgr); |
171 | /* and this can also fail */ | 173 | /* and this can also fail */ |
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, | |||
252 | I915_WRITE(DP_TP_STATUS(port), temp); | 254 | I915_WRITE(DP_TP_STATUS(port), temp); |
253 | 255 | ||
254 | ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); | 256 | ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); |
257 | |||
258 | intel_ddi_enable_pipe_clock(pipe_config); | ||
255 | } | 259 | } |
256 | 260 | ||
257 | static void intel_mst_enable_dp(struct intel_encoder *encoder, | 261 | static void intel_mst_enable_dp(struct intel_encoder *encoder, |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a9076402dcb0..192972a7d287 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, | |||
943 | 943 | ||
944 | ret = i2c_transfer(adapter, &msg, 1); | 944 | ret = i2c_transfer(adapter, &msg, 1); |
945 | if (ret == 1) | 945 | if (ret == 1) |
946 | return 0; | 946 | ret = 0; |
947 | return ret >= 0 ? -EIO : ret; | 947 | else if (ret >= 0) |
948 | ret = -EIO; | ||
949 | |||
950 | kfree(write_buf); | ||
951 | return ret; | ||
948 | } | 952 | } |
949 | 953 | ||
950 | static | 954 | static |
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, | |||
74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", | 74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", |
75 | lspcon_mode_name(mode)); | 75 | lspcon_mode_name(mode)); |
76 | 76 | ||
77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); | 77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); |
78 | if (current_mode != mode) | 78 | if (current_mode != mode) |
79 | DRM_ERROR("LSPCON mode hasn't settled\n"); | 79 | DRM_ERROR("LSPCON mode hasn't settled\n"); |
80 | 80 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 978782a77629..28d191192945 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c | |||
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, | |||
132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); | 132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); |
133 | } | 133 | } |
134 | 134 | ||
135 | static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) | ||
136 | { | ||
137 | return 4; | ||
138 | } | ||
139 | |||
135 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) | 140 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) |
136 | { | 141 | { |
137 | unsigned int reg; | 142 | unsigned int reg; |
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) | |||
157 | 162 | ||
158 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) | 163 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) |
159 | { | 164 | { |
165 | /* The return values in the "MEM_MODE_INPUT_FORMAT_XXX" | ||
166 | * switch are defined in the MediaTek HW data sheet. | ||
167 | * The alphabetical order in XXX has no relation to the | ||
168 | * data arrangement in memory. | ||
169 | */ | ||
160 | switch (fmt) { | 170 | switch (fmt) { |
161 | default: | 171 | default: |
162 | case DRM_FORMAT_RGB565: | 172 | case DRM_FORMAT_RGB565: |
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { | |||
221 | .stop = mtk_ovl_stop, | 231 | .stop = mtk_ovl_stop, |
222 | .enable_vblank = mtk_ovl_enable_vblank, | 232 | .enable_vblank = mtk_ovl_enable_vblank, |
223 | .disable_vblank = mtk_ovl_disable_vblank, | 233 | .disable_vblank = mtk_ovl_disable_vblank, |
234 | .layer_nr = mtk_ovl_layer_nr, | ||
224 | .layer_on = mtk_ovl_layer_on, | 235 | .layer_on = mtk_ovl_layer_on, |
225 | .layer_off = mtk_ovl_layer_off, | 236 | .layer_off = mtk_ovl_layer_off, |
226 | .layer_config = mtk_ovl_layer_config, | 237 | .layer_config = mtk_ovl_layer_config, |
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 585943c81e1f..b0a5cffe345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c | |||
@@ -31,14 +31,31 @@ | |||
31 | #define RDMA_REG_UPDATE_INT BIT(0) | 31 | #define RDMA_REG_UPDATE_INT BIT(0) |
32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 | 32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 |
33 | #define RDMA_ENGINE_EN BIT(0) | 33 | #define RDMA_ENGINE_EN BIT(0) |
34 | #define RDMA_MODE_MEMORY BIT(1) | ||
34 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 | 35 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 |
36 | #define RDMA_MATRIX_ENABLE BIT(17) | ||
37 | #define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) | ||
38 | #define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) | ||
35 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 | 39 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 |
36 | #define DISP_REG_RDMA_TARGET_LINE 0x001c | 40 | #define DISP_REG_RDMA_TARGET_LINE 0x001c |
41 | #define DISP_RDMA_MEM_CON 0x0024 | ||
42 | #define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) | ||
43 | #define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) | ||
44 | #define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) | ||
45 | #define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) | ||
46 | #define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) | ||
47 | #define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) | ||
48 | #define MEM_MODE_INPUT_SWAP BIT(8) | ||
49 | #define DISP_RDMA_MEM_SRC_PITCH 0x002c | ||
50 | #define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 | ||
37 | #define DISP_REG_RDMA_FIFO_CON 0x0040 | 51 | #define DISP_REG_RDMA_FIFO_CON 0x0040 |
38 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) | 52 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) |
39 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) | 53 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) |
40 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) | 54 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) |
41 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) | 55 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) |
56 | #define DISP_RDMA_MEM_START_ADDR 0x0f00 | ||
57 | |||
58 | #define RDMA_MEM_GMC 0x40402020 | ||
42 | 59 | ||
43 | struct mtk_disp_rdma_data { | 60 | struct mtk_disp_rdma_data { |
44 | unsigned int fifo_size; | 61 | unsigned int fifo_size; |
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, | |||
138 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); | 155 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); |
139 | } | 156 | } |
140 | 157 | ||
158 | static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, | ||
159 | unsigned int fmt) | ||
160 | { | ||
161 | /* The return values in the "MEM_MODE_INPUT_FORMAT_XXX" | ||
162 | * switch are defined in the MediaTek HW data sheet. | ||
163 | * The alphabetical order in XXX has no relation to the | ||
164 | * data arrangement in memory. | ||
165 | */ | ||
166 | switch (fmt) { | ||
167 | default: | ||
168 | case DRM_FORMAT_RGB565: | ||
169 | return MEM_MODE_INPUT_FORMAT_RGB565; | ||
170 | case DRM_FORMAT_BGR565: | ||
171 | return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; | ||
172 | case DRM_FORMAT_RGB888: | ||
173 | return MEM_MODE_INPUT_FORMAT_RGB888; | ||
174 | case DRM_FORMAT_BGR888: | ||
175 | return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; | ||
176 | case DRM_FORMAT_RGBX8888: | ||
177 | case DRM_FORMAT_RGBA8888: | ||
178 | return MEM_MODE_INPUT_FORMAT_ARGB8888; | ||
179 | case DRM_FORMAT_BGRX8888: | ||
180 | case DRM_FORMAT_BGRA8888: | ||
181 | return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; | ||
182 | case DRM_FORMAT_XRGB8888: | ||
183 | case DRM_FORMAT_ARGB8888: | ||
184 | return MEM_MODE_INPUT_FORMAT_RGBA8888; | ||
185 | case DRM_FORMAT_XBGR8888: | ||
186 | case DRM_FORMAT_ABGR8888: | ||
187 | return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; | ||
188 | case DRM_FORMAT_UYVY: | ||
189 | return MEM_MODE_INPUT_FORMAT_UYVY; | ||
190 | case DRM_FORMAT_YUYV: | ||
191 | return MEM_MODE_INPUT_FORMAT_YUYV; | ||
192 | } | ||
193 | } | ||
194 | |||
195 | static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) | ||
196 | { | ||
197 | return 1; | ||
198 | } | ||
199 | |||
200 | static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, | ||
201 | struct mtk_plane_state *state) | ||
202 | { | ||
203 | struct mtk_disp_rdma *rdma = comp_to_rdma(comp); | ||
204 | struct mtk_plane_pending_state *pending = &state->pending; | ||
205 | unsigned int addr = pending->addr; | ||
206 | unsigned int pitch = pending->pitch & 0xffff; | ||
207 | unsigned int fmt = pending->format; | ||
208 | unsigned int con; | ||
209 | |||
210 | con = rdma_fmt_convert(rdma, fmt); | ||
211 | writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); | ||
212 | |||
213 | if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { | ||
214 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
215 | RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); | ||
216 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
217 | RDMA_MATRIX_INT_MTX_SEL, | ||
218 | RDMA_MATRIX_INT_MTX_BT601_to_RGB); | ||
219 | } else { | ||
220 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
221 | RDMA_MATRIX_ENABLE, 0); | ||
222 | } | ||
223 | |||
224 | writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); | ||
225 | writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); | ||
226 | writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); | ||
227 | rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, | ||
228 | RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); | ||
229 | } | ||
230 | |||
141 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { | 231 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { |
142 | .config = mtk_rdma_config, | 232 | .config = mtk_rdma_config, |
143 | .start = mtk_rdma_start, | 233 | .start = mtk_rdma_start, |
144 | .stop = mtk_rdma_stop, | 234 | .stop = mtk_rdma_stop, |
145 | .enable_vblank = mtk_rdma_enable_vblank, | 235 | .enable_vblank = mtk_rdma_enable_vblank, |
146 | .disable_vblank = mtk_rdma_disable_vblank, | 236 | .disable_vblank = mtk_rdma_disable_vblank, |
237 | .layer_nr = mtk_rdma_layer_nr, | ||
238 | .layer_config = mtk_rdma_layer_config, | ||
147 | }; | 239 | }; |
148 | 240 | ||
149 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, | 241 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 2d6aa150a9ff..0b976dfd04df 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c | |||
@@ -45,7 +45,8 @@ struct mtk_drm_crtc { | |||
45 | bool pending_needs_vblank; | 45 | bool pending_needs_vblank; |
46 | struct drm_pending_vblank_event *event; | 46 | struct drm_pending_vblank_event *event; |
47 | 47 | ||
48 | struct drm_plane planes[OVL_LAYER_NR]; | 48 | struct drm_plane *planes; |
49 | unsigned int layer_nr; | ||
49 | bool pending_planes; | 50 | bool pending_planes; |
50 | 51 | ||
51 | void __iomem *config_regs; | 52 | void __iomem *config_regs; |
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
171 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | 172 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) |
172 | { | 173 | { |
173 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 174 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
174 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 175 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
175 | 176 | ||
176 | mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); | 177 | mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base); |
177 | 178 | ||
178 | return 0; | 179 | return 0; |
179 | } | 180 | } |
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | |||
181 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) | 182 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) |
182 | { | 183 | { |
183 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 184 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
184 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 185 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
185 | 186 | ||
186 | mtk_ddp_comp_disable_vblank(ovl); | 187 | mtk_ddp_comp_disable_vblank(comp); |
187 | } | 188 | } |
188 | 189 | ||
189 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) | 190 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) |
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) | |||
286 | } | 287 | } |
287 | 288 | ||
288 | /* Initially configure all planes */ | 289 | /* Initially configure all planes */ |
289 | for (i = 0; i < OVL_LAYER_NR; i++) { | 290 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
290 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 291 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
291 | struct mtk_plane_state *plane_state; | 292 | struct mtk_plane_state *plane_state; |
292 | 293 | ||
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
334 | { | 335 | { |
335 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 336 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
336 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); | 337 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); |
337 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 338 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
338 | unsigned int i; | 339 | unsigned int i; |
339 | 340 | ||
340 | /* | 341 | /* |
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
343 | * queue update module registers on vblank. | 344 | * queue update module registers on vblank. |
344 | */ | 345 | */ |
345 | if (state->pending_config) { | 346 | if (state->pending_config) { |
346 | mtk_ddp_comp_config(ovl, state->pending_width, | 347 | mtk_ddp_comp_config(comp, state->pending_width, |
347 | state->pending_height, | 348 | state->pending_height, |
348 | state->pending_vrefresh, 0); | 349 | state->pending_vrefresh, 0); |
349 | 350 | ||
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
351 | } | 352 | } |
352 | 353 | ||
353 | if (mtk_crtc->pending_planes) { | 354 | if (mtk_crtc->pending_planes) { |
354 | for (i = 0; i < OVL_LAYER_NR; i++) { | 355 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
355 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 356 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
356 | struct mtk_plane_state *plane_state; | 357 | struct mtk_plane_state *plane_state; |
357 | 358 | ||
358 | plane_state = to_mtk_plane_state(plane->state); | 359 | plane_state = to_mtk_plane_state(plane->state); |
359 | 360 | ||
360 | if (plane_state->pending.config) { | 361 | if (plane_state->pending.config) { |
361 | mtk_ddp_comp_layer_config(ovl, i, plane_state); | 362 | mtk_ddp_comp_layer_config(comp, i, plane_state); |
362 | plane_state->pending.config = false; | 363 | plane_state->pending.config = false; |
363 | } | 364 | } |
364 | } | 365 | } |
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
370 | struct drm_crtc_state *old_state) | 371 | struct drm_crtc_state *old_state) |
371 | { | 372 | { |
372 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 373 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
373 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 374 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
374 | int ret; | 375 | int ret; |
375 | 376 | ||
376 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 377 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
377 | 378 | ||
378 | ret = mtk_smi_larb_get(ovl->larb_dev); | 379 | ret = mtk_smi_larb_get(comp->larb_dev); |
379 | if (ret) { | 380 | if (ret) { |
380 | DRM_ERROR("Failed to get larb: %d\n", ret); | 381 | DRM_ERROR("Failed to get larb: %d\n", ret); |
381 | return; | 382 | return; |
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
383 | 384 | ||
384 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); | 385 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); |
385 | if (ret) { | 386 | if (ret) { |
386 | mtk_smi_larb_put(ovl->larb_dev); | 387 | mtk_smi_larb_put(comp->larb_dev); |
387 | return; | 388 | return; |
388 | } | 389 | } |
389 | 390 | ||
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
395 | struct drm_crtc_state *old_state) | 396 | struct drm_crtc_state *old_state) |
396 | { | 397 | { |
397 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 398 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
398 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 399 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
399 | int i; | 400 | int i; |
400 | 401 | ||
401 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 402 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
403 | return; | 404 | return; |
404 | 405 | ||
405 | /* Set all pending plane state to disabled */ | 406 | /* Set all pending plane state to disabled */ |
406 | for (i = 0; i < OVL_LAYER_NR; i++) { | 407 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
407 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 408 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
408 | struct mtk_plane_state *plane_state; | 409 | struct mtk_plane_state *plane_state; |
409 | 410 | ||
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
418 | 419 | ||
419 | drm_crtc_vblank_off(crtc); | 420 | drm_crtc_vblank_off(crtc); |
420 | mtk_crtc_ddp_hw_fini(mtk_crtc); | 421 | mtk_crtc_ddp_hw_fini(mtk_crtc); |
421 | mtk_smi_larb_put(ovl->larb_dev); | 422 | mtk_smi_larb_put(comp->larb_dev); |
422 | 423 | ||
423 | mtk_crtc->enabled = false; | 424 | mtk_crtc->enabled = false; |
424 | } | 425 | } |
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, | |||
450 | 451 | ||
451 | if (mtk_crtc->event) | 452 | if (mtk_crtc->event) |
452 | mtk_crtc->pending_needs_vblank = true; | 453 | mtk_crtc->pending_needs_vblank = true; |
453 | for (i = 0; i < OVL_LAYER_NR; i++) { | 454 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
454 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 455 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
455 | struct mtk_plane_state *plane_state; | 456 | struct mtk_plane_state *plane_state; |
456 | 457 | ||
@@ -516,7 +517,7 @@ err_cleanup_crtc: | |||
516 | return ret; | 517 | return ret; |
517 | } | 518 | } |
518 | 519 | ||
519 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) | 520 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) |
520 | { | 521 | { |
521 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 522 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
522 | struct mtk_drm_private *priv = crtc->dev->dev_private; | 523 | struct mtk_drm_private *priv = crtc->dev->dev_private; |
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
598 | mtk_crtc->ddp_comp[i] = comp; | 599 | mtk_crtc->ddp_comp[i] = comp; |
599 | } | 600 | } |
600 | 601 | ||
601 | for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { | 602 | mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); |
603 | mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * | ||
604 | sizeof(struct drm_plane), | ||
605 | GFP_KERNEL); | ||
606 | |||
607 | for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { | ||
602 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : | 608 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : |
603 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : | 609 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : |
604 | DRM_PLANE_TYPE_OVERLAY; | 610 | DRM_PLANE_TYPE_OVERLAY; |
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
609 | } | 615 | } |
610 | 616 | ||
611 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], | 617 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], |
612 | &mtk_crtc->planes[1], pipe); | 618 | mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] : |
619 | NULL, pipe); | ||
613 | if (ret < 0) | 620 | if (ret < 0) |
614 | goto unprepare; | 621 | goto unprepare; |
615 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); | 622 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 9d9410c67ae9..091adb2087eb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h | |||
@@ -18,13 +18,12 @@ | |||
18 | #include "mtk_drm_ddp_comp.h" | 18 | #include "mtk_drm_ddp_comp.h" |
19 | #include "mtk_drm_plane.h" | 19 | #include "mtk_drm_plane.h" |
20 | 20 | ||
21 | #define OVL_LAYER_NR 4 | ||
22 | #define MTK_LUT_SIZE 512 | 21 | #define MTK_LUT_SIZE 512 |
23 | #define MTK_MAX_BPC 10 | 22 | #define MTK_MAX_BPC 10 |
24 | #define MTK_MIN_BPC 3 | 23 | #define MTK_MIN_BPC 3 |
25 | 24 | ||
26 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); | 25 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); |
27 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); | 26 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); |
28 | int mtk_drm_crtc_create(struct drm_device *drm_dev, | 27 | int mtk_drm_crtc_create(struct drm_device *drm_dev, |
29 | const enum mtk_ddp_comp_id *path, | 28 | const enum mtk_ddp_comp_id *path, |
30 | unsigned int path_len); | 29 | unsigned int path_len); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 87e4191c250e..546b3e3b300b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c | |||
@@ -106,6 +106,8 @@ | |||
106 | #define OVL1_MOUT_EN_COLOR1 0x1 | 106 | #define OVL1_MOUT_EN_COLOR1 0x1 |
107 | #define GAMMA_MOUT_EN_RDMA1 0x1 | 107 | #define GAMMA_MOUT_EN_RDMA1 0x1 |
108 | #define RDMA0_SOUT_DPI0 0x2 | 108 | #define RDMA0_SOUT_DPI0 0x2 |
109 | #define RDMA0_SOUT_DPI1 0x3 | ||
110 | #define RDMA0_SOUT_DSI1 0x1 | ||
109 | #define RDMA0_SOUT_DSI2 0x4 | 111 | #define RDMA0_SOUT_DSI2 0x4 |
110 | #define RDMA0_SOUT_DSI3 0x5 | 112 | #define RDMA0_SOUT_DSI3 0x5 |
111 | #define RDMA1_SOUT_DPI0 0x2 | 113 | #define RDMA1_SOUT_DPI0 0x2 |
@@ -122,6 +124,8 @@ | |||
122 | #define DPI0_SEL_IN_RDMA2 0x3 | 124 | #define DPI0_SEL_IN_RDMA2 0x3 |
123 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) | 125 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) |
124 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) | 126 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) |
127 | #define DSI0_SEL_IN_RDMA1 0x1 | ||
128 | #define DSI0_SEL_IN_RDMA2 0x4 | ||
125 | #define DSI1_SEL_IN_RDMA1 0x1 | 129 | #define DSI1_SEL_IN_RDMA1 0x1 |
126 | #define DSI1_SEL_IN_RDMA2 0x4 | 130 | #define DSI1_SEL_IN_RDMA2 0x4 |
127 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) | 131 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) |
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, | |||
224 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { | 228 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { |
225 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 229 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
226 | value = RDMA0_SOUT_DPI0; | 230 | value = RDMA0_SOUT_DPI0; |
231 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { | ||
232 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
233 | value = RDMA0_SOUT_DPI1; | ||
234 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) { | ||
235 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
236 | value = RDMA0_SOUT_DSI1; | ||
227 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { | 237 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { |
228 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 238 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
229 | value = RDMA0_SOUT_DSI2; | 239 | value = RDMA0_SOUT_DSI2; |
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
282 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { | 292 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { |
283 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 293 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
284 | value = DPI1_SEL_IN_RDMA1; | 294 | value = DPI1_SEL_IN_RDMA1; |
295 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) { | ||
296 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | ||
297 | value = DSI0_SEL_IN_RDMA1; | ||
285 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { | 298 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { |
286 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | 299 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; |
287 | value = DSI1_SEL_IN_RDMA1; | 300 | value = DSI1_SEL_IN_RDMA1; |
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
297 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { | 310 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { |
298 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 311 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
299 | value = DPI1_SEL_IN_RDMA2; | 312 | value = DPI1_SEL_IN_RDMA2; |
300 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | 313 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) { |
301 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 314 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
315 | value = DSI0_SEL_IN_RDMA2; | ||
316 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | ||
317 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | ||
302 | value = DSI1_SEL_IN_RDMA2; | 318 | value = DSI1_SEL_IN_RDMA2; |
303 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { | 319 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { |
304 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 320 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 7413ffeb3c9d..8399229e6ad2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | |||
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs { | |||
78 | void (*stop)(struct mtk_ddp_comp *comp); | 78 | void (*stop)(struct mtk_ddp_comp *comp); |
79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); | 79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); |
80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); | 80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); |
81 | unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); | ||
81 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); | 82 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); |
82 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); | 83 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); |
83 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, | 84 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, |
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) | |||
128 | comp->funcs->disable_vblank(comp); | 129 | comp->funcs->disable_vblank(comp); |
129 | } | 130 | } |
130 | 131 | ||
132 | static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) | ||
133 | { | ||
134 | if (comp->funcs && comp->funcs->layer_nr) | ||
135 | return comp->funcs->layer_nr(comp); | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
131 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, | 140 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, |
132 | unsigned int idx) | 141 | unsigned int idx) |
133 | { | 142 | { |
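For context, the new ->layer_nr() hook lets each display component report how many hardware layers it provides, with mtk_ddp_comp_layer_nr() falling back to 0 when a component does not implement it. A minimal component-side sketch, assuming the driver's mtk_drm_ddp_comp.h definitions; the function name, funcs-table name, and the layer count are illustrative, not taken from this patch:

#include "mtk_drm_ddp_comp.h"

/* Hypothetical component: advertise a fixed number of hardware layers. */
static unsigned int mtk_disp_example_layer_nr(struct mtk_ddp_comp *comp)
{
	return 4;	/* e.g. an OVL engine exposing four overlay layers */
}

static const struct mtk_ddp_comp_funcs mtk_disp_example_funcs = {
	.layer_nr = mtk_disp_example_layer_nr,
	/* .config, .start, .stop, .layer_config, ... filled in as before */
};

mtk_drm_crtc_create() above then sizes mtk_crtc->planes from this value instead of the removed OVL_LAYER_NR constant.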
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 39721119713b..47ec604289b7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c | |||
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev) | |||
381 | err_deinit: | 381 | err_deinit: |
382 | mtk_drm_kms_deinit(drm); | 382 | mtk_drm_kms_deinit(drm); |
383 | err_free: | 383 | err_free: |
384 | drm_dev_unref(drm); | 384 | drm_dev_put(drm); |
385 | return ret; | 385 | return ret; |
386 | } | 386 | } |
387 | 387 | ||
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev) | |||
390 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 390 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
391 | 391 | ||
392 | drm_dev_unregister(private->drm); | 392 | drm_dev_unregister(private->drm); |
393 | drm_dev_unref(private->drm); | 393 | drm_dev_put(private->drm); |
394 | private->drm = NULL; | 394 | private->drm = NULL; |
395 | } | 395 | } |
396 | 396 | ||
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev) | |||
564 | 564 | ||
565 | drm_dev_unregister(drm); | 565 | drm_dev_unregister(drm); |
566 | mtk_drm_kms_deinit(drm); | 566 | mtk_drm_kms_deinit(drm); |
567 | drm_dev_unref(drm); | 567 | drm_dev_put(drm); |
568 | 568 | ||
569 | component_master_del(&pdev->dev, &mtk_drm_ops); | 569 | component_master_del(&pdev->dev, &mtk_drm_ops); |
570 | pm_runtime_disable(&pdev->dev); | 570 | pm_runtime_disable(&pdev->dev); |
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev) | |||
580 | { | 580 | { |
581 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 581 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
582 | struct drm_device *drm = private->drm; | 582 | struct drm_device *drm = private->drm; |
583 | int ret; | ||
583 | 584 | ||
584 | drm_kms_helper_poll_disable(drm); | 585 | ret = drm_mode_config_helper_suspend(drm); |
585 | |||
586 | private->suspend_state = drm_atomic_helper_suspend(drm); | ||
587 | if (IS_ERR(private->suspend_state)) { | ||
588 | drm_kms_helper_poll_enable(drm); | ||
589 | return PTR_ERR(private->suspend_state); | ||
590 | } | ||
591 | |||
592 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); | 586 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); |
593 | return 0; | 587 | |
588 | return ret; | ||
594 | } | 589 | } |
595 | 590 | ||
596 | static int mtk_drm_sys_resume(struct device *dev) | 591 | static int mtk_drm_sys_resume(struct device *dev) |
597 | { | 592 | { |
598 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 593 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
599 | struct drm_device *drm = private->drm; | 594 | struct drm_device *drm = private->drm; |
595 | int ret; | ||
600 | 596 | ||
601 | drm_atomic_helper_resume(drm, private->suspend_state); | 597 | ret = drm_mode_config_helper_resume(drm); |
602 | drm_kms_helper_poll_enable(drm); | ||
603 | |||
604 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); | 598 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); |
605 | return 0; | 599 | |
600 | return ret; | ||
606 | } | 601 | } |
607 | #endif | 602 | #endif |
608 | 603 | ||
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 90837f7c7d0f..f4c7516eb989 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c | |||
@@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) | |||
302 | return clamp_val(reg, 0, 1023) & (0xff << 2); | 302 | return clamp_val(reg, 0, 1023) & (0xff << 2); |
303 | } | 303 | } |
304 | 304 | ||
305 | static u16 adt7475_read_word(struct i2c_client *client, int reg) | 305 | static int adt7475_read_word(struct i2c_client *client, int reg) |
306 | { | 306 | { |
307 | u16 val; | 307 | int val1, val2; |
308 | 308 | ||
309 | val = i2c_smbus_read_byte_data(client, reg); | 309 | val1 = i2c_smbus_read_byte_data(client, reg); |
310 | val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); | 310 | if (val1 < 0) |
311 | return val1; | ||
312 | val2 = i2c_smbus_read_byte_data(client, reg + 1); | ||
313 | if (val2 < 0) | ||
314 | return val2; | ||
311 | 315 | ||
312 | return val; | 316 | return val1 | (val2 << 8); |
313 | } | 317 | } |
314 | 318 | ||
315 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) | 319 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) |
@@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, | |||
962 | { | 966 | { |
963 | struct adt7475_data *data = adt7475_update_device(dev); | 967 | struct adt7475_data *data = adt7475_update_device(dev); |
964 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); | 968 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); |
965 | int i = clamp_val(data->range[sattr->index] & 0xf, 0, | 969 | int idx; |
966 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
967 | 970 | ||
968 | if (IS_ERR(data)) | 971 | if (IS_ERR(data)) |
969 | return PTR_ERR(data); | 972 | return PTR_ERR(data); |
973 | idx = clamp_val(data->range[sattr->index] & 0xf, 0, | ||
974 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
970 | 975 | ||
971 | return sprintf(buf, "%d\n", pwmfreq_table[i]); | 976 | return sprintf(buf, "%d\n", pwmfreq_table[idx]); |
972 | } | 977 | } |
973 | 978 | ||
974 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, | 979 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, |
@@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, | |||
1004 | char *buf) | 1009 | char *buf) |
1005 | { | 1010 | { |
1006 | struct adt7475_data *data = adt7475_update_device(dev); | 1011 | struct adt7475_data *data = adt7475_update_device(dev); |
1012 | |||
1013 | if (IS_ERR(data)) | ||
1014 | return PTR_ERR(data); | ||
1015 | |||
1007 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); | 1016 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); |
1008 | } | 1017 | } |
1009 | 1018 | ||
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e6aeabbf84..71d3445ba869 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c | |||
@@ -17,7 +17,7 @@ | |||
17 | * Bi-directional Current/Power Monitor with I2C Interface | 17 | * Bi-directional Current/Power Monitor with I2C Interface |
18 | * Datasheet: http://www.ti.com/product/ina230 | 18 | * Datasheet: http://www.ti.com/product/ina230 |
19 | * | 19 | * |
20 | * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> | 20 | * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> |
21 | * Thanks to Jan Volkering | 21 | * Thanks to Jan Volkering |
22 | * | 22 | * |
23 | * This program is free software; you can redistribute it and/or modify | 23 | * This program is free software; you can redistribute it and/or modify |
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val) | |||
329 | return 0; | 329 | return 0; |
330 | } | 330 | } |
331 | 331 | ||
332 | static ssize_t ina2xx_show_shunt(struct device *dev, | ||
333 | struct device_attribute *da, | ||
334 | char *buf) | ||
335 | { | ||
336 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
337 | |||
338 | return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt); | ||
339 | } | ||
340 | |||
332 | static ssize_t ina2xx_store_shunt(struct device *dev, | 341 | static ssize_t ina2xx_store_shunt(struct device *dev, |
333 | struct device_attribute *da, | 342 | struct device_attribute *da, |
334 | const char *buf, size_t count) | 343 | const char *buf, size_t count) |
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, | |||
403 | 412 | ||
404 | /* shunt resistance */ | 413 | /* shunt resistance */ |
405 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, | 414 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, |
406 | ina2xx_show_value, ina2xx_store_shunt, | 415 | ina2xx_show_shunt, ina2xx_store_shunt, |
407 | INA2XX_CALIBRATION); | 416 | INA2XX_CALIBRATION); |
408 | 417 | ||
409 | /* update interval (ina226 only) */ | 418 | /* update interval (ina226 only) */ |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c6bd61e4695a..944f5b63aecd 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <linux/bitops.h> | 63 | #include <linux/bitops.h> |
64 | #include <linux/dmi.h> | 64 | #include <linux/dmi.h> |
65 | #include <linux/io.h> | 65 | #include <linux/io.h> |
66 | #include <linux/nospec.h> | ||
66 | #include "lm75.h" | 67 | #include "lm75.h" |
67 | 68 | ||
68 | #define USE_ALTERNATE | 69 | #define USE_ALTERNATE |
@@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, | |||
2689 | return err; | 2690 | return err; |
2690 | if (val > NUM_TEMP) | 2691 | if (val > NUM_TEMP) |
2691 | return -EINVAL; | 2692 | return -EINVAL; |
2693 | val = array_index_nospec(val, NUM_TEMP + 1); | ||
2692 | if (val && (!(data->have_temp & BIT(val - 1)) || | 2694 | if (val && (!(data->have_temp & BIT(val - 1)) || |
2693 | !data->temp_src[val - 1])) | 2695 | !data->temp_src[val - 1])) |
2694 | return -EINVAL; | 2696 | return -EINVAL; |
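array_index_nospec() is the standard Spectre-v1 hardening step: bounds-check first, then clamp the index under speculation before it selects an array element, which is why the clamp here uses NUM_TEMP + 1 (valid values run from 0 to NUM_TEMP). A generic sketch of the pattern, with illustrative names:

#include <linux/errno.h>
#include <linux/nospec.h>

/* Sketch: return table[idx] without leaking out-of-bounds data under
 * speculative execution.
 */
static int example_select(const u8 *table, size_t size, unsigned long idx)
{
	if (idx >= size)
		return -EINVAL;

	idx = array_index_nospec(idx, size);
	return table[idx];
}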
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c index fb4e4a6bb1f6..be5ba4690895 100644 --- a/drivers/hwmon/raspberrypi-hwmon.c +++ b/drivers/hwmon/raspberrypi-hwmon.c | |||
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver); | |||
164 | MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); | 164 | MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); |
165 | MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); | 165 | MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); |
166 | MODULE_LICENSE("GPL v2"); | 166 | MODULE_LICENSE("GPL v2"); |
167 | MODULE_ALIAS("platform:raspberrypi-hwmon"); | ||
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 6ec65adaba49..c33dcfb87993 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
@@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap) | |||
110 | } | 110 | } |
111 | #ifdef DEBUG | 111 | #ifdef DEBUG |
112 | if (jiffies != start && i2c_debug >= 3) | 112 | if (jiffies != start && i2c_debug >= 3) |
113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " | 113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n", |
114 | "high\n", jiffies - start); | 114 | jiffies - start); |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | done: | 117 | done: |
@@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
171 | setsda(adap, sb); | 171 | setsda(adap, sb); |
172 | udelay((adap->udelay + 1) / 2); | 172 | udelay((adap->udelay + 1) / 2); |
173 | if (sclhi(adap) < 0) { /* timed out */ | 173 | if (sclhi(adap) < 0) { /* timed out */ |
174 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 174 | bit_dbg(1, &i2c_adap->dev, |
175 | "timeout at bit #%d\n", (int)c, i); | 175 | "i2c_outb: 0x%02x, timeout at bit #%d\n", |
176 | (int)c, i); | ||
176 | return -ETIMEDOUT; | 177 | return -ETIMEDOUT; |
177 | } | 178 | } |
178 | /* FIXME do arbitration here: | 179 | /* FIXME do arbitration here: |
@@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
185 | } | 186 | } |
186 | sdahi(adap); | 187 | sdahi(adap); |
187 | if (sclhi(adap) < 0) { /* timeout */ | 188 | if (sclhi(adap) < 0) { /* timeout */ |
188 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 189 | bit_dbg(1, &i2c_adap->dev, |
189 | "timeout at ack\n", (int)c); | 190 | "i2c_outb: 0x%02x, timeout at ack\n", (int)c); |
190 | return -ETIMEDOUT; | 191 | return -ETIMEDOUT; |
191 | } | 192 | } |
192 | 193 | ||
@@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) | |||
215 | sdahi(adap); | 216 | sdahi(adap); |
216 | for (i = 0; i < 8; i++) { | 217 | for (i = 0; i < 8; i++) { |
217 | if (sclhi(adap) < 0) { /* timeout */ | 218 | if (sclhi(adap) < 0) { /* timeout */ |
218 | bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " | 219 | bit_dbg(1, &i2c_adap->dev, |
219 | "#%d\n", 7 - i); | 220 | "i2c_inb: timeout at bit #%d\n", |
221 | 7 - i); | ||
220 | return -ETIMEDOUT; | 222 | return -ETIMEDOUT; |
221 | } | 223 | } |
222 | indata *= 2; | 224 | indata *= 2; |
@@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
265 | goto bailout; | 267 | goto bailout; |
266 | } | 268 | } |
267 | if (!scl) { | 269 | if (!scl) { |
268 | printk(KERN_WARNING "%s: SCL unexpected low " | 270 | printk(KERN_WARNING |
269 | "while pulling SDA low!\n", name); | 271 | "%s: SCL unexpected low while pulling SDA low!\n", |
272 | name); | ||
270 | goto bailout; | 273 | goto bailout; |
271 | } | 274 | } |
272 | 275 | ||
@@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
278 | goto bailout; | 281 | goto bailout; |
279 | } | 282 | } |
280 | if (!scl) { | 283 | if (!scl) { |
281 | printk(KERN_WARNING "%s: SCL unexpected low " | 284 | printk(KERN_WARNING |
282 | "while pulling SDA high!\n", name); | 285 | "%s: SCL unexpected low while pulling SDA high!\n", |
286 | name); | ||
283 | goto bailout; | 287 | goto bailout; |
284 | } | 288 | } |
285 | 289 | ||
@@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
291 | goto bailout; | 295 | goto bailout; |
292 | } | 296 | } |
293 | if (!sda) { | 297 | if (!sda) { |
294 | printk(KERN_WARNING "%s: SDA unexpected low " | 298 | printk(KERN_WARNING |
295 | "while pulling SCL low!\n", name); | 299 | "%s: SDA unexpected low while pulling SCL low!\n", |
300 | name); | ||
296 | goto bailout; | 301 | goto bailout; |
297 | } | 302 | } |
298 | 303 | ||
@@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
304 | goto bailout; | 309 | goto bailout; |
305 | } | 310 | } |
306 | if (!sda) { | 311 | if (!sda) { |
307 | printk(KERN_WARNING "%s: SDA unexpected low " | 312 | printk(KERN_WARNING |
308 | "while pulling SCL high!\n", name); | 313 | "%s: SDA unexpected low while pulling SCL high!\n", |
314 | name); | ||
309 | goto bailout; | 315 | goto bailout; |
310 | } | 316 | } |
311 | 317 | ||
@@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap, | |||
352 | i2c_start(adap); | 358 | i2c_start(adap); |
353 | } | 359 | } |
354 | if (i && ret) | 360 | if (i && ret) |
355 | bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at " | 361 | bit_dbg(1, &i2c_adap->dev, |
356 | "0x%02x: %s\n", i + 1, | 362 | "Used %d tries to %s client at 0x%02x: %s\n", i + 1, |
357 | addr & 1 ? "read from" : "write to", addr >> 1, | 363 | addr & 1 ? "read from" : "write to", addr >> 1, |
358 | ret == 1 ? "success" : "failed, timeout?"); | 364 | ret == 1 ? "success" : "failed, timeout?"); |
359 | return ret; | 365 | return ret; |
@@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
442 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { | 448 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { |
443 | if (!(flags & I2C_M_NO_RD_ACK)) | 449 | if (!(flags & I2C_M_NO_RD_ACK)) |
444 | acknak(i2c_adap, 0); | 450 | acknak(i2c_adap, 0); |
445 | dev_err(&i2c_adap->dev, "readbytes: invalid " | 451 | dev_err(&i2c_adap->dev, |
446 | "block length (%d)\n", inval); | 452 | "readbytes: invalid block length (%d)\n", |
453 | inval); | ||
447 | return -EPROTO; | 454 | return -EPROTO; |
448 | } | 455 | } |
449 | /* The original count value accounts for the extra | 456 | /* The original count value accounts for the extra |
@@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
506 | return -ENXIO; | 513 | return -ENXIO; |
507 | } | 514 | } |
508 | if (flags & I2C_M_RD) { | 515 | if (flags & I2C_M_RD) { |
509 | bit_dbg(3, &i2c_adap->dev, "emitting repeated " | 516 | bit_dbg(3, &i2c_adap->dev, |
510 | "start condition\n"); | 517 | "emitting repeated start condition\n"); |
511 | i2c_repstart(adap); | 518 | i2c_repstart(adap); |
512 | /* okay, now switch into reading mode */ | 519 | /* okay, now switch into reading mode */ |
513 | addr |= 0x01; | 520 | addr |= 0x01; |
@@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, | |||
564 | } | 571 | } |
565 | ret = bit_doAddress(i2c_adap, pmsg); | 572 | ret = bit_doAddress(i2c_adap, pmsg); |
566 | if ((ret != 0) && !nak_ok) { | 573 | if ((ret != 0) && !nak_ok) { |
567 | bit_dbg(1, &i2c_adap->dev, "NAK from " | 574 | bit_dbg(1, &i2c_adap->dev, |
568 | "device addr 0x%02x msg #%d\n", | 575 | "NAK from device addr 0x%02x msg #%d\n", |
569 | msgs[i].addr, i); | 576 | msgs[i].addr, i); |
570 | goto bailout; | 577 | goto bailout; |
571 | } | 578 | } |
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e18442b9973a..94d94b4a9a0d 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
@@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) | |||
708 | i2c_set_adapdata(adap, dev); | 708 | i2c_set_adapdata(adap, dev); |
709 | 709 | ||
710 | if (dev->pm_disabled) { | 710 | if (dev->pm_disabled) { |
711 | dev_pm_syscore_device(dev->dev, true); | ||
712 | irq_flags = IRQF_NO_SUSPEND; | 711 | irq_flags = IRQF_NO_SUSPEND; |
713 | } else { | 712 | } else { |
714 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; | 713 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 1a8d2da5b000..b5750fd85125 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev) | |||
434 | { | 434 | { |
435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
436 | 436 | ||
437 | if (i_dev->pm_disabled) | ||
438 | return 0; | ||
439 | |||
437 | i_dev->disable(i_dev); | 440 | i_dev->disable(i_dev); |
438 | i2c_dw_prepare_clk(i_dev, false); | 441 | i2c_dw_prepare_clk(i_dev, false); |
439 | 442 | ||
@@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev) | |||
444 | { | 447 | { |
445 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 448 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
446 | 449 | ||
447 | i2c_dw_prepare_clk(i_dev, true); | 450 | if (!i_dev->pm_disabled) |
451 | i2c_dw_prepare_clk(i_dev, true); | ||
452 | |||
448 | i_dev->init(i_dev); | 453 | i_dev->init(i_dev); |
449 | 454 | ||
450 | return 0; | 455 | return 0; |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 941c223f6491..c91e145ef5a5 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -140,6 +140,7 @@ | |||
140 | 140 | ||
141 | #define SBREG_BAR 0x10 | 141 | #define SBREG_BAR 0x10 |
142 | #define SBREG_SMBCTRL 0xc6000c | 142 | #define SBREG_SMBCTRL 0xc6000c |
143 | #define SBREG_SMBCTRL_DNV 0xcf000c | ||
143 | 144 | ||
144 | /* Host status bits for SMBPCISTS */ | 145 | /* Host status bits for SMBPCISTS */ |
145 | #define SMBPCISTS_INTS BIT(3) | 146 | #define SMBPCISTS_INTS BIT(3) |
@@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv) | |||
1399 | spin_unlock(&p2sb_spinlock); | 1400 | spin_unlock(&p2sb_spinlock); |
1400 | 1401 | ||
1401 | res = &tco_res[ICH_RES_MEM_OFF]; | 1402 | res = &tco_res[ICH_RES_MEM_OFF]; |
1402 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; | 1403 | if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) |
1404 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; | ||
1405 | else | ||
1406 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; | ||
1407 | |||
1403 | res->end = res->start + 3; | 1408 | res->end = res->start + 3; |
1404 | res->flags = IORESOURCE_MEM; | 1409 | res->flags = IORESOURCE_MEM; |
1405 | 1410 | ||
@@ -1415,6 +1420,13 @@ static void i801_add_tco(struct i801_priv *priv) | |||
1415 | } | 1420 | } |
1416 | 1421 | ||
1417 | #ifdef CONFIG_ACPI | 1422 | #ifdef CONFIG_ACPI |
1423 | static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, | ||
1424 | acpi_physical_address address) | ||
1425 | { | ||
1426 | return address >= priv->smba && | ||
1427 | address <= pci_resource_end(priv->pci_dev, SMBBAR); | ||
1428 | } | ||
1429 | |||
1418 | static acpi_status | 1430 | static acpi_status |
1419 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | 1431 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, |
1420 | u64 *value, void *handler_context, void *region_context) | 1432 | u64 *value, void *handler_context, void *region_context) |
@@ -1430,7 +1442,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | |||
1430 | */ | 1442 | */ |
1431 | mutex_lock(&priv->acpi_lock); | 1443 | mutex_lock(&priv->acpi_lock); |
1432 | 1444 | ||
1433 | if (!priv->acpi_reserved) { | 1445 | if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { |
1434 | priv->acpi_reserved = true; | 1446 | priv->acpi_reserved = true; |
1435 | 1447 | ||
1436 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | 1448 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); |
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 6d975f5221ca..06c4c767af32 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c | |||
@@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = { | |||
538 | 538 | ||
539 | static const struct of_device_id lpi2c_imx_of_match[] = { | 539 | static const struct of_device_id lpi2c_imx_of_match[] = { |
540 | { .compatible = "fsl,imx7ulp-lpi2c" }, | 540 | { .compatible = "fsl,imx7ulp-lpi2c" }, |
541 | { .compatible = "fsl,imx8dv-lpi2c" }, | ||
542 | { }, | 541 | { }, |
543 | }; | 542 | }; |
544 | MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); | 543 | MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); |
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 439e8778f849..818cab14e87c 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
@@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data) | |||
507 | pd->pos = pd->msg->len; | 507 | pd->pos = pd->msg->len; |
508 | pd->stop_after_dma = true; | 508 | pd->stop_after_dma = true; |
509 | 509 | ||
510 | i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf); | ||
511 | |||
512 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); | 510 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); |
513 | } | 511 | } |
514 | 512 | ||
@@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) | |||
602 | dma_async_issue_pending(chan); | 600 | dma_async_issue_pending(chan); |
603 | } | 601 | } |
604 | 602 | ||
605 | static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | 603 | static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, |
606 | bool do_init) | 604 | bool do_init) |
607 | { | 605 | { |
608 | if (do_init) { | 606 | if (do_init) { |
609 | /* Initialize channel registers */ | 607 | /* Initialize channel registers */ |
@@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | |||
627 | 625 | ||
628 | /* Enable all interrupts to begin with */ | 626 | /* Enable all interrupts to begin with */ |
629 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | 627 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); |
630 | return 0; | ||
631 | } | 628 | } |
632 | 629 | ||
633 | static int poll_dte(struct sh_mobile_i2c_data *pd) | 630 | static int poll_dte(struct sh_mobile_i2c_data *pd) |
@@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
698 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; | 695 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; |
699 | pd->stop_after_dma = false; | 696 | pd->stop_after_dma = false; |
700 | 697 | ||
701 | err = start_ch(pd, msg, do_start); | 698 | start_ch(pd, msg, do_start); |
702 | if (err) | ||
703 | break; | ||
704 | 699 | ||
705 | if (do_start) | 700 | if (do_start) |
706 | i2c_op(pd, OP_START, 0); | 701 | i2c_op(pd, OP_START, 0); |
@@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
709 | timeout = wait_event_timeout(pd->wait, | 704 | timeout = wait_event_timeout(pd->wait, |
710 | pd->sr & (ICSR_TACK | SW_DONE), | 705 | pd->sr & (ICSR_TACK | SW_DONE), |
711 | adapter->timeout); | 706 | adapter->timeout); |
707 | |||
708 | /* 'stop_after_dma' tells if DMA transfer was complete */ | ||
709 | i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); | ||
710 | |||
712 | if (!timeout) { | 711 | if (!timeout) { |
713 | dev_err(pd->dev, "Transfer request timed out\n"); | 712 | dev_err(pd->dev, "Transfer request timed out\n"); |
714 | if (pd->dma_direction != DMA_NONE) | 713 | if (pd->dma_direction != DMA_NONE) |
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index 9918bdd81619..a403e8579b65 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c | |||
@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, | |||
401 | return ret; | 401 | return ret; |
402 | 402 | ||
403 | for (msg = msgs; msg < emsg; msg++) { | 403 | for (msg = msgs; msg < emsg; msg++) { |
404 | /* If next message is read, skip the stop condition */ | 404 | /* Emit STOP if it is the last message or I2C_M_STOP is set. */ |
405 | bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); | 405 | bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); |
406 | /* but, force it if I2C_M_STOP is set */ | ||
407 | if (msg->flags & I2C_M_STOP) | ||
408 | stop = true; | ||
409 | 406 | ||
410 | ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); | 407 | ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); |
411 | if (ret) | 408 | if (ret) |
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index bb181b088291..454f914ae66d 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c | |||
@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap, | |||
248 | return ret; | 248 | return ret; |
249 | 249 | ||
250 | for (msg = msgs; msg < emsg; msg++) { | 250 | for (msg = msgs; msg < emsg; msg++) { |
251 | /* If next message is read, skip the stop condition */ | 251 | /* Emit STOP if it is the last message or I2C_M_STOP is set. */ |
252 | bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); | 252 | bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); |
253 | /* but, force it if I2C_M_STOP is set */ | ||
254 | if (msg->flags & I2C_M_STOP) | ||
255 | stop = true; | ||
256 | 253 | ||
257 | ret = uniphier_i2c_master_xfer_one(adap, msg, stop); | 254 | ret = uniphier_i2c_master_xfer_one(adap, msg, stop); |
258 | if (ret) | 255 | if (ret) |
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 9a71e50d21f1..0c51c0ffdda9 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c | |||
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
532 | { | 532 | { |
533 | u8 rx_watermark; | 533 | u8 rx_watermark; |
534 | struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; | 534 | struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; |
535 | unsigned long flags; | ||
535 | 536 | ||
536 | /* Clear and enable Rx full interrupt. */ | 537 | /* Clear and enable Rx full interrupt. */ |
537 | xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); | 538 | xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); |
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
547 | rx_watermark = IIC_RX_FIFO_DEPTH; | 548 | rx_watermark = IIC_RX_FIFO_DEPTH; |
548 | xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); | 549 | xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); |
549 | 550 | ||
551 | local_irq_save(flags); | ||
550 | if (!(msg->flags & I2C_M_NOSTART)) | 552 | if (!(msg->flags & I2C_M_NOSTART)) |
551 | /* write the address */ | 553 | /* write the address */ |
552 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, | 554 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, |
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
556 | 558 | ||
557 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, | 559 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, |
558 | msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); | 560 | msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); |
561 | local_irq_restore(flags); | ||
562 | |||
559 | if (i2c->nmsgs == 1) | 563 | if (i2c->nmsgs == 1) |
560 | /* very last, enable bus not busy as well */ | 564 | /* very last, enable bus not busy as well */ |
561 | xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); | 565 | xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); |
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f15737763608..9ee9a15e7134 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
@@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) | |||
2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); | 2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); |
2294 | 2294 | ||
2295 | /** | 2295 | /** |
2296 | * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg | 2296 | * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg |
2297 | * @msg: the message to be synced with | ||
2298 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. | 2297 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. |
2298 | * @msg: the message which the buffer corresponds to | ||
2299 | * @xferred: bool saying if the message was transferred | ||
2299 | */ | 2300 | */ |
2300 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf) | 2301 | void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) |
2301 | { | 2302 | { |
2302 | if (!buf || buf == msg->buf) | 2303 | if (!buf || buf == msg->buf) |
2303 | return; | 2304 | return; |
2304 | 2305 | ||
2305 | if (msg->flags & I2C_M_RD) | 2306 | if (xferred && msg->flags & I2C_M_RD) |
2306 | memcpy(msg->buf, buf, msg->len); | 2307 | memcpy(msg->buf, buf, msg->len); |
2307 | 2308 | ||
2308 | kfree(buf); | 2309 | kfree(buf); |
2309 | } | 2310 | } |
2310 | EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf); | 2311 | EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); |
2311 | 2312 | ||
2312 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); | 2313 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); |
2313 | MODULE_DESCRIPTION("I2C-Bus main module"); | 2314 | MODULE_DESCRIPTION("I2C-Bus main module"); |
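The renamed helper pairs with i2c_get_dma_safe_msg_buf(); the new 'xferred' argument lets a bus driver skip copying data back into msg->buf when the transfer never completed, as the sh_mobile change above now does. A usage sketch; the function name and the 8-byte threshold are hypothetical, and the DMA step itself is driver-specific:

#include <linux/i2c.h>

/* Sketch: bounce a message through a DMA-safe buffer. */
static void example_xfer_one(struct i2c_msg *msg)
{
	u8 *buf = i2c_get_dma_safe_msg_buf(msg, 8);	/* NULL: too short, use PIO */
	bool xferred = false;

	if (!buf)
		return;		/* fall back to a PIO transfer (not shown) */

	/* ... driver-specific DMA transfer using 'buf' goes here ... */
	xferred = true;		/* set from the actual transfer outcome */

	/* Copies buf back into msg->buf only for reads that really transferred. */
	i2c_put_dma_safe_msg_buf(buf, msg, xferred);
}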
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 316a57530f6d..c2df341ff6fa 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = { | |||
1439 | * The consequence of the above is that allocation cost is low, but | 1439 | * The consequence of the above is that allocation cost is low, but |
1440 | * freeing is expensive. We assume that freeing rarely occurs. | 1440 | * freeing is expensive. We assume that freeing rarely occurs. |
1441 | */ | 1441 | */ |
1442 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ | ||
1442 | 1443 | ||
1443 | static DEFINE_MUTEX(lpi_range_lock); | 1444 | static DEFINE_MUTEX(lpi_range_lock); |
1444 | static LIST_HEAD(lpi_range_list); | 1445 | static LIST_HEAD(lpi_range_list); |
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void) | |||
1625 | { | 1626 | { |
1626 | phys_addr_t paddr; | 1627 | phys_addr_t paddr; |
1627 | 1628 | ||
1628 | lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); | 1629 | lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), |
1630 | ITS_MAX_LPI_NRBITS); | ||
1629 | gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); | 1631 | gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); |
1630 | if (!gic_rdists->prop_page) { | 1632 | if (!gic_rdists->prop_page) { |
1631 | pr_err("Failed to allocate PROPBASE\n"); | 1633 | pr_err("Failed to allocate PROPBASE\n"); |
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 94329e03001e..0b2af6e74fc3 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c | |||
@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) | |||
1276 | static int resync_finish(struct mddev *mddev) | 1276 | static int resync_finish(struct mddev *mddev) |
1277 | { | 1277 | { |
1278 | struct md_cluster_info *cinfo = mddev->cluster_info; | 1278 | struct md_cluster_info *cinfo = mddev->cluster_info; |
1279 | int ret = 0; | ||
1279 | 1280 | ||
1280 | clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); | 1281 | clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); |
1281 | dlm_unlock_sync(cinfo->resync_lockres); | ||
1282 | 1282 | ||
1283 | /* | 1283 | /* |
1284 | * If resync thread is interrupted so we can't say resync is finished, | 1284 | * If resync thread is interrupted so we can't say resync is finished, |
1285 | * another node will launch resync thread to continue. | 1285 | * another node will launch resync thread to continue. |
1286 | */ | 1286 | */ |
1287 | if (test_bit(MD_CLOSING, &mddev->flags)) | 1287 | if (!test_bit(MD_CLOSING, &mddev->flags)) |
1288 | return 0; | 1288 | ret = resync_info_update(mddev, 0, 0); |
1289 | else | 1289 | dlm_unlock_sync(cinfo->resync_lockres); |
1290 | return resync_info_update(mddev, 0, 0); | 1290 | return ret; |
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | static int area_resyncing(struct mddev *mddev, int direction, | 1293 | static int area_resyncing(struct mddev *mddev, int direction, |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 981898049491..d6f7978b4449 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, | |||
4529 | allow_barrier(conf); | 4529 | allow_barrier(conf); |
4530 | } | 4530 | } |
4531 | 4531 | ||
4532 | raise_barrier(conf, 0); | ||
4532 | read_more: | 4533 | read_more: |
4533 | /* Now schedule reads for blocks from sector_nr to last */ | 4534 | /* Now schedule reads for blocks from sector_nr to last */ |
4534 | r10_bio = raid10_alloc_init_r10buf(conf); | 4535 | r10_bio = raid10_alloc_init_r10buf(conf); |
4535 | r10_bio->state = 0; | 4536 | r10_bio->state = 0; |
4536 | raise_barrier(conf, sectors_done != 0); | 4537 | raise_barrier(conf, 1); |
4537 | atomic_set(&r10_bio->remaining, 0); | 4538 | atomic_set(&r10_bio->remaining, 0); |
4538 | r10_bio->mddev = mddev; | 4539 | r10_bio->mddev = mddev; |
4539 | r10_bio->sector = sector_nr; | 4540 | r10_bio->sector = sector_nr; |
@@ -4629,6 +4630,8 @@ read_more: | |||
4629 | if (sector_nr <= last) | 4630 | if (sector_nr <= last) |
4630 | goto read_more; | 4631 | goto read_more; |
4631 | 4632 | ||
4633 | lower_barrier(conf); | ||
4634 | |||
4632 | /* Now that we have done the whole section we can | 4635 | /* Now that we have done the whole section we can |
4633 | * update reshape_progress | 4636 | * update reshape_progress |
4634 | */ | 4637 | */ |
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index a001808a2b77..bfb811407061 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h | |||
@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); | |||
46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); | 46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); |
47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); | 47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); |
48 | 48 | ||
49 | static inline bool raid5_has_log(struct r5conf *conf) | ||
50 | { | ||
51 | return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); | ||
52 | } | ||
53 | |||
49 | static inline bool raid5_has_ppl(struct r5conf *conf) | 54 | static inline bool raid5_has_ppl(struct r5conf *conf) |
50 | { | 55 | { |
51 | return test_bit(MD_HAS_PPL, &conf->mddev->flags); | 56 | return test_bit(MD_HAS_PPL, &conf->mddev->flags); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4ce0d7502fad..e4e98f47865d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh) | |||
733 | { | 733 | { |
734 | struct r5conf *conf = sh->raid_conf; | 734 | struct r5conf *conf = sh->raid_conf; |
735 | 735 | ||
736 | if (conf->log || raid5_has_ppl(conf)) | 736 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
737 | return false; | 737 | return false; |
738 | return test_bit(STRIPE_BATCH_READY, &sh->state) && | 738 | return test_bit(STRIPE_BATCH_READY, &sh->state) && |
739 | !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && | 739 | !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && |
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) | |||
7737 | sector_t newsize; | 7737 | sector_t newsize; |
7738 | struct r5conf *conf = mddev->private; | 7738 | struct r5conf *conf = mddev->private; |
7739 | 7739 | ||
7740 | if (conf->log || raid5_has_ppl(conf)) | 7740 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
7741 | return -EINVAL; | 7741 | return -EINVAL; |
7742 | sectors &= ~((sector_t)conf->chunk_sectors - 1); | 7742 | sectors &= ~((sector_t)conf->chunk_sectors - 1); |
7743 | newsize = raid5_size(mddev, sectors, mddev->raid_disks); | 7743 | newsize = raid5_size(mddev, sectors, mddev->raid_disks); |
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev) | |||
7788 | { | 7788 | { |
7789 | struct r5conf *conf = mddev->private; | 7789 | struct r5conf *conf = mddev->private; |
7790 | 7790 | ||
7791 | if (conf->log || raid5_has_ppl(conf)) | 7791 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
7792 | return -EINVAL; | 7792 | return -EINVAL; |
7793 | if (mddev->delta_disks == 0 && | 7793 | if (mddev->delta_disks == 0 && |
7794 | mddev->new_layout == mddev->layout && | 7794 | mddev->new_layout == mddev->layout && |
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c index 31112f622b88..475e5b3790ed 100644 --- a/drivers/memory/ti-aemif.c +++ b/drivers/memory/ti-aemif.c | |||
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev) | |||
411 | if (ret < 0) | 411 | if (ret < 0) |
412 | goto error; | 412 | goto error; |
413 | } | 413 | } |
414 | } else { | 414 | } else if (pdata) { |
415 | for (i = 0; i < pdata->num_sub_devices; i++) { | 415 | for (i = 0; i < pdata->num_sub_devices; i++) { |
416 | pdata->sub_devices[i].dev.parent = dev; | 416 | pdata->sub_devices[i].dev.parent = dev; |
417 | ret = platform_device_register(&pdata->sub_devices[i]); | 417 | ret = platform_device_register(&pdata->sub_devices[i]); |
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 648eb6743ed5..6edffeed9953 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, | |||
238 | mmc_exit_request(mq->queue, req); | 238 | mmc_exit_request(mq->queue, req); |
239 | } | 239 | } |
240 | 240 | ||
241 | /* | ||
242 | * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests | ||
243 | * will not be dispatched in parallel. | ||
244 | */ | ||
245 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | 241 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
246 | const struct blk_mq_queue_data *bd) | 242 | const struct blk_mq_queue_data *bd) |
247 | { | 243 | { |
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
264 | 260 | ||
265 | spin_lock_irq(q->queue_lock); | 261 | spin_lock_irq(q->queue_lock); |
266 | 262 | ||
267 | if (mq->recovery_needed) { | 263 | if (mq->recovery_needed || mq->busy) { |
268 | spin_unlock_irq(q->queue_lock); | 264 | spin_unlock_irq(q->queue_lock); |
269 | return BLK_STS_RESOURCE; | 265 | return BLK_STS_RESOURCE; |
270 | } | 266 | } |
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
291 | break; | 287 | break; |
292 | } | 288 | } |
293 | 289 | ||
290 | /* Parallel dispatch of requests is not supported at the moment */ | ||
291 | mq->busy = true; | ||
292 | |||
294 | mq->in_flight[issue_type] += 1; | 293 | mq->in_flight[issue_type] += 1; |
295 | get_card = (mmc_tot_in_flight(mq) == 1); | 294 | get_card = (mmc_tot_in_flight(mq) == 1); |
296 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); | 295 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); |
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
333 | mq->in_flight[issue_type] -= 1; | 332 | mq->in_flight[issue_type] -= 1; |
334 | if (mmc_tot_in_flight(mq) == 0) | 333 | if (mmc_tot_in_flight(mq) == 0) |
335 | put_card = true; | 334 | put_card = true; |
335 | mq->busy = false; | ||
336 | spin_unlock_irq(q->queue_lock); | 336 | spin_unlock_irq(q->queue_lock); |
337 | if (put_card) | 337 | if (put_card) |
338 | mmc_put_card(card, &mq->ctx); | 338 | mmc_put_card(card, &mq->ctx); |
339 | } else { | ||
340 | WRITE_ONCE(mq->busy, false); | ||
339 | } | 341 | } |
340 | 342 | ||
341 | return ret; | 343 | return ret; |
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 17e59d50b496..9bf3c9245075 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h | |||
@@ -81,6 +81,7 @@ struct mmc_queue { | |||
81 | unsigned int cqe_busy; | 81 | unsigned int cqe_busy; |
82 | #define MMC_CQE_DCMD_BUSY BIT(0) | 82 | #define MMC_CQE_DCMD_BUSY BIT(0) |
83 | #define MMC_CQE_QUEUE_FULL BIT(1) | 83 | #define MMC_CQE_QUEUE_FULL BIT(1) |
84 | bool busy; | ||
84 | bool use_cqe; | 85 | bool use_cqe; |
85 | bool recovery_needed; | 86 | bool recovery_needed; |
86 | bool in_recovery; | 87 | bool in_recovery; |
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 294de177632c..61e4e2a213c9 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c | |||
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, | |||
217 | * We don't really have DMA, so we need | 217 | * We don't really have DMA, so we need |
218 | * to copy from our platform driver buffer | 218 | * to copy from our platform driver buffer |
219 | */ | 219 | */ |
220 | sg_copy_to_buffer(data->sg, 1, host->virt_base, | 220 | sg_copy_from_buffer(data->sg, 1, host->virt_base, |
221 | data->sg->length); | 221 | data->sg->length); |
222 | } | 222 | } |
223 | host->data->bytes_xfered += data->sg->length; | 223 | host->data->bytes_xfered += data->sg->length; |
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, | |||
393 | * We don't really have DMA, so we need to copy to our | 393 | * We don't really have DMA, so we need to copy to our |
394 | * platform driver buffer | 394 | * platform driver buffer |
395 | */ | 395 | */ |
396 | sg_copy_from_buffer(data->sg, 1, host->virt_base, | 396 | sg_copy_to_buffer(data->sg, 1, host->virt_base, |
397 | data->sg->length); | 397 | data->sg->length); |
398 | } | 398 | } |
399 | } | 399 | } |
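The fix swaps the scatterlist helpers so the copy direction matches the data direction: sg_copy_from_buffer() fills the SG pages from a linear buffer (the read path here), while sg_copy_to_buffer() drains the SG pages into one (the write path). The same convention applies to the sg_pcopy_*() changes in atmel-mci below. A minimal reminder, with hypothetical function and variable names:

#include <linux/scatterlist.h>

/* Read path: device data already sits in a linear bounce buffer. */
static void example_pio_read(struct scatterlist *sg, void *bounce, size_t len)
{
	sg_copy_from_buffer(sg, 1, bounce, len);	/* bounce buffer -> SG pages */
}

/* Write path: gather the SG pages into the bounce buffer for the device. */
static void example_pio_write(struct scatterlist *sg, void *bounce, size_t len)
{
	sg_copy_to_buffer(sg, 1, bounce, len);		/* SG pages -> bounce buffer */
}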
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 5aa2c9404e92..be53044086c7 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
1976 | do { | 1976 | do { |
1977 | value = atmci_readl(host, ATMCI_RDR); | 1977 | value = atmci_readl(host, ATMCI_RDR); |
1978 | if (likely(offset + 4 <= sg->length)) { | 1978 | if (likely(offset + 4 <= sg->length)) { |
1979 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); | 1979 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); |
1980 | 1980 | ||
1981 | offset += 4; | 1981 | offset += 4; |
1982 | nbytes += 4; | 1982 | nbytes += 4; |
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
1993 | } else { | 1993 | } else { |
1994 | unsigned int remaining = sg->length - offset; | 1994 | unsigned int remaining = sg->length - offset; |
1995 | 1995 | ||
1996 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); | 1996 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); |
1997 | nbytes += remaining; | 1997 | nbytes += remaining; |
1998 | 1998 | ||
1999 | flush_dcache_page(sg_page(sg)); | 1999 | flush_dcache_page(sg_page(sg)); |
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
2003 | goto done; | 2003 | goto done; |
2004 | 2004 | ||
2005 | offset = 4 - remaining; | 2005 | offset = 4 - remaining; |
2006 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, | 2006 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, |
2007 | offset, 0); | 2007 | offset, 0); |
2008 | nbytes += offset; | 2008 | nbytes += offset; |
2009 | } | 2009 | } |
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
2042 | 2042 | ||
2043 | do { | 2043 | do { |
2044 | if (likely(offset + 4 <= sg->length)) { | 2044 | if (likely(offset + 4 <= sg->length)) { |
2045 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); | 2045 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); |
2046 | atmci_writel(host, ATMCI_TDR, value); | 2046 | atmci_writel(host, ATMCI_TDR, value); |
2047 | 2047 | ||
2048 | offset += 4; | 2048 | offset += 4; |
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
2059 | unsigned int remaining = sg->length - offset; | 2059 | unsigned int remaining = sg->length - offset; |
2060 | 2060 | ||
2061 | value = 0; | 2061 | value = 0; |
2062 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); | 2062 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); |
2063 | nbytes += remaining; | 2063 | nbytes += remaining; |
2064 | 2064 | ||
2065 | host->sg = sg = sg_next(sg); | 2065 | host->sg = sg = sg_next(sg); |
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
2070 | } | 2070 | } |
2071 | 2071 | ||
2072 | offset = 4 - remaining; | 2072 | offset = 4 - remaining; |
2073 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, | 2073 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, |
2074 | offset, 0); | 2074 | offset, 0); |
2075 | atmci_writel(host, ATMCI_TDR, value); | 2075 | atmci_writel(host, ATMCI_TDR, value); |
2076 | nbytes += offset; | 2076 | nbytes += offset; |
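The atmel-mci PIO paths follow the same convention with the offset-based variants: on reads, each FIFO word from ATMCI_RDR is scattered into the request with sg_pcopy_from_buffer(); on writes, the next word is gathered out of the request with sg_pcopy_to_buffer() before it is pushed to ATMCI_TDR. A minimal sketch of the two directions (pio_read_word() and pio_write_word() are invented names, not driver functions):

    #include <linux/scatterlist.h>
    #include <linux/types.h>

    /* RX: FIFO word (linear buffer) -> scatterlist memory */
    static void pio_read_word(struct scatterlist *sg, u32 value, size_t offset)
    {
            sg_pcopy_from_buffer(sg, 1, &value, sizeof(value), offset);
    }

    /* TX: scatterlist memory -> linear buffer, ready for the FIFO */
    static u32 pio_write_word(struct scatterlist *sg, size_t offset)
    {
            u32 value = 0;

            sg_pcopy_to_buffer(sg, 1, &value, sizeof(value), offset);
            return value;
    }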
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 35cc0de6be67..ca0b43973769 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c | |||
@@ -45,14 +45,16 @@ | |||
45 | /* DM_CM_RST */ | 45 | /* DM_CM_RST */ |
46 | #define RST_DTRANRST1 BIT(9) | 46 | #define RST_DTRANRST1 BIT(9) |
47 | #define RST_DTRANRST0 BIT(8) | 47 | #define RST_DTRANRST0 BIT(8) |
48 | #define RST_RESERVED_BITS GENMASK_ULL(32, 0) | 48 | #define RST_RESERVED_BITS GENMASK_ULL(31, 0) |
49 | 49 | ||
50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ | 50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ |
51 | #define INFO1_CLEAR 0 | 51 | #define INFO1_CLEAR 0 |
52 | #define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) | ||
52 | #define INFO1_DTRANEND1 BIT(17) | 53 | #define INFO1_DTRANEND1 BIT(17) |
53 | #define INFO1_DTRANEND0 BIT(16) | 54 | #define INFO1_DTRANEND0 BIT(16) |
54 | 55 | ||
55 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ | 56 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ |
57 | #define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) | ||
56 | #define INFO2_DTRANERR1 BIT(17) | 58 | #define INFO2_DTRANERR1 BIT(17) |
57 | #define INFO2_DTRANERR0 BIT(16) | 59 | #define INFO2_DTRANERR0 BIT(16) |
58 | 60 | ||
@@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, | |||
252 | { | 254 | { |
253 | struct renesas_sdhi *priv = host_to_priv(host); | 255 | struct renesas_sdhi *priv = host_to_priv(host); |
254 | 256 | ||
257 | /* Disable DMAC interrupts, we don't use them */ | ||
258 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, | ||
259 | INFO1_MASK_CLEAR); | ||
260 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, | ||
261 | INFO2_MASK_CLEAR); | ||
262 | |||
255 | /* Each value is set to non-zero to assume "enabling" each DMA */ | 263 | /* Each value is set to non-zero to assume "enabling" each DMA */ |
256 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; | 264 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; |
257 | 265 | ||
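GENMASK_ULL(h, l) sets bits l through h inclusive, so GENMASK_ULL(32, 0) spans 33 bits and leaks bit 32 into what is meant to be a 32-bit reserved-bits mask; GENMASK_ULL(31, 0) covers exactly the low 32 bits. The new INFO1_MASK_CLEAR and INFO2_MASK_CLEAR reuse that 32-bit value to mask off the DMAC interrupts the driver does not use. A self-contained illustration of the inclusive-range semantics (MY_GENMASK_ULL mirrors the kernel macro's behaviour, not its exact definition):

    #include <stdio.h>

    #define MY_GENMASK_ULL(h, l) \
            ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
            /* 32 bits set: 00000000ffffffff */
            printf("GENMASK_ULL(31, 0) = %016llx\n", MY_GENMASK_ULL(31, 0));
            /* 33 bits set: 00000001ffffffff, the off-by-one being fixed */
            printf("GENMASK_ULL(32, 0) = %016llx\n", MY_GENMASK_ULL(32, 0));
            return 0;
    }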
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index ca18612c4201..67b2065e7a19 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
@@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali) | |||
1338 | 1338 | ||
1339 | denali_enable_irq(denali); | 1339 | denali_enable_irq(denali); |
1340 | denali_reset_banks(denali); | 1340 | denali_reset_banks(denali); |
1341 | if (!denali->max_banks) { | ||
1342 | /* Error out earlier if no chip is found for some reasons. */ | ||
1343 | ret = -ENODEV; | ||
1344 | goto disable_irq; | ||
1345 | } | ||
1341 | 1346 | ||
1342 | denali->active_bank = DENALI_INVALID_BANK; | 1347 | denali->active_bank = DENALI_INVALID_BANK; |
1343 | 1348 | ||
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c index a3f04315c05c..427fcbc1b71c 100644 --- a/drivers/mtd/nand/raw/docg4.c +++ b/drivers/mtd/nand/raw/docg4.c | |||
@@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev) | |||
1218 | return 0; | 1218 | return 0; |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | static void __init init_mtd_structs(struct mtd_info *mtd) | 1221 | static void init_mtd_structs(struct mtd_info *mtd) |
1222 | { | 1222 | { |
1223 | /* initialize mtd and nand data structures */ | 1223 | /* initialize mtd and nand data structures */ |
1224 | 1224 | ||
@@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) | |||
1290 | 1290 | ||
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | static int __init read_id_reg(struct mtd_info *mtd) | 1293 | static int read_id_reg(struct mtd_info *mtd) |
1294 | { | 1294 | { |
1295 | struct nand_chip *nand = mtd_to_nand(mtd); | 1295 | struct nand_chip *nand = mtd_to_nand(mtd); |
1296 | struct docg4_priv *doc = nand_get_controller_data(nand); | 1296 | struct docg4_priv *doc = nand_get_controller_data(nand); |
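docg4_resume() calls init_mtd_structs() and read_id_reg() at runtime, but __init code is placed in .init.text and freed once boot finishes, so keeping the annotation would turn the resume path into a jump into freed memory; dropping it keeps the functions resident. A minimal sketch of the rule (my_driver_boot_setup() and my_driver_resume() are invented examples, not docg4 code):

    #include <linux/device.h>
    #include <linux/init.h>

    /* Safe: reachable only from boot-time paths, may live in .init.text. */
    static int __init my_driver_boot_setup(void)
    {
            return 0;
    }

    /* Must NOT be __init: the PM core can call resume long after
     * .init.text has been discarded. */
    static int my_driver_resume(struct device *dev)
    {
            return 0;
    }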
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8bb1e38b1681..cecbb1d1f587 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) | |||
5913 | return bp->hw_resc.max_cp_rings; | 5913 | return bp->hw_resc.max_cp_rings; |
5914 | } | 5914 | } |
5915 | 5915 | ||
5916 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) | 5916 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
5917 | { | 5917 | { |
5918 | bp->hw_resc.max_cp_rings = max; | 5918 | return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); |
5919 | } | 5919 | } |
5920 | 5920 | ||
5921 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) | 5921 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) |
5922 | { | 5922 | { |
5923 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; | 5923 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
5924 | 5924 | ||
@@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) | |||
6684 | hw_resc->resv_rx_rings = 0; | 6684 | hw_resc->resv_rx_rings = 0; |
6685 | hw_resc->resv_hw_ring_grps = 0; | 6685 | hw_resc->resv_hw_ring_grps = 0; |
6686 | hw_resc->resv_vnics = 0; | 6686 | hw_resc->resv_vnics = 0; |
6687 | bp->tx_nr_rings = 0; | ||
6688 | bp->rx_nr_rings = 0; | ||
6687 | } | 6689 | } |
6688 | return rc; | 6690 | return rc; |
6689 | } | 6691 | } |
@@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, | |||
8629 | 8631 | ||
8630 | *max_tx = hw_resc->max_tx_rings; | 8632 | *max_tx = hw_resc->max_tx_rings; |
8631 | *max_rx = hw_resc->max_rx_rings; | 8633 | *max_rx = hw_resc->max_rx_rings; |
8632 | *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings); | 8634 | *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), |
8635 | hw_resc->max_irqs); | ||
8633 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); | 8636 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); |
8634 | max_ring_grps = hw_resc->max_hw_ring_grps; | 8637 | max_ring_grps = hw_resc->max_hw_ring_grps; |
8635 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { | 8638 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { |
@@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) | |||
8769 | if (bp->tx_nr_rings) | 8772 | if (bp->tx_nr_rings) |
8770 | return 0; | 8773 | return 0; |
8771 | 8774 | ||
8775 | bnxt_ulp_irq_stop(bp); | ||
8776 | bnxt_clear_int_mode(bp); | ||
8772 | rc = bnxt_set_dflt_rings(bp, true); | 8777 | rc = bnxt_set_dflt_rings(bp, true); |
8773 | if (rc) { | 8778 | if (rc) { |
8774 | netdev_err(bp->dev, "Not enough rings available.\n"); | 8779 | netdev_err(bp->dev, "Not enough rings available.\n"); |
8775 | return rc; | 8780 | goto init_dflt_ring_err; |
8776 | } | 8781 | } |
8777 | rc = bnxt_init_int_mode(bp); | 8782 | rc = bnxt_init_int_mode(bp); |
8778 | if (rc) | 8783 | if (rc) |
8779 | return rc; | 8784 | goto init_dflt_ring_err; |
8785 | |||
8780 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; | 8786 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
8781 | if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { | 8787 | if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { |
8782 | bp->flags |= BNXT_FLAG_RFS; | 8788 | bp->flags |= BNXT_FLAG_RFS; |
8783 | bp->dev->features |= NETIF_F_NTUPLE; | 8789 | bp->dev->features |= NETIF_F_NTUPLE; |
8784 | } | 8790 | } |
8785 | return 0; | 8791 | init_dflt_ring_err: |
8792 | bnxt_ulp_irq_restart(bp, rc); | ||
8793 | return rc; | ||
8786 | } | 8794 | } |
8787 | 8795 | ||
8788 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) | 8796 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) |
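Rather than mutating hw_resc.max_cp_rings every time the RoCE ULP takes or returns MSI-X vectors, the driver now leaves the hardware maximum alone and derives the Ethernet-usable count on demand; the removal of the set/subtract helpers in bnxt_ulp.c below follows from this. The accounting amounts to (a restatement of the hunk, not additional driver code):

    /* Completion rings available to the L2 driver: the hardware maximum
     * minus the MSI-X vectors currently held by the RDMA (ULP) driver. */
    static unsigned int cp_rings_for_en(struct bnxt *bp)
    {
            return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
    }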
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index fefa011320e0..bde384630a75 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *); | |||
1481 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); | 1481 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); |
1482 | void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); | 1482 | void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); |
1483 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); | 1483 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); |
1484 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); | 1484 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp); |
1485 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); | ||
1486 | int bnxt_get_avail_msix(struct bnxt *bp, int num); | 1485 | int bnxt_get_avail_msix(struct bnxt *bp, int num); |
1487 | int bnxt_reserve_rings(struct bnxt *bp); | 1486 | int bnxt_reserve_rings(struct bnxt *bp); |
1488 | void bnxt_tx_disable(struct bnxt *bp); | 1487 | void bnxt_tx_disable(struct bnxt *bp); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 6d583bcd2a81..fcd085a9853a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) | |||
451 | 451 | ||
452 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); | 452 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); |
453 | 453 | ||
454 | vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; | 454 | vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
455 | vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; | 455 | vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
456 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 456 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
457 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; | 457 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; |
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) | |||
549 | max_stat_ctxs = hw_resc->max_stat_ctxs; | 549 | max_stat_ctxs = hw_resc->max_stat_ctxs; |
550 | 550 | ||
551 | /* Remaining rings are distributed equally amongs VF's for now */ | 551 | /* Remaining rings are distributed equally amongs VF's for now */ |
552 | vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; | 552 | vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) - |
553 | bp->cp_nr_rings) / num_vfs; | ||
553 | vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; | 554 | vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; |
554 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 555 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
555 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / | 556 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / |
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
643 | */ | 644 | */ |
644 | vfs_supported = *num_vfs; | 645 | vfs_supported = *num_vfs; |
645 | 646 | ||
646 | avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; | 647 | avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
647 | avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; | 648 | avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
648 | avail_cp = min_t(int, avail_cp, avail_stat); | 649 | avail_cp = min_t(int, avail_cp, avail_stat); |
649 | 650 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 139d96c5a023..092c817f8f11 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
@@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, | |||
110 | struct tcf_exts *tc_exts) | 110 | struct tcf_exts *tc_exts) |
111 | { | 111 | { |
112 | const struct tc_action *tc_act; | 112 | const struct tc_action *tc_act; |
113 | LIST_HEAD(tc_actions); | 113 | int i, rc; |
114 | int rc; | ||
115 | 114 | ||
116 | if (!tcf_exts_has_actions(tc_exts)) { | 115 | if (!tcf_exts_has_actions(tc_exts)) { |
117 | netdev_info(bp->dev, "no actions"); | 116 | netdev_info(bp->dev, "no actions"); |
118 | return -EINVAL; | 117 | return -EINVAL; |
119 | } | 118 | } |
120 | 119 | ||
121 | tcf_exts_to_list(tc_exts, &tc_actions); | 120 | tcf_exts_for_each_action(i, tc_act, tc_exts) { |
122 | list_for_each_entry(tc_act, &tc_actions, list) { | ||
123 | /* Drop action */ | 121 | /* Drop action */ |
124 | if (is_tcf_gact_shot(tc_act)) { | 122 | if (is_tcf_gact_shot(tc_act)) { |
125 | actions->flags |= BNXT_TC_ACTION_FLAG_DROP; | 123 | actions->flags |= BNXT_TC_ACTION_FLAG_DROP; |
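This hunk, and the cxgb4 ones below, track the same core API change in 4.19: the action list is no longer materialised with tcf_exts_to_list() and walked with list_for_each_entry(); tcf_exts_for_each_action() iterates the classifier's action array directly. A minimal sketch of the new pattern, assuming a struct tcf_exts handed in by an offload request (parse_drop_only() is an invented helper):

    #include <net/pkt_cls.h>
    #include <net/tc_act/tc_gact.h>

    static int parse_drop_only(struct tcf_exts *exts)
    {
            const struct tc_action *act;
            int i;

            if (!tcf_exts_has_actions(exts))
                    return -EINVAL;

            tcf_exts_for_each_action(i, act, exts) {
                    if (!is_tcf_gact_shot(act))  /* accept only the drop action */
                            return -EOPNOTSUPP;
            }

            return 0;
    }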
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index c37b2842f972..beee61292d5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | |||
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, | |||
169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; | 169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; |
170 | } | 170 | } |
171 | bnxt_fill_msix_vecs(bp, ent); | 171 | bnxt_fill_msix_vecs(bp, ent); |
172 | bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); | ||
173 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; | 172 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; |
174 | return avail_msix; | 173 | return avail_msix; |
175 | } | 174 | } |
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
178 | { | 177 | { |
179 | struct net_device *dev = edev->net; | 178 | struct net_device *dev = edev->net; |
180 | struct bnxt *bp = netdev_priv(dev); | 179 | struct bnxt *bp = netdev_priv(dev); |
181 | int max_cp_rings, msix_requested; | ||
182 | 180 | ||
183 | ASSERT_RTNL(); | 181 | ASSERT_RTNL(); |
184 | if (ulp_id != BNXT_ROCE_ULP) | 182 | if (ulp_id != BNXT_ROCE_ULP) |
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
187 | if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) | 185 | if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) |
188 | return 0; | 186 | return 0; |
189 | 187 | ||
190 | max_cp_rings = bnxt_get_max_func_cp_rings(bp); | ||
191 | msix_requested = edev->ulp_tbl[ulp_id].msix_requested; | ||
192 | bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); | ||
193 | edev->ulp_tbl[ulp_id].msix_requested = 0; | 188 | edev->ulp_tbl[ulp_id].msix_requested = 0; |
194 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; | 189 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; |
195 | if (netif_running(dev)) { | 190 | if (netif_running(dev)) { |
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp) | |||
220 | return 0; | 215 | return 0; |
221 | } | 216 | } |
222 | 217 | ||
223 | void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id) | ||
224 | { | ||
225 | ASSERT_RTNL(); | ||
226 | if (bnxt_ulp_registered(bp->edev, ulp_id)) { | ||
227 | struct bnxt_en_dev *edev = bp->edev; | ||
228 | unsigned int msix_req, max; | ||
229 | |||
230 | msix_req = edev->ulp_tbl[ulp_id].msix_requested; | ||
231 | max = bnxt_get_max_func_cp_rings(bp); | ||
232 | bnxt_set_max_func_cp_rings(bp, max - msix_req); | ||
233 | max = bnxt_get_max_func_stat_ctxs(bp); | ||
234 | bnxt_set_max_func_stat_ctxs(bp, max - 1); | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, | 218 | static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, |
239 | struct bnxt_fw_msg *fw_msg) | 219 | struct bnxt_fw_msg *fw_msg) |
240 | { | 220 | { |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index df48ac71729f..d9bea37cd211 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | |||
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id) | |||
90 | 90 | ||
91 | int bnxt_get_ulp_msix_num(struct bnxt *bp); | 91 | int bnxt_get_ulp_msix_num(struct bnxt *bp); |
92 | int bnxt_get_ulp_msix_base(struct bnxt *bp); | 92 | int bnxt_get_ulp_msix_base(struct bnxt *bp); |
93 | void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id); | ||
94 | void bnxt_ulp_stop(struct bnxt *bp); | 93 | void bnxt_ulp_stop(struct bnxt *bp); |
95 | void bnxt_ulp_start(struct bnxt *bp); | 94 | void bnxt_ulp_start(struct bnxt *bp); |
96 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); | 95 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b773bc07edf7..14b49612aa86 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters { | |||
186 | #define UMAC_MAC1 0x010 | 186 | #define UMAC_MAC1 0x010 |
187 | #define UMAC_MAX_FRAME_LEN 0x014 | 187 | #define UMAC_MAX_FRAME_LEN 0x014 |
188 | 188 | ||
189 | #define UMAC_MODE 0x44 | ||
190 | #define MODE_LINK_STATUS (1 << 5) | ||
191 | |||
189 | #define UMAC_EEE_CTRL 0x064 | 192 | #define UMAC_EEE_CTRL 0x064 |
190 | #define EN_LPI_RX_PAUSE (1 << 0) | 193 | #define EN_LPI_RX_PAUSE (1 << 0) |
191 | #define EN_LPI_TX_PFC (1 << 1) | 194 | #define EN_LPI_TX_PFC (1 << 1) |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 5333274a283c..4241ae928d4a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev) | |||
115 | static int bcmgenet_fixed_phy_link_update(struct net_device *dev, | 115 | static int bcmgenet_fixed_phy_link_update(struct net_device *dev, |
116 | struct fixed_phy_status *status) | 116 | struct fixed_phy_status *status) |
117 | { | 117 | { |
118 | if (dev && dev->phydev && status) | 118 | struct bcmgenet_priv *priv; |
119 | status->link = dev->phydev->link; | 119 | u32 reg; |
120 | |||
121 | if (dev && dev->phydev && status) { | ||
122 | priv = netdev_priv(dev); | ||
123 | reg = bcmgenet_umac_readl(priv, UMAC_MODE); | ||
124 | status->link = !!(reg & MODE_LINK_STATUS); | ||
125 | } | ||
120 | 126 | ||
121 | return 0; | 127 | return 0; |
122 | } | 128 | } |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index dc09f9a8a49b..16e4ef7d7185 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev) | |||
482 | 482 | ||
483 | if (np) { | 483 | if (np) { |
484 | if (of_phy_is_fixed_link(np)) { | 484 | if (of_phy_is_fixed_link(np)) { |
485 | if (of_phy_register_fixed_link(np) < 0) { | ||
486 | dev_err(&bp->pdev->dev, | ||
487 | "broken fixed-link specification\n"); | ||
488 | return -ENODEV; | ||
489 | } | ||
490 | bp->phy_node = of_node_get(np); | 485 | bp->phy_node = of_node_get(np); |
491 | } else { | 486 | } else { |
492 | bp->phy_node = of_parse_phandle(np, "phy-handle", 0); | 487 | bp->phy_node = of_parse_phandle(np, "phy-handle", 0); |
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp) | |||
569 | { | 564 | { |
570 | struct macb_platform_data *pdata; | 565 | struct macb_platform_data *pdata; |
571 | struct device_node *np; | 566 | struct device_node *np; |
572 | int err; | 567 | int err = -ENXIO; |
573 | 568 | ||
574 | /* Enable management port */ | 569 | /* Enable management port */ |
575 | macb_writel(bp, NCR, MACB_BIT(MPE)); | 570 | macb_writel(bp, NCR, MACB_BIT(MPE)); |
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp) | |||
592 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); | 587 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
593 | 588 | ||
594 | np = bp->pdev->dev.of_node; | 589 | np = bp->pdev->dev.of_node; |
595 | if (pdata) | 590 | if (np && of_phy_is_fixed_link(np)) { |
596 | bp->mii_bus->phy_mask = pdata->phy_mask; | 591 | if (of_phy_register_fixed_link(np) < 0) { |
592 | dev_err(&bp->pdev->dev, | ||
593 | "broken fixed-link specification %pOF\n", np); | ||
594 | goto err_out_free_mdiobus; | ||
595 | } | ||
596 | |||
597 | err = mdiobus_register(bp->mii_bus); | ||
598 | } else { | ||
599 | if (pdata) | ||
600 | bp->mii_bus->phy_mask = pdata->phy_mask; | ||
601 | |||
602 | err = of_mdiobus_register(bp->mii_bus, np); | ||
603 | } | ||
597 | 604 | ||
598 | err = of_mdiobus_register(bp->mii_bus, np); | ||
599 | if (err) | 605 | if (err) |
600 | goto err_out_free_mdiobus; | 606 | goto err_out_free_fixed_link; |
601 | 607 | ||
602 | err = macb_mii_probe(bp->dev); | 608 | err = macb_mii_probe(bp->dev); |
603 | if (err) | 609 | if (err) |
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp) | |||
607 | 613 | ||
608 | err_out_unregister_bus: | 614 | err_out_unregister_bus: |
609 | mdiobus_unregister(bp->mii_bus); | 615 | mdiobus_unregister(bp->mii_bus); |
616 | err_out_free_fixed_link: | ||
610 | if (np && of_phy_is_fixed_link(np)) | 617 | if (np && of_phy_is_fixed_link(np)) |
611 | of_phy_deregister_fixed_link(np); | 618 | of_phy_deregister_fixed_link(np); |
612 | err_out_free_mdiobus: | 619 | err_out_free_mdiobus: |
@@ -642,7 +649,7 @@ static int macb_halt_tx(struct macb *bp) | |||
642 | if (!(status & MACB_BIT(TGO))) | 649 | if (!(status & MACB_BIT(TGO))) |
643 | return 0; | 650 | return 0; |
644 | 651 | ||
645 | usleep_range(10, 250); | 652 | udelay(250); |
646 | } while (time_before(halt_time, timeout)); | 653 | } while (time_before(halt_time, timeout)); |
647 | 654 | ||
648 | return -ETIMEDOUT; | 655 | return -ETIMEDOUT; |
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp) | |||
2028 | { | 2035 | { |
2029 | struct macb_queue *queue; | 2036 | struct macb_queue *queue; |
2030 | unsigned int q; | 2037 | unsigned int q; |
2038 | u32 ctrl = macb_readl(bp, NCR); | ||
2031 | 2039 | ||
2032 | /* Disable RX and TX (XXX: Should we halt the transmission | 2040 | /* Disable RX and TX (XXX: Should we halt the transmission |
2033 | * more gracefully?) | 2041 | * more gracefully?) |
2034 | */ | 2042 | */ |
2035 | macb_writel(bp, NCR, 0); | 2043 | ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); |
2036 | 2044 | ||
2037 | /* Clear the stats registers (XXX: Update stats first?) */ | 2045 | /* Clear the stats registers (XXX: Update stats first?) */ |
2038 | macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); | 2046 | ctrl |= MACB_BIT(CLRSTAT); |
2047 | |||
2048 | macb_writel(bp, NCR, ctrl); | ||
2039 | 2049 | ||
2040 | /* Clear all status flags */ | 2050 | /* Clear all status flags */ |
2041 | macb_writel(bp, TSR, -1); | 2051 | macb_writel(bp, TSR, -1); |
@@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp) | |||
2223 | } | 2233 | } |
2224 | 2234 | ||
2225 | /* Enable TX and RX */ | 2235 | /* Enable TX and RX */ |
2226 | macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); | 2236 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); |
2227 | } | 2237 | } |
2228 | 2238 | ||
2229 | /* The hash address register is 64 bits long and takes up two | 2239 | /* The hash address register is 64 bits long and takes up two |
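Two fixes land in macb_main.c: fixed-link registration moves into macb_mii_init() with a matching unwind label, and NCR is no longer written blindly. macb_reset_hw() used to write 0 to NCR, which also cleared unrelated control bits such as the management-port enable (MPE) set earlier; it now reads NCR, drops only RE and TE, and pulses CLRSTAT, while macb_init_hw() ORs RE|TE into the current value for the same reason. Condensed into one read-modify-write (macb_quiesce_ncr() is an invented name; the accessors and bit names are the driver's own):

    static void macb_quiesce_ncr(struct macb *bp)
    {
            u32 ncr = macb_readl(bp, NCR);

            ncr &= ~(MACB_BIT(RE) | MACB_BIT(TE));  /* stop RX/TX only */
            ncr |= MACB_BIT(CLRSTAT);               /* clear the statistics */
            macb_writel(bp, NCR, ncr);              /* MPE and friends survive */
    }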
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 623f73dd7738..c116f96956fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | |||
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in, | |||
417 | struct ch_filter_specification *fs) | 417 | struct ch_filter_specification *fs) |
418 | { | 418 | { |
419 | const struct tc_action *a; | 419 | const struct tc_action *a; |
420 | LIST_HEAD(actions); | 420 | int i; |
421 | 421 | ||
422 | tcf_exts_to_list(cls->exts, &actions); | 422 | tcf_exts_for_each_action(i, a, cls->exts) { |
423 | list_for_each_entry(a, &actions, list) { | ||
424 | if (is_tcf_gact_ok(a)) { | 423 | if (is_tcf_gact_ok(a)) { |
425 | fs->action = FILTER_PASS; | 424 | fs->action = FILTER_PASS; |
426 | } else if (is_tcf_gact_shot(a)) { | 425 | } else if (is_tcf_gact_shot(a)) { |
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, | |||
591 | bool act_redir = false; | 590 | bool act_redir = false; |
592 | bool act_pedit = false; | 591 | bool act_pedit = false; |
593 | bool act_vlan = false; | 592 | bool act_vlan = false; |
594 | LIST_HEAD(actions); | 593 | int i; |
595 | 594 | ||
596 | tcf_exts_to_list(cls->exts, &actions); | 595 | tcf_exts_for_each_action(i, a, cls->exts) { |
597 | list_for_each_entry(a, &actions, list) { | ||
598 | if (is_tcf_gact_ok(a)) { | 596 | if (is_tcf_gact_ok(a)) { |
599 | /* Do nothing */ | 597 | /* Do nothing */ |
600 | } else if (is_tcf_gact_shot(a)) { | 598 | } else if (is_tcf_gact_shot(a)) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 18eb2aedd4cb..c7d2b4dc7568 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | |||
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap, | |||
93 | unsigned int num_actions = 0; | 93 | unsigned int num_actions = 0; |
94 | const struct tc_action *a; | 94 | const struct tc_action *a; |
95 | struct tcf_exts *exts; | 95 | struct tcf_exts *exts; |
96 | LIST_HEAD(actions); | 96 | int i; |
97 | 97 | ||
98 | exts = cls->knode.exts; | 98 | exts = cls->knode.exts; |
99 | if (!tcf_exts_has_actions(exts)) | 99 | if (!tcf_exts_has_actions(exts)) |
100 | return -EINVAL; | 100 | return -EINVAL; |
101 | 101 | ||
102 | tcf_exts_to_list(exts, &actions); | 102 | tcf_exts_for_each_action(i, a, exts) { |
103 | list_for_each_entry(a, &actions, list) { | ||
104 | /* Don't allow more than one action per rule. */ | 103 | /* Don't allow more than one action per rule. */ |
105 | if (num_actions) | 104 | if (num_actions) |
106 | return -EINVAL; | 105 | return -EINVAL; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index fa5b30f547f6..08a750fb60c4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
@@ -220,10 +220,10 @@ struct hnae_desc_cb { | |||
220 | 220 | ||
221 | /* priv data for the desc, e.g. skb when use with ip stack*/ | 221 | /* priv data for the desc, e.g. skb when use with ip stack*/ |
222 | void *priv; | 222 | void *priv; |
223 | u16 page_offset; | 223 | u32 page_offset; |
224 | u16 reuse_flag; | 224 | u32 length; /* length of the buffer */ |
225 | 225 | ||
226 | u16 length; /* length of the buffer */ | 226 | u16 reuse_flag; |
227 | 227 | ||
228 | /* desc type, used by the ring user to mark the type of the priv data */ | 228 | /* desc type, used by the ring user to mark the type of the priv data */ |
229 | u16 type; | 229 | u16 type; |
@@ -486,6 +486,8 @@ struct hnae_ae_ops { | |||
486 | u8 *auto_neg, u16 *speed, u8 *duplex); | 486 | u8 *auto_neg, u16 *speed, u8 *duplex); |
487 | void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); | 487 | void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); |
488 | void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); | 488 | void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); |
489 | bool (*need_adjust_link)(struct hnae_handle *handle, | ||
490 | int speed, int duplex); | ||
489 | int (*set_loopback)(struct hnae_handle *handle, | 491 | int (*set_loopback)(struct hnae_handle *handle, |
490 | enum hnae_loop loop_mode, int en); | 492 | enum hnae_loop loop_mode, int en); |
491 | void (*get_ring_bdnum_limit)(struct hnae_queue *queue, | 493 | void (*get_ring_bdnum_limit)(struct hnae_queue *queue, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index e6aad30e7e69..b52029e26d15 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | |||
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle) | |||
155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; | 155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; |
156 | } | 156 | } |
157 | 157 | ||
158 | static int hns_ae_wait_flow_down(struct hnae_handle *handle) | ||
159 | { | ||
160 | struct dsaf_device *dsaf_dev; | ||
161 | struct hns_ppe_cb *ppe_cb; | ||
162 | struct hnae_vf_cb *vf_cb; | ||
163 | int ret; | ||
164 | int i; | ||
165 | |||
166 | for (i = 0; i < handle->q_num; i++) { | ||
167 | ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]); | ||
168 | if (ret) | ||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | ppe_cb = hns_get_ppe_cb(handle); | ||
173 | ret = hns_ppe_wait_tx_fifo_clean(ppe_cb); | ||
174 | if (ret) | ||
175 | return ret; | ||
176 | |||
177 | dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); | ||
178 | if (!dsaf_dev) | ||
179 | return -EINVAL; | ||
180 | ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id); | ||
181 | if (ret) | ||
182 | return ret; | ||
183 | |||
184 | vf_cb = hns_ae_get_vf_cb(handle); | ||
185 | ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb); | ||
186 | if (ret) | ||
187 | return ret; | ||
188 | |||
189 | mdelay(10); | ||
190 | return 0; | ||
191 | } | ||
192 | |||
158 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) | 193 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) |
159 | { | 194 | { |
160 | int q_num = handle->q_num; | 195 | int q_num = handle->q_num; |
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle, | |||
399 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); | 434 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); |
400 | } | 435 | } |
401 | 436 | ||
437 | static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed, | ||
438 | int duplex) | ||
439 | { | ||
440 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | ||
441 | |||
442 | return hns_mac_need_adjust_link(mac_cb, speed, duplex); | ||
443 | } | ||
444 | |||
402 | static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, | 445 | static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, |
403 | int duplex) | 446 | int duplex) |
404 | { | 447 | { |
405 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | 448 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); |
406 | 449 | ||
407 | hns_mac_adjust_link(mac_cb, speed, duplex); | 450 | switch (mac_cb->dsaf_dev->dsaf_ver) { |
451 | case AE_VERSION_1: | ||
452 | hns_mac_adjust_link(mac_cb, speed, duplex); | ||
453 | break; | ||
454 | |||
455 | case AE_VERSION_2: | ||
456 | /* chip need to clear all pkt inside */ | ||
457 | hns_mac_disable(mac_cb, MAC_COMM_MODE_RX); | ||
458 | if (hns_ae_wait_flow_down(handle)) { | ||
459 | hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); | ||
460 | break; | ||
461 | } | ||
462 | |||
463 | hns_mac_adjust_link(mac_cb, speed, duplex); | ||
464 | hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); | ||
465 | break; | ||
466 | |||
467 | default: | ||
468 | break; | ||
469 | } | ||
470 | |||
471 | return; | ||
408 | } | 472 | } |
409 | 473 | ||
410 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, | 474 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, |
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { | |||
902 | .get_status = hns_ae_get_link_status, | 966 | .get_status = hns_ae_get_link_status, |
903 | .get_info = hns_ae_get_mac_info, | 967 | .get_info = hns_ae_get_mac_info, |
904 | .adjust_link = hns_ae_adjust_link, | 968 | .adjust_link = hns_ae_adjust_link, |
969 | .need_adjust_link = hns_ae_need_adjust_link, | ||
905 | .set_loopback = hns_ae_config_loopback, | 970 | .set_loopback = hns_ae_config_loopback, |
906 | .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, | 971 | .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, |
907 | .get_pauseparam = hns_ae_get_pauseparam, | 972 | .get_pauseparam = hns_ae_get_pauseparam, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 5488c6e89f21..09e4061d1fa6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en, | |||
257 | *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); | 257 | *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); |
258 | } | 258 | } |
259 | 259 | ||
260 | static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed, | ||
261 | int duplex) | ||
262 | { | ||
263 | struct mac_driver *drv = (struct mac_driver *)mac_drv; | ||
264 | struct hns_mac_cb *mac_cb = drv->mac_cb; | ||
265 | |||
266 | return (mac_cb->speed != speed) || | ||
267 | (mac_cb->half_duplex == duplex); | ||
268 | } | ||
269 | |||
260 | static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, | 270 | static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, |
261 | u32 full_duplex) | 271 | u32 full_duplex) |
262 | { | 272 | { |
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) | |||
309 | hns_gmac_set_uc_match(mac_drv, en); | 319 | hns_gmac_set_uc_match(mac_drv, en); |
310 | } | 320 | } |
311 | 321 | ||
322 | int hns_gmac_wait_fifo_clean(void *mac_drv) | ||
323 | { | ||
324 | struct mac_driver *drv = (struct mac_driver *)mac_drv; | ||
325 | int wait_cnt; | ||
326 | u32 val; | ||
327 | |||
328 | wait_cnt = 0; | ||
329 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
330 | val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG); | ||
331 | /* bit5~bit0 is not send complete pkts */ | ||
332 | if ((val & 0x3f) == 0) | ||
333 | break; | ||
334 | usleep_range(100, 200); | ||
335 | } | ||
336 | |||
337 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
338 | dev_err(drv->dev, | ||
339 | "hns ge %d fifo was not idle.\n", drv->mac_id); | ||
340 | return -EBUSY; | ||
341 | } | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | |||
312 | static void hns_gmac_init(void *mac_drv) | 346 | static void hns_gmac_init(void *mac_drv) |
313 | { | 347 | { |
314 | u32 port; | 348 | u32 port; |
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) | |||
690 | mac_drv->mac_disable = hns_gmac_disable; | 724 | mac_drv->mac_disable = hns_gmac_disable; |
691 | mac_drv->mac_free = hns_gmac_free; | 725 | mac_drv->mac_free = hns_gmac_free; |
692 | mac_drv->adjust_link = hns_gmac_adjust_link; | 726 | mac_drv->adjust_link = hns_gmac_adjust_link; |
727 | mac_drv->need_adjust_link = hns_gmac_need_adjust_link; | ||
693 | mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; | 728 | mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; |
694 | mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; | 729 | mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; |
695 | mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; | 730 | mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; |
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) | |||
717 | mac_drv->get_strings = hns_gmac_get_strings; | 752 | mac_drv->get_strings = hns_gmac_get_strings; |
718 | mac_drv->update_stats = hns_gmac_update_stats; | 753 | mac_drv->update_stats = hns_gmac_update_stats; |
719 | mac_drv->set_promiscuous = hns_gmac_set_promisc; | 754 | mac_drv->set_promiscuous = hns_gmac_set_promisc; |
755 | mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean; | ||
720 | 756 | ||
721 | return (void *)mac_drv; | 757 | return (void *)mac_drv; |
722 | } | 758 | } |
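hns_gmac_wait_fifo_clean() and the ppe, rcb and dsaf wait helpers added further down all share one bounded-poll idiom: re-read a status register up to HNS_MAX_WAIT_CNT (10000) times, sleep 100 to 200 us between reads, and give up with -EBUSY. Stripped of the register specifics (hw_is_busy() is a placeholder, not a driver function):

    #include <linux/delay.h>
    #include <linux/device.h>

    static bool hw_is_busy(void);   /* placeholder for the status-register read */

    static int wait_until_idle(struct device *dev)
    {
            int tries = 0;

            while (tries++ < HNS_MAX_WAIT_CNT) {
                    if (!hw_is_busy())
                            return 0;
                    usleep_range(100, 200);
            }

            dev_err(dev, "hardware did not go idle\n");
            return -EBUSY;
    }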
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 1c2326bd76e2..6ed6f142427e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | |||
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, | |||
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
116 | 116 | ||
117 | /** | ||
118 | *hns_mac_is_adjust_link - check is need change mac speed and duplex register | ||
119 | *@mac_cb: mac device | ||
120 | *@speed: phy device speed | ||
121 | *@duplex:phy device duplex | ||
122 | * | ||
123 | */ | ||
124 | bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | ||
125 | { | ||
126 | struct mac_driver *mac_ctrl_drv; | ||
127 | |||
128 | mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac); | ||
129 | |||
130 | if (mac_ctrl_drv->need_adjust_link) | ||
131 | return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv, | ||
132 | (enum mac_speed)speed, duplex); | ||
133 | else | ||
134 | return true; | ||
135 | } | ||
136 | |||
117 | void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | 137 | void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) |
118 | { | 138 | { |
119 | int ret; | 139 | int ret; |
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) | |||
430 | return 0; | 450 | return 0; |
431 | } | 451 | } |
432 | 452 | ||
453 | int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb) | ||
454 | { | ||
455 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); | ||
456 | |||
457 | if (drv->wait_fifo_clean) | ||
458 | return drv->wait_fifo_clean(drv); | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
433 | void hns_mac_reset(struct hns_mac_cb *mac_cb) | 463 | void hns_mac_reset(struct hns_mac_cb *mac_cb) |
434 | { | 464 | { |
435 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); | 465 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); |
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev) | |||
998 | return DSAF_MAX_PORT_NUM; | 1028 | return DSAF_MAX_PORT_NUM; |
999 | } | 1029 | } |
1000 | 1030 | ||
1031 | void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) | ||
1032 | { | ||
1033 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | ||
1034 | |||
1035 | mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode); | ||
1036 | } | ||
1037 | |||
1038 | void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) | ||
1039 | { | ||
1040 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | ||
1041 | |||
1042 | mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode); | ||
1043 | } | ||
1044 | |||
1001 | /** | 1045 | /** |
1002 | * hns_mac_init - init mac | 1046 | * hns_mac_init - init mac |
1003 | * @dsaf_dev: dsa fabric device struct pointer | 1047 | * @dsaf_dev: dsa fabric device struct pointer |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index bbc0a98e7ca3..fbc75341bef7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | |||
@@ -356,6 +356,9 @@ struct mac_driver { | |||
356 | /*adjust mac mode of port,include speed and duplex*/ | 356 | /*adjust mac mode of port,include speed and duplex*/ |
357 | int (*adjust_link)(void *mac_drv, enum mac_speed speed, | 357 | int (*adjust_link)(void *mac_drv, enum mac_speed speed, |
358 | u32 full_duplex); | 358 | u32 full_duplex); |
359 | /* need adjust link */ | ||
360 | bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed, | ||
361 | int duplex); | ||
359 | /* config autoegotaite mode of port*/ | 362 | /* config autoegotaite mode of port*/ |
360 | void (*set_an_mode)(void *mac_drv, u8 enable); | 363 | void (*set_an_mode)(void *mac_drv, u8 enable); |
361 | /* config loopbank mode */ | 364 | /* config loopbank mode */ |
@@ -394,6 +397,7 @@ struct mac_driver { | |||
394 | void (*get_info)(void *mac_drv, struct mac_info *mac_info); | 397 | void (*get_info)(void *mac_drv, struct mac_info *mac_info); |
395 | 398 | ||
396 | void (*update_stats)(void *mac_drv); | 399 | void (*update_stats)(void *mac_drv); |
400 | int (*wait_fifo_clean)(void *mac_drv); | ||
397 | 401 | ||
398 | enum mac_mode mac_mode; | 402 | enum mac_mode mac_mode; |
399 | u8 mac_id; | 403 | u8 mac_id; |
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, | |||
427 | 431 | ||
428 | int hns_mac_init(struct dsaf_device *dsaf_dev); | 432 | int hns_mac_init(struct dsaf_device *dsaf_dev); |
429 | void mac_adjust_link(struct net_device *net_dev); | 433 | void mac_adjust_link(struct net_device *net_dev); |
434 | bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); | ||
430 | void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); | 435 | void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); |
431 | int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); | 436 | int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); |
432 | int hns_mac_set_multi(struct hns_mac_cb *mac_cb, | 437 | int hns_mac_set_multi(struct hns_mac_cb *mac_cb, |
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, | |||
463 | int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, | 468 | int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, |
464 | const unsigned char *addr); | 469 | const unsigned char *addr); |
465 | int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); | 470 | int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); |
471 | void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); | ||
472 | void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); | ||
473 | int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb); | ||
466 | 474 | ||
467 | #endif /* _HNS_DSAF_MAC_H */ | 475 | #endif /* _HNS_DSAF_MAC_H */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ca50c2553a9c..e557a4ef5996 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, | |||
2727 | soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; | 2727 | soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; |
2728 | } | 2728 | } |
2729 | 2729 | ||
2730 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) | ||
2731 | { | ||
2732 | u32 val, val_tmp; | ||
2733 | int wait_cnt; | ||
2734 | |||
2735 | if (port >= DSAF_SERVICE_NW_NUM) | ||
2736 | return 0; | ||
2737 | |||
2738 | wait_cnt = 0; | ||
2739 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
2740 | val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG + | ||
2741 | (port + DSAF_XGE_NUM) * 0x40); | ||
2742 | val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG + | ||
2743 | (port + DSAF_XGE_NUM) * 0x40); | ||
2744 | if (val == val_tmp) | ||
2745 | break; | ||
2746 | |||
2747 | usleep_range(100, 200); | ||
2748 | } | ||
2749 | |||
2750 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
2751 | dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n", | ||
2752 | val, val_tmp); | ||
2753 | return -EBUSY; | ||
2754 | } | ||
2755 | |||
2756 | return 0; | ||
2757 | } | ||
2758 | |||
2730 | /** | 2759 | /** |
2731 | * dsaf_probe - probo dsaf dev | 2760 | * dsaf_probe - probo dsaf dev |
2732 | * @pdev: dasf platform device | 2761 | * @pdev: dasf platform device |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 4507e8222683..0e1cd99831a6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | |||
@@ -44,6 +44,8 @@ struct hns_mac_cb; | |||
44 | #define DSAF_ROCE_CREDIT_CHN 8 | 44 | #define DSAF_ROCE_CREDIT_CHN 8 |
45 | #define DSAF_ROCE_CHAN_MODE 3 | 45 | #define DSAF_ROCE_CHAN_MODE 3 |
46 | 46 | ||
47 | #define HNS_MAX_WAIT_CNT 10000 | ||
48 | |||
47 | enum dsaf_roce_port_mode { | 49 | enum dsaf_roce_port_mode { |
48 | DSAF_ROCE_6PORT_MODE, | 50 | DSAF_ROCE_6PORT_MODE, |
49 | DSAF_ROCE_4PORT_MODE, | 51 | DSAF_ROCE_4PORT_MODE, |
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr( | |||
463 | 465 | ||
464 | int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, | 466 | int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, |
465 | u8 mac_id, u8 port_num); | 467 | u8 mac_id, u8 port_num); |
468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); | ||
466 | 469 | ||
467 | #endif /* __HNS_DSAF_MAIN_H__ */ | 470 | #endif /* __HNS_DSAF_MAIN_H__ */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index d160d8c9e45b..0942e4916d9d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) | |||
275 | dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); | 275 | dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); |
276 | } | 276 | } |
277 | 277 | ||
278 | int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb) | ||
279 | { | ||
280 | int wait_cnt; | ||
281 | u32 val; | ||
282 | |||
283 | wait_cnt = 0; | ||
284 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
285 | val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU; | ||
286 | if (!val) | ||
287 | break; | ||
288 | |||
289 | usleep_range(100, 200); | ||
290 | } | ||
291 | |||
292 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
293 | dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n", | ||
294 | val); | ||
295 | return -EBUSY; | ||
296 | } | ||
297 | |||
298 | return 0; | ||
299 | } | ||
300 | |||
278 | /** | 301 | /** |
279 | * ppe_init_hw - init ppe | 302 | * ppe_init_hw - init ppe |
280 | * @ppe_cb: ppe device | 303 | * @ppe_cb: ppe device |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 9d8e643e8aa6..f670e63a5a01 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | |||
@@ -100,6 +100,7 @@ struct ppe_common_cb { | |||
100 | 100 | ||
101 | }; | 101 | }; |
102 | 102 | ||
103 | int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb); | ||
103 | int hns_ppe_init(struct dsaf_device *dsaf_dev); | 104 | int hns_ppe_init(struct dsaf_device *dsaf_dev); |
104 | 105 | ||
105 | void hns_ppe_uninit(struct dsaf_device *dsaf_dev); | 106 | void hns_ppe_uninit(struct dsaf_device *dsaf_dev); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 9d76e2e54f9d..5d64519b9b1d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag) | |||
66 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); | 66 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); |
67 | } | 67 | } |
68 | 68 | ||
69 | int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs) | ||
70 | { | ||
71 | u32 head, tail; | ||
72 | int wait_cnt; | ||
73 | |||
74 | tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL); | ||
75 | wait_cnt = 0; | ||
76 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
77 | head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD); | ||
78 | if (tail == head) | ||
79 | break; | ||
80 | |||
81 | usleep_range(100, 200); | ||
82 | } | ||
83 | |||
84 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
85 | dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n"); | ||
86 | return -EBUSY; | ||
87 | } | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
69 | /** | 92 | /** |
70 | *hns_rcb_reset_ring_hw - ring reset | 93 | *hns_rcb_reset_ring_hw - ring reset |
71 | *@q: ring struct pointer | 94 | *@q: ring struct pointer |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 602816498c8d..2319b772a271 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | |||
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); | |||
136 | void hns_rcb_init_hw(struct ring_pair_cb *ring); | 136 | void hns_rcb_init_hw(struct ring_pair_cb *ring); |
137 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); | 137 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); |
138 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); | 138 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); |
139 | int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs); | ||
139 | u32 hns_rcb_get_rx_coalesced_frames( | 140 | u32 hns_rcb_get_rx_coalesced_frames( |
140 | struct rcb_common_cb *rcb_common, u32 port_idx); | 141 | struct rcb_common_cb *rcb_common, u32 port_idx); |
141 | u32 hns_rcb_get_tx_coalesced_frames( | 142 | u32 hns_rcb_get_tx_coalesced_frames( |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 886cbbf25761..74d935d82cbc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
@@ -464,6 +464,7 @@ | |||
464 | #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 | 464 | #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 |
465 | #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 | 465 | #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 |
466 | 466 | ||
467 | #define GMAC_FIFO_STATE_REG 0x0000UL | ||
467 | #define GMAC_DUPLEX_TYPE_REG 0x0008UL | 468 | #define GMAC_DUPLEX_TYPE_REG 0x0008UL |
468 | #define GMAC_FD_FC_TYPE_REG 0x000CUL | 469 | #define GMAC_FD_FC_TYPE_REG 0x000CUL |
469 | #define GMAC_TX_WATER_LINE_REG 0x0010UL | 470 | #define GMAC_TX_WATER_LINE_REG 0x0010UL |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 9f2b552aee33..f56855e63c96 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
@@ -406,113 +406,13 @@ out_net_tx_busy: | |||
406 | return NETDEV_TX_BUSY; | 406 | return NETDEV_TX_BUSY; |
407 | } | 407 | } |
408 | 408 | ||
409 | /** | ||
410 | * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE | ||
411 | * @data: pointer to the start of the headers | ||
412 | * @max: total length of section to find headers in | ||
413 | * | ||
414 | * This function is meant to determine the length of headers that will | ||
415 | * be recognized by hardware for LRO, GRO, and RSC offloads. The main | ||
416 | * motivation of doing this is to only perform one pull for IPv4 TCP | ||
417 | * packets so that we can do basic things like calculating the gso_size | ||
418 | * based on the average data per packet. | ||
419 | **/ | ||
420 | static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag, | ||
421 | unsigned int max_size) | ||
422 | { | ||
423 | unsigned char *network; | ||
424 | u8 hlen; | ||
425 | |||
426 | /* this should never happen, but better safe than sorry */ | ||
427 | if (max_size < ETH_HLEN) | ||
428 | return max_size; | ||
429 | |||
430 | /* initialize network frame pointer */ | ||
431 | network = data; | ||
432 | |||
433 | /* set first protocol and move network header forward */ | ||
434 | network += ETH_HLEN; | ||
435 | |||
436 | /* handle any vlan tag if present */ | ||
437 | if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S) | ||
438 | == HNS_RX_FLAG_VLAN_PRESENT) { | ||
439 | if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) | ||
440 | return max_size; | ||
441 | |||
442 | network += VLAN_HLEN; | ||
443 | } | ||
444 | |||
445 | /* handle L3 protocols */ | ||
446 | if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S) | ||
447 | == HNS_RX_FLAG_L3ID_IPV4) { | ||
448 | if ((typeof(max_size))(network - data) > | ||
449 | (max_size - sizeof(struct iphdr))) | ||
450 | return max_size; | ||
451 | |||
452 | /* access ihl as a u8 to avoid unaligned access on ia64 */ | ||
453 | hlen = (network[0] & 0x0F) << 2; | ||
454 | |||
455 | /* verify hlen meets minimum size requirements */ | ||
456 | if (hlen < sizeof(struct iphdr)) | ||
457 | return network - data; | ||
458 | |||
459 | /* record next protocol if header is present */ | ||
460 | } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S) | ||
461 | == HNS_RX_FLAG_L3ID_IPV6) { | ||
462 | if ((typeof(max_size))(network - data) > | ||
463 | (max_size - sizeof(struct ipv6hdr))) | ||
464 | return max_size; | ||
465 | |||
466 | /* record next protocol */ | ||
467 | hlen = sizeof(struct ipv6hdr); | ||
468 | } else { | ||
469 | return network - data; | ||
470 | } | ||
471 | |||
472 | /* relocate pointer to start of L4 header */ | ||
473 | network += hlen; | ||
474 | |||
475 | /* finally sort out TCP/UDP */ | ||
476 | if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S) | ||
477 | == HNS_RX_FLAG_L4ID_TCP) { | ||
478 | if ((typeof(max_size))(network - data) > | ||
479 | (max_size - sizeof(struct tcphdr))) | ||
480 | return max_size; | ||
481 | |||
482 | /* access doff as a u8 to avoid unaligned access on ia64 */ | ||
483 | hlen = (network[12] & 0xF0) >> 2; | ||
484 | |||
485 | /* verify hlen meets minimum size requirements */ | ||
486 | if (hlen < sizeof(struct tcphdr)) | ||
487 | return network - data; | ||
488 | |||
489 | network += hlen; | ||
490 | } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S) | ||
491 | == HNS_RX_FLAG_L4ID_UDP) { | ||
492 | if ((typeof(max_size))(network - data) > | ||
493 | (max_size - sizeof(struct udphdr))) | ||
494 | return max_size; | ||
495 | |||
496 | network += sizeof(struct udphdr); | ||
497 | } | ||
498 | |||
499 | /* If everything has gone correctly network should be the | ||
500 | * data section of the packet and will be the end of the header. | ||
501 | * If not then it probably represents the end of the last recognized | ||
502 | * header. | ||
503 | */ | ||
504 | if ((typeof(max_size))(network - data) < max_size) | ||
505 | return network - data; | ||
506 | else | ||
507 | return max_size; | ||
508 | } | ||
509 | |||
510 | static void hns_nic_reuse_page(struct sk_buff *skb, int i, | 409 | static void hns_nic_reuse_page(struct sk_buff *skb, int i, |
511 | struct hnae_ring *ring, int pull_len, | 410 | struct hnae_ring *ring, int pull_len, |
512 | struct hnae_desc_cb *desc_cb) | 411 | struct hnae_desc_cb *desc_cb) |
513 | { | 412 | { |
514 | struct hnae_desc *desc; | 413 | struct hnae_desc *desc; |
515 | int truesize, size; | 414 | u32 truesize; |
415 | int size; | ||
516 | int last_offset; | 416 | int last_offset; |
517 | bool twobufs; | 417 | bool twobufs; |
518 | 418 | ||
@@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i, | |||
530 | } | 430 | } |
531 | 431 | ||
532 | skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, | 432 | skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, |
533 | size - pull_len, truesize - pull_len); | 433 | size - pull_len, truesize); |
534 | 434 | ||
535 | /* avoid re-using remote pages,flag default unreuse */ | 435 | /* avoid re-using remote pages,flag default unreuse */ |
536 | if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) | 436 | if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) |
@@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, | |||
695 | } else { | 595 | } else { |
696 | ring->stats.seg_pkt_cnt++; | 596 | ring->stats.seg_pkt_cnt++; |
697 | 597 | ||
698 | pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE); | 598 | pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE); |
699 | memcpy(__skb_put(skb, pull_len), va, | 599 | memcpy(__skb_put(skb, pull_len), va, |
700 | ALIGN(pull_len, sizeof(long))); | 600 | ALIGN(pull_len, sizeof(long))); |
701 | 601 | ||
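The large removal above drops hns_nic_get_headlen(), which walked the L3/L4 headers by hand while clamping every step against max_size, in favour of the generic eth_get_headlen() call now visible in hns_nic_poll_rx_skb(). A minimal stand-alone sketch of that clamped walk for the IPv4 + TCP case; the names and structure here are illustrative, not the removed driver code:

    #include <assert.h>
    #include <stdint.h>

    /* Walk an IPv4 + TCP header, clamping every step against max_size so a
     * short or malformed buffer is never overrun.  Standard header layouts
     * only; this mirrors the idea, not the driver implementation.
     */
    static unsigned int headlen_ipv4_tcp(const uint8_t *data, unsigned int max_size)
    {
        unsigned int off = 0;
        unsigned int hlen;

        if (off + 20 > max_size)              /* no room for an IPv4 header */
            return off;

        hlen = (data[off] & 0x0F) << 2;       /* IHL is in 32-bit words */
        if (hlen < 20)                        /* malformed: stop at the L3 start */
            return off;
        off += hlen;

        if (off + 20 > max_size)              /* no room for a TCP header */
            return max_size;

        hlen = (data[off + 12] & 0xF0) >> 2;  /* TCP data offset, upper nibble */
        if (hlen < 20)
            return off;
        off += hlen;

        return off < max_size ? off : max_size;
    }

    int main(void)
    {
        uint8_t pkt[64] = { 0 };

        pkt[0]  = 0x45;   /* IPv4, IHL = 5 words = 20 bytes */
        pkt[32] = 0x50;   /* TCP data offset = 5 words = 20 bytes */

        assert(headlen_ipv4_tcp(pkt, sizeof(pkt)) == 40);
        return 0;
    }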
@@ -1212,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev) | |||
1212 | struct hnae_handle *h = priv->ae_handle; | 1112 | struct hnae_handle *h = priv->ae_handle; |
1213 | int state = 1; | 1113 | int state = 1; |
1214 | 1114 | ||
1115 | /* If there is no phy, do not need adjust link */ | ||
1215 | if (ndev->phydev) { | 1116 | if (ndev->phydev) { |
1216 | h->dev->ops->adjust_link(h, ndev->phydev->speed, | 1117 | /* When phy link down, do nothing */ |
1217 | ndev->phydev->duplex); | 1118 | if (ndev->phydev->link == 0) |
1218 | state = ndev->phydev->link; | 1119 | return; |
1120 | |||
1121 | if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed, | ||
1122 | ndev->phydev->duplex)) { | ||
1123 | /* because Hi161X chip don't support to change gmac | ||
1124 | * speed and duplex with traffic. Delay 200ms to | ||
1125 | * make sure there is no more data in chip FIFO. | ||
1126 | */ | ||
1127 | netif_carrier_off(ndev); | ||
1128 | msleep(200); | ||
1129 | h->dev->ops->adjust_link(h, ndev->phydev->speed, | ||
1130 | ndev->phydev->duplex); | ||
1131 | netif_carrier_on(ndev); | ||
1132 | } | ||
1219 | } | 1133 | } |
1134 | |||
1220 | state = state && h->dev->ops->get_status(h); | 1135 | state = state && h->dev->ops->get_status(h); |
1221 | 1136 | ||
1222 | if (state != priv->link) { | 1137 | if (state != priv->link) { |
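The reworked hns_nic_adjust_link() above only touches the MAC when need_adjust_link() reports a real speed/duplex change, and it follows a drain-then-reprogram order: take the carrier down, wait for the Hi161X FIFO to empty, apply the new settings, then bring the carrier back. A stand-alone sketch of that ordering with stubbed helpers; the stubs are placeholders, only the 200 ms delay comes from the patch:

    #include <stdio.h>
    #include <unistd.h>

    static void carrier_off(void) { puts("carrier off: the stack stops queueing"); }
    static void carrier_on(void)  { puts("carrier on: traffic resumes"); }

    static void program_mac(int speed, int duplex)
    {
        printf("reprogram MAC: %d Mb/s, %s duplex\n", speed, duplex ? "full" : "half");
    }

    /* Reconfigure only when something actually changed, and only after the
     * hardware has had time to drain in-flight frames.
     */
    static void adjust_link(int cur_speed, int cur_duplex, int new_speed, int new_duplex)
    {
        if (cur_speed == new_speed && cur_duplex == new_duplex)
            return;                  /* nothing to do, mirrors need_adjust_link() */

        carrier_off();
        usleep(200 * 1000);          /* 200 ms: let the FIFO drain before touching the MAC */
        program_mac(new_speed, new_duplex);
        carrier_on();
    }

    int main(void)
    {
        adjust_link(100, 1, 1000, 1);   /* 100 -> 1000 Mb/s triggers the sequence */
        return 0;
    }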
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 08f3c4743f74..774beda040a1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev, | |||
243 | } | 243 | } |
244 | 244 | ||
245 | if (h->dev->ops->adjust_link) { | 245 | if (h->dev->ops->adjust_link) { |
246 | netif_carrier_off(net_dev); | ||
246 | h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); | 247 | h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); |
248 | netif_carrier_on(net_dev); | ||
247 | return 0; | 249 | return 0; |
248 | } | 250 | } |
249 | 251 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 3554dca7a680..955c4ab18b03 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | |||
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, | |||
2019 | struct hns3_desc_cb *desc_cb) | 2019 | struct hns3_desc_cb *desc_cb) |
2020 | { | 2020 | { |
2021 | struct hns3_desc *desc; | 2021 | struct hns3_desc *desc; |
2022 | int truesize, size; | 2022 | u32 truesize; |
2023 | int size; | ||
2023 | int last_offset; | 2024 | int last_offset; |
2024 | bool twobufs; | 2025 | bool twobufs; |
2025 | 2026 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index a02a96aee2a2..cb450d7ec8c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | |||
@@ -284,11 +284,11 @@ struct hns3_desc_cb { | |||
284 | 284 | ||
285 | /* priv data for the desc, e.g. skb when use with ip stack*/ | 285 | /* priv data for the desc, e.g. skb when use with ip stack*/ |
286 | void *priv; | 286 | void *priv; |
287 | u16 page_offset; | 287 | u32 page_offset; |
288 | u16 reuse_flag; | ||
289 | |||
290 | u32 length; /* length of the buffer */ | 288 | u32 length; /* length of the buffer */ |
291 | 289 | ||
290 | u16 reuse_flag; | ||
291 | |||
292 | /* desc type, used by the ring user to mark the type of the priv data */ | 292 | /* desc type, used by the ring user to mark the type of the priv data */ |
293 | u16 type; | 293 | u16 type; |
294 | }; | 294 | }; |
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 354c0982847b..372664686309 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s | |||
494 | case 16384: | 494 | case 16384: |
495 | ret |= EMAC_MR1_RFS_16K; | 495 | ret |= EMAC_MR1_RFS_16K; |
496 | break; | 496 | break; |
497 | case 8192: | ||
498 | ret |= EMAC4_MR1_RFS_8K; | ||
499 | break; | ||
500 | case 4096: | 497 | case 4096: |
501 | ret |= EMAC_MR1_RFS_4K; | 498 | ret |= EMAC_MR1_RFS_4K; |
502 | break; | 499 | break; |
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ | |||
537 | case 16384: | 534 | case 16384: |
538 | ret |= EMAC4_MR1_RFS_16K; | 535 | ret |= EMAC4_MR1_RFS_16K; |
539 | break; | 536 | break; |
537 | case 8192: | ||
538 | ret |= EMAC4_MR1_RFS_8K; | ||
539 | break; | ||
540 | case 4096: | 540 | case 4096: |
541 | ret |= EMAC4_MR1_RFS_4K; | 541 | ret |= EMAC4_MR1_RFS_4K; |
542 | break; | 542 | break; |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index dafdd4ade705..4f0daf67b18d 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1823 | adapter->map_id = 1; | 1823 | adapter->map_id = 1; |
1824 | release_rx_pools(adapter); | 1824 | release_rx_pools(adapter); |
1825 | release_tx_pools(adapter); | 1825 | release_tx_pools(adapter); |
1826 | init_rx_pools(netdev); | 1826 | rc = init_rx_pools(netdev); |
1827 | init_tx_pools(netdev); | 1827 | if (rc) |
1828 | return rc; | ||
1829 | rc = init_tx_pools(netdev); | ||
1830 | if (rc) | ||
1831 | return rc; | ||
1828 | 1832 | ||
1829 | release_napi(adapter); | 1833 | release_napi(adapter); |
1830 | init_napi(adapter); | 1834 | rc = init_napi(adapter); |
1835 | if (rc) | ||
1836 | return rc; | ||
1831 | } else { | 1837 | } else { |
1832 | rc = reset_tx_pools(adapter); | 1838 | rc = reset_tx_pools(adapter); |
1833 | if (rc) | 1839 | if (rc) |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index bdb3f8e65ed4..2569a168334c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev, | |||
624 | adapter->tx_ring = tx_old; | 624 | adapter->tx_ring = tx_old; |
625 | e1000_free_all_rx_resources(adapter); | 625 | e1000_free_all_rx_resources(adapter); |
626 | e1000_free_all_tx_resources(adapter); | 626 | e1000_free_all_tx_resources(adapter); |
627 | kfree(tx_old); | ||
628 | kfree(rx_old); | ||
629 | adapter->rx_ring = rxdr; | 627 | adapter->rx_ring = rxdr; |
630 | adapter->tx_ring = txdr; | 628 | adapter->tx_ring = txdr; |
631 | err = e1000_up(adapter); | 629 | err = e1000_up(adapter); |
632 | if (err) | 630 | if (err) |
633 | goto err_setup; | 631 | goto err_setup; |
634 | } | 632 | } |
633 | kfree(tx_old); | ||
634 | kfree(rx_old); | ||
635 | 635 | ||
636 | clear_bit(__E1000_RESETTING, &adapter->flags); | 636 | clear_bit(__E1000_RESETTING, &adapter->flags); |
637 | return 0; | 637 | return 0; |
@@ -644,7 +644,8 @@ err_setup_rx: | |||
644 | err_alloc_rx: | 644 | err_alloc_rx: |
645 | kfree(txdr); | 645 | kfree(txdr); |
646 | err_alloc_tx: | 646 | err_alloc_tx: |
647 | e1000_up(adapter); | 647 | if (netif_running(adapter->netdev)) |
648 | e1000_up(adapter); | ||
648 | err_setup: | 649 | err_setup: |
649 | clear_bit(__E1000_RESETTING, &adapter->flags); | 650 | clear_bit(__E1000_RESETTING, &adapter->flags); |
650 | return err; | 651 | return err; |
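The e1000_set_ringparam() change above moves the kfree() of the old descriptor arrays out of the netif_running() branch, so they are released on every successful resize rather than only when the interface was up, and the error path now calls e1000_up() only for an interface that was actually running. Roughly this shape, as a stand-alone sketch with hypothetical names:

    #include <stdbool.h>
    #include <stdlib.h>

    struct ring { int count; };

    /* Swap in a newly sized ring.  On success the old ring is always freed,
     * whether or not the interface was running; on failure nothing that was
     * not already running gets brought up.
     */
    static int resize_ring(struct ring **cur, int new_count, bool was_running)
    {
        struct ring *old = *cur;
        struct ring *nring = malloc(sizeof(*nring));

        if (!nring)
            return -1;               /* error path: keep the old ring as-is */
        nring->count = new_count;

        if (was_running) {
            /* down(); install nring; up(); -- an up() failure would restore
             * old, free nring, and only re-up because we were running
             */
        }

        *cur = nring;
        free(old);                   /* freed on every success path, up or down */
        return 0;
    }

    int main(void)
    {
        struct ring *r = malloc(sizeof(*r));

        if (!r)
            return 1;
        r->count = 256;
        if (resize_ring(&r, 512, false) == 0)
            free(r);
        return 0;
    }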
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index abcd096ede14..5ff6caa83948 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) | |||
2013 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) | 2013 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) |
2014 | i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); | 2014 | i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); |
2015 | 2015 | ||
2016 | WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, | 2016 | WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, |
2017 | "stat strings count mismatch!"); | 2017 | "stat strings count mismatch!"); |
2018 | } | 2018 | } |
2019 | 2019 | ||
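The WARN_ONCE fix above matters because the stat-string helpers advance data as they emit each ETH_GSTRING_LEN-sized entry while p still points at the start of the buffer, so the number of bytes written is data - p; the old p - data is negative and the check could never trip correctly. A stand-alone illustration of the pointer arithmetic:

    #include <assert.h>
    #include <string.h>

    #define GSTRING_LEN 32

    int main(void)
    {
        char buf[3 * GSTRING_LEN];
        char *p = buf;               /* remembers where the buffer starts */
        char *data = buf;            /* cursor the helpers keep advancing */
        int i;

        for (i = 0; i < 3; i++) {
            memset(data, 0, GSTRING_LEN);
            strcpy(data, "stat");    /* one fixed-width string per statistic */
            data += GSTRING_LEN;
        }

        assert(data - p == 3 * GSTRING_LEN);   /* bytes written: cursor minus start */
        return 0;
    }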
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f2c622e78802..ac685ad4d877 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, | |||
5122 | u8 *bw_share) | 5122 | u8 *bw_share) |
5123 | { | 5123 | { |
5124 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; | 5124 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; |
5125 | struct i40e_pf *pf = vsi->back; | ||
5125 | i40e_status ret; | 5126 | i40e_status ret; |
5126 | int i; | 5127 | int i; |
5127 | 5128 | ||
5128 | if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) | 5129 | /* There is no need to reset BW when mqprio mode is on. */ |
5130 | if (pf->flags & I40E_FLAG_TC_MQPRIO) | ||
5129 | return 0; | 5131 | return 0; |
5130 | if (!vsi->mqprio_qopt.qopt.hw) { | 5132 | if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { |
5131 | ret = i40e_set_bw_limit(vsi, vsi->seid, 0); | 5133 | ret = i40e_set_bw_limit(vsi, vsi->seid, 0); |
5132 | if (ret) | 5134 | if (ret) |
5133 | dev_info(&vsi->back->pdev->dev, | 5135 | dev_info(&pf->pdev->dev, |
5134 | "Failed to reset tx rate for vsi->seid %u\n", | 5136 | "Failed to reset tx rate for vsi->seid %u\n", |
5135 | vsi->seid); | 5137 | vsi->seid); |
5136 | return ret; | 5138 | return ret; |
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, | |||
5139 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | 5141 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) |
5140 | bw_data.tc_bw_credits[i] = bw_share[i]; | 5142 | bw_data.tc_bw_credits[i] = bw_share[i]; |
5141 | 5143 | ||
5142 | ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, | 5144 | ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); |
5143 | NULL); | ||
5144 | if (ret) { | 5145 | if (ret) { |
5145 | dev_info(&vsi->back->pdev->dev, | 5146 | dev_info(&pf->pdev->dev, |
5146 | "AQ command Config VSI BW allocation per TC failed = %d\n", | 5147 | "AQ command Config VSI BW allocation per TC failed = %d\n", |
5147 | vsi->back->hw.aq.asq_last_status); | 5148 | pf->hw.aq.asq_last_status); |
5148 | return -EINVAL; | 5149 | return -EINVAL; |
5149 | } | 5150 | } |
5150 | 5151 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d8b5fff581e7..868f4a1d0f72 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h | |||
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[]; | |||
89 | #define ice_for_each_rxq(vsi, i) \ | 89 | #define ice_for_each_rxq(vsi, i) \ |
90 | for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) | 90 | for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) |
91 | 91 | ||
92 | /* Macros for each allocated tx/rx ring whether used or not in a VSI */ | ||
93 | #define ice_for_each_alloc_txq(vsi, i) \ | ||
94 | for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++) | ||
95 | |||
96 | #define ice_for_each_alloc_rxq(vsi, i) \ | ||
97 | for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++) | ||
98 | |||
92 | struct ice_tc_info { | 99 | struct ice_tc_info { |
93 | u16 qoffset; | 100 | u16 qoffset; |
94 | u16 qcount; | 101 | u16 qcount; |
@@ -189,9 +196,9 @@ struct ice_vsi { | |||
189 | struct list_head tmp_sync_list; /* MAC filters to be synced */ | 196 | struct list_head tmp_sync_list; /* MAC filters to be synced */ |
190 | struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ | 197 | struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ |
191 | 198 | ||
192 | bool irqs_ready; | 199 | u8 irqs_ready; |
193 | bool current_isup; /* Sync 'link up' logging */ | 200 | u8 current_isup; /* Sync 'link up' logging */ |
194 | bool stat_offsets_loaded; | 201 | u8 stat_offsets_loaded; |
195 | 202 | ||
196 | /* queue information */ | 203 | /* queue information */ |
197 | u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ | 204 | u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ |
@@ -262,7 +269,7 @@ struct ice_pf { | |||
262 | struct ice_hw_port_stats stats; | 269 | struct ice_hw_port_stats stats; |
263 | struct ice_hw_port_stats stats_prev; | 270 | struct ice_hw_port_stats stats_prev; |
264 | struct ice_hw hw; | 271 | struct ice_hw hw; |
265 | bool stat_prev_loaded; /* has previous stats been loaded */ | 272 | u8 stat_prev_loaded; /* has previous stats been loaded */ |
266 | char int_name[ICE_INT_NAME_STR_LEN]; | 273 | char int_name[ICE_INT_NAME_STR_LEN]; |
267 | }; | 274 | }; |
268 | 275 | ||
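The new ice_for_each_alloc_txq()/ice_for_each_alloc_rxq() macros iterate over every ring the VSI has allocated rather than only the ones currently in use, which the ethtool statistics changes further below depend on; the bool-to-u8 conversions in the same file swap bool structure members for explicit one-byte fields. The macros are just bounded for loops; a stand-alone sketch with a made-up struct:

    #include <stdio.h>

    struct vsi { int alloc_txq; };          /* made-up stand-in for the real VSI */

    #define for_each_alloc_txq(vsi, i) \
        for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

    int main(void)
    {
        struct vsi v = { .alloc_txq = 4 };
        int i;

        for_each_alloc_txq(&v, i)
            printf("tx ring %d\n", i);      /* visits rings 0..3, in use or not */
        return 0;
    }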
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 7541ec2270b3..a0614f472658 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | |||
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props { | |||
329 | /* VLAN section */ | 329 | /* VLAN section */ |
330 | __le16 pvid; /* VLANS include priority bits */ | 330 | __le16 pvid; /* VLANS include priority bits */ |
331 | u8 pvlan_reserved[2]; | 331 | u8 pvlan_reserved[2]; |
332 | u8 port_vlan_flags; | 332 | u8 vlan_flags; |
333 | #define ICE_AQ_VSI_PVLAN_MODE_S 0 | 333 | #define ICE_AQ_VSI_VLAN_MODE_S 0 |
334 | #define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S) | 334 | #define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S) |
335 | #define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1 | 335 | #define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1 |
336 | #define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2 | 336 | #define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2 |
337 | #define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3 | 337 | #define ICE_AQ_VSI_VLAN_MODE_ALL 0x3 |
338 | #define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) | 338 | #define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) |
339 | #define ICE_AQ_VSI_PVLAN_EMOD_S 3 | 339 | #define ICE_AQ_VSI_VLAN_EMOD_S 3 |
340 | #define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) | 340 | #define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) |
341 | #define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S) | 341 | #define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S) |
342 | #define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S) | 342 | #define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S) |
343 | #define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S) | 343 | #define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S) |
344 | #define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) | 344 | #define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) |
345 | u8 pvlan_reserved2[3]; | 345 | u8 pvlan_reserved2[3]; |
346 | /* ingress egress up sections */ | 346 | /* ingress egress up sections */ |
347 | __le32 ingress_table; /* bitmap, 3 bits per up */ | 347 | __le32 ingress_table; /* bitmap, 3 bits per up */ |
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act { | |||
594 | #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) | 594 | #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) |
595 | #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 | 595 | #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 |
596 | #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) | 596 | #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) |
597 | #define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7 | ||
597 | 598 | ||
598 | /* Action = 7 - Set Stat count */ | 599 | /* Action = 7 - Set Stat count */ |
599 | #define ICE_LG_ACT_STAT_COUNT 0x7 | 600 | #define ICE_LG_ACT_STAT_COUNT 0x7 |
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 71d032cc5fa7..661beea6af79 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c | |||
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) | |||
45 | /** | 45 | /** |
46 | * ice_clear_pf_cfg - Clear PF configuration | 46 | * ice_clear_pf_cfg - Clear PF configuration |
47 | * @hw: pointer to the hardware structure | 47 | * @hw: pointer to the hardware structure |
48 | * | ||
49 | * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port | ||
50 | * configuration, flow director filters, etc.). | ||
48 | */ | 51 | */ |
49 | enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) | 52 | enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) |
50 | { | 53 | { |
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) | |||
1483 | struct ice_phy_info *phy_info; | 1486 | struct ice_phy_info *phy_info; |
1484 | enum ice_status status = 0; | 1487 | enum ice_status status = 0; |
1485 | 1488 | ||
1486 | if (!pi) | 1489 | if (!pi || !link_up) |
1487 | return ICE_ERR_PARAM; | 1490 | return ICE_ERR_PARAM; |
1488 | 1491 | ||
1489 | phy_info = &pi->phy; | 1492 | phy_info = &pi->phy; |
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, | |||
1619 | } | 1622 | } |
1620 | 1623 | ||
1621 | /* LUT size is only valid for Global and PF table types */ | 1624 | /* LUT size is only valid for Global and PF table types */ |
1622 | if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) { | 1625 | switch (lut_size) { |
1623 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << | 1626 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: |
1624 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1627 | break; |
1625 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1628 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: |
1626 | } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) { | ||
1627 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << | 1629 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << |
1628 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1630 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & |
1629 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1631 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; |
1630 | } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) && | 1632 | break; |
1631 | (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) { | 1633 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: |
1632 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << | 1634 | if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { |
1633 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1635 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << |
1634 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1636 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & |
1635 | } else { | 1637 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; |
1638 | break; | ||
1639 | } | ||
1640 | /* fall-through */ | ||
1641 | default: | ||
1636 | status = ICE_ERR_PARAM; | 1642 | status = ICE_ERR_PARAM; |
1637 | goto ice_aq_get_set_rss_lut_exit; | 1643 | goto ice_aq_get_set_rss_lut_exit; |
1638 | } | 1644 | } |
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 7c511f144ed6..62be72fdc8f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c | |||
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) | |||
597 | return 0; | 597 | return 0; |
598 | 598 | ||
599 | init_ctrlq_free_rq: | 599 | init_ctrlq_free_rq: |
600 | ice_shutdown_rq(hw, cq); | 600 | if (cq->rq.head) { |
601 | ice_shutdown_sq(hw, cq); | 601 | ice_shutdown_rq(hw, cq); |
602 | mutex_destroy(&cq->sq_lock); | 602 | mutex_destroy(&cq->rq_lock); |
603 | mutex_destroy(&cq->rq_lock); | 603 | } |
604 | if (cq->sq.head) { | ||
605 | ice_shutdown_sq(hw, cq); | ||
606 | mutex_destroy(&cq->sq_lock); | ||
607 | } | ||
604 | return status; | 608 | return status; |
605 | } | 609 | } |
606 | 610 | ||
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) | |||
706 | return; | 710 | return; |
707 | } | 711 | } |
708 | 712 | ||
709 | ice_shutdown_sq(hw, cq); | 713 | if (cq->sq.head) { |
710 | ice_shutdown_rq(hw, cq); | 714 | ice_shutdown_sq(hw, cq); |
711 | mutex_destroy(&cq->sq_lock); | 715 | mutex_destroy(&cq->sq_lock); |
712 | mutex_destroy(&cq->rq_lock); | 716 | } |
717 | if (cq->rq.head) { | ||
718 | ice_shutdown_rq(hw, cq); | ||
719 | mutex_destroy(&cq->rq_lock); | ||
720 | } | ||
713 | } | 721 | } |
714 | 722 | ||
715 | /** | 723 | /** |
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, | |||
1057 | 1065 | ||
1058 | clean_rq_elem_out: | 1066 | clean_rq_elem_out: |
1059 | /* Set pending if needed, unlock and return */ | 1067 | /* Set pending if needed, unlock and return */ |
1060 | if (pending) | 1068 | if (pending) { |
1069 | /* re-read HW head to calculate actual pending messages */ | ||
1070 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); | ||
1061 | *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); | 1071 | *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); |
1072 | } | ||
1062 | clean_rq_elem_err: | 1073 | clean_rq_elem_err: |
1063 | mutex_unlock(&cq->rq_lock); | 1074 | mutex_unlock(&cq->rq_lock); |
1064 | 1075 | ||
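ice_clean_rq_elem() above now re-reads the hardware head register right before reporting how many receive-queue descriptors are still pending, so messages that arrived while the last one was being processed are counted. The wrap-aware arithmetic it uses works like this (stand-alone sketch; the ring size and indices are made up):

    #include <assert.h>
    #include <stdint.h>

    /* Entries between next-to-clean (ntc) and the hardware head (ntu),
     * accounting for the head having wrapped past the end of the ring.
     */
    static uint16_t ring_pending(uint16_t ntc, uint16_t ntu, uint16_t count)
    {
        return (uint16_t)((ntc > ntu ? count : 0) + (ntu - ntc));
    }

    int main(void)
    {
        assert(ring_pending(3, 7, 16) == 4);    /* no wrap: 4 entries ahead */
        assert(ring_pending(14, 2, 16) == 4);   /* wrapped: slots 14, 15, 0, 1 */
        assert(ring_pending(5, 5, 16) == 0);    /* caught up */
        return 0;
    }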
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 1db304c01d10..c71a9b528d6d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c | |||
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev) | |||
26 | { | 26 | { |
27 | struct ice_netdev_priv *np = netdev_priv(netdev); | 27 | struct ice_netdev_priv *np = netdev_priv(netdev); |
28 | 28 | ||
29 | return ((np->vsi->num_txq + np->vsi->num_rxq) * | 29 | return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * |
30 | (sizeof(struct ice_q_stats) / sizeof(u64))); | 30 | (sizeof(struct ice_q_stats) / sizeof(u64))); |
31 | } | 31 | } |
32 | 32 | ||
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
218 | p += ETH_GSTRING_LEN; | 218 | p += ETH_GSTRING_LEN; |
219 | } | 219 | } |
220 | 220 | ||
221 | ice_for_each_txq(vsi, i) { | 221 | ice_for_each_alloc_txq(vsi, i) { |
222 | snprintf(p, ETH_GSTRING_LEN, | 222 | snprintf(p, ETH_GSTRING_LEN, |
223 | "tx-queue-%u.tx_packets", i); | 223 | "tx-queue-%u.tx_packets", i); |
224 | p += ETH_GSTRING_LEN; | 224 | p += ETH_GSTRING_LEN; |
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
226 | p += ETH_GSTRING_LEN; | 226 | p += ETH_GSTRING_LEN; |
227 | } | 227 | } |
228 | 228 | ||
229 | ice_for_each_rxq(vsi, i) { | 229 | ice_for_each_alloc_rxq(vsi, i) { |
230 | snprintf(p, ETH_GSTRING_LEN, | 230 | snprintf(p, ETH_GSTRING_LEN, |
231 | "rx-queue-%u.rx_packets", i); | 231 | "rx-queue-%u.rx_packets", i); |
232 | p += ETH_GSTRING_LEN; | 232 | p += ETH_GSTRING_LEN; |
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) | |||
253 | { | 253 | { |
254 | switch (sset) { | 254 | switch (sset) { |
255 | case ETH_SS_STATS: | 255 | case ETH_SS_STATS: |
256 | /* The number (and order) of strings reported *must* remain | ||
257 | * constant for a given netdevice. This function must not | ||
258 | * report a different number based on run time parameters | ||
259 | * (such as the number of queues in use, or the setting of | ||
260 | * a private ethtool flag). This is due to the nature of the | ||
261 | * ethtool stats API. | ||
262 | * | ||
263 | * User space programs such as ethtool must make 3 separate | ||
264 | * ioctl requests, one for size, one for the strings, and | ||
265 | * finally one for the stats. Since these cross into | ||
266 | * user space, changes to the number or size could result in | ||
267 | * undefined memory access or incorrect string<->value | ||
268 | * correlations for statistics. | ||
269 | * | ||
270 | * Even if it appears to be safe, changes to the size or | ||
271 | * order of strings will suffer from race conditions and are | ||
272 | * not safe. | ||
273 | */ | ||
256 | return ICE_ALL_STATS_LEN(netdev); | 274 | return ICE_ALL_STATS_LEN(netdev); |
257 | default: | 275 | default: |
258 | return -EOPNOTSUPP; | 276 | return -EOPNOTSUPP; |
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev, | |||
280 | /* populate per queue stats */ | 298 | /* populate per queue stats */ |
281 | rcu_read_lock(); | 299 | rcu_read_lock(); |
282 | 300 | ||
283 | ice_for_each_txq(vsi, j) { | 301 | ice_for_each_alloc_txq(vsi, j) { |
284 | ring = READ_ONCE(vsi->tx_rings[j]); | 302 | ring = READ_ONCE(vsi->tx_rings[j]); |
285 | if (!ring) | 303 | if (ring) { |
286 | continue; | 304 | data[i++] = ring->stats.pkts; |
287 | data[i++] = ring->stats.pkts; | 305 | data[i++] = ring->stats.bytes; |
288 | data[i++] = ring->stats.bytes; | 306 | } else { |
307 | data[i++] = 0; | ||
308 | data[i++] = 0; | ||
309 | } | ||
289 | } | 310 | } |
290 | 311 | ||
291 | ice_for_each_rxq(vsi, j) { | 312 | ice_for_each_alloc_rxq(vsi, j) { |
292 | ring = READ_ONCE(vsi->rx_rings[j]); | 313 | ring = READ_ONCE(vsi->rx_rings[j]); |
293 | data[i++] = ring->stats.pkts; | 314 | if (ring) { |
294 | data[i++] = ring->stats.bytes; | 315 | data[i++] = ring->stats.pkts; |
316 | data[i++] = ring->stats.bytes; | ||
317 | } else { | ||
318 | data[i++] = 0; | ||
319 | data[i++] = 0; | ||
320 | } | ||
295 | } | 321 | } |
296 | 322 | ||
297 | rcu_read_unlock(); | 323 | rcu_read_unlock(); |
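Because the count reported for ETH_SS_STATS has to stay constant for the lifetime of the netdevice (see the comment added above), the per-queue loops now emit a pair of zeros for any allocated-but-unused ring instead of skipping it, keeping the value array aligned with the string array. In miniature (stand-alone sketch with a made-up ring table):

    #include <assert.h>
    #include <stddef.h>

    struct ring_stats { unsigned long pkts, bytes; };

    int main(void)
    {
        /* four slots were advertised to ethtool, but only two rings exist */
        struct ring_stats r0 = { 10, 1000 }, r1 = { 20, 2000 };
        struct ring_stats *rings[4] = { &r0, &r1, NULL, NULL };
        unsigned long long data[8];
        size_t i, j = 0;

        for (i = 0; i < 4; i++) {
            if (rings[i]) {
                data[j++] = rings[i]->pkts;
                data[j++] = rings[i]->bytes;
            } else {
                data[j++] = 0;   /* placeholders keep the count and order stable */
                data[j++] = 0;
            }
        }
        assert(j == 8);          /* always matches the advertised string count */
        return 0;
    }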
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) | |||
519 | goto done; | 545 | goto done; |
520 | } | 546 | } |
521 | 547 | ||
522 | for (i = 0; i < vsi->num_txq; i++) { | 548 | for (i = 0; i < vsi->alloc_txq; i++) { |
523 | /* clone ring and setup updated count */ | 549 | /* clone ring and setup updated count */ |
524 | tx_rings[i] = *vsi->tx_rings[i]; | 550 | tx_rings[i] = *vsi->tx_rings[i]; |
525 | tx_rings[i].count = new_tx_cnt; | 551 | tx_rings[i].count = new_tx_cnt; |
@@ -551,7 +577,7 @@ process_rx: | |||
551 | goto done; | 577 | goto done; |
552 | } | 578 | } |
553 | 579 | ||
554 | for (i = 0; i < vsi->num_rxq; i++) { | 580 | for (i = 0; i < vsi->alloc_rxq; i++) { |
555 | /* clone ring and setup updated count */ | 581 | /* clone ring and setup updated count */ |
556 | rx_rings[i] = *vsi->rx_rings[i]; | 582 | rx_rings[i] = *vsi->rx_rings[i]; |
557 | rx_rings[i].count = new_rx_cnt; | 583 | rx_rings[i].count = new_rx_cnt; |
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 499904874b3f..6076fc87df9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h | |||
@@ -121,10 +121,6 @@ | |||
121 | #define PFINT_FW_CTL_CAUSE_ENA_S 30 | 121 | #define PFINT_FW_CTL_CAUSE_ENA_S 30 |
122 | #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) | 122 | #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) |
123 | #define PFINT_OICR 0x0016CA00 | 123 | #define PFINT_OICR 0x0016CA00 |
124 | #define PFINT_OICR_HLP_RDY_S 14 | ||
125 | #define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) | ||
126 | #define PFINT_OICR_CPM_RDY_S 15 | ||
127 | #define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S) | ||
128 | #define PFINT_OICR_ECC_ERR_S 16 | 124 | #define PFINT_OICR_ECC_ERR_S 16 |
129 | #define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) | 125 | #define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) |
130 | #define PFINT_OICR_MAL_DETECT_S 19 | 126 | #define PFINT_OICR_MAL_DETECT_S 19 |
@@ -133,10 +129,6 @@ | |||
133 | #define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) | 129 | #define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) |
134 | #define PFINT_OICR_PCI_EXCEPTION_S 21 | 130 | #define PFINT_OICR_PCI_EXCEPTION_S 21 |
135 | #define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) | 131 | #define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) |
136 | #define PFINT_OICR_GPIO_S 22 | ||
137 | #define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S) | ||
138 | #define PFINT_OICR_STORM_DETECT_S 24 | ||
139 | #define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S) | ||
140 | #define PFINT_OICR_HMC_ERR_S 26 | 132 | #define PFINT_OICR_HMC_ERR_S 26 |
141 | #define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) | 133 | #define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) |
142 | #define PFINT_OICR_PE_CRITERR_S 28 | 134 | #define PFINT_OICR_PE_CRITERR_S 28 |
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d23a91665b46..068dbc740b76 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | |||
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits { | |||
265 | struct ice_rlan_ctx { | 265 | struct ice_rlan_ctx { |
266 | u16 head; | 266 | u16 head; |
267 | u16 cpuid; /* bigger than needed, see above for reason */ | 267 | u16 cpuid; /* bigger than needed, see above for reason */ |
268 | #define ICE_RLAN_BASE_S 7 | ||
268 | u64 base; | 269 | u64 base; |
269 | u16 qlen; | 270 | u16 qlen; |
270 | #define ICE_RLAN_CTX_DBUF_S 7 | 271 | #define ICE_RLAN_CTX_DBUF_S 7 |
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5299caf55a7f..f1e80eed2fd6 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c | |||
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) | |||
901 | case ice_aqc_opc_get_link_status: | 901 | case ice_aqc_opc_get_link_status: |
902 | if (ice_handle_link_event(pf)) | 902 | if (ice_handle_link_event(pf)) |
903 | dev_err(&pf->pdev->dev, | 903 | dev_err(&pf->pdev->dev, |
904 | "Could not handle link event"); | 904 | "Could not handle link event\n"); |
905 | break; | 905 | break; |
906 | default: | 906 | default: |
907 | dev_dbg(&pf->pdev->dev, | 907 | dev_dbg(&pf->pdev->dev, |
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) | |||
917 | } | 917 | } |
918 | 918 | ||
919 | /** | 919 | /** |
920 | * ice_ctrlq_pending - check if there is a difference between ntc and ntu | ||
921 | * @hw: pointer to hardware info | ||
922 | * @cq: control queue information | ||
923 | * | ||
924 | * returns true if there are pending messages in a queue, false if there aren't | ||
925 | */ | ||
926 | static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) | ||
927 | { | ||
928 | u16 ntu; | ||
929 | |||
930 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); | ||
931 | return cq->rq.next_to_clean != ntu; | ||
932 | } | ||
933 | |||
934 | /** | ||
920 | * ice_clean_adminq_subtask - clean the AdminQ rings | 935 | * ice_clean_adminq_subtask - clean the AdminQ rings |
921 | * @pf: board private structure | 936 | * @pf: board private structure |
922 | */ | 937 | */ |
923 | static void ice_clean_adminq_subtask(struct ice_pf *pf) | 938 | static void ice_clean_adminq_subtask(struct ice_pf *pf) |
924 | { | 939 | { |
925 | struct ice_hw *hw = &pf->hw; | 940 | struct ice_hw *hw = &pf->hw; |
926 | u32 val; | ||
927 | 941 | ||
928 | if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) | 942 | if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) |
929 | return; | 943 | return; |
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) | |||
933 | 947 | ||
934 | clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); | 948 | clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); |
935 | 949 | ||
936 | /* re-enable Admin queue interrupt causes */ | 950 | /* There might be a situation where new messages arrive to a control |
937 | val = rd32(hw, PFINT_FW_CTL); | 951 | * queue between processing the last message and clearing the |
938 | wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M)); | 952 | * EVENT_PENDING bit. So before exiting, check queue head again (using |
953 | * ice_ctrlq_pending) and process new messages if any. | ||
954 | */ | ||
955 | if (ice_ctrlq_pending(hw, &hw->adminq)) | ||
956 | __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); | ||
939 | 957 | ||
940 | ice_flush(hw); | 958 | ice_flush(hw); |
941 | } | 959 | } |
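The adminq subtask rework above closes a race: a message that lands after the last clean but before the EVENT_PENDING bit is cleared would otherwise sit in the queue until the next interrupt, which is why the code rechecks the queue head via ice_ctrlq_pending() after clearing the bit. The clear-then-recheck shape in a toy, stand-alone form (the globals and the simulated late arrival are illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    static int queued;            /* messages the "hardware" has posted */
    static bool event_pending;    /* driver-side pending flag */

    static bool ctrlq_pending(void)
    {
        return queued != 0;       /* stands in for re-reading the head register */
    }

    static void clean_queue(void)
    {
        while (queued) {
            queued--;
            puts("processed one message");
        }
    }

    static void clean_subtask(void)
    {
        if (!event_pending)
            return;

        clean_queue();
        event_pending = false;

        queued++;                 /* simulate a message landing in the race window */

        /* Without this recheck the late message would wait for the next
         * interrupt; with it, the message is drained right away.
         */
        if (ctrlq_pending())
            clean_queue();
    }

    int main(void)
    {
        queued = 2;
        event_pending = true;
        clean_subtask();          /* prints three "processed" lines, not two */
        return 0;
    }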
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) | |||
1295 | qcount = numq_tc; | 1313 | qcount = numq_tc; |
1296 | } | 1314 | } |
1297 | 1315 | ||
1298 | /* find higher power-of-2 of qcount */ | 1316 | /* find the (rounded up) power-of-2 of qcount */ |
1299 | pow = ilog2(qcount); | 1317 | pow = order_base_2(qcount); |
1300 | |||
1301 | if (!is_power_of_2(qcount)) | ||
1302 | pow++; | ||
1303 | 1318 | ||
1304 | for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { | 1319 | for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { |
1305 | if (!(vsi->tc_cfg.ena_tc & BIT(i))) { | 1320 | if (!(vsi->tc_cfg.ena_tc & BIT(i))) { |
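order_base_2() used above returns the exponent of the smallest power of two that is greater than or equal to its argument, which is exactly what the removed ilog2()-plus-adjustment computed for the queue map. An equivalent stand-alone helper:

    #include <assert.h>

    /* Smallest n such that (1 << n) >= x, for x >= 1 -- what the patch now
     * gets from order_base_2() instead of ilog2() plus a power-of-two check.
     */
    static unsigned int order_base2(unsigned int x)
    {
        unsigned int n = 0;

        while ((1u << n) < x)
            n++;
        return n;
    }

    int main(void)
    {
        assert(order_base2(1)  == 0);
        assert(order_base2(4)  == 2);   /* already a power of two: exact log2 */
        assert(order_base2(5)  == 3);   /* rounded up: 2^3 = 8 covers 5 queues */
        assert(order_base2(16) == 4);
        return 0;
    }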
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) | |||
1352 | ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; | 1367 | ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; |
1353 | /* Traffic from VSI can be sent to LAN */ | 1368 | /* Traffic from VSI can be sent to LAN */ |
1354 | ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; | 1369 | ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; |
1355 | /* Allow all packets untagged/tagged */ | 1370 | |
1356 | ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL & | 1371 | /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy |
1357 | ICE_AQ_VSI_PVLAN_MODE_M) >> | 1372 | * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all |
1358 | ICE_AQ_VSI_PVLAN_MODE_S); | 1373 | * packets untagged/tagged. |
1359 | /* Show VLAN/UP from packets in Rx descriptors */ | 1374 | */ |
1360 | ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH & | 1375 | ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & |
1361 | ICE_AQ_VSI_PVLAN_EMOD_M) >> | 1376 | ICE_AQ_VSI_VLAN_MODE_M) >> |
1362 | ICE_AQ_VSI_PVLAN_EMOD_S); | 1377 | ICE_AQ_VSI_VLAN_MODE_S); |
1378 | |||
1363 | /* Have 1:1 UP mapping for both ingress/egress tables */ | 1379 | /* Have 1:1 UP mapping for both ingress/egress tables */ |
1364 | table |= ICE_UP_TABLE_TRANSLATE(0, 0); | 1380 | table |= ICE_UP_TABLE_TRANSLATE(0, 0); |
1365 | table |= ICE_UP_TABLE_TRANSLATE(1, 1); | 1381 | table |= ICE_UP_TABLE_TRANSLATE(1, 1); |
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf) | |||
1688 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ | 1704 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ |
1689 | rd32(hw, PFINT_OICR); /* read to clear */ | 1705 | rd32(hw, PFINT_OICR); /* read to clear */ |
1690 | 1706 | ||
1691 | val = (PFINT_OICR_HLP_RDY_M | | 1707 | val = (PFINT_OICR_ECC_ERR_M | |
1692 | PFINT_OICR_CPM_RDY_M | | ||
1693 | PFINT_OICR_ECC_ERR_M | | ||
1694 | PFINT_OICR_MAL_DETECT_M | | 1708 | PFINT_OICR_MAL_DETECT_M | |
1695 | PFINT_OICR_GRST_M | | 1709 | PFINT_OICR_GRST_M | |
1696 | PFINT_OICR_PCI_EXCEPTION_M | | 1710 | PFINT_OICR_PCI_EXCEPTION_M | |
1697 | PFINT_OICR_GPIO_M | | 1711 | PFINT_OICR_HMC_ERR_M | |
1698 | PFINT_OICR_STORM_DETECT_M | | 1712 | PFINT_OICR_PE_CRITERR_M); |
1699 | PFINT_OICR_HMC_ERR_M); | ||
1700 | 1713 | ||
1701 | wr32(hw, PFINT_OICR_ENA, val); | 1714 | wr32(hw, PFINT_OICR_ENA, val); |
1702 | 1715 | ||
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) | |||
2058 | skip_req_irq: | 2071 | skip_req_irq: |
2059 | ice_ena_misc_vector(pf); | 2072 | ice_ena_misc_vector(pf); |
2060 | 2073 | ||
2061 | val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | | 2074 | val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
2062 | (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) | | 2075 | PFINT_OICR_CTL_CAUSE_ENA_M); |
2063 | PFINT_OICR_CTL_CAUSE_ENA_M; | ||
2064 | wr32(hw, PFINT_OICR_CTL, val); | 2076 | wr32(hw, PFINT_OICR_CTL, val); |
2065 | 2077 | ||
2066 | /* This enables Admin queue Interrupt causes */ | 2078 | /* This enables Admin queue Interrupt causes */ |
2067 | val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | | 2079 | val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
2068 | (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) | | 2080 | PFINT_FW_CTL_CAUSE_ENA_M); |
2069 | PFINT_FW_CTL_CAUSE_ENA_M; | ||
2070 | wr32(hw, PFINT_FW_CTL, val); | 2081 | wr32(hw, PFINT_FW_CTL, val); |
2071 | 2082 | ||
2072 | itr_gran = hw->itr_gran_200; | 2083 | itr_gran = hw->itr_gran_200; |
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) | |||
3246 | if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) | 3257 | if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
3247 | ice_dis_msix(pf); | 3258 | ice_dis_msix(pf); |
3248 | 3259 | ||
3249 | devm_kfree(&pf->pdev->dev, pf->irq_tracker); | 3260 | if (pf->irq_tracker) { |
3250 | pf->irq_tracker = NULL; | 3261 | devm_kfree(&pf->pdev->dev, pf->irq_tracker); |
3262 | pf->irq_tracker = NULL; | ||
3263 | } | ||
3251 | } | 3264 | } |
3252 | 3265 | ||
3253 | /** | 3266 | /** |
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev, | |||
3271 | 3284 | ||
3272 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); | 3285 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); |
3273 | if (err) { | 3286 | if (err) { |
3274 | dev_err(&pdev->dev, "I/O map error %d\n", err); | 3287 | dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); |
3275 | return err; | 3288 | return err; |
3276 | } | 3289 | } |
3277 | 3290 | ||
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) | |||
3720 | enum ice_status status; | 3733 | enum ice_status status; |
3721 | 3734 | ||
3722 | /* Here we are configuring the VSI to let the driver add VLAN tags by | 3735 | /* Here we are configuring the VSI to let the driver add VLAN tags by |
3723 | * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN | 3736 | * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag |
3724 | * tag insertion happens in the Tx hot path, in ice_tx_map. | 3737 | * insertion happens in the Tx hot path, in ice_tx_map. |
3725 | */ | 3738 | */ |
3726 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL; | 3739 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; |
3727 | 3740 | ||
3728 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); | 3741 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
3729 | ctxt.vsi_num = vsi->vsi_num; | 3742 | ctxt.vsi_num = vsi->vsi_num; |
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) | |||
3735 | return -EIO; | 3748 | return -EIO; |
3736 | } | 3749 | } |
3737 | 3750 | ||
3738 | vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; | 3751 | vsi->info.vlan_flags = ctxt.info.vlan_flags; |
3739 | return 0; | 3752 | return 0; |
3740 | } | 3753 | } |
3741 | 3754 | ||
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) | |||
3757 | */ | 3770 | */ |
3758 | if (ena) { | 3771 | if (ena) { |
3759 | /* Strip VLAN tag from Rx packet and put it in the desc */ | 3772 | /* Strip VLAN tag from Rx packet and put it in the desc */ |
3760 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH; | 3773 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; |
3761 | } else { | 3774 | } else { |
3762 | /* Disable stripping. Leave tag in packet */ | 3775 | /* Disable stripping. Leave tag in packet */ |
3763 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING; | 3776 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; |
3764 | } | 3777 | } |
3765 | 3778 | ||
3779 | /* Allow all packets untagged/tagged */ | ||
3780 | ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; | ||
3781 | |||
3766 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); | 3782 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
3767 | ctxt.vsi_num = vsi->vsi_num; | 3783 | ctxt.vsi_num = vsi->vsi_num; |
3768 | 3784 | ||
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) | |||
3773 | return -EIO; | 3789 | return -EIO; |
3774 | } | 3790 | } |
3775 | 3791 | ||
3776 | vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; | 3792 | vsi->info.vlan_flags = ctxt.info.vlan_flags; |
3777 | return 0; | 3793 | return 0; |
3778 | } | 3794 | } |
3779 | 3795 | ||
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) | |||
3986 | /* clear the context structure first */ | 4002 | /* clear the context structure first */ |
3987 | memset(&rlan_ctx, 0, sizeof(rlan_ctx)); | 4003 | memset(&rlan_ctx, 0, sizeof(rlan_ctx)); |
3988 | 4004 | ||
3989 | rlan_ctx.base = ring->dma >> 7; | 4005 | rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; |
3990 | 4006 | ||
3991 | rlan_ctx.qlen = ring->count; | 4007 | rlan_ctx.qlen = ring->count; |
3992 | 4008 | ||
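ICE_RLAN_BASE_S simply names the former magic shift: the Rx queue context stores the descriptor ring's DMA address in 128-byte units, which presumes the ring allocation is at least 128-byte aligned (descriptor rings are page-aligned in practice). For instance, with a made-up address:

    #include <assert.h>
    #include <stdint.h>

    #define RLAN_BASE_S 7           /* ring base is recorded in 128-byte units */

    int main(void)
    {
        uint64_t dma  = 0x1f80000;  /* a 128-byte-aligned ring address */
        uint64_t base = dma >> RLAN_BASE_S;

        assert((dma & ((1u << RLAN_BASE_S) - 1)) == 0);  /* alignment holds */
        assert((base << RLAN_BASE_S) == dma);            /* nothing is lost */
        return 0;
    }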
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) | |||
4098 | { | 4114 | { |
4099 | int err; | 4115 | int err; |
4100 | 4116 | ||
4101 | ice_set_rx_mode(vsi->netdev); | 4117 | if (vsi->netdev) { |
4102 | 4118 | ice_set_rx_mode(vsi->netdev); | |
4103 | err = ice_restore_vlan(vsi); | 4119 | err = ice_restore_vlan(vsi); |
4104 | if (err) | 4120 | if (err) |
4105 | return err; | 4121 | return err; |
4122 | } | ||
4106 | 4123 | ||
4107 | err = ice_vsi_cfg_txqs(vsi); | 4124 | err = ice_vsi_cfg_txqs(vsi); |
4108 | if (!err) | 4125 | if (!err) |
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi) | |||
4868 | */ | 4885 | */ |
4869 | static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) | 4886 | static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
4870 | { | 4887 | { |
4871 | int i, err; | 4888 | int i, err = 0; |
4872 | 4889 | ||
4873 | if (!vsi->num_txq) { | 4890 | if (!vsi->num_txq) { |
4874 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", | 4891 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", |
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) | |||
4893 | */ | 4910 | */ |
4894 | static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) | 4911 | static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
4895 | { | 4912 | { |
4896 | int i, err; | 4913 | int i, err = 0; |
4897 | 4914 | ||
4898 | if (!vsi->num_rxq) { | 4915 | if (!vsi->num_rxq) { |
4899 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", | 4916 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", |
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) | |||
5235 | u8 count = 0; | 5252 | u8 count = 0; |
5236 | 5253 | ||
5237 | if (new_mtu == netdev->mtu) { | 5254 | if (new_mtu == netdev->mtu) { |
5238 | netdev_warn(netdev, "mtu is already %d\n", netdev->mtu); | 5255 | netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); |
5239 | return 0; | 5256 | return 0; |
5240 | } | 5257 | } |
5241 | 5258 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 92da0a626ce0..295a8cd87fc1 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c | |||
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) | |||
131 | * | 131 | * |
132 | * This function will request NVM ownership. | 132 | * This function will request NVM ownership. |
133 | */ | 133 | */ |
134 | static enum | 134 | static enum ice_status |
135 | ice_status ice_acquire_nvm(struct ice_hw *hw, | 135 | ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) |
136 | enum ice_aq_res_access_type access) | ||
137 | { | 136 | { |
138 | if (hw->nvm.blank_nvm_mode) | 137 | if (hw->nvm.blank_nvm_mode) |
139 | return 0; | 138 | return 0; |
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2e6c1d92cc88..eeae199469b6 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c | |||
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, | |||
1576 | return status; | 1576 | return status; |
1577 | } | 1577 | } |
1578 | 1578 | ||
1579 | if (owner == ICE_SCHED_NODE_OWNER_LAN) | 1579 | vsi->max_lanq[tc] = new_numqs; |
1580 | vsi->max_lanq[tc] = new_numqs; | ||
1581 | 1580 | ||
1582 | return status; | 1581 | return status; |
1583 | } | 1582 | } |
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 723d15f1e90b..6b7ec2ae5ad6 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c | |||
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, | |||
645 | act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; | 645 | act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; |
646 | lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); | 646 | lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); |
647 | 647 | ||
648 | act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; | 648 | act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << |
649 | ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; | ||
649 | 650 | ||
650 | /* Third action Marker value */ | 651 | /* Third action Marker value */ |
651 | act |= ICE_LG_ACT_GENERIC; | 652 | act |= ICE_LG_ACT_GENERIC; |
652 | act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & | 653 | act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & |
653 | ICE_LG_ACT_GENERIC_VALUE_M; | 654 | ICE_LG_ACT_GENERIC_VALUE_M; |
654 | 655 | ||
655 | act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; | ||
656 | lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); | 656 | lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); |
657 | 657 | ||
658 | /* call the fill switch rule to fill the lookup tx rx structure */ | 658 | /* call the fill switch rule to fill the lookup tx rx structure */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 6f4a0d159dbf..9b8ec128ee31 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h | |||
@@ -17,7 +17,7 @@ struct ice_vsi_ctx { | |||
17 | u16 vsis_unallocated; | 17 | u16 vsis_unallocated; |
18 | u16 flags; | 18 | u16 flags; |
19 | struct ice_aqc_vsi_props info; | 19 | struct ice_aqc_vsi_props info; |
20 | bool alloc_from_pool; | 20 | u8 alloc_from_pool; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | enum ice_sw_fwd_act_type { | 23 | enum ice_sw_fwd_act_type { |
@@ -94,8 +94,8 @@ struct ice_fltr_info { | |||
94 | u8 qgrp_size; | 94 | u8 qgrp_size; |
95 | 95 | ||
96 | /* Rule creations populate these indicators basing on the switch type */ | 96 | /* Rule creations populate these indicators basing on the switch type */ |
97 | bool lb_en; /* Indicate if packet can be looped back */ | 97 | u8 lb_en; /* Indicate if packet can be looped back */ |
98 | bool lan_en; /* Indicate if packet can be forwarded to the uplink */ | 98 | u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ |
99 | }; | 99 | }; |
100 | 100 | ||
101 | /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ | 101 | /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 567067b650c4..31bc998fe200 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h | |||
@@ -143,7 +143,7 @@ struct ice_ring { | |||
143 | u16 next_to_use; | 143 | u16 next_to_use; |
144 | u16 next_to_clean; | 144 | u16 next_to_clean; |
145 | 145 | ||
146 | bool ring_active; /* is ring online or not */ | 146 | u8 ring_active; /* is ring online or not */ |
147 | 147 | ||
148 | /* stats structs */ | 148 | /* stats structs */ |
149 | struct ice_q_stats stats; | 149 | struct ice_q_stats stats; |
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 99c8a9a71b5e..97c366e0ca59 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h | |||
@@ -83,7 +83,7 @@ struct ice_link_status { | |||
83 | u64 phy_type_low; | 83 | u64 phy_type_low; |
84 | u16 max_frame_size; | 84 | u16 max_frame_size; |
85 | u16 link_speed; | 85 | u16 link_speed; |
86 | bool lse_ena; /* Link Status Event notification */ | 86 | u8 lse_ena; /* Link Status Event notification */ |
87 | u8 link_info; | 87 | u8 link_info; |
88 | u8 an_info; | 88 | u8 an_info; |
89 | u8 ext_info; | 89 | u8 ext_info; |
@@ -101,7 +101,7 @@ struct ice_phy_info { | |||
101 | struct ice_link_status link_info_old; | 101 | struct ice_link_status link_info_old; |
102 | u64 phy_type_low; | 102 | u64 phy_type_low; |
103 | enum ice_media_type media_type; | 103 | enum ice_media_type media_type; |
104 | bool get_link_info; | 104 | u8 get_link_info; |
105 | }; | 105 | }; |
106 | 106 | ||
107 | /* Common HW capabilities for SW use */ | 107 | /* Common HW capabilities for SW use */ |
@@ -167,7 +167,7 @@ struct ice_nvm_info { | |||
167 | u32 oem_ver; /* OEM version info */ | 167 | u32 oem_ver; /* OEM version info */ |
168 | u16 sr_words; /* Shadow RAM size in words */ | 168 | u16 sr_words; /* Shadow RAM size in words */ |
169 | u16 ver; /* NVM package version */ | 169 | u16 ver; /* NVM package version */ |
170 | bool blank_nvm_mode; /* is NVM empty (no FW present) */ | 170 | u8 blank_nvm_mode; /* is NVM empty (no FW present) */ |
171 | }; | 171 | }; |
172 | 172 | ||
173 | /* Max number of port to queue branches w.r.t topology */ | 173 | /* Max number of port to queue branches w.r.t topology */ |
@@ -181,7 +181,7 @@ struct ice_sched_node { | |||
181 | struct ice_aqc_txsched_elem_data info; | 181 | struct ice_aqc_txsched_elem_data info; |
182 | u32 agg_id; /* aggregator group id */ | 182 | u32 agg_id; /* aggregator group id */ |
183 | u16 vsi_id; | 183 | u16 vsi_id; |
184 | bool in_use; /* suspended or in use */ | 184 | u8 in_use; /* suspended or in use */ |
185 | u8 tx_sched_layer; /* Logical Layer (1-9) */ | 185 | u8 tx_sched_layer; /* Logical Layer (1-9) */ |
186 | u8 num_children; | 186 | u8 num_children; |
187 | u8 tc_num; | 187 | u8 tc_num; |
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info { | |||
218 | struct ice_sched_tx_policy { | 218 | struct ice_sched_tx_policy { |
219 | u16 max_num_vsis; | 219 | u16 max_num_vsis; |
220 | u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; | 220 | u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; |
221 | bool rdma_ena; | 221 | u8 rdma_ena; |
222 | }; | 222 | }; |
223 | 223 | ||
224 | struct ice_port_info { | 224 | struct ice_port_info { |
@@ -243,7 +243,7 @@ struct ice_port_info { | |||
243 | struct list_head agg_list; /* lists all aggregator */ | 243 | struct list_head agg_list; /* lists all aggregator */ |
244 | u8 lport; | 244 | u8 lport; |
245 | #define ICE_LPORT_MASK 0xff | 245 | #define ICE_LPORT_MASK 0xff |
246 | bool is_vf; | 246 | u8 is_vf; |
247 | }; | 247 | }; |
248 | 248 | ||
249 | struct ice_switch_info { | 249 | struct ice_switch_info { |
@@ -287,7 +287,7 @@ struct ice_hw { | |||
287 | u8 max_cgds; | 287 | u8 max_cgds; |
288 | u8 sw_entry_point_layer; | 288 | u8 sw_entry_point_layer; |
289 | 289 | ||
290 | bool evb_veb; /* true for VEB, false for VEPA */ | 290 | u8 evb_veb; /* true for VEB, false for VEPA */ |
291 | struct ice_bus_info bus; | 291 | struct ice_bus_info bus; |
292 | struct ice_nvm_info nvm; | 292 | struct ice_nvm_info nvm; |
293 | struct ice_hw_dev_caps dev_caps; /* device capabilities */ | 293 | struct ice_hw_dev_caps dev_caps; /* device capabilities */ |
@@ -318,7 +318,7 @@ struct ice_hw { | |||
318 | u8 itr_gran_100; | 318 | u8 itr_gran_100; |
319 | u8 itr_gran_50; | 319 | u8 itr_gran_50; |
320 | u8 itr_gran_25; | 320 | u8 itr_gran_25; |
321 | bool ucast_shared; /* true if VSIs can share unicast addr */ | 321 | u8 ucast_shared; /* true if VSIs can share unicast addr */ |
322 | 322 | ||
323 | }; | 323 | }; |
324 | 324 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index f92f7918112d..5acf3b743876 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) | |||
1649 | if (hw->phy.type == e1000_phy_m88) | 1649 | if (hw->phy.type == e1000_phy_m88) |
1650 | igb_phy_disable_receiver(adapter); | 1650 | igb_phy_disable_receiver(adapter); |
1651 | 1651 | ||
1652 | mdelay(500); | 1652 | msleep(500); |
1653 | return 0; | 1653 | return 0; |
1654 | } | 1654 | } |
1655 | 1655 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index d03c2f0d7592..a32c576c1e65 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
3873 | 3873 | ||
3874 | adapter->mac_table = kcalloc(hw->mac.rar_entry_count, | 3874 | adapter->mac_table = kcalloc(hw->mac.rar_entry_count, |
3875 | sizeof(struct igb_mac_addr), | 3875 | sizeof(struct igb_mac_addr), |
3876 | GFP_ATOMIC); | 3876 | GFP_KERNEL); |
3877 | if (!adapter->mac_table) | 3877 | if (!adapter->mac_table) |
3878 | return -ENOMEM; | 3878 | return -ENOMEM; |
3879 | 3879 | ||
@@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
3883 | 3883 | ||
3884 | /* Setup and initialize a copy of the hw vlan table array */ | 3884 | /* Setup and initialize a copy of the hw vlan table array */ |
3885 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), | 3885 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), |
3886 | GFP_ATOMIC); | 3886 | GFP_KERNEL); |
3887 | if (!adapter->shadow_vfta) | 3887 | if (!adapter->shadow_vfta) |
3888 | return -ENOMEM; | 3888 | return -ENOMEM; |
3889 | 3889 | ||
@@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) | |||
5816 | 5816 | ||
5817 | if (skb->ip_summed != CHECKSUM_PARTIAL) { | 5817 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
5818 | csum_failed: | 5818 | csum_failed: |
5819 | if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) | 5819 | if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && |
5820 | !tx_ring->launchtime_enable) | ||
5820 | return; | 5821 | return; |
5821 | goto no_csum; | 5822 | goto no_csum; |
5822 | } | 5823 | } |
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 43664adf7a3c..d3e72d0f66ef 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
@@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
771 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); | 771 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); |
772 | rxdr->size = ALIGN(rxdr->size, 4096); | 772 | rxdr->size = ALIGN(rxdr->size, 4096); |
773 | 773 | ||
774 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 774 | rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, |
775 | GFP_KERNEL); | 775 | GFP_KERNEL); |
776 | 776 | ||
777 | if (!rxdr->desc) { | 777 | if (!rxdr->desc) { |
778 | vfree(rxdr->buffer_info); | 778 | vfree(rxdr->buffer_info); |
779 | return -ENOMEM; | 779 | return -ENOMEM; |
780 | } | 780 | } |
781 | memset(rxdr->desc, 0, rxdr->size); | ||
782 | 781 | ||
783 | rxdr->next_to_clean = 0; | 782 | rxdr->next_to_clean = 0; |
784 | rxdr->next_to_use = 0; | 783 | rxdr->next_to_use = 0; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 94b3165ff543..ccd852ad62a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | |||
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, | |||
192 | } | 192 | } |
193 | 193 | ||
194 | /* alloc the udl from per cpu ddp pool */ | 194 | /* alloc the udl from per cpu ddp pool */ |
195 | ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); | 195 | ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); |
196 | if (!ddp->udl) { | 196 | if (!ddp->udl) { |
197 | e_err(drv, "failed allocated ddp context\n"); | 197 | e_err(drv, "failed allocated ddp context\n"); |
198 | goto out_noddp_unmap; | 198 | goto out_noddp_unmap; |
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) | |||
760 | return 0; | 760 | return 0; |
761 | 761 | ||
762 | /* Extra buffer to be shared by all DDPs for HW work around */ | 762 | /* Extra buffer to be shared by all DDPs for HW work around */ |
763 | buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); | 763 | buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL); |
764 | if (!buffer) | 764 | if (!buffer) |
765 | return -ENOMEM; | 765 | return -ENOMEM; |
766 | 766 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 447098005490..9a23d33a47ed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, | |||
6201 | 6201 | ||
6202 | adapter->mac_table = kcalloc(hw->mac.num_rar_entries, | 6202 | adapter->mac_table = kcalloc(hw->mac.num_rar_entries, |
6203 | sizeof(struct ixgbe_mac_addr), | 6203 | sizeof(struct ixgbe_mac_addr), |
6204 | GFP_ATOMIC); | 6204 | GFP_KERNEL); |
6205 | if (!adapter->mac_table) | 6205 | if (!adapter->mac_table) |
6206 | return -ENOMEM; | 6206 | return -ENOMEM; |
6207 | 6207 | ||
@@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
6620 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6620 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
6621 | 6621 | ||
6622 | if (adapter->xdp_prog) { | 6622 | if (adapter->xdp_prog) { |
6623 | e_warn(probe, "MTU cannot be changed while XDP program is loaded\n"); | 6623 | int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + |
6624 | return -EPERM; | 6624 | VLAN_HLEN; |
6625 | int i; | ||
6626 | |||
6627 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
6628 | struct ixgbe_ring *ring = adapter->rx_ring[i]; | ||
6629 | |||
6630 | if (new_frame_size > ixgbe_rx_bufsz(ring)) { | ||
6631 | e_warn(probe, "Requested MTU size is not supported with XDP\n"); | ||
6632 | return -EINVAL; | ||
6633 | } | ||
6634 | } | ||
6625 | } | 6635 | } |
6626 | 6636 | ||
6627 | /* | 6637 | /* |
@@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
8983 | 8993 | ||
8984 | #ifdef CONFIG_IXGBE_DCB | 8994 | #ifdef CONFIG_IXGBE_DCB |
8985 | if (tc) { | 8995 | if (tc) { |
8996 | if (adapter->xdp_prog) { | ||
8997 | e_warn(probe, "DCB is not supported with XDP\n"); | ||
8998 | |||
8999 | ixgbe_init_interrupt_scheme(adapter); | ||
9000 | if (netif_running(dev)) | ||
9001 | ixgbe_open(dev); | ||
9002 | return -EINVAL; | ||
9003 | } | ||
9004 | |||
8986 | netdev_set_num_tc(dev, tc); | 9005 | netdev_set_num_tc(dev, tc); |
8987 | ixgbe_set_prio_tc_map(adapter); | 9006 | ixgbe_set_prio_tc_map(adapter); |
8988 | 9007 | ||
@@ -9171,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, | |||
9171 | struct tcf_exts *exts, u64 *action, u8 *queue) | 9190 | struct tcf_exts *exts, u64 *action, u8 *queue) |
9172 | { | 9191 | { |
9173 | const struct tc_action *a; | 9192 | const struct tc_action *a; |
9174 | LIST_HEAD(actions); | 9193 | int i; |
9175 | 9194 | ||
9176 | if (!tcf_exts_has_actions(exts)) | 9195 | if (!tcf_exts_has_actions(exts)) |
9177 | return -EINVAL; | 9196 | return -EINVAL; |
9178 | 9197 | ||
9179 | tcf_exts_to_list(exts, &actions); | 9198 | tcf_exts_for_each_action(i, a, exts) { |
9180 | list_for_each_entry(a, &actions, list) { | ||
9181 | |||
9182 | /* Drop action */ | 9199 | /* Drop action */ |
9183 | if (is_tcf_gact_shot(a)) { | 9200 | if (is_tcf_gact_shot(a)) { |
9184 | *action = IXGBE_FDIR_DROP_QUEUE; | 9201 | *action = IXGBE_FDIR_DROP_QUEUE; |
@@ -9936,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) | |||
9936 | int tcs = adapter->hw_tcs ? : 1; | 9953 | int tcs = adapter->hw_tcs ? : 1; |
9937 | int pool, err; | 9954 | int pool, err; |
9938 | 9955 | ||
9956 | if (adapter->xdp_prog) { | ||
9957 | e_warn(probe, "L2FW offload is not supported with XDP\n"); | ||
9958 | return ERR_PTR(-EINVAL); | ||
9959 | } | ||
9960 | |||
9939 | /* The hardware supported by ixgbe only filters on the destination MAC | 9961 | /* The hardware supported by ixgbe only filters on the destination MAC |
9940 | * address. In order to avoid issues we only support offloading modes | 9962 | * address. In order to avoid issues we only support offloading modes |
9941 | * where the hardware can actually provide the functionality. | 9963 | * where the hardware can actually provide the functionality. |
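This hunk and several later ones (mlx5e, mlxsw spectrum, nfp flower, qede) convert TC action parsing from tcf_exts_to_list() plus list_for_each_entry() to the tcf_exts_for_each_action() iterator, which walks the action array directly instead of building a temporary list. A reduced sketch of the new pattern; everything except the tcf_* and is_tcf_gact_shot() helpers is illustrative:

#include <linux/errno.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>

/* Sketch only: iterate the actions attached to a classifier's extensions
 * without building a temporary list first. */
static int example_parse_actions(struct tcf_exts *exts)
{
        const struct tc_action *a;
        int i;

        if (!tcf_exts_has_actions(exts))
                return -EINVAL;

        tcf_exts_for_each_action(i, a, exts) {
                if (is_tcf_gact_shot(a)) {
                        /* a drop action: driver-specific handling goes here */
                        continue;
                }
        }
        return 0;
}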
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 6f59933cdff7..3c6f01c41b78 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, | |||
53 | struct ixgbe_hw *hw = &adapter->hw; | 53 | struct ixgbe_hw *hw = &adapter->hw; |
54 | int i; | 54 | int i; |
55 | 55 | ||
56 | if (adapter->xdp_prog) { | ||
57 | e_warn(probe, "SRIOV is not supported with XDP\n"); | ||
58 | return -EINVAL; | ||
59 | } | ||
60 | |||
56 | /* Enable VMDq flag so device will be set in VM mode */ | 61 | /* Enable VMDq flag so device will be set in VM mode */ |
57 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | | 62 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | |
58 | IXGBE_FLAG_VMDQ_ENABLED; | 63 | IXGBE_FLAG_VMDQ_ENABLED; |
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, | |||
688 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | 693 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) |
689 | { | 694 | { |
690 | struct ixgbe_hw *hw = &adapter->hw; | 695 | struct ixgbe_hw *hw = &adapter->hw; |
696 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; | ||
691 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; | 697 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; |
698 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); | ||
692 | u8 num_tcs = adapter->hw_tcs; | 699 | u8 num_tcs = adapter->hw_tcs; |
700 | u32 reg_val; | ||
701 | u32 queue; | ||
702 | u32 word; | ||
693 | 703 | ||
694 | /* remove VLAN filters belonging to this VF */ | 704 | /* remove VLAN filters belonging to this VF */
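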
695 | ixgbe_clear_vf_vlans(adapter, vf); | 705 | ixgbe_clear_vf_vlans(adapter, vf); |
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | |||
726 | 736 | ||
727 | /* reset VF api back to unknown */ | 737 | /* reset VF api back to unknown */ |
728 | adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; | 738 | adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; |
739 | |||
740 | /* Restart each queue for given VF */ | ||
741 | for (queue = 0; queue < q_per_pool; queue++) { | ||
742 | unsigned int reg_idx = (vf * q_per_pool) + queue; | ||
743 | |||
744 | reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); | ||
745 | |||
746 | /* Re-enabling only configured queues */ | ||
747 | if (reg_val) { | ||
748 | reg_val |= IXGBE_TXDCTL_ENABLE; | ||
749 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); | ||
750 | reg_val &= ~IXGBE_TXDCTL_ENABLE; | ||
751 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); | ||
752 | } | ||
753 | } | ||
754 | |||
755 | /* Clear VF's mailbox memory */ | ||
756 | for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) | ||
757 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); | ||
758 | |||
759 | IXGBE_WRITE_FLUSH(hw); | ||
729 | } | 760 | } |
730 | 761 | ||
731 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | 762 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 44cfb2021145..41bcbb337e83 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | |||
@@ -2518,6 +2518,7 @@ enum { | |||
2518 | /* Translated register #defines */ | 2518 | /* Translated register #defines */ |
2519 | #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) | 2519 | #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) |
2520 | #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) | 2520 | #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) |
2521 | #define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) | ||
2521 | #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) | 2522 | #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) |
2522 | #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) | 2523 | #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) |
2523 | 2524 | ||
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 32d785b616e1..28500417843e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
@@ -4803,6 +4803,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, | |||
4803 | dev->min_mtu = ETH_MIN_MTU; | 4803 | dev->min_mtu = ETH_MIN_MTU; |
4804 | /* 9704 == 9728 - 20 and rounding to 8 */ | 4804 | /* 9704 == 9728 - 20 and rounding to 8 */ |
4805 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; | 4805 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; |
4806 | dev->dev.of_node = port_node; | ||
4806 | 4807 | ||
4807 | /* Phylink isn't used w/ ACPI as of now */ | 4808 | /* Phylink isn't used w/ ACPI as of now */ |
4808 | if (port_node) { | 4809 | if (port_node) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9131a1376e7d..9fed54017659 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
1982 | goto out_ok; | 1982 | goto out_ok; |
1983 | 1983 | ||
1984 | modify_ip_header = false; | 1984 | modify_ip_header = false; |
1985 | tcf_exts_to_list(exts, &actions); | 1985 | tcf_exts_for_each_action(i, a, exts) { |
1986 | list_for_each_entry(a, &actions, list) { | 1986 | int k; |
1987 | |||
1987 | if (!is_tcf_pedit(a)) | 1988 | if (!is_tcf_pedit(a)) |
1988 | continue; | 1989 | continue; |
1989 | 1990 | ||
1990 | nkeys = tcf_pedit_nkeys(a); | 1991 | nkeys = tcf_pedit_nkeys(a); |
1991 | for (i = 0; i < nkeys; i++) { | 1992 | for (k = 0; k < nkeys; k++) { |
1992 | htype = tcf_pedit_htype(a, i); | 1993 | htype = tcf_pedit_htype(a, k); |
1993 | if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || | 1994 | if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || |
1994 | htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { | 1995 | htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { |
1995 | modify_ip_header = true; | 1996 | modify_ip_header = true; |
@@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
2053 | const struct tc_action *a; | 2054 | const struct tc_action *a; |
2054 | LIST_HEAD(actions); | 2055 | LIST_HEAD(actions); |
2055 | u32 action = 0; | 2056 | u32 action = 0; |
2056 | int err; | 2057 | int err, i; |
2057 | 2058 | ||
2058 | if (!tcf_exts_has_actions(exts)) | 2059 | if (!tcf_exts_has_actions(exts)) |
2059 | return -EINVAL; | 2060 | return -EINVAL; |
2060 | 2061 | ||
2061 | attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; | 2062 | attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; |
2062 | 2063 | ||
2063 | tcf_exts_to_list(exts, &actions); | 2064 | tcf_exts_for_each_action(i, a, exts) { |
2064 | list_for_each_entry(a, &actions, list) { | ||
2065 | if (is_tcf_gact_shot(a)) { | 2065 | if (is_tcf_gact_shot(a)) { |
2066 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP; | 2066 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP; |
2067 | if (MLX5_CAP_FLOWTABLE(priv->mdev, | 2067 | if (MLX5_CAP_FLOWTABLE(priv->mdev, |
@@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
2666 | LIST_HEAD(actions); | 2666 | LIST_HEAD(actions); |
2667 | bool encap = false; | 2667 | bool encap = false; |
2668 | u32 action = 0; | 2668 | u32 action = 0; |
2669 | int err; | 2669 | int err, i; |
2670 | 2670 | ||
2671 | if (!tcf_exts_has_actions(exts)) | 2671 | if (!tcf_exts_has_actions(exts)) |
2672 | return -EINVAL; | 2672 | return -EINVAL; |
@@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
2674 | attr->in_rep = rpriv->rep; | 2674 | attr->in_rep = rpriv->rep; |
2675 | attr->in_mdev = priv->mdev; | 2675 | attr->in_mdev = priv->mdev; |
2676 | 2676 | ||
2677 | tcf_exts_to_list(exts, &actions); | 2677 | tcf_exts_for_each_action(i, a, exts) { |
2678 | list_for_each_entry(a, &actions, list) { | ||
2679 | if (is_tcf_gact_shot(a)) { | 2678 | if (is_tcf_gact_shot(a)) { |
2680 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP | | 2679 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP | |
2681 | MLX5_FLOW_CONTEXT_ACTION_COUNT; | 2680 | MLX5_FLOW_CONTEXT_ACTION_COUNT; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 86478a6b99c5..c8c315eb5128 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c | |||
@@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, | |||
139 | struct mlx5_wq_ctrl *wq_ctrl) | 139 | struct mlx5_wq_ctrl *wq_ctrl) |
140 | { | 140 | { |
141 | u32 sq_strides_offset; | 141 | u32 sq_strides_offset; |
142 | u32 rq_pg_remainder; | ||
142 | int err; | 143 | int err; |
143 | 144 | ||
144 | mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, | 145 | mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, |
145 | MLX5_GET(qpc, qpc, log_rq_size), | 146 | MLX5_GET(qpc, qpc, log_rq_size), |
146 | &wq->rq.fbc); | 147 | &wq->rq.fbc); |
147 | 148 | ||
148 | sq_strides_offset = | 149 | rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE; |
149 | ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; | 150 | sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB; |
150 | 151 | ||
151 | mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), | 152 | mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), |
152 | MLX5_GET(qpc, qpc, log_sq_size), | 153 | MLX5_GET(qpc, qpc, log_sq_size), |
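The wq.c fix computes the SQ strides offset from the byte size of the whole cyclic RQ rather than from a single fragment, so the remainder is taken over everything the RQ occupies in its last page. Restated as a hedged fragment (only mlx5_wq_cyc_get_byte_size(), PAGE_SIZE and MLX5_SEND_WQE_BB come from the hunk; the standalone variables are illustrative):

/* Sketch only: bytes of the last page occupied by the RQ, expressed in
 * send-WQE basic blocks, is where the SQ strides begin. */
u32 rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
u32 sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;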
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6070d1591d1e..930700413b1d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1346 | return -ENOMEM; | 1346 | return -ENOMEM; |
1347 | mall_tc_entry->cookie = f->cookie; | 1347 | mall_tc_entry->cookie = f->cookie; |
1348 | 1348 | ||
1349 | tcf_exts_to_list(f->exts, &actions); | 1349 | a = tcf_exts_first_action(f->exts); |
1350 | a = list_first_entry(&actions, struct tc_action, list); | ||
1351 | 1350 | ||
1352 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { | 1351 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { |
1353 | struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; | 1352 | struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3ae930196741..3cdb7aca90b7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, | |||
414 | void | 414 | void |
415 | mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); | 415 | mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); |
416 | void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); | 416 | void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); |
417 | void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, | ||
418 | struct net_device *dev); | ||
417 | 419 | ||
418 | /* spectrum_kvdl.c */ | 420 | /* spectrum_kvdl.c */ |
419 | enum mlxsw_sp_kvdl_entry_type { | 421 | enum mlxsw_sp_kvdl_entry_type { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ebd1b24ebaa5..8d211972c5e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | |||
@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, | |||
21 | struct netlink_ext_ack *extack) | 21 | struct netlink_ext_ack *extack) |
22 | { | 22 | { |
23 | const struct tc_action *a; | 23 | const struct tc_action *a; |
24 | LIST_HEAD(actions); | 24 | int err, i; |
25 | int err; | ||
26 | 25 | ||
27 | if (!tcf_exts_has_actions(exts)) | 26 | if (!tcf_exts_has_actions(exts)) |
28 | return 0; | 27 | return 0; |
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, | |||
32 | if (err) | 31 | if (err) |
33 | return err; | 32 | return err; |
34 | 33 | ||
35 | tcf_exts_to_list(exts, &actions); | 34 | tcf_exts_for_each_action(i, a, exts) { |
36 | list_for_each_entry(a, &actions, list) { | ||
37 | if (is_tcf_gact_ok(a)) { | 35 | if (is_tcf_gact_ok(a)) { |
38 | err = mlxsw_sp_acl_rulei_act_terminate(rulei); | 36 | err = mlxsw_sp_acl_rulei_act_terminate(rulei); |
39 | if (err) { | 37 | if (err) { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3a96307f51b0..2ab9cf25a08a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) | |||
6234 | mlxsw_sp_vr_put(mlxsw_sp, vr); | 6234 | mlxsw_sp_vr_put(mlxsw_sp, vr); |
6235 | } | 6235 | } |
6236 | 6236 | ||
6237 | void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, | ||
6238 | struct net_device *dev) | ||
6239 | { | ||
6240 | struct mlxsw_sp_rif *rif; | ||
6241 | |||
6242 | rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); | ||
6243 | if (!rif) | ||
6244 | return; | ||
6245 | mlxsw_sp_rif_destroy(rif); | ||
6246 | } | ||
6247 | |||
6237 | static void | 6248 | static void |
6238 | mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, | 6249 | mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, |
6239 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) | 6250 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0d8444aaba01..db715da7bab7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, | |||
127 | return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); | 127 | return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); |
128 | } | 128 | } |
129 | 129 | ||
130 | static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev, | ||
131 | void *data) | ||
132 | { | ||
133 | struct mlxsw_sp *mlxsw_sp = data; | ||
134 | |||
135 | mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp, | ||
140 | struct net_device *dev) | ||
141 | { | ||
142 | mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); | ||
143 | netdev_walk_all_upper_dev_rcu(dev, | ||
144 | mlxsw_sp_bridge_device_upper_rif_destroy, | ||
145 | mlxsw_sp); | ||
146 | } | ||
147 | |||
130 | static struct mlxsw_sp_bridge_device * | 148 | static struct mlxsw_sp_bridge_device * |
131 | mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, | 149 | mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, |
132 | struct net_device *br_dev) | 150 | struct net_device *br_dev) |
@@ -165,6 +183,8 @@ static void | |||
165 | mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, | 183 | mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, |
166 | struct mlxsw_sp_bridge_device *bridge_device) | 184 | struct mlxsw_sp_bridge_device *bridge_device) |
167 | { | 185 | { |
186 | mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp, | ||
187 | bridge_device->dev); | ||
168 | list_del(&bridge_device->list); | 188 | list_del(&bridge_device->list); |
169 | if (bridge_device->vlan_enabled) | 189 | if (bridge_device->vlan_enabled) |
170 | bridge->vlan_enabled_exists = false; | 190 | bridge->vlan_enabled_exists = false; |
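The switchdev hunk tears down the router interface (RIF) of a bridge device, and of every device stacked on top of it, before the bridge device itself is destroyed; the stacked devices are visited with the generic netdev upper-walk helper. A minimal sketch of that helper's calling convention (struct example_priv and the callback body are hypothetical; callers are expected to hold RTNL or rcu_read_lock()):

#include <linux/netdevice.h>

struct example_priv {
        int dummy;
};

/* Sketch only: the callback runs once per upper device; returning a
 * non-zero value stops the walk early. */
static int example_upper_cb(struct net_device *upper, void *data)
{
        struct example_priv *priv = data;

        (void)priv;             /* clean up per-upper state here */
        return 0;
}

static void example_cleanup_uppers(struct net_device *dev,
                                   struct example_priv *priv)
{
        netdev_walk_all_upper_dev_rcu(dev, example_upper_cb, priv);
}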
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 0ba0356ec4e6..9044496803e6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
@@ -796,11 +796,10 @@ int nfp_flower_compile_action(struct nfp_app *app, | |||
796 | struct net_device *netdev, | 796 | struct net_device *netdev, |
797 | struct nfp_fl_payload *nfp_flow) | 797 | struct nfp_fl_payload *nfp_flow) |
798 | { | 798 | { |
799 | int act_len, act_cnt, err, tun_out_cnt, out_cnt; | 799 | int act_len, act_cnt, err, tun_out_cnt, out_cnt, i; |
800 | enum nfp_flower_tun_type tun_type; | 800 | enum nfp_flower_tun_type tun_type; |
801 | const struct tc_action *a; | 801 | const struct tc_action *a; |
802 | u32 csum_updated = 0; | 802 | u32 csum_updated = 0; |
803 | LIST_HEAD(actions); | ||
804 | 803 | ||
805 | memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); | 804 | memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); |
806 | nfp_flow->meta.act_len = 0; | 805 | nfp_flow->meta.act_len = 0; |
@@ -810,8 +809,7 @@ int nfp_flower_compile_action(struct nfp_app *app, | |||
810 | tun_out_cnt = 0; | 809 | tun_out_cnt = 0; |
811 | out_cnt = 0; | 810 | out_cnt = 0; |
812 | 811 | ||
813 | tcf_exts_to_list(flow->exts, &actions); | 812 | tcf_exts_for_each_action(i, a, flow->exts) { |
814 | list_for_each_entry(a, &actions, list) { | ||
815 | err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, | 813 | err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, |
816 | netdev, &tun_type, &tun_out_cnt, | 814 | netdev, &tun_type, &tun_out_cnt, |
817 | &out_cnt, &csum_updated); | 815 | &out_cnt, &csum_updated); |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a8b9fbab5f73..253bdaef1505 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
@@ -229,29 +229,16 @@ done: | |||
229 | spin_unlock_bh(&nn->reconfig_lock); | 229 | spin_unlock_bh(&nn->reconfig_lock); |
230 | } | 230 | } |
231 | 231 | ||
232 | /** | 232 | static void nfp_net_reconfig_sync_enter(struct nfp_net *nn) |
233 | * nfp_net_reconfig() - Reconfigure the firmware | ||
234 | * @nn: NFP Net device to reconfigure | ||
235 | * @update: The value for the update field in the BAR config | ||
236 | * | ||
237 | * Write the update word to the BAR and ping the reconfig queue. The | ||
238 | * poll until the firmware has acknowledged the update by zeroing the | ||
239 | * update word. | ||
240 | * | ||
241 | * Return: Negative errno on error, 0 on success | ||
242 | */ | ||
243 | int nfp_net_reconfig(struct nfp_net *nn, u32 update) | ||
244 | { | 233 | { |
245 | bool cancelled_timer = false; | 234 | bool cancelled_timer = false; |
246 | u32 pre_posted_requests; | 235 | u32 pre_posted_requests; |
247 | int ret; | ||
248 | 236 | ||
249 | spin_lock_bh(&nn->reconfig_lock); | 237 | spin_lock_bh(&nn->reconfig_lock); |
250 | 238 | ||
251 | nn->reconfig_sync_present = true; | 239 | nn->reconfig_sync_present = true; |
252 | 240 | ||
253 | if (nn->reconfig_timer_active) { | 241 | if (nn->reconfig_timer_active) { |
254 | del_timer(&nn->reconfig_timer); | ||
255 | nn->reconfig_timer_active = false; | 242 | nn->reconfig_timer_active = false; |
256 | cancelled_timer = true; | 243 | cancelled_timer = true; |
257 | } | 244 | } |
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) | |||
260 | 247 | ||
261 | spin_unlock_bh(&nn->reconfig_lock); | 248 | spin_unlock_bh(&nn->reconfig_lock); |
262 | 249 | ||
263 | if (cancelled_timer) | 250 | if (cancelled_timer) { |
251 | del_timer_sync(&nn->reconfig_timer); | ||
264 | nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); | 252 | nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); |
253 | } | ||
265 | 254 | ||
266 | /* Run the posted reconfigs which were issued before we started */ | 255 | /* Run the posted reconfigs which were issued before we started */ |
267 | if (pre_posted_requests) { | 256 | if (pre_posted_requests) { |
268 | nfp_net_reconfig_start(nn, pre_posted_requests); | 257 | nfp_net_reconfig_start(nn, pre_posted_requests); |
269 | nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); | 258 | nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); |
270 | } | 259 | } |
260 | } | ||
261 | |||
262 | static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) | ||
263 | { | ||
264 | nfp_net_reconfig_sync_enter(nn); | ||
265 | |||
266 | spin_lock_bh(&nn->reconfig_lock); | ||
267 | nn->reconfig_sync_present = false; | ||
268 | spin_unlock_bh(&nn->reconfig_lock); | ||
269 | } | ||
270 | |||
271 | /** | ||
272 | * nfp_net_reconfig() - Reconfigure the firmware | ||
273 | * @nn: NFP Net device to reconfigure | ||
274 | * @update: The value for the update field in the BAR config | ||
275 | * | ||
276 | * Write the update word to the BAR and ping the reconfig queue. The | ||
277 | * poll until the firmware has acknowledged the update by zeroing the | ||
278 | * update word. | ||
279 | * | ||
280 | * Return: Negative errno on error, 0 on success | ||
281 | */ | ||
282 | int nfp_net_reconfig(struct nfp_net *nn, u32 update) | ||
283 | { | ||
284 | int ret; | ||
285 | |||
286 | nfp_net_reconfig_sync_enter(nn); | ||
271 | 287 | ||
272 | nfp_net_reconfig_start(nn, update); | 288 | nfp_net_reconfig_start(nn, update); |
273 | ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); | 289 | ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); |
@@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, | |||
3633 | */ | 3649 | */ |
3634 | void nfp_net_free(struct nfp_net *nn) | 3650 | void nfp_net_free(struct nfp_net *nn) |
3635 | { | 3651 | { |
3652 | WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); | ||
3636 | if (nn->dp.netdev) | 3653 | if (nn->dp.netdev) |
3637 | free_netdev(nn->dp.netdev); | 3654 | free_netdev(nn->dp.netdev); |
3638 | else | 3655 | else |
@@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn) | |||
3920 | return; | 3937 | return; |
3921 | 3938 | ||
3922 | unregister_netdev(nn->dp.netdev); | 3939 | unregister_netdev(nn->dp.netdev); |
3940 | nfp_net_reconfig_wait_posted(nn); | ||
3923 | } | 3941 | } |
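The nfp change splits the synchronous entry path out of nfp_net_reconfig() into nfp_net_reconfig_sync_enter(), lets nfp_net_clean() flush any posted reconfigs via nfp_net_reconfig_wait_posted(), and replaces del_timer() under the spinlock with a flag plus del_timer_sync() after the lock is dropped. A generic sketch of that last locking pattern (the example_* names are not the driver's):

#include <linux/spinlock.h>
#include <linux/timer.h>

struct example_ctx {
        spinlock_t lock;
        bool timer_active;
        struct timer_list timer;
};

/* Sketch only: mark the timer inactive under the lock, but wait for a
 * possibly running handler with del_timer_sync() only after dropping the
 * lock, since the handler may need to take that same lock. */
static void example_cancel_timer(struct example_ctx *ctx)
{
        bool cancel;

        spin_lock_bh(&ctx->lock);
        cancel = ctx->timer_active;
        ctx->timer_active = false;
        spin_unlock_bh(&ctx->lock);

        if (cancel)
                del_timer_sync(&ctx->timer);
}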
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d9ab5add27a8..34193c2f1699 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c | |||
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, | |||
407 | 407 | ||
408 | if (i == QED_INIT_MAX_POLL_COUNT) { | 408 | if (i == QED_INIT_MAX_POLL_COUNT) { |
409 | DP_ERR(p_hwfn, | 409 | DP_ERR(p_hwfn, |
410 | "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", | 410 | "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", |
411 | addr, le32_to_cpu(cmd->expected_val), | 411 | addr, le32_to_cpu(cmd->expected_val), |
412 | val, le32_to_cpu(cmd->op_data)); | 412 | val, le32_to_cpu(cmd->op_data)); |
413 | } | 413 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index d89a0e22f6e4..5d37ec7e9b0b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
@@ -48,7 +48,7 @@ | |||
48 | #include "qed_reg_addr.h" | 48 | #include "qed_reg_addr.h" |
49 | #include "qed_sriov.h" | 49 | #include "qed_sriov.h" |
50 | 50 | ||
51 | #define CHIP_MCP_RESP_ITER_US 10 | 51 | #define QED_MCP_RESP_ITER_US 10 |
52 | 52 | ||
53 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ | 53 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ |
54 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ | 54 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ |
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) | |||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | /* Maximum of 1 sec to wait for the SHMEM ready indication */ | ||
187 | #define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 | ||
188 | #define QED_MCP_SHMEM_RDY_ITER_MS 50 | ||
189 | |||
186 | static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 190 | static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
187 | { | 191 | { |
188 | struct qed_mcp_info *p_info = p_hwfn->mcp_info; | 192 | struct qed_mcp_info *p_info = p_hwfn->mcp_info; |
193 | u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; | ||
194 | u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; | ||
189 | u32 drv_mb_offsize, mfw_mb_offsize; | 195 | u32 drv_mb_offsize, mfw_mb_offsize; |
190 | u32 mcp_pf_id = MCP_PF_ID(p_hwfn); | 196 | u32 mcp_pf_id = MCP_PF_ID(p_hwfn); |
191 | 197 | ||
192 | p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); | 198 | p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); |
193 | if (!p_info->public_base) | 199 | if (!p_info->public_base) { |
194 | return 0; | 200 | DP_NOTICE(p_hwfn, |
201 | "The address of the MCP scratch-pad is not configured\n"); | ||
202 | return -EINVAL; | ||
203 | } | ||
195 | 204 | ||
196 | p_info->public_base |= GRCBASE_MCP; | 205 | p_info->public_base |= GRCBASE_MCP; |
197 | 206 | ||
207 | /* Get the MFW MB address and number of supported messages */ | ||
208 | mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, | ||
209 | SECTION_OFFSIZE_ADDR(p_info->public_base, | ||
210 | PUBLIC_MFW_MB)); | ||
211 | p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); | ||
212 | p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, | ||
213 | p_info->mfw_mb_addr + | ||
214 | offsetof(struct public_mfw_mb, | ||
215 | sup_msgs)); | ||
216 | |||
217 | /* The driver can notify that there was an MCP reset, and might read the | ||
218 | * SHMEM values before the MFW has completed initializing them. | ||
219 | * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a | ||
220 | * data ready indication. | ||
221 | */ | ||
222 | while (!p_info->mfw_mb_length && --cnt) { | ||
223 | msleep(msec); | ||
224 | p_info->mfw_mb_length = | ||
225 | (u16)qed_rd(p_hwfn, p_ptt, | ||
226 | p_info->mfw_mb_addr + | ||
227 | offsetof(struct public_mfw_mb, sup_msgs)); | ||
228 | } | ||
229 | |||
230 | if (!cnt) { | ||
231 | DP_NOTICE(p_hwfn, | ||
232 | "Failed to get the SHMEM ready notification after %d msec\n", | ||
233 | QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); | ||
234 | return -EBUSY; | ||
235 | } | ||
236 | |||
198 | /* Calculate the driver and MFW mailbox address */ | 237 | /* Calculate the driver and MFW mailbox address */ |
199 | drv_mb_offsize = qed_rd(p_hwfn, p_ptt, | 238 | drv_mb_offsize = qed_rd(p_hwfn, p_ptt, |
200 | SECTION_OFFSIZE_ADDR(p_info->public_base, | 239 | SECTION_OFFSIZE_ADDR(p_info->public_base, |
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
204 | "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", | 243 | "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", |
205 | drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); | 244 | drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); |
206 | 245 | ||
207 | /* Set the MFW MB address */ | ||
208 | mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, | ||
209 | SECTION_OFFSIZE_ADDR(p_info->public_base, | ||
210 | PUBLIC_MFW_MB)); | ||
211 | p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); | ||
212 | p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); | ||
213 | |||
214 | /* Get the current driver mailbox sequence before sending | 246 | /* Get the current driver mailbox sequence before sending |
215 | * the first command | 247 | * the first command |
216 | */ | 248 | */ |
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn, | |||
285 | 317 | ||
286 | int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 318 | int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
287 | { | 319 | { |
288 | u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; | 320 | u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0; |
289 | int rc = 0; | 321 | int rc = 0; |
290 | 322 | ||
323 | if (p_hwfn->mcp_info->b_block_cmd) { | ||
324 | DP_NOTICE(p_hwfn, | ||
325 | "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n"); | ||
326 | return -EBUSY; | ||
327 | } | ||
328 | |||
291 | /* Ensure that only a single thread is accessing the mailbox */ | 329 | /* Ensure that only a single thread is accessing the mailbox */ |
292 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 330 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
293 | 331 | ||
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
413 | (p_mb_params->cmd | seq_num), p_mb_params->param); | 451 | (p_mb_params->cmd | seq_num), p_mb_params->param); |
414 | } | 452 | } |
415 | 453 | ||
454 | static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) | ||
455 | { | ||
456 | p_hwfn->mcp_info->b_block_cmd = block_cmd; | ||
457 | |||
458 | DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", | ||
459 | block_cmd ? "Block" : "Unblock"); | ||
460 | } | ||
461 | |||
462 | static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, | ||
463 | struct qed_ptt *p_ptt) | ||
464 | { | ||
465 | u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; | ||
466 | u32 delay = QED_MCP_RESP_ITER_US; | ||
467 | |||
468 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | ||
469 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
470 | cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
471 | udelay(delay); | ||
472 | cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
473 | udelay(delay); | ||
474 | cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
475 | |||
476 | DP_NOTICE(p_hwfn, | ||
477 | "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", | ||
478 | cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); | ||
479 | } | ||
480 | |||
416 | static int | 481 | static int |
417 | _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | 482 | _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, |
418 | struct qed_ptt *p_ptt, | 483 | struct qed_ptt *p_ptt, |
419 | struct qed_mcp_mb_params *p_mb_params, | 484 | struct qed_mcp_mb_params *p_mb_params, |
420 | u32 max_retries, u32 delay) | 485 | u32 max_retries, u32 usecs) |
421 | { | 486 | { |
487 | u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); | ||
422 | struct qed_mcp_cmd_elem *p_cmd_elem; | 488 | struct qed_mcp_cmd_elem *p_cmd_elem; |
423 | u32 cnt = 0; | ||
424 | u16 seq_num; | 489 | u16 seq_num; |
425 | int rc = 0; | 490 | int rc = 0; |
426 | 491 | ||
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
443 | goto err; | 508 | goto err; |
444 | 509 | ||
445 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); | 510 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); |
446 | udelay(delay); | 511 | |
512 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) | ||
513 | msleep(msecs); | ||
514 | else | ||
515 | udelay(usecs); | ||
447 | } while (++cnt < max_retries); | 516 | } while (++cnt < max_retries); |
448 | 517 | ||
449 | if (cnt >= max_retries) { | 518 | if (cnt >= max_retries) { |
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
472 | * The spinlock stays locked until the list element is removed. | 541 | * The spinlock stays locked until the list element is removed. |
473 | */ | 542 | */ |
474 | 543 | ||
475 | udelay(delay); | 544 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) |
545 | msleep(msecs); | ||
546 | else | ||
547 | udelay(usecs); | ||
548 | |||
476 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 549 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
477 | 550 | ||
478 | if (p_cmd_elem->b_is_completed) | 551 | if (p_cmd_elem->b_is_completed) |
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
491 | DP_NOTICE(p_hwfn, | 564 | DP_NOTICE(p_hwfn, |
492 | "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", | 565 | "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", |
493 | p_mb_params->cmd, p_mb_params->param); | 566 | p_mb_params->cmd, p_mb_params->param); |
567 | qed_mcp_print_cpu_info(p_hwfn, p_ptt); | ||
494 | 568 | ||
495 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 569 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
496 | qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); | 570 | qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); |
497 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); | 571 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); |
498 | 572 | ||
573 | if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) | ||
574 | qed_mcp_cmd_set_blocking(p_hwfn, true); | ||
575 | |||
499 | return -EAGAIN; | 576 | return -EAGAIN; |
500 | } | 577 | } |
501 | 578 | ||
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
507 | "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", | 584 | "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", |
508 | p_mb_params->mcp_resp, | 585 | p_mb_params->mcp_resp, |
509 | p_mb_params->mcp_param, | 586 | p_mb_params->mcp_param, |
510 | (cnt * delay) / 1000, (cnt * delay) % 1000); | 587 | (cnt * usecs) / 1000, (cnt * usecs) % 1000); |
511 | 588 | ||
512 | /* Clear the sequence number from the MFW response */ | 589 | /* Clear the sequence number from the MFW response */ |
513 | p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; | 590 | p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; |
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
525 | { | 602 | { |
526 | size_t union_data_size = sizeof(union drv_union_data); | 603 | size_t union_data_size = sizeof(union drv_union_data); |
527 | u32 max_retries = QED_DRV_MB_MAX_RETRIES; | 604 | u32 max_retries = QED_DRV_MB_MAX_RETRIES; |
528 | u32 delay = CHIP_MCP_RESP_ITER_US; | 605 | u32 usecs = QED_MCP_RESP_ITER_US; |
529 | 606 | ||
530 | /* MCP not initialized */ | 607 | /* MCP not initialized */ |
531 | if (!qed_mcp_is_init(p_hwfn)) { | 608 | if (!qed_mcp_is_init(p_hwfn)) { |
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
533 | return -EBUSY; | 610 | return -EBUSY; |
534 | } | 611 | } |
535 | 612 | ||
613 | if (p_hwfn->mcp_info->b_block_cmd) { | ||
614 | DP_NOTICE(p_hwfn, | ||
615 | "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n", | ||
616 | p_mb_params->cmd, p_mb_params->param); | ||
617 | return -EBUSY; | ||
618 | } | ||
619 | |||
536 | if (p_mb_params->data_src_size > union_data_size || | 620 | if (p_mb_params->data_src_size > union_data_size || |
537 | p_mb_params->data_dst_size > union_data_size) { | 621 | p_mb_params->data_dst_size > union_data_size) { |
538 | DP_ERR(p_hwfn, | 622 | DP_ERR(p_hwfn, |
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
542 | return -EINVAL; | 626 | return -EINVAL; |
543 | } | 627 | } |
544 | 628 | ||
629 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { | ||
630 | max_retries = DIV_ROUND_UP(max_retries, 1000); | ||
631 | usecs *= 1000; | ||
632 | } | ||
633 | |||
545 | return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, | 634 | return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, |
546 | delay); | 635 | usecs); |
547 | } | 636 | } |
548 | 637 | ||
549 | int qed_mcp_cmd(struct qed_hwfn *p_hwfn, | 638 | int qed_mcp_cmd(struct qed_hwfn *p_hwfn, |
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn, | |||
761 | mb_params.data_src_size = sizeof(load_req); | 850 | mb_params.data_src_size = sizeof(load_req); |
762 | mb_params.p_data_dst = &load_rsp; | 851 | mb_params.p_data_dst = &load_rsp; |
763 | mb_params.data_dst_size = sizeof(load_rsp); | 852 | mb_params.data_dst_size = sizeof(load_rsp); |
853 | mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; | ||
764 | 854 | ||
765 | DP_VERBOSE(p_hwfn, QED_MSG_SP, | 855 | DP_VERBOSE(p_hwfn, QED_MSG_SP, |
766 | "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", | 856 | "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", |
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, | |||
982 | 1072 | ||
983 | int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 1073 | int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
984 | { | 1074 | { |
985 | u32 wol_param, mcp_resp, mcp_param; | 1075 | struct qed_mcp_mb_params mb_params; |
1076 | u32 wol_param; | ||
986 | 1077 | ||
987 | switch (p_hwfn->cdev->wol_config) { | 1078 | switch (p_hwfn->cdev->wol_config) { |
988 | case QED_OV_WOL_DISABLED: | 1079 | case QED_OV_WOL_DISABLED: |
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
1000 | wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; | 1091 | wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; |
1001 | } | 1092 | } |
1002 | 1093 | ||
1003 | return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, | 1094 | memset(&mb_params, 0, sizeof(mb_params)); |
1004 | &mcp_resp, &mcp_param); | 1095 | mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; |
1096 | mb_params.param = wol_param; | ||
1097 | mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; | ||
1098 | |||
1099 | return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); | ||
1005 | } | 1100 | } |
1006 | 1101 | ||
1007 | int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 1102 | int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
@@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, | |||
2077 | return rc; | 2172 | return rc; |
2078 | } | 2173 | } |
2079 | 2174 | ||
2175 | /* A maximal 100 msec waiting time for the MCP to halt */ | ||
2176 | #define QED_MCP_HALT_SLEEP_MS 10 | ||
2177 | #define QED_MCP_HALT_MAX_RETRIES 10 | ||
2178 | |||
2080 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 2179 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
2081 | { | 2180 | { |
2082 | u32 resp = 0, param = 0; | 2181 | u32 resp = 0, param = 0, cpu_state, cnt = 0; |
2083 | int rc; | 2182 | int rc; |
2084 | 2183 | ||
2085 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, | 2184 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, |
2086 | ¶m); | 2185 | ¶m); |
2087 | if (rc) | 2186 | if (rc) { |
2088 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); | 2187 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); |
2188 | return rc; | ||
2189 | } | ||
2089 | 2190 | ||
2090 | return rc; | 2191 | do { |
2192 | msleep(QED_MCP_HALT_SLEEP_MS); | ||
2193 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
2194 | if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) | ||
2195 | break; | ||
2196 | } while (++cnt < QED_MCP_HALT_MAX_RETRIES); | ||
2197 | |||
2198 | if (cnt == QED_MCP_HALT_MAX_RETRIES) { | ||
2199 | DP_NOTICE(p_hwfn, | ||
2200 | "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", | ||
2201 | qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); | ||
2202 | return -EBUSY; | ||
2203 | } | ||
2204 | |||
2205 | qed_mcp_cmd_set_blocking(p_hwfn, true); | ||
2206 | |||
2207 | return 0; | ||
2091 | } | 2208 | } |
2092 | 2209 | ||
2210 | #define QED_MCP_RESUME_SLEEP_MS 10 | ||
2211 | |||
2093 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 2212 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
2094 | { | 2213 | { |
2095 | u32 value, cpu_mode; | 2214 | u32 cpu_mode, cpu_state; |
2096 | 2215 | ||
2097 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); | 2216 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); |
2098 | 2217 | ||
2099 | value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | ||
2100 | value &= ~MCP_REG_CPU_MODE_SOFT_HALT; | ||
2101 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); | ||
2102 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | 2218 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); |
2219 | cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; | ||
2220 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); | ||
2221 | msleep(QED_MCP_RESUME_SLEEP_MS); | ||
2222 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
2103 | 2223 | ||
2104 | return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; | 2224 | if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { |
2225 | DP_NOTICE(p_hwfn, | ||
2226 | "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", | ||
2227 | cpu_mode, cpu_state); | ||
2228 | return -EBUSY; | ||
2229 | } | ||
2230 | |||
2231 | qed_mcp_cmd_set_blocking(p_hwfn, false); | ||
2232 | |||
2233 | return 0; | ||
2105 | } | 2234 | } |
2106 | 2235 | ||
2107 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, | 2236 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, |
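The qed_mcp.c changes do three related things: poll the 'sup_msgs' word as a SHMEM-ready indication before trusting the MFW mailbox offsets, latch b_block_cmd so further mailbox commands are refused once the MFW stops responding or is halted, and add a CAN_SLEEP flag so long-running commands retry with msleep() instead of busy-waiting in udelay(). A reduced sketch of the sleep-aware retry loop only (the example_* names are hypothetical):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_hw {
        bool done;
};

static bool example_cmd_done(struct example_hw *hw)
{
        return hw->done;        /* hypothetical completion check */
}

/* Sketch only: retry until completion, sleeping between attempts when the
 * caller may sleep, otherwise spinning with udelay(). */
static int example_wait_for_response(struct example_hw *hw, bool can_sleep,
                                     u32 max_retries, u32 usecs)
{
        u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);

        do {
                if (example_cmd_done(hw))
                        return 0;
                if (can_sleep)
                        msleep(msecs);
                else
                        udelay(usecs);
        } while (++cnt < max_retries);

        return -EBUSY;
}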
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 047976d5c6e9..85e6b3989e7a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h | |||
@@ -635,11 +635,14 @@ struct qed_mcp_info { | |||
635 | */ | 635 | */ |
636 | spinlock_t cmd_lock; | 636 | spinlock_t cmd_lock; |
637 | 637 | ||
638 | /* Flag to indicate whether sending a MFW mailbox command is blocked */ | ||
639 | bool b_block_cmd; | ||
640 | |||
638 | /* Spinlock used for syncing SW link-changes and link-changes | 641 | /* Spinlock used for syncing SW link-changes and link-changes |
639 | * originating from attention context. | 642 | * originating from attention context. |
640 | */ | 643 | */ |
641 | spinlock_t link_lock; | 644 | spinlock_t link_lock; |
642 | bool block_mb_sending; | 645 | |
643 | u32 public_base; | 646 | u32 public_base; |
644 | u32 drv_mb_addr; | 647 | u32 drv_mb_addr; |
645 | u32 mfw_mb_addr; | 648 | u32 mfw_mb_addr; |
@@ -660,14 +663,20 @@ struct qed_mcp_info { | |||
660 | }; | 663 | }; |
661 | 664 | ||
662 | struct qed_mcp_mb_params { | 665 | struct qed_mcp_mb_params { |
663 | u32 cmd; | 666 | u32 cmd; |
664 | u32 param; | 667 | u32 param; |
665 | void *p_data_src; | 668 | void *p_data_src; |
666 | u8 data_src_size; | 669 | void *p_data_dst; |
667 | void *p_data_dst; | 670 | u8 data_src_size; |
668 | u8 data_dst_size; | 671 | u8 data_dst_size; |
669 | u32 mcp_resp; | 672 | u32 mcp_resp; |
670 | u32 mcp_param; | 673 | u32 mcp_param; |
674 | u32 flags; | ||
675 | #define QED_MB_FLAG_CAN_SLEEP (0x1 << 0) | ||
676 | #define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1) | ||
677 | #define QED_MB_FLAGS_IS_SET(params, flag) \ | ||
678 | ({ typeof(params) __params = (params); \ | ||
679 | (__params && (__params->flags & QED_MB_FLAG_ ## flag)); }) | ||
671 | }; | 680 | }; |
672 | 681 | ||
673 | struct qed_drv_tlv_hdr { | 682 | struct qed_drv_tlv_hdr { |
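QED_MB_FLAGS_IS_SET() is written as a statement expression so that 'params' is evaluated only once and a NULL pointer is tolerated. A usage sketch mirroring how qed_mcp.c tests the new flags (mb_params here is only an illustrative local; the command value is taken from the unload hunk above):

struct qed_mcp_mb_params mb_params = {
        .cmd   = DRV_MSG_CODE_UNLOAD_REQ,       /* illustrative */
        .flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK,
};
bool can_sleep = QED_MB_FLAGS_IS_SET(&mb_params, CAN_SLEEP);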
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index d8ad2dcad8d5..f736f70956fd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
@@ -562,8 +562,10 @@ | |||
562 | 0 | 562 | 0 |
563 | #define MCP_REG_CPU_STATE \ | 563 | #define MCP_REG_CPU_STATE \ |
564 | 0xe05004UL | 564 | 0xe05004UL |
565 | #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) | ||
565 | #define MCP_REG_CPU_EVENT_MASK \ | 566 | #define MCP_REG_CPU_EVENT_MASK \ |
566 | 0xe05008UL | 567 | 0xe05008UL |
568 | #define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL | ||
567 | #define PGLUE_B_REG_PF_BAR0_SIZE \ | 569 | #define PGLUE_B_REG_PF_BAR0_SIZE \ |
568 | 0x2aae60UL | 570 | 0x2aae60UL |
569 | #define PGLUE_B_REG_PF_BAR1_SIZE \ | 571 | #define PGLUE_B_REG_PF_BAR1_SIZE \ |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 9673d19308e6..b16ce7d93caf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c | |||
@@ -2006,18 +2006,16 @@ unlock: | |||
2006 | static int qede_parse_actions(struct qede_dev *edev, | 2006 | static int qede_parse_actions(struct qede_dev *edev, |
2007 | struct tcf_exts *exts) | 2007 | struct tcf_exts *exts) |
2008 | { | 2008 | { |
2009 | int rc = -EINVAL, num_act = 0; | 2009 | int rc = -EINVAL, num_act = 0, i; |
2010 | const struct tc_action *a; | 2010 | const struct tc_action *a; |
2011 | bool is_drop = false; | 2011 | bool is_drop = false; |
2012 | LIST_HEAD(actions); | ||
2013 | 2012 | ||
2014 | if (!tcf_exts_has_actions(exts)) { | 2013 | if (!tcf_exts_has_actions(exts)) { |
2015 | DP_NOTICE(edev, "No tc actions received\n"); | 2014 | DP_NOTICE(edev, "No tc actions received\n"); |
2016 | return rc; | 2015 | return rc; |
2017 | } | 2016 | } |
2018 | 2017 | ||
2019 | tcf_exts_to_list(exts, &actions); | 2018 | tcf_exts_for_each_action(i, a, exts) { |
2020 | list_for_each_entry(a, &actions, list) { | ||
2021 | num_act++; | 2019 | num_act++; |
2022 | 2020 | ||
2023 | if (is_tcf_gact_shot(a)) | 2021 | if (is_tcf_gact_shot(a)) |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 353f1c129af1..059ba9429e51 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, | |||
2384 | return status; | 2384 | return status; |
2385 | } | 2385 | } |
2386 | 2386 | ||
2387 | static netdev_features_t qlge_fix_features(struct net_device *ndev, | ||
2388 | netdev_features_t features) | ||
2389 | { | ||
2390 | int err; | ||
2391 | |||
2392 | /* Update the behavior of vlan accel in the adapter */ | ||
2393 | err = qlge_update_hw_vlan_features(ndev, features); | ||
2394 | if (err) | ||
2395 | return err; | ||
2396 | |||
2397 | return features; | ||
2398 | } | ||
2399 | |||
2400 | static int qlge_set_features(struct net_device *ndev, | 2387 | static int qlge_set_features(struct net_device *ndev, |
2401 | netdev_features_t features) | 2388 | netdev_features_t features) |
2402 | { | 2389 | { |
2403 | netdev_features_t changed = ndev->features ^ features; | 2390 | netdev_features_t changed = ndev->features ^ features; |
2391 | int err; | ||
2392 | |||
2393 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) { | ||
2394 | /* Update the behavior of vlan accel in the adapter */ | ||
2395 | err = qlge_update_hw_vlan_features(ndev, features); | ||
2396 | if (err) | ||
2397 | return err; | ||
2404 | 2398 | ||
2405 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) | ||
2406 | qlge_vlan_mode(ndev, features); | 2399 | qlge_vlan_mode(ndev, features); |
2400 | } | ||
2407 | 2401 | ||
2408 | return 0; | 2402 | return 0; |
2409 | } | 2403 | } |
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = { | |||
4719 | .ndo_set_mac_address = qlge_set_mac_address, | 4713 | .ndo_set_mac_address = qlge_set_mac_address, |
4720 | .ndo_validate_addr = eth_validate_addr, | 4714 | .ndo_validate_addr = eth_validate_addr, |
4721 | .ndo_tx_timeout = qlge_tx_timeout, | 4715 | .ndo_tx_timeout = qlge_tx_timeout, |
4722 | .ndo_fix_features = qlge_fix_features, | ||
4723 | .ndo_set_features = qlge_set_features, | 4716 | .ndo_set_features = qlge_set_features, |
4724 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, | 4717 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, |
4725 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, | 4718 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0efa977c422d..b08d51bf7a20 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -218,6 +218,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { | |||
218 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, | 218 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, |
219 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, | 219 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, |
220 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, | 220 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, |
221 | { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, | ||
221 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, | 222 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, |
222 | { PCI_VENDOR_ID_DLINK, 0x4300, | 223 | { PCI_VENDOR_ID_DLINK, 0x4300, |
223 | PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, | 224 | PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, |
@@ -4522,7 +4523,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
4522 | rtl_hw_reset(tp); | 4523 | rtl_hw_reset(tp); |
4523 | } | 4524 | } |
4524 | 4525 | ||
4525 | static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) | 4526 | static void rtl_set_tx_config_registers(struct rtl8169_private *tp) |
4526 | { | 4527 | { |
4527 | /* Set DMA burst size and Interframe Gap Time */ | 4528 | /* Set DMA burst size and Interframe Gap Time */ |
4528 | RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | | 4529 | RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | |
@@ -4633,12 +4634,14 @@ static void rtl_hw_start(struct rtl8169_private *tp) | |||
4633 | 4634 | ||
4634 | rtl_set_rx_max_size(tp); | 4635 | rtl_set_rx_max_size(tp); |
4635 | rtl_set_rx_tx_desc_registers(tp); | 4636 | rtl_set_rx_tx_desc_registers(tp); |
4636 | rtl_set_rx_tx_config_registers(tp); | 4637 | rtl_set_tx_config_registers(tp); |
4637 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); | 4638 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); |
4638 | 4639 | ||
4639 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ | 4640 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ |
4640 | RTL_R8(tp, IntrMask); | 4641 | RTL_R8(tp, IntrMask); |
4641 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); | 4642 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); |
4643 | rtl_init_rxcfg(tp); | ||
4644 | |||
4642 | rtl_set_rx_mode(tp->dev); | 4645 | rtl_set_rx_mode(tp->dev); |
4643 | /* no early-rx interrupts */ | 4646 | /* no early-rx interrupts */ |
4644 | RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); | 4647 | RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); |
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b81f4faf7b10..1470fc12282b 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* Renesas Ethernet AVB device driver | 2 | /* Renesas Ethernet AVB device driver |
2 | * | 3 | * |
3 | * Copyright (C) 2014-2015 Renesas Electronics Corporation | 4 | * Copyright (C) 2014-2015 Renesas Electronics Corporation |
@@ -5,10 +6,6 @@ | |||
5 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> | 6 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> |
6 | * | 7 | * |
7 | * Based on the SuperH Ethernet driver | 8 | * Based on the SuperH Ethernet driver |
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License version 2, | ||
11 | * as published by the Free Software Foundation. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #ifndef __RAVB_H__ | 11 | #ifndef __RAVB_H__ |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c06f2df895c2..aff5516b781e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* Renesas Ethernet AVB device driver | 2 | /* Renesas Ethernet AVB device driver |
2 | * | 3 | * |
3 | * Copyright (C) 2014-2015 Renesas Electronics Corporation | 4 | * Copyright (C) 2014-2015 Renesas Electronics Corporation |
@@ -5,10 +6,6 @@ | |||
5 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> | 6 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> |
6 | * | 7 | * |
7 | * Based on the SuperH Ethernet driver | 8 | * Based on the SuperH Ethernet driver |
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License version 2, | ||
11 | * as published by the Free Software Foundation. | ||
12 | */ | 9 | */ |
13 | 10 | ||
14 | #include <linux/cache.h> | 11 | #include <linux/cache.h> |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5573199c4536..f27a0dc8c563 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* SuperH Ethernet device driver | 2 | /* SuperH Ethernet device driver |
2 | * | 3 | * |
3 | * Copyright (C) 2014 Renesas Electronics Corporation | 4 | * Copyright (C) 2014 Renesas Electronics Corporation |
@@ -5,18 +6,6 @@ | |||
5 | * Copyright (C) 2008-2014 Renesas Solutions Corp. | 6 | * Copyright (C) 2008-2014 Renesas Solutions Corp. |
6 | * Copyright (C) 2013-2017 Cogent Embedded, Inc. | 7 | * Copyright (C) 2013-2017 Cogent Embedded, Inc. |
7 | * Copyright (C) 2014 Codethink Limited | 8 | * Copyright (C) 2014 Codethink Limited |
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms and conditions of the GNU General Public License, | ||
11 | * version 2, as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in | ||
19 | * the file called "COPYING". | ||
20 | */ | 9 | */ |
21 | 10 | ||
22 | #include <linux/module.h> | 11 | #include <linux/module.h> |
@@ -809,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = { | |||
809 | .magic = 1, | 798 | .magic = 1, |
810 | .cexcr = 1, | 799 | .cexcr = 1, |
811 | }; | 800 | }; |
801 | |||
802 | /* R7S9210 */ | ||
803 | static struct sh_eth_cpu_data r7s9210_data = { | ||
804 | .soft_reset = sh_eth_soft_reset, | ||
805 | |||
806 | .set_duplex = sh_eth_set_duplex, | ||
807 | .set_rate = sh_eth_set_rate_rcar, | ||
808 | |||
809 | .register_type = SH_ETH_REG_FAST_SH4, | ||
810 | |||
811 | .edtrr_trns = EDTRR_TRNS_ETHER, | ||
812 | .ecsr_value = ECSR_ICD, | ||
813 | .ecsipr_value = ECSIPR_ICDIP, | ||
814 | .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP | | ||
815 | EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP | | ||
816 | EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP | | ||
817 | EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP | | ||
818 | EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | | ||
819 | EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP | | ||
820 | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, | ||
821 | |||
822 | .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, | ||
823 | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | | ||
824 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, | ||
825 | |||
826 | .fdr_value = 0x0000070f, | ||
827 | |||
828 | .apr = 1, | ||
829 | .mpr = 1, | ||
830 | .tpauser = 1, | ||
831 | .hw_swap = 1, | ||
832 | .rpadir = 1, | ||
833 | .no_ade = 1, | ||
834 | .xdfar_rw = 1, | ||
835 | }; | ||
812 | #endif /* CONFIG_OF */ | 836 | #endif /* CONFIG_OF */ |
813 | 837 | ||
814 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) | 838 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) |
@@ -3132,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = { | |||
3132 | { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, | 3156 | { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, |
3133 | { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, | 3157 | { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, |
3134 | { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, | 3158 | { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, |
3159 | { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data }, | ||
3135 | { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, | 3160 | { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, |
3136 | { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, | 3161 | { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, |
3137 | { } | 3162 | { } |
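The new r7s9210_data entry above is attached to the "renesas,ether-r7s9210" compatible through the OF match table. As a minimal sketch only — the function name and probe fragment below are illustrative, not part of this patch — per-SoC data wired up this way is normally fetched at probe time with of_device_get_match_data():

    #include <linux/of_device.h>
    #include <linux/platform_device.h>
    #include <linux/errno.h>

    /* Illustrative probe fragment: pick up the per-SoC descriptor that the
     * OF core matched for this device (e.g. &r7s9210_data above).
     */
    static int example_probe(struct platform_device *pdev)
    {
            const struct sh_eth_cpu_data *cd;   /* declared in sh_eth.h */

            cd = of_device_get_match_data(&pdev->dev);
            if (!cd)
                    return -EINVAL;     /* no match data for this node */

            /* cd->register_type, cd->eesipr_value, ... describe the SoC */
            return 0;
    }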
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index f94be99cf400..0c18650bbfe6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -1,19 +1,8 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* SuperH Ethernet device driver | 2 | /* SuperH Ethernet device driver |
2 | * | 3 | * |
3 | * Copyright (C) 2006-2012 Nobuhiro Iwamatsu | 4 | * Copyright (C) 2006-2012 Nobuhiro Iwamatsu |
4 | * Copyright (C) 2008-2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2008-2012 Renesas Solutions Corp. |
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | */ | 6 | */ |
18 | 7 | ||
19 | #ifndef __SH_ETH_H__ | 8 | #ifndef __SH_ETH_H__ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index edf20361ea5f..324049eebb9b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH | |||
33 | select PHYLIB | 33 | select PHYLIB |
34 | select CRC32 | 34 | select CRC32 |
35 | select MII | 35 | select MII |
36 | depends on OF && COMMON_CLK && HAS_DMA | 36 | depends on OF && HAS_DMA |
37 | help | 37 | help |
38 | Support for chips using the snps,dwc-qos-ethernet.txt DT binding. | 38 | Support for chips using the snps,dwc-qos-ethernet.txt DT binding. |
39 | 39 | ||
@@ -57,7 +57,7 @@ config DWMAC_ANARION | |||
57 | config DWMAC_IPQ806X | 57 | config DWMAC_IPQ806X |
58 | tristate "QCA IPQ806x DWMAC support" | 58 | tristate "QCA IPQ806x DWMAC support" |
59 | default ARCH_QCOM | 59 | default ARCH_QCOM |
60 | depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST) | 60 | depends on OF && (ARCH_QCOM || COMPILE_TEST) |
61 | select MFD_SYSCON | 61 | select MFD_SYSCON |
62 | help | 62 | help |
63 | Support for QCA IPQ806X DWMAC Ethernet. | 63 | Support for QCA IPQ806X DWMAC Ethernet. |
@@ -100,7 +100,7 @@ config DWMAC_OXNAS | |||
100 | config DWMAC_ROCKCHIP | 100 | config DWMAC_ROCKCHIP |
101 | tristate "Rockchip dwmac support" | 101 | tristate "Rockchip dwmac support" |
102 | default ARCH_ROCKCHIP | 102 | default ARCH_ROCKCHIP |
103 | depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST) | 103 | depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST) |
104 | select MFD_SYSCON | 104 | select MFD_SYSCON |
105 | help | 105 | help |
106 | Support for Ethernet controller on Rockchip RK3288 SoC. | 106 | Support for Ethernet controller on Rockchip RK3288 SoC. |
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP | |||
110 | 110 | ||
111 | config DWMAC_SOCFPGA | 111 | config DWMAC_SOCFPGA |
112 | tristate "SOCFPGA dwmac support" | 112 | tristate "SOCFPGA dwmac support" |
113 | default ARCH_SOCFPGA | 113 | default (ARCH_SOCFPGA || ARCH_STRATIX10) |
114 | depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) | 114 | depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) |
115 | select MFD_SYSCON | 115 | select MFD_SYSCON |
116 | help | 116 | help |
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA | |||
123 | config DWMAC_STI | 123 | config DWMAC_STI |
124 | tristate "STi GMAC support" | 124 | tristate "STi GMAC support" |
125 | default ARCH_STI | 125 | default ARCH_STI |
126 | depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST) | 126 | depends on OF && (ARCH_STI || COMPILE_TEST) |
127 | select MFD_SYSCON | 127 | select MFD_SYSCON |
128 | ---help--- | 128 | ---help--- |
129 | Support for ethernet controller on STi SOCs. | 129 | Support for ethernet controller on STi SOCs. |
@@ -147,7 +147,7 @@ config DWMAC_STM32 | |||
147 | config DWMAC_SUNXI | 147 | config DWMAC_SUNXI |
148 | tristate "Allwinner GMAC support" | 148 | tristate "Allwinner GMAC support" |
149 | default ARCH_SUNXI | 149 | default ARCH_SUNXI |
150 | depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST) | 150 | depends on OF && (ARCH_SUNXI || COMPILE_TEST) |
151 | ---help--- | 151 | ---help--- |
152 | Support for Allwinner A20/A31 GMAC ethernet controllers. | 152 | Support for Allwinner A20/A31 GMAC ethernet controllers. |
153 | 153 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 76649adf8fb0..c0a855b7ab3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -112,7 +112,6 @@ struct stmmac_priv { | |||
112 | u32 tx_count_frames; | 112 | u32 tx_count_frames; |
113 | u32 tx_coal_frames; | 113 | u32 tx_coal_frames; |
114 | u32 tx_coal_timer; | 114 | u32 tx_coal_timer; |
115 | bool tx_timer_armed; | ||
116 | 115 | ||
117 | int tx_coalesce; | 116 | int tx_coalesce; |
118 | int hwts_tx_en; | 117 | int hwts_tx_en; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ff1ffb46198a..9f458bb16f2a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3147 | * element in case of no SG. | 3147 | * element in case of no SG. |
3148 | */ | 3148 | */ |
3149 | priv->tx_count_frames += nfrags + 1; | 3149 | priv->tx_count_frames += nfrags + 1; |
3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames) && | 3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { |
3151 | !priv->tx_timer_armed) { | ||
3152 | mod_timer(&priv->txtimer, | 3151 | mod_timer(&priv->txtimer, |
3153 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | 3152 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); |
3154 | priv->tx_timer_armed = true; | ||
3155 | } else { | 3153 | } else { |
3156 | priv->tx_count_frames = 0; | 3154 | priv->tx_count_frames = 0; |
3157 | stmmac_set_tx_ic(priv, desc); | 3155 | stmmac_set_tx_ic(priv, desc); |
3158 | priv->xstats.tx_set_ic_bit++; | 3156 | priv->xstats.tx_set_ic_bit++; |
3159 | priv->tx_timer_armed = false; | ||
3160 | } | 3157 | } |
3161 | 3158 | ||
3162 | skb_tx_timestamp(skb); | 3159 | skb_tx_timestamp(skb); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 1a96dd9c1091..531294f4978b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | |||
@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry, | |||
61 | struct stmmac_tc_entry *action_entry = entry; | 61 | struct stmmac_tc_entry *action_entry = entry; |
62 | const struct tc_action *act; | 62 | const struct tc_action *act; |
63 | struct tcf_exts *exts; | 63 | struct tcf_exts *exts; |
64 | LIST_HEAD(actions); | 64 | int i; |
65 | 65 | ||
66 | exts = cls->knode.exts; | 66 | exts = cls->knode.exts; |
67 | if (!tcf_exts_has_actions(exts)) | 67 | if (!tcf_exts_has_actions(exts)) |
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry, | |||
69 | if (frag) | 69 | if (frag) |
70 | action_entry = frag; | 70 | action_entry = frag; |
71 | 71 | ||
72 | tcf_exts_to_list(exts, &actions); | 72 | tcf_exts_for_each_action(i, act, exts) { |
73 | list_for_each_entry(act, &actions, list) { | ||
74 | /* Accept */ | 73 | /* Accept */ |
75 | if (is_tcf_gact_ok(act)) { | 74 | if (is_tcf_gact_ok(act)) { |
76 | action_entry->val.af = 1; | 75 | action_entry->val.af = 1; |
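The hunk above switches from building a temporary action list to the tcf_exts_for_each_action() iterator. A minimal, hypothetical sketch of the new pattern (the helper name and drop-counting logic are invented for illustration):

    #include <net/pkt_cls.h>
    #include <net/tc_act/tc_gact.h>

    /* Illustrative: walk the actions of a classifier rule and count drops. */
    static int example_count_drops(struct tcf_exts *exts)
    {
            const struct tc_action *act;
            int i, drops = 0;

            if (!tcf_exts_has_actions(exts))
                    return 0;

            /* Index-based walk; no list_head in struct tc_action anymore. */
            tcf_exts_for_each_action(i, act, exts) {
                    if (is_tcf_gact_shot(act))      /* gact "drop" */
                            drops++;
            }

            return drops;
    }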
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 0c1adad7415d..396e1cd10667 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c | |||
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) | |||
170 | struct device_node *node; | 170 | struct device_node *node; |
171 | struct cpsw_phy_sel_priv *priv; | 171 | struct cpsw_phy_sel_priv *priv; |
172 | 172 | ||
173 | node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); | 173 | node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0); |
174 | if (!node) { | 174 | if (!node) { |
175 | dev_err(dev, "Phy mode driver DT not found\n"); | 175 | node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); |
176 | return; | 176 | if (!node) { |
177 | dev_err(dev, "Phy mode driver DT not found\n"); | ||
178 | return; | ||
179 | } | ||
177 | } | 180 | } |
178 | 181 | ||
179 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 182 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 507f68190cb1..70921bbe0e28 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
30 | #include <linux/inetdevice.h> | 30 | #include <linux/inetdevice.h> |
31 | #include <linux/etherdevice.h> | 31 | #include <linux/etherdevice.h> |
32 | #include <linux/pci.h> | ||
32 | #include <linux/skbuff.h> | 33 | #include <linux/skbuff.h> |
33 | #include <linux/if_vlan.h> | 34 | #include <linux/if_vlan.h> |
34 | #include <linux/in.h> | 35 | #include <linux/in.h> |
@@ -2039,12 +2040,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
2039 | { | 2040 | { |
2040 | struct net_device *ndev; | 2041 | struct net_device *ndev; |
2041 | struct net_device_context *net_device_ctx; | 2042 | struct net_device_context *net_device_ctx; |
2043 | struct device *pdev = vf_netdev->dev.parent; | ||
2042 | struct netvsc_device *netvsc_dev; | 2044 | struct netvsc_device *netvsc_dev; |
2043 | int ret; | 2045 | int ret; |
2044 | 2046 | ||
2045 | if (vf_netdev->addr_len != ETH_ALEN) | 2047 | if (vf_netdev->addr_len != ETH_ALEN) |
2046 | return NOTIFY_DONE; | 2048 | return NOTIFY_DONE; |
2047 | 2049 | ||
2050 | if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev)) | ||
2051 | return NOTIFY_DONE; | ||
2052 | |||
2048 | /* | 2053 | /* |
2049 | * We will use the MAC address to locate the synthetic interface to | 2054 | * We will use the MAC address to locate the synthetic interface to |
2050 | * associate with the VF interface. If we don't find a matching | 2055 | * associate with the VF interface. If we don't find a matching |
@@ -2201,6 +2206,16 @@ static int netvsc_probe(struct hv_device *dev, | |||
2201 | 2206 | ||
2202 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); | 2207 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); |
2203 | 2208 | ||
2209 | /* We must get rtnl lock before scheduling nvdev->subchan_work, | ||
2210 | * otherwise netvsc_subchan_work() can get rtnl lock first and wait | ||
2211 | * for all subchannels to show up, but that may not happen because | ||
2212 | * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() | ||
2213 | * -> ... -> device_add() -> ... -> __device_attach() can't get | ||
2214 | * the device lock, so all the subchannels can't be processed -- | ||
2215 | * finally netvsc_subchan_work() hangs forever. | ||
2216 | */ | ||
2217 | rtnl_lock(); | ||
2218 | |||
2204 | if (nvdev->num_chn > 1) | 2219 | if (nvdev->num_chn > 1) |
2205 | schedule_work(&nvdev->subchan_work); | 2220 | schedule_work(&nvdev->subchan_work); |
2206 | 2221 | ||
@@ -2219,7 +2234,6 @@ static int netvsc_probe(struct hv_device *dev, | |||
2219 | else | 2234 | else |
2220 | net->max_mtu = ETH_DATA_LEN; | 2235 | net->max_mtu = ETH_DATA_LEN; |
2221 | 2236 | ||
2222 | rtnl_lock(); | ||
2223 | ret = register_netdevice(net); | 2237 | ret = register_netdevice(net); |
2224 | if (ret != 0) { | 2238 | if (ret != 0) { |
2225 | pr_err("Unable to register netdev.\n"); | 2239 | pr_err("Unable to register netdev.\n"); |
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4637d980310e..52fffb98fde9 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c | |||
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
398 | switch (type) { | 398 | switch (type) { |
399 | case hwmon_temp: | 399 | case hwmon_temp: |
400 | switch (attr) { | 400 | switch (attr) { |
401 | case hwmon_temp_input: | ||
402 | case hwmon_temp_min_alarm: | 401 | case hwmon_temp_min_alarm: |
403 | case hwmon_temp_max_alarm: | 402 | case hwmon_temp_max_alarm: |
404 | case hwmon_temp_lcrit_alarm: | 403 | case hwmon_temp_lcrit_alarm: |
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
407 | case hwmon_temp_max: | 406 | case hwmon_temp_max: |
408 | case hwmon_temp_lcrit: | 407 | case hwmon_temp_lcrit: |
409 | case hwmon_temp_crit: | 408 | case hwmon_temp_crit: |
409 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
410 | return 0; | ||
411 | /* fall through */ | ||
412 | case hwmon_temp_input: | ||
410 | return 0444; | 413 | return 0444; |
411 | default: | 414 | default: |
412 | return 0; | 415 | return 0; |
413 | } | 416 | } |
414 | case hwmon_in: | 417 | case hwmon_in: |
415 | switch (attr) { | 418 | switch (attr) { |
416 | case hwmon_in_input: | ||
417 | case hwmon_in_min_alarm: | 419 | case hwmon_in_min_alarm: |
418 | case hwmon_in_max_alarm: | 420 | case hwmon_in_max_alarm: |
419 | case hwmon_in_lcrit_alarm: | 421 | case hwmon_in_lcrit_alarm: |
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
422 | case hwmon_in_max: | 424 | case hwmon_in_max: |
423 | case hwmon_in_lcrit: | 425 | case hwmon_in_lcrit: |
424 | case hwmon_in_crit: | 426 | case hwmon_in_crit: |
427 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
428 | return 0; | ||
429 | /* fall through */ | ||
430 | case hwmon_in_input: | ||
425 | return 0444; | 431 | return 0444; |
426 | default: | 432 | default: |
427 | return 0; | 433 | return 0; |
428 | } | 434 | } |
429 | case hwmon_curr: | 435 | case hwmon_curr: |
430 | switch (attr) { | 436 | switch (attr) { |
431 | case hwmon_curr_input: | ||
432 | case hwmon_curr_min_alarm: | 437 | case hwmon_curr_min_alarm: |
433 | case hwmon_curr_max_alarm: | 438 | case hwmon_curr_max_alarm: |
434 | case hwmon_curr_lcrit_alarm: | 439 | case hwmon_curr_lcrit_alarm: |
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
437 | case hwmon_curr_max: | 442 | case hwmon_curr_max: |
438 | case hwmon_curr_lcrit: | 443 | case hwmon_curr_lcrit: |
439 | case hwmon_curr_crit: | 444 | case hwmon_curr_crit: |
445 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
446 | return 0; | ||
447 | /* fall through */ | ||
448 | case hwmon_curr_input: | ||
440 | return 0444; | 449 | return 0444; |
441 | default: | 450 | default: |
442 | return 0; | 451 | return 0; |
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
452 | channel == 1) | 461 | channel == 1) |
453 | return 0; | 462 | return 0; |
454 | switch (attr) { | 463 | switch (attr) { |
455 | case hwmon_power_input: | ||
456 | case hwmon_power_min_alarm: | 464 | case hwmon_power_min_alarm: |
457 | case hwmon_power_max_alarm: | 465 | case hwmon_power_max_alarm: |
458 | case hwmon_power_lcrit_alarm: | 466 | case hwmon_power_lcrit_alarm: |
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
461 | case hwmon_power_max: | 469 | case hwmon_power_max: |
462 | case hwmon_power_lcrit: | 470 | case hwmon_power_lcrit: |
463 | case hwmon_power_crit: | 471 | case hwmon_power_crit: |
472 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
473 | return 0; | ||
474 | /* fall through */ | ||
475 | case hwmon_power_input: | ||
464 | return 0444; | 476 | return 0444; |
465 | default: | 477 | default: |
466 | return 0; | 478 | return 0; |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 97742708460b..2cd71bdb6484 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
5217 | netdev->hw_features &= ~NETIF_F_RXCSUM; | 5217 | netdev->hw_features &= ~NETIF_F_RXCSUM; |
5218 | } | 5218 | } |
5219 | 5219 | ||
5220 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && | 5220 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && |
5221 | udev->serial && !strcmp(udev->serial, "000001000000")) { | 5221 | (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) { |
5222 | dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); | 5222 | dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); |
5223 | set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); | 5223 | set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); |
5224 | } | 5224 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index b4c3a957c102..73969dbeb5c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | |||
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
985 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? | 985 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? |
986 | iwl_ext_nvm_channels : iwl_nvm_channels; | 986 | iwl_ext_nvm_channels : iwl_nvm_channels; |
987 | struct ieee80211_regdomain *regd, *copy_rd; | 987 | struct ieee80211_regdomain *regd, *copy_rd; |
988 | int size_of_regd, regd_to_copy, wmms_to_copy; | 988 | int size_of_regd, regd_to_copy; |
989 | int size_of_wmms = 0; | ||
990 | struct ieee80211_reg_rule *rule; | 989 | struct ieee80211_reg_rule *rule; |
991 | struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; | ||
992 | struct regdb_ptrs *regdb_ptrs; | 990 | struct regdb_ptrs *regdb_ptrs; |
993 | enum nl80211_band band; | 991 | enum nl80211_band band; |
994 | int center_freq, prev_center_freq = 0; | 992 | int center_freq, prev_center_freq = 0; |
995 | int valid_rules = 0, n_wmms = 0; | 993 | int valid_rules = 0; |
996 | int i; | ||
997 | bool new_rule; | 994 | bool new_rule; |
998 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? | 995 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? |
999 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; | 996 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; |
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
1012 | sizeof(struct ieee80211_regdomain) + | 1009 | sizeof(struct ieee80211_regdomain) + |
1013 | num_of_ch * sizeof(struct ieee80211_reg_rule); | 1010 | num_of_ch * sizeof(struct ieee80211_reg_rule); |
1014 | 1011 | ||
1015 | if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) | 1012 | regd = kzalloc(size_of_regd, GFP_KERNEL); |
1016 | size_of_wmms = | ||
1017 | num_of_ch * sizeof(struct ieee80211_wmm_rule); | ||
1018 | |||
1019 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | ||
1020 | if (!regd) | 1013 | if (!regd) |
1021 | return ERR_PTR(-ENOMEM); | 1014 | return ERR_PTR(-ENOMEM); |
1022 | 1015 | ||
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
1030 | regd->alpha2[0] = fw_mcc >> 8; | 1023 | regd->alpha2[0] = fw_mcc >> 8; |
1031 | regd->alpha2[1] = fw_mcc & 0xff; | 1024 | regd->alpha2[1] = fw_mcc & 0xff; |
1032 | 1025 | ||
1033 | wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
1034 | |||
1035 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { | 1026 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { |
1036 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); | 1027 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); |
1037 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? | 1028 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? |
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
1085 | band == NL80211_BAND_2GHZ) | 1076 | band == NL80211_BAND_2GHZ) |
1086 | continue; | 1077 | continue; |
1087 | 1078 | ||
1088 | if (!reg_query_regdb_wmm(regd->alpha2, center_freq, | 1079 | reg_query_regdb_wmm(regd->alpha2, center_freq, rule); |
1089 | ®db_ptrs[n_wmms].token, wmm_rule)) { | ||
1090 | /* Add only new rules */ | ||
1091 | for (i = 0; i < n_wmms; i++) { | ||
1092 | if (regdb_ptrs[i].token == | ||
1093 | regdb_ptrs[n_wmms].token) { | ||
1094 | rule->wmm_rule = regdb_ptrs[i].rule; | ||
1095 | break; | ||
1096 | } | ||
1097 | } | ||
1098 | if (i == n_wmms) { | ||
1099 | rule->wmm_rule = wmm_rule; | ||
1100 | regdb_ptrs[n_wmms++].rule = wmm_rule; | ||
1101 | wmm_rule++; | ||
1102 | } | ||
1103 | } | ||
1104 | } | 1080 | } |
1105 | 1081 | ||
1106 | regd->n_reg_rules = valid_rules; | 1082 | regd->n_reg_rules = valid_rules; |
1107 | regd->n_wmm_rules = n_wmms; | ||
1108 | 1083 | ||
1109 | /* | 1084 | /* |
1110 | * Narrow down regdom for unused regulatory rules to prevent hole | 1085 | * Narrow down regdom for unused regulatory rules to prevent hole |
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
1113 | regd_to_copy = sizeof(struct ieee80211_regdomain) + | 1088 | regd_to_copy = sizeof(struct ieee80211_regdomain) + |
1114 | valid_rules * sizeof(struct ieee80211_reg_rule); | 1089 | valid_rules * sizeof(struct ieee80211_reg_rule); |
1115 | 1090 | ||
1116 | wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; | 1091 | copy_rd = kzalloc(regd_to_copy, GFP_KERNEL); |
1117 | |||
1118 | copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); | ||
1119 | if (!copy_rd) { | 1092 | if (!copy_rd) { |
1120 | copy_rd = ERR_PTR(-ENOMEM); | 1093 | copy_rd = ERR_PTR(-ENOMEM); |
1121 | goto out; | 1094 | goto out; |
1122 | } | 1095 | } |
1123 | 1096 | ||
1124 | memcpy(copy_rd, regd, regd_to_copy); | 1097 | memcpy(copy_rd, regd, regd_to_copy); |
1125 | memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, | ||
1126 | wmms_to_copy); | ||
1127 | |||
1128 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); | ||
1129 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
1130 | |||
1131 | for (i = 0; i < regd->n_reg_rules; i++) { | ||
1132 | if (!regd->reg_rules[i].wmm_rule) | ||
1133 | continue; | ||
1134 | |||
1135 | copy_rd->reg_rules[i].wmm_rule = d_wmm + | ||
1136 | (regd->reg_rules[i].wmm_rule - s_wmm); | ||
1137 | } | ||
1138 | 1098 | ||
1139 | out: | 1099 | out: |
1140 | kfree(regdb_ptrs); | 1100 | kfree(regdb_ptrs); |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 998dfac0fcff..1068757ec42e 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <net/net_namespace.h> | 34 | #include <net/net_namespace.h> |
35 | #include <net/netns/generic.h> | 35 | #include <net/netns/generic.h> |
36 | #include <linux/rhashtable.h> | 36 | #include <linux/rhashtable.h> |
37 | #include <linux/nospec.h> | ||
37 | #include "mac80211_hwsim.h" | 38 | #include "mac80211_hwsim.h" |
38 | 39 | ||
39 | #define WARN_QUEUE 100 | 40 | #define WARN_QUEUE 100 |
@@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
2820 | IEEE80211_VHT_CAP_SHORT_GI_80 | | 2821 | IEEE80211_VHT_CAP_SHORT_GI_80 | |
2821 | IEEE80211_VHT_CAP_SHORT_GI_160 | | 2822 | IEEE80211_VHT_CAP_SHORT_GI_160 | |
2822 | IEEE80211_VHT_CAP_TXSTBC | | 2823 | IEEE80211_VHT_CAP_TXSTBC | |
2823 | IEEE80211_VHT_CAP_RXSTBC_1 | | ||
2824 | IEEE80211_VHT_CAP_RXSTBC_2 | | ||
2825 | IEEE80211_VHT_CAP_RXSTBC_3 | | ||
2826 | IEEE80211_VHT_CAP_RXSTBC_4 | | 2824 | IEEE80211_VHT_CAP_RXSTBC_4 | |
2827 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; | 2825 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; |
2828 | sband->vht_cap.vht_mcs.rx_mcs_map = | 2826 | sband->vht_cap.vht_mcs.rx_mcs_map = |
@@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
3317 | if (info->attrs[HWSIM_ATTR_CHANNELS]) | 3315 | if (info->attrs[HWSIM_ATTR_CHANNELS]) |
3318 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); | 3316 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); |
3319 | 3317 | ||
3318 | if (param.channels < 1) { | ||
3319 | GENL_SET_ERR_MSG(info, "must have at least one channel"); | ||
3320 | return -EINVAL; | ||
3321 | } | ||
3322 | |||
3320 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { | 3323 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { |
3321 | GENL_SET_ERR_MSG(info, "too many channels specified"); | 3324 | GENL_SET_ERR_MSG(info, "too many channels specified"); |
3322 | return -EINVAL; | 3325 | return -EINVAL; |
@@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
3350 | kfree(hwname); | 3353 | kfree(hwname); |
3351 | return -EINVAL; | 3354 | return -EINVAL; |
3352 | } | 3355 | } |
3356 | |||
3357 | idx = array_index_nospec(idx, | ||
3358 | ARRAY_SIZE(hwsim_world_regdom_custom)); | ||
3353 | param.regd = hwsim_world_regdom_custom[idx]; | 3359 | param.regd = hwsim_world_regdom_custom[idx]; |
3354 | } | 3360 | } |
3355 | 3361 | ||
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1b9951d2067e..d668682f91df 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, | |||
316 | old_value = *dbbuf_db; | 316 | old_value = *dbbuf_db; |
317 | *dbbuf_db = value; | 317 | *dbbuf_db = value; |
318 | 318 | ||
319 | /* | ||
320 | * Ensure that the doorbell is updated before reading the event | ||
321 | * index from memory. The controller needs to provide similar | ||
322 | * ordering to ensure the event index is updated before reading | ||
323 | * the doorbell. | ||
324 | */ | ||
325 | mb(); | ||
326 | |||
319 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) | 327 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) |
320 | return false; | 328 | return false; |
321 | } | 329 | } |
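The mb() added above orders the store to the shadow doorbell against the subsequent load of the event index; without a full barrier the CPU may complete the load before the store becomes visible. A generic, hypothetical sketch of the store-then-load ordering pattern (not NVMe-specific):

    #include <linux/compiler.h>
    #include <linux/types.h>
    #include <asm/barrier.h>

    /* Two locations shared with another agent (a device or another CPU). */
    static u32 shadow_doorbell;
    static u32 event_index;

    /* Illustrative: publish a new value, then decide whether the peer
     * still needs an explicit notification.
     */
    static bool example_need_notify(u32 new_value)
    {
            WRITE_ONCE(shadow_doorbell, new_value);         /* store */

            /* The store above must be globally visible before the load
             * below, mirroring the ordering the peer provides.
             */
            mb();

            return READ_ONCE(event_index) != new_value;     /* load */
    }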
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ebf3e7a6c49e..b5ec96abd048 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -1210,7 +1210,7 @@ static int __init nvmet_init(void) | |||
1210 | 1210 | ||
1211 | error = nvmet_init_discovery(); | 1211 | error = nvmet_init_discovery(); |
1212 | if (error) | 1212 | if (error) |
1213 | goto out; | 1213 | goto out_free_work_queue; |
1214 | 1214 | ||
1215 | error = nvmet_init_configfs(); | 1215 | error = nvmet_init_configfs(); |
1216 | if (error) | 1216 | if (error) |
@@ -1219,6 +1219,8 @@ static int __init nvmet_init(void) | |||
1219 | 1219 | ||
1220 | out_exit_discovery: | 1220 | out_exit_discovery: |
1221 | nvmet_exit_discovery(); | 1221 | nvmet_exit_discovery(); |
1222 | out_free_work_queue: | ||
1223 | destroy_workqueue(buffered_io_wq); | ||
1222 | out: | 1224 | out: |
1223 | return error; | 1225 | return error; |
1224 | } | 1226 | } |
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 34712def81b1..5251689a1d9a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c | |||
@@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work) | |||
311 | struct fcloop_tport *tport = tls_req->tport; | 311 | struct fcloop_tport *tport = tls_req->tport; |
312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; | 312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; |
313 | 313 | ||
314 | if (tport->remoteport) | 314 | if (!tport || tport->remoteport) |
315 | lsreq->done(lsreq, tls_req->status); | 315 | lsreq->done(lsreq, tls_req->status); |
316 | } | 316 | } |
317 | 317 | ||
@@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, | |||
329 | 329 | ||
330 | if (!rport->targetport) { | 330 | if (!rport->targetport) { |
331 | tls_req->status = -ECONNREFUSED; | 331 | tls_req->status = -ECONNREFUSED; |
332 | tls_req->tport = NULL; | ||
332 | schedule_work(&tls_req->work); | 333 | schedule_work(&tls_req->work); |
333 | return ret; | 334 | return ret; |
334 | } | 335 | } |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 466e3c8582f0..9095b8290150 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex); | |||
54 | */ | 54 | */ |
55 | DEFINE_RAW_SPINLOCK(devtree_lock); | 55 | DEFINE_RAW_SPINLOCK(devtree_lock); |
56 | 56 | ||
57 | bool of_node_name_eq(const struct device_node *np, const char *name) | ||
58 | { | ||
59 | const char *node_name; | ||
60 | size_t len; | ||
61 | |||
62 | if (!np) | ||
63 | return false; | ||
64 | |||
65 | node_name = kbasename(np->full_name); | ||
66 | len = strchrnul(node_name, '@') - node_name; | ||
67 | |||
68 | return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); | ||
69 | } | ||
70 | |||
71 | bool of_node_name_prefix(const struct device_node *np, const char *prefix) | ||
72 | { | ||
73 | if (!np) | ||
74 | return false; | ||
75 | |||
76 | return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; | ||
77 | } | ||
78 | |||
57 | int of_n_addr_cells(struct device_node *np) | 79 | int of_n_addr_cells(struct device_node *np) |
58 | { | 80 | { |
59 | u32 cells; | 81 | u32 cells; |
@@ -720,6 +742,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node, | |||
720 | EXPORT_SYMBOL(of_get_next_available_child); | 742 | EXPORT_SYMBOL(of_get_next_available_child); |
721 | 743 | ||
722 | /** | 744 | /** |
745 | * of_get_compatible_child - Find compatible child node | ||
746 | * @parent: parent node | ||
747 | * @compatible: compatible string | ||
748 | * | ||
749 | * Lookup child node whose compatible property contains the given compatible | ||
750 | * string. | ||
751 | * | ||
752 | * Returns a node pointer with refcount incremented, use of_node_put() on it | ||
753 | * when done; or NULL if not found. | ||
754 | */ | ||
755 | struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
756 | const char *compatible) | ||
757 | { | ||
758 | struct device_node *child; | ||
759 | |||
760 | for_each_child_of_node(parent, child) { | ||
761 | if (of_device_is_compatible(child, compatible)) | ||
762 | break; | ||
763 | } | ||
764 | |||
765 | return child; | ||
766 | } | ||
767 | EXPORT_SYMBOL(of_get_compatible_child); | ||
768 | |||
769 | /** | ||
723 | * of_get_child_by_name - Find the child node by name for a given parent | 770 | * of_get_child_by_name - Find the child node by name for a given parent |
724 | * @node: parent node | 771 | * @node: parent node |
725 | * @name: child name to look for. | 772 | * @name: child name to look for. |
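The two helpers added above have simple calling conventions: of_get_compatible_child() returns the child with its refcount raised, so callers pair it with of_node_put(), and of_node_name_eq() compares the node name while ignoring any unit address. A hypothetical usage sketch — the compatible string and helper name are invented for illustration:

    #include <linux/of.h>
    #include <linux/errno.h>
    #include <linux/printk.h>

    /* Illustrative: locate an optional sub-node by compatible string. */
    static int example_find_phy(struct device_node *parent)
    {
            struct device_node *child;

            child = of_get_compatible_child(parent, "vendor,foo-phy");
            if (!child)
                    return -ENODEV;

            if (of_node_name_eq(child, "phy"))  /* matches "phy" and "phy@0" */
                    pr_info("%pOF: phy sub-node found\n", child);

            of_node_put(child);     /* drop the reference taken above */
            return 0;
    }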
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 7ba90c290a42..6c59673933e9 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node, | |||
241 | if (!dev) | 241 | if (!dev) |
242 | goto err_clear_flag; | 242 | goto err_clear_flag; |
243 | 243 | ||
244 | /* AMBA devices only support a single DMA mask */ | ||
245 | dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
246 | dev->dev.dma_mask = &dev->dev.coherent_dma_mask; | ||
247 | |||
244 | /* setup generic device info */ | 248 | /* setup generic device info */ |
245 | dev->dev.of_node = of_node_get(node); | 249 | dev->dev.of_node = of_node_get(node); |
246 | dev->dev.fwnode = &node->fwnode; | 250 | dev->dev.fwnode = &node->fwnode; |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8fc851a9e116..7c097006c54d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT | |||
52 | default y | 52 | default y |
53 | depends on SCSI | 53 | depends on SCSI |
54 | ---help--- | 54 | ---help--- |
55 | This option enables the new blk-mq based I/O path for SCSI | 55 | This option enables the blk-mq based I/O path for SCSI devices by |
56 | devices by default. With the option the scsi_mod.use_blk_mq | 56 | default. With this option the scsi_mod.use_blk_mq module/boot |
57 | module/boot option defaults to Y, without it to N, but it can | 57 | option defaults to Y, without it to N, but it can still be |
58 | still be overridden either way. | 58 | overridden either way. |
59 | 59 | ||
60 | If unsure say N. | 60 | If unsure say Y. |
61 | 61 | ||
62 | config SCSI_PROC_FS | 62 | config SCSI_PROC_FS |
63 | bool "legacy /proc/scsi/ support" | 63 | bool "legacy /proc/scsi/ support" |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 29bf1e60f542..39eb415987fc 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
@@ -1346,7 +1346,7 @@ struct fib { | |||
1346 | struct aac_hba_map_info { | 1346 | struct aac_hba_map_info { |
1347 | __le32 rmw_nexus; /* nexus for native HBA devices */ | 1347 | __le32 rmw_nexus; /* nexus for native HBA devices */ |
1348 | u8 devtype; /* device type */ | 1348 | u8 devtype; /* device type */ |
1349 | u8 reset_state; /* 0 - no reset, 1..x - */ | 1349 | s8 reset_state; /* 0 - no reset, 1..x - */ |
1350 | /* after xth TM LUN reset */ | 1350 | /* after xth TM LUN reset */ |
1351 | u16 qd_limit; | 1351 | u16 qd_limit; |
1352 | u32 scan_counter; | 1352 | u32 scan_counter; |
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 23d07e9f87d0..e51923886475 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
@@ -1602,6 +1602,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) | |||
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | /** | 1604 | /** |
1605 | * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits | ||
1606 | * @caps32: a 32-bit Port Capabilities value | ||
1607 | * | ||
1608 | * Returns the equivalent 16-bit Port Capabilities value. Note that | ||
1609 | * not all 32-bit Port Capabilities can be represented in the 16-bit | ||
1610 | * Port Capabilities and some fields/values may not make it. | ||
1611 | */ | ||
1612 | fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) | ||
1613 | { | ||
1614 | fw_port_cap16_t caps16 = 0; | ||
1615 | |||
1616 | #define CAP32_TO_CAP16(__cap) \ | ||
1617 | do { \ | ||
1618 | if (caps32 & FW_PORT_CAP32_##__cap) \ | ||
1619 | caps16 |= FW_PORT_CAP_##__cap; \ | ||
1620 | } while (0) | ||
1621 | |||
1622 | CAP32_TO_CAP16(SPEED_100M); | ||
1623 | CAP32_TO_CAP16(SPEED_1G); | ||
1624 | CAP32_TO_CAP16(SPEED_10G); | ||
1625 | CAP32_TO_CAP16(SPEED_25G); | ||
1626 | CAP32_TO_CAP16(SPEED_40G); | ||
1627 | CAP32_TO_CAP16(SPEED_100G); | ||
1628 | CAP32_TO_CAP16(FC_RX); | ||
1629 | CAP32_TO_CAP16(FC_TX); | ||
1630 | CAP32_TO_CAP16(802_3_PAUSE); | ||
1631 | CAP32_TO_CAP16(802_3_ASM_DIR); | ||
1632 | CAP32_TO_CAP16(ANEG); | ||
1633 | CAP32_TO_CAP16(FORCE_PAUSE); | ||
1634 | CAP32_TO_CAP16(MDIAUTO); | ||
1635 | CAP32_TO_CAP16(MDISTRAIGHT); | ||
1636 | CAP32_TO_CAP16(FEC_RS); | ||
1637 | CAP32_TO_CAP16(FEC_BASER_RS); | ||
1638 | |||
1639 | #undef CAP32_TO_CAP16 | ||
1640 | |||
1641 | return caps16; | ||
1642 | } | ||
1643 | |||
1644 | /** | ||
1605 | * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities | 1645 | * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities |
1606 | * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value | 1646 | * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value |
1607 | * | 1647 | * |
@@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw) | |||
1759 | val = 1; | 1799 | val = 1; |
1760 | 1800 | ||
1761 | csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, | 1801 | csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, |
1762 | hw->pfn, 0, 1, ¶m, &val, false, | 1802 | hw->pfn, 0, 1, ¶m, &val, true, |
1763 | NULL); | 1803 | NULL); |
1764 | 1804 | ||
1765 | if (csio_mb_issue(hw, mbp)) { | 1805 | if (csio_mb_issue(hw, mbp)) { |
@@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw) | |||
1769 | return -EINVAL; | 1809 | return -EINVAL; |
1770 | } | 1810 | } |
1771 | 1811 | ||
1772 | csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, | 1812 | csio_mb_process_read_params_rsp(hw, mbp, &retval, |
1773 | &val); | 1813 | 0, NULL); |
1774 | if (retval != FW_SUCCESS) { | 1814 | fw_caps = retval ? FW_CAPS16 : FW_CAPS32; |
1775 | csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n", | ||
1776 | portid, retval); | ||
1777 | mempool_free(mbp, hw->mb_mempool); | ||
1778 | return -EINVAL; | ||
1779 | } | ||
1780 | |||
1781 | fw_caps = val; | ||
1782 | } | 1815 | } |
1783 | 1816 | ||
1784 | /* Read PORT information */ | 1817 | /* Read PORT information */ |
@@ -2364,8 +2397,8 @@ bye: | |||
2364 | } | 2397 | } |
2365 | 2398 | ||
2366 | /* | 2399 | /* |
2367 | * Returns -EINVAL if attempts to flash the firmware failed | 2400 | * Returns -EINVAL if attempts to flash the firmware failed, |
2368 | * else returns 0, | 2401 | * -ENOMEM if memory allocation failed else returns 0, |
2369 | * if flashing was not attempted because the card had the | 2402 | * if flashing was not attempted because the card had the |
2370 | * latest firmware ECANCELED is returned | 2403 | * latest firmware ECANCELED is returned |
2371 | */ | 2404 | */ |
@@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) | |||
2393 | return -EINVAL; | 2426 | return -EINVAL; |
2394 | } | 2427 | } |
2395 | 2428 | ||
2429 | /* allocate memory to read the header of the firmware on the | ||
2430 | * card | ||
2431 | */ | ||
2432 | card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); | ||
2433 | if (!card_fw) | ||
2434 | return -ENOMEM; | ||
2435 | |||
2396 | if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) | 2436 | if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) |
2397 | fw_bin_file = FW_FNAME_T5; | 2437 | fw_bin_file = FW_FNAME_T5; |
2398 | else | 2438 | else |
@@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) | |||
2406 | fw_size = fw->size; | 2446 | fw_size = fw->size; |
2407 | } | 2447 | } |
2408 | 2448 | ||
2409 | /* allocate memory to read the header of the firmware on the | ||
2410 | * card | ||
2411 | */ | ||
2412 | card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); | ||
2413 | |||
2414 | /* upgrade FW logic */ | 2449 | /* upgrade FW logic */ |
2415 | ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, | 2450 | ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, |
2416 | hw->fw_state, reset); | 2451 | hw->fw_state, reset); |
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 9e73ef771eb7..e351af6e7c81 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h | |||
@@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int, | |||
639 | 639 | ||
640 | fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); | 640 | fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); |
641 | fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); | 641 | fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); |
642 | fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32); | ||
642 | fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); | 643 | fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); |
643 | 644 | ||
644 | int csio_hw_start(struct csio_hw *); | 645 | int csio_hw_start(struct csio_hw *); |
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index c026417269c3..6f13673d6aa0 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c | |||
@@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | |||
368 | FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); | 368 | FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); |
369 | 369 | ||
370 | if (fw_caps == FW_CAPS16) | 370 | if (fw_caps == FW_CAPS16) |
371 | cmdp->u.l1cfg.rcap = cpu_to_be32(fc); | 371 | cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc)); |
372 | else | 372 | else |
373 | cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); | 373 | cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); |
374 | } | 374 | } |
@@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp, | |||
395 | *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); | 395 | *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); |
396 | *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); | 396 | *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); |
397 | } else { | 397 | } else { |
398 | *pcaps = ntohs(rsp->u.info32.pcaps32); | 398 | *pcaps = be32_to_cpu(rsp->u.info32.pcaps32); |
399 | *acaps = ntohs(rsp->u.info32.acaps32); | 399 | *acaps = be32_to_cpu(rsp->u.info32.acaps32); |
400 | } | 400 | } |
401 | } | 401 | } |
402 | } | 402 | } |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f02dcc875a09..ea4b0bb0c1cd 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) | |||
563 | } | 563 | } |
564 | EXPORT_SYMBOL(scsi_host_get); | 564 | EXPORT_SYMBOL(scsi_host_get); |
565 | 565 | ||
566 | struct scsi_host_mq_in_flight { | ||
567 | int cnt; | ||
568 | }; | ||
569 | |||
570 | static void scsi_host_check_in_flight(struct request *rq, void *data, | ||
571 | bool reserved) | ||
572 | { | ||
573 | struct scsi_host_mq_in_flight *in_flight = data; | ||
574 | |||
575 | if (blk_mq_request_started(rq)) | ||
576 | in_flight->cnt++; | ||
577 | } | ||
578 | |||
579 | /** | 566 | /** |
580 | * scsi_host_busy - Return the host busy counter | 567 | * scsi_host_busy - Return the host busy counter |
581 | * @shost: Pointer to Scsi_Host to inc. | 568 | * @shost: Pointer to Scsi_Host to inc. |
582 | **/ | 569 | **/ |
583 | int scsi_host_busy(struct Scsi_Host *shost) | 570 | int scsi_host_busy(struct Scsi_Host *shost) |
584 | { | 571 | { |
585 | struct scsi_host_mq_in_flight in_flight = { | 572 | return atomic_read(&shost->host_busy); |
586 | .cnt = 0, | ||
587 | }; | ||
588 | |||
589 | if (!shost->use_blk_mq) | ||
590 | return atomic_read(&shost->host_busy); | ||
591 | |||
592 | blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight, | ||
593 | &in_flight); | ||
594 | return in_flight.cnt; | ||
595 | } | 573 | } |
596 | EXPORT_SYMBOL(scsi_host_busy); | 574 | EXPORT_SYMBOL(scsi_host_busy); |
597 | 575 | ||
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 58bb70b886d7..c120929d4ffe 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = { | |||
976 | #endif | 976 | #endif |
977 | .sdev_attrs = hpsa_sdev_attrs, | 977 | .sdev_attrs = hpsa_sdev_attrs, |
978 | .shost_attrs = hpsa_shost_attrs, | 978 | .shost_attrs = hpsa_shost_attrs, |
979 | .max_sectors = 1024, | 979 | .max_sectors = 2048, |
980 | .no_write_same = 1, | 980 | .no_write_same = 1, |
981 | }; | 981 | }; |
982 | 982 | ||
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index e0d0da5f43d6..43732e8d1347 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -672,7 +672,7 @@ struct lpfc_hba { | |||
672 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ | 672 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ |
673 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ | 673 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ |
674 | #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ | 674 | #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ |
675 | #define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ | 675 | #define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */ |
676 | 676 | ||
677 | uint32_t hba_flag; /* hba generic flags */ | 677 | uint32_t hba_flag; /* hba generic flags */ |
678 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ | 678 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5a25553415f8..057a60abe664 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -5122,16 +5122,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); | |||
5122 | 5122 | ||
5123 | /* | 5123 | /* |
5124 | # lpfc_fdmi_on: Controls FDMI support. | 5124 | # lpfc_fdmi_on: Controls FDMI support. |
5125 | # 0 No FDMI support (default) | 5125 | # 0 No FDMI support |
5126 | # 1 Traditional FDMI support | 5126 | # 1 Traditional FDMI support (default) |
5127 | # Traditional FDMI support means the driver will assume FDMI-2 support; | 5127 | # Traditional FDMI support means the driver will assume FDMI-2 support; |
5128 | # however, if that fails, it will fallback to FDMI-1. | 5128 | # however, if that fails, it will fallback to FDMI-1. |
5129 | # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. | 5129 | # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. |
5130 | # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of | 5130 | # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of |
5131 | # lpfc_fdmi_on. | 5131 | # lpfc_fdmi_on. |
5132 | # Value range [0,1]. Default value is 0. | 5132 | # Value range [0,1]. Default value is 1. |
5133 | */ | 5133 | */ |
5134 | LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); | 5134 | LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support"); |
5135 | 5135 | ||
5136 | /* | 5136 | /* |
5137 | # Specifies the maximum number of ELS cmds we can have outstanding (for | 5137 | # Specifies the maximum number of ELS cmds we can have outstanding (for |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0adfb3bce0fd..eb97d2dd3651 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost) | |||
345 | unsigned long flags; | 345 | unsigned long flags; |
346 | 346 | ||
347 | rcu_read_lock(); | 347 | rcu_read_lock(); |
348 | if (!shost->use_blk_mq) | 348 | atomic_dec(&shost->host_busy); |
349 | atomic_dec(&shost->host_busy); | ||
350 | if (unlikely(scsi_host_in_recovery(shost))) { | 349 | if (unlikely(scsi_host_in_recovery(shost))) { |
351 | spin_lock_irqsave(shost->host_lock, flags); | 350 | spin_lock_irqsave(shost->host_lock, flags); |
352 | if (shost->host_failed || shost->host_eh_scheduled) | 351 | if (shost->host_failed || shost->host_eh_scheduled) |
@@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget) | |||
445 | 444 | ||
446 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) | 445 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) |
447 | { | 446 | { |
448 | /* | 447 | if (shost->can_queue > 0 && |
449 | * blk-mq can handle host queue busy efficiently via host-wide driver | ||
450 | * tag allocation | ||
451 | */ | ||
452 | |||
453 | if (!shost->use_blk_mq && shost->can_queue > 0 && | ||
454 | atomic_read(&shost->host_busy) >= shost->can_queue) | 448 | atomic_read(&shost->host_busy) >= shost->can_queue) |
455 | return true; | 449 | return true; |
456 | if (atomic_read(&shost->host_blocked) > 0) | 450 | if (atomic_read(&shost->host_blocked) > 0) |
@@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, | |||
1606 | if (scsi_host_in_recovery(shost)) | 1600 | if (scsi_host_in_recovery(shost)) |
1607 | return 0; | 1601 | return 0; |
1608 | 1602 | ||
1609 | if (!shost->use_blk_mq) | 1603 | busy = atomic_inc_return(&shost->host_busy) - 1; |
1610 | busy = atomic_inc_return(&shost->host_busy) - 1; | ||
1611 | else | ||
1612 | busy = 0; | ||
1613 | if (atomic_read(&shost->host_blocked) > 0) { | 1604 | if (atomic_read(&shost->host_blocked) > 0) { |
1614 | if (busy) | 1605 | if (busy) |
1615 | goto starved; | 1606 | goto starved; |
@@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, | |||
1625 | "unblocking host at zero depth\n")); | 1616 | "unblocking host at zero depth\n")); |
1626 | } | 1617 | } |
1627 | 1618 | ||
1628 | if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue) | 1619 | if (shost->can_queue > 0 && busy >= shost->can_queue) |
1629 | goto starved; | 1620 | goto starved; |
1630 | if (shost->host_self_blocked) | 1621 | if (shost->host_self_blocked) |
1631 | goto starved; | 1622 | goto starved; |
@@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
1711 | * with the locks as normal issue path does. | 1702 | * with the locks as normal issue path does. |
1712 | */ | 1703 | */ |
1713 | atomic_inc(&sdev->device_busy); | 1704 | atomic_inc(&sdev->device_busy); |
1714 | 1705 | atomic_inc(&shost->host_busy); | |
1715 | if (!shost->use_blk_mq) | ||
1716 | atomic_inc(&shost->host_busy); | ||
1717 | if (starget->can_queue > 0) | 1706 | if (starget->can_queue > 0) |
1718 | atomic_inc(&starget->target_busy); | 1707 | atomic_inc(&starget->target_busy); |
1719 | 1708 | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c index 768cce0ccb80..76a262674c8d 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c | |||
@@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo, | |||
207 | ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); | 207 | ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); |
208 | sgl->offset = sg_offset; | 208 | sgl->offset = sg_offset; |
209 | if (!ret) { | 209 | if (!ret) { |
210 | pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", | 210 | pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", |
211 | __func__, 0, xferlen, sgcnt); | 211 | __func__, 0, xferlen, sgcnt); |
212 | goto rel_ppods; | 212 | goto rel_ppods; |
213 | } | 213 | } |
214 | 214 | ||
@@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
250 | 250 | ||
251 | ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); | 251 | ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); |
252 | if (ret < 0) { | 252 | if (ret < 0) { |
253 | pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", | 253 | pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", |
254 | csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); | 254 | csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); |
255 | 255 | ||
256 | ttinfo->sgl = NULL; | 256 | ttinfo->sgl = NULL; |
257 | ttinfo->nents = 0; | 257 | ttinfo->nents = 0; |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 977a8307fbb1..4f2816559205 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
@@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz, | |||
260 | 260 | ||
261 | mutex_lock(&tz->lock); | 261 | mutex_lock(&tz->lock); |
262 | 262 | ||
263 | if (mode == THERMAL_DEVICE_ENABLED) | 263 | if (mode == THERMAL_DEVICE_ENABLED) { |
264 | tz->polling_delay = data->polling_delay; | 264 | tz->polling_delay = data->polling_delay; |
265 | else | 265 | tz->passive_delay = data->passive_delay; |
266 | } else { | ||
266 | tz->polling_delay = 0; | 267 | tz->polling_delay = 0; |
268 | tz->passive_delay = 0; | ||
269 | } | ||
267 | 270 | ||
268 | mutex_unlock(&tz->lock); | 271 | mutex_unlock(&tz->lock); |
269 | 272 | ||
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index c866cc165960..450ed66edf58 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c | |||
@@ -1,16 +1,6 @@ | |||
1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | * Copyright 2016 Freescale Semiconductor, Inc. | 2 | // |
3 | * | 3 | // Copyright 2016 Freescale Semiconductor, Inc. |
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | */ | ||
14 | 4 | ||
15 | #include <linux/module.h> | 5 | #include <linux/module.h> |
16 | #include <linux/platform_device.h> | 6 | #include <linux/platform_device.h> |
@@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
197 | int ret; | 187 | int ret; |
198 | struct qoriq_tmu_data *data; | 188 | struct qoriq_tmu_data *data; |
199 | struct device_node *np = pdev->dev.of_node; | 189 | struct device_node *np = pdev->dev.of_node; |
200 | u32 site = 0; | 190 | u32 site; |
201 | 191 | ||
202 | if (!np) { | 192 | if (!np) { |
203 | dev_err(&pdev->dev, "Device OF-Node is NULL"); | 193 | dev_err(&pdev->dev, "Device OF-Node is NULL"); |
@@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
233 | if (ret < 0) | 223 | if (ret < 0) |
234 | goto err_tmu; | 224 | goto err_tmu; |
235 | 225 | ||
236 | data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id, | 226 | data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, |
237 | data, &tmu_tz_ops); | 227 | data->sensor_id, |
228 | data, &tmu_tz_ops); | ||
238 | if (IS_ERR(data->tz)) { | 229 | if (IS_ERR(data->tz)) { |
239 | ret = PTR_ERR(data->tz); | 230 | ret = PTR_ERR(data->tz); |
240 | dev_err(&pdev->dev, | 231 | dev_err(&pdev->dev, |
@@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
243 | } | 234 | } |
244 | 235 | ||
245 | /* Enable monitoring */ | 236 | /* Enable monitoring */ |
246 | site |= 0x1 << (15 - data->sensor_id); | 237 | site = 0x1 << (15 - data->sensor_id); |
247 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); | 238 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); |
248 | 239 | ||
249 | return 0; | 240 | return 0; |
@@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev) | |||
261 | { | 252 | { |
262 | struct qoriq_tmu_data *data = platform_get_drvdata(pdev); | 253 | struct qoriq_tmu_data *data = platform_get_drvdata(pdev); |
263 | 254 | ||
264 | thermal_zone_of_sensor_unregister(&pdev->dev, data->tz); | ||
265 | |||
266 | /* Disable monitoring */ | 255 | /* Disable monitoring */ |
267 | tmu_write(data, TMR_DISABLE, &data->regs->tmr); | 256 | tmu_write(data, TMR_DISABLE, &data->regs->tmr); |
268 | 257 | ||
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 766521eb7071..7aed5337bdd3 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c | |||
@@ -1,19 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * R-Car Gen3 THS thermal sensor driver | 3 | * R-Car Gen3 THS thermal sensor driver |
3 | * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. | 4 | * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. |
4 | * | 5 | * |
5 | * Copyright (C) 2016 Renesas Electronics Corporation. | 6 | * Copyright (C) 2016 Renesas Electronics Corporation. |
6 | * Copyright (C) 2016 Sang Engineering | 7 | * Copyright (C) 2016 Sang Engineering |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | */ | 8 | */ |
18 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
19 | #include <linux/err.h> | 10 | #include <linux/err.h> |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index e77e63070e99..78f932822d38 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
@@ -1,21 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * R-Car THS/TSC thermal sensor driver | 3 | * R-Car THS/TSC thermal sensor driver |
3 | * | 4 | * |
4 | * Copyright (C) 2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2012 Renesas Solutions Corp. |
5 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | 6 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; version 2 of the License. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along | ||
17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
19 | */ | 7 | */ |
20 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
21 | #include <linux/err.h> | 9 | #include <linux/err.h> |
@@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = { | |||
660 | }; | 648 | }; |
661 | module_platform_driver(rcar_thermal_driver); | 649 | module_platform_driver(rcar_thermal_driver); |
662 | 650 | ||
663 | MODULE_LICENSE("GPL"); | 651 | MODULE_LICENSE("GPL v2"); |
664 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); | 652 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); |
665 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); | 653 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 96c1d8400822..b13c6b4b2c66 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d, | |||
952 | list_for_each_entry_safe(node, n, &d->pending_list, node) { | 952 | list_for_each_entry_safe(node, n, &d->pending_list, node) { |
953 | struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; | 953 | struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; |
954 | if (msg->iova <= vq_msg->iova && | 954 | if (msg->iova <= vq_msg->iova && |
955 | msg->iova + msg->size - 1 > vq_msg->iova && | 955 | msg->iova + msg->size - 1 >= vq_msg->iova && |
956 | vq_msg->type == VHOST_IOTLB_MISS) { | 956 | vq_msg->type == VHOST_IOTLB_MISS) { |
957 | vhost_poll_queue(&node->vq->poll); | 957 | vhost_poll_queue(&node->vq->poll); |
958 | list_del(&node->node); | 958 | list_del(&node->node); |
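The vhost hunk above widens the end-of-range comparison from '>' to '>='. A standalone sketch of that interval check (not kernel code; the helper name and the example addresses are made up) shows why the strict comparison failed to match a pending IOTLB miss sitting on the last byte of the updated range:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does an IOTLB update covering [iova, iova + size - 1] satisfy a
 * pending miss at miss_iova?  'inclusive_end' selects the new check. */
static bool update_covers_miss(uint64_t iova, uint64_t size,
			       uint64_t miss_iova, bool inclusive_end)
{
	uint64_t last = iova + size - 1;

	return iova <= miss_iova &&
	       (inclusive_end ? last >= miss_iova : last > miss_iova);
}

int main(void)
{
	/* A one-byte mapping at 0x1000: the miss at 0x1000 is covered,
	 * but the old strict check reported no overlap. */
	printf("old check: %d\n", update_covers_miss(0x1000, 1, 0x1000, false));
	printf("new check: %d\n", update_covers_miss(0x1000, 1, 0x1000, true));
	return 0;
}

With a one-byte update the old predicate evaluates 0x1000 <= 0x1000 && 0x1000 > 0x1000, which is false, so the waiter was never woken.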
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index f2088838f690..5b471889d723 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev, | |||
402 | } | 402 | } |
403 | static DEVICE_ATTR_RO(modalias); | 403 | static DEVICE_ATTR_RO(modalias); |
404 | 404 | ||
405 | static ssize_t state_show(struct device *dev, | ||
406 | struct device_attribute *attr, char *buf) | ||
407 | { | ||
408 | return sprintf(buf, "%s\n", | ||
409 | xenbus_strstate(to_xenbus_device(dev)->state)); | ||
410 | } | ||
411 | static DEVICE_ATTR_RO(state); | ||
412 | |||
405 | static struct attribute *xenbus_dev_attrs[] = { | 413 | static struct attribute *xenbus_dev_attrs[] = { |
406 | &dev_attr_nodename.attr, | 414 | &dev_attr_nodename.attr, |
407 | &dev_attr_devtype.attr, | 415 | &dev_attr_devtype.attr, |
408 | &dev_attr_modalias.attr, | 416 | &dev_attr_modalias.attr, |
417 | &dev_attr_state.attr, | ||
409 | NULL, | 418 | NULL, |
410 | }; | 419 | }; |
411 | 420 | ||
diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 0c3285c8db95..476dcbb79713 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c | |||
@@ -98,13 +98,13 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size) | |||
98 | goto inval; | 98 | goto inval; |
99 | 99 | ||
100 | args = strchr(name, ' '); | 100 | args = strchr(name, ' '); |
101 | if (!args) | 101 | if (args) { |
102 | goto inval; | 102 | do { |
103 | do { | 103 | *args++ = 0; |
104 | *args++ = 0; | 104 | } while(*args == ' '); |
105 | } while(*args == ' '); | 105 | if (!*args) |
106 | if (!*args) | 106 | goto inval; |
107 | goto inval; | 107 | } |
108 | 108 | ||
109 | /* determine command to perform */ | 109 | /* determine command to perform */ |
110 | _debug("cmd=%s name=%s args=%s", buf, name, args); | 110 | _debug("cmd=%s name=%s args=%s", buf, name, args); |
@@ -120,7 +120,6 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size) | |||
120 | 120 | ||
121 | if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags)) | 121 | if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags)) |
122 | afs_put_cell(net, cell); | 122 | afs_put_cell(net, cell); |
123 | printk("kAFS: Added new cell '%s'\n", name); | ||
124 | } else { | 123 | } else { |
125 | goto inval; | 124 | goto inval; |
126 | } | 125 | } |
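A standalone sketch of the name/argument split this hunk implements (a hypothetical helper, not the kernel function): the argument part is now optional, but if a separator is present it must be followed by something other than spaces:

#include <stdio.h>
#include <string.h>

/* Split "name [args]" in place.  Returns 0 on success, -1 if a
 * separator is present but only spaces follow it. */
static int split_cell_line(char *line, char **name, char **args)
{
	char *p = strchr(line, ' ');

	*name = line;
	*args = NULL;
	if (p) {
		do {
			*p++ = '\0';	/* terminate name, skip blanks */
		} while (*p == ' ');
		if (!*p)
			return -1;	/* trailing spaces only */
		*args = p;
	}
	return 0;
}

int main(void)
{
	char a[] = "example.org 203.0.113.1 203.0.113.2";
	char b[] = "example.org";
	char *name, *args;

	if (!split_cell_line(a, &name, &args))
		printf("name=%s args=%s\n", name, args);
	if (!split_cell_line(b, &name, &args))
		printf("name=%s args=%s\n", name, args ? args : "(none)");
	return 0;
}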
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 53af9f5253f4..2cddfe7806a4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -1280,6 +1280,7 @@ struct btrfs_root { | |||
1280 | int send_in_progress; | 1280 | int send_in_progress; |
1281 | struct btrfs_subvolume_writers *subv_writers; | 1281 | struct btrfs_subvolume_writers *subv_writers; |
1282 | atomic_t will_be_snapshotted; | 1282 | atomic_t will_be_snapshotted; |
1283 | atomic_t snapshot_force_cow; | ||
1283 | 1284 | ||
1284 | /* For qgroup metadata reserved space */ | 1285 | /* For qgroup metadata reserved space */ |
1285 | spinlock_t qgroup_meta_rsv_lock; | 1286 | spinlock_t qgroup_meta_rsv_lock; |
@@ -3390,9 +3391,9 @@ do { \ | |||
3390 | #define btrfs_debug(fs_info, fmt, args...) \ | 3391 | #define btrfs_debug(fs_info, fmt, args...) \ |
3391 | btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) | 3392 | btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) |
3392 | #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ | 3393 | #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ |
3393 | btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) | 3394 | btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) |
3394 | #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ | 3395 | #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ |
3395 | btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) | 3396 | btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) |
3396 | #define btrfs_debug_rl(fs_info, fmt, args...) \ | 3397 | #define btrfs_debug_rl(fs_info, fmt, args...) \ |
3397 | btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) | 3398 | btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) |
3398 | #endif | 3399 | #endif |
@@ -3404,6 +3405,13 @@ do { \ | |||
3404 | rcu_read_unlock(); \ | 3405 | rcu_read_unlock(); \ |
3405 | } while (0) | 3406 | } while (0) |
3406 | 3407 | ||
3408 | #define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \ | ||
3409 | do { \ | ||
3410 | rcu_read_lock(); \ | ||
3411 | btrfs_no_printk(fs_info, fmt, ##args); \ | ||
3412 | rcu_read_unlock(); \ | ||
3413 | } while (0) | ||
3414 | |||
3407 | #define btrfs_printk_ratelimited(fs_info, fmt, args...) \ | 3415 | #define btrfs_printk_ratelimited(fs_info, fmt, args...) \ |
3408 | do { \ | 3416 | do { \ |
3409 | static DEFINE_RATELIMIT_STATE(_rs, \ | 3417 | static DEFINE_RATELIMIT_STATE(_rs, \ |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5124c15705ce..05dc3c17cb62 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -1187,6 +1187,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, | |||
1187 | atomic_set(&root->log_batch, 0); | 1187 | atomic_set(&root->log_batch, 0); |
1188 | refcount_set(&root->refs, 1); | 1188 | refcount_set(&root->refs, 1); |
1189 | atomic_set(&root->will_be_snapshotted, 0); | 1189 | atomic_set(&root->will_be_snapshotted, 0); |
1190 | atomic_set(&root->snapshot_force_cow, 0); | ||
1190 | root->log_transid = 0; | 1191 | root->log_transid = 0; |
1191 | root->log_transid_committed = -1; | 1192 | root->log_transid_committed = -1; |
1192 | root->last_log_commit = 0; | 1193 | root->last_log_commit = 0; |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index de6f75f5547b..2d9074295d7f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -5800,7 +5800,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) | |||
5800 | * root: the root of the parent directory | 5800 | * root: the root of the parent directory |
5801 | * rsv: block reservation | 5801 | * rsv: block reservation |
5802 | * items: the number of items we need to reserve space for | 5802 | * items: the number of items we need to reserve space for |
5803 | * qgroup_reserved: used to return the reserved size in qgroup | 5803 | * use_global_rsv: allow fallback to the global block reservation |
5804 | * | 5804 | * |
5805 | * This function is used to reserve the space for snapshot/subvolume | 5805 | * This function is used to reserve the space for snapshot/subvolume |
5806 | * creation and deletion. Those operations are different with the | 5806 | * creation and deletion. Those operations are different with the |
@@ -5810,10 +5810,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) | |||
5810 | * the space reservation mechanism in start_transaction(). | 5810 | * the space reservation mechanism in start_transaction(). |
5811 | */ | 5811 | */ |
5812 | int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, | 5812 | int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, |
5813 | struct btrfs_block_rsv *rsv, | 5813 | struct btrfs_block_rsv *rsv, int items, |
5814 | int items, | ||
5815 | bool use_global_rsv) | 5814 | bool use_global_rsv) |
5816 | { | 5815 | { |
5816 | u64 qgroup_num_bytes = 0; | ||
5817 | u64 num_bytes; | 5817 | u64 num_bytes; |
5818 | int ret; | 5818 | int ret; |
5819 | struct btrfs_fs_info *fs_info = root->fs_info; | 5819 | struct btrfs_fs_info *fs_info = root->fs_info; |
@@ -5821,12 +5821,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, | |||
5821 | 5821 | ||
5822 | if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { | 5822 | if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { |
5823 | /* One for parent inode, two for dir entries */ | 5823 | /* One for parent inode, two for dir entries */ |
5824 | num_bytes = 3 * fs_info->nodesize; | 5824 | qgroup_num_bytes = 3 * fs_info->nodesize; |
5825 | ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); | 5825 | ret = btrfs_qgroup_reserve_meta_prealloc(root, |
5826 | qgroup_num_bytes, true); | ||
5826 | if (ret) | 5827 | if (ret) |
5827 | return ret; | 5828 | return ret; |
5828 | } else { | ||
5829 | num_bytes = 0; | ||
5830 | } | 5829 | } |
5831 | 5830 | ||
5832 | num_bytes = btrfs_calc_trans_metadata_size(fs_info, items); | 5831 | num_bytes = btrfs_calc_trans_metadata_size(fs_info, items); |
@@ -5838,8 +5837,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, | |||
5838 | if (ret == -ENOSPC && use_global_rsv) | 5837 | if (ret == -ENOSPC && use_global_rsv) |
5839 | ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1); | 5838 | ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1); |
5840 | 5839 | ||
5841 | if (ret && num_bytes) | 5840 | if (ret && qgroup_num_bytes) |
5842 | btrfs_qgroup_free_meta_prealloc(root, num_bytes); | 5841 | btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes); |
5843 | 5842 | ||
5844 | return ret; | 5843 | return ret; |
5845 | } | 5844 | } |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9357a19d2bff..3ea5339603cf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -1271,7 +1271,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1271 | u64 disk_num_bytes; | 1271 | u64 disk_num_bytes; |
1272 | u64 ram_bytes; | 1272 | u64 ram_bytes; |
1273 | int extent_type; | 1273 | int extent_type; |
1274 | int ret, err; | 1274 | int ret; |
1275 | int type; | 1275 | int type; |
1276 | int nocow; | 1276 | int nocow; |
1277 | int check_prev = 1; | 1277 | int check_prev = 1; |
@@ -1403,11 +1403,8 @@ next_slot: | |||
1403 | * if there are pending snapshots for this root, | 1403 | * if there are pending snapshots for this root, |
1404 | * we fall into common COW way. | 1404 | * we fall into common COW way. |
1405 | */ | 1405 | */ |
1406 | if (!nolock) { | 1406 | if (!nolock && atomic_read(&root->snapshot_force_cow)) |
1407 | err = btrfs_start_write_no_snapshotting(root); | 1407 | goto out_check; |
1408 | if (!err) | ||
1409 | goto out_check; | ||
1410 | } | ||
1411 | /* | 1408 | /* |
1412 | * force cow if csum exists in the range. | 1409 | * force cow if csum exists in the range. |
1413 | * this ensure that csum for a given extent are | 1410 | * this ensure that csum for a given extent are |
@@ -1416,9 +1413,6 @@ next_slot: | |||
1416 | ret = csum_exist_in_range(fs_info, disk_bytenr, | 1413 | ret = csum_exist_in_range(fs_info, disk_bytenr, |
1417 | num_bytes); | 1414 | num_bytes); |
1418 | if (ret) { | 1415 | if (ret) { |
1419 | if (!nolock) | ||
1420 | btrfs_end_write_no_snapshotting(root); | ||
1421 | |||
1422 | /* | 1416 | /* |
1423 | * ret could be -EIO if the above fails to read | 1417 | * ret could be -EIO if the above fails to read |
1424 | * metadata. | 1418 | * metadata. |
@@ -1431,11 +1425,8 @@ next_slot: | |||
1431 | WARN_ON_ONCE(nolock); | 1425 | WARN_ON_ONCE(nolock); |
1432 | goto out_check; | 1426 | goto out_check; |
1433 | } | 1427 | } |
1434 | if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) { | 1428 | if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) |
1435 | if (!nolock) | ||
1436 | btrfs_end_write_no_snapshotting(root); | ||
1437 | goto out_check; | 1429 | goto out_check; |
1438 | } | ||
1439 | nocow = 1; | 1430 | nocow = 1; |
1440 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | 1431 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
1441 | extent_end = found_key.offset + | 1432 | extent_end = found_key.offset + |
@@ -1448,8 +1439,6 @@ next_slot: | |||
1448 | out_check: | 1439 | out_check: |
1449 | if (extent_end <= start) { | 1440 | if (extent_end <= start) { |
1450 | path->slots[0]++; | 1441 | path->slots[0]++; |
1451 | if (!nolock && nocow) | ||
1452 | btrfs_end_write_no_snapshotting(root); | ||
1453 | if (nocow) | 1442 | if (nocow) |
1454 | btrfs_dec_nocow_writers(fs_info, disk_bytenr); | 1443 | btrfs_dec_nocow_writers(fs_info, disk_bytenr); |
1455 | goto next_slot; | 1444 | goto next_slot; |
@@ -1471,8 +1460,6 @@ out_check: | |||
1471 | end, page_started, nr_written, 1, | 1460 | end, page_started, nr_written, 1, |
1472 | NULL); | 1461 | NULL); |
1473 | if (ret) { | 1462 | if (ret) { |
1474 | if (!nolock && nocow) | ||
1475 | btrfs_end_write_no_snapshotting(root); | ||
1476 | if (nocow) | 1463 | if (nocow) |
1477 | btrfs_dec_nocow_writers(fs_info, | 1464 | btrfs_dec_nocow_writers(fs_info, |
1478 | disk_bytenr); | 1465 | disk_bytenr); |
@@ -1492,8 +1479,6 @@ out_check: | |||
1492 | ram_bytes, BTRFS_COMPRESS_NONE, | 1479 | ram_bytes, BTRFS_COMPRESS_NONE, |
1493 | BTRFS_ORDERED_PREALLOC); | 1480 | BTRFS_ORDERED_PREALLOC); |
1494 | if (IS_ERR(em)) { | 1481 | if (IS_ERR(em)) { |
1495 | if (!nolock && nocow) | ||
1496 | btrfs_end_write_no_snapshotting(root); | ||
1497 | if (nocow) | 1482 | if (nocow) |
1498 | btrfs_dec_nocow_writers(fs_info, | 1483 | btrfs_dec_nocow_writers(fs_info, |
1499 | disk_bytenr); | 1484 | disk_bytenr); |
@@ -1532,8 +1517,6 @@ out_check: | |||
1532 | EXTENT_CLEAR_DATA_RESV, | 1517 | EXTENT_CLEAR_DATA_RESV, |
1533 | PAGE_UNLOCK | PAGE_SET_PRIVATE2); | 1518 | PAGE_UNLOCK | PAGE_SET_PRIVATE2); |
1534 | 1519 | ||
1535 | if (!nolock && nocow) | ||
1536 | btrfs_end_write_no_snapshotting(root); | ||
1537 | cur_offset = extent_end; | 1520 | cur_offset = extent_end; |
1538 | 1521 | ||
1539 | /* | 1522 | /* |
@@ -6639,6 +6622,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |||
6639 | drop_inode = 1; | 6622 | drop_inode = 1; |
6640 | } else { | 6623 | } else { |
6641 | struct dentry *parent = dentry->d_parent; | 6624 | struct dentry *parent = dentry->d_parent; |
6625 | int ret; | ||
6626 | |||
6642 | err = btrfs_update_inode(trans, root, inode); | 6627 | err = btrfs_update_inode(trans, root, inode); |
6643 | if (err) | 6628 | if (err) |
6644 | goto fail; | 6629 | goto fail; |
@@ -6652,7 +6637,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |||
6652 | goto fail; | 6637 | goto fail; |
6653 | } | 6638 | } |
6654 | d_instantiate(dentry, inode); | 6639 | d_instantiate(dentry, inode); |
6655 | btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent); | 6640 | ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent, |
6641 | true, NULL); | ||
6642 | if (ret == BTRFS_NEED_TRANS_COMMIT) { | ||
6643 | err = btrfs_commit_transaction(trans); | ||
6644 | trans = NULL; | ||
6645 | } | ||
6656 | } | 6646 | } |
6657 | 6647 | ||
6658 | fail: | 6648 | fail: |
@@ -9388,14 +9378,21 @@ static int btrfs_rename_exchange(struct inode *old_dir, | |||
9388 | u64 new_idx = 0; | 9378 | u64 new_idx = 0; |
9389 | u64 root_objectid; | 9379 | u64 root_objectid; |
9390 | int ret; | 9380 | int ret; |
9391 | int ret2; | ||
9392 | bool root_log_pinned = false; | 9381 | bool root_log_pinned = false; |
9393 | bool dest_log_pinned = false; | 9382 | bool dest_log_pinned = false; |
9383 | struct btrfs_log_ctx ctx_root; | ||
9384 | struct btrfs_log_ctx ctx_dest; | ||
9385 | bool sync_log_root = false; | ||
9386 | bool sync_log_dest = false; | ||
9387 | bool commit_transaction = false; | ||
9394 | 9388 | ||
9395 | /* we only allow rename subvolume link between subvolumes */ | 9389 | /* we only allow rename subvolume link between subvolumes */ |
9396 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) | 9390 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) |
9397 | return -EXDEV; | 9391 | return -EXDEV; |
9398 | 9392 | ||
9393 | btrfs_init_log_ctx(&ctx_root, old_inode); | ||
9394 | btrfs_init_log_ctx(&ctx_dest, new_inode); | ||
9395 | |||
9399 | /* close the race window with snapshot create/destroy ioctl */ | 9396 | /* close the race window with snapshot create/destroy ioctl */ |
9400 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) | 9397 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
9401 | down_read(&fs_info->subvol_sem); | 9398 | down_read(&fs_info->subvol_sem); |
@@ -9542,15 +9539,29 @@ static int btrfs_rename_exchange(struct inode *old_dir, | |||
9542 | 9539 | ||
9543 | if (root_log_pinned) { | 9540 | if (root_log_pinned) { |
9544 | parent = new_dentry->d_parent; | 9541 | parent = new_dentry->d_parent; |
9545 | btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), | 9542 | ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), |
9546 | parent); | 9543 | BTRFS_I(old_dir), parent, |
9544 | false, &ctx_root); | ||
9545 | if (ret == BTRFS_NEED_LOG_SYNC) | ||
9546 | sync_log_root = true; | ||
9547 | else if (ret == BTRFS_NEED_TRANS_COMMIT) | ||
9548 | commit_transaction = true; | ||
9549 | ret = 0; | ||
9547 | btrfs_end_log_trans(root); | 9550 | btrfs_end_log_trans(root); |
9548 | root_log_pinned = false; | 9551 | root_log_pinned = false; |
9549 | } | 9552 | } |
9550 | if (dest_log_pinned) { | 9553 | if (dest_log_pinned) { |
9551 | parent = old_dentry->d_parent; | 9554 | if (!commit_transaction) { |
9552 | btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir), | 9555 | parent = old_dentry->d_parent; |
9553 | parent); | 9556 | ret = btrfs_log_new_name(trans, BTRFS_I(new_inode), |
9557 | BTRFS_I(new_dir), parent, | ||
9558 | false, &ctx_dest); | ||
9559 | if (ret == BTRFS_NEED_LOG_SYNC) | ||
9560 | sync_log_dest = true; | ||
9561 | else if (ret == BTRFS_NEED_TRANS_COMMIT) | ||
9562 | commit_transaction = true; | ||
9563 | ret = 0; | ||
9564 | } | ||
9554 | btrfs_end_log_trans(dest); | 9565 | btrfs_end_log_trans(dest); |
9555 | dest_log_pinned = false; | 9566 | dest_log_pinned = false; |
9556 | } | 9567 | } |
@@ -9583,8 +9594,26 @@ out_fail: | |||
9583 | dest_log_pinned = false; | 9594 | dest_log_pinned = false; |
9584 | } | 9595 | } |
9585 | } | 9596 | } |
9586 | ret2 = btrfs_end_transaction(trans); | 9597 | if (!ret && sync_log_root && !commit_transaction) { |
9587 | ret = ret ? ret : ret2; | 9598 | ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, |
9599 | &ctx_root); | ||
9600 | if (ret) | ||
9601 | commit_transaction = true; | ||
9602 | } | ||
9603 | if (!ret && sync_log_dest && !commit_transaction) { | ||
9604 | ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root, | ||
9605 | &ctx_dest); | ||
9606 | if (ret) | ||
9607 | commit_transaction = true; | ||
9608 | } | ||
9609 | if (commit_transaction) { | ||
9610 | ret = btrfs_commit_transaction(trans); | ||
9611 | } else { | ||
9612 | int ret2; | ||
9613 | |||
9614 | ret2 = btrfs_end_transaction(trans); | ||
9615 | ret = ret ? ret : ret2; | ||
9616 | } | ||
9588 | out_notrans: | 9617 | out_notrans: |
9589 | if (new_ino == BTRFS_FIRST_FREE_OBJECTID) | 9618 | if (new_ino == BTRFS_FIRST_FREE_OBJECTID) |
9590 | up_read(&fs_info->subvol_sem); | 9619 | up_read(&fs_info->subvol_sem); |
@@ -9661,6 +9690,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
9661 | int ret; | 9690 | int ret; |
9662 | u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); | 9691 | u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); |
9663 | bool log_pinned = false; | 9692 | bool log_pinned = false; |
9693 | struct btrfs_log_ctx ctx; | ||
9694 | bool sync_log = false; | ||
9695 | bool commit_transaction = false; | ||
9664 | 9696 | ||
9665 | if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) | 9697 | if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) |
9666 | return -EPERM; | 9698 | return -EPERM; |
@@ -9818,8 +9850,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
9818 | if (log_pinned) { | 9850 | if (log_pinned) { |
9819 | struct dentry *parent = new_dentry->d_parent; | 9851 | struct dentry *parent = new_dentry->d_parent; |
9820 | 9852 | ||
9821 | btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), | 9853 | btrfs_init_log_ctx(&ctx, old_inode); |
9822 | parent); | 9854 | ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), |
9855 | BTRFS_I(old_dir), parent, | ||
9856 | false, &ctx); | ||
9857 | if (ret == BTRFS_NEED_LOG_SYNC) | ||
9858 | sync_log = true; | ||
9859 | else if (ret == BTRFS_NEED_TRANS_COMMIT) | ||
9860 | commit_transaction = true; | ||
9861 | ret = 0; | ||
9823 | btrfs_end_log_trans(root); | 9862 | btrfs_end_log_trans(root); |
9824 | log_pinned = false; | 9863 | log_pinned = false; |
9825 | } | 9864 | } |
@@ -9856,7 +9895,19 @@ out_fail: | |||
9856 | btrfs_end_log_trans(root); | 9895 | btrfs_end_log_trans(root); |
9857 | log_pinned = false; | 9896 | log_pinned = false; |
9858 | } | 9897 | } |
9859 | btrfs_end_transaction(trans); | 9898 | if (!ret && sync_log) { |
9899 | ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx); | ||
9900 | if (ret) | ||
9901 | commit_transaction = true; | ||
9902 | } | ||
9903 | if (commit_transaction) { | ||
9904 | ret = btrfs_commit_transaction(trans); | ||
9905 | } else { | ||
9906 | int ret2; | ||
9907 | |||
9908 | ret2 = btrfs_end_transaction(trans); | ||
9909 | ret = ret ? ret : ret2; | ||
9910 | } | ||
9860 | out_notrans: | 9911 | out_notrans: |
9861 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) | 9912 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
9862 | up_read(&fs_info->subvol_sem); | 9913 | up_read(&fs_info->subvol_sem); |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 63600dc2ac4c..d60b6caf09e8 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -747,6 +747,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, | |||
747 | struct btrfs_pending_snapshot *pending_snapshot; | 747 | struct btrfs_pending_snapshot *pending_snapshot; |
748 | struct btrfs_trans_handle *trans; | 748 | struct btrfs_trans_handle *trans; |
749 | int ret; | 749 | int ret; |
750 | bool snapshot_force_cow = false; | ||
750 | 751 | ||
751 | if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) | 752 | if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) |
752 | return -EINVAL; | 753 | return -EINVAL; |
@@ -763,6 +764,11 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, | |||
763 | goto free_pending; | 764 | goto free_pending; |
764 | } | 765 | } |
765 | 766 | ||
767 | /* | ||
768 | * Force new buffered writes to reserve space even when NOCOW is | ||
769 | * possible. This is to avoid later writeback (running delalloc) | ||
770 | * falling back to COW mode and unexpectedly failing with ENOSPC. | ||
771 | */ | ||
766 | atomic_inc(&root->will_be_snapshotted); | 772 | atomic_inc(&root->will_be_snapshotted); |
767 | smp_mb__after_atomic(); | 773 | smp_mb__after_atomic(); |
768 | /* wait for no snapshot writes */ | 774 | /* wait for no snapshot writes */ |
@@ -773,6 +779,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, | |||
773 | if (ret) | 779 | if (ret) |
774 | goto dec_and_free; | 780 | goto dec_and_free; |
775 | 781 | ||
782 | /* | ||
783 | * All previous writes have started writeback in NOCOW mode, so now | ||
784 | * we force future writes to fall back to COW mode during snapshot | ||
785 | * creation. | ||
786 | */ | ||
787 | atomic_inc(&root->snapshot_force_cow); | ||
788 | snapshot_force_cow = true; | ||
789 | |||
776 | btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); | 790 | btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); |
777 | 791 | ||
778 | btrfs_init_block_rsv(&pending_snapshot->block_rsv, | 792 | btrfs_init_block_rsv(&pending_snapshot->block_rsv, |
@@ -837,6 +851,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, | |||
837 | fail: | 851 | fail: |
838 | btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); | 852 | btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); |
839 | dec_and_free: | 853 | dec_and_free: |
854 | if (snapshot_force_cow) | ||
855 | atomic_dec(&root->snapshot_force_cow); | ||
840 | if (atomic_dec_and_test(&root->will_be_snapshotted)) | 856 | if (atomic_dec_and_test(&root->will_be_snapshotted)) |
841 | wake_up_var(&root->will_be_snapshotted); | 857 | wake_up_var(&root->will_be_snapshotted); |
842 | free_pending: | 858 | free_pending: |
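A minimal userspace sketch of the flag protocol the two new comments describe, using C11 atomics in place of the kernel's atomic_t; the flush and commit steps are only indicated by comments and the names are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int snapshot_force_cow;

/* Writer side: fall back to COW while a snapshot is being created. */
static bool must_cow(void)
{
	return atomic_load(&snapshot_force_cow) > 0;
}

/* Snapshot side: raise the flag only after earlier NOCOW writeback has
 * been started and waited for, and drop it once the snapshot is done. */
static void create_snapshot(void)
{
	/* ... flush delalloc and wait for ordered extents here ... */
	atomic_fetch_add(&snapshot_force_cow, 1);
	printf("during snapshot, must_cow() = %d\n", must_cow());
	/* ... commit the snapshot transaction ... */
	atomic_fetch_sub(&snapshot_force_cow, 1);
}

int main(void)
{
	printf("before snapshot, must_cow() = %d\n", must_cow());
	create_snapshot();
	printf("after snapshot,  must_cow() = %d\n", must_cow());
	return 0;
}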
@@ -3453,6 +3469,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, | |||
3453 | 3469 | ||
3454 | same_lock_start = min_t(u64, loff, dst_loff); | 3470 | same_lock_start = min_t(u64, loff, dst_loff); |
3455 | same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start; | 3471 | same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start; |
3472 | } else { | ||
3473 | /* | ||
3474 | * If the source and destination inodes are different, the | ||
3475 | * source's range end offset matches the source's i_size, that | ||
3476 | * i_size is not a multiple of the sector size, and the | ||
3477 | * destination range does not go past the destination's i_size, | ||
3478 | * we must round down the length to the nearest sector size | ||
3479 | * multiple. If we don't do this adjustment we end up replacing | ||
3480 | * with zeroes the bytes in the range that starts at the | ||
3481 | * deduplication range's end offset and ends at the next sector | ||
3482 | * size multiple. | ||
3483 | */ | ||
3484 | if (loff + olen == i_size_read(src) && | ||
3485 | dst_loff + len < i_size_read(dst)) { | ||
3486 | const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize; | ||
3487 | |||
3488 | len = round_down(i_size_read(src), sz) - loff; | ||
3489 | olen = len; | ||
3490 | } | ||
3456 | } | 3491 | } |
3457 | 3492 | ||
3458 | again: | 3493 | again: |
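A worked example of the length clamp described in the comment above; the values are made up and round_down() is a userspace stand-in for the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel macro; y must be a power of two. */
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t src_isize  = 10000;	/* source i_size, not sector aligned */
	uint64_t loff       = 4096;	/* dedup range start in the source */
	uint64_t olen       = src_isize - loff;	/* range ends at i_size */

	/* Clamp the length so the zero padding after the source's EOF,
	 * inside its last sector, cannot overwrite real data in the
	 * middle of a larger destination file. */
	uint64_t len = round_down(src_isize, sectorsize) - loff;

	printf("requested %llu bytes, deduplicating %llu\n",
	       (unsigned long long)olen, (unsigned long long)len);
	return 0;
}

Here round_down(10000, 4096) is 8192, so only 4096 of the requested 5904 bytes are deduplicated and the partially filled last sector is left alone.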
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 4353bb69bb86..d4917c0cddf5 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -1019,10 +1019,9 @@ out_add_root: | |||
1019 | spin_unlock(&fs_info->qgroup_lock); | 1019 | spin_unlock(&fs_info->qgroup_lock); |
1020 | 1020 | ||
1021 | ret = btrfs_commit_transaction(trans); | 1021 | ret = btrfs_commit_transaction(trans); |
1022 | if (ret) { | 1022 | trans = NULL; |
1023 | trans = NULL; | 1023 | if (ret) |
1024 | goto out_free_path; | 1024 | goto out_free_path; |
1025 | } | ||
1026 | 1025 | ||
1027 | ret = qgroup_rescan_init(fs_info, 0, 1); | 1026 | ret = qgroup_rescan_init(fs_info, 0, 1); |
1028 | if (!ret) { | 1027 | if (!ret) { |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 1650dc44a5e3..3c2ae0e4f25a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -6025,14 +6025,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, | |||
6025 | * Call this after adding a new name for a file and it will properly | 6025 | * Call this after adding a new name for a file and it will properly |
6026 | * update the log to reflect the new name. | 6026 | * update the log to reflect the new name. |
6027 | * | 6027 | * |
6028 | * It will return zero if all goes well, and it will return 1 if a | 6028 | * @ctx can not be NULL when @sync_log is false, and should be NULL when it's |
6029 | * full transaction commit is required. | 6029 | * true (because it's not used). |
6030 | * | ||
6031 | * Return value depends on whether @sync_log is true or false. | ||
6032 | * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be | ||
6033 | * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT | ||
6034 | * otherwise. | ||
6035 | * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to | ||
6036 | * sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log, | ||
6037 | * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be | ||
6038 | * committed (without attempting to sync the log). | ||
6030 | */ | 6039 | */ |
6031 | int btrfs_log_new_name(struct btrfs_trans_handle *trans, | 6040 | int btrfs_log_new_name(struct btrfs_trans_handle *trans, |
6032 | struct btrfs_inode *inode, struct btrfs_inode *old_dir, | 6041 | struct btrfs_inode *inode, struct btrfs_inode *old_dir, |
6033 | struct dentry *parent) | 6042 | struct dentry *parent, |
6043 | bool sync_log, struct btrfs_log_ctx *ctx) | ||
6034 | { | 6044 | { |
6035 | struct btrfs_fs_info *fs_info = trans->fs_info; | 6045 | struct btrfs_fs_info *fs_info = trans->fs_info; |
6046 | int ret; | ||
6036 | 6047 | ||
6037 | /* | 6048 | /* |
6038 | * this will force the logging code to walk the dentry chain | 6049 | * this will force the logging code to walk the dentry chain |
@@ -6047,9 +6058,34 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans, | |||
6047 | */ | 6058 | */ |
6048 | if (inode->logged_trans <= fs_info->last_trans_committed && | 6059 | if (inode->logged_trans <= fs_info->last_trans_committed && |
6049 | (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed)) | 6060 | (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed)) |
6050 | return 0; | 6061 | return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT : |
6062 | BTRFS_DONT_NEED_LOG_SYNC; | ||
6063 | |||
6064 | if (sync_log) { | ||
6065 | struct btrfs_log_ctx ctx2; | ||
6066 | |||
6067 | btrfs_init_log_ctx(&ctx2, &inode->vfs_inode); | ||
6068 | ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, | ||
6069 | LOG_INODE_EXISTS, &ctx2); | ||
6070 | if (ret == BTRFS_NO_LOG_SYNC) | ||
6071 | return BTRFS_DONT_NEED_TRANS_COMMIT; | ||
6072 | else if (ret) | ||
6073 | return BTRFS_NEED_TRANS_COMMIT; | ||
6074 | |||
6075 | ret = btrfs_sync_log(trans, inode->root, &ctx2); | ||
6076 | if (ret) | ||
6077 | return BTRFS_NEED_TRANS_COMMIT; | ||
6078 | return BTRFS_DONT_NEED_TRANS_COMMIT; | ||
6079 | } | ||
6080 | |||
6081 | ASSERT(ctx); | ||
6082 | ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, | ||
6083 | LOG_INODE_EXISTS, ctx); | ||
6084 | if (ret == BTRFS_NO_LOG_SYNC) | ||
6085 | return BTRFS_DONT_NEED_LOG_SYNC; | ||
6086 | else if (ret) | ||
6087 | return BTRFS_NEED_TRANS_COMMIT; | ||
6051 | 6088 | ||
6052 | return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, | 6089 | return BTRFS_NEED_LOG_SYNC; |
6053 | LOG_INODE_EXISTS, NULL); | ||
6054 | } | 6090 | } |
6055 | 6091 | ||
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index 122e68b89a5a..7ab9bb88a639 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h | |||
@@ -71,8 +71,16 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, | |||
71 | int for_rename); | 71 | int for_rename); |
72 | void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, | 72 | void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, |
73 | struct btrfs_inode *dir); | 73 | struct btrfs_inode *dir); |
74 | /* Return values for btrfs_log_new_name() */ | ||
75 | enum { | ||
76 | BTRFS_DONT_NEED_TRANS_COMMIT, | ||
77 | BTRFS_NEED_TRANS_COMMIT, | ||
78 | BTRFS_DONT_NEED_LOG_SYNC, | ||
79 | BTRFS_NEED_LOG_SYNC, | ||
80 | }; | ||
74 | int btrfs_log_new_name(struct btrfs_trans_handle *trans, | 81 | int btrfs_log_new_name(struct btrfs_trans_handle *trans, |
75 | struct btrfs_inode *inode, struct btrfs_inode *old_dir, | 82 | struct btrfs_inode *inode, struct btrfs_inode *old_dir, |
76 | struct dentry *parent); | 83 | struct dentry *parent, |
84 | bool sync_log, struct btrfs_log_ctx *ctx); | ||
77 | 85 | ||
78 | #endif | 86 | #endif |
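A standalone sketch of how a caller might dispatch on these codes when @sync_log is false; the enum values are mirrored locally and the strings are only illustrative (the real callers appear in the fs/btrfs/inode.c hunks earlier in this diff):

#include <stdio.h>

/* Local mirror of the return codes above, for illustration only. */
enum { DONT_NEED_TRANS_COMMIT, NEED_TRANS_COMMIT,
       DONT_NEED_LOG_SYNC, NEED_LOG_SYNC };

/* Hypothetical caller-side dispatch: with sync_log == false the callee
 * defers the work and the caller chooses between syncing the log,
 * committing the whole transaction, or doing nothing extra. */
static const char *dispatch(int ret)
{
	switch (ret) {
	case NEED_LOG_SYNC:
		return "sync the log before ending the transaction";
	case NEED_TRANS_COMMIT:
		return "commit the transaction";
	default:
		return "just end the transaction";
	}
}

int main(void)
{
	printf("%s\n", dispatch(NEED_LOG_SYNC));
	printf("%s\n", dispatch(NEED_TRANS_COMMIT));
	printf("%s\n", dispatch(DONT_NEED_LOG_SYNC));
	return 0;
}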
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index da86706123ff..f4405e430da6 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -4491,7 +4491,12 @@ again: | |||
4491 | 4491 | ||
4492 | /* Now btrfs_update_device() will change the on-disk size. */ | 4492 | /* Now btrfs_update_device() will change the on-disk size. */ |
4493 | ret = btrfs_update_device(trans, device); | 4493 | ret = btrfs_update_device(trans, device); |
4494 | btrfs_end_transaction(trans); | 4494 | if (ret < 0) { |
4495 | btrfs_abort_transaction(trans, ret); | ||
4496 | btrfs_end_transaction(trans); | ||
4497 | } else { | ||
4498 | ret = btrfs_commit_transaction(trans); | ||
4499 | } | ||
4495 | done: | 4500 | done: |
4496 | btrfs_free_path(path); | 4501 | btrfs_free_path(path); |
4497 | if (ret) { | 4502 | if (ret) { |
diff --git a/fs/buffer.c b/fs/buffer.c index 4cc679d5bf58..6f1ae3ac9789 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/buffer_head.h> | 39 | #include <linux/buffer_head.h> |
40 | #include <linux/task_io_accounting_ops.h> | 40 | #include <linux/task_io_accounting_ops.h> |
41 | #include <linux/bio.h> | 41 | #include <linux/bio.h> |
42 | #include <linux/notifier.h> | ||
43 | #include <linux/cpu.h> | 42 | #include <linux/cpu.h> |
44 | #include <linux/bitops.h> | 43 | #include <linux/bitops.h> |
45 | #include <linux/mpage.h> | 44 | #include <linux/mpage.h> |
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 43ca3b763875..eab1359d0553 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
@@ -602,6 +602,8 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg) | |||
602 | 602 | ||
603 | /* | 603 | /* |
604 | * create a new fs client | 604 | * create a new fs client |
605 | * | ||
606 | * Success or not, this function consumes @fsopt and @opt. | ||
605 | */ | 607 | */ |
606 | static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, | 608 | static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, |
607 | struct ceph_options *opt) | 609 | struct ceph_options *opt) |
@@ -609,17 +611,20 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, | |||
609 | struct ceph_fs_client *fsc; | 611 | struct ceph_fs_client *fsc; |
610 | int page_count; | 612 | int page_count; |
611 | size_t size; | 613 | size_t size; |
612 | int err = -ENOMEM; | 614 | int err; |
613 | 615 | ||
614 | fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); | 616 | fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); |
615 | if (!fsc) | 617 | if (!fsc) { |
616 | return ERR_PTR(-ENOMEM); | 618 | err = -ENOMEM; |
619 | goto fail; | ||
620 | } | ||
617 | 621 | ||
618 | fsc->client = ceph_create_client(opt, fsc); | 622 | fsc->client = ceph_create_client(opt, fsc); |
619 | if (IS_ERR(fsc->client)) { | 623 | if (IS_ERR(fsc->client)) { |
620 | err = PTR_ERR(fsc->client); | 624 | err = PTR_ERR(fsc->client); |
621 | goto fail; | 625 | goto fail; |
622 | } | 626 | } |
627 | opt = NULL; /* fsc->client now owns this */ | ||
623 | 628 | ||
624 | fsc->client->extra_mon_dispatch = extra_mon_dispatch; | 629 | fsc->client->extra_mon_dispatch = extra_mon_dispatch; |
625 | fsc->client->osdc.abort_on_full = true; | 630 | fsc->client->osdc.abort_on_full = true; |
@@ -677,6 +682,9 @@ fail_client: | |||
677 | ceph_destroy_client(fsc->client); | 682 | ceph_destroy_client(fsc->client); |
678 | fail: | 683 | fail: |
679 | kfree(fsc); | 684 | kfree(fsc); |
685 | if (opt) | ||
686 | ceph_destroy_options(opt); | ||
687 | destroy_mount_options(fsopt); | ||
680 | return ERR_PTR(err); | 688 | return ERR_PTR(err); |
681 | } | 689 | } |
682 | 690 | ||
@@ -1042,8 +1050,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type, | |||
1042 | fsc = create_fs_client(fsopt, opt); | 1050 | fsc = create_fs_client(fsopt, opt); |
1043 | if (IS_ERR(fsc)) { | 1051 | if (IS_ERR(fsc)) { |
1044 | res = ERR_CAST(fsc); | 1052 | res = ERR_CAST(fsc); |
1045 | destroy_mount_options(fsopt); | ||
1046 | ceph_destroy_options(opt); | ||
1047 | goto out_final; | 1053 | goto out_final; |
1048 | } | 1054 | } |
1049 | 1055 | ||
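A small userspace sketch of the ownership contract stated in the new comment, with hypothetical types and names: the constructor consumes its options argument on every path, so the caller never frees it, not even on failure:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opts { char *name; };

/* Hypothetical constructor following the same contract as the hunk
 * above: success or not, it consumes 'o'; the caller never frees it. */
static struct opts *client_create(struct opts *o, int fail)
{
	struct opts *c = malloc(sizeof(*c));

	if (!c || fail) {
		free(c);
		free(o->name);	/* consume on the error path too */
		free(o);
		return NULL;
	}
	c->name = o->name;	/* ownership transferred */
	free(o);
	return c;
}

int main(void)
{
	struct opts *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	o->name = strdup("example");

	struct opts *c = client_create(o, 1);

	/* Caller side: no free(o) here even though creation failed;
	 * freeing it again would be the double free the hunk removes. */
	printf("create %s\n", c ? "succeeded" : "failed");
	if (c) {
		free(c->name);
		free(c);
	}
	return 0;
}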
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index b380e0871372..a2b2355e7f01 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
@@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target) | |||
105 | case SFM_LESSTHAN: | 105 | case SFM_LESSTHAN: |
106 | *target = '<'; | 106 | *target = '<'; |
107 | break; | 107 | break; |
108 | case SFM_SLASH: | ||
109 | *target = '\\'; | ||
110 | break; | ||
111 | case SFM_SPACE: | 108 | case SFM_SPACE: |
112 | *target = ' '; | 109 | *target = ' '; |
113 | break; | 110 | break; |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index c832a8a1970a..7aa08dba4719 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -2547,7 +2547,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info) | |||
2547 | if (tcon == NULL) | 2547 | if (tcon == NULL) |
2548 | return -ENOMEM; | 2548 | return -ENOMEM; |
2549 | 2549 | ||
2550 | snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName); | 2550 | snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname); |
2551 | 2551 | ||
2552 | /* cannot fail */ | 2552 | /* cannot fail */ |
2553 | nls_codepage = load_nls_default(); | 2553 | nls_codepage = load_nls_default(); |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index d32eaa4b2437..6e8765f44508 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path, | |||
467 | oparms.cifs_sb = cifs_sb; | 467 | oparms.cifs_sb = cifs_sb; |
468 | oparms.desired_access = GENERIC_READ; | 468 | oparms.desired_access = GENERIC_READ; |
469 | oparms.create_options = CREATE_NOT_DIR; | 469 | oparms.create_options = CREATE_NOT_DIR; |
470 | if (backup_cred(cifs_sb)) | ||
471 | oparms.create_options |= CREATE_OPEN_BACKUP_INTENT; | ||
470 | oparms.disposition = FILE_OPEN; | 472 | oparms.disposition = FILE_OPEN; |
471 | oparms.path = path; | 473 | oparms.path = path; |
472 | oparms.fid = &fid; | 474 | oparms.fid = &fid; |
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index db0453660ff6..6a9c47541c53 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
@@ -248,16 +248,20 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) | |||
248 | * MacOS server pads after SMB2.1 write response with 3 bytes | 248 | * MacOS server pads after SMB2.1 write response with 3 bytes |
249 | * of junk. Other servers match RFC1001 len to actual | 249 | * of junk. Other servers match RFC1001 len to actual |
250 | * SMB2/SMB3 frame length (header + smb2 response specific data) | 250 | * SMB2/SMB3 frame length (header + smb2 response specific data) |
251 | * Some windows servers do too when compounding is used. | 251 | * Some windows servers also pad up to 8 bytes when compounding. |
252 | * Log the server error (once), but allow it and continue | 252 | * If pad is longer than eight bytes, log the server behavior |
253 | * (once), since it may indicate a problem, but allow it and continue | ||
253 | * since the frame is parseable. | 254 | * since the frame is parseable. |
254 | */ | 255 | */ |
255 | if (clc_len < len) { | 256 | if (clc_len < len) { |
256 | printk_once(KERN_WARNING | 257 | pr_warn_once( |
257 | "SMB2 server sent bad RFC1001 len %d not %d\n", | 258 | "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n", |
258 | len, clc_len); | 259 | len, clc_len, command, mid); |
259 | return 0; | 260 | return 0; |
260 | } | 261 | } |
262 | pr_warn_once( | ||
263 | "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n", | ||
264 | len, clc_len, command, mid); | ||
261 | 265 | ||
262 | return 1; | 266 | return 1; |
263 | } | 267 | } |
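A simplified standalone sketch of the length check after this change (hypothetical helper; the real function handles more cases): a padded response is logged once and accepted, while a response shorter than the calculated length is logged and flagged:

#include <stdio.h>

/* Decide whether a response whose on-the-wire length 'len' disagrees
 * with the calculated length 'clc_len' is still acceptable.
 * Returns 0 = accept, 1 = flag as bad. */
static int check_rsp_length(int len, int clc_len)
{
	if (clc_len == len)
		return 0;
	if (clc_len < len) {
		/* Server padded the frame (seen with MacOS and some
		 * Windows servers when compounding): log, accept. */
		fprintf(stderr, "padded rsp: %d not %d\n", len, clc_len);
		return 0;
	}
	/* Frame is shorter than it claims to be: log and flag it. */
	fprintf(stderr, "short rsp: %d not %d\n", len, clc_len);
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_rsp_length(100, 100),	/* exact  -> 0 */
	       check_rsp_length(108, 100),	/* padded -> 0 */
	       check_rsp_length(96, 100));	/* short  -> 1 */
	return 0;
}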
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 247a98e6c856..d954ce36b473 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -630,7 +630,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, | |||
630 | oparms.tcon = tcon; | 630 | oparms.tcon = tcon; |
631 | oparms.desired_access = FILE_READ_ATTRIBUTES; | 631 | oparms.desired_access = FILE_READ_ATTRIBUTES; |
632 | oparms.disposition = FILE_OPEN; | 632 | oparms.disposition = FILE_OPEN; |
633 | oparms.create_options = 0; | 633 | if (backup_cred(cifs_sb)) |
634 | oparms.create_options = CREATE_OPEN_BACKUP_INTENT; | ||
635 | else | ||
636 | oparms.create_options = 0; | ||
634 | oparms.fid = &fid; | 637 | oparms.fid = &fid; |
635 | oparms.reconnect = false; | 638 | oparms.reconnect = false; |
636 | 639 | ||
@@ -779,7 +782,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, | |||
779 | oparms.tcon = tcon; | 782 | oparms.tcon = tcon; |
780 | oparms.desired_access = FILE_READ_EA; | 783 | oparms.desired_access = FILE_READ_EA; |
781 | oparms.disposition = FILE_OPEN; | 784 | oparms.disposition = FILE_OPEN; |
782 | oparms.create_options = 0; | 785 | if (backup_cred(cifs_sb)) |
786 | oparms.create_options = CREATE_OPEN_BACKUP_INTENT; | ||
787 | else | ||
788 | oparms.create_options = 0; | ||
783 | oparms.fid = &fid; | 789 | oparms.fid = &fid; |
784 | oparms.reconnect = false; | 790 | oparms.reconnect = false; |
785 | 791 | ||
@@ -858,7 +864,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, | |||
858 | oparms.tcon = tcon; | 864 | oparms.tcon = tcon; |
859 | oparms.desired_access = FILE_WRITE_EA; | 865 | oparms.desired_access = FILE_WRITE_EA; |
860 | oparms.disposition = FILE_OPEN; | 866 | oparms.disposition = FILE_OPEN; |
861 | oparms.create_options = 0; | 867 | if (backup_cred(cifs_sb)) |
868 | oparms.create_options = CREATE_OPEN_BACKUP_INTENT; | ||
869 | else | ||
870 | oparms.create_options = 0; | ||
862 | oparms.fid = &fid; | 871 | oparms.fid = &fid; |
863 | oparms.reconnect = false; | 872 | oparms.reconnect = false; |
864 | 873 | ||
@@ -1453,7 +1462,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, | |||
1453 | oparms.tcon = tcon; | 1462 | oparms.tcon = tcon; |
1454 | oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; | 1463 | oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; |
1455 | oparms.disposition = FILE_OPEN; | 1464 | oparms.disposition = FILE_OPEN; |
1456 | oparms.create_options = 0; | 1465 | if (backup_cred(cifs_sb)) |
1466 | oparms.create_options = CREATE_OPEN_BACKUP_INTENT; | ||
1467 | else | ||
1468 | oparms.create_options = 0; | ||
1457 | oparms.fid = fid; | 1469 | oparms.fid = fid; |
1458 | oparms.reconnect = false; | 1470 | oparms.reconnect = false; |
1459 | 1471 | ||
@@ -1857,7 +1869,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, | |||
1857 | oparms.tcon = tcon; | 1869 | oparms.tcon = tcon; |
1858 | oparms.desired_access = FILE_READ_ATTRIBUTES; | 1870 | oparms.desired_access = FILE_READ_ATTRIBUTES; |
1859 | oparms.disposition = FILE_OPEN; | 1871 | oparms.disposition = FILE_OPEN; |
1860 | oparms.create_options = 0; | 1872 | if (backup_cred(cifs_sb)) |
1873 | oparms.create_options = CREATE_OPEN_BACKUP_INTENT; | ||
1874 | else | ||
1875 | oparms.create_options = 0; | ||
1861 | oparms.fid = &fid; | 1876 | oparms.fid = &fid; |
1862 | oparms.reconnect = false; | 1877 | oparms.reconnect = false; |
1863 | 1878 | ||
@@ -3639,7 +3654,7 @@ struct smb_version_values smb21_values = { | |||
3639 | struct smb_version_values smb3any_values = { | 3654 | struct smb_version_values smb3any_values = { |
3640 | .version_string = SMB3ANY_VERSION_STRING, | 3655 | .version_string = SMB3ANY_VERSION_STRING, |
3641 | .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ | 3656 | .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ |
3642 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, | 3657 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, |
3643 | .large_lock_type = 0, | 3658 | .large_lock_type = 0, |
3644 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, | 3659 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, |
3645 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, | 3660 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, |
@@ -3660,7 +3675,7 @@ struct smb_version_values smb3any_values = { | |||
3660 | struct smb_version_values smbdefault_values = { | 3675 | struct smb_version_values smbdefault_values = { |
3661 | .version_string = SMBDEFAULT_VERSION_STRING, | 3676 | .version_string = SMBDEFAULT_VERSION_STRING, |
3662 | .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ | 3677 | .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ |
3663 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, | 3678 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, |
3664 | .large_lock_type = 0, | 3679 | .large_lock_type = 0, |
3665 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, | 3680 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, |
3666 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, | 3681 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, |
@@ -3681,7 +3696,7 @@ struct smb_version_values smbdefault_values = { | |||
3681 | struct smb_version_values smb30_values = { | 3696 | struct smb_version_values smb30_values = { |
3682 | .version_string = SMB30_VERSION_STRING, | 3697 | .version_string = SMB30_VERSION_STRING, |
3683 | .protocol_id = SMB30_PROT_ID, | 3698 | .protocol_id = SMB30_PROT_ID, |
3684 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, | 3699 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, |
3685 | .large_lock_type = 0, | 3700 | .large_lock_type = 0, |
3686 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, | 3701 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, |
3687 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, | 3702 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, |
@@ -3702,7 +3717,7 @@ struct smb_version_values smb30_values = { | |||
3702 | struct smb_version_values smb302_values = { | 3717 | struct smb_version_values smb302_values = { |
3703 | .version_string = SMB302_VERSION_STRING, | 3718 | .version_string = SMB302_VERSION_STRING, |
3704 | .protocol_id = SMB302_PROT_ID, | 3719 | .protocol_id = SMB302_PROT_ID, |
3705 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, | 3720 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, |
3706 | .large_lock_type = 0, | 3721 | .large_lock_type = 0, |
3707 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, | 3722 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, |
3708 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, | 3723 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, |
@@ -3723,7 +3738,7 @@ struct smb_version_values smb302_values = { | |||
3723 | struct smb_version_values smb311_values = { | 3738 | struct smb_version_values smb311_values = { |
3724 | .version_string = SMB311_VERSION_STRING, | 3739 | .version_string = SMB311_VERSION_STRING, |
3725 | .protocol_id = SMB311_PROT_ID, | 3740 | .protocol_id = SMB311_PROT_ID, |
3726 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, | 3741 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, |
3727 | .large_lock_type = 0, | 3742 | .large_lock_type = 0, |
3728 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, | 3743 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, |
3729 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, | 3744 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 5740aa809be6..c08acfc77abc 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -2178,6 +2178,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock, | |||
2178 | if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || | 2178 | if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || |
2179 | *oplock == SMB2_OPLOCK_LEVEL_NONE) | 2179 | *oplock == SMB2_OPLOCK_LEVEL_NONE) |
2180 | req->RequestedOplockLevel = *oplock; | 2180 | req->RequestedOplockLevel = *oplock; |
2181 | else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) && | ||
2182 | (oparms->create_options & CREATE_NOT_FILE)) | ||
2183 | req->RequestedOplockLevel = *oplock; /* no srv lease support */ | ||
2181 | else { | 2184 | else { |
2182 | rc = add_lease_context(server, iov, &n_iov, | 2185 | rc = add_lease_context(server, iov, &n_iov, |
2183 | oparms->fid->lease_key, oplock); | 2186 | oparms->fid->lease_key, oplock); |
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index ec3fba7d492f..488a9e7f8f66 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/mpage.h> | 24 | #include <linux/mpage.h> |
25 | #include <linux/user_namespace.h> | 25 | #include <linux/user_namespace.h> |
26 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
27 | #include <linux/blkdev.h> | ||
27 | 28 | ||
28 | #include "isofs.h" | 29 | #include "isofs.h" |
29 | #include "zisofs.h" | 30 | #include "zisofs.h" |
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) | |||
653 | /* | 654 | /* |
654 | * What if bugger tells us to go beyond page size? | 655 | * What if bugger tells us to go beyond page size? |
655 | */ | 656 | */ |
657 | if (bdev_logical_block_size(s->s_bdev) > 2048) { | ||
658 | printk(KERN_WARNING | ||
659 | "ISOFS: unsupported/invalid hardware sector size %d\n", | ||
660 | bdev_logical_block_size(s->s_bdev)); | ||
661 | goto out_freesbi; | ||
662 | } | ||
656 | opt.blocksize = sb_min_blocksize(s, opt.blocksize); | 663 | opt.blocksize = sb_min_blocksize(s, opt.blocksize); |
657 | 664 | ||
658 | sbi->s_high_sierra = 0; /* default is iso9660 */ | 665 | sbi->s_high_sierra = 0; /* default is iso9660 */ |
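
The isofs hunk above rejects block devices whose logical block size exceeds 2048 bytes before sb_min_blocksize() is attempted, since ISO 9660 block sizes cannot be larger than that. A minimal sketch of the same guard outside the kernel; the logical block size arrives as a plain parameter instead of bdev_logical_block_size().

#include <stdio.h>

#define ISOFS_MAX_BLOCKSIZE 2048   /* largest block size ISO 9660 supports */

/* Return 0 if the device's logical block size is usable for isofs, -1 otherwise. */
static int check_hw_sector_size(int logical_block_size)
{
	if (logical_block_size > ISOFS_MAX_BLOCKSIZE) {
		fprintf(stderr,
			"ISOFS: unsupported/invalid hardware sector size %d\n",
			logical_block_size);
		return -1;
	}
	return 0;
}

int main(void)
{
	check_hw_sector_size(512);    /* accepted */
	check_hw_sector_size(4096);   /* rejected, as in the new kernel check */
	return 0;
}
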
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 03b8ba933eb2..235b959fc2b3 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * alloc.c - NILFS dat/inode allocator | 3 | * alloc.c - NILFS dat/inode allocator |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Originally written by Koji Sato. | 7 | * Originally written by Koji Sato. |
17 | * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. | 8 | * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h index 05149e606a78..0303c3968cee 100644 --- a/fs/nilfs2/alloc.h +++ b/fs/nilfs2/alloc.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator | 3 | * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Originally written by Koji Sato. | 7 | * Originally written by Koji Sato. |
17 | * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. | 8 | * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index 01fb1831ca25..fb5a9a8a13cf 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * bmap.c - NILFS block mapping. | 3 | * bmap.c - NILFS block mapping. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index 2b6ffbe5997a..2c63858e81c9 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * bmap.h - NILFS block mapping. | 3 | * bmap.h - NILFS block mapping. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index dec98cab729d..ebb24a314f43 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * btnode.c - NILFS B-tree node cache | 3 | * btnode.c - NILFS B-tree node cache |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Originally written by Seiji Kihara. | 7 | * Originally written by Seiji Kihara. |
17 | * Fully revised by Ryusuke Konishi for stabilization and simplification. | 8 | * Fully revised by Ryusuke Konishi for stabilization and simplification. |
18 | * | 9 | * |
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 4e8aaa1aeb65..0f88dbc9bcb3 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * btnode.h - NILFS B-tree node cache | 3 | * btnode.h - NILFS B-tree node cache |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Seiji Kihara. | 7 | * Written by Seiji Kihara. |
17 | * Revised by Ryusuke Konishi. | 8 | * Revised by Ryusuke Konishi. |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 16a7a67a11c9..23e043eca237 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * btree.c - NILFS B-tree. | 3 | * btree.c - NILFS B-tree. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index 2184e47fa4bf..d1421b646ce4 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * btree.h - NILFS B-tree. | 3 | * btree.h - NILFS B-tree. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index a15a1601e931..8d41311b5db4 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * cpfile.c - NILFS checkpoint file. | 3 | * cpfile.c - NILFS checkpoint file. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h index 6eca972f9673..6336222df24a 100644 --- a/fs/nilfs2/cpfile.h +++ b/fs/nilfs2/cpfile.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * cpfile.h - NILFS checkpoint file. | 3 | * cpfile.h - NILFS checkpoint file. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index dffedb2f8817..6f4066636be9 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * dat.c - NILFS disk address translation. | 3 | * dat.c - NILFS disk address translation. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h index 57dc6cf466d0..b17ee34580ae 100644 --- a/fs/nilfs2/dat.h +++ b/fs/nilfs2/dat.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * dat.h - NILFS disk address translation. | 3 | * dat.h - NILFS disk address translation. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 582831ab3eb9..81394e22d0a0 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * dir.c - NILFS directory entry operations | 3 | * dir.c - NILFS directory entry operations |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Modified for NILFS by Amagai Yoshiji. | 7 | * Modified for NILFS by Amagai Yoshiji. |
17 | */ | 8 | */ |
18 | /* | 9 | /* |
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index 96e3ed0d9652..533e24ea3a88 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * direct.c - NILFS direct block pointer. | 3 | * direct.c - NILFS direct block pointer. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h index cfe85e848bba..ec9a23c77994 100644 --- a/fs/nilfs2/direct.h +++ b/fs/nilfs2/direct.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * direct.h - NILFS direct block pointer. | 3 | * direct.h - NILFS direct block pointer. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 7da0fac71dc2..64bc81363c6c 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * file.c - NILFS regular file handling primitives including fsync(). | 3 | * file.c - NILFS regular file handling primitives including fsync(). |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Amagai Yoshiji and Ryusuke Konishi. | 7 | * Written by Amagai Yoshiji and Ryusuke Konishi. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 853a831dcde0..aa3c328ee189 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * gcinode.c - dummy inodes to buffer blocks for garbage collection | 3 | * gcinode.c - dummy inodes to buffer blocks for garbage collection |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi. | 7 | * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi. |
17 | * Revised by Ryusuke Konishi. | 8 | * Revised by Ryusuke Konishi. |
18 | * | 9 | * |
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index b8fa45c20c63..4140d232cadc 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * ifile.c - NILFS inode file | 3 | * ifile.c - NILFS inode file |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Amagai Yoshiji. | 7 | * Written by Amagai Yoshiji. |
17 | * Revised by Ryusuke Konishi. | 8 | * Revised by Ryusuke Konishi. |
18 | * | 9 | * |
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h index 188b94fe0ec5..a1e1e5711a05 100644 --- a/fs/nilfs2/ifile.h +++ b/fs/nilfs2/ifile.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * ifile.h - NILFS inode file | 3 | * ifile.h - NILFS inode file |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Amagai Yoshiji. | 7 | * Written by Amagai Yoshiji. |
17 | * Revised by Ryusuke Konishi. | 8 | * Revised by Ryusuke Konishi. |
18 | * | 9 | * |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 6a612d832e7d..671085512e0f 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * inode.c - NILFS inode operations. | 3 | * inode.c - NILFS inode operations. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | * | 8 | * |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 1d2c3d7711fe..9b96d79eea6c 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * ioctl.c - NILFS ioctl operations. | 3 | * ioctl.c - NILFS ioctl operations. |
3 | * | 4 | * |
4 | * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index c6bc1033e7d2..700870a92bc4 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * mdt.c - meta data file for NILFS | 3 | * mdt.c - meta data file for NILFS |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h index 3f67f3932097..e77aea4bb921 100644 --- a/fs/nilfs2/mdt.h +++ b/fs/nilfs2/mdt.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * mdt.h - NILFS meta data file prototype and definitions | 3 | * mdt.h - NILFS meta data file prototype and definitions |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index dd52d3f82e8d..9fe6d4ab74f0 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * namei.c - NILFS pathname lookup operations. | 3 | * namei.c - NILFS pathname lookup operations. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi. | 7 | * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi. |
17 | */ | 8 | */ |
18 | /* | 9 | /* |
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 33f8c8fc96e8..a2f247b6a209 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * nilfs.h - NILFS local header file. | 3 | * nilfs.h - NILFS local header file. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato and Ryusuke Konishi. | 7 | * Written by Koji Sato and Ryusuke Konishi. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 4cb850a6f1c2..329a056b73b1 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * page.c - buffer/page management specific to NILFS | 3 | * page.c - buffer/page management specific to NILFS |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi and Seiji Kihara. | 7 | * Written by Ryusuke Konishi and Seiji Kihara. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index f3687c958fa8..62b9bb469e92 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * page.h - buffer/page management specific to NILFS | 3 | * page.h - buffer/page management specific to NILFS |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi and Seiji Kihara. | 7 | * Written by Ryusuke Konishi and Seiji Kihara. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 5139efed1888..140b663e91c7 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * recovery.c - NILFS recovery logic | 3 | * recovery.c - NILFS recovery logic |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 68cb9e4740b4..20c479b5e41b 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * segbuf.c - NILFS segment buffer | 3 | * segbuf.c - NILFS segment buffer |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | * | 8 | * |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h index 10e16935fff6..9bea1bd59041 100644 --- a/fs/nilfs2/segbuf.h +++ b/fs/nilfs2/segbuf.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * segbuf.h - NILFS Segment buffer prototypes and definitions | 3 | * segbuf.h - NILFS Segment buffer prototypes and definitions |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | * | 8 | * |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 0953635e7d48..445eef41bfaf 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * segment.c - NILFS segment constructor. | 3 | * segment.c - NILFS segment constructor. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | * | 8 | * |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 04634e3e3d58..f5cf5308f3fc 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * segment.h - NILFS Segment constructor prototypes and definitions | 3 | * segment.h - NILFS Segment constructor prototypes and definitions |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | * | 8 | * |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index c7fa139d50e8..bf3f8f05c89b 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * sufile.c - NILFS segment usage file. | 3 | * sufile.c - NILFS segment usage file. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | * Revised by Ryusuke Konishi. | 8 | * Revised by Ryusuke Konishi. |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index 673a891350f4..c4e2c7a7add1 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * sufile.h - NILFS segment usage file. | 3 | * sufile.h - NILFS segment usage file. |
3 | * | 4 | * |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Koji Sato. | 7 | * Written by Koji Sato. |
17 | */ | 8 | */ |
18 | 9 | ||
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 1b9067cf4511..26290aa1023f 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * super.c - NILFS module and super block management. | 3 | * super.c - NILFS module and super block management. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | */ | 8 | */ |
18 | /* | 9 | /* |
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index 4b25837e7724..e60be7bb55b0 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c | |||
@@ -1,19 +1,10 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * sysfs.c - sysfs support implementation. | 3 | * sysfs.c - sysfs support implementation. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. |
5 | * Copyright (C) 2014 HGST, Inc., a Western Digital Company. | 6 | * Copyright (C) 2014 HGST, Inc., a Western Digital Company. |
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com> | 8 | * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com> |
18 | */ | 9 | */ |
19 | 10 | ||
diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h index 648cedf9c06e..d001eb862dae 100644 --- a/fs/nilfs2/sysfs.h +++ b/fs/nilfs2/sysfs.h | |||
@@ -1,19 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * sysfs.h - sysfs support declarations. | 3 | * sysfs.h - sysfs support declarations. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. |
5 | * Copyright (C) 2014 HGST, Inc., a Western Digital Company. | 6 | * Copyright (C) 2014 HGST, Inc., a Western Digital Company. |
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com> | 8 | * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com> |
18 | */ | 9 | */ |
19 | 10 | ||
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 1a85317e83f0..484785cdf96e 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
@@ -1,18 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* | 2 | /* |
2 | * the_nilfs.c - the_nilfs shared structure. | 3 | * the_nilfs.c - the_nilfs shared structure. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | * | 8 | * |
18 | */ | 9 | */ |
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 36da1779f976..380a543c5b19 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h | |||
@@ -1,18 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * the_nilfs.h - the_nilfs shared structure. | 3 | * the_nilfs.h - the_nilfs shared structure. |
3 | * | 4 | * |
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 5 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * Written by Ryusuke Konishi. | 7 | * Written by Ryusuke Konishi. |
17 | * | 8 | * |
18 | */ | 9 | */ |
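
The nilfs2 hunks above are all the same mechanical conversion: the verbatim GPL v2+ notice is replaced by a one-line SPDX identifier, written as a // comment in the .c files and in /* */ form in the headers (the block-comment form is reportedly preferred for headers so the tag also survives contexts that do not accept C++-style comments). A sketch of the resulting header layout; the file name and author line are placeholders.

/* Form used in the .c files above: */
// SPDX-License-Identifier: GPL-2.0+
/*
 * example.c - placeholder NILFS source file after the conversion
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by <original author>.
 */

/* Form used in the .h files above: */
/* SPDX-License-Identifier: GPL-2.0+ */
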
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index f174397b63a0..ababdbfab537 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c | |||
@@ -351,16 +351,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, | |||
351 | 351 | ||
352 | iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); | 352 | iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); |
353 | 353 | ||
354 | if ((mask & FS_MODIFY) || | 354 | iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = |
355 | (test_mask & to_tell->i_fsnotify_mask)) { | 355 | fsnotify_first_mark(&to_tell->i_fsnotify_marks); |
356 | iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = | 356 | if (mnt) { |
357 | fsnotify_first_mark(&to_tell->i_fsnotify_marks); | ||
358 | } | ||
359 | |||
360 | if (mnt && ((mask & FS_MODIFY) || | ||
361 | (test_mask & mnt->mnt_fsnotify_mask))) { | ||
362 | iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = | ||
363 | fsnotify_first_mark(&to_tell->i_fsnotify_marks); | ||
364 | iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] = | 357 | iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] = |
365 | fsnotify_first_mark(&mnt->mnt_fsnotify_marks); | 358 | fsnotify_first_mark(&mnt->mnt_fsnotify_marks); |
366 | } | 359 | } |
diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 05506d60131c..59cdb27826de 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c | |||
@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) | |||
132 | struct fsnotify_mark *mark; | 132 | struct fsnotify_mark *mark; |
133 | 133 | ||
134 | assert_spin_locked(&conn->lock); | 134 | assert_spin_locked(&conn->lock); |
135 | /* We can get detached connector here when inode is getting unlinked. */ | ||
136 | if (!fsnotify_valid_obj_type(conn->type)) | ||
137 | return; | ||
135 | hlist_for_each_entry(mark, &conn->list, obj_list) { | 138 | hlist_for_each_entry(mark, &conn->list, obj_list) { |
136 | if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) | 139 | if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) |
137 | new_mask |= mark->mask; | 140 | new_mask |= mark->mask; |
138 | } | 141 | } |
139 | if (WARN_ON(!fsnotify_valid_obj_type(conn->type))) | ||
140 | return; | ||
141 | |||
142 | *fsnotify_conn_mask_p(conn) = new_mask; | 142 | *fsnotify_conn_mask_p(conn) = new_mask; |
143 | } | 143 | } |
144 | 144 | ||
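
The mark.c hunk hoists the connector-type check above the loop and drops the WARN_ON: a connector whose type has been invalidated is an expected state while its inode is being unlinked, so the recalculation simply bails out instead of warning. A simplified userspace sketch of that shape; the structures, list handling, and missing locking are stand-ins for the fsnotify ones.

#include <stdio.h>

#define MARK_FLAG_ATTACHED 0x1
#define OBJ_TYPE_DETACHED  (-1)   /* stand-in for an invalid/detached conn->type */

struct mark {
	unsigned int mask;
	unsigned int flags;
	struct mark *next;
};

struct connector {
	int type;                 /* object type, invalidated when detached */
	unsigned int obj_mask;    /* cached union of attached mark masks    */
	struct mark *list;
};

/* Recompute the cached mask; bail out early if the connector was detached. */
static void recalc_mask(struct connector *conn)
{
	unsigned int new_mask = 0;
	struct mark *m;

	if (conn->type == OBJ_TYPE_DETACHED)
		return;                          /* expected during unlink, not a bug */

	for (m = conn->list; m; m = m->next)
		if (m->flags & MARK_FLAG_ATTACHED)
			new_mask |= m->mask;

	conn->obj_mask = new_mask;
}

int main(void)
{
	struct mark m2 = { 0x2, MARK_FLAG_ATTACHED, NULL };
	struct mark m1 = { 0x1, MARK_FLAG_ATTACHED, &m2 };
	struct connector conn = { 0, 0, &m1 };

	recalc_mask(&conn);
	printf("mask = 0x%x\n", conn.obj_mask);  /* prints 0x3 */
	return 0;
}
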
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 860bfbe7a07a..f0cbf58ad4da 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/quotaops.h> | 18 | #include <linux/quotaops.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/writeback.h> | 20 | #include <linux/writeback.h> |
21 | #include <linux/nospec.h> | ||
21 | 22 | ||
22 | static int check_quotactl_permission(struct super_block *sb, int type, int cmd, | 23 | static int check_quotactl_permission(struct super_block *sb, int type, int cmd, |
23 | qid_t id) | 24 | qid_t id) |
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr) | |||
120 | struct if_dqinfo uinfo; | 121 | struct if_dqinfo uinfo; |
121 | int ret; | 122 | int ret; |
122 | 123 | ||
123 | /* This checks whether qc_state has enough entries... */ | ||
124 | BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS); | ||
125 | if (!sb->s_qcop->get_state) | 124 | if (!sb->s_qcop->get_state) |
126 | return -ENOSYS; | 125 | return -ENOSYS; |
127 | ret = sb->s_qcop->get_state(sb, &state); | 126 | ret = sb->s_qcop->get_state(sb, &state); |
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs) | |||
354 | * GETXSTATE quotactl has space for just one set of time limits so | 353 | * GETXSTATE quotactl has space for just one set of time limits so |
355 | * report them for the first enabled quota type | 354 | * report them for the first enabled quota type |
356 | */ | 355 | */ |
357 | for (type = 0; type < XQM_MAXQUOTAS; type++) | 356 | for (type = 0; type < MAXQUOTAS; type++) |
358 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) | 357 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) |
359 | break; | 358 | break; |
360 | BUG_ON(type == XQM_MAXQUOTAS); | 359 | BUG_ON(type == MAXQUOTAS); |
361 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; | 360 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; |
362 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; | 361 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; |
363 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; | 362 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; |
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs) | |||
427 | * GETXSTATV quotactl has space for just one set of time limits so | 426 | * GETXSTATV quotactl has space for just one set of time limits so |
428 | * report them for the first enabled quota type | 427 | * report them for the first enabled quota type |
429 | */ | 428 | */ |
430 | for (type = 0; type < XQM_MAXQUOTAS; type++) | 429 | for (type = 0; type < MAXQUOTAS; type++) |
431 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) | 430 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) |
432 | break; | 431 | break; |
433 | BUG_ON(type == XQM_MAXQUOTAS); | 432 | BUG_ON(type == MAXQUOTAS); |
434 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; | 433 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; |
435 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; | 434 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; |
436 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; | 435 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; |
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, | |||
701 | { | 700 | { |
702 | int ret; | 701 | int ret; |
703 | 702 | ||
704 | if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) | 703 | if (type >= MAXQUOTAS) |
705 | return -EINVAL; | 704 | return -EINVAL; |
705 | type = array_index_nospec(type, MAXQUOTAS); | ||
706 | /* | 706 | /* |
707 | * Quota not supported on this fs? Check this before s_quota_types | 707 | * Quota not supported on this fs? Check this before s_quota_types |
708 | * since they needn't be set if quota is not supported at all. | 708 | * since they needn't be set if quota is not supported at all. |
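
The quota.c hunk unifies the bound on the user-supplied type to MAXQUOTAS and then clamps it with array_index_nospec(), so a mispredicted `type >= MAXQUOTAS` branch cannot speculatively index the per-type arrays out of bounds (a Spectre v1 style gadget). A userspace sketch of the pattern; index_nospec() below is a simplified stand-in modeled on the generic branch-free fallback behind the kernel helper, not the kernel code itself.

#include <stdio.h>

#define MAXQUOTAS 3   /* usr, grp, prj */

/*
 * Branch-free clamp: returns index when index < size and 0 otherwise, without
 * a conditional branch the CPU could mispredict. Relies on arithmetic right
 * shift of a negative value, as the kernel's generic fallback does.
 */
static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	unsigned long mask =
		~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
	return index & mask;
}

static const char *quota_names[MAXQUOTAS] = { "usr", "grp", "prj" };

static int do_quotactl_type(unsigned int type)
{
	if (type >= MAXQUOTAS)
		return -1;                        /* architectural bounds check   */
	type = index_nospec(type, MAXQUOTAS);     /* speculation-safe clamp       */
	printf("quota type: %s\n", quota_names[type]);
	return 0;
}

int main(void)
{
	do_quotactl_type(1);   /* accepted */
	do_quotactl_type(7);   /* rejected */
	return 0;
}
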
diff --git a/fs/udf/super.c b/fs/udf/super.c index 3040dc2a32f6..6f515651a2c2 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb, | |||
764 | struct kernel_lb_addr *root) | 764 | struct kernel_lb_addr *root) |
765 | { | 765 | { |
766 | struct buffer_head *bh = NULL; | 766 | struct buffer_head *bh = NULL; |
767 | long lastblock; | ||
768 | uint16_t ident; | 767 | uint16_t ident; |
769 | struct udf_sb_info *sbi; | ||
770 | 768 | ||
771 | if (fileset->logicalBlockNum != 0xFFFFFFFF || | 769 | if (fileset->logicalBlockNum != 0xFFFFFFFF || |
772 | fileset->partitionReferenceNum != 0xFFFF) { | 770 | fileset->partitionReferenceNum != 0xFFFF) { |
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb, | |||
779 | return 1; | 777 | return 1; |
780 | } | 778 | } |
781 | 779 | ||
782 | } | ||
783 | |||
784 | sbi = UDF_SB(sb); | ||
785 | if (!bh) { | ||
786 | /* Search backwards through the partitions */ | ||
787 | struct kernel_lb_addr newfileset; | ||
788 | |||
789 | /* --> cvg: FIXME - is it reasonable? */ | ||
790 | return 1; | ||
791 | |||
792 | for (newfileset.partitionReferenceNum = sbi->s_partitions - 1; | ||
793 | (newfileset.partitionReferenceNum != 0xFFFF && | ||
794 | fileset->logicalBlockNum == 0xFFFFFFFF && | ||
795 | fileset->partitionReferenceNum == 0xFFFF); | ||
796 | newfileset.partitionReferenceNum--) { | ||
797 | lastblock = sbi->s_partmaps | ||
798 | [newfileset.partitionReferenceNum] | ||
799 | .s_partition_len; | ||
800 | newfileset.logicalBlockNum = 0; | ||
801 | |||
802 | do { | ||
803 | bh = udf_read_ptagged(sb, &newfileset, 0, | ||
804 | &ident); | ||
805 | if (!bh) { | ||
806 | newfileset.logicalBlockNum++; | ||
807 | continue; | ||
808 | } | ||
809 | |||
810 | switch (ident) { | ||
811 | case TAG_IDENT_SBD: | ||
812 | { | ||
813 | struct spaceBitmapDesc *sp; | ||
814 | sp = (struct spaceBitmapDesc *) | ||
815 | bh->b_data; | ||
816 | newfileset.logicalBlockNum += 1 + | ||
817 | ((le32_to_cpu(sp->numOfBytes) + | ||
818 | sizeof(struct spaceBitmapDesc) | ||
819 | - 1) >> sb->s_blocksize_bits); | ||
820 | brelse(bh); | ||
821 | break; | ||
822 | } | ||
823 | case TAG_IDENT_FSD: | ||
824 | *fileset = newfileset; | ||
825 | break; | ||
826 | default: | ||
827 | newfileset.logicalBlockNum++; | ||
828 | brelse(bh); | ||
829 | bh = NULL; | ||
830 | break; | ||
831 | } | ||
832 | } while (newfileset.logicalBlockNum < lastblock && | ||
833 | fileset->logicalBlockNum == 0xFFFFFFFF && | ||
834 | fileset->partitionReferenceNum == 0xFFFF); | ||
835 | } | ||
836 | } | ||
837 | |||
838 | if ((fileset->logicalBlockNum != 0xFFFFFFFF || | ||
839 | fileset->partitionReferenceNum != 0xFFFF) && bh) { | ||
840 | udf_debug("Fileset at block=%u, partition=%u\n", | 780 | udf_debug("Fileset at block=%u, partition=%u\n", |
841 | fileset->logicalBlockNum, | 781 | fileset->logicalBlockNum, |
842 | fileset->partitionReferenceNum); | 782 | fileset->partitionReferenceNum); |
843 | 783 | ||
844 | sbi->s_partition = fileset->partitionReferenceNum; | 784 | UDF_SB(sb)->s_partition = fileset->partitionReferenceNum; |
845 | udf_load_fileset(sb, bh, root); | 785 | udf_load_fileset(sb, bh, root); |
846 | brelse(bh); | 786 | brelse(bh); |
847 | return 0; | 787 | return 0; |
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ | |||
1570 | */ | 1510 | */ |
1571 | #define PART_DESC_ALLOC_STEP 32 | 1511 | #define PART_DESC_ALLOC_STEP 32 |
1572 | 1512 | ||
1513 | struct part_desc_seq_scan_data { | ||
1514 | struct udf_vds_record rec; | ||
1515 | u32 partnum; | ||
1516 | }; | ||
1517 | |||
1573 | struct desc_seq_scan_data { | 1518 | struct desc_seq_scan_data { |
1574 | struct udf_vds_record vds[VDS_POS_LENGTH]; | 1519 | struct udf_vds_record vds[VDS_POS_LENGTH]; |
1575 | unsigned int size_part_descs; | 1520 | unsigned int size_part_descs; |
1576 | struct udf_vds_record *part_descs_loc; | 1521 | unsigned int num_part_descs; |
1522 | struct part_desc_seq_scan_data *part_descs_loc; | ||
1577 | }; | 1523 | }; |
1578 | 1524 | ||
1579 | static struct udf_vds_record *handle_partition_descriptor( | 1525 | static struct udf_vds_record *handle_partition_descriptor( |
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor( | |||
1582 | { | 1528 | { |
1583 | struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; | 1529 | struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; |
1584 | int partnum; | 1530 | int partnum; |
1531 | int i; | ||
1585 | 1532 | ||
1586 | partnum = le16_to_cpu(desc->partitionNumber); | 1533 | partnum = le16_to_cpu(desc->partitionNumber); |
1587 | if (partnum >= data->size_part_descs) { | 1534 | for (i = 0; i < data->num_part_descs; i++) |
1588 | struct udf_vds_record *new_loc; | 1535 | if (partnum == data->part_descs_loc[i].partnum) |
1536 | return &(data->part_descs_loc[i].rec); | ||
1537 | if (data->num_part_descs >= data->size_part_descs) { | ||
1538 | struct part_desc_seq_scan_data *new_loc; | ||
1589 | unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); | 1539 | unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); |
1590 | 1540 | ||
1591 | new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); | 1541 | new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); |
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor( | |||
1597 | data->part_descs_loc = new_loc; | 1547 | data->part_descs_loc = new_loc; |
1598 | data->size_part_descs = new_size; | 1548 | data->size_part_descs = new_size; |
1599 | } | 1549 | } |
1600 | return &(data->part_descs_loc[partnum]); | 1550 | return &(data->part_descs_loc[data->num_part_descs++].rec); |
1601 | } | 1551 | } |
1602 | 1552 | ||
1603 | 1553 | ||
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence( | |||
1647 | 1597 | ||
1648 | memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); | 1598 | memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); |
1649 | data.size_part_descs = PART_DESC_ALLOC_STEP; | 1599 | data.size_part_descs = PART_DESC_ALLOC_STEP; |
1600 | data.num_part_descs = 0; | ||
1650 | data.part_descs_loc = kcalloc(data.size_part_descs, | 1601 | data.part_descs_loc = kcalloc(data.size_part_descs, |
1651 | sizeof(*data.part_descs_loc), | 1602 | sizeof(*data.part_descs_loc), |
1652 | GFP_KERNEL); | 1603 | GFP_KERNEL); |
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence( | |||
1658 | * are in it. | 1609 | * are in it. |
1659 | */ | 1610 | */ |
1660 | for (; (!done && block <= lastblock); block++) { | 1611 | for (; (!done && block <= lastblock); block++) { |
1661 | |||
1662 | bh = udf_read_tagged(sb, block, block, &ident); | 1612 | bh = udf_read_tagged(sb, block, block, &ident); |
1663 | if (!bh) | 1613 | if (!bh) |
1664 | break; | 1614 | break; |
@@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence( | |||
1730 | } | 1680 | } |
1731 | 1681 | ||
1732 | /* Now handle prevailing Partition Descriptors */ | 1682 | /* Now handle prevailing Partition Descriptors */ |
1733 | for (i = 0; i < data.size_part_descs; i++) { | 1683 | for (i = 0; i < data.num_part_descs; i++) { |
1734 | if (data.part_descs_loc[i].block) { | 1684 | ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); |
1735 | ret = udf_load_partdesc(sb, | 1685 | if (ret < 0) |
1736 | data.part_descs_loc[i].block); | 1686 | return ret; |
1737 | if (ret < 0) | ||
1738 | return ret; | ||
1739 | } | ||
1740 | } | 1687 | } |
1741 | 1688 | ||
1742 | return 0; | 1689 | return 0; |
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index ca1d2cc2cdfa..18863d56273c 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h | |||
@@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, | |||
199 | 199 | ||
200 | #define __declare_arg_0(a0, res) \ | 200 | #define __declare_arg_0(a0, res) \ |
201 | struct arm_smccc_res *___res = res; \ | 201 | struct arm_smccc_res *___res = res; \ |
202 | register u32 r0 asm("r0") = a0; \ | 202 | register unsigned long r0 asm("r0") = (u32)a0; \ |
203 | register unsigned long r1 asm("r1"); \ | 203 | register unsigned long r1 asm("r1"); \ |
204 | register unsigned long r2 asm("r2"); \ | 204 | register unsigned long r2 asm("r2"); \ |
205 | register unsigned long r3 asm("r3") | 205 | register unsigned long r3 asm("r3") |
206 | 206 | ||
207 | #define __declare_arg_1(a0, a1, res) \ | 207 | #define __declare_arg_1(a0, a1, res) \ |
208 | typeof(a1) __a1 = a1; \ | ||
208 | struct arm_smccc_res *___res = res; \ | 209 | struct arm_smccc_res *___res = res; \ |
209 | register u32 r0 asm("r0") = a0; \ | 210 | register unsigned long r0 asm("r0") = (u32)a0; \ |
210 | register typeof(a1) r1 asm("r1") = a1; \ | 211 | register unsigned long r1 asm("r1") = __a1; \ |
211 | register unsigned long r2 asm("r2"); \ | 212 | register unsigned long r2 asm("r2"); \ |
212 | register unsigned long r3 asm("r3") | 213 | register unsigned long r3 asm("r3") |
213 | 214 | ||
214 | #define __declare_arg_2(a0, a1, a2, res) \ | 215 | #define __declare_arg_2(a0, a1, a2, res) \ |
216 | typeof(a1) __a1 = a1; \ | ||
217 | typeof(a2) __a2 = a2; \ | ||
215 | struct arm_smccc_res *___res = res; \ | 218 | struct arm_smccc_res *___res = res; \ |
216 | register u32 r0 asm("r0") = a0; \ | 219 | register unsigned long r0 asm("r0") = (u32)a0; \ |
217 | register typeof(a1) r1 asm("r1") = a1; \ | 220 | register unsigned long r1 asm("r1") = __a1; \ |
218 | register typeof(a2) r2 asm("r2") = a2; \ | 221 | register unsigned long r2 asm("r2") = __a2; \ |
219 | register unsigned long r3 asm("r3") | 222 | register unsigned long r3 asm("r3") |
220 | 223 | ||
221 | #define __declare_arg_3(a0, a1, a2, a3, res) \ | 224 | #define __declare_arg_3(a0, a1, a2, a3, res) \ |
225 | typeof(a1) __a1 = a1; \ | ||
226 | typeof(a2) __a2 = a2; \ | ||
227 | typeof(a3) __a3 = a3; \ | ||
222 | struct arm_smccc_res *___res = res; \ | 228 | struct arm_smccc_res *___res = res; \ |
223 | register u32 r0 asm("r0") = a0; \ | 229 | register unsigned long r0 asm("r0") = (u32)a0; \ |
224 | register typeof(a1) r1 asm("r1") = a1; \ | 230 | register unsigned long r1 asm("r1") = __a1; \ |
225 | register typeof(a2) r2 asm("r2") = a2; \ | 231 | register unsigned long r2 asm("r2") = __a2; \ |
226 | register typeof(a3) r3 asm("r3") = a3 | 232 | register unsigned long r3 asm("r3") = __a3 |
227 | 233 | ||
228 | #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ | 234 | #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ |
235 | typeof(a4) __a4 = a4; \ | ||
229 | __declare_arg_3(a0, a1, a2, a3, res); \ | 236 | __declare_arg_3(a0, a1, a2, a3, res); \ |
230 | register typeof(a4) r4 asm("r4") = a4 | 237 | register unsigned long r4 asm("r4") = __a4 |
231 | 238 | ||
232 | #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ | 239 | #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ |
240 | typeof(a5) __a5 = a5; \ | ||
233 | __declare_arg_4(a0, a1, a2, a3, a4, res); \ | 241 | __declare_arg_4(a0, a1, a2, a3, a4, res); \ |
234 | register typeof(a5) r5 asm("r5") = a5 | 242 | register unsigned long r5 asm("r5") = __a5 |
235 | 243 | ||
236 | #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ | 244 | #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ |
245 | typeof(a6) __a6 = a6; \ | ||
237 | __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ | 246 | __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ |
238 | register typeof(a6) r6 asm("r6") = a6 | 247 | register unsigned long r6 asm("r6") = __a6 |
239 | 248 | ||
240 | #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ | 249 | #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ |
250 | typeof(a7) __a7 = a7; \ | ||
241 | __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ | 251 | __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ |
242 | register typeof(a7) r7 asm("r7") = a7 | 252 | register unsigned long r7 asm("r7") = __a7 |
243 | 253 | ||
244 | #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) | 254 | #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) |
245 | #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) | 255 | #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) |
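Note on the arm-smccc hunk above: register variables are no longer declared with the caller's (possibly narrower) argument type; each argument is first evaluated into a typeof() temporary and then loaded into a full-width unsigned long register variable. A simplified sketch of the same pattern with a made-up macro name (the real macros are the __declare_arg_N family shown in the hunk):

    /* Hypothetical two-argument variant: evaluate the expression once,
     * before any fixed register variable is live, then widen it so the
     * whole register is well defined even for u8/u16 arguments.
     */
    #define my_declare_args(a0, a1)                                 \
        typeof(a1) __a1 = (a1);                                     \
        register unsigned long r0 asm("r0") = (u32)(a0);            \
        register unsigned long r1 asm("r1") = __a1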
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 34aec30e06c7..6d766a19f2bb 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h | |||
@@ -56,6 +56,7 @@ struct blkcg { | |||
56 | struct list_head all_blkcgs_node; | 56 | struct list_head all_blkcgs_node; |
57 | #ifdef CONFIG_CGROUP_WRITEBACK | 57 | #ifdef CONFIG_CGROUP_WRITEBACK |
58 | struct list_head cgwb_list; | 58 | struct list_head cgwb_list; |
59 | refcount_t cgwb_refcnt; | ||
59 | #endif | 60 | #endif |
60 | }; | 61 | }; |
61 | 62 | ||
@@ -89,7 +90,6 @@ struct blkg_policy_data { | |||
89 | /* the blkg and policy id this per-policy data belongs to */ | 90 | /* the blkg and policy id this per-policy data belongs to */ |
90 | struct blkcg_gq *blkg; | 91 | struct blkcg_gq *blkg; |
91 | int plid; | 92 | int plid; |
92 | bool offline; | ||
93 | }; | 93 | }; |
94 | 94 | ||
95 | /* | 95 | /* |
@@ -387,6 +387,49 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) | |||
387 | return cpd ? cpd->blkcg : NULL; | 387 | return cpd ? cpd->blkcg : NULL; |
388 | } | 388 | } |
389 | 389 | ||
390 | extern void blkcg_destroy_blkgs(struct blkcg *blkcg); | ||
391 | |||
392 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
393 | |||
394 | /** | ||
395 | * blkcg_cgwb_get - get a reference for blkcg->cgwb_list | ||
396 | * @blkcg: blkcg of interest | ||
397 | * | ||
398 | * This is used to track the number of active wb's related to a blkcg. | ||
399 | */ | ||
400 | static inline void blkcg_cgwb_get(struct blkcg *blkcg) | ||
401 | { | ||
402 | refcount_inc(&blkcg->cgwb_refcnt); | ||
403 | } | ||
404 | |||
405 | /** | ||
406 | * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list | ||
407 | * @blkcg: blkcg of interest | ||
408 | * | ||
409 | * This is used to track the number of active wb's related to a blkcg. | ||
410 | * When this count goes to zero, all active wb has finished so the | ||
411 | * blkcg can continue destruction by calling blkcg_destroy_blkgs(). | ||
412 | * This work may occur in cgwb_release_workfn() on the cgwb_release | ||
413 | * workqueue. | ||
414 | */ | ||
415 | static inline void blkcg_cgwb_put(struct blkcg *blkcg) | ||
416 | { | ||
417 | if (refcount_dec_and_test(&blkcg->cgwb_refcnt)) | ||
418 | blkcg_destroy_blkgs(blkcg); | ||
419 | } | ||
420 | |||
421 | #else | ||
422 | |||
423 | static inline void blkcg_cgwb_get(struct blkcg *blkcg) { } | ||
424 | |||
425 | static inline void blkcg_cgwb_put(struct blkcg *blkcg) | ||
426 | { | ||
427 | /* wb isn't being accounted, so trigger destruction right away */ | ||
428 | blkcg_destroy_blkgs(blkcg); | ||
429 | } | ||
430 | |||
431 | #endif | ||
432 | |||
390 | /** | 433 | /** |
391 | * blkg_path - format cgroup path of blkg | 434 | * blkg_path - format cgroup path of blkg |
392 | * @blkg: blkg of interest | 435 | * @blkg: blkg of interest |
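Note on the blk-cgroup hunk above: the new cgwb_refcnt defers blkg destruction until every writeback structure pointing at the blkcg has been released. A hedged sketch of the intended get/put pairing, with hypothetical attach/release functions standing in for the real writeback code:

    /* Hypothetical callers: take a reference when a wb starts using the
     * blkcg and drop it when the wb goes away; the final put invokes
     * blkcg_destroy_blkgs() via blkcg_cgwb_put().
     */
    static void my_wb_attach(struct bdi_writeback *wb, struct blkcg *blkcg)
    {
        blkcg_cgwb_get(blkcg);          /* wb now pins the blkcg's blkgs */
        wb->blkcg_css = &blkcg->css;
    }

    static void my_wb_release(struct bdi_writeback *wb)
    {
        struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

        blkcg_cgwb_put(blkcg);          /* last put destroys the blkgs */
    }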
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b79387fd57da..65b4eaed1d96 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) | |||
855 | } | 855 | } |
856 | 856 | ||
857 | u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); | 857 | u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); |
858 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); | 858 | void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred); |
859 | 859 | ||
860 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); | 860 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); |
861 | /** | 861 | /** |
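Note on the i2c hunk above: the helper is renamed from i2c_release_dma_safe_msg_buf() to i2c_put_dma_safe_msg_buf(), now taking the buffer first plus an 'xferred' flag so a bounce buffer can be copied back to msg->buf for reads that actually completed. A hedged driver-side sketch, with made-up my_* helpers:

    /* Hypothetical transfer path: fall back to PIO when no DMA-safe
     * buffer is available, otherwise DMA from/into the returned buffer
     * and release it together with the transfer status.
     */
    static int my_xfer_one_msg(struct my_i2c_dev *dev, struct i2c_msg *msg)
    {
        u8 *buf;
        int ret;

        buf = i2c_get_dma_safe_msg_buf(msg, 8);   /* DMA for len >= 8 */
        if (!buf)
            return my_pio_xfer(dev, msg);

        ret = my_dma_xfer(dev, buf, msg->len);
        i2c_put_dma_safe_msg_buf(buf, msg, ret == 0);
        return ret;
    }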
diff --git a/include/linux/of.h b/include/linux/of.h index 4d25e4f952d9..99b0ebf49632 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) | |||
256 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) | 256 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) |
257 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) | 257 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) |
258 | 258 | ||
259 | extern bool of_node_name_eq(const struct device_node *np, const char *name); | ||
260 | extern bool of_node_name_prefix(const struct device_node *np, const char *prefix); | ||
261 | |||
259 | static inline const char *of_node_full_name(const struct device_node *np) | 262 | static inline const char *of_node_full_name(const struct device_node *np) |
260 | { | 263 | { |
261 | return np ? np->full_name : "<no-node>"; | 264 | return np ? np->full_name : "<no-node>"; |
@@ -290,6 +293,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node, | |||
290 | extern struct device_node *of_get_next_available_child( | 293 | extern struct device_node *of_get_next_available_child( |
291 | const struct device_node *node, struct device_node *prev); | 294 | const struct device_node *node, struct device_node *prev); |
292 | 295 | ||
296 | extern struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
297 | const char *compatible); | ||
293 | extern struct device_node *of_get_child_by_name(const struct device_node *node, | 298 | extern struct device_node *of_get_child_by_name(const struct device_node *node, |
294 | const char *name); | 299 | const char *name); |
295 | 300 | ||
@@ -561,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode) | |||
561 | return NULL; | 566 | return NULL; |
562 | } | 567 | } |
563 | 568 | ||
569 | static inline bool of_node_name_eq(const struct device_node *np, const char *name) | ||
570 | { | ||
571 | return false; | ||
572 | } | ||
573 | |||
574 | static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix) | ||
575 | { | ||
576 | return false; | ||
577 | } | ||
578 | |||
564 | static inline const char* of_node_full_name(const struct device_node *np) | 579 | static inline const char* of_node_full_name(const struct device_node *np) |
565 | { | 580 | { |
566 | return "<no-node>"; | 581 | return "<no-node>"; |
@@ -632,6 +647,12 @@ static inline bool of_have_populated_dt(void) | |||
632 | return false; | 647 | return false; |
633 | } | 648 | } |
634 | 649 | ||
650 | static inline struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
651 | const char *compatible) | ||
652 | { | ||
653 | return NULL; | ||
654 | } | ||
655 | |||
635 | static inline struct device_node *of_get_child_by_name( | 656 | static inline struct device_node *of_get_child_by_name( |
636 | const struct device_node *node, | 657 | const struct device_node *node, |
637 | const char *name) | 658 | const char *name) |
@@ -967,6 +988,18 @@ static inline struct device_node *of_find_matching_node( | |||
967 | return of_find_matching_node_and_match(from, matches, NULL); | 988 | return of_find_matching_node_and_match(from, matches, NULL); |
968 | } | 989 | } |
969 | 990 | ||
991 | static inline const char *of_node_get_device_type(const struct device_node *np) | ||
992 | { | ||
993 | return of_get_property(np, "type", NULL); | ||
994 | } | ||
995 | |||
996 | static inline bool of_node_is_type(const struct device_node *np, const char *type) | ||
997 | { | ||
998 | const char *match = of_node_get_device_type(np); | ||
999 | |||
1000 | return np && match && type && !strcmp(match, type); | ||
1001 | } | ||
1002 | |||
970 | /** | 1003 | /** |
971 | * of_property_count_u8_elems - Count the number of u8 elements in a property | 1004 | * of_property_count_u8_elems - Count the number of u8 elements in a property |
972 | * | 1005 | * |
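Note on the of.h hunk above: it declares of_node_name_eq(), of_node_name_prefix(), of_get_compatible_child() and the of_node_is_type()/of_node_get_device_type() helpers, with stubs for !CONFIG_OF builds. A hedged usage sketch with a made-up compatible string and node name:

    #include <linux/of.h>

    /* Hypothetical probe helper: look up a child by compatible string
     * and match on its node name instead of open-coding strcmp() on
     * np->name.
     */
    static int my_find_widget(struct device_node *parent)
    {
        struct device_node *child;

        child = of_get_compatible_child(parent, "acme,widget");
        if (!child)
            return -ENODEV;

        if (of_node_name_eq(child, "widget"))
            pr_info("widget node found\n");

        of_node_put(child);     /* of_get_compatible_child() takes a ref */
        return 0;
    }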
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 99d366cb0e9f..d157983b84cf 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -3084,4 +3084,6 @@ | |||
3084 | 3084 | ||
3085 | #define PCI_VENDOR_ID_OCZ 0x1b85 | 3085 | #define PCI_VENDOR_ID_OCZ 0x1b85 |
3086 | 3086 | ||
3087 | #define PCI_VENDOR_ID_NCUBE 0x10ff | ||
3088 | |||
3087 | #endif /* _LINUX_PCI_IDS_H */ | 3089 | #endif /* _LINUX_PCI_IDS_H */ |
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 9abc0ca7259b..9f0aa1b48c78 100644 --- a/include/linux/platform_data/ina2xx.h +++ b/include/linux/platform_data/ina2xx.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Driver for Texas Instruments INA219, INA226 power monitor chips | 2 | * Driver for Texas Instruments INA219, INA226 power monitor chips |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> | 4 | * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
diff --git a/include/linux/quota.h b/include/linux/quota.h index ca9772c8e48b..f32dd270b8e3 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -408,13 +408,7 @@ struct qc_type_state { | |||
408 | 408 | ||
409 | struct qc_state { | 409 | struct qc_state { |
410 | unsigned int s_incoredqs; /* Number of dquots in core */ | 410 | unsigned int s_incoredqs; /* Number of dquots in core */ |
411 | /* | 411 | struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */ |
412 | * Per quota type information. The array should really have | ||
413 | * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in | ||
414 | * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS | ||
415 | * supports project quotas, this can be changed to MAXQUOTAS | ||
416 | */ | ||
417 | struct qc_type_state s_state[XQM_MAXQUOTAS]; | ||
418 | }; | 412 | }; |
419 | 413 | ||
420 | /* Structure for communicating via ->set_info */ | 414 | /* Structure for communicating via ->set_info */ |
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 5d738804e3d6..a5a3cfc3c2fa 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); | |||
258 | extern int persistent_clock_is_local; | 258 | extern int persistent_clock_is_local; |
259 | 259 | ||
260 | extern void read_persistent_clock64(struct timespec64 *ts); | 260 | extern void read_persistent_clock64(struct timespec64 *ts); |
261 | void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock, | 261 | void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock, |
262 | struct timespec64 *boot_offset); | 262 | struct timespec64 *boot_offset); |
263 | extern int update_persistent_clock64(struct timespec64 now); | 263 | extern int update_persistent_clock64(struct timespec64 now); |
264 | 264 | ||
265 | /* | 265 | /* |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 7f2e16e76ac4..041f7e56a289 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -158,8 +158,10 @@ extern void syscall_unregfunc(void); | |||
158 | * For rcuidle callers, use srcu since sched-rcu \ | 158 | * For rcuidle callers, use srcu since sched-rcu \ |
159 | * doesn't work from the idle path. \ | 159 | * doesn't work from the idle path. \ |
160 | */ \ | 160 | */ \ |
161 | if (rcuidle) \ | 161 | if (rcuidle) { \ |
162 | idx = srcu_read_lock_notrace(&tracepoint_srcu); \ | 162 | idx = srcu_read_lock_notrace(&tracepoint_srcu); \ |
163 | rcu_irq_enter_irqson(); \ | ||
164 | } \ | ||
163 | \ | 165 | \ |
164 | it_func_ptr = rcu_dereference_raw((tp)->funcs); \ | 166 | it_func_ptr = rcu_dereference_raw((tp)->funcs); \ |
165 | \ | 167 | \ |
@@ -171,8 +173,10 @@ extern void syscall_unregfunc(void); | |||
171 | } while ((++it_func_ptr)->func); \ | 173 | } while ((++it_func_ptr)->func); \ |
172 | } \ | 174 | } \ |
173 | \ | 175 | \ |
174 | if (rcuidle) \ | 176 | if (rcuidle) { \ |
177 | rcu_irq_exit_irqson(); \ | ||
175 | srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ | 178 | srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ |
179 | } \ | ||
176 | \ | 180 | \ |
177 | preempt_enable_notrace(); \ | 181 | preempt_enable_notrace(); \ |
178 | } while (0) | 182 | } while (0) |
diff --git a/include/net/act_api.h b/include/net/act_api.h index 1ad5b19e83a9..970303448c90 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h | |||
@@ -23,13 +23,11 @@ struct tc_action { | |||
23 | const struct tc_action_ops *ops; | 23 | const struct tc_action_ops *ops; |
24 | __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ | 24 | __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ |
25 | __u32 order; | 25 | __u32 order; |
26 | struct list_head list; | ||
27 | struct tcf_idrinfo *idrinfo; | 26 | struct tcf_idrinfo *idrinfo; |
28 | 27 | ||
29 | u32 tcfa_index; | 28 | u32 tcfa_index; |
30 | refcount_t tcfa_refcnt; | 29 | refcount_t tcfa_refcnt; |
31 | atomic_t tcfa_bindcnt; | 30 | atomic_t tcfa_bindcnt; |
32 | u32 tcfa_capab; | ||
33 | int tcfa_action; | 31 | int tcfa_action; |
34 | struct tcf_t tcfa_tm; | 32 | struct tcf_t tcfa_tm; |
35 | struct gnet_stats_basic_packed tcfa_bstats; | 33 | struct gnet_stats_basic_packed tcfa_bstats; |
@@ -44,7 +42,6 @@ struct tc_action { | |||
44 | #define tcf_index common.tcfa_index | 42 | #define tcf_index common.tcfa_index |
45 | #define tcf_refcnt common.tcfa_refcnt | 43 | #define tcf_refcnt common.tcfa_refcnt |
46 | #define tcf_bindcnt common.tcfa_bindcnt | 44 | #define tcf_bindcnt common.tcfa_bindcnt |
47 | #define tcf_capab common.tcfa_capab | ||
48 | #define tcf_action common.tcfa_action | 45 | #define tcf_action common.tcfa_action |
49 | #define tcf_tm common.tcfa_tm | 46 | #define tcf_tm common.tcfa_tm |
50 | #define tcf_bstats common.tcfa_bstats | 47 | #define tcf_bstats common.tcfa_bstats |
@@ -102,7 +99,6 @@ struct tc_action_ops { | |||
102 | size_t (*get_fill_size)(const struct tc_action *act); | 99 | size_t (*get_fill_size)(const struct tc_action *act); |
103 | struct net_device *(*get_dev)(const struct tc_action *a); | 100 | struct net_device *(*get_dev)(const struct tc_action *a); |
104 | void (*put_dev)(struct net_device *dev); | 101 | void (*put_dev)(struct net_device *dev); |
105 | int (*delete)(struct net *net, u32 index); | ||
106 | }; | 102 | }; |
107 | 103 | ||
108 | struct tc_action_net { | 104 | struct tc_action_net { |
@@ -148,8 +144,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, | |||
148 | const struct tc_action_ops *ops, | 144 | const struct tc_action_ops *ops, |
149 | struct netlink_ext_ack *extack); | 145 | struct netlink_ext_ack *extack); |
150 | int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index); | 146 | int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index); |
151 | bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, | ||
152 | int bind); | ||
153 | int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, | 147 | int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, |
154 | struct tc_action **a, const struct tc_action_ops *ops, | 148 | struct tc_action **a, const struct tc_action_ops *ops, |
155 | int bind, bool cpustats); | 149 | int bind, bool cpustats); |
@@ -158,7 +152,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a); | |||
158 | void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); | 152 | void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); |
159 | int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, | 153 | int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, |
160 | struct tc_action **a, int bind); | 154 | struct tc_action **a, int bind); |
161 | int tcf_idr_delete_index(struct tc_action_net *tn, u32 index); | ||
162 | int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); | 155 | int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); |
163 | 156 | ||
164 | static inline int tcf_idr_release(struct tc_action *a, bool bind) | 157 | static inline int tcf_idr_release(struct tc_action *a, bool bind) |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9a850973e09a..8ebabc9873d1 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -4865,8 +4865,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator); | |||
4865 | * | 4865 | * |
4866 | * Return: 0 on success. -ENODATA. | 4866 | * Return: 0 on success. -ENODATA. |
4867 | */ | 4867 | */ |
4868 | int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr, | 4868 | int reg_query_regdb_wmm(char *alpha2, int freq, |
4869 | struct ieee80211_wmm_rule *rule); | 4869 | struct ieee80211_reg_rule *rule); |
4870 | 4870 | ||
4871 | /* | 4871 | /* |
4872 | * callbacks for asynchronous cfg80211 methods, notification | 4872 | * callbacks for asynchronous cfg80211 methods, notification |
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ef727f71336e..75a3f3fdb359 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
@@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) | |||
298 | #endif | 298 | #endif |
299 | } | 299 | } |
300 | 300 | ||
301 | static inline void tcf_exts_to_list(const struct tcf_exts *exts, | ||
302 | struct list_head *actions) | ||
303 | { | ||
304 | #ifdef CONFIG_NET_CLS_ACT | 301 | #ifdef CONFIG_NET_CLS_ACT |
305 | int i; | 302 | #define tcf_exts_for_each_action(i, a, exts) \ |
306 | 303 | for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++) | |
307 | for (i = 0; i < exts->nr_actions; i++) { | 304 | #else |
308 | struct tc_action *a = exts->actions[i]; | 305 | #define tcf_exts_for_each_action(i, a, exts) \ |
309 | 306 | for (; 0; (void)(i), (void)(a), (void)(exts)) | |
310 | list_add_tail(&a->list, actions); | ||
311 | } | ||
312 | #endif | 307 | #endif |
313 | } | ||
314 | 308 | ||
315 | static inline void | 309 | static inline void |
316 | tcf_exts_stats_update(const struct tcf_exts *exts, | 310 | tcf_exts_stats_update(const struct tcf_exts *exts, |
@@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts) | |||
361 | #endif | 355 | #endif |
362 | } | 356 | } |
363 | 357 | ||
358 | static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts) | ||
359 | { | ||
360 | #ifdef CONFIG_NET_CLS_ACT | ||
361 | return exts->actions[0]; | ||
362 | #else | ||
363 | return NULL; | ||
364 | #endif | ||
365 | } | ||
366 | |||
364 | /** | 367 | /** |
365 | * tcf_exts_exec - execute tc filter extensions | 368 | * tcf_exts_exec - execute tc filter extensions |
366 | * @skb: socket buffer | 369 | * @skb: socket buffer |
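Note on the pkt_cls.h hunk above: tcf_exts_to_list(), which copied actions onto a caller-provided list via the now-removed tc_action.list member, is replaced by a direct iterator over exts->actions plus tcf_exts_first_action(). A hedged sketch of a caller converted to the new macro:

    /* Hypothetical dump helper: walk the actions attached to a filter's
     * extensions without building a temporary list.
     */
    static void my_dump_actions(const struct tcf_exts *exts)
    {
        const struct tc_action *a;
        int i;

        tcf_exts_for_each_action(i, a, exts)
            pr_info("action %d: index %u\n", i, a->tcfa_index);
    }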
diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 60f8cc86a447..3469750df0f4 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h | |||
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule { | |||
217 | struct ieee80211_reg_rule { | 217 | struct ieee80211_reg_rule { |
218 | struct ieee80211_freq_range freq_range; | 218 | struct ieee80211_freq_range freq_range; |
219 | struct ieee80211_power_rule power_rule; | 219 | struct ieee80211_power_rule power_rule; |
220 | struct ieee80211_wmm_rule *wmm_rule; | 220 | struct ieee80211_wmm_rule wmm_rule; |
221 | u32 flags; | 221 | u32 flags; |
222 | u32 dfs_cac_ms; | 222 | u32 dfs_cac_ms; |
223 | bool has_wmm; | ||
223 | }; | 224 | }; |
224 | 225 | ||
225 | struct ieee80211_regdomain { | 226 | struct ieee80211_regdomain { |
226 | struct rcu_head rcu_head; | 227 | struct rcu_head rcu_head; |
227 | u32 n_reg_rules; | 228 | u32 n_reg_rules; |
228 | u32 n_wmm_rules; | ||
229 | char alpha2[3]; | 229 | char alpha2[3]; |
230 | enum nl80211_dfs_regions dfs_region; | 230 | enum nl80211_dfs_regions dfs_region; |
231 | struct ieee80211_reg_rule reg_rules[]; | 231 | struct ieee80211_reg_rule reg_rules[]; |
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index 7b8c9e19bad1..910cc4334b21 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h | |||
@@ -65,7 +65,7 @@ | |||
65 | 65 | ||
66 | /* keyctl structures */ | 66 | /* keyctl structures */ |
67 | struct keyctl_dh_params { | 67 | struct keyctl_dh_params { |
68 | __s32 private; | 68 | __s32 dh_private; |
69 | __s32 prime; | 69 | __s32 prime; |
70 | __s32 base; | 70 | __s32 base; |
71 | }; | 71 | }; |
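Note on the keyctl.h hunk above: the field is renamed from 'private' to 'dh_private' because 'private' is a reserved keyword in C++, which made this UAPI header unusable from C++ code. A hedged userspace sketch (key serials are placeholders, and the raw syscall is used rather than libkeyutils):

    #include <linux/keyctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hypothetical helper performing KEYCTL_DH_COMPUTE with the renamed
     * field; the three key serial numbers are assumed to reference
     * already-loaded user keys.
     */
    static long my_dh_compute(int priv_key, int prime_key, int base_key,
                              char *out, size_t outlen)
    {
        struct keyctl_dh_params params = {
            .dh_private = priv_key,   /* formerly '.private' */
            .prime      = prime_key,
            .base       = base_key,
        };

        return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, &params,
                       out, outlen, NULL);
    }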
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index dc520e1a4123..8b73cb603c5f 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h | |||
@@ -37,6 +37,7 @@ | |||
37 | 37 | ||
38 | #include <linux/types.h> | 38 | #include <linux/types.h> |
39 | #include <linux/socket.h> /* For __kernel_sockaddr_storage. */ | 39 | #include <linux/socket.h> /* For __kernel_sockaddr_storage. */ |
40 | #include <linux/in6.h> /* For struct in6_addr. */ | ||
40 | 41 | ||
41 | #define RDS_IB_ABI_VERSION 0x301 | 42 | #define RDS_IB_ABI_VERSION 0x301 |
42 | 43 | ||
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index b1e22c40c4b6..84c3de89696a 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h | |||
@@ -176,7 +176,7 @@ struct vhost_memory { | |||
176 | #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 | 176 | #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 |
177 | 177 | ||
178 | #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) | 178 | #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) |
179 | #define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64) | 179 | #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) |
180 | 180 | ||
181 | /* VHOST_NET specific defines */ | 181 | /* VHOST_NET specific defines */ |
182 | 182 | ||
diff --git a/ipc/shm.c b/ipc/shm.c --- a/ipc/shm.c +++ b/ipc/shm.c | |||
@@ -199,6 +199,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) | |||
199 | } | 199 | } |
200 | 200 | ||
201 | ipc_unlock_object(ipcp); | 201 | ipc_unlock_object(ipcp); |
202 | ipcp = ERR_PTR(-EIDRM); | ||
202 | err: | 203 | err: |
203 | rcu_read_unlock(); | 204 | rcu_read_unlock(); |
204 | /* | 205 | /* |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 04b8eda94e7d..03cc59ee9c95 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/jhash.h> | 15 | #include <linux/jhash.h> |
16 | #include <linux/filter.h> | 16 | #include <linux/filter.h> |
17 | #include <linux/rculist_nulls.h> | 17 | #include <linux/rculist_nulls.h> |
18 | #include <linux/random.h> | ||
18 | #include <uapi/linux/btf.h> | 19 | #include <uapi/linux/btf.h> |
19 | #include "percpu_freelist.h" | 20 | #include "percpu_freelist.h" |
20 | #include "bpf_lru_list.h" | 21 | #include "bpf_lru_list.h" |
@@ -41,6 +42,7 @@ struct bpf_htab { | |||
41 | atomic_t count; /* number of elements in this hashtable */ | 42 | atomic_t count; /* number of elements in this hashtable */ |
42 | u32 n_buckets; /* number of hash buckets */ | 43 | u32 n_buckets; /* number of hash buckets */ |
43 | u32 elem_size; /* size of each element in bytes */ | 44 | u32 elem_size; /* size of each element in bytes */ |
45 | u32 hashrnd; | ||
44 | }; | 46 | }; |
45 | 47 | ||
46 | /* each htab element is struct htab_elem + key + value */ | 48 | /* each htab element is struct htab_elem + key + value */ |
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
371 | if (!htab->buckets) | 373 | if (!htab->buckets) |
372 | goto free_htab; | 374 | goto free_htab; |
373 | 375 | ||
376 | htab->hashrnd = get_random_int(); | ||
374 | for (i = 0; i < htab->n_buckets; i++) { | 377 | for (i = 0; i < htab->n_buckets; i++) { |
375 | INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); | 378 | INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); |
376 | raw_spin_lock_init(&htab->buckets[i].lock); | 379 | raw_spin_lock_init(&htab->buckets[i].lock); |
@@ -402,9 +405,9 @@ free_htab: | |||
402 | return ERR_PTR(err); | 405 | return ERR_PTR(err); |
403 | } | 406 | } |
404 | 407 | ||
405 | static inline u32 htab_map_hash(const void *key, u32 key_len) | 408 | static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd) |
406 | { | 409 | { |
407 | return jhash(key, key_len, 0); | 410 | return jhash(key, key_len, hashrnd); |
408 | } | 411 | } |
409 | 412 | ||
410 | static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) | 413 | static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) |
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) | |||
470 | 473 | ||
471 | key_size = map->key_size; | 474 | key_size = map->key_size; |
472 | 475 | ||
473 | hash = htab_map_hash(key, key_size); | 476 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
474 | 477 | ||
475 | head = select_bucket(htab, hash); | 478 | head = select_bucket(htab, hash); |
476 | 479 | ||
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) | |||
597 | if (!key) | 600 | if (!key) |
598 | goto find_first_elem; | 601 | goto find_first_elem; |
599 | 602 | ||
600 | hash = htab_map_hash(key, key_size); | 603 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
601 | 604 | ||
602 | head = select_bucket(htab, hash); | 605 | head = select_bucket(htab, hash); |
603 | 606 | ||
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
824 | 827 | ||
825 | key_size = map->key_size; | 828 | key_size = map->key_size; |
826 | 829 | ||
827 | hash = htab_map_hash(key, key_size); | 830 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
828 | 831 | ||
829 | b = __select_bucket(htab, hash); | 832 | b = __select_bucket(htab, hash); |
830 | head = &b->head; | 833 | head = &b->head; |
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
880 | 883 | ||
881 | key_size = map->key_size; | 884 | key_size = map->key_size; |
882 | 885 | ||
883 | hash = htab_map_hash(key, key_size); | 886 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
884 | 887 | ||
885 | b = __select_bucket(htab, hash); | 888 | b = __select_bucket(htab, hash); |
886 | head = &b->head; | 889 | head = &b->head; |
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, | |||
945 | 948 | ||
946 | key_size = map->key_size; | 949 | key_size = map->key_size; |
947 | 950 | ||
948 | hash = htab_map_hash(key, key_size); | 951 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
949 | 952 | ||
950 | b = __select_bucket(htab, hash); | 953 | b = __select_bucket(htab, hash); |
951 | head = &b->head; | 954 | head = &b->head; |
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, | |||
998 | 1001 | ||
999 | key_size = map->key_size; | 1002 | key_size = map->key_size; |
1000 | 1003 | ||
1001 | hash = htab_map_hash(key, key_size); | 1004 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
1002 | 1005 | ||
1003 | b = __select_bucket(htab, hash); | 1006 | b = __select_bucket(htab, hash); |
1004 | head = &b->head; | 1007 | head = &b->head; |
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key) | |||
1071 | 1074 | ||
1072 | key_size = map->key_size; | 1075 | key_size = map->key_size; |
1073 | 1076 | ||
1074 | hash = htab_map_hash(key, key_size); | 1077 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
1075 | b = __select_bucket(htab, hash); | 1078 | b = __select_bucket(htab, hash); |
1076 | head = &b->head; | 1079 | head = &b->head; |
1077 | 1080 | ||
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) | |||
1103 | 1106 | ||
1104 | key_size = map->key_size; | 1107 | key_size = map->key_size; |
1105 | 1108 | ||
1106 | hash = htab_map_hash(key, key_size); | 1109 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
1107 | b = __select_bucket(htab, hash); | 1110 | b = __select_bucket(htab, hash); |
1108 | head = &b->head; | 1111 | head = &b->head; |
1109 | 1112 | ||
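Note on the hashtab hunk above: every BPF hash map now gets its own random jhash seed, chosen with get_random_int() at map creation, so userspace can no longer precompute colliding keys against the previously fixed seed of 0. The seeding pattern in isolation, applied to a hypothetical table:

    #include <linux/jhash.h>
    #include <linux/random.h>

    /* Hypothetical table: the seed is picked once at init time and used
     * for every subsequent hash, so bucket placement differs per table
     * and per boot.
     */
    struct my_htab {
        u32 n_buckets;          /* power of two */
        u32 hashrnd;            /* per-table random seed */
    };

    static void my_htab_init(struct my_htab *htab, u32 n_buckets)
    {
        htab->n_buckets = n_buckets;
        htab->hashrnd = get_random_int();
    }

    static u32 my_htab_bucket(const struct my_htab *htab,
                              const void *key, u32 key_len)
    {
        return jhash(key, key_len, htab->hashrnd) & (htab->n_buckets - 1);
    }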
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 98e621a29e8e..488ef9663c01 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
@@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk) | |||
236 | } | 236 | } |
237 | 237 | ||
238 | static void smap_release_sock(struct smap_psock *psock, struct sock *sock); | 238 | static void smap_release_sock(struct smap_psock *psock, struct sock *sock); |
239 | static int free_start_sg(struct sock *sk, struct sk_msg_buff *md); | 239 | static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge); |
240 | 240 | ||
241 | static void bpf_tcp_release(struct sock *sk) | 241 | static void bpf_tcp_release(struct sock *sk) |
242 | { | 242 | { |
@@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk) | |||
248 | goto out; | 248 | goto out; |
249 | 249 | ||
250 | if (psock->cork) { | 250 | if (psock->cork) { |
251 | free_start_sg(psock->sock, psock->cork); | 251 | free_start_sg(psock->sock, psock->cork, true); |
252 | kfree(psock->cork); | 252 | kfree(psock->cork); |
253 | psock->cork = NULL; | 253 | psock->cork = NULL; |
254 | } | 254 | } |
@@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
330 | close_fun = psock->save_close; | 330 | close_fun = psock->save_close; |
331 | 331 | ||
332 | if (psock->cork) { | 332 | if (psock->cork) { |
333 | free_start_sg(psock->sock, psock->cork); | 333 | free_start_sg(psock->sock, psock->cork, true); |
334 | kfree(psock->cork); | 334 | kfree(psock->cork); |
335 | psock->cork = NULL; | 335 | psock->cork = NULL; |
336 | } | 336 | } |
337 | 337 | ||
338 | list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { | 338 | list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { |
339 | list_del(&md->list); | 339 | list_del(&md->list); |
340 | free_start_sg(psock->sock, md); | 340 | free_start_sg(psock->sock, md, true); |
341 | kfree(md); | 341 | kfree(md); |
342 | } | 342 | } |
343 | 343 | ||
@@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
369 | /* If another thread deleted this object skip deletion. | 369 | /* If another thread deleted this object skip deletion. |
370 | * The refcnt on psock may or may not be zero. | 370 | * The refcnt on psock may or may not be zero. |
371 | */ | 371 | */ |
372 | if (l) { | 372 | if (l && l == link) { |
373 | hlist_del_rcu(&link->hash_node); | 373 | hlist_del_rcu(&link->hash_node); |
374 | smap_release_sock(psock, link->sk); | 374 | smap_release_sock(psock, link->sk); |
375 | free_htab_elem(htab, link); | 375 | free_htab_elem(htab, link); |
@@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes, | |||
570 | md->sg_start = i; | 570 | md->sg_start = i; |
571 | } | 571 | } |
572 | 572 | ||
573 | static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) | 573 | static int free_sg(struct sock *sk, int start, |
574 | struct sk_msg_buff *md, bool charge) | ||
574 | { | 575 | { |
575 | struct scatterlist *sg = md->sg_data; | 576 | struct scatterlist *sg = md->sg_data; |
576 | int i = start, free = 0; | 577 | int i = start, free = 0; |
577 | 578 | ||
578 | while (sg[i].length) { | 579 | while (sg[i].length) { |
579 | free += sg[i].length; | 580 | free += sg[i].length; |
580 | sk_mem_uncharge(sk, sg[i].length); | 581 | if (charge) |
582 | sk_mem_uncharge(sk, sg[i].length); | ||
581 | if (!md->skb) | 583 | if (!md->skb) |
582 | put_page(sg_page(&sg[i])); | 584 | put_page(sg_page(&sg[i])); |
583 | sg[i].length = 0; | 585 | sg[i].length = 0; |
@@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) | |||
594 | return free; | 596 | return free; |
595 | } | 597 | } |
596 | 598 | ||
597 | static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) | 599 | static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge) |
598 | { | 600 | { |
599 | int free = free_sg(sk, md->sg_start, md); | 601 | int free = free_sg(sk, md->sg_start, md, charge); |
600 | 602 | ||
601 | md->sg_start = md->sg_end; | 603 | md->sg_start = md->sg_end; |
602 | return free; | 604 | return free; |
@@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) | |||
604 | 606 | ||
605 | static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md) | 607 | static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md) |
606 | { | 608 | { |
607 | return free_sg(sk, md->sg_curr, md); | 609 | return free_sg(sk, md->sg_curr, md, true); |
608 | } | 610 | } |
609 | 611 | ||
610 | static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md) | 612 | static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md) |
@@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, | |||
718 | list_add_tail(&r->list, &psock->ingress); | 720 | list_add_tail(&r->list, &psock->ingress); |
719 | sk->sk_data_ready(sk); | 721 | sk->sk_data_ready(sk); |
720 | } else { | 722 | } else { |
721 | free_start_sg(sk, r); | 723 | free_start_sg(sk, r, true); |
722 | kfree(r); | 724 | kfree(r); |
723 | } | 725 | } |
724 | 726 | ||
@@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, | |||
752 | release_sock(sk); | 754 | release_sock(sk); |
753 | } | 755 | } |
754 | smap_release_sock(psock, sk); | 756 | smap_release_sock(psock, sk); |
755 | if (unlikely(err)) | 757 | return err; |
756 | goto out; | ||
757 | return 0; | ||
758 | out_rcu: | 758 | out_rcu: |
759 | rcu_read_unlock(); | 759 | rcu_read_unlock(); |
760 | out: | 760 | return 0; |
761 | free_bytes_sg(NULL, send, md, false); | ||
762 | return err; | ||
763 | } | 761 | } |
764 | 762 | ||
765 | static inline void bpf_md_init(struct smap_psock *psock) | 763 | static inline void bpf_md_init(struct smap_psock *psock) |
@@ -822,7 +820,7 @@ more_data: | |||
822 | case __SK_PASS: | 820 | case __SK_PASS: |
823 | err = bpf_tcp_push(sk, send, m, flags, true); | 821 | err = bpf_tcp_push(sk, send, m, flags, true); |
824 | if (unlikely(err)) { | 822 | if (unlikely(err)) { |
825 | *copied -= free_start_sg(sk, m); | 823 | *copied -= free_start_sg(sk, m, true); |
826 | break; | 824 | break; |
827 | } | 825 | } |
828 | 826 | ||
@@ -845,16 +843,17 @@ more_data: | |||
845 | lock_sock(sk); | 843 | lock_sock(sk); |
846 | 844 | ||
847 | if (unlikely(err < 0)) { | 845 | if (unlikely(err < 0)) { |
848 | free_start_sg(sk, m); | 846 | int free = free_start_sg(sk, m, false); |
847 | |||
849 | psock->sg_size = 0; | 848 | psock->sg_size = 0; |
850 | if (!cork) | 849 | if (!cork) |
851 | *copied -= send; | 850 | *copied -= free; |
852 | } else { | 851 | } else { |
853 | psock->sg_size -= send; | 852 | psock->sg_size -= send; |
854 | } | 853 | } |
855 | 854 | ||
856 | if (cork) { | 855 | if (cork) { |
857 | free_start_sg(sk, m); | 856 | free_start_sg(sk, m, true); |
858 | psock->sg_size = 0; | 857 | psock->sg_size = 0; |
859 | kfree(m); | 858 | kfree(m); |
860 | m = NULL; | 859 | m = NULL; |
@@ -912,6 +911,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
912 | 911 | ||
913 | if (unlikely(flags & MSG_ERRQUEUE)) | 912 | if (unlikely(flags & MSG_ERRQUEUE)) |
914 | return inet_recv_error(sk, msg, len, addr_len); | 913 | return inet_recv_error(sk, msg, len, addr_len); |
914 | if (!skb_queue_empty(&sk->sk_receive_queue)) | ||
915 | return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); | ||
915 | 916 | ||
916 | rcu_read_lock(); | 917 | rcu_read_lock(); |
917 | psock = smap_psock_sk(sk); | 918 | psock = smap_psock_sk(sk); |
@@ -922,9 +923,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
922 | goto out; | 923 | goto out; |
923 | rcu_read_unlock(); | 924 | rcu_read_unlock(); |
924 | 925 | ||
925 | if (!skb_queue_empty(&sk->sk_receive_queue)) | ||
926 | return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); | ||
927 | |||
928 | lock_sock(sk); | 926 | lock_sock(sk); |
929 | bytes_ready: | 927 | bytes_ready: |
930 | while (copied != len) { | 928 | while (copied != len) { |
@@ -1122,7 +1120,7 @@ wait_for_memory: | |||
1122 | err = sk_stream_wait_memory(sk, &timeo); | 1120 | err = sk_stream_wait_memory(sk, &timeo); |
1123 | if (err) { | 1121 | if (err) { |
1124 | if (m && m != psock->cork) | 1122 | if (m && m != psock->cork) |
1125 | free_start_sg(sk, m); | 1123 | free_start_sg(sk, m, true); |
1126 | goto out_err; | 1124 | goto out_err; |
1127 | } | 1125 | } |
1128 | } | 1126 | } |
@@ -1427,12 +1425,15 @@ out: | |||
1427 | static void smap_write_space(struct sock *sk) | 1425 | static void smap_write_space(struct sock *sk) |
1428 | { | 1426 | { |
1429 | struct smap_psock *psock; | 1427 | struct smap_psock *psock; |
1428 | void (*write_space)(struct sock *sk); | ||
1430 | 1429 | ||
1431 | rcu_read_lock(); | 1430 | rcu_read_lock(); |
1432 | psock = smap_psock_sk(sk); | 1431 | psock = smap_psock_sk(sk); |
1433 | if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) | 1432 | if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) |
1434 | schedule_work(&psock->tx_work); | 1433 | schedule_work(&psock->tx_work); |
1434 | write_space = psock->save_write_space; | ||
1435 | rcu_read_unlock(); | 1435 | rcu_read_unlock(); |
1436 | write_space(sk); | ||
1436 | } | 1437 | } |
1437 | 1438 | ||
1438 | static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) | 1439 | static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) |
@@ -1461,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu) | |||
1461 | schedule_work(&psock->gc_work); | 1462 | schedule_work(&psock->gc_work); |
1462 | } | 1463 | } |
1463 | 1464 | ||
1465 | static bool psock_is_smap_sk(struct sock *sk) | ||
1466 | { | ||
1467 | return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops; | ||
1468 | } | ||
1469 | |||
1464 | static void smap_release_sock(struct smap_psock *psock, struct sock *sock) | 1470 | static void smap_release_sock(struct smap_psock *psock, struct sock *sock) |
1465 | { | 1471 | { |
1466 | if (refcount_dec_and_test(&psock->refcnt)) { | 1472 | if (refcount_dec_and_test(&psock->refcnt)) { |
1467 | tcp_cleanup_ulp(sock); | 1473 | if (psock_is_smap_sk(sock)) |
1474 | tcp_cleanup_ulp(sock); | ||
1468 | write_lock_bh(&sock->sk_callback_lock); | 1475 | write_lock_bh(&sock->sk_callback_lock); |
1469 | smap_stop_sock(psock, sock); | 1476 | smap_stop_sock(psock, sock); |
1470 | write_unlock_bh(&sock->sk_callback_lock); | 1477 | write_unlock_bh(&sock->sk_callback_lock); |
@@ -1578,13 +1585,13 @@ static void smap_gc_work(struct work_struct *w) | |||
1578 | bpf_prog_put(psock->bpf_tx_msg); | 1585 | bpf_prog_put(psock->bpf_tx_msg); |
1579 | 1586 | ||
1580 | if (psock->cork) { | 1587 | if (psock->cork) { |
1581 | free_start_sg(psock->sock, psock->cork); | 1588 | free_start_sg(psock->sock, psock->cork, true); |
1582 | kfree(psock->cork); | 1589 | kfree(psock->cork); |
1583 | } | 1590 | } |
1584 | 1591 | ||
1585 | list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { | 1592 | list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { |
1586 | list_del(&md->list); | 1593 | list_del(&md->list); |
1587 | free_start_sg(psock->sock, md); | 1594 | free_start_sg(psock->sock, md, true); |
1588 | kfree(md); | 1595 | kfree(md); |
1589 | } | 1596 | } |
1590 | 1597 | ||
@@ -1891,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, | |||
1891 | * doesn't update user data. | 1898 | * doesn't update user data. |
1892 | */ | 1899 | */ |
1893 | if (psock) { | 1900 | if (psock) { |
1901 | if (!psock_is_smap_sk(sock)) { | ||
1902 | err = -EBUSY; | ||
1903 | goto out_progs; | ||
1904 | } | ||
1894 | if (READ_ONCE(psock->bpf_parse) && parse) { | 1905 | if (READ_ONCE(psock->bpf_parse) && parse) { |
1895 | err = -EBUSY; | 1906 | err = -EBUSY; |
1896 | goto out_progs; | 1907 | goto out_progs; |
@@ -2140,7 +2151,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) | |||
2140 | return ERR_PTR(-EPERM); | 2151 | return ERR_PTR(-EPERM); |
2141 | 2152 | ||
2142 | /* check sanity of attributes */ | 2153 | /* check sanity of attributes */ |
2143 | if (attr->max_entries == 0 || attr->value_size != 4 || | 2154 | if (attr->max_entries == 0 || |
2155 | attr->key_size == 0 || | ||
2156 | attr->value_size != 4 || | ||
2144 | attr->map_flags & ~SOCK_CREATE_FLAG_MASK) | 2157 | attr->map_flags & ~SOCK_CREATE_FLAG_MASK) |
2145 | return ERR_PTR(-EINVAL); | 2158 | return ERR_PTR(-EINVAL); |
2146 | 2159 | ||
@@ -2267,8 +2280,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, | |||
2267 | } | 2280 | } |
2268 | l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, | 2281 | l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, |
2269 | htab->map.numa_node); | 2282 | htab->map.numa_node); |
2270 | if (!l_new) | 2283 | if (!l_new) { |
2284 | atomic_dec(&htab->count); | ||
2271 | return ERR_PTR(-ENOMEM); | 2285 | return ERR_PTR(-ENOMEM); |
2286 | } | ||
2272 | 2287 | ||
2273 | memcpy(l_new->key, key, key_size); | 2288 | memcpy(l_new->key, key, key_size); |
2274 | l_new->sk = sk; | 2289 | l_new->sk = sk; |
diff --git a/kernel/cpu.c b/kernel/cpu.c index ed44d7d34c2d..0097acec1c71 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { } | |||
102 | * @name: Name of the step | 102 | * @name: Name of the step |
103 | * @startup: Startup function of the step | 103 | * @startup: Startup function of the step |
104 | * @teardown: Teardown function of the step | 104 | * @teardown: Teardown function of the step |
105 | * @skip_onerr: Do not invoke the functions on error rollback | ||
106 | * Will go away once the notifiers are gone | ||
107 | * @cant_stop: Bringup/teardown can't be stopped at this step | 105 | * @cant_stop: Bringup/teardown can't be stopped at this step |
108 | */ | 106 | */ |
109 | struct cpuhp_step { | 107 | struct cpuhp_step { |
@@ -119,7 +117,6 @@ struct cpuhp_step { | |||
119 | struct hlist_node *node); | 117 | struct hlist_node *node); |
120 | } teardown; | 118 | } teardown; |
121 | struct hlist_head list; | 119 | struct hlist_head list; |
122 | bool skip_onerr; | ||
123 | bool cant_stop; | 120 | bool cant_stop; |
124 | bool multi_instance; | 121 | bool multi_instance; |
125 | }; | 122 | }; |
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu) | |||
550 | 547 | ||
551 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) | 548 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) |
552 | { | 549 | { |
553 | for (st->state--; st->state > st->target; st->state--) { | 550 | for (st->state--; st->state > st->target; st->state--) |
554 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 551 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
555 | |||
556 | if (!step->skip_onerr) | ||
557 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); | ||
558 | } | ||
559 | } | 552 | } |
560 | 553 | ||
561 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 554 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
@@ -614,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
614 | bool bringup = st->bringup; | 607 | bool bringup = st->bringup; |
615 | enum cpuhp_state state; | 608 | enum cpuhp_state state; |
616 | 609 | ||
610 | if (WARN_ON_ONCE(!st->should_run)) | ||
611 | return; | ||
612 | |||
617 | /* | 613 | /* |
618 | * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures | 614 | * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures |
619 | * that if we see ->should_run we also see the rest of the state. | 615 | * that if we see ->should_run we also see the rest of the state. |
620 | */ | 616 | */ |
621 | smp_mb(); | 617 | smp_mb(); |
622 | 618 | ||
623 | if (WARN_ON_ONCE(!st->should_run)) | ||
624 | return; | ||
625 | |||
626 | cpuhp_lock_acquire(bringup); | 619 | cpuhp_lock_acquire(bringup); |
627 | 620 | ||
628 | if (st->single) { | 621 | if (st->single) { |
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
644 | 637 | ||
645 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); | 638 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); |
646 | 639 | ||
647 | if (st->rollback) { | ||
648 | struct cpuhp_step *step = cpuhp_get_step(state); | ||
649 | if (step->skip_onerr) | ||
650 | goto next; | ||
651 | } | ||
652 | |||
653 | if (cpuhp_is_atomic_state(state)) { | 640 | if (cpuhp_is_atomic_state(state)) { |
654 | local_irq_disable(); | 641 | local_irq_disable(); |
655 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); | 642 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); |
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
673 | st->should_run = false; | 660 | st->should_run = false; |
674 | } | 661 | } |
675 | 662 | ||
676 | next: | ||
677 | cpuhp_lock_release(bringup); | 663 | cpuhp_lock_release(bringup); |
678 | 664 | ||
679 | if (!st->should_run) | 665 | if (!st->should_run) |
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void) | |||
916 | 902 | ||
917 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) | 903 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) |
918 | { | 904 | { |
919 | for (st->state++; st->state < st->target; st->state++) { | 905 | for (st->state++; st->state < st->target; st->state++) |
920 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 906 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
921 | |||
922 | if (!step->skip_onerr) | ||
923 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); | ||
924 | } | ||
925 | } | 907 | } |
926 | 908 | ||
927 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 909 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
@@ -934,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | |||
934 | ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); | 916 | ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
935 | if (ret) { | 917 | if (ret) { |
936 | st->target = prev_state; | 918 | st->target = prev_state; |
937 | undo_cpu_down(cpu, st); | 919 | if (st->state < prev_state) |
920 | undo_cpu_down(cpu, st); | ||
938 | break; | 921 | break; |
939 | } | 922 | } |
940 | } | 923 | } |
@@ -987,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, | |||
987 | * to do the further cleanups. | 970 | * to do the further cleanups. |
988 | */ | 971 | */ |
989 | ret = cpuhp_down_callbacks(cpu, st, target); | 972 | ret = cpuhp_down_callbacks(cpu, st, target); |
990 | if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { | 973 | if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) { |
991 | cpuhp_reset_state(st, prev_state); | 974 | cpuhp_reset_state(st, prev_state); |
992 | __cpuhp_kick_ap(st); | 975 | __cpuhp_kick_ap(st); |
993 | } | 976 | } |
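The cpu.c hunk above drops the per-step skip_onerr handling and only rolls back when the teardown loop actually left the starting state (st->state < prev_state). A minimal userspace sketch of that shape, assuming invented step names and an arbitrary failing step rather than the kernel's real cpuhp states:

#include <stdio.h>

/* Toy hotplug-style state walk; the step names and the failing step are
 * invented for illustration, not the kernel's real cpuhp states. */
enum { ST_OFFLINE, ST_PREPARED, ST_STARTED, ST_ONLINE };

static const char *name[] = { "offline", "prepared", "started", "online" };

struct cpu_state { int state; int target; };

/* Pretend the teardown callback for "prepared" fails. */
static int invoke(int state, int bringup)
{
    printf("%-9s %s\n", bringup ? "bring up" : "tear down", name[state]);
    return (!bringup && state == ST_PREPARED) ? -1 : 0;
}

/* Mirror of undo_cpu_down(): simply re-run the bring-up callbacks, now
 * that the per-step skip_onerr special case is gone. */
static void undo_down(struct cpu_state *st)
{
    for (st->state++; st->state < st->target; st->state++)
        invoke(st->state, 1);
}

/* Mirror of cpuhp_down_callbacks(): roll back only if at least one state
 * was actually torn down before the failure. */
static int down_callbacks(struct cpu_state *st, int target)
{
    int prev_state = st->state, ret = 0;

    for (; st->state > target; st->state--) {
        ret = invoke(st->state, 0);
        if (ret) {
            st->target = prev_state;
            if (st->state < prev_state)
                undo_down(st);
            break;
        }
    }
    return ret;
}

int main(void)
{
    struct cpu_state st = { .state = ST_ONLINE, .target = ST_OFFLINE };

    down_callbacks(&st, st.target);
    return 0;
}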
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 1c35b7b945d0..de87b0282e74 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c | |||
@@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, | |||
168 | int dma_direct_supported(struct device *dev, u64 mask) | 168 | int dma_direct_supported(struct device *dev, u64 mask) |
169 | { | 169 | { |
170 | #ifdef CONFIG_ZONE_DMA | 170 | #ifdef CONFIG_ZONE_DMA |
171 | if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) | 171 | if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))) |
172 | return 0; | 172 | return 0; |
173 | #else | 173 | #else |
174 | /* | 174 | /* |
@@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask) | |||
177 | * memory, or by providing a ZONE_DMA32. If neither is the case, the | 177 | * memory, or by providing a ZONE_DMA32. If neither is the case, the |
178 | * architecture needs to use an IOMMU instead of the direct mapping. | 178 | * architecture needs to use an IOMMU instead of the direct mapping. |
179 | */ | 179 | */ |
180 | if (mask < DMA_BIT_MASK(32)) | 180 | if (mask < phys_to_dma(dev, DMA_BIT_MASK(32))) |
181 | return 0; | 181 | return 0; |
182 | #endif | 182 | #endif |
183 | /* | 183 | /* |
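The dma/direct.c change compares the device mask against phys_to_dma() of the zone limit instead of the raw limit, so buses with a fixed physical-to-bus offset are judged against the address the device will actually see. A rough sketch under the assumption of a single constant per-device offset; the struct device field and the phys_to_dma() body here are invented stand-ins for the arch-specific versions:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Invented stand-in for struct device: only the bus offset matters here. */
struct device { uint64_t dma_offset_bytes; };

/* Hypothetical phys_to_dma(): bus address = physical address minus a
 * constant platform offset. */
static uint64_t phys_to_dma(const struct device *dev, uint64_t paddr)
{
    return paddr - dev->dma_offset_bytes;
}

/* Mirrors the fixed check: the mask must cover the *bus* address of the
 * highest DMA-able physical address, not the physical address itself. */
static int dma_direct_supported(const struct device *dev, uint64_t mask)
{
    return mask >= phys_to_dma(dev, DMA_BIT_MASK(32));
}

int main(void)
{
    struct device dev = { .dma_offset_bytes = 1ULL << 31 };  /* 2 GiB offset */

    /* With the offset applied, a 31-bit capable device is still usable. */
    printf("31-bit mask supported: %d\n", dma_direct_supported(&dev, DMA_BIT_MASK(31)));
    printf("24-bit mask supported: %d\n", dma_direct_supported(&dev, DMA_BIT_MASK(24)));
    return 0;
}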
diff --git a/kernel/fork.c b/kernel/fork.c index d896e9ca38b0..f0b58479534f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, | |||
550 | goto out; | 550 | goto out; |
551 | } | 551 | } |
552 | /* a new mm has just been created */ | 552 | /* a new mm has just been created */ |
553 | arch_dup_mmap(oldmm, mm); | 553 | retval = arch_dup_mmap(oldmm, mm); |
554 | retval = 0; | ||
555 | out: | 554 | out: |
556 | up_write(&mm->mmap_sem); | 555 | up_write(&mm->mmap_sem); |
557 | flush_tlb_mm(oldmm); | 556 | flush_tlb_mm(oldmm); |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 924e37fb1620..fd6f8ed28e01 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/kmsg_dump.h> | 38 | #include <linux/kmsg_dump.h> |
39 | #include <linux/syslog.h> | 39 | #include <linux/syslog.h> |
40 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
41 | #include <linux/notifier.h> | ||
42 | #include <linux/rculist.h> | 41 | #include <linux/rculist.h> |
43 | #include <linux/poll.h> | 42 | #include <linux/poll.h> |
44 | #include <linux/irq_work.h> | 43 | #include <linux/irq_work.h> |
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index a0a74c533e4b..0913b4d385de 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c | |||
@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args) | |||
306 | return printk_safe_log_store(s, fmt, args); | 306 | return printk_safe_log_store(s, fmt, args); |
307 | } | 307 | } |
308 | 308 | ||
309 | void printk_nmi_enter(void) | 309 | void notrace printk_nmi_enter(void) |
310 | { | 310 | { |
311 | this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); | 311 | this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); |
312 | } | 312 | } |
313 | 313 | ||
314 | void printk_nmi_exit(void) | 314 | void notrace printk_nmi_exit(void) |
315 | { | 315 | { |
316 | this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); | 316 | this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); |
317 | } | 317 | } |
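printk_nmi_enter()/printk_nmi_exit() (like the watchdog touch helpers further down) gain notrace so the function tracer cannot recurse through them. A hedged userspace illustration of the same idea using GCC's -finstrument-functions hooks; the hook bodies and the touch_watchdog() helper are invented, and the attribute is only roughly what the kernel's notrace macro expands to:

/* Build with: gcc -finstrument-functions notrace_demo.c
 * Functions marked notrace are excluded from the entry/exit hooks, so the
 * hooks themselves (and helpers called on the trace path) cannot recurse. */
#include <stdio.h>

#define notrace __attribute__((no_instrument_function))

/* The hook must be notrace, otherwise instrumenting it would recurse. */
notrace void __cyg_profile_func_enter(void *fn, void *caller)
{
    fprintf(stderr, "enter %p (from %p)\n", fn, caller);
}

notrace void __cyg_profile_func_exit(void *fn, void *caller)
{
    (void)fn; (void)caller;
}

/* Analogue of a watchdog/printk helper that may run while tracing:
 * excluded from instrumentation so tracing it cannot re-enter itself. */
static notrace void touch_watchdog(void)
{
    /* in the kernel this would just update a per-CPU timestamp */
}

static void traced_work(void)
{
    touch_watchdog();
}

int main(void)
{
    traced_work();
    return 0;
}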
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index f74fb00d8064..0e6e97a01942 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags) | |||
133 | spin_unlock_irqrestore(&watchdog_lock, *flags); | 133 | spin_unlock_irqrestore(&watchdog_lock, *flags); |
134 | } | 134 | } |
135 | 135 | ||
136 | static int clocksource_watchdog_kthread(void *data); | ||
137 | static void __clocksource_change_rating(struct clocksource *cs, int rating); | ||
138 | |||
136 | /* | 139 | /* |
137 | * Interval: 0.5sec Threshold: 0.0625s | 140 | * Interval: 0.5sec Threshold: 0.0625s |
138 | */ | 141 | */ |
139 | #define WATCHDOG_INTERVAL (HZ >> 1) | 142 | #define WATCHDOG_INTERVAL (HZ >> 1) |
140 | #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) | 143 | #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) |
141 | 144 | ||
145 | static void clocksource_watchdog_work(struct work_struct *work) | ||
146 | { | ||
147 | /* | ||
148 | * We cannot directly run clocksource_watchdog_kthread() here, because | ||
149 | * clocksource_select() calls timekeeping_notify() which uses | ||
150 | * stop_machine(). One cannot use stop_machine() from a workqueue() due | ||
151 | * lock inversions wrt CPU hotplug. | ||
152 | * to lock inversions wrt CPU hotplug. | ||
152 | * | ||
153 | * Also, we only ever run this work once or twice during the lifetime | ||
154 | * of the kernel, so there is no point in creating a more permanent | ||
155 | * kthread for this. | ||
156 | * | ||
157 | * If kthread_run fails the next watchdog scan over the | ||
158 | * watchdog_list will find the unstable clock again. | ||
159 | */ | ||
160 | kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog"); | ||
161 | } | ||
162 | |||
142 | static void __clocksource_unstable(struct clocksource *cs) | 163 | static void __clocksource_unstable(struct clocksource *cs) |
143 | { | 164 | { |
144 | cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); | 165 | cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); |
145 | cs->flags |= CLOCK_SOURCE_UNSTABLE; | 166 | cs->flags |= CLOCK_SOURCE_UNSTABLE; |
146 | 167 | ||
147 | /* | 168 | /* |
148 | * If the clocksource is registered clocksource_watchdog_work() will | 169 | * If the clocksource is registered clocksource_watchdog_kthread() will |
149 | * re-rate and re-select. | 170 | * re-rate and re-select. |
150 | */ | 171 | */ |
151 | if (list_empty(&cs->list)) { | 172 | if (list_empty(&cs->list)) { |
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs) | |||
156 | if (cs->mark_unstable) | 177 | if (cs->mark_unstable) |
157 | cs->mark_unstable(cs); | 178 | cs->mark_unstable(cs); |
158 | 179 | ||
159 | /* kick clocksource_watchdog_work() */ | 180 | /* kick clocksource_watchdog_kthread() */ |
160 | if (finished_booting) | 181 | if (finished_booting) |
161 | schedule_work(&watchdog_work); | 182 | schedule_work(&watchdog_work); |
162 | } | 183 | } |
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs) | |||
166 | * @cs: clocksource to be marked unstable | 187 | * @cs: clocksource to be marked unstable |
167 | * | 188 | * |
168 | * This function is called by the x86 TSC code to mark clocksources as unstable; | 189 | * This function is called by the x86 TSC code to mark clocksources as unstable; |
169 | * it defers demotion and re-selection to a work. | 190 | * it defers demotion and re-selection to a kthread. |
170 | */ | 191 | */ |
171 | void clocksource_mark_unstable(struct clocksource *cs) | 192 | void clocksource_mark_unstable(struct clocksource *cs) |
172 | { | 193 | { |
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs) | |||
391 | } | 412 | } |
392 | } | 413 | } |
393 | 414 | ||
394 | static void __clocksource_change_rating(struct clocksource *cs, int rating); | 415 | static int __clocksource_watchdog_kthread(void) |
395 | |||
396 | static int __clocksource_watchdog_work(void) | ||
397 | { | 416 | { |
398 | struct clocksource *cs, *tmp; | 417 | struct clocksource *cs, *tmp; |
399 | unsigned long flags; | 418 | unsigned long flags; |
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void) | |||
418 | return select; | 437 | return select; |
419 | } | 438 | } |
420 | 439 | ||
421 | static void clocksource_watchdog_work(struct work_struct *work) | 440 | static int clocksource_watchdog_kthread(void *data) |
422 | { | 441 | { |
423 | mutex_lock(&clocksource_mutex); | 442 | mutex_lock(&clocksource_mutex); |
424 | if (__clocksource_watchdog_work()) | 443 | if (__clocksource_watchdog_kthread()) |
425 | clocksource_select(); | 444 | clocksource_select(); |
426 | mutex_unlock(&clocksource_mutex); | 445 | mutex_unlock(&clocksource_mutex); |
446 | return 0; | ||
427 | } | 447 | } |
428 | 448 | ||
429 | static bool clocksource_is_watchdog(struct clocksource *cs) | 449 | static bool clocksource_is_watchdog(struct clocksource *cs) |
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) | |||
442 | static void clocksource_select_watchdog(bool fallback) { } | 462 | static void clocksource_select_watchdog(bool fallback) { } |
443 | static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } | 463 | static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } |
444 | static inline void clocksource_resume_watchdog(void) { } | 464 | static inline void clocksource_resume_watchdog(void) { } |
445 | static inline int __clocksource_watchdog_work(void) { return 0; } | 465 | static inline int __clocksource_watchdog_kthread(void) { return 0; } |
446 | static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } | 466 | static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } |
447 | void clocksource_mark_unstable(struct clocksource *cs) { } | 467 | void clocksource_mark_unstable(struct clocksource *cs) { } |
448 | 468 | ||
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void) | |||
810 | /* | 830 | /* |
811 | * Run the watchdog first to eliminate unstable clock sources | 831 | * Run the watchdog first to eliminate unstable clock sources |
812 | */ | 832 | */ |
813 | __clocksource_watchdog_work(); | 833 | __clocksource_watchdog_kthread(); |
814 | clocksource_select(); | 834 | clocksource_select(); |
815 | mutex_unlock(&clocksource_mutex); | 835 | mutex_unlock(&clocksource_mutex); |
816 | return 0; | 836 | return 0; |
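The clocksource hunk turns the watchdog work into a one-shot kthread because clocksource_select() ends up in stop_machine(), which must not be entered from a workqueue. A loose userspace analogue of that hand-off using pthreads (names invented): the callback only spawns a detached worker and returns immediately, and a creation failure is simply left for the next scan to retry.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t select_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for __clocksource_watchdog_kthread() + clocksource_select():
 * the slow, lock-heavy part that must not run in the callback's context. */
static void *watchdog_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&select_lock);
    puts("re-rating clocksources and re-selecting (slow path)");
    sleep(1);
    pthread_mutex_unlock(&select_lock);
    return NULL;
}

/* Stand-in for clocksource_watchdog_work(): it only hands off. */
static void watchdog_work(void)
{
    pthread_t tid;
    pthread_attr_t attr;

    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (pthread_create(&tid, &attr, watchdog_thread, NULL))
        puts("spawn failed; a later scan will retry");
    pthread_attr_destroy(&attr);
}

int main(void)
{
    watchdog_work();    /* returns immediately, like the work item */
    sleep(2);           /* give the detached worker time to finish */
    return 0;
}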
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5470dce212c0..977918d5d350 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -261,7 +261,7 @@ static void __touch_watchdog(void) | |||
261 | * entering idle state. This should only be used for scheduler events. | 261 | * entering idle state. This should only be used for scheduler events. |
262 | * Use touch_softlockup_watchdog() for everything else. | 262 | * Use touch_softlockup_watchdog() for everything else. |
263 | */ | 263 | */ |
264 | void touch_softlockup_watchdog_sched(void) | 264 | notrace void touch_softlockup_watchdog_sched(void) |
265 | { | 265 | { |
266 | /* | 266 | /* |
267 | * Preemption can be enabled. It doesn't matter which CPU's timestamp | 267 | * Preemption can be enabled. It doesn't matter which CPU's timestamp |
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void) | |||
270 | raw_cpu_write(watchdog_touch_ts, 0); | 270 | raw_cpu_write(watchdog_touch_ts, 0); |
271 | } | 271 | } |
272 | 272 | ||
273 | void touch_softlockup_watchdog(void) | 273 | notrace void touch_softlockup_watchdog(void) |
274 | { | 274 | { |
275 | touch_softlockup_watchdog_sched(); | 275 | touch_softlockup_watchdog_sched(); |
276 | wq_watchdog_touch(raw_smp_processor_id()); | 276 | wq_watchdog_touch(raw_smp_processor_id()); |
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 1f7020d65d0a..71381168dede 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c | |||
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask; | |||
29 | static unsigned long hardlockup_allcpu_dumped; | 29 | static unsigned long hardlockup_allcpu_dumped; |
30 | static atomic_t watchdog_cpus = ATOMIC_INIT(0); | 30 | static atomic_t watchdog_cpus = ATOMIC_INIT(0); |
31 | 31 | ||
32 | void arch_touch_nmi_watchdog(void) | 32 | notrace void arch_touch_nmi_watchdog(void) |
33 | { | 33 | { |
34 | /* | 34 | /* |
35 | * Using __raw here because some code paths have | 35 | * Using __raw here because some code paths have |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 60e80198c3df..0280deac392e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) | |||
5574 | mod_timer(&wq_watchdog_timer, jiffies + thresh); | 5574 | mod_timer(&wq_watchdog_timer, jiffies + thresh); |
5575 | } | 5575 | } |
5576 | 5576 | ||
5577 | void wq_watchdog_touch(int cpu) | 5577 | notrace void wq_watchdog_touch(int cpu) |
5578 | { | 5578 | { |
5579 | if (cpu >= 0) | 5579 | if (cpu >= 0) |
5580 | per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; | 5580 | per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 613316724c6a..4966c4fbe7f7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1277,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM | |||
1277 | time. This is really bad from a security perspective, and | 1277 | time. This is really bad from a security perspective, and |
1278 | so architecture maintainers really need to do what they can | 1278 | so architecture maintainers really need to do what they can |
1279 | to get the CRNG seeded sooner after the system is booted. | 1279 | to get the CRNG seeded sooner after the system is booted. |
1280 | However, since users can not do anything actionble to | 1280 | However, since users cannot do anything actionable to |
1281 | address this, by default the kernel will issue only a single | 1281 | address this, by default the kernel will issue only a single |
1282 | warning for the first use of unseeded randomness. | 1282 | warning for the first use of unseeded randomness. |
1283 | 1283 | ||
1284 | Say Y here if you want to receive warnings for all uses of | 1284 | Say Y here if you want to receive warnings for all uses of |
1285 | unseeded randomness. This will be of use primarily for | 1285 | unseeded randomness. This will be of use primarily for |
1286 | those developers interersted in improving the security of | 1286 | those developers interested in improving the security of |
1287 | Linux kernels running on their architecture (or | 1287 | Linux kernels running on their architecture (or |
1288 | subarchitecture). | 1288 | subarchitecture). |
1289 | 1289 | ||
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index c72577e472f2..a66595ba5543 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -4,7 +4,6 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/percpu_counter.h> | 6 | #include <linux/percpu_counter.h> |
7 | #include <linux/notifier.h> | ||
8 | #include <linux/mutex.h> | 7 | #include <linux/mutex.h> |
9 | #include <linux/init.h> | 8 | #include <linux/init.h> |
10 | #include <linux/cpu.h> | 9 | #include <linux/cpu.h> |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 310e29b51507..30526afa8343 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/rhashtable.h> | 28 | #include <linux/rhashtable.h> |
29 | #include <linux/err.h> | 29 | #include <linux/err.h> |
30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
31 | #include <linux/rhashtable.h> | ||
32 | 31 | ||
33 | #define HASH_DEFAULT_SIZE 64UL | 32 | #define HASH_DEFAULT_SIZE 64UL |
34 | #define HASH_MIN_SIZE 4U | 33 | #define HASH_MIN_SIZE 4U |
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index f5981e9d6ae2..8a8bb8796c6c 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
@@ -491,6 +491,7 @@ static void cgwb_release_workfn(struct work_struct *work) | |||
491 | { | 491 | { |
492 | struct bdi_writeback *wb = container_of(work, struct bdi_writeback, | 492 | struct bdi_writeback *wb = container_of(work, struct bdi_writeback, |
493 | release_work); | 493 | release_work); |
494 | struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css); | ||
494 | 495 | ||
495 | mutex_lock(&wb->bdi->cgwb_release_mutex); | 496 | mutex_lock(&wb->bdi->cgwb_release_mutex); |
496 | wb_shutdown(wb); | 497 | wb_shutdown(wb); |
@@ -499,6 +500,9 @@ static void cgwb_release_workfn(struct work_struct *work) | |||
499 | css_put(wb->blkcg_css); | 500 | css_put(wb->blkcg_css); |
500 | mutex_unlock(&wb->bdi->cgwb_release_mutex); | 501 | mutex_unlock(&wb->bdi->cgwb_release_mutex); |
501 | 502 | ||
503 | /* triggers blkg destruction if cgwb_refcnt becomes zero */ | ||
504 | blkcg_cgwb_put(blkcg); | ||
505 | |||
502 | fprop_local_destroy_percpu(&wb->memcg_completions); | 506 | fprop_local_destroy_percpu(&wb->memcg_completions); |
503 | percpu_ref_exit(&wb->refcnt); | 507 | percpu_ref_exit(&wb->refcnt); |
504 | wb_exit(wb); | 508 | wb_exit(wb); |
@@ -597,6 +601,7 @@ static int cgwb_create(struct backing_dev_info *bdi, | |||
597 | list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); | 601 | list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); |
598 | list_add(&wb->memcg_node, memcg_cgwb_list); | 602 | list_add(&wb->memcg_node, memcg_cgwb_list); |
599 | list_add(&wb->blkcg_node, blkcg_cgwb_list); | 603 | list_add(&wb->blkcg_node, blkcg_cgwb_list); |
604 | blkcg_cgwb_get(blkcg); | ||
600 | css_get(memcg_css); | 605 | css_get(memcg_css); |
601 | css_get(blkcg_css); | 606 | css_get(blkcg_css); |
602 | } | 607 | } |
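The backing-dev.c hunk pins the blkcg with blkcg_cgwb_get() when a cgroup writeback is created and drops that pin in the release path, so blkg destruction is deferred until the last writeback is gone. A small sketch of this get/put pairing; all names and the plain-integer refcount are invented simplifications of the real machinery:

#include <stdio.h>

/* Invented refcounted container; the real code pins a blkcg while any
 * cgroup writeback (cgwb) still points at it. */
struct blkcg_like {
    int cgwb_refcnt;
};

static void blkcg_get(struct blkcg_like *b)
{
    b->cgwb_refcnt++;
}

static void blkcg_put(struct blkcg_like *b)
{
    if (--b->cgwb_refcnt == 0)
        puts("last writeback gone: destroy blkgs now");
}

struct cgwb_like {
    struct blkcg_like *blkcg;
};

static void cgwb_create(struct cgwb_like *wb, struct blkcg_like *b)
{
    wb->blkcg = b;
    blkcg_get(b);                 /* pin the blkcg for the cgwb's lifetime */
}

static void cgwb_release(struct cgwb_like *wb)
{
    blkcg_put(wb->blkcg);         /* may trigger blkg destruction */
    wb->blkcg = NULL;
}

int main(void)
{
    struct blkcg_like b = { .cgwb_refcnt = 1 };  /* owner's reference */
    struct cgwb_like wb1, wb2;

    cgwb_create(&wb1, &b);
    cgwb_create(&wb2, &b);
    cgwb_release(&wb1);
    cgwb_release(&wb2);
    blkcg_put(&b);                /* owner drops its own reference last */
    return 0;
}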
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c3bc7e9c9a2a..533f9b00147d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -821,11 +821,11 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, | |||
821 | * but we need to be consistent with PTEs and architectures that | 821 | * but we need to be consistent with PTEs and architectures that |
822 | * can't support a 'special' bit. | 822 | * can't support a 'special' bit. |
823 | */ | 823 | */ |
824 | BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); | 824 | BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && |
825 | !pfn_t_devmap(pfn)); | ||
825 | BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == | 826 | BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == |
826 | (VM_PFNMAP|VM_MIXEDMAP)); | 827 | (VM_PFNMAP|VM_MIXEDMAP)); |
827 | BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); | 828 | BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); |
828 | BUG_ON(!pfn_t_devmap(pfn)); | ||
829 | 829 | ||
830 | if (addr < vma->vm_start || addr >= vma->vm_end) | 830 | if (addr < vma->vm_start || addr >= vma->vm_end) |
831 | return VM_FAULT_SIGBUS; | 831 | return VM_FAULT_SIGBUS; |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 9a085d525bbc..17dd883198ae 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void) | |||
2097 | 2097 | ||
2098 | kmemleak_initialized = 1; | 2098 | kmemleak_initialized = 1; |
2099 | 2099 | ||
2100 | dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL, | ||
2101 | &kmemleak_fops); | ||
2102 | if (!dentry) | ||
2103 | pr_warn("Failed to create the debugfs kmemleak file\n"); | ||
2104 | |||
2100 | if (kmemleak_error) { | 2105 | if (kmemleak_error) { |
2101 | /* | 2106 | /* |
2102 | * Some error occurred and kmemleak was disabled. There is a | 2107 | * Some error occurred and kmemleak was disabled. There is a |
@@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void) | |||
2108 | return -ENOMEM; | 2113 | return -ENOMEM; |
2109 | } | 2114 | } |
2110 | 2115 | ||
2111 | dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL, | ||
2112 | &kmemleak_fops); | ||
2113 | if (!dentry) | ||
2114 | pr_warn("Failed to create the debugfs kmemleak file\n"); | ||
2115 | mutex_lock(&scan_mutex); | 2116 | mutex_lock(&scan_mutex); |
2116 | start_scan_thread(); | 2117 | start_scan_thread(); |
2117 | mutex_unlock(&scan_mutex); | 2118 | mutex_unlock(&scan_mutex); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4ead5a4817de..e79cb59552d9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int | |||
1701 | if (mem_cgroup_out_of_memory(memcg, mask, order)) | 1701 | if (mem_cgroup_out_of_memory(memcg, mask, order)) |
1702 | return OOM_SUCCESS; | 1702 | return OOM_SUCCESS; |
1703 | 1703 | ||
1704 | WARN(1,"Memory cgroup charge failed because of no reclaimable memory! " | ||
1705 | "This looks like a misconfiguration or a kernel bug."); | ||
1706 | return OOM_FAILED; | 1704 | return OOM_FAILED; |
1707 | } | 1705 | } |
1708 | 1706 | ||
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9eea6e809a4e..38d94b703e9d 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end) | |||
1333 | if (__PageMovable(page)) | 1333 | if (__PageMovable(page)) |
1334 | return pfn; | 1334 | return pfn; |
1335 | if (PageHuge(page)) { | 1335 | if (PageHuge(page)) { |
1336 | if (page_huge_active(page)) | 1336 | if (hugepage_migration_supported(page_hstate(page)) && |
1337 | page_huge_active(page)) | ||
1337 | return pfn; | 1338 | return pfn; |
1338 | else | 1339 | else |
1339 | pfn = round_up(pfn + 1, | 1340 | pfn = round_up(pfn + 1, |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index b5b25e4dcbbb..f10aa5360616 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm) | |||
522 | 522 | ||
523 | tlb_gather_mmu(&tlb, mm, start, end); | 523 | tlb_gather_mmu(&tlb, mm, start, end); |
524 | if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) { | 524 | if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) { |
525 | tlb_finish_mmu(&tlb, start, end); | ||
525 | ret = false; | 526 | ret = false; |
526 | continue; | 527 | continue; |
527 | } | 528 | } |
@@ -1103,10 +1104,17 @@ bool out_of_memory(struct oom_control *oc) | |||
1103 | } | 1104 | } |
1104 | 1105 | ||
1105 | select_bad_process(oc); | 1106 | select_bad_process(oc); |
1106 | /* Found nothing?!?! Either we hang forever, or we panic. */ | 1107 | /* Found nothing?!?! */ |
1107 | if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) { | 1108 | if (!oc->chosen) { |
1108 | dump_header(oc, NULL); | 1109 | dump_header(oc, NULL); |
1109 | panic("Out of memory and no killable processes...\n"); | 1110 | pr_warn("Out of memory and no killable processes...\n"); |
1111 | /* | ||
1112 | * If we got here due to an actual allocation at the | ||
1113 | * system level, we cannot survive this and will enter | ||
1114 | * an endless loop in the allocator. Bail out now. | ||
1115 | */ | ||
1116 | if (!is_sysrq_oom(oc) && !is_memcg_oom(oc)) | ||
1117 | panic("System is deadlocked on memory\n"); | ||
1110 | } | 1118 | } |
1111 | if (oc->chosen && oc->chosen != (void *)-1UL) | 1119 | if (oc->chosen && oc->chosen != (void *)-1UL) |
1112 | oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : | 1120 | oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : |
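The oom_kill.c hunk makes the no-victim case always log a warning and panic only when the OOM was raised by a genuine system-level allocation, not by sysrq or a memcg limit. A compact sketch of the reordered decision, with the two predicates reduced to plain booleans and abort() standing in for panic():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors the reordered logic: warn in every "nothing to kill" case,
 * panic only when the allocator itself would otherwise loop forever. */
static void handle_no_victim(bool is_sysrq_oom, bool is_memcg_oom)
{
    fprintf(stderr, "Out of memory and no killable processes...\n");
    if (!is_sysrq_oom && !is_memcg_oom) {
        fprintf(stderr, "System is deadlocked on memory\n");
        abort();                        /* stand-in for panic() */
    }
    /* sysrq- or memcg-triggered OOM: report and carry on */
}

int main(void)
{
    handle_no_victim(true, false);      /* sysrq case: warn only */
    handle_no_victim(false, true);      /* memcg case: warn only */
    /* handle_no_victim(false, false) would abort, like the panic path */
    return 0;
}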
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6551d3b0dc30..84ae9bf5858a 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/mpage.h> | 27 | #include <linux/mpage.h> |
28 | #include <linux/rmap.h> | 28 | #include <linux/rmap.h> |
29 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
30 | #include <linux/notifier.h> | ||
31 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
32 | #include <linux/sysctl.h> | 31 | #include <linux/sysctl.h> |
33 | #include <linux/cpu.h> | 32 | #include <linux/cpu.h> |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e75865d58ba7..89d2a2ab3fe6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/ratelimit.h> | 33 | #include <linux/ratelimit.h> |
34 | #include <linux/oom.h> | 34 | #include <linux/oom.h> |
35 | #include <linux/notifier.h> | ||
36 | #include <linux/topology.h> | 35 | #include <linux/topology.h> |
37 | #include <linux/sysctl.h> | 36 | #include <linux/sysctl.h> |
38 | #include <linux/cpu.h> | 37 | #include <linux/cpu.h> |
@@ -7709,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
7709 | * handle each tail page individually in migration. | 7708 | * handle each tail page individually in migration. |
7710 | */ | 7709 | */ |
7711 | if (PageHuge(page)) { | 7710 | if (PageHuge(page)) { |
7711 | |||
7712 | if (!hugepage_migration_supported(page_hstate(page))) | ||
7713 | goto unmovable; | ||
7714 | |||
7712 | iter = round_up(iter + 1, 1<<compound_order(page)) - 1; | 7715 | iter = round_up(iter + 1, 1<<compound_order(page)) - 1; |
7713 | continue; | 7716 | continue; |
7714 | } | 7717 | } |
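Both the memory_hotplug.c and page_alloc.c hunks add a hugepage_migration_supported() check so huge pages whose hstate cannot be migrated mark the range unmovable instead of being skipped over. A toy scan illustrating the new early bail-out; the page layout, sizes and the supported flag are invented:

#include <stdbool.h>
#include <stdio.h>

/* Toy page descriptor; 'migration_supported' stands in for the real
 * hugepage_migration_supported(page_hstate(page)) check. */
struct page {
    bool huge;
    bool migration_supported;
    unsigned int compound_order;    /* huge page spans 1 << order slots */
};

/* Returns true if the range contains a page that cannot be migrated away,
 * mirroring the new early bail-out for unsupported huge pages. */
static bool has_unmovable_pages(const struct page *pages, unsigned long count)
{
    for (unsigned long iter = 0; iter < count; iter++) {
        const struct page *page = &pages[iter];

        if (page->huge) {
            if (!page->migration_supported)
                return true;                    /* new: range is unmovable */

            /* supported huge page: skip the rest of the compound page,
             * like round_up(iter + 1, 1 << order) - 1 in the kernel */
            unsigned long a = 1UL << page->compound_order;
            iter = ((iter + a) & ~(a - 1)) - 1;
            continue;
        }
        /* base pages are assumed movable in this sketch */
    }
    return false;
}

int main(void)
{
    struct page ok[4]  = { { true, true,  2 } };   /* slots 1..3: base pages */
    struct page bad[4] = { { true, false, 2 } };

    printf("supported hugepage   -> unmovable? %d\n", has_unmovable_pages(ok, 4));
    printf("unsupported hugepage -> unmovable? %d\n", has_unmovable_pages(bad, 4));
    return 0;
}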
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include "slab.h" | 20 | #include "slab.h" |
21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/notifier.h> | ||
23 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
24 | #include <linux/kasan.h> | 23 | #include <linux/kasan.h> |
25 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
@@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) | |||
435 | EXPORT_SYMBOL(kvmalloc_node); | 435 | EXPORT_SYMBOL(kvmalloc_node); |
436 | 436 | ||
437 | /** | 437 | /** |
438 | * kvfree - free memory allocated with kvmalloc | 438 | * kvfree() - Free memory. |
439 | * @addr: pointer returned by kvmalloc | 439 | * @addr: Pointer to allocated memory. |
440 | * | 440 | * |
441 | * If the memory is allocated from vmalloc area it is freed with vfree(). | 441 | * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc(). |
442 | * Otherwise kfree() is used. | 442 | * It is slightly more efficient to use kfree() or vfree() if you are certain |
443 | * that you know which one to use. | ||
444 | * | ||
445 | * Context: Any context except NMI. | ||
443 | */ | 446 | */ |
444 | void kvfree(const void *addr) | 447 | void kvfree(const void *addr) |
445 | { | 448 | { |
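The mm/util.c hunk rewrites the kvfree() kerneldoc: it frees memory from kmalloc(), vmalloc() or kvmalloc(), works in any context except NMI, and is marginally costlier than calling the matching free routine directly. A userspace analogue of the kvmalloc()/kvfree() pairing; since there is no is_vmalloc_addr() outside the kernel, a small invented header records which allocator produced the block:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/* Small header recording which allocator produced the block; the kernel
 * instead distinguishes addresses with is_vmalloc_addr(). */
struct kv_hdr { size_t size; int mapped; };

static void *kvmalloc(size_t size)
{
    struct kv_hdr *h = malloc(sizeof(*h) + size);   /* "kmalloc" attempt */

    if (h) {
        h->mapped = 0;
    } else {                                        /* fall back to "vmalloc" */
        h = mmap(NULL, sizeof(*h) + size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (h == MAP_FAILED)
            return NULL;
        h->mapped = 1;
    }
    h->size = size;
    return h + 1;
}

/* kvfree() analogue: safe for either origin, slightly costlier than
 * calling the matching free routine directly. */
static void kvfree(void *p)
{
    struct kv_hdr *h;

    if (!p)
        return;
    h = (struct kv_hdr *)p - 1;
    if (h->mapped)
        munmap(h, sizeof(*h) + h->size);
    else
        free(h);
}

int main(void)
{
    char *buf = kvmalloc(64);

    if (buf) {
        strcpy(buf, "freed by kvfree regardless of origin");
        puts(buf);
        kvfree(buf);
    }
    return 0;
}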
diff --git a/net/core/dev.c b/net/core/dev.c index 325fc5088370..82114e1111e6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -93,7 +93,6 @@ | |||
93 | #include <linux/netdevice.h> | 93 | #include <linux/netdevice.h> |
94 | #include <linux/etherdevice.h> | 94 | #include <linux/etherdevice.h> |
95 | #include <linux/ethtool.h> | 95 | #include <linux/ethtool.h> |
96 | #include <linux/notifier.h> | ||
97 | #include <linux/skbuff.h> | 96 | #include <linux/skbuff.h> |
98 | #include <linux/bpf.h> | 97 | #include <linux/bpf.h> |
99 | #include <linux/bpf_trace.h> | 98 | #include <linux/bpf_trace.h> |
diff --git a/net/core/filter.c b/net/core/filter.c index c25eb36f1320..aecdeba052d3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { | |||
2282 | .arg2_type = ARG_ANYTHING, | 2282 | .arg2_type = ARG_ANYTHING, |
2283 | }; | 2283 | }; |
2284 | 2284 | ||
2285 | #define sk_msg_iter_var(var) \ | ||
2286 | do { \ | ||
2287 | var++; \ | ||
2288 | if (var == MAX_SKB_FRAGS) \ | ||
2289 | var = 0; \ | ||
2290 | } while (0) | ||
2291 | |||
2285 | BPF_CALL_4(bpf_msg_pull_data, | 2292 | BPF_CALL_4(bpf_msg_pull_data, |
2286 | struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) | 2293 | struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) |
2287 | { | 2294 | { |
2288 | unsigned int len = 0, offset = 0, copy = 0; | 2295 | unsigned int len = 0, offset = 0, copy = 0, poffset = 0; |
2296 | int bytes = end - start, bytes_sg_total; | ||
2289 | struct scatterlist *sg = msg->sg_data; | 2297 | struct scatterlist *sg = msg->sg_data; |
2290 | int first_sg, last_sg, i, shift; | 2298 | int first_sg, last_sg, i, shift; |
2291 | unsigned char *p, *to, *from; | 2299 | unsigned char *p, *to, *from; |
2292 | int bytes = end - start; | ||
2293 | struct page *page; | 2300 | struct page *page; |
2294 | 2301 | ||
2295 | if (unlikely(flags || end <= start)) | 2302 | if (unlikely(flags || end <= start)) |
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2299 | i = msg->sg_start; | 2306 | i = msg->sg_start; |
2300 | do { | 2307 | do { |
2301 | len = sg[i].length; | 2308 | len = sg[i].length; |
2302 | offset += len; | ||
2303 | if (start < offset + len) | 2309 | if (start < offset + len) |
2304 | break; | 2310 | break; |
2305 | i++; | 2311 | offset += len; |
2306 | if (i == MAX_SKB_FRAGS) | 2312 | sk_msg_iter_var(i); |
2307 | i = 0; | ||
2308 | } while (i != msg->sg_end); | 2313 | } while (i != msg->sg_end); |
2309 | 2314 | ||
2310 | if (unlikely(start >= offset + len)) | 2315 | if (unlikely(start >= offset + len)) |
2311 | return -EINVAL; | 2316 | return -EINVAL; |
2312 | 2317 | ||
2313 | if (!msg->sg_copy[i] && bytes <= len) | ||
2314 | goto out; | ||
2315 | |||
2316 | first_sg = i; | 2318 | first_sg = i; |
2319 | /* The start may point into the sg element so we need to also | ||
2320 | * account for the headroom. | ||
2321 | */ | ||
2322 | bytes_sg_total = start - offset + bytes; | ||
2323 | if (!msg->sg_copy[i] && bytes_sg_total <= len) | ||
2324 | goto out; | ||
2317 | 2325 | ||
2318 | /* At this point we need to linearize multiple scatterlist | 2326 | /* At this point we need to linearize multiple scatterlist |
2319 | * elements or a single shared page. Either way we need to | 2327 | * elements or a single shared page. Either way we need to |
@@ -2327,37 +2335,32 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2327 | */ | 2335 | */ |
2328 | do { | 2336 | do { |
2329 | copy += sg[i].length; | 2337 | copy += sg[i].length; |
2330 | i++; | 2338 | sk_msg_iter_var(i); |
2331 | if (i == MAX_SKB_FRAGS) | 2339 | if (bytes_sg_total <= copy) |
2332 | i = 0; | ||
2333 | if (bytes < copy) | ||
2334 | break; | 2340 | break; |
2335 | } while (i != msg->sg_end); | 2341 | } while (i != msg->sg_end); |
2336 | last_sg = i; | 2342 | last_sg = i; |
2337 | 2343 | ||
2338 | if (unlikely(copy < end - start)) | 2344 | if (unlikely(bytes_sg_total > copy)) |
2339 | return -EINVAL; | 2345 | return -EINVAL; |
2340 | 2346 | ||
2341 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); | 2347 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); |
2342 | if (unlikely(!page)) | 2348 | if (unlikely(!page)) |
2343 | return -ENOMEM; | 2349 | return -ENOMEM; |
2344 | p = page_address(page); | 2350 | p = page_address(page); |
2345 | offset = 0; | ||
2346 | 2351 | ||
2347 | i = first_sg; | 2352 | i = first_sg; |
2348 | do { | 2353 | do { |
2349 | from = sg_virt(&sg[i]); | 2354 | from = sg_virt(&sg[i]); |
2350 | len = sg[i].length; | 2355 | len = sg[i].length; |
2351 | to = p + offset; | 2356 | to = p + poffset; |
2352 | 2357 | ||
2353 | memcpy(to, from, len); | 2358 | memcpy(to, from, len); |
2354 | offset += len; | 2359 | poffset += len; |
2355 | sg[i].length = 0; | 2360 | sg[i].length = 0; |
2356 | put_page(sg_page(&sg[i])); | 2361 | put_page(sg_page(&sg[i])); |
2357 | 2362 | ||
2358 | i++; | 2363 | sk_msg_iter_var(i); |
2359 | if (i == MAX_SKB_FRAGS) | ||
2360 | i = 0; | ||
2361 | } while (i != last_sg); | 2364 | } while (i != last_sg); |
2362 | 2365 | ||
2363 | sg[first_sg].length = copy; | 2366 | sg[first_sg].length = copy; |
@@ -2367,11 +2370,15 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2367 | * had a single entry though we can just replace it and | 2370 | * had a single entry though we can just replace it and |
2368 | * be done. Otherwise walk the ring and shift the entries. | 2371 | * be done. Otherwise walk the ring and shift the entries. |
2369 | */ | 2372 | */ |
2370 | shift = last_sg - first_sg - 1; | 2373 | WARN_ON_ONCE(last_sg == first_sg); |
2374 | shift = last_sg > first_sg ? | ||
2375 | last_sg - first_sg - 1 : | ||
2376 | MAX_SKB_FRAGS - first_sg + last_sg - 1; | ||
2371 | if (!shift) | 2377 | if (!shift) |
2372 | goto out; | 2378 | goto out; |
2373 | 2379 | ||
2374 | i = first_sg + 1; | 2380 | i = first_sg; |
2381 | sk_msg_iter_var(i); | ||
2375 | do { | 2382 | do { |
2376 | int move_from; | 2383 | int move_from; |
2377 | 2384 | ||
@@ -2388,15 +2395,13 @@ BPF_CALL_4(bpf_msg_pull_data, | |||
2388 | sg[move_from].page_link = 0; | 2395 | sg[move_from].page_link = 0; |
2389 | sg[move_from].offset = 0; | 2396 | sg[move_from].offset = 0; |
2390 | 2397 | ||
2391 | i++; | 2398 | sk_msg_iter_var(i); |
2392 | if (i == MAX_SKB_FRAGS) | ||
2393 | i = 0; | ||
2394 | } while (1); | 2399 | } while (1); |
2395 | msg->sg_end -= shift; | 2400 | msg->sg_end -= shift; |
2396 | if (msg->sg_end < 0) | 2401 | if (msg->sg_end < 0) |
2397 | msg->sg_end += MAX_SKB_FRAGS; | 2402 | msg->sg_end += MAX_SKB_FRAGS; |
2398 | out: | 2403 | out: |
2399 | msg->data = sg_virt(&sg[i]) + start - offset; | 2404 | msg->data = sg_virt(&sg[first_sg]) + start - offset; |
2400 | msg->data_end = msg->data + bytes; | 2405 | msg->data_end = msg->data + bytes; |
2401 | 2406 | ||
2402 | return 0; | 2407 | return 0; |
@@ -7281,7 +7286,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, | |||
7281 | break; | 7286 | break; |
7282 | 7287 | ||
7283 | case offsetof(struct sk_reuseport_md, ip_protocol): | 7288 | case offsetof(struct sk_reuseport_md, ip_protocol): |
7284 | BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE); | 7289 | BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); |
7285 | SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, | 7290 | SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, |
7286 | BPF_W, 0); | 7291 | BPF_W, 0); |
7287 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); | 7292 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); |
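The filter.c hunk replaces the open-coded wraparound increments with a sk_msg_iter_var() helper and recomputes the shift so it stays correct when last_sg has wrapped around below first_sg on the scatterlist ring. A standalone sketch of that ring arithmetic; the MAX_SKB_FRAGS value here is arbitrary:

#include <stdio.h>

#define MAX_SKB_FRAGS 8   /* arbitrary ring size for the sketch */

/* Same shape as the new sk_msg_iter_var(): advance with wraparound. */
#define sk_msg_iter_var(var)        \
    do {                            \
        (var)++;                    \
        if ((var) == MAX_SKB_FRAGS) \
            (var) = 0;              \
    } while (0)

/* Number of entries strictly between first and last on the ring; this is
 * the "shift" the fixed code computes for both the wrapped and unwrapped
 * layouts. */
static int ring_shift(int first, int last)
{
    return last > first ? last - first - 1
                        : MAX_SKB_FRAGS - first + last - 1;
}

int main(void)
{
    int i = 6;

    /* walk three slots across the wrap point: 6 -> 7 -> 0 -> 1 */
    for (int n = 0; n < 3; n++) {
        sk_msg_iter_var(i);
        printf("slot %d\n", i);
    }

    printf("shift(2, 5) = %d\n", ring_shift(2, 5));  /* 2: slots 3,4 */
    printf("shift(6, 1) = %d\n", ring_shift(6, 1));  /* 2: slots 7,0 */
    return 0;
}

For a ring of 8 slots, two entries lie strictly between slot 6 and slot 1 (slots 7 and 0), which is exactly what the wrapped branch of the shift formula returns.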
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 24431e578310..60c928894a78 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol) | |||
324 | 324 | ||
325 | rtnl_lock(); | 325 | rtnl_lock(); |
326 | tab = rtnl_msg_handlers[protocol]; | 326 | tab = rtnl_msg_handlers[protocol]; |
327 | if (!tab) { | ||
328 | rtnl_unlock(); | ||
329 | return; | ||
330 | } | ||
327 | RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); | 331 | RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); |
328 | for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { | 332 | for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { |
329 | link = tab[msgindex]; | 333 | link = tab[msgindex]; |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index e63c554e0623..9f3209ff7ffd 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -19,12 +19,10 @@ | |||
19 | #include <linux/of_mdio.h> | 19 | #include <linux/of_mdio.h> |
20 | #include <linux/of_platform.h> | 20 | #include <linux/of_platform.h> |
21 | #include <linux/of_net.h> | 21 | #include <linux/of_net.h> |
22 | #include <linux/of_gpio.h> | ||
23 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
24 | #include <linux/sysfs.h> | 23 | #include <linux/sysfs.h> |
25 | #include <linux/phy_fixed.h> | 24 | #include <linux/phy_fixed.h> |
26 | #include <linux/ptp_classify.h> | 25 | #include <linux/ptp_classify.h> |
27 | #include <linux/gpio/consumer.h> | ||
28 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
29 | 27 | ||
30 | #include "dsa_priv.h" | 28 | #include "dsa_priv.h" |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 962c4fd338ba..1c45c1d6d241 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev, | |||
767 | const struct tc_action *a; | 767 | const struct tc_action *a; |
768 | struct dsa_port *to_dp; | 768 | struct dsa_port *to_dp; |
769 | int err = -EOPNOTSUPP; | 769 | int err = -EOPNOTSUPP; |
770 | LIST_HEAD(actions); | ||
771 | 770 | ||
772 | if (!ds->ops->port_mirror_add) | 771 | if (!ds->ops->port_mirror_add) |
773 | return err; | 772 | return err; |
@@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev, | |||
775 | if (!tcf_exts_has_one_action(cls->exts)) | 774 | if (!tcf_exts_has_one_action(cls->exts)) |
776 | return err; | 775 | return err; |
777 | 776 | ||
778 | tcf_exts_to_list(cls->exts, &actions); | 777 | a = tcf_exts_first_action(cls->exts); |
779 | a = list_first_entry(&actions, struct tc_action, list); | ||
780 | 778 | ||
781 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { | 779 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { |
782 | struct dsa_mall_mirror_tc_entry *mirror; | 780 | struct dsa_mall_mirror_tc_entry *mirror; |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index cf75f8944b05..4da39446da2d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t) | |||
820 | spin_lock(&im->lock); | 820 | spin_lock(&im->lock); |
821 | im->tm_running = 0; | 821 | im->tm_running = 0; |
822 | 822 | ||
823 | if (im->unsolicit_count) { | 823 | if (im->unsolicit_count && --im->unsolicit_count) |
824 | im->unsolicit_count--; | ||
825 | igmp_start_timer(im, unsolicited_report_interval(in_dev)); | 824 | igmp_start_timer(im, unsolicited_report_interval(in_dev)); |
826 | } | 825 | |
827 | im->reporter = 1; | 826 | im->reporter = 1; |
828 | spin_unlock(&im->lock); | 827 | spin_unlock(&im->lock); |
829 | 828 | ||
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im) | |||
1308 | 1307 | ||
1309 | if (in_dev->dead) | 1308 | if (in_dev->dead) |
1310 | return; | 1309 | return; |
1310 | |||
1311 | im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; | ||
1311 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { | 1312 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { |
1312 | spin_lock_bh(&im->lock); | 1313 | spin_lock_bh(&im->lock); |
1313 | igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); | 1314 | igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); |
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, | |||
1391 | unsigned int mode) | 1392 | unsigned int mode) |
1392 | { | 1393 | { |
1393 | struct ip_mc_list *im; | 1394 | struct ip_mc_list *im; |
1394 | #ifdef CONFIG_IP_MULTICAST | ||
1395 | struct net *net = dev_net(in_dev->dev); | ||
1396 | #endif | ||
1397 | 1395 | ||
1398 | ASSERT_RTNL(); | 1396 | ASSERT_RTNL(); |
1399 | 1397 | ||
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, | |||
1420 | spin_lock_init(&im->lock); | 1418 | spin_lock_init(&im->lock); |
1421 | #ifdef CONFIG_IP_MULTICAST | 1419 | #ifdef CONFIG_IP_MULTICAST |
1422 | timer_setup(&im->timer, igmp_timer_expire, 0); | 1420 | timer_setup(&im->timer, igmp_timer_expire, 0); |
1423 | im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; | ||
1424 | #endif | 1421 | #endif |
1425 | 1422 | ||
1426 | im->next_rcu = in_dev->mc_list; | 1423 | im->next_rcu = in_dev->mc_list; |
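The igmp.c hunk moves the unsolicit_count initialisation into igmp_group_added() and changes the retransmission test to a pre-decrement, so the timer is only rearmed while the decremented counter is still non-zero. A small comparison of the two test forms in isolation (it makes no claim about the full IGMP report schedule):

#include <stdio.h>

/* old: test, then post-decrement and rearm */
static int rearms_old(int count)
{
    int n = 0;

    while (count) {
        count--;
        n++;            /* igmp_start_timer() rearms the timer */
    }
    return n;
}

/* new: only rearm while the pre-decremented count is still non-zero */
static int rearms_new(int count)
{
    int n = 0;

    while (count && --count)
        n++;
    return n;
}

int main(void)
{
    for (int qrv = 1; qrv <= 3; qrv++)
        printf("count=%d: old rearms %d times, new rearms %d times\n",
               qrv, rearms_old(qrv), rearms_new(qrv));
    return 0;
}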
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 51a5d06085ac..ae714aecc31c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -1508,11 +1508,14 @@ nla_put_failure: | |||
1508 | 1508 | ||
1509 | static void erspan_setup(struct net_device *dev) | 1509 | static void erspan_setup(struct net_device *dev) |
1510 | { | 1510 | { |
1511 | struct ip_tunnel *t = netdev_priv(dev); | ||
1512 | |||
1511 | ether_setup(dev); | 1513 | ether_setup(dev); |
1512 | dev->netdev_ops = &erspan_netdev_ops; | 1514 | dev->netdev_ops = &erspan_netdev_ops; |
1513 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1515 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
1514 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1516 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
1515 | ip_tunnel_setup(dev, erspan_net_id); | 1517 | ip_tunnel_setup(dev, erspan_net_id); |
1518 | t->erspan_ver = 1; | ||
1516 | } | 1519 | } |
1517 | 1520 | ||
1518 | static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { | 1521 | static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { |
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 13d34427ca3d..02ff2dde9609 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c | |||
@@ -95,11 +95,10 @@ struct bbr { | |||
95 | u32 mode:3, /* current bbr_mode in state machine */ | 95 | u32 mode:3, /* current bbr_mode in state machine */ |
96 | prev_ca_state:3, /* CA state on previous ACK */ | 96 | prev_ca_state:3, /* CA state on previous ACK */ |
97 | packet_conservation:1, /* use packet conservation? */ | 97 | packet_conservation:1, /* use packet conservation? */ |
98 | restore_cwnd:1, /* decided to revert cwnd to old value */ | ||
99 | round_start:1, /* start of packet-timed tx->ack round? */ | 98 | round_start:1, /* start of packet-timed tx->ack round? */ |
100 | idle_restart:1, /* restarting after idle? */ | 99 | idle_restart:1, /* restarting after idle? */ |
101 | probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */ | 100 | probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */ |
102 | unused:12, | 101 | unused:13, |
103 | lt_is_sampling:1, /* taking long-term ("LT") samples now? */ | 102 | lt_is_sampling:1, /* taking long-term ("LT") samples now? */ |
104 | lt_rtt_cnt:7, /* round trips in long-term interval */ | 103 | lt_rtt_cnt:7, /* round trips in long-term interval */ |
105 | lt_use_bw:1; /* use lt_bw as our bw estimate? */ | 104 | lt_use_bw:1; /* use lt_bw as our bw estimate? */ |
@@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8; | |||
175 | /* If we estimate we're policed, use lt_bw for this many round trips: */ | 174 | /* If we estimate we're policed, use lt_bw for this many round trips: */ |
176 | static const u32 bbr_lt_bw_max_rtts = 48; | 175 | static const u32 bbr_lt_bw_max_rtts = 48; |
177 | 176 | ||
177 | static void bbr_check_probe_rtt_done(struct sock *sk); | ||
178 | |||
178 | /* Do we estimate that STARTUP filled the pipe? */ | 179 | /* Do we estimate that STARTUP filled the pipe? */ |
179 | static bool bbr_full_bw_reached(const struct sock *sk) | 180 | static bool bbr_full_bw_reached(const struct sock *sk) |
180 | { | 181 | { |
@@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) | |||
309 | */ | 310 | */ |
310 | if (bbr->mode == BBR_PROBE_BW) | 311 | if (bbr->mode == BBR_PROBE_BW) |
311 | bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT); | 312 | bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT); |
313 | else if (bbr->mode == BBR_PROBE_RTT) | ||
314 | bbr_check_probe_rtt_done(sk); | ||
312 | } | 315 | } |
313 | } | 316 | } |
314 | 317 | ||
@@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore( | |||
396 | cwnd = tcp_packets_in_flight(tp) + acked; | 399 | cwnd = tcp_packets_in_flight(tp) + acked; |
397 | } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) { | 400 | } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) { |
398 | /* Exiting loss recovery; restore cwnd saved before recovery. */ | 401 | /* Exiting loss recovery; restore cwnd saved before recovery. */ |
399 | bbr->restore_cwnd = 1; | 402 | cwnd = max(cwnd, bbr->prior_cwnd); |
400 | bbr->packet_conservation = 0; | 403 | bbr->packet_conservation = 0; |
401 | } | 404 | } |
402 | bbr->prev_ca_state = state; | 405 | bbr->prev_ca_state = state; |
403 | 406 | ||
404 | if (bbr->restore_cwnd) { | ||
405 | /* Restore cwnd after exiting loss recovery or PROBE_RTT. */ | ||
406 | cwnd = max(cwnd, bbr->prior_cwnd); | ||
407 | bbr->restore_cwnd = 0; | ||
408 | } | ||
409 | |||
410 | if (bbr->packet_conservation) { | 407 | if (bbr->packet_conservation) { |
411 | *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked); | 408 | *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked); |
412 | return true; /* yes, using packet conservation */ | 409 | return true; /* yes, using packet conservation */ |
@@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs, | |||
423 | { | 420 | { |
424 | struct tcp_sock *tp = tcp_sk(sk); | 421 | struct tcp_sock *tp = tcp_sk(sk); |
425 | struct bbr *bbr = inet_csk_ca(sk); | 422 | struct bbr *bbr = inet_csk_ca(sk); |
426 | u32 cwnd = 0, target_cwnd = 0; | 423 | u32 cwnd = tp->snd_cwnd, target_cwnd = 0; |
427 | 424 | ||
428 | if (!acked) | 425 | if (!acked) |
429 | return; | 426 | goto done; /* no packet fully ACKed; just apply caps */ |
430 | 427 | ||
431 | if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) | 428 | if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) |
432 | goto done; | 429 | goto done; |
@@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs) | |||
748 | bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ | 745 | bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ |
749 | } | 746 | } |
750 | 747 | ||
748 | static void bbr_check_probe_rtt_done(struct sock *sk) | ||
749 | { | ||
750 | struct tcp_sock *tp = tcp_sk(sk); | ||
751 | struct bbr *bbr = inet_csk_ca(sk); | ||
752 | |||
753 | if (!(bbr->probe_rtt_done_stamp && | ||
754 | after(tcp_jiffies32, bbr->probe_rtt_done_stamp))) | ||
755 | return; | ||
756 | |||
757 | bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */ | ||
758 | tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd); | ||
759 | bbr_reset_mode(sk); | ||
760 | } | ||
761 | |||
751 | /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and | 762 | /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and |
752 | * periodically drain the bottleneck queue, to converge to measure the true | 763 | * periodically drain the bottleneck queue, to converge to measure the true |
753 | * min_rtt (unloaded propagation delay). This allows the flows to keep queues | 764 | * min_rtt (unloaded propagation delay). This allows the flows to keep queues |
@@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) | |||
806 | } else if (bbr->probe_rtt_done_stamp) { | 817 | } else if (bbr->probe_rtt_done_stamp) { |
807 | if (bbr->round_start) | 818 | if (bbr->round_start) |
808 | bbr->probe_rtt_round_done = 1; | 819 | bbr->probe_rtt_round_done = 1; |
809 | if (bbr->probe_rtt_round_done && | 820 | if (bbr->probe_rtt_round_done) |
810 | after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) { | 821 | bbr_check_probe_rtt_done(sk); |
811 | bbr->min_rtt_stamp = tcp_jiffies32; | ||
812 | bbr->restore_cwnd = 1; /* snap to prior_cwnd */ | ||
813 | bbr_reset_mode(sk); | ||
814 | } | ||
815 | } | 822 | } |
816 | } | 823 | } |
817 | /* Restart after idle ends only once we process a new S/ACK for data */ | 824 | /* Restart after idle ends only once we process a new S/ACK for data */ |
@@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk) | |||
862 | bbr->has_seen_rtt = 0; | 869 | bbr->has_seen_rtt = 0; |
863 | bbr_init_pacing_rate_from_rtt(sk); | 870 | bbr_init_pacing_rate_from_rtt(sk); |
864 | 871 | ||
865 | bbr->restore_cwnd = 0; | ||
866 | bbr->round_start = 0; | 872 | bbr->round_start = 0; |
867 | bbr->idle_restart = 0; | 873 | bbr->idle_restart = 0; |
868 | bbr->full_bw_reached = 0; | 874 | bbr->full_bw_reached = 0; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9e041fa5c545..44c09eddbb78 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -2517,6 +2517,12 @@ static int __net_init tcp_sk_init(struct net *net) | |||
2517 | if (res) | 2517 | if (res) |
2518 | goto fail; | 2518 | goto fail; |
2519 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | 2519 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); |
2520 | |||
2521 | /* Please enforce IP_DF and IPID==0 for RST and | ||
2522 | * ACK sent in SYN-RECV and TIME-WAIT state. | ||
2523 | */ | ||
2524 | inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO; | ||
2525 | |||
2520 | *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; | 2526 | *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; |
2521 | } | 2527 | } |
2522 | 2528 | ||
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 75ef332a7caf..12affb7864d9 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -184,8 +184,9 @@ kill: | |||
184 | inet_twsk_deschedule_put(tw); | 184 | inet_twsk_deschedule_put(tw); |
185 | return TCP_TW_SUCCESS; | 185 | return TCP_TW_SUCCESS; |
186 | } | 186 | } |
187 | } else { | ||
188 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); | ||
187 | } | 189 | } |
188 | inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); | ||
189 | 190 | ||
190 | if (tmp_opt.saw_tstamp) { | 191 | if (tmp_opt.saw_tstamp) { |
191 | tcptw->tw_ts_recent = tmp_opt.rcv_tsval; | 192 | tcptw->tw_ts_recent = tmp_opt.rcv_tsval; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2fac4ad74867..d51a8c0b3372 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev) | |||
2398 | 2398 | ||
2399 | ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); | 2399 | ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); |
2400 | 2400 | ||
2401 | ip6_route_add(&cfg, GFP_ATOMIC, NULL); | 2401 | ip6_route_add(&cfg, GFP_KERNEL, NULL); |
2402 | } | 2402 | } |
2403 | 2403 | ||
2404 | static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | 2404 | static struct inet6_dev *addrconf_add_dev(struct net_device *dev) |
@@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
3062 | if (addr.s6_addr32[3]) { | 3062 | if (addr.s6_addr32[3]) { |
3063 | add_addr(idev, &addr, plen, scope); | 3063 | add_addr(idev, &addr, plen, scope); |
3064 | addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, | 3064 | addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, |
3065 | GFP_ATOMIC); | 3065 | GFP_KERNEL); |
3066 | return; | 3066 | return; |
3067 | } | 3067 | } |
3068 | 3068 | ||
@@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
3087 | 3087 | ||
3088 | add_addr(idev, &addr, plen, flag); | 3088 | add_addr(idev, &addr, plen, flag); |
3089 | addrconf_prefix_route(&addr, plen, 0, idev->dev, | 3089 | addrconf_prefix_route(&addr, plen, 0, idev->dev, |
3090 | 0, pflags, GFP_ATOMIC); | 3090 | 0, pflags, GFP_KERNEL); |
3091 | } | 3091 | } |
3092 | } | 3092 | } |
3093 | } | 3093 | } |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 673bba31eb18..9a4261e50272 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -938,14 +938,14 @@ static int __init inet6_init(void) | |||
938 | 938 | ||
939 | err = proto_register(&pingv6_prot, 1); | 939 | err = proto_register(&pingv6_prot, 1); |
940 | if (err) | 940 | if (err) |
941 | goto out_unregister_ping_proto; | 941 | goto out_unregister_raw_proto; |
942 | 942 | ||
943 | /* We MUST register RAW sockets before we create the ICMP6, | 943 | /* We MUST register RAW sockets before we create the ICMP6, |
944 | * IGMP6, or NDISC control sockets. | 944 | * IGMP6, or NDISC control sockets. |
945 | */ | 945 | */ |
946 | err = rawv6_init(); | 946 | err = rawv6_init(); |
947 | if (err) | 947 | if (err) |
948 | goto out_unregister_raw_proto; | 948 | goto out_unregister_ping_proto; |
949 | 949 | ||
950 | /* Register the family here so that the init calls below will | 950 | /* Register the family here so that the init calls below will |
951 | * be able to create sockets. (?? is this dangerous ??) | 951 | * be able to create sockets. (?? is this dangerous ??) |
@@ -1113,11 +1113,11 @@ netfilter_fail: | |||
1113 | igmp_fail: | 1113 | igmp_fail: |
1114 | ndisc_cleanup(); | 1114 | ndisc_cleanup(); |
1115 | ndisc_fail: | 1115 | ndisc_fail: |
1116 | ip6_mr_cleanup(); | 1116 | icmpv6_cleanup(); |
1117 | icmp_fail: | 1117 | icmp_fail: |
1118 | unregister_pernet_subsys(&inet6_net_ops); | 1118 | ip6_mr_cleanup(); |
1119 | ipmr_fail: | 1119 | ipmr_fail: |
1120 | icmpv6_cleanup(); | 1120 | unregister_pernet_subsys(&inet6_net_ops); |
1121 | register_pernet_fail: | 1121 | register_pernet_fail: |
1122 | sock_unregister(PF_INET6); | 1122 | sock_unregister(PF_INET6); |
1123 | rtnl_unregister_all(PF_INET6); | 1123 | rtnl_unregister_all(PF_INET6); |
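The af_inet6.c hunk straightens out the init error ladder so each label undoes exactly what had already succeeded, in reverse order: the ping registration failure now unregisters the raw protocol, and the icmpv6/ip6_mr cleanups are swapped to match their init order. A generic sketch of the pattern with invented subsystem names:

#include <stdio.h>

/* Invented init/cleanup pairs illustrating the goto-ladder pattern: the
 * error labels must tear down in the reverse order of what succeeded. */
static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static int init_c(void) { puts("init c"); return -1; }  /* pretend failure */
static void cleanup_a(void) { puts("cleanup a"); }
static void cleanup_b(void) { puts("cleanup b"); }

static int subsystem_init(void)
{
    int err;

    err = init_a();
    if (err)
        goto out;
    err = init_b();
    if (err)
        goto out_a;         /* b failed: only a needs undoing */
    err = init_c();
    if (err)
        goto out_b;         /* c failed: undo b, then a */
    return 0;

out_b:
    cleanup_b();
out_a:
    cleanup_a();
out:
    return err;
}

int main(void)
{
    return subsystem_init() ? 1 : 0;
}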
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index d212738e9d10..5516f55e214b 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head) | |||
198 | } | 198 | } |
199 | } | 199 | } |
200 | 200 | ||
201 | lwtstate_put(f6i->fib6_nh.nh_lwtstate); | ||
202 | |||
201 | if (f6i->fib6_nh.nh_dev) | 203 | if (f6i->fib6_nh.nh_dev) |
202 | dev_put(f6i->fib6_nh.nh_dev); | 204 | dev_put(f6i->fib6_nh.nh_dev); |
203 | 205 | ||
@@ -987,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, | |||
987 | fib6_clean_expires(iter); | 989 | fib6_clean_expires(iter); |
988 | else | 990 | else |
989 | fib6_set_expires(iter, rt->expires); | 991 | fib6_set_expires(iter, rt->expires); |
990 | fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); | 992 | |
993 | if (rt->fib6_pmtu) | ||
994 | fib6_metric_set(iter, RTAX_MTU, | ||
995 | rt->fib6_pmtu); | ||
991 | return -EEXIST; | 996 | return -EEXIST; |
992 | } | 997 | } |
993 | /* If we have the same destination and the same metric, | 998 | /* If we have the same destination and the same metric, |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 18a3794b0f52..e493b041d4ac 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[], | |||
1778 | if (data[IFLA_GRE_COLLECT_METADATA]) | 1778 | if (data[IFLA_GRE_COLLECT_METADATA]) |
1779 | parms->collect_md = true; | 1779 | parms->collect_md = true; |
1780 | 1780 | ||
1781 | parms->erspan_ver = 1; | ||
1781 | if (data[IFLA_GRE_ERSPAN_VER]) | 1782 | if (data[IFLA_GRE_ERSPAN_VER]) |
1782 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | 1783 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); |
1783 | 1784 | ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 5df2a58d945c..419960b0ba16 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1188,7 +1188,15 @@ route_lookup: | |||
1188 | init_tel_txopt(&opt, encap_limit); | 1188 | init_tel_txopt(&opt, encap_limit); |
1189 | ipv6_push_frag_opts(skb, &opt.ops, &proto); | 1189 | ipv6_push_frag_opts(skb, &opt.ops, &proto); |
1190 | } | 1190 | } |
1191 | hop_limit = hop_limit ? : ip6_dst_hoplimit(dst); | 1191 | |
1192 | if (hop_limit == 0) { | ||
1193 | if (skb->protocol == htons(ETH_P_IP)) | ||
1194 | hop_limit = ip_hdr(skb)->ttl; | ||
1195 | else if (skb->protocol == htons(ETH_P_IPV6)) | ||
1196 | hop_limit = ipv6_hdr(skb)->hop_limit; | ||
1197 | else | ||
1198 | hop_limit = ip6_dst_hoplimit(dst); | ||
1199 | } | ||
1192 | 1200 | ||
1193 | /* Calculate max headroom for all the headers and adjust | 1201 | /* Calculate max headroom for all the headers and adjust |
1194 | * needed_headroom if necessary. | 1202 | * needed_headroom if necessary. |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 38dec9da90d3..eeaf7455d51e 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) | |||
481 | } | 481 | } |
482 | 482 | ||
483 | mtu = dst_mtu(dst); | 483 | mtu = dst_mtu(dst); |
484 | if (!skb->ignore_df && skb->len > mtu) { | 484 | if (skb->len > mtu) { |
485 | skb_dst_update_pmtu(skb, mtu); | 485 | skb_dst_update_pmtu(skb, mtu); |
486 | 486 | ||
487 | if (skb->protocol == htons(ETH_P_IPV6)) { | 487 | if (skb->protocol == htons(ETH_P_IPV6)) { |
@@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n, | |||
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | t = rtnl_dereference(ip6n->tnls_wc[0]); | 1096 | t = rtnl_dereference(ip6n->tnls_wc[0]); |
1097 | unregister_netdevice_queue(t->dev, list); | 1097 | if (t) |
1098 | unregister_netdevice_queue(t->dev, list); | ||
1098 | } | 1099 | } |
1099 | 1100 | ||
1100 | static int __net_init vti6_init_net(struct net *net) | 1101 | static int __net_init vti6_init_net(struct net *net) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7208c16302f6..18e00ce1719a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) | |||
956 | rt->dst.error = 0; | 956 | rt->dst.error = 0; |
957 | rt->dst.output = ip6_output; | 957 | rt->dst.output = ip6_output; |
958 | 958 | ||
959 | if (ort->fib6_type == RTN_LOCAL) { | 959 | if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) { |
960 | rt->dst.input = ip6_input; | 960 | rt->dst.input = ip6_input; |
961 | } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { | 961 | } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { |
962 | rt->dst.input = ip6_mc_input; | 962 | rt->dst.input = ip6_mc_input; |
@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) | |||
996 | rt->rt6i_src = ort->fib6_src; | 996 | rt->rt6i_src = ort->fib6_src; |
997 | #endif | 997 | #endif |
998 | rt->rt6i_prefsrc = ort->fib6_prefsrc; | 998 | rt->rt6i_prefsrc = ort->fib6_prefsrc; |
999 | rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate); | ||
1000 | } | 999 | } |
1001 | 1000 | ||
1002 | static struct fib6_node* fib6_backtrack(struct fib6_node *fn, | 1001 | static struct fib6_node* fib6_backtrack(struct fib6_node *fn, |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 6449a1c2283b..f0f5fedb8caa 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, | |||
947 | if (len < IEEE80211_DEAUTH_FRAME_LEN) | 947 | if (len < IEEE80211_DEAUTH_FRAME_LEN) |
948 | return; | 948 | return; |
949 | 949 | ||
950 | ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n", | 950 | ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
951 | mgmt->sa, mgmt->da, mgmt->bssid, reason); | 951 | ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason); |
952 | sta_info_destroy_addr(sdata, mgmt->sa); | 952 | sta_info_destroy_addr(sdata, mgmt->sa); |
953 | } | 953 | } |
954 | 954 | ||
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, | |||
966 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); | 966 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); |
967 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); | 967 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); |
968 | 968 | ||
969 | ibss_dbg(sdata, | 969 | ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
970 | "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", | 970 | ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n", |
971 | mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); | 971 | mgmt->bssid, auth_transaction); |
972 | 972 | ||
973 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) | 973 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) |
974 | return; | 974 | return; |
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
1175 | rx_timestamp = drv_get_tsf(local, sdata); | 1175 | rx_timestamp = drv_get_tsf(local, sdata); |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | ibss_dbg(sdata, | 1178 | ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n", |
1179 | "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", | ||
1180 | mgmt->sa, mgmt->bssid, | 1179 | mgmt->sa, mgmt->bssid, |
1181 | (unsigned long long)rx_timestamp, | 1180 | (unsigned long long)rx_timestamp); |
1181 | ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n", | ||
1182 | (unsigned long long)beacon_timestamp, | 1182 | (unsigned long long)beacon_timestamp, |
1183 | (unsigned long long)(rx_timestamp - beacon_timestamp), | 1183 | (unsigned long long)(rx_timestamp - beacon_timestamp), |
1184 | jiffies); | 1184 | jiffies); |
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, | |||
1537 | 1537 | ||
1538 | tx_last_beacon = drv_tx_last_beacon(local); | 1538 | tx_last_beacon = drv_tx_last_beacon(local); |
1539 | 1539 | ||
1540 | ibss_dbg(sdata, | 1540 | ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); |
1541 | "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n", | 1541 | ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n", |
1542 | mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon); | 1542 | mgmt->bssid, tx_last_beacon); |
1543 | 1543 | ||
1544 | if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) | 1544 | if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) |
1545 | return; | 1545 | return; |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 4fb2709cb527..513627896204 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work) | |||
256 | 256 | ||
257 | flush_work(&local->radar_detected_work); | 257 | flush_work(&local->radar_detected_work); |
258 | rtnl_lock(); | 258 | rtnl_lock(); |
259 | list_for_each_entry(sdata, &local->interfaces, list) | 259 | list_for_each_entry(sdata, &local->interfaces, list) { |
260 | /* | ||
261 | * XXX: there may be more work for other vif types and even | ||
262 | * for station mode: a good thing would be to run most of | ||
263 | * the iface type's dependent _stop (ieee80211_mg_stop, | ||
264 | * ieee80211_ibss_stop) etc... | ||
265 | * For now, fix only the specific bug that was seen: race | ||
266 | * between csa_connection_drop_work and us. | ||
267 | */ | ||
268 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
269 | /* | ||
270 | * This worker is scheduled from the iface worker that | ||
271 | * runs on mac80211's workqueue, so we can't be | ||
272 | * scheduling this worker after the cancel right here. | ||
273 | * The exception is ieee80211_chswitch_done. | ||
274 | * Then we can have a race... | ||
275 | */ | ||
276 | cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work); | ||
277 | } | ||
260 | flush_delayed_work(&sdata->dec_tailroom_needed_wk); | 278 | flush_delayed_work(&sdata->dec_tailroom_needed_wk); |
279 | } | ||
261 | ieee80211_scan_cancel(local); | 280 | ieee80211_scan_cancel(local); |
262 | 281 | ||
263 | /* make sure any new ROC will consider local->in_reconfig */ | 282 | /* make sure any new ROC will consider local->in_reconfig */ |
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { | |||
471 | cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | | 490 | cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | |
472 | IEEE80211_VHT_CAP_SHORT_GI_80 | | 491 | IEEE80211_VHT_CAP_SHORT_GI_80 | |
473 | IEEE80211_VHT_CAP_SHORT_GI_160 | | 492 | IEEE80211_VHT_CAP_SHORT_GI_160 | |
474 | IEEE80211_VHT_CAP_RXSTBC_1 | | 493 | IEEE80211_VHT_CAP_RXSTBC_MASK | |
475 | IEEE80211_VHT_CAP_RXSTBC_2 | | ||
476 | IEEE80211_VHT_CAP_RXSTBC_3 | | ||
477 | IEEE80211_VHT_CAP_RXSTBC_4 | | ||
478 | IEEE80211_VHT_CAP_TXSTBC | | 494 | IEEE80211_VHT_CAP_TXSTBC | |
479 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | | 495 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | |
480 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | | 496 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1208 | #if IS_ENABLED(CONFIG_IPV6) | 1224 | #if IS_ENABLED(CONFIG_IPV6) |
1209 | unregister_inet6addr_notifier(&local->ifa6_notifier); | 1225 | unregister_inet6addr_notifier(&local->ifa6_notifier); |
1210 | #endif | 1226 | #endif |
1227 | ieee80211_txq_teardown_flows(local); | ||
1211 | 1228 | ||
1212 | rtnl_lock(); | 1229 | rtnl_lock(); |
1213 | 1230 | ||
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1236 | skb_queue_purge(&local->skb_queue); | 1253 | skb_queue_purge(&local->skb_queue); |
1237 | skb_queue_purge(&local->skb_queue_unreliable); | 1254 | skb_queue_purge(&local->skb_queue_unreliable); |
1238 | skb_queue_purge(&local->skb_queue_tdls_chsw); | 1255 | skb_queue_purge(&local->skb_queue_tdls_chsw); |
1239 | ieee80211_txq_teardown_flows(local); | ||
1240 | 1256 | ||
1241 | destroy_workqueue(local->workqueue); | 1257 | destroy_workqueue(local->workqueue); |
1242 | wiphy_unregister(local->hw.wiphy); | 1258 | wiphy_unregister(local->hw.wiphy); |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 35ad3983ae4b..daf9db3c8f24 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | |||
572 | forward = false; | 572 | forward = false; |
573 | reply = true; | 573 | reply = true; |
574 | target_metric = 0; | 574 | target_metric = 0; |
575 | |||
576 | if (SN_GT(target_sn, ifmsh->sn)) | ||
577 | ifmsh->sn = target_sn; | ||
578 | |||
575 | if (time_after(jiffies, ifmsh->last_sn_update + | 579 | if (time_after(jiffies, ifmsh->last_sn_update + |
576 | net_traversal_jiffies(sdata)) || | 580 | net_traversal_jiffies(sdata)) || |
577 | time_before(jiffies, ifmsh->last_sn_update)) { | 581 | time_before(jiffies, ifmsh->last_sn_update)) { |
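Note on the mesh_hwmp.c hunk above: the PREQ handler now advances the local mesh sequence number to the requested target SN when that SN is ahead, using the wraparound-safe SN_GT comparison. A standalone sketch of the idea; the macro below is the usual RFC 1982-style serial-number trick and is an assumption for illustration, not copied from mesh.h:

    #include <stdint.h>

    /* Wraparound-safe "a is newer than b" for 32-bit sequence numbers
     * (assumed here for illustration). */
    #define SN_GT(a, b) ((int32_t)((b) - (a)) < 0)

    static void maybe_adopt_target_sn(uint32_t *own_sn, uint32_t target_sn)
    {
        if (SN_GT(target_sn, *own_sn))
            *own_sn = target_sn;   /* never reply with a stale sequence number */
    }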
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 7fb9957359a3..3dbecae4be73 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1073 | */ | 1073 | */ |
1074 | 1074 | ||
1075 | if (sdata->reserved_chanctx) { | 1075 | if (sdata->reserved_chanctx) { |
1076 | struct ieee80211_supported_band *sband = NULL; | ||
1077 | struct sta_info *mgd_sta = NULL; | ||
1078 | enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20; | ||
1079 | |||
1076 | /* | 1080 | /* |
1077 | * with multi-vif csa driver may call ieee80211_csa_finish() | 1081 | * with multi-vif csa driver may call ieee80211_csa_finish() |
1078 | * many times while waiting for other interfaces to use their | 1082 | * many times while waiting for other interfaces to use their |
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1081 | if (sdata->reserved_ready) | 1085 | if (sdata->reserved_ready) |
1082 | goto out; | 1086 | goto out; |
1083 | 1087 | ||
1088 | if (sdata->vif.bss_conf.chandef.width != | ||
1089 | sdata->csa_chandef.width) { | ||
1090 | /* | ||
1091 | * For managed interface, we need to also update the AP | ||
1092 | * station bandwidth and align the rate scale algorithm | ||
1093 | * on the bandwidth change. Here we only consider the | ||
1094 | * bandwidth of the new channel definition (as channel | ||
1095 | * switch flow does not have the full HT/VHT/HE | ||
1096 | * information), assuming that if additional changes are | ||
1097 | * required they would be done as part of the processing | ||
1098 | * of the next beacon from the AP. | ||
1099 | */ | ||
1100 | switch (sdata->csa_chandef.width) { | ||
1101 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
1102 | case NL80211_CHAN_WIDTH_20: | ||
1103 | default: | ||
1104 | bw = IEEE80211_STA_RX_BW_20; | ||
1105 | break; | ||
1106 | case NL80211_CHAN_WIDTH_40: | ||
1107 | bw = IEEE80211_STA_RX_BW_40; | ||
1108 | break; | ||
1109 | case NL80211_CHAN_WIDTH_80: | ||
1110 | bw = IEEE80211_STA_RX_BW_80; | ||
1111 | break; | ||
1112 | case NL80211_CHAN_WIDTH_80P80: | ||
1113 | case NL80211_CHAN_WIDTH_160: | ||
1114 | bw = IEEE80211_STA_RX_BW_160; | ||
1115 | break; | ||
1116 | } | ||
1117 | |||
1118 | mgd_sta = sta_info_get(sdata, ifmgd->bssid); | ||
1119 | sband = | ||
1120 | local->hw.wiphy->bands[sdata->csa_chandef.chan->band]; | ||
1121 | } | ||
1122 | |||
1123 | if (sdata->vif.bss_conf.chandef.width > | ||
1124 | sdata->csa_chandef.width) { | ||
1125 | mgd_sta->sta.bandwidth = bw; | ||
1126 | rate_control_rate_update(local, sband, mgd_sta, | ||
1127 | IEEE80211_RC_BW_CHANGED); | ||
1128 | } | ||
1129 | |||
1084 | ret = ieee80211_vif_use_reserved_context(sdata); | 1130 | ret = ieee80211_vif_use_reserved_context(sdata); |
1085 | if (ret) { | 1131 | if (ret) { |
1086 | sdata_info(sdata, | 1132 | sdata_info(sdata, |
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1091 | goto out; | 1137 | goto out; |
1092 | } | 1138 | } |
1093 | 1139 | ||
1140 | if (sdata->vif.bss_conf.chandef.width < | ||
1141 | sdata->csa_chandef.width) { | ||
1142 | mgd_sta->sta.bandwidth = bw; | ||
1143 | rate_control_rate_update(local, sband, mgd_sta, | ||
1144 | IEEE80211_RC_BW_CHANGED); | ||
1145 | } | ||
1146 | |||
1094 | goto out; | 1147 | goto out; |
1095 | } | 1148 | } |
1096 | 1149 | ||
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1312 | cbss->beacon_interval)); | 1365 | cbss->beacon_interval)); |
1313 | return; | 1366 | return; |
1314 | drop_connection: | 1367 | drop_connection: |
1368 | /* | ||
1369 | * This is just so that the disconnect flow will know that | ||
1370 | * we were trying to switch channel and failed. In case the | ||
1371 | * mode is 1 (we are not allowed to Tx), we will know not to | ||
1372 | * send a deauthentication frame. Those two fields will be | ||
1373 | * reset when the disconnection worker runs. | ||
1374 | */ | ||
1375 | sdata->vif.csa_active = true; | ||
1376 | sdata->csa_block_tx = csa_ie.mode; | ||
1377 | |||
1315 | ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); | 1378 | ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); |
1316 | mutex_unlock(&local->chanctx_mtx); | 1379 | mutex_unlock(&local->chanctx_mtx); |
1317 | mutex_unlock(&local->mtx); | 1380 | mutex_unlock(&local->mtx); |
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2522 | struct ieee80211_local *local = sdata->local; | 2585 | struct ieee80211_local *local = sdata->local; |
2523 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2586 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2524 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; | 2587 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
2588 | bool tx; | ||
2525 | 2589 | ||
2526 | sdata_lock(sdata); | 2590 | sdata_lock(sdata); |
2527 | if (!ifmgd->associated) { | 2591 | if (!ifmgd->associated) { |
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2529 | return; | 2593 | return; |
2530 | } | 2594 | } |
2531 | 2595 | ||
2596 | tx = !sdata->csa_block_tx; | ||
2597 | |||
2532 | /* AP is probably out of range (or not reachable for another reason) so | 2598 | /* AP is probably out of range (or not reachable for another reason) so |
2533 | * remove the bss struct for that AP. | 2599 | * remove the bss struct for that AP. |
2534 | */ | 2600 | */ |
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2536 | 2602 | ||
2537 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | 2603 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, |
2538 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, | 2604 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, |
2539 | true, frame_buf); | 2605 | tx, frame_buf); |
2540 | mutex_lock(&local->mtx); | 2606 | mutex_lock(&local->mtx); |
2541 | sdata->vif.csa_active = false; | 2607 | sdata->vif.csa_active = false; |
2542 | ifmgd->csa_waiting_bcn = false; | 2608 | ifmgd->csa_waiting_bcn = false; |
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2547 | } | 2613 | } |
2548 | mutex_unlock(&local->mtx); | 2614 | mutex_unlock(&local->mtx); |
2549 | 2615 | ||
2550 | ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, | 2616 | ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, |
2551 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); | 2617 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); |
2552 | 2618 | ||
2553 | sdata_unlock(sdata); | 2619 | sdata_unlock(sdata); |
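Note on the mlme.c hunks above: the channel-switch worker derives the AP station's RX bandwidth from the width of the target chandef and informs rate control before the context switch when narrowing, and only after it when widening, so rates wider than the live channel are never selected. A standalone sketch of just the width-to-bandwidth mapping; the enum values are local stand-ins mirroring the NL80211_CHAN_WIDTH_* and IEEE80211_STA_RX_BW_* names in the hunk:

    /* Local stand-ins mirroring the enums used in the hunk above. */
    enum chan_width { W_20_NOHT, W_20, W_40, W_80, W_80P80, W_160 };
    enum sta_rx_bw  { BW_20, BW_40, BW_80, BW_160 };

    static enum sta_rx_bw chan_width_to_sta_bw(enum chan_width w)
    {
        switch (w) {
        case W_40:
            return BW_40;
        case W_80:
            return BW_80;
        case W_80P80:
        case W_160:
            return BW_160;       /* 80+80 is handled like 160 for the station */
        case W_20_NOHT:
        case W_20:
        default:
            return BW_20;        /* safe default, as in the hunk */
        }
    }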
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 64742f2765c4..96611d5dfadb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1728 | */ | 1728 | */ |
1729 | if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && | 1729 | if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && |
1730 | !ieee80211_has_morefrags(hdr->frame_control) && | 1730 | !ieee80211_has_morefrags(hdr->frame_control) && |
1731 | !is_multicast_ether_addr(hdr->addr1) && | ||
1731 | (ieee80211_is_mgmt(hdr->frame_control) || | 1732 | (ieee80211_is_mgmt(hdr->frame_control) || |
1732 | ieee80211_is_data(hdr->frame_control)) && | 1733 | ieee80211_is_data(hdr->frame_control)) && |
1733 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && | 1734 | !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index cd332e3e1134..f353d9db54bc 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta) | |||
3078 | } | 3078 | } |
3079 | 3079 | ||
3080 | static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, | 3080 | static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, |
3081 | struct sk_buff *skb, int headroom, | 3081 | struct sk_buff *skb, int headroom) |
3082 | int *subframe_len) | ||
3083 | { | 3082 | { |
3084 | int amsdu_len = *subframe_len + sizeof(struct ethhdr); | 3083 | if (skb_headroom(skb) < headroom) { |
3085 | int padding = (4 - amsdu_len) & 3; | ||
3086 | |||
3087 | if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) { | ||
3088 | I802_DEBUG_INC(local->tx_expand_skb_head); | 3084 | I802_DEBUG_INC(local->tx_expand_skb_head); |
3089 | 3085 | ||
3090 | if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) { | 3086 | if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { |
3091 | wiphy_debug(local->hw.wiphy, | 3087 | wiphy_debug(local->hw.wiphy, |
3092 | "failed to reallocate TX buffer\n"); | 3088 | "failed to reallocate TX buffer\n"); |
3093 | return false; | 3089 | return false; |
3094 | } | 3090 | } |
3095 | } | 3091 | } |
3096 | 3092 | ||
3097 | if (padding) { | ||
3098 | *subframe_len += padding; | ||
3099 | skb_put_zero(skb, padding); | ||
3100 | } | ||
3101 | |||
3102 | return true; | 3093 | return true; |
3103 | } | 3094 | } |
3104 | 3095 | ||
@@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, | |||
3122 | if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) | 3113 | if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) |
3123 | return true; | 3114 | return true; |
3124 | 3115 | ||
3125 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr), | 3116 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr))) |
3126 | &subframe_len)) | ||
3127 | return false; | 3117 | return false; |
3128 | 3118 | ||
3129 | data = skb_push(skb, sizeof(*amsdu_hdr)); | 3119 | data = skb_push(skb, sizeof(*amsdu_hdr)); |
@@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3189 | void *data; | 3179 | void *data; |
3190 | bool ret = false; | 3180 | bool ret = false; |
3191 | unsigned int orig_len; | 3181 | unsigned int orig_len; |
3192 | int n = 1, nfrags; | 3182 | int n = 2, nfrags, pad = 0; |
3183 | u16 hdrlen; | ||
3193 | 3184 | ||
3194 | if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) | 3185 | if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) |
3195 | return false; | 3186 | return false; |
@@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3222 | if (skb->len + head->len > max_amsdu_len) | 3213 | if (skb->len + head->len > max_amsdu_len) |
3223 | goto out; | 3214 | goto out; |
3224 | 3215 | ||
3225 | if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) | ||
3226 | goto out; | ||
3227 | |||
3228 | nfrags = 1 + skb_shinfo(skb)->nr_frags; | 3216 | nfrags = 1 + skb_shinfo(skb)->nr_frags; |
3229 | nfrags += 1 + skb_shinfo(head)->nr_frags; | 3217 | nfrags += 1 + skb_shinfo(head)->nr_frags; |
3230 | frag_tail = &skb_shinfo(head)->frag_list; | 3218 | frag_tail = &skb_shinfo(head)->frag_list; |
@@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3240 | if (max_frags && nfrags > max_frags) | 3228 | if (max_frags && nfrags > max_frags) |
3241 | goto out; | 3229 | goto out; |
3242 | 3230 | ||
3243 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2, | 3231 | if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) |
3244 | &subframe_len)) | ||
3245 | goto out; | 3232 | goto out; |
3246 | 3233 | ||
3234 | /* | ||
3235 | * Pad out the previous subframe to a multiple of 4 by adding the | ||
3236 | * padding to the next one, that's being added. Note that head->len | ||
3237 | * is the length of the full A-MSDU, but that works since each time | ||
3238 | * we add a new subframe we pad out the previous one to a multiple | ||
3239 | * of 4 and thus it no longer matters in the next round. | ||
3240 | */ | ||
3241 | hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header); | ||
3242 | if ((head->len - hdrlen) & 3) | ||
3243 | pad = 4 - ((head->len - hdrlen) & 3); | ||
3244 | |||
3245 | if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + | ||
3246 | 2 + pad)) | ||
3247 | goto out_recalc; | ||
3248 | |||
3247 | ret = true; | 3249 | ret = true; |
3248 | data = skb_push(skb, ETH_ALEN + 2); | 3250 | data = skb_push(skb, ETH_ALEN + 2); |
3249 | memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); | 3251 | memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); |
@@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
3253 | memcpy(data, &len, 2); | 3255 | memcpy(data, &len, 2); |
3254 | memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); | 3256 | memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); |
3255 | 3257 | ||
3258 | memset(skb_push(skb, pad), 0, pad); | ||
3259 | |||
3256 | head->len += skb->len; | 3260 | head->len += skb->len; |
3257 | head->data_len += skb->len; | 3261 | head->data_len += skb->len; |
3258 | *frag_tail = skb; | 3262 | *frag_tail = skb; |
3259 | 3263 | ||
3260 | flow->backlog += head->len - orig_len; | 3264 | out_recalc: |
3261 | tin->backlog_bytes += head->len - orig_len; | 3265 | if (head->len != orig_len) { |
3262 | 3266 | flow->backlog += head->len - orig_len; | |
3263 | fq_recalc_backlog(fq, tin, flow); | 3267 | tin->backlog_bytes += head->len - orig_len; |
3264 | 3268 | ||
3269 | fq_recalc_backlog(fq, tin, flow); | ||
3270 | } | ||
3265 | out: | 3271 | out: |
3266 | spin_unlock_bh(&fq->lock); | 3272 | spin_unlock_bh(&fq->lock); |
3267 | 3273 | ||
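Note on the tx.c hunks above: rather than padding a subframe when it is created, the rework pads the previous subframe by prepending bytes to the next one; the amount is whatever brings the A-MSDU (minus the 802.11 header, excluding the RFC 1042 part) back to a 4-byte boundary. A standalone sketch of that computation, where head_len and hdr_len correspond to head->len and fast_tx->hdr_len - sizeof(rfc1042_header) in the hunk:

    /* Bytes of padding to prepend to the next subframe so the previous
     * one ends on a 4-byte boundary. */
    static unsigned int amsdu_pad_for_next_subframe(unsigned int head_len,
                                                    unsigned int hdr_len)
    {
        unsigned int misalign = (head_len - hdr_len) & 3;

        return misalign ? 4 - misalign : 0;
    }

Because each round realigns the running total, using the whole A-MSDU length rather than only the last subframe is harmless, which is exactly what the comment in the hunk points out.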
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 88efda7c9f8a..716cd6442d86 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
1135 | { | 1135 | { |
1136 | struct ieee80211_chanctx_conf *chanctx_conf; | 1136 | struct ieee80211_chanctx_conf *chanctx_conf; |
1137 | const struct ieee80211_reg_rule *rrule; | 1137 | const struct ieee80211_reg_rule *rrule; |
1138 | struct ieee80211_wmm_ac *wmm_ac; | 1138 | const struct ieee80211_wmm_ac *wmm_ac; |
1139 | u16 center_freq = 0; | 1139 | u16 center_freq = 0; |
1140 | 1140 | ||
1141 | if (sdata->vif.type != NL80211_IFTYPE_AP && | 1141 | if (sdata->vif.type != NL80211_IFTYPE_AP && |
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
1154 | 1154 | ||
1155 | rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); | 1155 | rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); |
1156 | 1156 | ||
1157 | if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) { | 1157 | if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) { |
1158 | rcu_read_unlock(); | 1158 | rcu_read_unlock(); |
1159 | return; | 1159 | return; |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | if (sdata->vif.type == NL80211_IFTYPE_AP) | 1162 | if (sdata->vif.type == NL80211_IFTYPE_AP) |
1163 | wmm_ac = &rrule->wmm_rule->ap[ac]; | 1163 | wmm_ac = &rrule->wmm_rule.ap[ac]; |
1164 | else | 1164 | else |
1165 | wmm_ac = &rrule->wmm_rule->client[ac]; | 1165 | wmm_ac = &rrule->wmm_rule.client[ac]; |
1166 | qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); | 1166 | qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); |
1167 | qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); | 1167 | qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); |
1168 | qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); | 1168 | qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); |
1169 | qparam->txop = !qparam->txop ? wmm_ac->cot / 32 : | 1169 | qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32); |
1170 | min_t(u16, qparam->txop, wmm_ac->cot / 32); | ||
1171 | rcu_read_unlock(); | 1170 | rcu_read_unlock(); |
1172 | } | 1171 | } |
1173 | 1172 | ||
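Note on the util.c hunk above: with the regulatory WMM rule now embedded in the reg rule instead of referenced through a pointer, the limiting itself remains a plain clamp: never go below the regulatory CW/AIFSN minima and never above the allowed TXOP. A standalone sketch with illustrative field names; the division by 32 follows the hunk (TXOP is expressed in 32 microsecond units):

    #include <stdint.h>

    struct wmm_limit {              /* illustrative regulatory limits */
        uint16_t cw_min, cw_max, cot;
        uint8_t  aifsn;
    };

    struct tx_params {              /* illustrative queue parameters */
        uint16_t cw_min, cw_max, txop;
        uint8_t  aifs;
    };

    #define MAX_T(a, b) ((a) > (b) ? (a) : (b))
    #define MIN_T(a, b) ((a) < (b) ? (a) : (b))

    static void limit_wmm_params(struct tx_params *q, const struct wmm_limit *w)
    {
        q->cw_min = MAX_T(q->cw_min, w->cw_min);   /* no shorter backoff ... */
        q->cw_max = MAX_T(q->cw_max, w->cw_max);
        q->aifs   = MAX_T(q->aifs,   w->aifsn);
        q->txop   = MIN_T(q->txop,   w->cot / 32); /* ... and no longer bursts */
    }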
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index 82e6edf9c5d9..45f33d6dedf7 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c | |||
@@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb, | |||
100 | bool found; | 100 | bool found; |
101 | int rc; | 101 | int rc; |
102 | 102 | ||
103 | if (id > ndp->package_num) { | 103 | if (id > ndp->package_num - 1) { |
104 | netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id); | 104 | netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id); |
105 | return -ENODEV; | 105 | return -ENODEV; |
106 | } | 106 | } |
@@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb, | |||
240 | return 0; /* done */ | 240 | return 0; /* done */ |
241 | 241 | ||
242 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, | 242 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, |
243 | &ncsi_genl_family, 0, NCSI_CMD_PKG_INFO); | 243 | &ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO); |
244 | if (!hdr) { | 244 | if (!hdr) { |
245 | rc = -EMSGSIZE; | 245 | rc = -EMSGSIZE; |
246 | goto err; | 246 | goto err; |
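Note on the ncsi-netlink.c hunk above: the package lookup now rejects any id that is not strictly below package_num; the old `id > package_num` test let an id equal to the count slip through. For a zero-based index this is equivalent to the check below (assuming at least one package is present), shown as a trivial standalone sketch with illustrative names:

    #include <stdbool.h>

    /* Valid zero-based package ids are 0 .. package_num - 1. */
    static bool package_id_valid(unsigned int id, unsigned int package_num)
    {
        return id < package_num;
    }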
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5610061e7f2e..75c92a87e7b2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = { | |||
4137 | .close = packet_mm_close, | 4137 | .close = packet_mm_close, |
4138 | }; | 4138 | }; |
4139 | 4139 | ||
4140 | static void free_pg_vec(struct pgv *pg_vec, unsigned int len) | 4140 | static void free_pg_vec(struct pgv *pg_vec, unsigned int order, |
4141 | unsigned int len) | ||
4141 | { | 4142 | { |
4142 | int i; | 4143 | int i; |
4143 | 4144 | ||
4144 | for (i = 0; i < len; i++) { | 4145 | for (i = 0; i < len; i++) { |
4145 | if (likely(pg_vec[i].buffer)) { | 4146 | if (likely(pg_vec[i].buffer)) { |
4146 | kvfree(pg_vec[i].buffer); | 4147 | if (is_vmalloc_addr(pg_vec[i].buffer)) |
4148 | vfree(pg_vec[i].buffer); | ||
4149 | else | ||
4150 | free_pages((unsigned long)pg_vec[i].buffer, | ||
4151 | order); | ||
4147 | pg_vec[i].buffer = NULL; | 4152 | pg_vec[i].buffer = NULL; |
4148 | } | 4153 | } |
4149 | } | 4154 | } |
4150 | kfree(pg_vec); | 4155 | kfree(pg_vec); |
4151 | } | 4156 | } |
4152 | 4157 | ||
4153 | static char *alloc_one_pg_vec_page(unsigned long size) | 4158 | static char *alloc_one_pg_vec_page(unsigned long order) |
4154 | { | 4159 | { |
4155 | char *buffer; | 4160 | char *buffer; |
4161 | gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | | ||
4162 | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; | ||
4156 | 4163 | ||
4157 | buffer = kvzalloc(size, GFP_KERNEL); | 4164 | buffer = (char *) __get_free_pages(gfp_flags, order); |
4158 | if (buffer) | 4165 | if (buffer) |
4159 | return buffer; | 4166 | return buffer; |
4160 | 4167 | ||
4161 | buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); | 4168 | /* __get_free_pages failed, fall back to vmalloc */ |
4169 | buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); | ||
4170 | if (buffer) | ||
4171 | return buffer; | ||
4162 | 4172 | ||
4163 | return buffer; | 4173 | /* vmalloc failed, lets dig into swap here */ |
4174 | gfp_flags &= ~__GFP_NORETRY; | ||
4175 | buffer = (char *) __get_free_pages(gfp_flags, order); | ||
4176 | if (buffer) | ||
4177 | return buffer; | ||
4178 | |||
4179 | /* complete and utter failure */ | ||
4180 | return NULL; | ||
4164 | } | 4181 | } |
4165 | 4182 | ||
4166 | static struct pgv *alloc_pg_vec(struct tpacket_req *req) | 4183 | static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) |
4167 | { | 4184 | { |
4168 | unsigned int block_nr = req->tp_block_nr; | 4185 | unsigned int block_nr = req->tp_block_nr; |
4169 | unsigned long size = req->tp_block_size; | ||
4170 | struct pgv *pg_vec; | 4186 | struct pgv *pg_vec; |
4171 | int i; | 4187 | int i; |
4172 | 4188 | ||
@@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req) | |||
4175 | goto out; | 4191 | goto out; |
4176 | 4192 | ||
4177 | for (i = 0; i < block_nr; i++) { | 4193 | for (i = 0; i < block_nr; i++) { |
4178 | pg_vec[i].buffer = alloc_one_pg_vec_page(size); | 4194 | pg_vec[i].buffer = alloc_one_pg_vec_page(order); |
4179 | if (unlikely(!pg_vec[i].buffer)) | 4195 | if (unlikely(!pg_vec[i].buffer)) |
4180 | goto out_free_pgvec; | 4196 | goto out_free_pgvec; |
4181 | } | 4197 | } |
@@ -4184,7 +4200,7 @@ out: | |||
4184 | return pg_vec; | 4200 | return pg_vec; |
4185 | 4201 | ||
4186 | out_free_pgvec: | 4202 | out_free_pgvec: |
4187 | free_pg_vec(pg_vec, block_nr); | 4203 | free_pg_vec(pg_vec, order, block_nr); |
4188 | pg_vec = NULL; | 4204 | pg_vec = NULL; |
4189 | goto out; | 4205 | goto out; |
4190 | } | 4206 | } |
@@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4194 | { | 4210 | { |
4195 | struct pgv *pg_vec = NULL; | 4211 | struct pgv *pg_vec = NULL; |
4196 | struct packet_sock *po = pkt_sk(sk); | 4212 | struct packet_sock *po = pkt_sk(sk); |
4213 | int was_running, order = 0; | ||
4197 | struct packet_ring_buffer *rb; | 4214 | struct packet_ring_buffer *rb; |
4198 | struct sk_buff_head *rb_queue; | 4215 | struct sk_buff_head *rb_queue; |
4199 | int was_running; | ||
4200 | __be16 num; | 4216 | __be16 num; |
4201 | int err = -EINVAL; | 4217 | int err = -EINVAL; |
4202 | /* Added to avoid minimal code churn */ | 4218 | /* Added to avoid minimal code churn */ |
@@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4258 | goto out; | 4274 | goto out; |
4259 | 4275 | ||
4260 | err = -ENOMEM; | 4276 | err = -ENOMEM; |
4261 | pg_vec = alloc_pg_vec(req); | 4277 | order = get_order(req->tp_block_size); |
4278 | pg_vec = alloc_pg_vec(req, order); | ||
4262 | if (unlikely(!pg_vec)) | 4279 | if (unlikely(!pg_vec)) |
4263 | goto out; | 4280 | goto out; |
4264 | switch (po->tp_version) { | 4281 | switch (po->tp_version) { |
@@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4312 | rb->frame_size = req->tp_frame_size; | 4329 | rb->frame_size = req->tp_frame_size; |
4313 | spin_unlock_bh(&rb_queue->lock); | 4330 | spin_unlock_bh(&rb_queue->lock); |
4314 | 4331 | ||
4332 | swap(rb->pg_vec_order, order); | ||
4315 | swap(rb->pg_vec_len, req->tp_block_nr); | 4333 | swap(rb->pg_vec_len, req->tp_block_nr); |
4316 | 4334 | ||
4317 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; | 4335 | rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; |
@@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4337 | } | 4355 | } |
4338 | 4356 | ||
4339 | if (pg_vec) | 4357 | if (pg_vec) |
4340 | free_pg_vec(pg_vec, req->tp_block_nr); | 4358 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
4341 | out: | 4359 | out: |
4342 | return err; | 4360 | return err; |
4343 | } | 4361 | } |
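Note on the af_packet.c hunks above: alloc_one_pg_vec_page now tries three strategies in order: physically contiguous pages without retries, then vmalloc, and only then contiguous pages again with reclaim allowed. A hedged userspace-flavoured sketch of the same cheap-first, expensive-last fallback chain; the three allocators are stand-ins for the __get_free_pages/vzalloc calls in the hunk:

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative stand-ins for the kernel allocators used in the hunk. */
    static void *try_contiguous_no_retry(size_t size)   { return malloc(size); }
    static void *try_virtually_contiguous(size_t size)  { return malloc(size); }
    static void *try_contiguous_with_retry(size_t size) { return malloc(size); }

    static void *alloc_ring_block(size_t size)
    {
        void *buf;

        buf = try_contiguous_no_retry(size);     /* fast path, no reclaim */
        if (buf)
            goto zero;
        buf = try_virtually_contiguous(size);    /* vmalloc-like fallback */
        if (buf)
            goto zero;
        buf = try_contiguous_with_retry(size);   /* last resort: allow reclaim */
        if (!buf)
            return NULL;                         /* complete and utter failure */
    zero:
        memset(buf, 0, size);
        return buf;
    }

The matching free path must know how each block was obtained, which is why the hunk stores the allocation order in the ring (pg_vec_order) and checks is_vmalloc_addr() before choosing vfree() or free_pages().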
diff --git a/net/packet/internal.h b/net/packet/internal.h index 8f50036f62f0..3bb7c5fb3bff 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h | |||
@@ -64,6 +64,7 @@ struct packet_ring_buffer { | |||
64 | unsigned int frame_size; | 64 | unsigned int frame_size; |
65 | unsigned int frame_max; | 65 | unsigned int frame_max; |
66 | 66 | ||
67 | unsigned int pg_vec_order; | ||
67 | unsigned int pg_vec_pages; | 68 | unsigned int pg_vec_pages; |
68 | unsigned int pg_vec_len; | 69 | unsigned int pg_vec_len; |
69 | 70 | ||
diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 01b3bd6a3708..b9092111bc45 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | 1 | ||
2 | config RDS | 2 | config RDS |
3 | tristate "The RDS Protocol" | 3 | tristate "The Reliable Datagram Sockets Protocol" |
4 | depends on INET | 4 | depends on INET |
5 | ---help--- | 5 | ---help--- |
6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, | 6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, |
diff --git a/net/rds/ib.c b/net/rds/ib.c index c1d97640c0be..eba75c1ba359 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, | |||
341 | 341 | ||
342 | if (rds_conn_state(conn) == RDS_CONN_UP) { | 342 | if (rds_conn_state(conn) == RDS_CONN_UP) { |
343 | struct rds_ib_device *rds_ibdev; | 343 | struct rds_ib_device *rds_ibdev; |
344 | struct rdma_dev_addr *dev_addr; | ||
345 | 344 | ||
346 | ic = conn->c_transport_data; | 345 | ic = conn->c_transport_data; |
347 | dev_addr = &ic->i_cm_id->route.addr.dev_addr; | 346 | rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, |
348 | rdma_addr_get_sgid(dev_addr, | 347 | (union ib_gid *)&iinfo6->dst_gid); |
349 | (union ib_gid *)&iinfo6->src_gid); | ||
350 | rdma_addr_get_dgid(dev_addr, | ||
351 | (union ib_gid *)&iinfo6->dst_gid); | ||
352 | |||
353 | rds_ibdev = ic->rds_ibdev; | 348 | rds_ibdev = ic->rds_ibdev; |
354 | iinfo6->max_send_wr = ic->i_send_ring.w_nr; | 349 | iinfo6->max_send_wr = ic->i_send_ring.w_nr; |
355 | iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; | 350 | iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 2c7b7c352d3e..b9bbcf3d6c63 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <net/tcp.h> | 37 | #include <net/tcp.h> |
38 | #include <net/net_namespace.h> | 38 | #include <net/net_namespace.h> |
39 | #include <net/netns/generic.h> | 39 | #include <net/netns/generic.h> |
40 | #include <net/tcp.h> | ||
41 | #include <net/addrconf.h> | 40 | #include <net/addrconf.h> |
42 | 41 | ||
43 | #include "rds.h" | 42 | #include "rds.h" |
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 00192a996be0..0f8465852254 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/mod_devicetable.h> | ||
23 | #include <linux/rfkill.h> | 24 | #include <linux/rfkill.h> |
24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
25 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 229d63c99be2..e12f8ef7baa4 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, | |||
300 | } | 300 | } |
301 | EXPORT_SYMBOL(tcf_generic_walker); | 301 | EXPORT_SYMBOL(tcf_generic_walker); |
302 | 302 | ||
303 | static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, | 303 | int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) |
304 | struct tc_action **a, int bind) | ||
305 | { | 304 | { |
306 | struct tcf_idrinfo *idrinfo = tn->idrinfo; | 305 | struct tcf_idrinfo *idrinfo = tn->idrinfo; |
307 | struct tc_action *p; | 306 | struct tc_action *p; |
308 | 307 | ||
309 | spin_lock(&idrinfo->lock); | 308 | spin_lock(&idrinfo->lock); |
310 | p = idr_find(&idrinfo->action_idr, index); | 309 | p = idr_find(&idrinfo->action_idr, index); |
311 | if (IS_ERR(p)) { | 310 | if (IS_ERR(p)) |
312 | p = NULL; | 311 | p = NULL; |
313 | } else if (p) { | 312 | else if (p) |
314 | refcount_inc(&p->tcfa_refcnt); | 313 | refcount_inc(&p->tcfa_refcnt); |
315 | if (bind) | ||
316 | atomic_inc(&p->tcfa_bindcnt); | ||
317 | } | ||
318 | spin_unlock(&idrinfo->lock); | 314 | spin_unlock(&idrinfo->lock); |
319 | 315 | ||
320 | if (p) { | 316 | if (p) { |
@@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, | |||
323 | } | 319 | } |
324 | return false; | 320 | return false; |
325 | } | 321 | } |
326 | |||
327 | int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) | ||
328 | { | ||
329 | return __tcf_idr_check(tn, index, a, 0); | ||
330 | } | ||
331 | EXPORT_SYMBOL(tcf_idr_search); | 322 | EXPORT_SYMBOL(tcf_idr_search); |
332 | 323 | ||
333 | bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, | 324 | static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index) |
334 | int bind) | ||
335 | { | 325 | { |
336 | return __tcf_idr_check(tn, index, a, bind); | ||
337 | } | ||
338 | EXPORT_SYMBOL(tcf_idr_check); | ||
339 | |||
340 | int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) | ||
341 | { | ||
342 | struct tcf_idrinfo *idrinfo = tn->idrinfo; | ||
343 | struct tc_action *p; | 326 | struct tc_action *p; |
344 | int ret = 0; | 327 | int ret = 0; |
345 | 328 | ||
@@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) | |||
370 | spin_unlock(&idrinfo->lock); | 353 | spin_unlock(&idrinfo->lock); |
371 | return ret; | 354 | return ret; |
372 | } | 355 | } |
373 | EXPORT_SYMBOL(tcf_idr_delete_index); | ||
374 | 356 | ||
375 | int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, | 357 | int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, |
376 | struct tc_action **a, const struct tc_action_ops *ops, | 358 | struct tc_action **a, const struct tc_action_ops *ops, |
@@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, | |||
409 | 391 | ||
410 | p->idrinfo = idrinfo; | 392 | p->idrinfo = idrinfo; |
411 | p->ops = ops; | 393 | p->ops = ops; |
412 | INIT_LIST_HEAD(&p->list); | ||
413 | *a = p; | 394 | *a = p; |
414 | return 0; | 395 | return 0; |
415 | err3: | 396 | err3: |
@@ -681,19 +662,30 @@ int tcf_action_destroy(struct tc_action *actions[], int bind) | |||
681 | return ret; | 662 | return ret; |
682 | } | 663 | } |
683 | 664 | ||
665 | static int tcf_action_destroy_1(struct tc_action *a, int bind) | ||
666 | { | ||
667 | struct tc_action *actions[] = { a, NULL }; | ||
668 | |||
669 | return tcf_action_destroy(actions, bind); | ||
670 | } | ||
671 | |||
684 | static int tcf_action_put(struct tc_action *p) | 672 | static int tcf_action_put(struct tc_action *p) |
685 | { | 673 | { |
686 | return __tcf_action_put(p, false); | 674 | return __tcf_action_put(p, false); |
687 | } | 675 | } |
688 | 676 | ||
677 | /* Put all actions in this array, skip those NULL's. */ | ||
689 | static void tcf_action_put_many(struct tc_action *actions[]) | 678 | static void tcf_action_put_many(struct tc_action *actions[]) |
690 | { | 679 | { |
691 | int i; | 680 | int i; |
692 | 681 | ||
693 | for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { | 682 | for (i = 0; i < TCA_ACT_MAX_PRIO; i++) { |
694 | struct tc_action *a = actions[i]; | 683 | struct tc_action *a = actions[i]; |
695 | const struct tc_action_ops *ops = a->ops; | 684 | const struct tc_action_ops *ops; |
696 | 685 | ||
686 | if (!a) | ||
687 | continue; | ||
688 | ops = a->ops; | ||
697 | if (tcf_action_put(a)) | 689 | if (tcf_action_put(a)) |
698 | module_put(ops->owner); | 690 | module_put(ops->owner); |
699 | } | 691 | } |
@@ -896,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, | |||
896 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { | 888 | if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { |
897 | err = tcf_action_goto_chain_init(a, tp); | 889 | err = tcf_action_goto_chain_init(a, tp); |
898 | if (err) { | 890 | if (err) { |
899 | struct tc_action *actions[] = { a, NULL }; | 891 | tcf_action_destroy_1(a, bind); |
900 | |||
901 | tcf_action_destroy(actions, bind); | ||
902 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); | 892 | NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); |
903 | return ERR_PTR(err); | 893 | return ERR_PTR(err); |
904 | } | 894 | } |
905 | } | 895 | } |
906 | 896 | ||
907 | if (!tcf_action_valid(a->tcfa_action)) { | 897 | if (!tcf_action_valid(a->tcfa_action)) { |
908 | NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead"); | 898 | tcf_action_destroy_1(a, bind); |
909 | a->tcfa_action = TC_ACT_UNSPEC; | 899 | NL_SET_ERR_MSG(extack, "Invalid control action value"); |
900 | return ERR_PTR(-EINVAL); | ||
910 | } | 901 | } |
911 | 902 | ||
912 | return a; | 903 | return a; |
@@ -1175,41 +1166,38 @@ err_out: | |||
1175 | return err; | 1166 | return err; |
1176 | } | 1167 | } |
1177 | 1168 | ||
1178 | static int tcf_action_delete(struct net *net, struct tc_action *actions[], | 1169 | static int tcf_action_delete(struct net *net, struct tc_action *actions[]) |
1179 | int *acts_deleted, struct netlink_ext_ack *extack) | ||
1180 | { | 1170 | { |
1181 | u32 act_index; | 1171 | int i; |
1182 | int ret, i; | ||
1183 | 1172 | ||
1184 | for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { | 1173 | for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { |
1185 | struct tc_action *a = actions[i]; | 1174 | struct tc_action *a = actions[i]; |
1186 | const struct tc_action_ops *ops = a->ops; | 1175 | const struct tc_action_ops *ops = a->ops; |
1187 | |||
1188 | /* Actions can be deleted concurrently so we must save their | 1176 | /* Actions can be deleted concurrently so we must save their |
1189 | * type and id to search again after reference is released. | 1177 | * type and id to search again after reference is released. |
1190 | */ | 1178 | */ |
1191 | act_index = a->tcfa_index; | 1179 | struct tcf_idrinfo *idrinfo = a->idrinfo; |
1180 | u32 act_index = a->tcfa_index; | ||
1192 | 1181 | ||
1182 | actions[i] = NULL; | ||
1193 | if (tcf_action_put(a)) { | 1183 | if (tcf_action_put(a)) { |
1194 | /* last reference, action was deleted concurrently */ | 1184 | /* last reference, action was deleted concurrently */ |
1195 | module_put(ops->owner); | 1185 | module_put(ops->owner); |
1196 | } else { | 1186 | } else { |
1187 | int ret; | ||
1188 | |||
1197 | /* now do the delete */ | 1189 | /* now do the delete */ |
1198 | ret = ops->delete(net, act_index); | 1190 | ret = tcf_idr_delete_index(idrinfo, act_index); |
1199 | if (ret < 0) { | 1191 | if (ret < 0) |
1200 | *acts_deleted = i + 1; | ||
1201 | return ret; | 1192 | return ret; |
1202 | } | ||
1203 | } | 1193 | } |
1204 | } | 1194 | } |
1205 | *acts_deleted = i; | ||
1206 | return 0; | 1195 | return 0; |
1207 | } | 1196 | } |
1208 | 1197 | ||
1209 | static int | 1198 | static int |
1210 | tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], | 1199 | tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], |
1211 | int *acts_deleted, u32 portid, size_t attr_size, | 1200 | u32 portid, size_t attr_size, struct netlink_ext_ack *extack) |
1212 | struct netlink_ext_ack *extack) | ||
1213 | { | 1201 | { |
1214 | int ret; | 1202 | int ret; |
1215 | struct sk_buff *skb; | 1203 | struct sk_buff *skb; |
@@ -1227,7 +1215,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], | |||
1227 | } | 1215 | } |
1228 | 1216 | ||
1229 | /* now do the delete */ | 1217 | /* now do the delete */ |
1230 | ret = tcf_action_delete(net, actions, acts_deleted, extack); | 1218 | ret = tcf_action_delete(net, actions); |
1231 | if (ret < 0) { | 1219 | if (ret < 0) { |
1232 | NL_SET_ERR_MSG(extack, "Failed to delete TC action"); | 1220 | NL_SET_ERR_MSG(extack, "Failed to delete TC action"); |
1233 | kfree_skb(skb); | 1221 | kfree_skb(skb); |
@@ -1249,8 +1237,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
1249 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; | 1237 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; |
1250 | struct tc_action *act; | 1238 | struct tc_action *act; |
1251 | size_t attr_size = 0; | 1239 | size_t attr_size = 0; |
1252 | struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {}; | 1240 | struct tc_action *actions[TCA_ACT_MAX_PRIO] = {}; |
1253 | int acts_deleted = 0; | ||
1254 | 1241 | ||
1255 | ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); | 1242 | ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); |
1256 | if (ret < 0) | 1243 | if (ret < 0) |
@@ -1280,14 +1267,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
1280 | if (event == RTM_GETACTION) | 1267 | if (event == RTM_GETACTION) |
1281 | ret = tcf_get_notify(net, portid, n, actions, event, extack); | 1268 | ret = tcf_get_notify(net, portid, n, actions, event, extack); |
1282 | else { /* delete */ | 1269 | else { /* delete */ |
1283 | ret = tcf_del_notify(net, n, actions, &acts_deleted, portid, | 1270 | ret = tcf_del_notify(net, n, actions, portid, attr_size, extack); |
1284 | attr_size, extack); | ||
1285 | if (ret) | 1271 | if (ret) |
1286 | goto err; | 1272 | goto err; |
1287 | return ret; | 1273 | return 0; |
1288 | } | 1274 | } |
1289 | err: | 1275 | err: |
1290 | tcf_action_put_many(&actions[acts_deleted]); | 1276 | tcf_action_put_many(actions); |
1291 | return ret; | 1277 | return ret; |
1292 | } | 1278 | } |
1293 | 1279 | ||
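Note on the act_api.c hunks above: since an action can be freed by a concurrent writer once our reference is dropped, the delete path copies the index and idrinfo out of the action first, then drops the reference, and only performs the IDR delete when that put was not the last reference. A condensed sketch of that save-then-put ordering; the types and helpers are stand-ins for the tcf_* ones in the hunk:

    #include <stdbool.h>
    #include <stdint.h>

    struct fake_action {               /* stand-in for struct tc_action */
        uint32_t index;
        void *idrinfo;
        int refcnt;
    };

    /* Drop one reference; returns true when this was the last one and the
     * object is gone. Stand-in for tcf_action_put(). */
    static bool action_put(struct fake_action *a)
    {
        return --a->refcnt == 0;
    }

    /* Stand-in for tcf_idr_delete_index(). */
    static int idr_delete_index(void *idrinfo, uint32_t index)
    {
        (void)idrinfo;
        (void)index;
        return 0;
    }

    static int delete_action(struct fake_action *a)
    {
        /* Save what we need *before* the put: afterwards "a" may be freed. */
        void *idrinfo = a->idrinfo;
        uint32_t index = a->index;

        if (action_put(a))
            return 0;                  /* deleted concurrently, nothing to do */
        return idr_delete_index(idrinfo, index);
    }

This is also why the hunk clears actions[i] before the put and teaches tcf_action_put_many() to skip NULL slots: entries already handled by the delete loop must not be put a second time on the error path.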
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index d30b23e42436..0c68bc9cf0b4 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c | |||
@@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index, | |||
395 | return tcf_idr_search(tn, a, index); | 395 | return tcf_idr_search(tn, a, index); |
396 | } | 396 | } |
397 | 397 | ||
398 | static int tcf_bpf_delete(struct net *net, u32 index) | ||
399 | { | ||
400 | struct tc_action_net *tn = net_generic(net, bpf_net_id); | ||
401 | |||
402 | return tcf_idr_delete_index(tn, index); | ||
403 | } | ||
404 | |||
405 | static struct tc_action_ops act_bpf_ops __read_mostly = { | 398 | static struct tc_action_ops act_bpf_ops __read_mostly = { |
406 | .kind = "bpf", | 399 | .kind = "bpf", |
407 | .type = TCA_ACT_BPF, | 400 | .type = TCA_ACT_BPF, |
@@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = { | |||
412 | .init = tcf_bpf_init, | 405 | .init = tcf_bpf_init, |
413 | .walk = tcf_bpf_walker, | 406 | .walk = tcf_bpf_walker, |
414 | .lookup = tcf_bpf_search, | 407 | .lookup = tcf_bpf_search, |
415 | .delete = tcf_bpf_delete, | ||
416 | .size = sizeof(struct tcf_bpf), | 408 | .size = sizeof(struct tcf_bpf), |
417 | }; | 409 | }; |
418 | 410 | ||
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 54c0bf54f2ac..6f0f273f1139 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c | |||
@@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index, | |||
198 | return tcf_idr_search(tn, a, index); | 198 | return tcf_idr_search(tn, a, index); |
199 | } | 199 | } |
200 | 200 | ||
201 | static int tcf_connmark_delete(struct net *net, u32 index) | ||
202 | { | ||
203 | struct tc_action_net *tn = net_generic(net, connmark_net_id); | ||
204 | |||
205 | return tcf_idr_delete_index(tn, index); | ||
206 | } | ||
207 | |||
208 | static struct tc_action_ops act_connmark_ops = { | 201 | static struct tc_action_ops act_connmark_ops = { |
209 | .kind = "connmark", | 202 | .kind = "connmark", |
210 | .type = TCA_ACT_CONNMARK, | 203 | .type = TCA_ACT_CONNMARK, |
@@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = { | |||
214 | .init = tcf_connmark_init, | 207 | .init = tcf_connmark_init, |
215 | .walk = tcf_connmark_walker, | 208 | .walk = tcf_connmark_walker, |
216 | .lookup = tcf_connmark_search, | 209 | .lookup = tcf_connmark_search, |
217 | .delete = tcf_connmark_delete, | ||
218 | .size = sizeof(struct tcf_connmark_info), | 210 | .size = sizeof(struct tcf_connmark_info), |
219 | }; | 211 | }; |
220 | 212 | ||
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index e698d3fe2080..b8a67ae3105a 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
@@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act) | |||
659 | return nla_total_size(sizeof(struct tc_csum)); | 659 | return nla_total_size(sizeof(struct tc_csum)); |
660 | } | 660 | } |
661 | 661 | ||
662 | static int tcf_csum_delete(struct net *net, u32 index) | ||
663 | { | ||
664 | struct tc_action_net *tn = net_generic(net, csum_net_id); | ||
665 | |||
666 | return tcf_idr_delete_index(tn, index); | ||
667 | } | ||
668 | |||
669 | static struct tc_action_ops act_csum_ops = { | 662 | static struct tc_action_ops act_csum_ops = { |
670 | .kind = "csum", | 663 | .kind = "csum", |
671 | .type = TCA_ACT_CSUM, | 664 | .type = TCA_ACT_CSUM, |
@@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = { | |||
677 | .walk = tcf_csum_walker, | 670 | .walk = tcf_csum_walker, |
678 | .lookup = tcf_csum_search, | 671 | .lookup = tcf_csum_search, |
679 | .get_fill_size = tcf_csum_get_fill_size, | 672 | .get_fill_size = tcf_csum_get_fill_size, |
680 | .delete = tcf_csum_delete, | ||
681 | .size = sizeof(struct tcf_csum), | 673 | .size = sizeof(struct tcf_csum), |
682 | }; | 674 | }; |
683 | 675 | ||
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 6a3f25a8ffb3..cd1d9bd32ef9 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
@@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act) | |||
243 | return sz; | 243 | return sz; |
244 | } | 244 | } |
245 | 245 | ||
246 | static int tcf_gact_delete(struct net *net, u32 index) | ||
247 | { | ||
248 | struct tc_action_net *tn = net_generic(net, gact_net_id); | ||
249 | |||
250 | return tcf_idr_delete_index(tn, index); | ||
251 | } | ||
252 | |||
253 | static struct tc_action_ops act_gact_ops = { | 246 | static struct tc_action_ops act_gact_ops = { |
254 | .kind = "gact", | 247 | .kind = "gact", |
255 | .type = TCA_ACT_GACT, | 248 | .type = TCA_ACT_GACT, |
@@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = { | |||
261 | .walk = tcf_gact_walker, | 254 | .walk = tcf_gact_walker, |
262 | .lookup = tcf_gact_search, | 255 | .lookup = tcf_gact_search, |
263 | .get_fill_size = tcf_gact_get_fill_size, | 256 | .get_fill_size = tcf_gact_get_fill_size, |
264 | .delete = tcf_gact_delete, | ||
265 | .size = sizeof(struct tcf_gact), | 257 | .size = sizeof(struct tcf_gact), |
266 | }; | 258 | }; |
267 | 259 | ||
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index d1081bdf1bdb..06a3d4801878 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c | |||
@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid) | |||
167 | { | 167 | { |
168 | struct tcf_meta_ops *o; | 168 | struct tcf_meta_ops *o; |
169 | 169 | ||
170 | read_lock_bh(&ife_mod_lock); | 170 | read_lock(&ife_mod_lock); |
171 | list_for_each_entry(o, &ifeoplist, list) { | 171 | list_for_each_entry(o, &ifeoplist, list) { |
172 | if (o->metaid == metaid) { | 172 | if (o->metaid == metaid) { |
173 | if (!try_module_get(o->owner)) | 173 | if (!try_module_get(o->owner)) |
174 | o = NULL; | 174 | o = NULL; |
175 | read_unlock_bh(&ife_mod_lock); | 175 | read_unlock(&ife_mod_lock); |
176 | return o; | 176 | return o; |
177 | } | 177 | } |
178 | } | 178 | } |
179 | read_unlock_bh(&ife_mod_lock); | 179 | read_unlock(&ife_mod_lock); |
180 | 180 | ||
181 | return NULL; | 181 | return NULL; |
182 | } | 182 | } |
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops) | |||
190 | !mops->get || !mops->alloc) | 190 | !mops->get || !mops->alloc) |
191 | return -EINVAL; | 191 | return -EINVAL; |
192 | 192 | ||
193 | write_lock_bh(&ife_mod_lock); | 193 | write_lock(&ife_mod_lock); |
194 | 194 | ||
195 | list_for_each_entry(m, &ifeoplist, list) { | 195 | list_for_each_entry(m, &ifeoplist, list) { |
196 | if (m->metaid == mops->metaid || | 196 | if (m->metaid == mops->metaid || |
197 | (strcmp(mops->name, m->name) == 0)) { | 197 | (strcmp(mops->name, m->name) == 0)) { |
198 | write_unlock_bh(&ife_mod_lock); | 198 | write_unlock(&ife_mod_lock); |
199 | return -EEXIST; | 199 | return -EEXIST; |
200 | } | 200 | } |
201 | } | 201 | } |
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops) | |||
204 | mops->release = ife_release_meta_gen; | 204 | mops->release = ife_release_meta_gen; |
205 | 205 | ||
206 | list_add_tail(&mops->list, &ifeoplist); | 206 | list_add_tail(&mops->list, &ifeoplist); |
207 | write_unlock_bh(&ife_mod_lock); | 207 | write_unlock(&ife_mod_lock); |
208 | return 0; | 208 | return 0; |
209 | } | 209 | } |
210 | EXPORT_SYMBOL_GPL(unregister_ife_op); | 210 | EXPORT_SYMBOL_GPL(unregister_ife_op); |
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops) | |||
214 | struct tcf_meta_ops *m; | 214 | struct tcf_meta_ops *m; |
215 | int err = -ENOENT; | 215 | int err = -ENOENT; |
216 | 216 | ||
217 | write_lock_bh(&ife_mod_lock); | 217 | write_lock(&ife_mod_lock); |
218 | list_for_each_entry(m, &ifeoplist, list) { | 218 | list_for_each_entry(m, &ifeoplist, list) { |
219 | if (m->metaid == mops->metaid) { | 219 | if (m->metaid == mops->metaid) { |
220 | list_del(&mops->list); | 220 | list_del(&mops->list); |
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops) | |||
222 | break; | 222 | break; |
223 | } | 223 | } |
224 | } | 224 | } |
225 | write_unlock_bh(&ife_mod_lock); | 225 | write_unlock(&ife_mod_lock); |
226 | 226 | ||
227 | return err; | 227 | return err; |
228 | } | 228 | } |
@@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid) | |||
265 | #endif | 265 | #endif |
266 | 266 | ||
267 | /* called when adding new meta information | 267 | /* called when adding new meta information |
268 | * under ife->tcf_lock for existing action | ||
269 | */ | 268 | */ |
270 | static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | 269 | static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held) |
271 | void *val, int len, bool exists, | ||
272 | bool rtnl_held) | ||
273 | { | 270 | { |
274 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); | 271 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); |
275 | int ret = 0; | 272 | int ret = 0; |
@@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | |||
277 | if (!ops) { | 274 | if (!ops) { |
278 | ret = -ENOENT; | 275 | ret = -ENOENT; |
279 | #ifdef CONFIG_MODULES | 276 | #ifdef CONFIG_MODULES |
280 | if (exists) | ||
281 | spin_unlock_bh(&ife->tcf_lock); | ||
282 | if (rtnl_held) | 277 | if (rtnl_held) |
283 | rtnl_unlock(); | 278 | rtnl_unlock(); |
284 | request_module("ife-meta-%s", ife_meta_id2name(metaid)); | 279 | request_module("ife-meta-%s", ife_meta_id2name(metaid)); |
285 | if (rtnl_held) | 280 | if (rtnl_held) |
286 | rtnl_lock(); | 281 | rtnl_lock(); |
287 | if (exists) | ||
288 | spin_lock_bh(&ife->tcf_lock); | ||
289 | ops = find_ife_oplist(metaid); | 282 | ops = find_ife_oplist(metaid); |
290 | #endif | 283 | #endif |
291 | } | 284 | } |
@@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | |||
302 | } | 295 | } |
303 | 296 | ||
304 | /* called when adding new meta information | 297 | /* called when adding new meta information |
305 | * under ife->tcf_lock for existing action | ||
306 | */ | 298 | */ |
307 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | 299 | static int __add_metainfo(const struct tcf_meta_ops *ops, |
308 | int len, bool atomic) | 300 | struct tcf_ife_info *ife, u32 metaid, void *metaval, |
301 | int len, bool atomic, bool exists) | ||
309 | { | 302 | { |
310 | struct tcf_meta_info *mi = NULL; | 303 | struct tcf_meta_info *mi = NULL; |
311 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); | ||
312 | int ret = 0; | 304 | int ret = 0; |
313 | 305 | ||
314 | if (!ops) | ||
315 | return -ENOENT; | ||
316 | |||
317 | mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); | 306 | mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); |
318 | if (!mi) { | 307 | if (!mi) |
319 | /*put back what find_ife_oplist took */ | ||
320 | module_put(ops->owner); | ||
321 | return -ENOMEM; | 308 | return -ENOMEM; |
322 | } | ||
323 | 309 | ||
324 | mi->metaid = metaid; | 310 | mi->metaid = metaid; |
325 | mi->ops = ops; | 311 | mi->ops = ops; |
@@ -327,29 +313,61 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | |||
327 | ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL); | 313 | ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL); |
328 | if (ret != 0) { | 314 | if (ret != 0) { |
329 | kfree(mi); | 315 | kfree(mi); |
330 | module_put(ops->owner); | ||
331 | return ret; | 316 | return ret; |
332 | } | 317 | } |
333 | } | 318 | } |
334 | 319 | ||
320 | if (exists) | ||
321 | spin_lock_bh(&ife->tcf_lock); | ||
335 | list_add_tail(&mi->metalist, &ife->metalist); | 322 | list_add_tail(&mi->metalist, &ife->metalist); |
323 | if (exists) | ||
324 | spin_unlock_bh(&ife->tcf_lock); | ||
325 | |||
326 | return ret; | ||
327 | } | ||
328 | |||
329 | static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops, | ||
330 | struct tcf_ife_info *ife, u32 metaid, | ||
331 | bool exists) | ||
332 | { | ||
333 | int ret; | ||
334 | |||
335 | if (!try_module_get(ops->owner)) | ||
336 | return -ENOENT; | ||
337 | ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists); | ||
338 | if (ret) | ||
339 | module_put(ops->owner); | ||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | ||
344 | int len, bool exists) | ||
345 | { | ||
346 | const struct tcf_meta_ops *ops = find_ife_oplist(metaid); | ||
347 | int ret; | ||
336 | 348 | ||
349 | if (!ops) | ||
350 | return -ENOENT; | ||
351 | ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists); | ||
352 | if (ret) | ||
353 | /*put back what find_ife_oplist took */ | ||
354 | module_put(ops->owner); | ||
337 | return ret; | 355 | return ret; |
338 | } | 356 | } |
339 | 357 | ||
340 | static int use_all_metadata(struct tcf_ife_info *ife) | 358 | static int use_all_metadata(struct tcf_ife_info *ife, bool exists) |
341 | { | 359 | { |
342 | struct tcf_meta_ops *o; | 360 | struct tcf_meta_ops *o; |
343 | int rc = 0; | 361 | int rc = 0; |
344 | int installed = 0; | 362 | int installed = 0; |
345 | 363 | ||
346 | read_lock_bh(&ife_mod_lock); | 364 | read_lock(&ife_mod_lock); |
347 | list_for_each_entry(o, &ifeoplist, list) { | 365 | list_for_each_entry(o, &ifeoplist, list) { |
348 | rc = add_metainfo(ife, o->metaid, NULL, 0, true); | 366 | rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists); |
349 | if (rc == 0) | 367 | if (rc == 0) |
350 | installed += 1; | 368 | installed += 1; |
351 | } | 369 | } |
352 | read_unlock_bh(&ife_mod_lock); | 370 | read_unlock(&ife_mod_lock); |
353 | 371 | ||
354 | if (installed) | 372 | if (installed) |
355 | return 0; | 373 | return 0; |
@@ -396,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a) | |||
396 | struct tcf_meta_info *e, *n; | 414 | struct tcf_meta_info *e, *n; |
397 | 415 | ||
398 | list_for_each_entry_safe(e, n, &ife->metalist, metalist) { | 416 | list_for_each_entry_safe(e, n, &ife->metalist, metalist) { |
399 | module_put(e->ops->owner); | ||
400 | list_del(&e->metalist); | 417 | list_del(&e->metalist); |
401 | if (e->metaval) { | 418 | if (e->metaval) { |
402 | if (e->ops->release) | 419 | if (e->ops->release) |
@@ -404,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a) | |||
404 | else | 421 | else |
405 | kfree(e->metaval); | 422 | kfree(e->metaval); |
406 | } | 423 | } |
424 | module_put(e->ops->owner); | ||
407 | kfree(e); | 425 | kfree(e); |
408 | } | 426 | } |
409 | } | 427 | } |
@@ -422,7 +440,6 @@ static void tcf_ife_cleanup(struct tc_action *a) | |||
422 | kfree_rcu(p, rcu); | 440 | kfree_rcu(p, rcu); |
423 | } | 441 | } |
424 | 442 | ||
425 | /* under ife->tcf_lock for existing action */ | ||
426 | static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, | 443 | static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, |
427 | bool exists, bool rtnl_held) | 444 | bool exists, bool rtnl_held) |
428 | { | 445 | { |
@@ -436,8 +453,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, | |||
436 | val = nla_data(tb[i]); | 453 | val = nla_data(tb[i]); |
437 | len = nla_len(tb[i]); | 454 | len = nla_len(tb[i]); |
438 | 455 | ||
439 | rc = load_metaops_and_vet(ife, i, val, len, exists, | 456 | rc = load_metaops_and_vet(i, val, len, rtnl_held); |
440 | rtnl_held); | ||
441 | if (rc != 0) | 457 | if (rc != 0) |
442 | return rc; | 458 | return rc; |
443 | 459 | ||
@@ -540,8 +556,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, | |||
540 | p->eth_type = ife_type; | 556 | p->eth_type = ife_type; |
541 | } | 557 | } |
542 | 558 | ||
543 | if (exists) | ||
544 | spin_lock_bh(&ife->tcf_lock); | ||
545 | 559 | ||
546 | if (ret == ACT_P_CREATED) | 560 | if (ret == ACT_P_CREATED) |
547 | INIT_LIST_HEAD(&ife->metalist); | 561 | INIT_LIST_HEAD(&ife->metalist); |
@@ -551,10 +565,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, | |||
551 | NULL, NULL); | 565 | NULL, NULL); |
552 | if (err) { | 566 | if (err) { |
553 | metadata_parse_err: | 567 | metadata_parse_err: |
554 | if (exists) | ||
555 | spin_unlock_bh(&ife->tcf_lock); | ||
556 | tcf_idr_release(*a, bind); | 568 | tcf_idr_release(*a, bind); |
557 | |||
558 | kfree(p); | 569 | kfree(p); |
559 | return err; | 570 | return err; |
560 | } | 571 | } |
@@ -569,17 +580,16 @@ metadata_parse_err: | |||
569 | * as we can. You better have at least one else we are | 580 | * as we can. You better have at least one else we are |
570 | * going to bail out | 581 | * going to bail out |
571 | */ | 582 | */ |
572 | err = use_all_metadata(ife); | 583 | err = use_all_metadata(ife, exists); |
573 | if (err) { | 584 | if (err) { |
574 | if (exists) | ||
575 | spin_unlock_bh(&ife->tcf_lock); | ||
576 | tcf_idr_release(*a, bind); | 585 | tcf_idr_release(*a, bind); |
577 | |||
578 | kfree(p); | 586 | kfree(p); |
579 | return err; | 587 | return err; |
580 | } | 588 | } |
581 | } | 589 | } |
582 | 590 | ||
591 | if (exists) | ||
592 | spin_lock_bh(&ife->tcf_lock); | ||
583 | ife->tcf_action = parm->action; | 593 | ife->tcf_action = parm->action; |
584 | /* protected by tcf_lock when modifying existing action */ | 594 | /* protected by tcf_lock when modifying existing action */ |
585 | rcu_swap_protected(ife->params, p, 1); | 595 | rcu_swap_protected(ife->params, p, 1); |
@@ -853,13 +863,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index, | |||
853 | return tcf_idr_search(tn, a, index); | 863 | return tcf_idr_search(tn, a, index); |
854 | } | 864 | } |
855 | 865 | ||
856 | static int tcf_ife_delete(struct net *net, u32 index) | ||
857 | { | ||
858 | struct tc_action_net *tn = net_generic(net, ife_net_id); | ||
859 | |||
860 | return tcf_idr_delete_index(tn, index); | ||
861 | } | ||
862 | |||
863 | static struct tc_action_ops act_ife_ops = { | 866 | static struct tc_action_ops act_ife_ops = { |
864 | .kind = "ife", | 867 | .kind = "ife", |
865 | .type = TCA_ACT_IFE, | 868 | .type = TCA_ACT_IFE, |
@@ -870,7 +873,6 @@ static struct tc_action_ops act_ife_ops = { | |||
870 | .init = tcf_ife_init, | 873 | .init = tcf_ife_init, |
871 | .walk = tcf_ife_walker, | 874 | .walk = tcf_ife_walker, |
872 | .lookup = tcf_ife_search, | 875 | .lookup = tcf_ife_search, |
873 | .delete = tcf_ife_delete, | ||
874 | .size = sizeof(struct tcf_ife_info), | 876 | .size = sizeof(struct tcf_ife_info), |
875 | }; | 877 | }; |
876 | 878 | ||
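The act_ife hunks above change when the metadata ops' module reference is taken and dropped: add_metainfo_and_get_ops() now takes the reference with try_module_get() before the entry is linked into ife->metalist, and _tcf_ife_cleanup() drops it only after the metaval has been released. A minimal userspace sketch of that ordering rule follows; it is illustrative only, and every name in it (owner, entry, owner_get, ...) is invented rather than taken from the patch.

#include <stdio.h>
#include <stdlib.h>

struct owner {
        int refcnt;                     /* stands in for the module refcount */
};

struct entry {
        struct owner *owner;            /* held for as long as the entry lives */
        char *payload;                  /* releasing this may still need the owner */
};

static int owner_get(struct owner *o)
{
        o->refcnt++;                    /* like try_module_get(ops->owner) */
        return 1;
}

static void owner_put(struct owner *o)
{
        o->refcnt--;                    /* like module_put(e->ops->owner) */
}

static void entry_destroy(struct entry *e)
{
        free(e->payload);               /* release the payload first ... */
        owner_put(e->owner);            /* ... and only then drop the owner ref */
        free(e);
}

int main(void)
{
        struct owner mod = { .refcnt = 0 };
        struct entry *e = calloc(1, sizeof(*e));

        if (!e || !owner_get(&mod))
                return 1;
        e->owner = &mod;
        e->payload = malloc(16);
        entry_destroy(e);
        printf("final owner refcnt: %d\n", mod.refcnt);
        return 0;
}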
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 51f235bbeb5b..23273b5303fd 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index, | |||
337 | return tcf_idr_search(tn, a, index); | 337 | return tcf_idr_search(tn, a, index); |
338 | } | 338 | } |
339 | 339 | ||
340 | static int tcf_ipt_delete(struct net *net, u32 index) | ||
341 | { | ||
342 | struct tc_action_net *tn = net_generic(net, ipt_net_id); | ||
343 | |||
344 | return tcf_idr_delete_index(tn, index); | ||
345 | } | ||
346 | |||
347 | static struct tc_action_ops act_ipt_ops = { | 340 | static struct tc_action_ops act_ipt_ops = { |
348 | .kind = "ipt", | 341 | .kind = "ipt", |
349 | .type = TCA_ACT_IPT, | 342 | .type = TCA_ACT_IPT, |
@@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = { | |||
354 | .init = tcf_ipt_init, | 347 | .init = tcf_ipt_init, |
355 | .walk = tcf_ipt_walker, | 348 | .walk = tcf_ipt_walker, |
356 | .lookup = tcf_ipt_search, | 349 | .lookup = tcf_ipt_search, |
357 | .delete = tcf_ipt_delete, | ||
358 | .size = sizeof(struct tcf_ipt), | 350 | .size = sizeof(struct tcf_ipt), |
359 | }; | 351 | }; |
360 | 352 | ||
@@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index, | |||
395 | return tcf_idr_search(tn, a, index); | 387 | return tcf_idr_search(tn, a, index); |
396 | } | 388 | } |
397 | 389 | ||
398 | static int tcf_xt_delete(struct net *net, u32 index) | ||
399 | { | ||
400 | struct tc_action_net *tn = net_generic(net, xt_net_id); | ||
401 | |||
402 | return tcf_idr_delete_index(tn, index); | ||
403 | } | ||
404 | |||
405 | static struct tc_action_ops act_xt_ops = { | 390 | static struct tc_action_ops act_xt_ops = { |
406 | .kind = "xt", | 391 | .kind = "xt", |
407 | .type = TCA_ACT_XT, | 392 | .type = TCA_ACT_XT, |
@@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = { | |||
412 | .init = tcf_xt_init, | 397 | .init = tcf_xt_init, |
413 | .walk = tcf_xt_walker, | 398 | .walk = tcf_xt_walker, |
414 | .lookup = tcf_xt_search, | 399 | .lookup = tcf_xt_search, |
415 | .delete = tcf_xt_delete, | ||
416 | .size = sizeof(struct tcf_ipt), | 400 | .size = sizeof(struct tcf_ipt), |
417 | }; | 401 | }; |
418 | 402 | ||
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 38fd20f10f67..8bf66d0a6800 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev) | |||
395 | dev_put(dev); | 395 | dev_put(dev); |
396 | } | 396 | } |
397 | 397 | ||
398 | static int tcf_mirred_delete(struct net *net, u32 index) | ||
399 | { | ||
400 | struct tc_action_net *tn = net_generic(net, mirred_net_id); | ||
401 | |||
402 | return tcf_idr_delete_index(tn, index); | ||
403 | } | ||
404 | |||
405 | static struct tc_action_ops act_mirred_ops = { | 398 | static struct tc_action_ops act_mirred_ops = { |
406 | .kind = "mirred", | 399 | .kind = "mirred", |
407 | .type = TCA_ACT_MIRRED, | 400 | .type = TCA_ACT_MIRRED, |
@@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = { | |||
416 | .size = sizeof(struct tcf_mirred), | 409 | .size = sizeof(struct tcf_mirred), |
417 | .get_dev = tcf_mirred_get_dev, | 410 | .get_dev = tcf_mirred_get_dev, |
418 | .put_dev = tcf_mirred_put_dev, | 411 | .put_dev = tcf_mirred_put_dev, |
419 | .delete = tcf_mirred_delete, | ||
420 | }; | 412 | }; |
421 | 413 | ||
422 | static __net_init int mirred_init_net(struct net *net) | 414 | static __net_init int mirred_init_net(struct net *net) |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 822e903bfc25..4313aa102440 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index, | |||
300 | return tcf_idr_search(tn, a, index); | 300 | return tcf_idr_search(tn, a, index); |
301 | } | 301 | } |
302 | 302 | ||
303 | static int tcf_nat_delete(struct net *net, u32 index) | ||
304 | { | ||
305 | struct tc_action_net *tn = net_generic(net, nat_net_id); | ||
306 | |||
307 | return tcf_idr_delete_index(tn, index); | ||
308 | } | ||
309 | |||
310 | static struct tc_action_ops act_nat_ops = { | 303 | static struct tc_action_ops act_nat_ops = { |
311 | .kind = "nat", | 304 | .kind = "nat", |
312 | .type = TCA_ACT_NAT, | 305 | .type = TCA_ACT_NAT, |
@@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = { | |||
316 | .init = tcf_nat_init, | 309 | .init = tcf_nat_init, |
317 | .walk = tcf_nat_walker, | 310 | .walk = tcf_nat_walker, |
318 | .lookup = tcf_nat_search, | 311 | .lookup = tcf_nat_search, |
319 | .delete = tcf_nat_delete, | ||
320 | .size = sizeof(struct tcf_nat), | 312 | .size = sizeof(struct tcf_nat), |
321 | }; | 313 | }; |
322 | 314 | ||
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 8a7a7cb94e83..ad99a99f11f6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, | |||
109 | { | 109 | { |
110 | struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); | 110 | struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); |
111 | 111 | ||
112 | if (!keys_start) | ||
113 | goto nla_failure; | ||
112 | for (; n > 0; n--) { | 114 | for (; n > 0; n--) { |
113 | struct nlattr *key_start; | 115 | struct nlattr *key_start; |
114 | 116 | ||
115 | key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); | 117 | key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); |
118 | if (!key_start) | ||
119 | goto nla_failure; | ||
116 | 120 | ||
117 | if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || | 121 | if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || |
118 | nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) { | 122 | nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) |
119 | nlmsg_trim(skb, keys_start); | 123 | goto nla_failure; |
120 | return -EINVAL; | ||
121 | } | ||
122 | 124 | ||
123 | nla_nest_end(skb, key_start); | 125 | nla_nest_end(skb, key_start); |
124 | 126 | ||
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, | |||
128 | nla_nest_end(skb, keys_start); | 130 | nla_nest_end(skb, keys_start); |
129 | 131 | ||
130 | return 0; | 132 | return 0; |
133 | nla_failure: | ||
134 | nla_nest_cancel(skb, keys_start); | ||
135 | return -EINVAL; | ||
131 | } | 136 | } |
132 | 137 | ||
133 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, | 138 | static int tcf_pedit_init(struct net *net, struct nlattr *nla, |
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, | |||
418 | opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; | 423 | opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; |
419 | 424 | ||
420 | if (p->tcfp_keys_ex) { | 425 | if (p->tcfp_keys_ex) { |
421 | tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); | 426 | if (tcf_pedit_key_ex_dump(skb, |
427 | p->tcfp_keys_ex, | ||
428 | p->tcfp_nkeys)) | ||
429 | goto nla_put_failure; | ||
422 | 430 | ||
423 | if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) | 431 | if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) |
424 | goto nla_put_failure; | 432 | goto nla_put_failure; |
@@ -460,13 +468,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index, | |||
460 | return tcf_idr_search(tn, a, index); | 468 | return tcf_idr_search(tn, a, index); |
461 | } | 469 | } |
462 | 470 | ||
463 | static int tcf_pedit_delete(struct net *net, u32 index) | ||
464 | { | ||
465 | struct tc_action_net *tn = net_generic(net, pedit_net_id); | ||
466 | |||
467 | return tcf_idr_delete_index(tn, index); | ||
468 | } | ||
469 | |||
470 | static struct tc_action_ops act_pedit_ops = { | 471 | static struct tc_action_ops act_pedit_ops = { |
471 | .kind = "pedit", | 472 | .kind = "pedit", |
472 | .type = TCA_ACT_PEDIT, | 473 | .type = TCA_ACT_PEDIT, |
@@ -477,7 +478,6 @@ static struct tc_action_ops act_pedit_ops = { | |||
477 | .init = tcf_pedit_init, | 478 | .init = tcf_pedit_init, |
478 | .walk = tcf_pedit_walker, | 479 | .walk = tcf_pedit_walker, |
479 | .lookup = tcf_pedit_search, | 480 | .lookup = tcf_pedit_search, |
480 | .delete = tcf_pedit_delete, | ||
481 | .size = sizeof(struct tcf_pedit), | 481 | .size = sizeof(struct tcf_pedit), |
482 | }; | 482 | }; |
483 | 483 | ||
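The act_pedit hunks above make tcf_pedit_key_ex_dump() check the result of nla_nest_start(), cancel the whole TCA_PEDIT_KEYS_EX nest with nla_nest_cancel() if any inner put fails, and let tcf_pedit_dump() propagate that failure. A rough userspace model of the cancel-on-failure idea is below, with a plain byte buffer standing in for the netlink message; the names are invented and none of this is the netlink API itself.

#include <stdio.h>
#include <string.h>

struct buf {
        char data[8];                   /* deliberately tiny so the rollback fires */
        size_t len;
};

static int put(struct buf *b, const char *s)
{
        size_t n = strlen(s);

        if (b->len + n > sizeof(b->data))
                return -1;              /* an inner put failed */
        memcpy(b->data + b->len, s, n);
        b->len += n;
        return 0;
}

static int dump_keys(struct buf *b, const char **keys, int n)
{
        size_t nest_start = b->len;     /* like nla_nest_start() */
        int i;

        for (i = 0; i < n; i++)
                if (put(b, keys[i]))
                        goto cancel;
        return 0;                       /* like nla_nest_end() */
cancel:
        b->len = nest_start;            /* like nla_nest_cancel(): drop the nest */
        return -1;
}

int main(void)
{
        const char *keys[] = { "htype=0;", "cmd=1;" };
        struct buf b = { .len = 0 };

        if (dump_keys(&b, keys, 2))
                fprintf(stderr, "dump failed, nest rolled back\n");
        printf("%zu bytes left in the message\n", b.len);
        return 0;
}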
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 06f0742db593..5d8bfa878477 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index, | |||
320 | return tcf_idr_search(tn, a, index); | 320 | return tcf_idr_search(tn, a, index); |
321 | } | 321 | } |
322 | 322 | ||
323 | static int tcf_police_delete(struct net *net, u32 index) | ||
324 | { | ||
325 | struct tc_action_net *tn = net_generic(net, police_net_id); | ||
326 | |||
327 | return tcf_idr_delete_index(tn, index); | ||
328 | } | ||
329 | |||
330 | MODULE_AUTHOR("Alexey Kuznetsov"); | 323 | MODULE_AUTHOR("Alexey Kuznetsov"); |
331 | MODULE_DESCRIPTION("Policing actions"); | 324 | MODULE_DESCRIPTION("Policing actions"); |
332 | MODULE_LICENSE("GPL"); | 325 | MODULE_LICENSE("GPL"); |
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = { | |||
340 | .init = tcf_police_init, | 333 | .init = tcf_police_init, |
341 | .walk = tcf_police_walker, | 334 | .walk = tcf_police_walker, |
342 | .lookup = tcf_police_search, | 335 | .lookup = tcf_police_search, |
343 | .delete = tcf_police_delete, | ||
344 | .size = sizeof(struct tcf_police), | 336 | .size = sizeof(struct tcf_police), |
345 | }; | 337 | }; |
346 | 338 | ||
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 207b4132d1b0..44e9c00657bc 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c | |||
@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index, | |||
232 | return tcf_idr_search(tn, a, index); | 232 | return tcf_idr_search(tn, a, index); |
233 | } | 233 | } |
234 | 234 | ||
235 | static int tcf_sample_delete(struct net *net, u32 index) | ||
236 | { | ||
237 | struct tc_action_net *tn = net_generic(net, sample_net_id); | ||
238 | |||
239 | return tcf_idr_delete_index(tn, index); | ||
240 | } | ||
241 | |||
242 | static struct tc_action_ops act_sample_ops = { | 235 | static struct tc_action_ops act_sample_ops = { |
243 | .kind = "sample", | 236 | .kind = "sample", |
244 | .type = TCA_ACT_SAMPLE, | 237 | .type = TCA_ACT_SAMPLE, |
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = { | |||
249 | .cleanup = tcf_sample_cleanup, | 242 | .cleanup = tcf_sample_cleanup, |
250 | .walk = tcf_sample_walker, | 243 | .walk = tcf_sample_walker, |
251 | .lookup = tcf_sample_search, | 244 | .lookup = tcf_sample_search, |
252 | .delete = tcf_sample_delete, | ||
253 | .size = sizeof(struct tcf_sample), | 245 | .size = sizeof(struct tcf_sample), |
254 | }; | 246 | }; |
255 | 247 | ||
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index e616523ba3c1..52400d49f81f 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index, | |||
196 | return tcf_idr_search(tn, a, index); | 196 | return tcf_idr_search(tn, a, index); |
197 | } | 197 | } |
198 | 198 | ||
199 | static int tcf_simp_delete(struct net *net, u32 index) | ||
200 | { | ||
201 | struct tc_action_net *tn = net_generic(net, simp_net_id); | ||
202 | |||
203 | return tcf_idr_delete_index(tn, index); | ||
204 | } | ||
205 | |||
206 | static struct tc_action_ops act_simp_ops = { | 199 | static struct tc_action_ops act_simp_ops = { |
207 | .kind = "simple", | 200 | .kind = "simple", |
208 | .type = TCA_ACT_SIMP, | 201 | .type = TCA_ACT_SIMP, |
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = { | |||
213 | .init = tcf_simp_init, | 206 | .init = tcf_simp_init, |
214 | .walk = tcf_simp_walker, | 207 | .walk = tcf_simp_walker, |
215 | .lookup = tcf_simp_search, | 208 | .lookup = tcf_simp_search, |
216 | .delete = tcf_simp_delete, | ||
217 | .size = sizeof(struct tcf_defact), | 209 | .size = sizeof(struct tcf_defact), |
218 | }; | 210 | }; |
219 | 211 | ||
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 926d7bc4a89d..73e44ce2a883 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index, | |||
299 | return tcf_idr_search(tn, a, index); | 299 | return tcf_idr_search(tn, a, index); |
300 | } | 300 | } |
301 | 301 | ||
302 | static int tcf_skbedit_delete(struct net *net, u32 index) | ||
303 | { | ||
304 | struct tc_action_net *tn = net_generic(net, skbedit_net_id); | ||
305 | |||
306 | return tcf_idr_delete_index(tn, index); | ||
307 | } | ||
308 | |||
309 | static struct tc_action_ops act_skbedit_ops = { | 302 | static struct tc_action_ops act_skbedit_ops = { |
310 | .kind = "skbedit", | 303 | .kind = "skbedit", |
311 | .type = TCA_ACT_SKBEDIT, | 304 | .type = TCA_ACT_SKBEDIT, |
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = { | |||
316 | .cleanup = tcf_skbedit_cleanup, | 309 | .cleanup = tcf_skbedit_cleanup, |
317 | .walk = tcf_skbedit_walker, | 310 | .walk = tcf_skbedit_walker, |
318 | .lookup = tcf_skbedit_search, | 311 | .lookup = tcf_skbedit_search, |
319 | .delete = tcf_skbedit_delete, | ||
320 | .size = sizeof(struct tcf_skbedit), | 312 | .size = sizeof(struct tcf_skbedit), |
321 | }; | 313 | }; |
322 | 314 | ||
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index d6a1af0c4171..588077fafd6c 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c | |||
@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index, | |||
259 | return tcf_idr_search(tn, a, index); | 259 | return tcf_idr_search(tn, a, index); |
260 | } | 260 | } |
261 | 261 | ||
262 | static int tcf_skbmod_delete(struct net *net, u32 index) | ||
263 | { | ||
264 | struct tc_action_net *tn = net_generic(net, skbmod_net_id); | ||
265 | |||
266 | return tcf_idr_delete_index(tn, index); | ||
267 | } | ||
268 | |||
269 | static struct tc_action_ops act_skbmod_ops = { | 262 | static struct tc_action_ops act_skbmod_ops = { |
270 | .kind = "skbmod", | 263 | .kind = "skbmod", |
271 | .type = TCA_ACT_SKBMOD, | 264 | .type = TCA_ACT_SKBMOD, |
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = { | |||
276 | .cleanup = tcf_skbmod_cleanup, | 269 | .cleanup = tcf_skbmod_cleanup, |
277 | .walk = tcf_skbmod_walker, | 270 | .walk = tcf_skbmod_walker, |
278 | .lookup = tcf_skbmod_search, | 271 | .lookup = tcf_skbmod_search, |
279 | .delete = tcf_skbmod_delete, | ||
280 | .size = sizeof(struct tcf_skbmod), | 272 | .size = sizeof(struct tcf_skbmod), |
281 | }; | 273 | }; |
282 | 274 | ||
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 8f09cf08d8fe..420759153d5f 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
@@ -548,13 +548,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index, | |||
548 | return tcf_idr_search(tn, a, index); | 548 | return tcf_idr_search(tn, a, index); |
549 | } | 549 | } |
550 | 550 | ||
551 | static int tunnel_key_delete(struct net *net, u32 index) | ||
552 | { | ||
553 | struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); | ||
554 | |||
555 | return tcf_idr_delete_index(tn, index); | ||
556 | } | ||
557 | |||
558 | static struct tc_action_ops act_tunnel_key_ops = { | 551 | static struct tc_action_ops act_tunnel_key_ops = { |
559 | .kind = "tunnel_key", | 552 | .kind = "tunnel_key", |
560 | .type = TCA_ACT_TUNNEL_KEY, | 553 | .type = TCA_ACT_TUNNEL_KEY, |
@@ -565,7 +558,6 @@ static struct tc_action_ops act_tunnel_key_ops = { | |||
565 | .cleanup = tunnel_key_release, | 558 | .cleanup = tunnel_key_release, |
566 | .walk = tunnel_key_walker, | 559 | .walk = tunnel_key_walker, |
567 | .lookup = tunnel_key_search, | 560 | .lookup = tunnel_key_search, |
568 | .delete = tunnel_key_delete, | ||
569 | .size = sizeof(struct tcf_tunnel_key), | 561 | .size = sizeof(struct tcf_tunnel_key), |
570 | }; | 562 | }; |
571 | 563 | ||
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 209e70ad2c09..033d273afe50 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c | |||
@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index, | |||
296 | return tcf_idr_search(tn, a, index); | 296 | return tcf_idr_search(tn, a, index); |
297 | } | 297 | } |
298 | 298 | ||
299 | static int tcf_vlan_delete(struct net *net, u32 index) | ||
300 | { | ||
301 | struct tc_action_net *tn = net_generic(net, vlan_net_id); | ||
302 | |||
303 | return tcf_idr_delete_index(tn, index); | ||
304 | } | ||
305 | |||
306 | static struct tc_action_ops act_vlan_ops = { | 299 | static struct tc_action_ops act_vlan_ops = { |
307 | .kind = "vlan", | 300 | .kind = "vlan", |
308 | .type = TCA_ACT_VLAN, | 301 | .type = TCA_ACT_VLAN, |
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = { | |||
313 | .cleanup = tcf_vlan_cleanup, | 306 | .cleanup = tcf_vlan_cleanup, |
314 | .walk = tcf_vlan_walker, | 307 | .walk = tcf_vlan_walker, |
315 | .lookup = tcf_vlan_search, | 308 | .lookup = tcf_vlan_search, |
316 | .delete = tcf_vlan_delete, | ||
317 | .size = sizeof(struct tcf_vlan), | 309 | .size = sizeof(struct tcf_vlan), |
318 | }; | 310 | }; |
319 | 311 | ||
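Across act_ife, act_ipt/xt, act_mirred, act_nat, act_pedit, act_police, act_sample, act_simple, act_skbedit, act_skbmod, act_tunnel_key and act_vlan the same boilerplate disappears: every .delete callback was a thin wrapper around tcf_idr_delete_index() for that action's per-net table, so the per-action hook can be dropped once the core handles deletion itself. The sketch below shows the general shape of that refactoring in userspace C; the struct and function names are invented.

#include <stdio.h>

struct ops {
        const char *kind;
        int (*del)(int index);          /* optional: NULL means "generic path" */
};

static int core_delete(const struct ops *o, int index)
{
        if (o->del)                     /* before: every backend supplied this */
                return o->del(index);
        printf("core deletes %s entry %d via the shared index table\n",
               o->kind, index);
        return 0;
}

int main(void)
{
        static const struct ops vlan_ops = { .kind = "vlan" }; /* no .del needed */

        return core_delete(&vlan_ops, 1);
}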
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 31bd1439cf60..1a67af8a6e8c 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -1252,7 +1252,7 @@ replay: | |||
1252 | } | 1252 | } |
1253 | chain = tcf_chain_get(block, chain_index, true); | 1253 | chain = tcf_chain_get(block, chain_index, true); |
1254 | if (!chain) { | 1254 | if (!chain) { |
1255 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); | 1255 | NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); |
1256 | err = -ENOMEM; | 1256 | err = -ENOMEM; |
1257 | goto errout; | 1257 | goto errout; |
1258 | } | 1258 | } |
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, | |||
1399 | goto errout; | 1399 | goto errout; |
1400 | } | 1400 | } |
1401 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); | 1401 | NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); |
1402 | err = -EINVAL; | 1402 | err = -ENOENT; |
1403 | goto errout; | 1403 | goto errout; |
1404 | } | 1404 | } |
1405 | 1405 | ||
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d5d2a6dc3921..f218ccf1e2d9 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
914 | struct nlattr *opt = tca[TCA_OPTIONS]; | 914 | struct nlattr *opt = tca[TCA_OPTIONS]; |
915 | struct nlattr *tb[TCA_U32_MAX + 1]; | 915 | struct nlattr *tb[TCA_U32_MAX + 1]; |
916 | u32 htid, flags = 0; | 916 | u32 htid, flags = 0; |
917 | size_t sel_size; | ||
917 | int err; | 918 | int err; |
918 | #ifdef CONFIG_CLS_U32_PERF | 919 | #ifdef CONFIG_CLS_U32_PERF |
919 | size_t size; | 920 | size_t size; |
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
1076 | } | 1077 | } |
1077 | 1078 | ||
1078 | s = nla_data(tb[TCA_U32_SEL]); | 1079 | s = nla_data(tb[TCA_U32_SEL]); |
1080 | sel_size = struct_size(s, keys, s->nkeys); | ||
1081 | if (nla_len(tb[TCA_U32_SEL]) < sel_size) { | ||
1082 | err = -EINVAL; | ||
1083 | goto erridr; | ||
1084 | } | ||
1079 | 1085 | ||
1080 | n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); | 1086 | n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL); |
1081 | if (n == NULL) { | 1087 | if (n == NULL) { |
1082 | err = -ENOBUFS; | 1088 | err = -ENOBUFS; |
1083 | goto erridr; | 1089 | goto erridr; |
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
1092 | } | 1098 | } |
1093 | #endif | 1099 | #endif |
1094 | 1100 | ||
1095 | memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); | 1101 | memcpy(&n->sel, s, sel_size); |
1096 | RCU_INIT_POINTER(n->ht_up, ht); | 1102 | RCU_INIT_POINTER(n->ht_up, ht); |
1097 | n->handle = handle; | 1103 | n->handle = handle; |
1098 | n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; | 1104 | n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; |
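The cls_u32 hunk above validates nla_len(tb[TCA_U32_SEL]) against struct_size(s, keys, s->nkeys) before allocating and copying the selector, so a selector that claims more keys than the attribute actually carries is rejected instead of being read past its end. A self-contained sketch of that flexible-array bounds check is below; it is a userspace model with invented names, not the kernel helpers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct key {
        unsigned int mask, val;
};

struct sel {
        unsigned char nkeys;
        struct key keys[];              /* flexible array member */
};

static struct sel *copy_sel(const void *data, size_t len)
{
        const struct sel *s = data;
        size_t sel_size;
        struct sel *n;

        if (len < sizeof(*s))
                return NULL;
        /* like struct_size(s, keys, s->nkeys) */
        sel_size = sizeof(*s) + (size_t)s->nkeys * sizeof(struct key);
        if (len < sel_size)             /* shorter than the key count claims */
                return NULL;

        n = malloc(sel_size);
        if (n)
                memcpy(n, s, sel_size); /* bounded by the validated size */
        return n;
}

int main(void)
{
        size_t len = sizeof(struct sel) + 2 * sizeof(struct key);
        struct sel *in = calloc(1, len);
        struct sel *ok, *bad;

        if (!in)
                return 1;
        in->nkeys = 2;
        ok = copy_sel(in, len);                 /* length matches: accepted */
        bad = copy_sel(in, sizeof(struct sel)); /* truncated input: rejected */
        printf("full-length copy: %s, truncated copy: %s\n",
               ok ? "ok" : "rejected", bad ? "ok" : "rejected");
        free(ok);
        free(in);
        return 0;
}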
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 35fc7252187c..c07c30b916d5 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c | |||
@@ -64,7 +64,6 @@ | |||
64 | #include <linux/vmalloc.h> | 64 | #include <linux/vmalloc.h> |
65 | #include <linux/reciprocal_div.h> | 65 | #include <linux/reciprocal_div.h> |
66 | #include <net/netlink.h> | 66 | #include <net/netlink.h> |
67 | #include <linux/version.h> | ||
68 | #include <linux/if_vlan.h> | 67 | #include <linux/if_vlan.h> |
69 | #include <net/pkt_sched.h> | 68 | #include <net/pkt_sched.h> |
70 | #include <net/pkt_cls.h> | 69 | #include <net/pkt_cls.h> |
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode) | |||
621 | } | 620 | } |
622 | 621 | ||
623 | static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, | 622 | static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, |
624 | int flow_mode) | 623 | int flow_mode, u16 flow_override, u16 host_override) |
625 | { | 624 | { |
626 | u32 flow_hash = 0, srchost_hash, dsthost_hash; | 625 | u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0; |
627 | u16 reduced_hash, srchost_idx, dsthost_idx; | 626 | u16 reduced_hash, srchost_idx, dsthost_idx; |
628 | struct flow_keys keys, host_keys; | 627 | struct flow_keys keys, host_keys; |
629 | 628 | ||
630 | if (unlikely(flow_mode == CAKE_FLOW_NONE)) | 629 | if (unlikely(flow_mode == CAKE_FLOW_NONE)) |
631 | return 0; | 630 | return 0; |
632 | 631 | ||
632 | /* If both overrides are set we can skip packet dissection entirely */ | ||
633 | if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) && | ||
634 | (host_override || !(flow_mode & CAKE_FLOW_HOSTS))) | ||
635 | goto skip_hash; | ||
636 | |||
633 | skb_flow_dissect_flow_keys(skb, &keys, | 637 | skb_flow_dissect_flow_keys(skb, &keys, |
634 | FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); | 638 | FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); |
635 | 639 | ||
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, | |||
676 | if (flow_mode & CAKE_FLOW_FLOWS) | 680 | if (flow_mode & CAKE_FLOW_FLOWS) |
677 | flow_hash = flow_hash_from_keys(&keys); | 681 | flow_hash = flow_hash_from_keys(&keys); |
678 | 682 | ||
683 | skip_hash: | ||
684 | if (flow_override) | ||
685 | flow_hash = flow_override - 1; | ||
686 | if (host_override) { | ||
687 | dsthost_hash = host_override - 1; | ||
688 | srchost_hash = host_override - 1; | ||
689 | } | ||
690 | |||
679 | if (!(flow_mode & CAKE_FLOW_FLOWS)) { | 691 | if (!(flow_mode & CAKE_FLOW_FLOWS)) { |
680 | if (flow_mode & CAKE_FLOW_SRC_IP) | 692 | if (flow_mode & CAKE_FLOW_SRC_IP) |
681 | flow_hash ^= srchost_hash; | 693 | flow_hash ^= srchost_hash; |
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, | |||
1571 | struct cake_sched_data *q = qdisc_priv(sch); | 1583 | struct cake_sched_data *q = qdisc_priv(sch); |
1572 | struct tcf_proto *filter; | 1584 | struct tcf_proto *filter; |
1573 | struct tcf_result res; | 1585 | struct tcf_result res; |
1574 | u32 flow = 0; | 1586 | u16 flow = 0, host = 0; |
1575 | int result; | 1587 | int result; |
1576 | 1588 | ||
1577 | filter = rcu_dereference_bh(q->filter_list); | 1589 | filter = rcu_dereference_bh(q->filter_list); |
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, | |||
1595 | #endif | 1607 | #endif |
1596 | if (TC_H_MIN(res.classid) <= CAKE_QUEUES) | 1608 | if (TC_H_MIN(res.classid) <= CAKE_QUEUES) |
1597 | flow = TC_H_MIN(res.classid); | 1609 | flow = TC_H_MIN(res.classid); |
1610 | if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16)) | ||
1611 | host = TC_H_MAJ(res.classid) >> 16; | ||
1598 | } | 1612 | } |
1599 | hash: | 1613 | hash: |
1600 | *t = cake_select_tin(sch, skb); | 1614 | *t = cake_select_tin(sch, skb); |
1601 | return flow ?: cake_hash(*t, skb, flow_mode) + 1; | 1615 | return cake_hash(*t, skb, flow_mode, flow, host) + 1; |
1602 | } | 1616 | } |
1603 | 1617 | ||
1604 | static void cake_reconfigure(struct Qdisc *sch); | 1618 | static void cake_reconfigure(struct Qdisc *sch); |
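In the sch_cake hunks above, cake_classify() now hands cake_hash() two separate 16-bit overrides taken from the classifier result: the classid's minor number (bounded by CAKE_QUEUES) picks a flow queue, its major number picks a host bucket, zero means "no override", the stored value is decremented before use, and packet dissection is skipped entirely when both overrides are set. A small sketch of that encoding follows; the two masks mirror the TC_H_MAJ/TC_H_MIN definitions from the uapi headers, the rest is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ(h) ((h) & 0xFFFF0000U)
#define TC_H_MIN(h) ((h) & 0x0000FFFFU)

/* A zero override leaves the computed hash alone; a non-zero override is
 * stored one above the real index so that 0 can mean "not set".
 */
static uint32_t apply_override(uint32_t hash, uint16_t override)
{
        return override ? (uint32_t)override - 1 : hash;
}

int main(void)
{
        uint32_t classid = 0x00030007;  /* major 3 = host, minor 7 = flow */
        uint16_t flow = (uint16_t)TC_H_MIN(classid);
        uint16_t host = (uint16_t)(TC_H_MAJ(classid) >> 16);

        printf("flow queue %u, host bucket %u\n",
               apply_override(12345, flow),    /* 7 - 1 = 6 */
               apply_override(6789, host));    /* 3 - 1 = 2 */
        return 0;
}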
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index ef5c9a82d4e8..a644292f9faf 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = { | |||
215 | struct sctp_ht_iter { | 215 | struct sctp_ht_iter { |
216 | struct seq_net_private p; | 216 | struct seq_net_private p; |
217 | struct rhashtable_iter hti; | 217 | struct rhashtable_iter hti; |
218 | int start_fail; | ||
219 | }; | 218 | }; |
220 | 219 | ||
221 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | 220 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) |
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | |||
224 | 223 | ||
225 | sctp_transport_walk_start(&iter->hti); | 224 | sctp_transport_walk_start(&iter->hti); |
226 | 225 | ||
227 | iter->start_fail = 0; | ||
228 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); | 226 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); |
229 | } | 227 | } |
230 | 228 | ||
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v) | |||
232 | { | 230 | { |
233 | struct sctp_ht_iter *iter = seq->private; | 231 | struct sctp_ht_iter *iter = seq->private; |
234 | 232 | ||
235 | if (iter->start_fail) | ||
236 | return; | ||
237 | sctp_transport_walk_stop(&iter->hti); | 233 | sctp_transport_walk_stop(&iter->hti); |
238 | } | 234 | } |
239 | 235 | ||
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | |||
264 | } | 260 | } |
265 | 261 | ||
266 | transport = (struct sctp_transport *)v; | 262 | transport = (struct sctp_transport *)v; |
267 | if (!sctp_transport_hold(transport)) | ||
268 | return 0; | ||
269 | assoc = transport->asoc; | 263 | assoc = transport->asoc; |
270 | epb = &assoc->base; | 264 | epb = &assoc->base; |
271 | sk = epb->sk; | 265 | sk = epb->sk; |
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) | |||
322 | } | 316 | } |
323 | 317 | ||
324 | transport = (struct sctp_transport *)v; | 318 | transport = (struct sctp_transport *)v; |
325 | if (!sctp_transport_hold(transport)) | ||
326 | return 0; | ||
327 | assoc = transport->asoc; | 319 | assoc = transport->asoc; |
328 | 320 | ||
329 | list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, | 321 | list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e96b15a66aba..f73e9d38d5ba 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
2658 | } | 2658 | } |
2659 | 2659 | ||
2660 | if (params->spp_flags & SPP_IPV6_FLOWLABEL) { | 2660 | if (params->spp_flags & SPP_IPV6_FLOWLABEL) { |
2661 | if (trans && trans->ipaddr.sa.sa_family == AF_INET6) { | 2661 | if (trans) { |
2662 | trans->flowlabel = params->spp_ipv6_flowlabel & | 2662 | if (trans->ipaddr.sa.sa_family == AF_INET6) { |
2663 | SCTP_FLOWLABEL_VAL_MASK; | ||
2664 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | ||
2665 | } else if (asoc) { | ||
2666 | list_for_each_entry(trans, | ||
2667 | &asoc->peer.transport_addr_list, | ||
2668 | transports) { | ||
2669 | if (trans->ipaddr.sa.sa_family != AF_INET6) | ||
2670 | continue; | ||
2671 | trans->flowlabel = params->spp_ipv6_flowlabel & | 2663 | trans->flowlabel = params->spp_ipv6_flowlabel & |
2672 | SCTP_FLOWLABEL_VAL_MASK; | 2664 | SCTP_FLOWLABEL_VAL_MASK; |
2673 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | 2665 | trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; |
2674 | } | 2666 | } |
2667 | } else if (asoc) { | ||
2668 | struct sctp_transport *t; | ||
2669 | |||
2670 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | ||
2671 | transports) { | ||
2672 | if (t->ipaddr.sa.sa_family != AF_INET6) | ||
2673 | continue; | ||
2674 | t->flowlabel = params->spp_ipv6_flowlabel & | ||
2675 | SCTP_FLOWLABEL_VAL_MASK; | ||
2676 | t->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | ||
2677 | } | ||
2675 | asoc->flowlabel = params->spp_ipv6_flowlabel & | 2678 | asoc->flowlabel = params->spp_ipv6_flowlabel & |
2676 | SCTP_FLOWLABEL_VAL_MASK; | 2679 | SCTP_FLOWLABEL_VAL_MASK; |
2677 | asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; | 2680 | asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; |
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
2687 | trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; | 2690 | trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; |
2688 | trans->dscp |= SCTP_DSCP_SET_MASK; | 2691 | trans->dscp |= SCTP_DSCP_SET_MASK; |
2689 | } else if (asoc) { | 2692 | } else if (asoc) { |
2690 | list_for_each_entry(trans, | 2693 | struct sctp_transport *t; |
2691 | &asoc->peer.transport_addr_list, | 2694 | |
2695 | list_for_each_entry(t, &asoc->peer.transport_addr_list, | ||
2692 | transports) { | 2696 | transports) { |
2693 | trans->dscp = params->spp_dscp & | 2697 | t->dscp = params->spp_dscp & |
2694 | SCTP_DSCP_VAL_MASK; | 2698 | SCTP_DSCP_VAL_MASK; |
2695 | trans->dscp |= SCTP_DSCP_SET_MASK; | 2699 | t->dscp |= SCTP_DSCP_SET_MASK; |
2696 | } | 2700 | } |
2697 | asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; | 2701 | asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; |
2698 | asoc->dscp |= SCTP_DSCP_SET_MASK; | 2702 | asoc->dscp |= SCTP_DSCP_SET_MASK; |
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net, | |||
5005 | break; | 5009 | break; |
5006 | } | 5010 | } |
5007 | 5011 | ||
5012 | if (!sctp_transport_hold(t)) | ||
5013 | continue; | ||
5014 | |||
5008 | if (net_eq(sock_net(t->asoc->base.sk), net) && | 5015 | if (net_eq(sock_net(t->asoc->base.sk), net) && |
5009 | t->asoc->peer.primary_path == t) | 5016 | t->asoc->peer.primary_path == t) |
5010 | break; | 5017 | break; |
5018 | |||
5019 | sctp_transport_put(t); | ||
5011 | } | 5020 | } |
5012 | 5021 | ||
5013 | return t; | 5022 | return t; |
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net, | |||
5017 | struct rhashtable_iter *iter, | 5026 | struct rhashtable_iter *iter, |
5018 | int pos) | 5027 | int pos) |
5019 | { | 5028 | { |
5020 | void *obj = SEQ_START_TOKEN; | 5029 | struct sctp_transport *t; |
5021 | 5030 | ||
5022 | while (pos && (obj = sctp_transport_get_next(net, iter)) && | 5031 | if (!pos) |
5023 | !IS_ERR(obj)) | 5032 | return SEQ_START_TOKEN; |
5024 | pos--; | ||
5025 | 5033 | ||
5026 | return obj; | 5034 | while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) { |
5035 | if (!--pos) | ||
5036 | break; | ||
5037 | sctp_transport_put(t); | ||
5038 | } | ||
5039 | |||
5040 | return t; | ||
5027 | } | 5041 | } |
5028 | 5042 | ||
5029 | int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), | 5043 | int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), |
@@ -5082,8 +5096,6 @@ again: | |||
5082 | 5096 | ||
5083 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); | 5097 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); |
5084 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { | 5098 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { |
5085 | if (!sctp_transport_hold(tsp)) | ||
5086 | continue; | ||
5087 | ret = cb(tsp, p); | 5099 | ret = cb(tsp, p); |
5088 | if (ret) | 5100 | if (ret) |
5089 | break; | 5101 | break; |
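The sctp hunks above move the sctp_transport_hold() into sctp_transport_get_next(), so the iterator only ever hands back transports it already holds a reference on, and the /proc show functions and sctp_for_each_transport() no longer need to re-check before using each entry. A simplified userspace model of an iterator that returns held references is below; the names are invented and carry no SCTP specifics.

#include <stdio.h>

struct node {
        int refcnt;                     /* 0 means the node is being torn down */
        int id;
};

static int node_hold(struct node *n)
{
        if (n->refcnt == 0)
                return 0;               /* lost the race with teardown: skip */
        n->refcnt++;
        return 1;
}

static void node_put(struct node *n)
{
        n->refcnt--;
}

/* Return the next live node at or after *pos, with a reference already held. */
static struct node *get_next(struct node *tbl, int ntbl, int *pos)
{
        while (*pos < ntbl) {
                struct node *n = &tbl[(*pos)++];

                if (node_hold(n))
                        return n;       /* caller must node_put() when done */
        }
        return NULL;
}

int main(void)
{
        struct node tbl[] = { { 1, 0 }, { 0, 1 }, { 1, 2 } }; /* id 1 is dying */
        int pos = 0;
        struct node *n;

        while ((n = get_next(tbl, 3, &pos))) {
                printf("visiting node %d\n", n->id);
                node_put(n);            /* the callback no longer re-checks */
        }
        return 0;
}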
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 9ee6cfea56dd..d8026543bf4c 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link"; | |||
51 | * struct tipc_bc_base - base structure for keeping broadcast send state | 51 | * struct tipc_bc_base - base structure for keeping broadcast send state |
52 | * @link: broadcast send link structure | 52 | * @link: broadcast send link structure |
53 | * @inputq: data input queue; will only carry SOCK_WAKEUP messages | 53 | * @inputq: data input queue; will only carry SOCK_WAKEUP messages |
54 | * @dest: array keeping number of reachable destinations per bearer | 54 | * @dests: array keeping number of reachable destinations per bearer |
55 | * @primary_bearer: a bearer having links to all broadcast destinations, if any | 55 | * @primary_bearer: a bearer having links to all broadcast destinations, if any |
56 | * @bcast_support: indicates if primary bearer, if any, supports broadcast | 56 | * @bcast_support: indicates if primary bearer, if any, supports broadcast |
57 | * @rcast_support: indicates if all peer nodes support replicast | 57 | * @rcast_support: indicates if all peer nodes support replicast |
58 | * @rc_ratio: dest count as percentage of cluster size where send method changes | 58 | * @rc_ratio: dest count as percentage of cluster size where send method changes |
59 | * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast | 59 | * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast |
60 | */ | 60 | */ |
61 | struct tipc_bc_base { | 61 | struct tipc_bc_base { |
62 | struct tipc_link *link; | 62 | struct tipc_link *link; |
diff --git a/net/tipc/diag.c b/net/tipc/diag.c index aaabb0b776dd..73137f4aeb68 100644 --- a/net/tipc/diag.c +++ b/net/tipc/diag.c | |||
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb, | |||
84 | 84 | ||
85 | if (h->nlmsg_flags & NLM_F_DUMP) { | 85 | if (h->nlmsg_flags & NLM_F_DUMP) { |
86 | struct netlink_dump_control c = { | 86 | struct netlink_dump_control c = { |
87 | .start = tipc_dump_start, | ||
87 | .dump = tipc_diag_dump, | 88 | .dump = tipc_diag_dump, |
89 | .done = tipc_dump_done, | ||
88 | }; | 90 | }; |
89 | netlink_dump_start(net->diag_nlsk, skb, h, &c); | 91 | netlink_dump_start(net->diag_nlsk, skb, h, &c); |
90 | return 0; | 92 | return 0; |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 88f027b502f6..66d5b2c5987a 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
980 | 980 | ||
981 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) | 981 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) |
982 | { | 982 | { |
983 | u64 value = (u64)node << 32 | port; | ||
984 | struct tipc_dest *dst; | 983 | struct tipc_dest *dst; |
985 | 984 | ||
986 | list_for_each_entry(dst, l, list) { | 985 | list_for_each_entry(dst, l, list) { |
987 | if (dst->value != value) | 986 | if (dst->node == node && dst->port == port) |
988 | continue; | 987 | return dst; |
989 | return dst; | ||
990 | } | 988 | } |
991 | return NULL; | 989 | return NULL; |
992 | } | 990 | } |
993 | 991 | ||
994 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port) | 992 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port) |
995 | { | 993 | { |
996 | u64 value = (u64)node << 32 | port; | ||
997 | struct tipc_dest *dst; | 994 | struct tipc_dest *dst; |
998 | 995 | ||
999 | if (tipc_dest_find(l, node, port)) | 996 | if (tipc_dest_find(l, node, port)) |
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port) | |||
1002 | dst = kmalloc(sizeof(*dst), GFP_ATOMIC); | 999 | dst = kmalloc(sizeof(*dst), GFP_ATOMIC); |
1003 | if (unlikely(!dst)) | 1000 | if (unlikely(!dst)) |
1004 | return false; | 1001 | return false; |
1005 | dst->value = value; | 1002 | dst->node = node; |
1003 | dst->port = port; | ||
1006 | list_add(&dst->list, l); | 1004 | list_add(&dst->list, l); |
1007 | return true; | 1005 | return true; |
1008 | } | 1006 | } |
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 0febba41da86..892bd750b85f 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h | |||
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net); | |||
133 | 133 | ||
134 | struct tipc_dest { | 134 | struct tipc_dest { |
135 | struct list_head list; | 135 | struct list_head list; |
136 | union { | 136 | u32 port; |
137 | struct { | 137 | u32 node; |
138 | u32 port; | ||
139 | u32 node; | ||
140 | }; | ||
141 | u64 value; | ||
142 | }; | ||
143 | }; | 138 | }; |
144 | 139 | ||
145 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); | 140 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); |
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 6ff2254088f6..99ee419210ba 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = { | |||
167 | }, | 167 | }, |
168 | { | 168 | { |
169 | .cmd = TIPC_NL_SOCK_GET, | 169 | .cmd = TIPC_NL_SOCK_GET, |
170 | .start = tipc_dump_start, | ||
170 | .dumpit = tipc_nl_sk_dump, | 171 | .dumpit = tipc_nl_sk_dump, |
172 | .done = tipc_dump_done, | ||
171 | .policy = tipc_nl_policy, | 173 | .policy = tipc_nl_policy, |
172 | }, | 174 | }, |
173 | { | 175 | { |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index c1e93c9515bc..ab7a2a7178f7 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -2672,6 +2672,8 @@ void tipc_sk_reinit(struct net *net) | |||
2672 | 2672 | ||
2673 | rhashtable_walk_stop(&iter); | 2673 | rhashtable_walk_stop(&iter); |
2674 | } while (tsk == ERR_PTR(-EAGAIN)); | 2674 | } while (tsk == ERR_PTR(-EAGAIN)); |
2675 | |||
2676 | rhashtable_walk_exit(&iter); | ||
2675 | } | 2677 | } |
2676 | 2678 | ||
2677 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) | 2679 | static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) |
@@ -3227,45 +3229,69 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, | |||
3227 | struct netlink_callback *cb, | 3229 | struct netlink_callback *cb, |
3228 | struct tipc_sock *tsk)) | 3230 | struct tipc_sock *tsk)) |
3229 | { | 3231 | { |
3230 | struct net *net = sock_net(skb->sk); | 3232 | struct rhashtable_iter *iter = (void *)cb->args[0]; |
3231 | struct tipc_net *tn = tipc_net(net); | ||
3232 | const struct bucket_table *tbl; | ||
3233 | u32 prev_portid = cb->args[1]; | ||
3234 | u32 tbl_id = cb->args[0]; | ||
3235 | struct rhash_head *pos; | ||
3236 | struct tipc_sock *tsk; | 3233 | struct tipc_sock *tsk; |
3237 | int err; | 3234 | int err; |
3238 | 3235 | ||
3239 | rcu_read_lock(); | 3236 | rhashtable_walk_start(iter); |
3240 | tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); | 3237 | while ((tsk = rhashtable_walk_next(iter)) != NULL) { |
3241 | for (; tbl_id < tbl->size; tbl_id++) { | 3238 | if (IS_ERR(tsk)) { |
3242 | rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { | 3239 | err = PTR_ERR(tsk); |
3243 | spin_lock_bh(&tsk->sk.sk_lock.slock); | 3240 | if (err == -EAGAIN) { |
3244 | if (prev_portid && prev_portid != tsk->portid) { | 3241 | err = 0; |
3245 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
3246 | continue; | 3242 | continue; |
3247 | } | 3243 | } |
3244 | break; | ||
3245 | } | ||
3248 | 3246 | ||
3249 | err = skb_handler(skb, cb, tsk); | 3247 | sock_hold(&tsk->sk); |
3250 | if (err) { | 3248 | rhashtable_walk_stop(iter); |
3251 | prev_portid = tsk->portid; | 3249 | lock_sock(&tsk->sk); |
3252 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | 3250 | err = skb_handler(skb, cb, tsk); |
3253 | goto out; | 3251 | if (err) { |
3254 | } | 3252 | release_sock(&tsk->sk); |
3255 | 3253 | sock_put(&tsk->sk); | |
3256 | prev_portid = 0; | 3254 | goto out; |
3257 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | ||
3258 | } | 3255 | } |
3256 | release_sock(&tsk->sk); | ||
3257 | rhashtable_walk_start(iter); | ||
3258 | sock_put(&tsk->sk); | ||
3259 | } | 3259 | } |
3260 | rhashtable_walk_stop(iter); | ||
3260 | out: | 3261 | out: |
3261 | rcu_read_unlock(); | ||
3262 | cb->args[0] = tbl_id; | ||
3263 | cb->args[1] = prev_portid; | ||
3264 | |||
3265 | return skb->len; | 3262 | return skb->len; |
3266 | } | 3263 | } |
3267 | EXPORT_SYMBOL(tipc_nl_sk_walk); | 3264 | EXPORT_SYMBOL(tipc_nl_sk_walk); |
3268 | 3265 | ||
3266 | int tipc_dump_start(struct netlink_callback *cb) | ||
3267 | { | ||
3268 | struct rhashtable_iter *iter = (void *)cb->args[0]; | ||
3269 | struct net *net = sock_net(cb->skb->sk); | ||
3270 | struct tipc_net *tn = tipc_net(net); | ||
3271 | |||
3272 | if (!iter) { | ||
3273 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | ||
3274 | if (!iter) | ||
3275 | return -ENOMEM; | ||
3276 | |||
3277 | cb->args[0] = (long)iter; | ||
3278 | } | ||
3279 | |||
3280 | rhashtable_walk_enter(&tn->sk_rht, iter); | ||
3281 | return 0; | ||
3282 | } | ||
3283 | EXPORT_SYMBOL(tipc_dump_start); | ||
3284 | |||
3285 | int tipc_dump_done(struct netlink_callback *cb) | ||
3286 | { | ||
3287 | struct rhashtable_iter *hti = (void *)cb->args[0]; | ||
3288 | |||
3289 | rhashtable_walk_exit(hti); | ||
3290 | kfree(hti); | ||
3291 | return 0; | ||
3292 | } | ||
3293 | EXPORT_SYMBOL(tipc_dump_done); | ||
3294 | |||
3269 | int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, | 3295 | int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, |
3270 | struct tipc_sock *tsk, u32 sk_filter_state, | 3296 | struct tipc_sock *tsk, u32 sk_filter_state, |
3271 | u64 (*tipc_diag_gen_cookie)(struct sock *sk)) | 3297 | u64 (*tipc_diag_gen_cookie)(struct sock *sk)) |
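The tipc dump rework above stores an rhashtable_iter allocated by tipc_dump_start() in cb->args[0], resumes the walk from it on every tipc_nl_sk_walk() call, and frees it in tipc_dump_done(), with the diag and genl ops gaining matching .start/.done hooks. A rough userspace model of that resumable-dump state machine is below; the types are stand-ins, not the rhashtable or netlink APIs, and it assumes long is pointer-sized as it is on Linux.

#include <stdio.h>
#include <stdlib.h>

struct iter {
        int next;                       /* stand-in for rhashtable_iter */
};

struct callback {
        long args[2];                   /* mirrors netlink_callback's long args;
                                         * assumes long is pointer-sized (Linux) */
};

static int dump_start(struct callback *cb)
{
        struct iter *it = calloc(1, sizeof(*it));

        if (!it)
                return -1;
        cb->args[0] = (long)it;         /* opaque state kept between dump calls */
        return 0;
}

/* Emit at most `budget` entries per call; return how many were emitted. */
static int dump_some(struct callback *cb, int total, int budget)
{
        struct iter *it = (struct iter *)cb->args[0];
        int emitted = 0;

        while (it->next < total && emitted < budget) {
                printf("dumping entry %d\n", it->next++);
                emitted++;
        }
        return emitted;
}

static void dump_done(struct callback *cb)
{
        free((struct iter *)cb->args[0]);
}

int main(void)
{
        struct callback cb = { { 0, 0 } };

        if (dump_start(&cb))
                return 1;
        while (dump_some(&cb, 5, 2))    /* each call resumes where the last stopped */
                ;
        dump_done(&cb);
        return 0;
}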
diff --git a/net/tipc/socket.h b/net/tipc/socket.h index aff9b2ae5a1f..d43032e26532 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h | |||
@@ -68,4 +68,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, | |||
68 | int (*skb_handler)(struct sk_buff *skb, | 68 | int (*skb_handler)(struct sk_buff *skb, |
69 | struct netlink_callback *cb, | 69 | struct netlink_callback *cb, |
70 | struct tipc_sock *tsk)); | 70 | struct tipc_sock *tsk)); |
71 | int tipc_dump_start(struct netlink_callback *cb); | ||
72 | int tipc_dump_done(struct netlink_callback *cb); | ||
71 | #endif | 73 | #endif |
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index c8e34ef22c30..2627b5d812e9 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c | |||
@@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work) | |||
313 | conn_put(con); | 313 | conn_put(con); |
314 | } | 314 | } |
315 | 315 | ||
316 | /* tipc_conn_queue_evt() - interrupt level call from a subscription instance | 316 | /* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance |
317 | * The queued work is launched into tipc_send_work()->tipc_send_to_sock() | 317 | * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock() |
318 | */ | 318 | */ |
319 | void tipc_topsrv_queue_evt(struct net *net, int conid, | 319 | void tipc_topsrv_queue_evt(struct net *net, int conid, |
320 | u32 event, struct tipc_event *evt) | 320 | u32 event, struct tipc_event *evt) |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 93c0c225ab34..180b6640e531 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk) | |||
213 | { | 213 | { |
214 | struct tls_context *ctx = tls_get_ctx(sk); | 214 | struct tls_context *ctx = tls_get_ctx(sk); |
215 | 215 | ||
216 | /* We are already sending pages, ignore notification */ | 216 | /* If in_tcp_sendpages call lower protocol write space handler |
217 | if (ctx->in_tcp_sendpages) | 217 | * to ensure we wake up any waiting operations there. For example |
218 | * if do_tcp_sendpages where to call sk_wait_event. | ||
219 | */ | ||
220 | if (ctx->in_tcp_sendpages) { | ||
221 | ctx->sk_write_space(sk); | ||
218 | return; | 222 | return; |
223 | } | ||
219 | 224 | ||
220 | if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { | 225 | if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { |
221 | gfp_t sk_allocation = sk->sk_allocation; | 226 | gfp_t sk_allocation = sk->sk_allocation; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5fb9b7dd9831..4b8ec659e797 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, | |||
669 | goto nla_put_failure; | 669 | goto nla_put_failure; |
670 | 670 | ||
671 | if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, | 671 | if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, |
672 | rule->wmm_rule->client[j].cw_min) || | 672 | rule->wmm_rule.client[j].cw_min) || |
673 | nla_put_u16(msg, NL80211_WMMR_CW_MAX, | 673 | nla_put_u16(msg, NL80211_WMMR_CW_MAX, |
674 | rule->wmm_rule->client[j].cw_max) || | 674 | rule->wmm_rule.client[j].cw_max) || |
675 | nla_put_u8(msg, NL80211_WMMR_AIFSN, | 675 | nla_put_u8(msg, NL80211_WMMR_AIFSN, |
676 | rule->wmm_rule->client[j].aifsn) || | 676 | rule->wmm_rule.client[j].aifsn) || |
677 | nla_put_u8(msg, NL80211_WMMR_TXOP, | 677 | nla_put_u16(msg, NL80211_WMMR_TXOP, |
678 | rule->wmm_rule->client[j].cot)) | 678 | rule->wmm_rule.client[j].cot)) |
679 | goto nla_put_failure; | 679 | goto nla_put_failure; |
680 | 680 | ||
681 | nla_nest_end(msg, nl_wmm_rule); | 681 | nla_nest_end(msg, nl_wmm_rule); |
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, | |||
766 | 766 | ||
767 | if (large) { | 767 | if (large) { |
768 | const struct ieee80211_reg_rule *rule = | 768 | const struct ieee80211_reg_rule *rule = |
769 | freq_reg_info(wiphy, chan->center_freq); | 769 | freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq)); |
770 | 770 | ||
771 | if (!IS_ERR(rule) && rule->wmm_rule) { | 771 | if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { |
772 | if (nl80211_msg_put_wmm_rules(msg, rule)) | 772 | if (nl80211_msg_put_wmm_rules(msg, rule)) |
773 | goto nla_put_failure; | 773 | goto nla_put_failure; |
774 | } | 774 | } |
@@ -12205,6 +12205,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) | |||
12205 | return -EOPNOTSUPP; | 12205 | return -EOPNOTSUPP; |
12206 | 12206 | ||
12207 | if (!info->attrs[NL80211_ATTR_MDID] || | 12207 | if (!info->attrs[NL80211_ATTR_MDID] || |
12208 | !info->attrs[NL80211_ATTR_IE] || | ||
12208 | !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 12209 | !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
12209 | return -EINVAL; | 12210 | return -EINVAL; |
12210 | 12211 | ||
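The nl80211 hunks above and the reg.c hunks that follow replace the per-rule pointer into a separately allocated ieee80211_wmm_rule array with a wmm_rule embedded directly in ieee80211_reg_rule plus a has_wmm flag, which is why reg_copy_regd() no longer needs any pointer fix-ups. A tiny sketch of that layout choice is below; the field names are abbreviated and the structs are illustrative, not the cfg80211 definitions.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct wmm_rule {
        int cw_min, cw_max;             /* trimmed down for illustration */
};

struct reg_rule {
        int start_khz, end_khz;
        bool has_wmm;                   /* data below is valid only if set */
        struct wmm_rule wmm_rule;       /* embedded: no pointer to fix up on copy */
};

int main(void)
{
        struct reg_rule src = {
                .start_khz = 2402000, .end_khz = 2482000,
                .has_wmm = true,
                .wmm_rule = { .cw_min = 3, .cw_max = 7 },
        };
        struct reg_rule dst;

        memcpy(&dst, &src, sizeof(dst)); /* self-contained, unlike the old layout */
        if (dst.has_wmm)
                printf("cw_min=%d cw_max=%d\n",
                       dst.wmm_rule.cw_min, dst.wmm_rule.cw_max);
        return 0;
}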
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 4fc66a117b7d..2f702adf2912 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain * | |||
425 | reg_copy_regd(const struct ieee80211_regdomain *src_regd) | 425 | reg_copy_regd(const struct ieee80211_regdomain *src_regd) |
426 | { | 426 | { |
427 | struct ieee80211_regdomain *regd; | 427 | struct ieee80211_regdomain *regd; |
428 | int size_of_regd, size_of_wmms; | 428 | int size_of_regd; |
429 | unsigned int i; | 429 | unsigned int i; |
430 | struct ieee80211_wmm_rule *d_wmm, *s_wmm; | ||
431 | 430 | ||
432 | size_of_regd = | 431 | size_of_regd = |
433 | sizeof(struct ieee80211_regdomain) + | 432 | sizeof(struct ieee80211_regdomain) + |
434 | src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); | 433 | src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); |
435 | size_of_wmms = src_regd->n_wmm_rules * | ||
436 | sizeof(struct ieee80211_wmm_rule); | ||
437 | 434 | ||
438 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | 435 | regd = kzalloc(size_of_regd, GFP_KERNEL); |
439 | if (!regd) | 436 | if (!regd) |
440 | return ERR_PTR(-ENOMEM); | 437 | return ERR_PTR(-ENOMEM); |
441 | 438 | ||
442 | memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); | 439 | memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); |
443 | 440 | ||
444 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | 441 | for (i = 0; i < src_regd->n_reg_rules; i++) |
445 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd); | ||
446 | memcpy(d_wmm, s_wmm, size_of_wmms); | ||
447 | |||
448 | for (i = 0; i < src_regd->n_reg_rules; i++) { | ||
449 | memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], | 442 | memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], |
450 | sizeof(struct ieee80211_reg_rule)); | 443 | sizeof(struct ieee80211_reg_rule)); |
451 | if (!src_regd->reg_rules[i].wmm_rule) | ||
452 | continue; | ||
453 | 444 | ||
454 | regd->reg_rules[i].wmm_rule = d_wmm + | ||
455 | (src_regd->reg_rules[i].wmm_rule - s_wmm) / | ||
456 | sizeof(struct ieee80211_wmm_rule); | ||
457 | } | ||
458 | return regd; | 445 | return regd; |
459 | } | 446 | } |
460 | 447 | ||
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size) | |||
860 | return true; | 847 | return true; |
861 | } | 848 | } |
862 | 849 | ||
863 | static void set_wmm_rule(struct ieee80211_wmm_rule *rule, | 850 | static void set_wmm_rule(struct ieee80211_reg_rule *rrule, |
864 | struct fwdb_wmm_rule *wmm) | 851 | struct fwdb_wmm_rule *wmm) |
865 | { | 852 | { |
853 | struct ieee80211_wmm_rule *rule = &rrule->wmm_rule; | ||
866 | unsigned int i; | 854 | unsigned int i; |
867 | 855 | ||
868 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { | 856 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule, | |||
876 | rule->ap[i].aifsn = wmm->ap[i].aifsn; | 864 | rule->ap[i].aifsn = wmm->ap[i].aifsn; |
877 | rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); | 865 | rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); |
878 | } | 866 | } |
867 | |||
868 | rrule->has_wmm = true; | ||
879 | } | 869 | } |
880 | 870 | ||
881 | static int __regdb_query_wmm(const struct fwdb_header *db, | 871 | static int __regdb_query_wmm(const struct fwdb_header *db, |
882 | const struct fwdb_country *country, int freq, | 872 | const struct fwdb_country *country, int freq, |
883 | u32 *dbptr, struct ieee80211_wmm_rule *rule) | 873 | struct ieee80211_reg_rule *rule) |
884 | { | 874 | { |
885 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; | 875 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; |
886 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); | 876 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); |
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db, | |||
901 | wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; | 891 | wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; |
902 | wmm = (void *)((u8 *)db + wmm_ptr); | 892 | wmm = (void *)((u8 *)db + wmm_ptr); |
903 | set_wmm_rule(rule, wmm); | 893 | set_wmm_rule(rule, wmm); |
904 | if (dbptr) | ||
905 | *dbptr = wmm_ptr; | ||
906 | return 0; | 894 | return 0; |
907 | } | 895 | } |
908 | } | 896 | } |
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db, | |||
910 | return -ENODATA; | 898 | return -ENODATA; |
911 | } | 899 | } |
912 | 900 | ||
913 | int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | 901 | int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule) |
914 | struct ieee80211_wmm_rule *rule) | ||
915 | { | 902 | { |
916 | const struct fwdb_header *hdr = regdb; | 903 | const struct fwdb_header *hdr = regdb; |
917 | const struct fwdb_country *country; | 904 | const struct fwdb_country *country; |
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
925 | country = &hdr->country[0]; | 912 | country = &hdr->country[0]; |
926 | while (country->coll_ptr) { | 913 | while (country->coll_ptr) { |
927 | if (alpha2_equal(alpha2, country->alpha2)) | 914 | if (alpha2_equal(alpha2, country->alpha2)) |
928 | return __regdb_query_wmm(regdb, country, freq, dbptr, | 915 | return __regdb_query_wmm(regdb, country, freq, rule); |
929 | rule); | ||
930 | 916 | ||
931 | country++; | 917 | country++; |
932 | } | 918 | } |
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
935 | } | 921 | } |
936 | EXPORT_SYMBOL(reg_query_regdb_wmm); | 922 | EXPORT_SYMBOL(reg_query_regdb_wmm); |
937 | 923 | ||
938 | struct wmm_ptrs { | ||
939 | struct ieee80211_wmm_rule *rule; | ||
940 | u32 ptr; | ||
941 | }; | ||
942 | |||
943 | static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs, | ||
944 | u32 wmm_ptr, int n_wmms) | ||
945 | { | ||
946 | int i; | ||
947 | |||
948 | for (i = 0; i < n_wmms; i++) { | ||
949 | if (wmm_ptrs[i].ptr == wmm_ptr) | ||
950 | return wmm_ptrs[i].rule; | ||
951 | } | ||
952 | return NULL; | ||
953 | } | ||
954 | |||
955 | static int regdb_query_country(const struct fwdb_header *db, | 924 | static int regdb_query_country(const struct fwdb_header *db, |
956 | const struct fwdb_country *country) | 925 | const struct fwdb_country *country) |
957 | { | 926 | { |
958 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; | 927 | unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; |
959 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); | 928 | struct fwdb_collection *coll = (void *)((u8 *)db + ptr); |
960 | struct ieee80211_regdomain *regdom; | 929 | struct ieee80211_regdomain *regdom; |
961 | struct ieee80211_regdomain *tmp_rd; | 930 | unsigned int size_of_regd, i; |
962 | unsigned int size_of_regd, i, n_wmms = 0; | ||
963 | struct wmm_ptrs *wmm_ptrs; | ||
964 | 931 | ||
965 | size_of_regd = sizeof(struct ieee80211_regdomain) + | 932 | size_of_regd = sizeof(struct ieee80211_regdomain) + |
966 | coll->n_rules * sizeof(struct ieee80211_reg_rule); | 933 | coll->n_rules * sizeof(struct ieee80211_reg_rule); |
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
969 | if (!regdom) | 936 | if (!regdom) |
970 | return -ENOMEM; | 937 | return -ENOMEM; |
971 | 938 | ||
972 | wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL); | ||
973 | if (!wmm_ptrs) { | ||
974 | kfree(regdom); | ||
975 | return -ENOMEM; | ||
976 | } | ||
977 | |||
978 | regdom->n_reg_rules = coll->n_rules; | 939 | regdom->n_reg_rules = coll->n_rules; |
979 | regdom->alpha2[0] = country->alpha2[0]; | 940 | regdom->alpha2[0] = country->alpha2[0]; |
980 | regdom->alpha2[1] = country->alpha2[1]; | 941 | regdom->alpha2[1] = country->alpha2[1]; |
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db, | |||
1013 | 1000 * be16_to_cpu(rule->cac_timeout); | 974 | 1000 * be16_to_cpu(rule->cac_timeout); |
1014 | if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { | 975 | if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { |
1015 | u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; | 976 | u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; |
1016 | struct ieee80211_wmm_rule *wmm_pos = | 977 | struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr); |
1017 | find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms); | ||
1018 | struct fwdb_wmm_rule *wmm; | ||
1019 | struct ieee80211_wmm_rule *wmm_rule; | ||
1020 | |||
1021 | if (wmm_pos) { | ||
1022 | rrule->wmm_rule = wmm_pos; | ||
1023 | continue; | ||
1024 | } | ||
1025 | wmm = (void *)((u8 *)db + wmm_ptr); | ||
1026 | tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) * | ||
1027 | sizeof(struct ieee80211_wmm_rule), | ||
1028 | GFP_KERNEL); | ||
1029 | |||
1030 | if (!tmp_rd) { | ||
1031 | kfree(regdom); | ||
1032 | kfree(wmm_ptrs); | ||
1033 | return -ENOMEM; | ||
1034 | } | ||
1035 | regdom = tmp_rd; | ||
1036 | |||
1037 | wmm_rule = (struct ieee80211_wmm_rule *) | ||
1038 | ((u8 *)regdom + size_of_regd + n_wmms * | ||
1039 | sizeof(struct ieee80211_wmm_rule)); | ||
1040 | 978 | ||
1041 | set_wmm_rule(wmm_rule, wmm); | 979 | set_wmm_rule(rrule, wmm); |
1042 | wmm_ptrs[n_wmms].ptr = wmm_ptr; | ||
1043 | wmm_ptrs[n_wmms++].rule = wmm_rule; | ||
1044 | } | 980 | } |
1045 | } | 981 | } |
1046 | kfree(wmm_ptrs); | ||
1047 | 982 | ||
1048 | return reg_schedule_apply(regdom); | 983 | return reg_schedule_apply(regdom); |
1049 | } | 984 | } |
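Note on the net/wireless/reg.c change above: the separately allocated WMM array, the wmm_ptrs bookkeeping and the krealloc() of the regdomain all go away because each regulatory rule now carries its WMM parameters by value together with a has_wmm flag, as the reworked set_wmm_rule() shows. A minimal standalone sketch of that layout, using stand-in types rather than the real cfg80211 definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_ACS 4                       /* stands in for IEEE80211_NUM_ACS */

struct wmm_ac {                         /* stand-in for the per-AC limits */
        uint16_t cw_min, cw_max, cot;
        uint8_t aifsn;
};

struct wmm_rule {                       /* plays the role of ieee80211_wmm_rule */
        struct wmm_ac client[NUM_ACS], ap[NUM_ACS];
};

struct reg_rule {                       /* plays the role of ieee80211_reg_rule */
        /* frequency and power ranges elided */
        struct wmm_rule wmm_rule;       /* embedded, no longer a pointer */
        bool has_wmm;
};

int main(void)
{
        struct reg_rule src = { .has_wmm = true }, dst;

        src.wmm_rule.ap[0].aifsn = 2;
        /* With the WMM data embedded, copying a rule is a plain structure
         * copy, so reg_copy_regd() needs no size_of_wmms and no pointer
         * fix-up afterwards. */
        memcpy(&dst, &src, sizeof(dst));
        printf("has_wmm=%d aifsn=%d\n", dst.has_wmm, dst.wmm_rule.ap[0].aifsn);
        return 0;
}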
diff --git a/net/wireless/util.c b/net/wireless/util.c index e0825a019e9f..959ed3acd240 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, | |||
1456 | u8 *op_class) | 1456 | u8 *op_class) |
1457 | { | 1457 | { |
1458 | u8 vht_opclass; | 1458 | u8 vht_opclass; |
1459 | u16 freq = chandef->center_freq1; | 1459 | u32 freq = chandef->center_freq1; |
1460 | 1460 | ||
1461 | if (freq >= 2412 && freq <= 2472) { | 1461 | if (freq >= 2412 && freq <= 2472) { |
1462 | if (chandef->width > NL80211_CHAN_WIDTH_40) | 1462 | if (chandef->width > NL80211_CHAN_WIDTH_40) |
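The one-line net/wireless/util.c change widens the local variable to match the u32 center_freq1 field; with a u16 local, a 60 GHz centre frequency above 65535 MHz would be silently truncated before the range checks that follow. A small illustration of that narrowing (66960 MHz, DMG channel 5, is just an example value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t center_freq1 = 66960;      /* 60 GHz DMG channel 5 centre, in MHz */
        uint16_t truncated = center_freq1;  /* what the old u16 local stored */

        /* 66960 does not fit in 16 bits, so the narrowed copy wraps to 1424
         * and every range check made on it afterwards sees a bogus value. */
        printf("u32: %u  u16: %u\n", (unsigned)center_freq1, (unsigned)truncated);
        return 0;
}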
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 911ca6d3cb5a..bfe2dbea480b 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, | |||
74 | return 0; | 74 | return 0; |
75 | 75 | ||
76 | if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) | 76 | if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) |
77 | return force_zc ? -ENOTSUPP : 0; /* fail or fallback */ | 77 | return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */ |
78 | 78 | ||
79 | bpf.command = XDP_QUERY_XSK_UMEM; | 79 | bpf.command = XDP_QUERY_XSK_UMEM; |
80 | 80 | ||
81 | rtnl_lock(); | 81 | rtnl_lock(); |
82 | err = xdp_umem_query(dev, queue_id); | 82 | err = xdp_umem_query(dev, queue_id); |
83 | if (err) { | 83 | if (err) { |
84 | err = err < 0 ? -ENOTSUPP : -EBUSY; | 84 | err = err < 0 ? -EOPNOTSUPP : -EBUSY; |
85 | goto err_rtnl_unlock; | 85 | goto err_rtnl_unlock; |
86 | } | 86 | } |
87 | 87 | ||
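The net/xdp/xdp_umem.c hunk replaces ENOTSUPP with EOPNOTSUPP on paths whose return value can reach userspace: ENOTSUPP is a kernel-internal code (524 in include/linux/errno.h) that C libraries have no name or message for, while EOPNOTSUPP is a standard errno. A quick userspace check of the difference (the exact strerror() text is libc-dependent):

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* EOPNOTSUPP is a standard errno with a proper message... */
        printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
        /* ...while 524 -- the kernel-internal ENOTSUPP value -- has no
         * userspace name and typically prints as "Unknown error 524". */
        printf("524: %s\n", strerror(524));
        return 0;
}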
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c75413d05a63..ce53639a864a 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include | |||
@@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \ | |||
153 | # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) | 153 | # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) |
154 | cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) | 154 | cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) |
155 | 155 | ||
156 | # cc-if-fullversion | ||
157 | # Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1) | ||
158 | cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4)) | ||
159 | |||
160 | # cc-ldoption | 156 | # cc-ldoption |
161 | # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) | 157 | # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) |
162 | cc-ldoption = $(call try-run,\ | 158 | cc-ldoption = $(call try-run,\ |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 1c48572223d1..5a2d1c9578a0 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -246,8 +246,6 @@ objtool_args += --no-fp | |||
246 | endif | 246 | endif |
247 | ifdef CONFIG_GCOV_KERNEL | 247 | ifdef CONFIG_GCOV_KERNEL |
248 | objtool_args += --no-unreachable | 248 | objtool_args += --no-unreachable |
249 | else | ||
250 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) | ||
251 | endif | 249 | endif |
252 | ifdef CONFIG_RETPOLINE | 250 | ifdef CONFIG_RETPOLINE |
253 | ifneq ($(RETPOLINE_CFLAGS),) | 251 | ifneq ($(RETPOLINE_CFLAGS),) |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 5219280bf7ff..161b0224d6ae 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -380,6 +380,7 @@ our $Attribute = qr{ | |||
380 | __noclone| | 380 | __noclone| |
381 | __deprecated| | 381 | __deprecated| |
382 | __read_mostly| | 382 | __read_mostly| |
383 | __ro_after_init| | ||
383 | __kprobes| | 384 | __kprobes| |
384 | $InitAttribute| | 385 | $InitAttribute| |
385 | ____cacheline_aligned| | 386 | ____cacheline_aligned| |
@@ -3311,7 +3312,7 @@ sub process { | |||
3311 | # known declaration macros | 3312 | # known declaration macros |
3312 | $sline =~ /^\+\s+$declaration_macros/ || | 3313 | $sline =~ /^\+\s+$declaration_macros/ || |
3313 | # start of struct or union or enum | 3314 | # start of struct or union or enum |
3314 | $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ || | 3315 | $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ || |
3315 | # start or end of block or continuation of declaration | 3316 | # start or end of block or continuation of declaration |
3316 | $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ || | 3317 | $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ || |
3317 | # bitfield continuation | 3318 | # bitfield continuation |
diff --git a/scripts/depmod.sh b/scripts/depmod.sh index 999d585eaa73..e083bcae343f 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh | |||
@@ -11,13 +11,14 @@ DEPMOD=$1 | |||
11 | KERNELRELEASE=$2 | 11 | KERNELRELEASE=$2 |
12 | 12 | ||
13 | if ! test -r System.map ; then | 13 | if ! test -r System.map ; then |
14 | echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2 | ||
14 | exit 0 | 15 | exit 0 |
15 | fi | 16 | fi |
16 | 17 | ||
17 | if [ -z $(command -v $DEPMOD) ]; then | 18 | if [ -z $(command -v $DEPMOD) ]; then |
18 | echo "'make modules_install' requires $DEPMOD. Please install it." >&2 | 19 | echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 |
19 | echo "This is probably in the kmod package." >&2 | 20 | echo "This is probably in the kmod package." >&2 |
20 | exit 1 | 21 | exit 0 |
21 | fi | 22 | fi |
22 | 23 | ||
23 | # older versions of depmod require the version string to start with three | 24 | # older versions of depmod require the version string to start with three |
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 4a7bd2192073..67ed9f6ccdf8 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile | |||
@@ -221,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c | |||
221 | 221 | ||
222 | # check if necessary packages are available, and configure build flags | 222 | # check if necessary packages are available, and configure build flags |
223 | define filechk_conf_cfg | 223 | define filechk_conf_cfg |
224 | $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \ | ||
225 | $(CONFIG_SHELL) $< | 224 | $(CONFIG_SHELL) $< |
226 | endef | 225 | endef |
227 | 226 | ||
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh deleted file mode 100644 index 7a1c40bfb58c..000000000000 --- a/scripts/kconfig/check-pkgconfig.sh +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | # Check for pkg-config presence | ||
4 | |||
5 | if [ -z $(command -v pkg-config) ]; then | ||
6 | echo "'make *config' requires 'pkg-config'. Please install it." 1>&2 | ||
7 | exit 1 | ||
8 | fi | ||
diff --git a/scripts/kconfig/gconf-cfg.sh b/scripts/kconfig/gconf-cfg.sh index 533b3d8f8f08..480ecd8b9f41 100755 --- a/scripts/kconfig/gconf-cfg.sh +++ b/scripts/kconfig/gconf-cfg.sh | |||
@@ -3,6 +3,13 @@ | |||
3 | 3 | ||
4 | PKG="gtk+-2.0 gmodule-2.0 libglade-2.0" | 4 | PKG="gtk+-2.0 gmodule-2.0 libglade-2.0" |
5 | 5 | ||
6 | if [ -z "$(command -v pkg-config)" ]; then | ||
7 | echo >&2 "*" | ||
8 | echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it." | ||
9 | echo >&2 "*" | ||
10 | exit 1 | ||
11 | fi | ||
12 | |||
6 | if ! pkg-config --exists $PKG; then | 13 | if ! pkg-config --exists $PKG; then |
7 | echo >&2 "*" | 14 | echo >&2 "*" |
8 | echo >&2 "* Unable to find the GTK+ installation. Please make sure that" | 15 | echo >&2 "* Unable to find the GTK+ installation. Please make sure that" |
diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh index e6f9facd0077..c812872d7f9d 100755 --- a/scripts/kconfig/mconf-cfg.sh +++ b/scripts/kconfig/mconf-cfg.sh | |||
@@ -4,20 +4,23 @@ | |||
4 | PKG="ncursesw" | 4 | PKG="ncursesw" |
5 | PKG2="ncurses" | 5 | PKG2="ncurses" |
6 | 6 | ||
7 | if pkg-config --exists $PKG; then | 7 | if [ -n "$(command -v pkg-config)" ]; then |
8 | echo cflags=\"$(pkg-config --cflags $PKG)\" | 8 | if pkg-config --exists $PKG; then |
9 | echo libs=\"$(pkg-config --libs $PKG)\" | 9 | echo cflags=\"$(pkg-config --cflags $PKG)\" |
10 | exit 0 | 10 | echo libs=\"$(pkg-config --libs $PKG)\" |
11 | fi | 11 | exit 0 |
12 | fi | ||
12 | 13 | ||
13 | if pkg-config --exists $PKG2; then | 14 | if pkg-config --exists $PKG2; then |
14 | echo cflags=\"$(pkg-config --cflags $PKG2)\" | 15 | echo cflags=\"$(pkg-config --cflags $PKG2)\" |
15 | echo libs=\"$(pkg-config --libs $PKG2)\" | 16 | echo libs=\"$(pkg-config --libs $PKG2)\" |
16 | exit 0 | 17 | exit 0 |
18 | fi | ||
17 | fi | 19 | fi |
18 | 20 | ||
19 | # Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses | 21 | # Check the default paths in case pkg-config is not installed. |
20 | # by pkg-config. | 22 | # (Even if it is installed, some distributions such as openSUSE cannot |
23 | # find ncurses by pkg-config.) | ||
21 | if [ -f /usr/include/ncursesw/ncurses.h ]; then | 24 | if [ -f /usr/include/ncursesw/ncurses.h ]; then |
22 | echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" | 25 | echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" |
23 | echo libs=\"-lncursesw\" | 26 | echo libs=\"-lncursesw\" |
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index 83b5836615fb..143c05fec161 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c | |||
@@ -490,7 +490,6 @@ static void build_conf(struct menu *menu) | |||
490 | switch (prop->type) { | 490 | switch (prop->type) { |
491 | case P_MENU: | 491 | case P_MENU: |
492 | child_count++; | 492 | child_count++; |
493 | prompt = prompt; | ||
494 | if (single_menu_mode) { | 493 | if (single_menu_mode) { |
495 | item_make("%s%*c%s", | 494 | item_make("%s%*c%s", |
496 | menu->data ? "-->" : "++>", | 495 | menu->data ? "-->" : "++>", |
diff --git a/scripts/kconfig/nconf-cfg.sh b/scripts/kconfig/nconf-cfg.sh index 42f5ac73548e..001559ef0a60 100644 --- a/scripts/kconfig/nconf-cfg.sh +++ b/scripts/kconfig/nconf-cfg.sh | |||
@@ -4,20 +4,23 @@ | |||
4 | PKG="ncursesw menuw panelw" | 4 | PKG="ncursesw menuw panelw" |
5 | PKG2="ncurses menu panel" | 5 | PKG2="ncurses menu panel" |
6 | 6 | ||
7 | if pkg-config --exists $PKG; then | 7 | if [ -n "$(command -v pkg-config)" ]; then |
8 | echo cflags=\"$(pkg-config --cflags $PKG)\" | 8 | if pkg-config --exists $PKG; then |
9 | echo libs=\"$(pkg-config --libs $PKG)\" | 9 | echo cflags=\"$(pkg-config --cflags $PKG)\" |
10 | exit 0 | 10 | echo libs=\"$(pkg-config --libs $PKG)\" |
11 | fi | 11 | exit 0 |
12 | fi | ||
12 | 13 | ||
13 | if pkg-config --exists $PKG2; then | 14 | if pkg-config --exists $PKG2; then |
14 | echo cflags=\"$(pkg-config --cflags $PKG2)\" | 15 | echo cflags=\"$(pkg-config --cflags $PKG2)\" |
15 | echo libs=\"$(pkg-config --libs $PKG2)\" | 16 | echo libs=\"$(pkg-config --libs $PKG2)\" |
16 | exit 0 | 17 | exit 0 |
18 | fi | ||
17 | fi | 19 | fi |
18 | 20 | ||
19 | # Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses | 21 | # Check the default paths in case pkg-config is not installed. |
20 | # by pkg-config. | 22 | # (Even if it is installed, some distributions such as openSUSE cannot |
23 | # find ncurses by pkg-config.) | ||
21 | if [ -f /usr/include/ncursesw/ncurses.h ]; then | 24 | if [ -f /usr/include/ncursesw/ncurses.h ]; then |
22 | echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" | 25 | echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" |
23 | echo libs=\"-lncursesw -lmenuw -lpanelw\" | 26 | echo libs=\"-lncursesw -lmenuw -lpanelw\" |
diff --git a/scripts/kconfig/qconf-cfg.sh b/scripts/kconfig/qconf-cfg.sh index 0862e1562536..02ccc0ae1031 100755 --- a/scripts/kconfig/qconf-cfg.sh +++ b/scripts/kconfig/qconf-cfg.sh | |||
@@ -4,6 +4,13 @@ | |||
4 | PKG="Qt5Core Qt5Gui Qt5Widgets" | 4 | PKG="Qt5Core Qt5Gui Qt5Widgets" |
5 | PKG2="QtCore QtGui" | 5 | PKG2="QtCore QtGui" |
6 | 6 | ||
7 | if [ -z "$(command -v pkg-config)" ]; then | ||
8 | echo >&2 "*" | ||
9 | echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it." | ||
10 | echo >&2 "*" | ||
11 | exit 1 | ||
12 | fi | ||
13 | |||
7 | if pkg-config --exists $PKG; then | 14 | if pkg-config --exists $PKG; then |
8 | echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\" | 15 | echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\" |
9 | echo libs=\"$(pkg-config --libs $PKG)\" | 16 | echo libs=\"$(pkg-config --libs $PKG)\" |
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index fe06e77c15eb..f599031260d5 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl | |||
@@ -389,6 +389,9 @@ if ($arch eq "x86_64") { | |||
389 | $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$"; | 389 | $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$"; |
390 | $type = ".quad"; | 390 | $type = ".quad"; |
391 | $alignment = 2; | 391 | $alignment = 2; |
392 | } elsif ($arch eq "nds32") { | ||
393 | $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$"; | ||
394 | $alignment = 2; | ||
392 | } else { | 395 | } else { |
393 | die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; | 396 | die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; |
394 | } | 397 | } |
diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 71f39410691b..79f7dd57d571 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion | |||
@@ -74,7 +74,7 @@ scm_version() | |||
74 | fi | 74 | fi |
75 | 75 | ||
76 | # Check for uncommitted changes | 76 | # Check for uncommitted changes |
77 | if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then | 77 | if git status -uno --porcelain | grep -qv '^.. scripts/package'; then |
78 | printf '%s' -dirty | 78 | printf '%s' -dirty |
79 | fi | 79 | fi |
80 | 80 | ||
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c index f2f22d00db18..4ccec1bcf6f5 100644 --- a/security/apparmor/secid.c +++ b/security/apparmor/secid.c | |||
@@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) | |||
79 | struct aa_label *label = aa_secid_to_label(secid); | 79 | struct aa_label *label = aa_secid_to_label(secid); |
80 | int len; | 80 | int len; |
81 | 81 | ||
82 | AA_BUG(!secdata); | ||
83 | AA_BUG(!seclen); | 82 | AA_BUG(!seclen); |
84 | 83 | ||
85 | if (!label) | 84 | if (!label) |
diff --git a/security/keys/dh.c b/security/keys/dh.c index 711e89d8c415..3b602a1e27fa 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c | |||
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params, | |||
300 | } | 300 | } |
301 | dh_inputs.g_size = dlen; | 301 | dh_inputs.g_size = dlen; |
302 | 302 | ||
303 | dlen = dh_data_from_key(pcopy.private, &dh_inputs.key); | 303 | dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key); |
304 | if (dlen < 0) { | 304 | if (dlen < 0) { |
305 | ret = dlen; | 305 | ret = dlen; |
306 | goto out2; | 306 | goto out2; |
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 69517e18ef07..08d5662039e3 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
@@ -129,7 +129,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream) | |||
129 | runtime->avail = 0; | 129 | runtime->avail = 0; |
130 | else | 130 | else |
131 | runtime->avail = runtime->buffer_size; | 131 | runtime->avail = runtime->buffer_size; |
132 | runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL); | 132 | runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL); |
133 | if (!runtime->buffer) { | 133 | if (!runtime->buffer) { |
134 | kfree(runtime); | 134 | kfree(runtime); |
135 | return -ENOMEM; | 135 | return -ENOMEM; |
@@ -655,7 +655,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime, | |||
655 | if (params->avail_min < 1 || params->avail_min > params->buffer_size) | 655 | if (params->avail_min < 1 || params->avail_min > params->buffer_size) |
656 | return -EINVAL; | 656 | return -EINVAL; |
657 | if (params->buffer_size != runtime->buffer_size) { | 657 | if (params->buffer_size != runtime->buffer_size) { |
658 | newbuf = kvmalloc(params->buffer_size, GFP_KERNEL); | 658 | newbuf = kvzalloc(params->buffer_size, GFP_KERNEL); |
659 | if (!newbuf) | 659 | if (!newbuf) |
660 | return -ENOMEM; | 660 | return -ENOMEM; |
661 | spin_lock_irq(&runtime->lock); | 661 | spin_lock_irq(&runtime->lock); |
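In sound/core/rawmidi.c the buffers are now allocated with kvzalloc() instead of kvmalloc(), i.e. zero-filled (kvzalloc() is kvmalloc() plus __GFP_ZERO), so a buffer that is only partially written before being read back cannot expose stale kernel memory. A userspace analogy of the same distinction, with calloc() standing in for kvzalloc() and malloc() for kvmalloc():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t size = 4096;
        /* calloc() returns zeroed memory, like kvzalloc(); malloc(), like
         * kvmalloc(), may hand back whatever the allocator had lying around. */
        unsigned char *zeroed = calloc(1, size);
        unsigned char *raw = malloc(size);

        if (!zeroed || !raw) {
                free(zeroed);
                free(raw);
                return 1;
        }

        printf("zeroed[0] = %d\n", zeroed[0]);  /* always 0 */
        /* raw[0] is indeterminate -- exactly the kind of stale data the
         * rawmidi change keeps out of a buffer userspace can read back. */

        free(zeroed);
        free(raw);
        return 0;
}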
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c index 1bd27576db98..a835558ddbc9 100644 --- a/sound/hda/ext/hdac_ext_stream.c +++ b/sound/hda/ext/hdac_ext_stream.c | |||
@@ -146,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple); | |||
146 | */ | 146 | */ |
147 | void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream) | 147 | void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream) |
148 | { | 148 | { |
149 | snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN); | 149 | snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, |
150 | AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN); | ||
150 | } | 151 | } |
151 | EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start); | 152 | EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start); |
152 | 153 | ||
@@ -171,7 +172,8 @@ void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *stream) | |||
171 | 172 | ||
172 | snd_hdac_ext_link_stream_clear(stream); | 173 | snd_hdac_ext_link_stream_clear(stream); |
173 | 174 | ||
174 | snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST); | 175 | snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, |
176 | AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST); | ||
175 | udelay(3); | 177 | udelay(3); |
176 | timeout = 50; | 178 | timeout = 50; |
177 | do { | 179 | do { |
@@ -242,7 +244,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_link_set_stream_id); | |||
242 | void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, | 244 | void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, |
243 | int stream) | 245 | int stream) |
244 | { | 246 | { |
245 | snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream)); | 247 | snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0); |
246 | } | 248 | } |
247 | EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id); | 249 | EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id); |
248 | 250 | ||
@@ -415,7 +417,6 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus, | |||
415 | bool enable, int index) | 417 | bool enable, int index) |
416 | { | 418 | { |
417 | u32 mask = 0; | 419 | u32 mask = 0; |
418 | u32 register_mask = 0; | ||
419 | 420 | ||
420 | if (!bus->spbcap) { | 421 | if (!bus->spbcap) { |
421 | dev_err(bus->dev, "Address of SPB capability is NULL\n"); | 422 | dev_err(bus->dev, "Address of SPB capability is NULL\n"); |
@@ -424,12 +425,8 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus, | |||
424 | 425 | ||
425 | mask |= (1 << index); | 426 | mask |= (1 << index); |
426 | 427 | ||
427 | register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL); | ||
428 | |||
429 | mask |= register_mask; | ||
430 | |||
431 | if (enable) | 428 | if (enable) |
432 | snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask); | 429 | snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask); |
433 | else | 430 | else |
434 | snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0); | 431 | snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0); |
435 | } | 432 | } |
@@ -503,7 +500,6 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus, | |||
503 | bool enable, int index) | 500 | bool enable, int index) |
504 | { | 501 | { |
505 | u32 mask = 0; | 502 | u32 mask = 0; |
506 | u32 register_mask = 0; | ||
507 | 503 | ||
508 | if (!bus->drsmcap) { | 504 | if (!bus->drsmcap) { |
509 | dev_err(bus->dev, "Address of DRSM capability is NULL\n"); | 505 | dev_err(bus->dev, "Address of DRSM capability is NULL\n"); |
@@ -512,12 +508,8 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus, | |||
512 | 508 | ||
513 | mask |= (1 << index); | 509 | mask |= (1 << index); |
514 | 510 | ||
515 | register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL); | ||
516 | |||
517 | mask |= register_mask; | ||
518 | |||
519 | if (enable) | 511 | if (enable) |
520 | snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask); | 512 | snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask); |
521 | else | 513 | else |
522 | snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0); | 514 | snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0); |
523 | } | 515 | } |
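The sound/hda/ext/hdac_ext_stream.c hunks all fix the argument order of snd_hdac_updatel()/snd_hdac_updatew(): the third argument selects the bits that may change and the fourth supplies their new value, so a zero mask cannot clear anything, which is how snd_hdac_ext_link_clear_stream_id() ended up setting the bit it meant to clear. A generic read-modify-write sketch of that mask/value convention (not the in-tree macros, which live in include/sound/hdaudio.h):

#include <stdint.h>
#include <stdio.h>

/* 'mask' selects the bits that may change, 'val' supplies their new state. */
static uint32_t rmw(uint32_t old, uint32_t mask, uint32_t val)
{
        return (old & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t RUN = 0x02;

        /* Set a bit: mask == val == RUN. */
        printf("set:   %#x\n", rmw(0x0d, RUN, RUN));  /* 0xf */
        /* Clear a bit: mask == RUN, val == 0 -- something a zero mask
         * could never express. */
        printf("clear: %#x\n", rmw(0x0f, RUN, 0));    /* 0xd */
        return 0;
}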
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 0a5085537034..26d348b47867 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -3935,7 +3935,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus) | |||
3935 | 3935 | ||
3936 | list_for_each_codec(codec, bus) { | 3936 | list_for_each_codec(codec, bus) { |
3937 | /* FIXME: maybe a better way needed for forced reset */ | 3937 | /* FIXME: maybe a better way needed for forced reset */ |
3938 | cancel_delayed_work_sync(&codec->jackpoll_work); | 3938 | if (current_work() != &codec->jackpoll_work.work) |
3939 | cancel_delayed_work_sync(&codec->jackpoll_work); | ||
3939 | #ifdef CONFIG_PM | 3940 | #ifdef CONFIG_PM |
3940 | if (hda_codec_is_power_on(codec)) { | 3941 | if (hda_codec_is_power_on(codec)) { |
3941 | hda_call_codec_suspend(codec); | 3942 | hda_call_codec_suspend(codec); |
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index b2ec20e562bd..b455930a3eaf 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c | |||
@@ -68,6 +68,7 @@ static const char * const map_type_name[] = { | |||
68 | [BPF_MAP_TYPE_DEVMAP] = "devmap", | 68 | [BPF_MAP_TYPE_DEVMAP] = "devmap", |
69 | [BPF_MAP_TYPE_SOCKMAP] = "sockmap", | 69 | [BPF_MAP_TYPE_SOCKMAP] = "sockmap", |
70 | [BPF_MAP_TYPE_CPUMAP] = "cpumap", | 70 | [BPF_MAP_TYPE_CPUMAP] = "cpumap", |
71 | [BPF_MAP_TYPE_XSKMAP] = "xskmap", | ||
71 | [BPF_MAP_TYPE_SOCKHASH] = "sockhash", | 72 | [BPF_MAP_TYPE_SOCKHASH] = "sockhash", |
72 | [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", | 73 | [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", |
73 | }; | 74 | }; |
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c index 1832100d1b27..6d41323be291 100644 --- a/tools/bpf/bpftool/map_perf_ring.c +++ b/tools/bpf/bpftool/map_perf_ring.c | |||
@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv) | |||
194 | } | 194 | } |
195 | 195 | ||
196 | while (argc) { | 196 | while (argc) { |
197 | if (argc < 2) | 197 | if (argc < 2) { |
198 | BAD_ARG(); | 198 | BAD_ARG(); |
199 | goto err_close_map; | ||
200 | } | ||
199 | 201 | ||
200 | if (is_prefix(*argv, "cpu")) { | 202 | if (is_prefix(*argv, "cpu")) { |
201 | char *endptr; | 203 | char *endptr; |
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv) | |||
221 | NEXT_ARG(); | 223 | NEXT_ARG(); |
222 | } else { | 224 | } else { |
223 | BAD_ARG(); | 225 | BAD_ARG(); |
226 | goto err_close_map; | ||
224 | } | 227 | } |
225 | 228 | ||
226 | do_all = false; | 229 | do_all = false; |
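In tools/bpf/bpftool/map_perf_ring.c the bad-argument branches now jump to the existing err_close_map label, so argument parsing stops and the map opened earlier is released on the way out. A minimal sketch of that single-cleanup-label idiom, with an ordinary file descriptor standing in for the map:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int parse_and_run(const char *path, int argc)
{
        int err = -1;
        int fd = open(path, O_RDONLY);   /* stands in for the opened map */

        if (fd < 0)
                return -1;

        if (argc < 2) {
                fprintf(stderr, "bad arguments\n");
                goto err_close;          /* release the fd on every error path */
        }

        /* ... the real work would go here ... */
        err = 0;

err_close:
        close(fd);
        return err;
}

int main(void)
{
        /* Too few arguments: the fd is still closed before returning. */
        return parse_and_run("/dev/null", 1) == -1 ? 0 : 1;
}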
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 56c4b3f8a01b..439b8a27488d 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat | |||
@@ -759,12 +759,18 @@ class DebugfsProvider(Provider): | |||
759 | if len(vms) == 0: | 759 | if len(vms) == 0: |
760 | self.do_read = False | 760 | self.do_read = False |
761 | 761 | ||
762 | self.paths = filter(lambda x: "{}-".format(pid) in x, vms) | 762 | self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms)) |
763 | 763 | ||
764 | else: | 764 | else: |
765 | self.paths = [] | 765 | self.paths = [] |
766 | self.do_read = True | 766 | self.do_read = True |
767 | self.reset() | 767 | |
768 | def _verify_paths(self): | ||
769 | """Remove invalid paths""" | ||
770 | for path in self.paths: | ||
771 | if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)): | ||
772 | self.paths.remove(path) | ||
773 | continue | ||
768 | 774 | ||
769 | def read(self, reset=0, by_guest=0): | 775 | def read(self, reset=0, by_guest=0): |
770 | """Returns a dict with format:'file name / field -> current value'. | 776 | """Returns a dict with format:'file name / field -> current value'. |
@@ -780,6 +786,7 @@ class DebugfsProvider(Provider): | |||
780 | # If no debugfs filtering support is available, then don't read. | 786 | # If no debugfs filtering support is available, then don't read. |
781 | if not self.do_read: | 787 | if not self.do_read: |
782 | return results | 788 | return results |
789 | self._verify_paths() | ||
783 | 790 | ||
784 | paths = self.paths | 791 | paths = self.paths |
785 | if self._pid == 0: | 792 | if self._pid == 0: |
@@ -1096,15 +1103,16 @@ class Tui(object): | |||
1096 | pid = self.stats.pid_filter | 1103 | pid = self.stats.pid_filter |
1097 | self.screen.erase() | 1104 | self.screen.erase() |
1098 | gname = self.get_gname_from_pid(pid) | 1105 | gname = self.get_gname_from_pid(pid) |
1106 | self._gname = gname | ||
1099 | if gname: | 1107 | if gname: |
1100 | gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...' | 1108 | gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...' |
1101 | if len(gname) > MAX_GUEST_NAME_LEN | 1109 | if len(gname) > MAX_GUEST_NAME_LEN |
1102 | else gname)) | 1110 | else gname)) |
1103 | if pid > 0: | 1111 | if pid > 0: |
1104 | self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}' | 1112 | self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname) |
1105 | .format(pid, gname), curses.A_BOLD) | ||
1106 | else: | 1113 | else: |
1107 | self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) | 1114 | self._headline = 'kvm statistics - summary' |
1115 | self.screen.addstr(0, 0, self._headline, curses.A_BOLD) | ||
1108 | if self.stats.fields_filter: | 1116 | if self.stats.fields_filter: |
1109 | regex = self.stats.fields_filter | 1117 | regex = self.stats.fields_filter |
1110 | if len(regex) > MAX_REGEX_LEN: | 1118 | if len(regex) > MAX_REGEX_LEN: |
@@ -1162,6 +1170,19 @@ class Tui(object): | |||
1162 | 1170 | ||
1163 | return sorted_items | 1171 | return sorted_items |
1164 | 1172 | ||
1173 | if not self._is_running_guest(self.stats.pid_filter): | ||
1174 | if self._gname: | ||
1175 | try: # ...to identify the guest by name in case it's back | ||
1176 | pids = self.get_pid_from_gname(self._gname) | ||
1177 | if len(pids) == 1: | ||
1178 | self._refresh_header(pids[0]) | ||
1179 | self._update_pid(pids[0]) | ||
1180 | return | ||
1181 | except: | ||
1182 | pass | ||
1183 | self._display_guest_dead() | ||
1184 | # leave final data on screen | ||
1185 | return | ||
1165 | row = 3 | 1186 | row = 3 |
1166 | self.screen.move(row, 0) | 1187 | self.screen.move(row, 0) |
1167 | self.screen.clrtobot() | 1188 | self.screen.clrtobot() |
@@ -1184,6 +1205,7 @@ class Tui(object): | |||
1184 | # print events | 1205 | # print events |
1185 | tavg = 0 | 1206 | tavg = 0 |
1186 | tcur = 0 | 1207 | tcur = 0 |
1208 | guest_removed = False | ||
1187 | for key, values in get_sorted_events(self, stats): | 1209 | for key, values in get_sorted_events(self, stats): |
1188 | if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): | 1210 | if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): |
1189 | break | 1211 | break |
@@ -1191,7 +1213,10 @@ class Tui(object): | |||
1191 | key = self.get_gname_from_pid(key) | 1213 | key = self.get_gname_from_pid(key) |
1192 | if not key: | 1214 | if not key: |
1193 | continue | 1215 | continue |
1194 | cur = int(round(values.delta / sleeptime)) if values.delta else '' | 1216 | cur = int(round(values.delta / sleeptime)) if values.delta else 0 |
1217 | if cur < 0: | ||
1218 | guest_removed = True | ||
1219 | continue | ||
1195 | if key[0] != ' ': | 1220 | if key[0] != ' ': |
1196 | if values.delta: | 1221 | if values.delta: |
1197 | tcur += values.delta | 1222 | tcur += values.delta |
@@ -1204,13 +1229,21 @@ class Tui(object): | |||
1204 | values.value * 100 / float(ltotal), cur)) | 1229 | values.value * 100 / float(ltotal), cur)) |
1205 | row += 1 | 1230 | row += 1 |
1206 | if row == 3: | 1231 | if row == 3: |
1207 | self.screen.addstr(4, 1, 'No matching events reported yet') | 1232 | if guest_removed: |
1233 | self.screen.addstr(4, 1, 'Guest removed, updating...') | ||
1234 | else: | ||
1235 | self.screen.addstr(4, 1, 'No matching events reported yet') | ||
1208 | if row > 4: | 1236 | if row > 4: |
1209 | tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' | 1237 | tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' |
1210 | self.screen.addstr(row, 1, '%-40s %10d %8s' % | 1238 | self.screen.addstr(row, 1, '%-40s %10d %8s' % |
1211 | ('Total', total, tavg), curses.A_BOLD) | 1239 | ('Total', total, tavg), curses.A_BOLD) |
1212 | self.screen.refresh() | 1240 | self.screen.refresh() |
1213 | 1241 | ||
1242 | def _display_guest_dead(self): | ||
1243 | marker = ' Guest is DEAD ' | ||
1244 | y = min(len(self._headline), 80 - len(marker)) | ||
1245 | self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT) | ||
1246 | |||
1214 | def _show_msg(self, text): | 1247 | def _show_msg(self, text): |
1215 | """Display message centered text and exit on key press""" | 1248 | """Display message centered text and exit on key press""" |
1216 | hint = 'Press any key to continue' | 1249 | hint = 'Press any key to continue' |
@@ -1219,10 +1252,10 @@ class Tui(object): | |||
1219 | (x, term_width) = self.screen.getmaxyx() | 1252 | (x, term_width) = self.screen.getmaxyx() |
1220 | row = 2 | 1253 | row = 2 |
1221 | for line in text: | 1254 | for line in text: |
1222 | start = (term_width - len(line)) / 2 | 1255 | start = (term_width - len(line)) // 2 |
1223 | self.screen.addstr(row, start, line) | 1256 | self.screen.addstr(row, start, line) |
1224 | row += 1 | 1257 | row += 1 |
1225 | self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint, | 1258 | self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint, |
1226 | curses.A_STANDOUT) | 1259 | curses.A_STANDOUT) |
1227 | self.screen.getkey() | 1260 | self.screen.getkey() |
1228 | 1261 | ||
@@ -1319,6 +1352,12 @@ class Tui(object): | |||
1319 | msg = '"' + str(val) + '": Invalid value' | 1352 | msg = '"' + str(val) + '": Invalid value' |
1320 | self._refresh_header() | 1353 | self._refresh_header() |
1321 | 1354 | ||
1355 | def _is_running_guest(self, pid): | ||
1356 | """Check if pid is still a running process.""" | ||
1357 | if not pid: | ||
1358 | return True | ||
1359 | return os.path.isdir(os.path.join('/proc/', str(pid))) | ||
1360 | |||
1322 | def _show_vm_selection_by_guest(self): | 1361 | def _show_vm_selection_by_guest(self): |
1323 | """Draws guest selection mask. | 1362 | """Draws guest selection mask. |
1324 | 1363 | ||
@@ -1346,7 +1385,7 @@ class Tui(object): | |||
1346 | if not guest or guest == '0': | 1385 | if not guest or guest == '0': |
1347 | break | 1386 | break |
1348 | if guest.isdigit(): | 1387 | if guest.isdigit(): |
1349 | if not os.path.isdir(os.path.join('/proc/', guest)): | 1388 | if not self._is_running_guest(guest): |
1350 | msg = '"' + guest + '": Not a running process' | 1389 | msg = '"' + guest + '": Not a running process' |
1351 | continue | 1390 | continue |
1352 | pid = int(guest) | 1391 | pid = int(guest) |
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index f8cc38afffa2..32a194e3e07a 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh | |||
@@ -46,6 +46,9 @@ | |||
46 | # Kselftest framework requirement - SKIP code is 4. | 46 | # Kselftest framework requirement - SKIP code is 4. |
47 | ksft_skip=4 | 47 | ksft_skip=4 |
48 | 48 | ||
49 | # Some systems don't have a ping6 binary anymore | ||
50 | which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping) | ||
51 | |||
49 | tests=" | 52 | tests=" |
50 | pmtu_vti6_exception vti6: PMTU exceptions | 53 | pmtu_vti6_exception vti6: PMTU exceptions |
51 | pmtu_vti4_exception vti4: PMTU exceptions | 54 | pmtu_vti4_exception vti4: PMTU exceptions |
@@ -274,7 +277,7 @@ test_pmtu_vti6_exception() { | |||
274 | mtu "${ns_b}" veth_b 4000 | 277 | mtu "${ns_b}" veth_b 4000 |
275 | mtu "${ns_a}" vti6_a 5000 | 278 | mtu "${ns_a}" vti6_a 5000 |
276 | mtu "${ns_b}" vti6_b 5000 | 279 | mtu "${ns_b}" vti6_b 5000 |
277 | ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null | 280 | ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null |
278 | 281 | ||
279 | # Check that exception was created | 282 | # Check that exception was created |
280 | if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then | 283 | if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then |
@@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() { | |||
334 | fail=0 | 337 | fail=0 |
335 | 338 | ||
336 | min=68 | 339 | min=68 |
337 | max=$((65528 - 20)) | 340 | max=$((65535 - 20)) |
338 | # Check invalid values first | 341 | # Check invalid values first |
339 | for v in $((min - 1)) $((max + 1)); do | 342 | for v in $((min - 1)) $((max + 1)); do |
340 | ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null | 343 | ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json index f03763d81617..30f9b54bd666 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json | |||
@@ -313,6 +313,54 @@ | |||
313 | ] | 313 | ] |
314 | }, | 314 | }, |
315 | { | 315 | { |
316 | "id": "6aaf", | ||
317 | "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]", | ||
318 | "category": [ | ||
319 | "actions", | ||
320 | "police" | ||
321 | ], | ||
322 | "setup": [ | ||
323 | [ | ||
324 | "$TC actions flush action police", | ||
325 | 0, | ||
326 | 1, | ||
327 | 255 | ||
328 | ] | ||
329 | ], | ||
330 | "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1", | ||
331 | "expExitCode": "0", | ||
332 | "verifyCmd": "$TC actions get action police index 1", | ||
333 | "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe", | ||
334 | "matchCount": "1", | ||
335 | "teardown": [ | ||
336 | "$TC actions flush action police" | ||
337 | ] | ||
338 | }, | ||
339 | { | ||
340 | "id": "29b1", | ||
341 | "name": "Add police actions with conform-exceed control <invalid>/drop", | ||
342 | "category": [ | ||
343 | "actions", | ||
344 | "police" | ||
345 | ], | ||
346 | "setup": [ | ||
347 | [ | ||
348 | "$TC actions flush action police", | ||
349 | 0, | ||
350 | 1, | ||
351 | 255 | ||
352 | ] | ||
353 | ], | ||
354 | "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1", | ||
355 | "expExitCode": "255", | ||
356 | "verifyCmd": "$TC actions ls action police", | ||
357 | "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ", | ||
358 | "matchCount": "0", | ||
359 | "teardown": [ | ||
360 | "$TC actions flush action police" | ||
361 | ] | ||
362 | }, | ||
363 | { | ||
316 | "id": "c26f", | 364 | "id": "c26f", |
317 | "name": "Add police action with invalid peakrate value", | 365 | "name": "Add police action with invalid peakrate value", |
318 | "category": [ | 366 | "category": [ |
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index 30cb0a0713ff..37908a83ddc2 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c | |||
@@ -159,12 +159,6 @@ static const char * const page_flag_names[] = { | |||
159 | }; | 159 | }; |
160 | 160 | ||
161 | 161 | ||
162 | static const char * const debugfs_known_mountpoints[] = { | ||
163 | "/sys/kernel/debug", | ||
164 | "/debug", | ||
165 | 0, | ||
166 | }; | ||
167 | |||
168 | /* | 162 | /* |
169 | * data structures | 163 | * data structures |
170 | */ | 164 | */ |
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c index f82c2eaa859d..334b16db0ebb 100644 --- a/tools/vm/slabinfo.c +++ b/tools/vm/slabinfo.c | |||
@@ -30,8 +30,8 @@ struct slabinfo { | |||
30 | int alias; | 30 | int alias; |
31 | int refs; | 31 | int refs; |
32 | int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu; | 32 | int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu; |
33 | int hwcache_align, object_size, objs_per_slab; | 33 | unsigned int hwcache_align, object_size, objs_per_slab; |
34 | int sanity_checks, slab_size, store_user, trace; | 34 | unsigned int sanity_checks, slab_size, store_user, trace; |
35 | int order, poison, reclaim_account, red_zone; | 35 | int order, poison, reclaim_account, red_zone; |
36 | unsigned long partial, objects, slabs, objects_partial, objects_total; | 36 | unsigned long partial, objects, slabs, objects_partial, objects_total; |
37 | unsigned long alloc_fastpath, alloc_slowpath; | 37 | unsigned long alloc_fastpath, alloc_slowpath; |
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 91aaf73b00df..ed162a6c57c5 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c | |||
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat | |||
1817 | return 0; | 1817 | return 0; |
1818 | } | 1818 | } |
1819 | 1819 | ||
1820 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | ||
1821 | { | ||
1822 | unsigned long end = hva + PAGE_SIZE; | ||
1823 | |||
1824 | if (!kvm->arch.pgd) | ||
1825 | return 0; | ||
1826 | |||
1827 | trace_kvm_unmap_hva(hva); | ||
1828 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); | ||
1829 | return 0; | ||
1830 | } | ||
1831 | |||
1832 | int kvm_unmap_hva_range(struct kvm *kvm, | 1820 | int kvm_unmap_hva_range(struct kvm *kvm, |
1833 | unsigned long start, unsigned long end) | 1821 | unsigned long start, unsigned long end) |
1834 | { | 1822 | { |
@@ -1860,13 +1848,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data | |||
1860 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | 1848 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
1861 | { | 1849 | { |
1862 | unsigned long end = hva + PAGE_SIZE; | 1850 | unsigned long end = hva + PAGE_SIZE; |
1851 | kvm_pfn_t pfn = pte_pfn(pte); | ||
1863 | pte_t stage2_pte; | 1852 | pte_t stage2_pte; |
1864 | 1853 | ||
1865 | if (!kvm->arch.pgd) | 1854 | if (!kvm->arch.pgd) |
1866 | return; | 1855 | return; |
1867 | 1856 | ||
1868 | trace_kvm_set_spte_hva(hva); | 1857 | trace_kvm_set_spte_hva(hva); |
1869 | stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); | 1858 | |
1859 | /* | ||
1860 | * We've moved a page around, probably through CoW, so let's treat it | ||
1861 | * just like a translation fault and clean the cache to the PoC. | ||
1862 | */ | ||
1863 | clean_dcache_guest_page(pfn, PAGE_SIZE); | ||
1864 | stage2_pte = pfn_pte(pfn, PAGE_S2); | ||
1870 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); | 1865 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); |
1871 | } | 1866 | } |
1872 | 1867 | ||
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h index e53b596f483b..57b3edebbb40 100644 --- a/virt/kvm/arm/trace.h +++ b/virt/kvm/arm/trace.h | |||
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate, | |||
134 | __entry->vcpu_pc, __entry->instr, __entry->cpsr) | 134 | __entry->vcpu_pc, __entry->instr, __entry->cpsr) |
135 | ); | 135 | ); |
136 | 136 | ||
137 | TRACE_EVENT(kvm_unmap_hva, | ||
138 | TP_PROTO(unsigned long hva), | ||
139 | TP_ARGS(hva), | ||
140 | |||
141 | TP_STRUCT__entry( | ||
142 | __field( unsigned long, hva ) | ||
143 | ), | ||
144 | |||
145 | TP_fast_assign( | ||
146 | __entry->hva = hva; | ||
147 | ), | ||
148 | |||
149 | TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) | ||
150 | ); | ||
151 | |||
152 | TRACE_EVENT(kvm_unmap_hva_range, | 137 | TRACE_EVENT(kvm_unmap_hva_range, |
153 | TP_PROTO(unsigned long start, unsigned long end), | 138 | TP_PROTO(unsigned long start, unsigned long end), |
154 | TP_ARGS(start, end), | 139 | TP_ARGS(start, end), |