diff options
252 files changed, 2365 insertions, 1712 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend index 3d5951c8bf5f..e8b60bd766f7 100644 --- a/Documentation/ABI/stable/sysfs-bus-xen-backend +++ b/Documentation/ABI/stable/sysfs-bus-xen-backend | |||
| @@ -73,3 +73,12 @@ KernelVersion: 3.0 | |||
| 73 | Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 73 | Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
| 74 | Description: | 74 | Description: |
| 75 | Number of sectors written by the frontend. | 75 | Number of sectors written by the frontend. |
| 76 | |||
| 77 | What: /sys/bus/xen-backend/devices/*/state | ||
| 78 | Date: August 2018 | ||
| 79 | KernelVersion: 4.19 | ||
| 80 | Contact: Joe Jin <joe.jin@oracle.com> | ||
| 81 | Description: | ||
| 82 | The state of the device. One of: 'Unknown', | ||
| 83 | 'Initialising', 'Initialised', 'Connected', 'Closing', | ||
| 84 | 'Closed', 'Reconfiguring', 'Reconfigured'. | ||
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback index 8bb43b66eb55..4e7babb3ba1f 100644 --- a/Documentation/ABI/testing/sysfs-driver-xen-blkback +++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback | |||
| @@ -15,3 +15,13 @@ Description: | |||
| 15 | blkback. If the frontend tries to use more than | 15 | blkback. If the frontend tries to use more than |
| 16 | max_persistent_grants, the LRU kicks in and starts | 16 | max_persistent_grants, the LRU kicks in and starts |
| 17 | removing 5% of max_persistent_grants every 100ms. | 17 | removing 5% of max_persistent_grants every 100ms. |
| 18 | |||
| 19 | What: /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds | ||
| 20 | Date: August 2018 | ||
| 21 | KernelVersion: 4.19 | ||
| 22 | Contact: Roger Pau Monné <roger.pau@citrix.com> | ||
| 23 | Description: | ||
| 24 | How long a persistent grant is allowed to remain | ||
| 25 | allocated without being in use. The time is in | ||
| 26 | seconds, 0 means indefinitely long. | ||
| 27 | The default is 60 seconds. | ||
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt index f128f736b4a5..7169a0ec41d8 100644 --- a/Documentation/arm64/sve.txt +++ b/Documentation/arm64/sve.txt | |||
| @@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg) | |||
| 200 | thread. | 200 | thread. |
| 201 | 201 | ||
| 202 | * Changing the vector length causes all of P0..P15, FFR and all bits of | 202 | * Changing the vector length causes all of P0..P15, FFR and all bits of |
| 203 | Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become | 203 | Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become |
| 204 | unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current | 204 | unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current |
| 205 | vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC | 205 | vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC |
| 206 | flag, does not constitute a change to the vector length for this purpose. | 206 | flag, does not constitute a change to the vector length for this purpose. |
| @@ -500,7 +500,7 @@ References | |||
| 500 | [2] arch/arm64/include/uapi/asm/ptrace.h | 500 | [2] arch/arm64/include/uapi/asm/ptrace.h |
| 501 | AArch64 Linux ptrace ABI definitions | 501 | AArch64 Linux ptrace ABI definitions |
| 502 | 502 | ||
| 503 | [3] linux/Documentation/arm64/cpu-feature-registers.txt | 503 | [3] Documentation/arm64/cpu-feature-registers.txt |
| 504 | 504 | ||
| 505 | [4] ARM IHI0055C | 505 | [4] ARM IHI0055C |
| 506 | http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf | 506 | http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt index b0a8af51c388..265b223cd978 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt | |||
| @@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are | |||
| 11 | attached to every HLIC: software interrupts, the timer interrupt, and external | 11 | attached to every HLIC: software interrupts, the timer interrupt, and external |
| 12 | interrupts. Software interrupts are used to send IPIs between cores. The | 12 | interrupts. Software interrupts are used to send IPIs between cores. The |
| 13 | timer interrupt comes from an architecturally mandated real-time timer that is | 13 | timer interrupt comes from an architecturally mandated real-time timer that is |
| 14 | controller via Supervisor Binary Interface (SBI) calls and CSR reads. External | 14 | controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External |
| 15 | interrupts connect all other device interrupts to the HLIC, which are routed | 15 | interrupts connect all other device interrupts to the HLIC, which are routed |
| 16 | via the platform-level interrupt controller (PLIC). | 16 | via the platform-level interrupt controller (PLIC). |
| 17 | 17 | ||
| @@ -25,7 +25,15 @@ in the system. | |||
| 25 | 25 | ||
| 26 | Required properties: | 26 | Required properties: |
| 27 | - compatible : "riscv,cpu-intc" | 27 | - compatible : "riscv,cpu-intc" |
| 28 | - #interrupt-cells : should be <1> | 28 | - #interrupt-cells : should be <1>. The interrupt sources are defined by the |
| 29 | RISC-V supervisor ISA manual, with only the following three interrupts being | ||
| 30 | defined for supervisor mode: | ||
| 31 | - Source 1 is the supervisor software interrupt, which can be sent by an SBI | ||
| 32 | call and is reserved for use by software. | ||
| 33 | - Source 5 is the supervisor timer interrupt, which can be configured by | ||
| 34 | SBI calls and implements a one-shot timer. | ||
| 35 | - Source 9 is the supervisor external interrupt, which chains to all other | ||
| 36 | device interrupts. | ||
| 29 | - interrupt-controller : Identifies the node as an interrupt controller | 37 | - interrupt-controller : Identifies the node as an interrupt controller |
| 30 | 38 | ||
| 31 | Furthermore, this interrupt-controller MUST be embedded inside the cpu | 39 | Furthermore, this interrupt-controller MUST be embedded inside the cpu |
| @@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below. | |||
| 38 | ... | 46 | ... |
| 39 | cpu1-intc: interrupt-controller { | 47 | cpu1-intc: interrupt-controller { |
| 40 | #interrupt-cells = <1>; | 48 | #interrupt-cells = <1>; |
| 41 | compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc"; | 49 | compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc"; |
| 42 | interrupt-controller; | 50 | interrupt-controller; |
| 43 | }; | 51 | }; |
| 44 | }; | 52 | }; |
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt index 5d47a262474c..9407212a85a8 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt | |||
| @@ -7,6 +7,7 @@ Required properties: | |||
| 7 | Examples with soctypes are: | 7 | Examples with soctypes are: |
| 8 | - "renesas,r8a7743-wdt" (RZ/G1M) | 8 | - "renesas,r8a7743-wdt" (RZ/G1M) |
| 9 | - "renesas,r8a7745-wdt" (RZ/G1E) | 9 | - "renesas,r8a7745-wdt" (RZ/G1E) |
| 10 | - "renesas,r8a774a1-wdt" (RZ/G2M) | ||
| 10 | - "renesas,r8a7790-wdt" (R-Car H2) | 11 | - "renesas,r8a7790-wdt" (R-Car H2) |
| 11 | - "renesas,r8a7791-wdt" (R-Car M2-W) | 12 | - "renesas,r8a7791-wdt" (R-Car M2-W) |
| 12 | - "renesas,r8a7792-wdt" (R-Car V2H) | 13 | - "renesas,r8a7792-wdt" (R-Car V2H) |
| @@ -21,8 +22,8 @@ Required properties: | |||
| 21 | - "renesas,r7s72100-wdt" (RZ/A1) | 22 | - "renesas,r7s72100-wdt" (RZ/A1) |
| 22 | The generic compatible string must be: | 23 | The generic compatible string must be: |
| 23 | - "renesas,rza-wdt" for RZ/A | 24 | - "renesas,rza-wdt" for RZ/A |
| 24 | - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G | 25 | - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1 |
| 25 | - "renesas,rcar-gen3-wdt" for R-Car Gen3 | 26 | - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2 |
| 26 | 27 | ||
| 27 | - reg : Should contain WDT registers location and length | 28 | - reg : Should contain WDT registers location and length |
| 28 | - clocks : the clock feeding the watchdog timer. | 29 | - clocks : the clock feeding the watchdog timer. |
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index 72d16f08e431..b8df81f6d6bc 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx | |||
| @@ -32,7 +32,7 @@ Supported chips: | |||
| 32 | Datasheet: Publicly available at the Texas Instruments website | 32 | Datasheet: Publicly available at the Texas Instruments website |
| 33 | http://www.ti.com/ | 33 | http://www.ti.com/ |
| 34 | 34 | ||
| 35 | Author: Lothar Felten <l-felten@ti.com> | 35 | Author: Lothar Felten <lothar.felten@gmail.com> |
| 36 | 36 | ||
| 37 | Description | 37 | Description |
| 38 | ----------- | 38 | ----------- |
diff --git a/Documentation/i2c/DMA-considerations b/Documentation/i2c/DMA-considerations index 966610aa4620..203002054120 100644 --- a/Documentation/i2c/DMA-considerations +++ b/Documentation/i2c/DMA-considerations | |||
| @@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the | |||
| 50 | returned buffer. If NULL is returned, the threshold was not met or a bounce | 50 | returned buffer. If NULL is returned, the threshold was not met or a bounce |
| 51 | buffer could not be allocated. Fall back to PIO in that case. | 51 | buffer could not be allocated. Fall back to PIO in that case. |
| 52 | 52 | ||
| 53 | In any case, a buffer obtained from above needs to be released. It ensures data | 53 | In any case, a buffer obtained from above needs to be released. Another helper |
| 54 | is copied back to the message and a potentially used bounce buffer is freed:: | 54 | function ensures a potentially used bounce buffer is freed:: |
| 55 | 55 | ||
| 56 | i2c_release_dma_safe_msg_buf(msg, dma_buf); | 56 | i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred); |
| 57 | |||
| 58 | The last argument 'xferred' controls whether the buffer is synced back to the | ||
| 59 | message or not. No syncing is needed in cases where setting up DMA had an error | ||
| 60 | and no data was transferred. | ||
| 57 | 61 | ||
| 58 | The bounce buffer handling from the core is generic and simple. It will always | 62 | The bounce buffer handling from the core is generic and simple. It will always |
| 59 | allocate a new bounce buffer. If you want a more sophisticated handling (e.g. | 63 | allocate a new bounce buffer. If you want a more sophisticated handling (e.g. |
diff --git a/MAINTAINERS b/MAINTAINERS index a5b256b25905..9ad052aeac39 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -8255,9 +8255,9 @@ F: drivers/ata/pata_arasan_cf.c | |||
| 8255 | 8255 | ||
| 8256 | LIBATA PATA DRIVERS | 8256 | LIBATA PATA DRIVERS |
| 8257 | M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | 8257 | M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> |
| 8258 | M: Jens Axboe <kernel.dk> | 8258 | M: Jens Axboe <axboe@kernel.dk> |
| 8259 | L: linux-ide@vger.kernel.org | 8259 | L: linux-ide@vger.kernel.org |
| 8260 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git | 8260 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git |
| 8261 | S: Maintained | 8261 | S: Maintained |
| 8262 | F: drivers/ata/pata_*.c | 8262 | F: drivers/ata/pata_*.c |
| 8263 | F: drivers/ata/ata_generic.c | 8263 | F: drivers/ata/ata_generic.c |
| @@ -8275,7 +8275,7 @@ LIBATA SATA AHCI PLATFORM devices support | |||
| 8275 | M: Hans de Goede <hdegoede@redhat.com> | 8275 | M: Hans de Goede <hdegoede@redhat.com> |
| 8276 | M: Jens Axboe <axboe@kernel.dk> | 8276 | M: Jens Axboe <axboe@kernel.dk> |
| 8277 | L: linux-ide@vger.kernel.org | 8277 | L: linux-ide@vger.kernel.org |
| 8278 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git | 8278 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git |
| 8279 | S: Maintained | 8279 | S: Maintained |
| 8280 | F: drivers/ata/ahci_platform.c | 8280 | F: drivers/ata/ahci_platform.c |
| 8281 | F: drivers/ata/libahci_platform.c | 8281 | F: drivers/ata/libahci_platform.c |
| @@ -8291,7 +8291,7 @@ F: drivers/ata/sata_promise.* | |||
| 8291 | LIBATA SUBSYSTEM (Serial and Parallel ATA drivers) | 8291 | LIBATA SUBSYSTEM (Serial and Parallel ATA drivers) |
| 8292 | M: Jens Axboe <axboe@kernel.dk> | 8292 | M: Jens Axboe <axboe@kernel.dk> |
| 8293 | L: linux-ide@vger.kernel.org | 8293 | L: linux-ide@vger.kernel.org |
| 8294 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git | 8294 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git |
| 8295 | S: Maintained | 8295 | S: Maintained |
| 8296 | F: drivers/ata/ | 8296 | F: drivers/ata/ |
| 8297 | F: include/linux/ata.h | 8297 | F: include/linux/ata.h |
| @@ -2,7 +2,7 @@ | |||
| 2 | VERSION = 4 | 2 | VERSION = 4 |
| 3 | PATCHLEVEL = 19 | 3 | PATCHLEVEL = 19 |
| 4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
| 5 | EXTRAVERSION = -rc1 | 5 | EXTRAVERSION = -rc2 |
| 6 | NAME = Merciless Moray | 6 | NAME = Merciless Moray |
| 7 | 7 | ||
| 8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
| @@ -807,6 +807,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) | |||
| 807 | # disable pointer signed / unsigned warnings in gcc 4.0 | 807 | # disable pointer signed / unsigned warnings in gcc 4.0 |
| 808 | KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) | 808 | KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) |
| 809 | 809 | ||
| 810 | # disable stringop warnings in gcc 8+ | ||
| 811 | KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) | ||
| 812 | |||
| 810 | # disable invalid "can't wrap" optimizations for signed / pointers | 813 | # disable invalid "can't wrap" optimizations for signed / pointers |
| 811 | KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) | 814 | KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) |
| 812 | 815 | ||
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts index 4d969013f99a..4d969013f99a 100755..100644 --- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts +++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts | |||
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index f0cbd86312dc..d4b7c59eec68 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi | |||
| @@ -469,6 +469,7 @@ | |||
| 469 | ti,hwmods = "rtc"; | 469 | ti,hwmods = "rtc"; |
| 470 | clocks = <&clk_32768_ck>; | 470 | clocks = <&clk_32768_ck>; |
| 471 | clock-names = "int-clk"; | 471 | clock-names = "int-clk"; |
| 472 | system-power-controller; | ||
| 472 | status = "disabled"; | 473 | status = "disabled"; |
| 473 | }; | 474 | }; |
| 474 | 475 | ||
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index 9fb47724b9c1..ad2ae25b7b4d 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts | |||
| @@ -13,6 +13,43 @@ | |||
| 13 | reg = <0x40000000 0x08000000>; | 13 | reg = <0x40000000 0x08000000>; |
| 14 | }; | 14 | }; |
| 15 | 15 | ||
| 16 | reg_vddio_sd0: regulator-vddio-sd0 { | ||
| 17 | compatible = "regulator-fixed"; | ||
| 18 | regulator-name = "vddio-sd0"; | ||
| 19 | regulator-min-microvolt = <3300000>; | ||
| 20 | regulator-max-microvolt = <3300000>; | ||
| 21 | gpio = <&gpio1 29 0>; | ||
| 22 | }; | ||
| 23 | |||
| 24 | reg_lcd_3v3: regulator-lcd-3v3 { | ||
| 25 | compatible = "regulator-fixed"; | ||
| 26 | regulator-name = "lcd-3v3"; | ||
| 27 | regulator-min-microvolt = <3300000>; | ||
| 28 | regulator-max-microvolt = <3300000>; | ||
| 29 | gpio = <&gpio1 18 0>; | ||
| 30 | enable-active-high; | ||
| 31 | }; | ||
| 32 | |||
| 33 | reg_lcd_5v: regulator-lcd-5v { | ||
| 34 | compatible = "regulator-fixed"; | ||
| 35 | regulator-name = "lcd-5v"; | ||
| 36 | regulator-min-microvolt = <5000000>; | ||
| 37 | regulator-max-microvolt = <5000000>; | ||
| 38 | }; | ||
| 39 | |||
| 40 | panel { | ||
| 41 | compatible = "sii,43wvf1g"; | ||
| 42 | backlight = <&backlight_display>; | ||
| 43 | dvdd-supply = <®_lcd_3v3>; | ||
| 44 | avdd-supply = <®_lcd_5v>; | ||
| 45 | |||
| 46 | port { | ||
| 47 | panel_in: endpoint { | ||
| 48 | remote-endpoint = <&display_out>; | ||
| 49 | }; | ||
| 50 | }; | ||
| 51 | }; | ||
| 52 | |||
| 16 | apb@80000000 { | 53 | apb@80000000 { |
| 17 | apbh@80000000 { | 54 | apbh@80000000 { |
| 18 | gpmi-nand@8000c000 { | 55 | gpmi-nand@8000c000 { |
| @@ -52,31 +89,11 @@ | |||
| 52 | lcdif@80030000 { | 89 | lcdif@80030000 { |
| 53 | pinctrl-names = "default"; | 90 | pinctrl-names = "default"; |
| 54 | pinctrl-0 = <&lcdif_24bit_pins_a>; | 91 | pinctrl-0 = <&lcdif_24bit_pins_a>; |
| 55 | lcd-supply = <®_lcd_3v3>; | ||
| 56 | display = <&display0>; | ||
| 57 | status = "okay"; | 92 | status = "okay"; |
| 58 | 93 | ||
| 59 | display0: display0 { | 94 | port { |
| 60 | bits-per-pixel = <32>; | 95 | display_out: endpoint { |
| 61 | bus-width = <24>; | 96 | remote-endpoint = <&panel_in>; |
| 62 | |||
| 63 | display-timings { | ||
| 64 | native-mode = <&timing0>; | ||
| 65 | timing0: timing0 { | ||
| 66 | clock-frequency = <9200000>; | ||
| 67 | hactive = <480>; | ||
| 68 | vactive = <272>; | ||
| 69 | hback-porch = <15>; | ||
| 70 | hfront-porch = <8>; | ||
| 71 | vback-porch = <12>; | ||
| 72 | vfront-porch = <4>; | ||
| 73 | hsync-len = <1>; | ||
| 74 | vsync-len = <1>; | ||
| 75 | hsync-active = <0>; | ||
| 76 | vsync-active = <0>; | ||
| 77 | de-active = <1>; | ||
| 78 | pixelclk-active = <0>; | ||
| 79 | }; | ||
| 80 | }; | 97 | }; |
| 81 | }; | 98 | }; |
| 82 | }; | 99 | }; |
| @@ -118,32 +135,7 @@ | |||
| 118 | }; | 135 | }; |
| 119 | }; | 136 | }; |
| 120 | 137 | ||
| 121 | regulators { | 138 | backlight_display: backlight { |
| 122 | compatible = "simple-bus"; | ||
| 123 | #address-cells = <1>; | ||
| 124 | #size-cells = <0>; | ||
| 125 | |||
| 126 | reg_vddio_sd0: regulator@0 { | ||
| 127 | compatible = "regulator-fixed"; | ||
| 128 | reg = <0>; | ||
| 129 | regulator-name = "vddio-sd0"; | ||
| 130 | regulator-min-microvolt = <3300000>; | ||
| 131 | regulator-max-microvolt = <3300000>; | ||
| 132 | gpio = <&gpio1 29 0>; | ||
| 133 | }; | ||
| 134 | |||
| 135 | reg_lcd_3v3: regulator@1 { | ||
| 136 | compatible = "regulator-fixed"; | ||
| 137 | reg = <1>; | ||
| 138 | regulator-name = "lcd-3v3"; | ||
| 139 | regulator-min-microvolt = <3300000>; | ||
| 140 | regulator-max-microvolt = <3300000>; | ||
| 141 | gpio = <&gpio1 18 0>; | ||
| 142 | enable-active-high; | ||
| 143 | }; | ||
| 144 | }; | ||
| 145 | |||
| 146 | backlight { | ||
| 147 | compatible = "pwm-backlight"; | 139 | compatible = "pwm-backlight"; |
| 148 | pwms = <&pwm 2 5000000>; | 140 | pwms = <&pwm 2 5000000>; |
| 149 | brightness-levels = <0 4 8 16 32 64 128 255>; | 141 | brightness-levels = <0 4 8 16 32 64 128 255>; |
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 6b0ae667640f..93ab5bdfe068 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts | |||
| @@ -13,6 +13,87 @@ | |||
| 13 | reg = <0x40000000 0x08000000>; | 13 | reg = <0x40000000 0x08000000>; |
| 14 | }; | 14 | }; |
| 15 | 15 | ||
| 16 | |||
| 17 | reg_3p3v: regulator-3p3v { | ||
| 18 | compatible = "regulator-fixed"; | ||
| 19 | regulator-name = "3P3V"; | ||
| 20 | regulator-min-microvolt = <3300000>; | ||
| 21 | regulator-max-microvolt = <3300000>; | ||
| 22 | regulator-always-on; | ||
| 23 | }; | ||
| 24 | |||
| 25 | reg_vddio_sd0: regulator-vddio-sd0 { | ||
| 26 | compatible = "regulator-fixed"; | ||
| 27 | regulator-name = "vddio-sd0"; | ||
| 28 | regulator-min-microvolt = <3300000>; | ||
| 29 | regulator-max-microvolt = <3300000>; | ||
| 30 | gpio = <&gpio3 28 0>; | ||
| 31 | }; | ||
| 32 | |||
| 33 | reg_fec_3v3: regulator-fec-3v3 { | ||
| 34 | compatible = "regulator-fixed"; | ||
| 35 | regulator-name = "fec-3v3"; | ||
| 36 | regulator-min-microvolt = <3300000>; | ||
| 37 | regulator-max-microvolt = <3300000>; | ||
| 38 | gpio = <&gpio2 15 0>; | ||
| 39 | }; | ||
| 40 | |||
| 41 | reg_usb0_vbus: regulator-usb0-vbus { | ||
| 42 | compatible = "regulator-fixed"; | ||
| 43 | regulator-name = "usb0_vbus"; | ||
| 44 | regulator-min-microvolt = <5000000>; | ||
| 45 | regulator-max-microvolt = <5000000>; | ||
| 46 | gpio = <&gpio3 9 0>; | ||
| 47 | enable-active-high; | ||
| 48 | }; | ||
| 49 | |||
| 50 | reg_usb1_vbus: regulator-usb1-vbus { | ||
| 51 | compatible = "regulator-fixed"; | ||
| 52 | regulator-name = "usb1_vbus"; | ||
| 53 | regulator-min-microvolt = <5000000>; | ||
| 54 | regulator-max-microvolt = <5000000>; | ||
| 55 | gpio = <&gpio3 8 0>; | ||
| 56 | enable-active-high; | ||
| 57 | }; | ||
| 58 | |||
| 59 | reg_lcd_3v3: regulator-lcd-3v3 { | ||
| 60 | compatible = "regulator-fixed"; | ||
| 61 | regulator-name = "lcd-3v3"; | ||
| 62 | regulator-min-microvolt = <3300000>; | ||
| 63 | regulator-max-microvolt = <3300000>; | ||
| 64 | gpio = <&gpio3 30 0>; | ||
| 65 | enable-active-high; | ||
| 66 | }; | ||
| 67 | |||
| 68 | reg_can_3v3: regulator-can-3v3 { | ||
| 69 | compatible = "regulator-fixed"; | ||
| 70 | regulator-name = "can-3v3"; | ||
| 71 | regulator-min-microvolt = <3300000>; | ||
| 72 | regulator-max-microvolt = <3300000>; | ||
| 73 | gpio = <&gpio2 13 0>; | ||
| 74 | enable-active-high; | ||
| 75 | }; | ||
| 76 | |||
| 77 | reg_lcd_5v: regulator-lcd-5v { | ||
| 78 | compatible = "regulator-fixed"; | ||
| 79 | regulator-name = "lcd-5v"; | ||
| 80 | regulator-min-microvolt = <5000000>; | ||
| 81 | regulator-max-microvolt = <5000000>; | ||
| 82 | }; | ||
| 83 | |||
| 84 | panel { | ||
| 85 | compatible = "sii,43wvf1g"; | ||
| 86 | backlight = <&backlight_display>; | ||
| 87 | dvdd-supply = <®_lcd_3v3>; | ||
| 88 | avdd-supply = <®_lcd_5v>; | ||
| 89 | |||
| 90 | port { | ||
| 91 | panel_in: endpoint { | ||
| 92 | remote-endpoint = <&display_out>; | ||
| 93 | }; | ||
| 94 | }; | ||
| 95 | }; | ||
| 96 | |||
| 16 | apb@80000000 { | 97 | apb@80000000 { |
| 17 | apbh@80000000 { | 98 | apbh@80000000 { |
| 18 | gpmi-nand@8000c000 { | 99 | gpmi-nand@8000c000 { |
| @@ -116,31 +197,11 @@ | |||
| 116 | pinctrl-names = "default"; | 197 | pinctrl-names = "default"; |
| 117 | pinctrl-0 = <&lcdif_24bit_pins_a | 198 | pinctrl-0 = <&lcdif_24bit_pins_a |
| 118 | &lcdif_pins_evk>; | 199 | &lcdif_pins_evk>; |
| 119 | lcd-supply = <®_lcd_3v3>; | ||
| 120 | display = <&display0>; | ||
| 121 | status = "okay"; | 200 | status = "okay"; |
| 122 | 201 | ||
| 123 | display0: display0 { | 202 | port { |
| 124 | bits-per-pixel = <32>; | 203 | display_out: endpoint { |
| 125 | bus-width = <24>; | 204 | remote-endpoint = <&panel_in>; |
| 126 | |||
| 127 | display-timings { | ||
| 128 | native-mode = <&timing0>; | ||
| 129 | timing0: timing0 { | ||
| 130 | clock-frequency = <33500000>; | ||
| 131 | hactive = <800>; | ||
| 132 | vactive = <480>; | ||
| 133 | hback-porch = <89>; | ||
| 134 | hfront-porch = <164>; | ||
| 135 | vback-porch = <23>; | ||
| 136 | vfront-porch = <10>; | ||
| 137 | hsync-len = <10>; | ||
| 138 | vsync-len = <10>; | ||
| 139 | hsync-active = <0>; | ||
| 140 | vsync-active = <0>; | ||
| 141 | de-active = <1>; | ||
| 142 | pixelclk-active = <0>; | ||
| 143 | }; | ||
| 144 | }; | 205 | }; |
| 145 | }; | 206 | }; |
| 146 | }; | 207 | }; |
| @@ -269,80 +330,6 @@ | |||
| 269 | }; | 330 | }; |
| 270 | }; | 331 | }; |
| 271 | 332 | ||
| 272 | regulators { | ||
| 273 | compatible = "simple-bus"; | ||
| 274 | #address-cells = <1>; | ||
| 275 | #size-cells = <0>; | ||
| 276 | |||
| 277 | reg_3p3v: regulator@0 { | ||
| 278 | compatible = "regulator-fixed"; | ||
| 279 | reg = <0>; | ||
| 280 | regulator-name = "3P3V"; | ||
| 281 | regulator-min-microvolt = <3300000>; | ||
| 282 | regulator-max-microvolt = <3300000>; | ||
| 283 | regulator-always-on; | ||
| 284 | }; | ||
| 285 | |||
| 286 | reg_vddio_sd0: regulator@1 { | ||
| 287 | compatible = "regulator-fixed"; | ||
| 288 | reg = <1>; | ||
| 289 | regulator-name = "vddio-sd0"; | ||
| 290 | regulator-min-microvolt = <3300000>; | ||
| 291 | regulator-max-microvolt = <3300000>; | ||
| 292 | gpio = <&gpio3 28 0>; | ||
| 293 | }; | ||
| 294 | |||
| 295 | reg_fec_3v3: regulator@2 { | ||
| 296 | compatible = "regulator-fixed"; | ||
| 297 | reg = <2>; | ||
| 298 | regulator-name = "fec-3v3"; | ||
| 299 | regulator-min-microvolt = <3300000>; | ||
| 300 | regulator-max-microvolt = <3300000>; | ||
| 301 | gpio = <&gpio2 15 0>; | ||
| 302 | }; | ||
| 303 | |||
| 304 | reg_usb0_vbus: regulator@3 { | ||
| 305 | compatible = "regulator-fixed"; | ||
| 306 | reg = <3>; | ||
| 307 | regulator-name = "usb0_vbus"; | ||
| 308 | regulator-min-microvolt = <5000000>; | ||
| 309 | regulator-max-microvolt = <5000000>; | ||
| 310 | gpio = <&gpio3 9 0>; | ||
| 311 | enable-active-high; | ||
| 312 | }; | ||
| 313 | |||
| 314 | reg_usb1_vbus: regulator@4 { | ||
| 315 | compatible = "regulator-fixed"; | ||
| 316 | reg = <4>; | ||
| 317 | regulator-name = "usb1_vbus"; | ||
| 318 | regulator-min-microvolt = <5000000>; | ||
| 319 | regulator-max-microvolt = <5000000>; | ||
| 320 | gpio = <&gpio3 8 0>; | ||
| 321 | enable-active-high; | ||
| 322 | }; | ||
| 323 | |||
| 324 | reg_lcd_3v3: regulator@5 { | ||
| 325 | compatible = "regulator-fixed"; | ||
| 326 | reg = <5>; | ||
| 327 | regulator-name = "lcd-3v3"; | ||
| 328 | regulator-min-microvolt = <3300000>; | ||
| 329 | regulator-max-microvolt = <3300000>; | ||
| 330 | gpio = <&gpio3 30 0>; | ||
| 331 | enable-active-high; | ||
| 332 | }; | ||
| 333 | |||
| 334 | reg_can_3v3: regulator@6 { | ||
| 335 | compatible = "regulator-fixed"; | ||
| 336 | reg = <6>; | ||
| 337 | regulator-name = "can-3v3"; | ||
| 338 | regulator-min-microvolt = <3300000>; | ||
| 339 | regulator-max-microvolt = <3300000>; | ||
| 340 | gpio = <&gpio2 13 0>; | ||
| 341 | enable-active-high; | ||
| 342 | }; | ||
| 343 | |||
| 344 | }; | ||
| 345 | |||
| 346 | sound { | 333 | sound { |
| 347 | compatible = "fsl,imx28-evk-sgtl5000", | 334 | compatible = "fsl,imx28-evk-sgtl5000", |
| 348 | "fsl,mxs-audio-sgtl5000"; | 335 | "fsl,mxs-audio-sgtl5000"; |
| @@ -363,7 +350,7 @@ | |||
| 363 | }; | 350 | }; |
| 364 | }; | 351 | }; |
| 365 | 352 | ||
| 366 | backlight { | 353 | backlight_display: backlight { |
| 367 | compatible = "pwm-backlight"; | 354 | compatible = "pwm-backlight"; |
| 368 | pwms = <&pwm 2 5000000>; | 355 | pwms = <&pwm 2 5000000>; |
| 369 | brightness-levels = <0 4 8 16 32 64 128 255>; | 356 | brightness-levels = <0 4 8 16 32 64 128 255>; |
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index 7cbc2ffa4b3a..7234e8330a57 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi | |||
| @@ -126,10 +126,14 @@ | |||
| 126 | interrupt-names = "msi"; | 126 | interrupt-names = "msi"; |
| 127 | #interrupt-cells = <1>; | 127 | #interrupt-cells = <1>; |
| 128 | interrupt-map-mask = <0 0 0 0x7>; | 128 | interrupt-map-mask = <0 0 0 0x7>; |
| 129 | interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, | 129 | /* |
| 130 | <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, | 130 | * Reference manual lists pci irqs incorrectly |
| 131 | <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, | 131 | * Real hardware ordering is same as imx6: D+MSI, C, B, A |
| 132 | <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; | 132 | */ |
| 133 | interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, | ||
| 134 | <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, | ||
| 135 | <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, | ||
| 136 | <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; | ||
| 133 | clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, | 137 | clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, |
| 134 | <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, | 138 | <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, |
| 135 | <&clks IMX7D_PCIE_PHY_ROOT_CLK>; | 139 | <&clks IMX7D_PCIE_PHY_ROOT_CLK>; |
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 12d6822f0057..04758a2a87f0 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts | |||
| @@ -354,7 +354,7 @@ | |||
| 354 | &mmc2 { | 354 | &mmc2 { |
| 355 | vmmc-supply = <&vsdio>; | 355 | vmmc-supply = <&vsdio>; |
| 356 | bus-width = <8>; | 356 | bus-width = <8>; |
| 357 | non-removable; | 357 | ti,non-removable; |
| 358 | }; | 358 | }; |
| 359 | 359 | ||
| 360 | &mmc3 { | 360 | &mmc3 { |
| @@ -621,15 +621,6 @@ | |||
| 621 | OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */ | 621 | OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */ |
| 622 | >; | 622 | >; |
| 623 | }; | 623 | }; |
| 624 | }; | ||
| 625 | |||
| 626 | &omap4_pmx_wkup { | ||
| 627 | usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { | ||
| 628 | /* gpio_wk0 */ | ||
| 629 | pinctrl-single,pins = < | ||
| 630 | OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) | ||
| 631 | >; | ||
| 632 | }; | ||
| 633 | 624 | ||
| 634 | vibrator_direction_pin: pinmux_vibrator_direction_pin { | 625 | vibrator_direction_pin: pinmux_vibrator_direction_pin { |
| 635 | pinctrl-single,pins = < | 626 | pinctrl-single,pins = < |
| @@ -644,6 +635,15 @@ | |||
| 644 | }; | 635 | }; |
| 645 | }; | 636 | }; |
| 646 | 637 | ||
| 638 | &omap4_pmx_wkup { | ||
| 639 | usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { | ||
| 640 | /* gpio_wk0 */ | ||
| 641 | pinctrl-single,pins = < | ||
| 642 | OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) | ||
| 643 | >; | ||
| 644 | }; | ||
| 645 | }; | ||
| 646 | |||
| 647 | /* | 647 | /* |
| 648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for | 648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for |
| 649 | * uart1 wakeirq. | 649 | * uart1 wakeirq. |
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index e2c127608bcc..7eca43ff69bb 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig | |||
| @@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y | |||
| 257 | CONFIG_DRM=y | 257 | CONFIG_DRM=y |
| 258 | CONFIG_DRM_PANEL_LVDS=y | 258 | CONFIG_DRM_PANEL_LVDS=y |
| 259 | CONFIG_DRM_PANEL_SIMPLE=y | 259 | CONFIG_DRM_PANEL_SIMPLE=y |
| 260 | CONFIG_DRM_PANEL_SEIKO_43WVF1G=y | ||
| 260 | CONFIG_DRM_DW_HDMI_AHB_AUDIO=m | 261 | CONFIG_DRM_DW_HDMI_AHB_AUDIO=m |
| 261 | CONFIG_DRM_DW_HDMI_CEC=y | 262 | CONFIG_DRM_DW_HDMI_CEC=y |
| 262 | CONFIG_DRM_IMX=y | 263 | CONFIG_DRM_IMX=y |
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index 148226e36152..7b8212857535 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig | |||
| @@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y | |||
| 95 | CONFIG_REGULATOR=y | 95 | CONFIG_REGULATOR=y |
| 96 | CONFIG_REGULATOR_FIXED_VOLTAGE=y | 96 | CONFIG_REGULATOR_FIXED_VOLTAGE=y |
| 97 | CONFIG_DRM=y | 97 | CONFIG_DRM=y |
| 98 | CONFIG_DRM_PANEL_SEIKO_43WVF1G=y | ||
| 98 | CONFIG_DRM_MXSFB=y | 99 | CONFIG_DRM_MXSFB=y |
| 99 | CONFIG_FB_MODE_HELPERS=y | 100 | CONFIG_FB_MODE_HELPERS=y |
| 100 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 101 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index df68dc4056e5..5282324c7cef 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig | |||
| @@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y | |||
| 5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
| 6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
| 7 | CONFIG_SLAB=y | 7 | CONFIG_SLAB=y |
| 8 | CONFIG_MODULES=y | ||
| 9 | CONFIG_MODULE_UNLOAD=y | ||
| 10 | CONFIG_PARTITION_ADVANCED=y | ||
| 11 | # CONFIG_ARCH_MULTI_V7 is not set | 8 | # CONFIG_ARCH_MULTI_V7 is not set |
| 12 | CONFIG_ARCH_VERSATILE=y | 9 | CONFIG_ARCH_VERSATILE=y |
| 13 | CONFIG_AEABI=y | 10 | CONFIG_AEABI=y |
| 14 | CONFIG_OABI_COMPAT=y | 11 | CONFIG_OABI_COMPAT=y |
| 15 | CONFIG_CMA=y | ||
| 16 | CONFIG_ZBOOT_ROM_TEXT=0x0 | 12 | CONFIG_ZBOOT_ROM_TEXT=0x0 |
| 17 | CONFIG_ZBOOT_ROM_BSS=0x0 | 13 | CONFIG_ZBOOT_ROM_BSS=0x0 |
| 18 | CONFIG_CMDLINE="root=1f03 mem=32M" | 14 | CONFIG_CMDLINE="root=1f03 mem=32M" |
| 19 | CONFIG_FPE_NWFPE=y | 15 | CONFIG_FPE_NWFPE=y |
| 20 | CONFIG_VFP=y | 16 | CONFIG_VFP=y |
| 17 | CONFIG_MODULES=y | ||
| 18 | CONFIG_MODULE_UNLOAD=y | ||
| 19 | CONFIG_PARTITION_ADVANCED=y | ||
| 20 | CONFIG_CMA=y | ||
| 21 | CONFIG_NET=y | 21 | CONFIG_NET=y |
| 22 | CONFIG_PACKET=y | 22 | CONFIG_PACKET=y |
| 23 | CONFIG_UNIX=y | 23 | CONFIG_UNIX=y |
| @@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y | |||
| 59 | CONFIG_DRM=y | 59 | CONFIG_DRM=y |
| 60 | CONFIG_DRM_PANEL_ARM_VERSATILE=y | 60 | CONFIG_DRM_PANEL_ARM_VERSATILE=y |
| 61 | CONFIG_DRM_PANEL_SIMPLE=y | 61 | CONFIG_DRM_PANEL_SIMPLE=y |
| 62 | CONFIG_DRM_DUMB_VGA_DAC=y | ||
| 62 | CONFIG_DRM_PL111=y | 63 | CONFIG_DRM_PL111=y |
| 63 | CONFIG_FB_MODE_HELPERS=y | 64 | CONFIG_FB_MODE_HELPERS=y |
| 64 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 65 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
| @@ -89,9 +90,10 @@ CONFIG_NFSD=y | |||
| 89 | CONFIG_NFSD_V3=y | 90 | CONFIG_NFSD_V3=y |
| 90 | CONFIG_NLS_CODEPAGE_850=m | 91 | CONFIG_NLS_CODEPAGE_850=m |
| 91 | CONFIG_NLS_ISO8859_1=m | 92 | CONFIG_NLS_ISO8859_1=m |
| 93 | CONFIG_FONTS=y | ||
| 94 | CONFIG_FONT_ACORN_8x8=y | ||
| 95 | CONFIG_DEBUG_FS=y | ||
| 92 | CONFIG_MAGIC_SYSRQ=y | 96 | CONFIG_MAGIC_SYSRQ=y |
| 93 | CONFIG_DEBUG_KERNEL=y | 97 | CONFIG_DEBUG_KERNEL=y |
| 94 | CONFIG_DEBUG_USER=y | 98 | CONFIG_DEBUG_USER=y |
| 95 | CONFIG_DEBUG_LL=y | 99 | CONFIG_DEBUG_LL=y |
| 96 | CONFIG_FONTS=y | ||
| 97 | CONFIG_FONT_ACORN_8x8=y | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 2ceffd85dd3d..cd65ea4e9c54 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
| @@ -2161,6 +2161,37 @@ static int of_dev_hwmod_lookup(struct device_node *np, | |||
| 2161 | } | 2161 | } |
| 2162 | 2162 | ||
| 2163 | /** | 2163 | /** |
| 2164 | * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets | ||
| 2165 | * | ||
| 2166 | * @oh: struct omap_hwmod * | ||
| 2167 | * @np: struct device_node * | ||
| 2168 | * | ||
| 2169 | * Fix up module register offsets for modules with mpu_rt_idx. | ||
| 2170 | * Only needed for cpsw with interconnect target module defined | ||
| 2171 | * in device tree while still using legacy hwmod platform data | ||
| 2172 | * for rev, sysc and syss registers. | ||
| 2173 | * | ||
| 2174 | * Can be removed when all cpsw hwmod platform data has been | ||
| 2175 | * dropped. | ||
| 2176 | */ | ||
| 2177 | static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh, | ||
| 2178 | struct device_node *np, | ||
| 2179 | struct resource *res) | ||
| 2180 | { | ||
| 2181 | struct device_node *child = NULL; | ||
| 2182 | int error; | ||
| 2183 | |||
| 2184 | child = of_get_next_child(np, child); | ||
| 2185 | if (!child) | ||
| 2186 | return; | ||
| 2187 | |||
| 2188 | error = of_address_to_resource(child, oh->mpu_rt_idx, res); | ||
| 2189 | if (error) | ||
| 2190 | pr_err("%s: error mapping mpu_rt_idx: %i\n", | ||
| 2191 | __func__, error); | ||
| 2192 | } | ||
| 2193 | |||
| 2194 | /** | ||
| 2164 | * omap_hwmod_parse_module_range - map module IO range from device tree | 2195 | * omap_hwmod_parse_module_range - map module IO range from device tree |
| 2165 | * @oh: struct omap_hwmod * | 2196 | * @oh: struct omap_hwmod * |
| 2166 | * @np: struct device_node * | 2197 | * @np: struct device_node * |
| @@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh, | |||
| 2220 | size = be32_to_cpup(ranges); | 2251 | size = be32_to_cpup(ranges); |
| 2221 | 2252 | ||
| 2222 | pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", | 2253 | pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", |
| 2223 | oh->name, np->name, base, size); | 2254 | oh ? oh->name : "", np->name, base, size); |
| 2255 | |||
| 2256 | if (oh && oh->mpu_rt_idx) { | ||
| 2257 | omap_hwmod_fix_mpu_rt_idx(oh, np, res); | ||
| 2258 | |||
| 2259 | return 0; | ||
| 2260 | } | ||
| 2224 | 2261 | ||
| 2225 | res->start = base; | 2262 | res->start = base; |
| 2226 | res->end = base + size - 1; | 2263 | res->end = base + size - 1; |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 29e75b47becd..1b1a0e95c751 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
| @@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK | |||
| 763 | 763 | ||
| 764 | config HOLES_IN_ZONE | 764 | config HOLES_IN_ZONE |
| 765 | def_bool y | 765 | def_bool y |
| 766 | depends on NUMA | ||
| 767 | 766 | ||
| 768 | source kernel/Kconfig.hz | 767 | source kernel/Kconfig.hz |
| 769 | 768 | ||
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f67e8d5e93ad..db8d364f8476 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
| @@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y | |||
| 38 | CONFIG_ARCH_BERLIN=y | 38 | CONFIG_ARCH_BERLIN=y |
| 39 | CONFIG_ARCH_BRCMSTB=y | 39 | CONFIG_ARCH_BRCMSTB=y |
| 40 | CONFIG_ARCH_EXYNOS=y | 40 | CONFIG_ARCH_EXYNOS=y |
| 41 | CONFIG_ARCH_K3=y | ||
| 41 | CONFIG_ARCH_LAYERSCAPE=y | 42 | CONFIG_ARCH_LAYERSCAPE=y |
| 42 | CONFIG_ARCH_LG1K=y | 43 | CONFIG_ARCH_LG1K=y |
| 43 | CONFIG_ARCH_HISI=y | 44 | CONFIG_ARCH_HISI=y |
| @@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y | |||
| 605 | CONFIG_ARCH_TEGRA_210_SOC=y | 606 | CONFIG_ARCH_TEGRA_210_SOC=y |
| 606 | CONFIG_ARCH_TEGRA_186_SOC=y | 607 | CONFIG_ARCH_TEGRA_186_SOC=y |
| 607 | CONFIG_ARCH_TEGRA_194_SOC=y | 608 | CONFIG_ARCH_TEGRA_194_SOC=y |
| 609 | CONFIG_ARCH_K3_AM6_SOC=y | ||
| 610 | CONFIG_SOC_TI=y | ||
| 608 | CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y | 611 | CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y |
| 609 | CONFIG_EXTCON_USB_GPIO=y | 612 | CONFIG_EXTCON_USB_GPIO=y |
| 610 | CONFIG_EXTCON_USBC_CROS_EC=y | 613 | CONFIG_EXTCON_USBC_CROS_EC=y |
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 6e9f33d14930..067d8937d5af 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c | |||
| @@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req) | |||
| 417 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); | 417 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); |
| 418 | put_unaligned_be32(2, iv + GCM_IV_SIZE); | 418 | put_unaligned_be32(2, iv + GCM_IV_SIZE); |
| 419 | 419 | ||
| 420 | while (walk.nbytes >= AES_BLOCK_SIZE) { | 420 | while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { |
| 421 | int blocks = walk.nbytes / AES_BLOCK_SIZE; | 421 | int blocks = walk.nbytes / AES_BLOCK_SIZE; |
| 422 | u8 *dst = walk.dst.virt.addr; | 422 | u8 *dst = walk.dst.virt.addr; |
| 423 | u8 *src = walk.src.virt.addr; | 423 | u8 *src = walk.src.virt.addr; |
| @@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req) | |||
| 437 | NULL); | 437 | NULL); |
| 438 | 438 | ||
| 439 | err = skcipher_walk_done(&walk, | 439 | err = skcipher_walk_done(&walk, |
| 440 | walk.nbytes % AES_BLOCK_SIZE); | 440 | walk.nbytes % (2 * AES_BLOCK_SIZE)); |
| 441 | } | 441 | } |
| 442 | if (walk.nbytes) | 442 | if (walk.nbytes) { |
| 443 | __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, | 443 | __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, |
| 444 | nrounds); | 444 | nrounds); |
| 445 | if (walk.nbytes > AES_BLOCK_SIZE) { | ||
| 446 | crypto_inc(iv, AES_BLOCK_SIZE); | ||
| 447 | __aes_arm64_encrypt(ctx->aes_key.key_enc, | ||
| 448 | ks + AES_BLOCK_SIZE, iv, | ||
| 449 | nrounds); | ||
| 450 | } | ||
| 451 | } | ||
| 445 | } | 452 | } |
| 446 | 453 | ||
| 447 | /* handle the tail */ | 454 | /* handle the tail */ |
| @@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req) | |||
| 545 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); | 552 | __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); |
| 546 | put_unaligned_be32(2, iv + GCM_IV_SIZE); | 553 | put_unaligned_be32(2, iv + GCM_IV_SIZE); |
| 547 | 554 | ||
| 548 | while (walk.nbytes >= AES_BLOCK_SIZE) { | 555 | while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { |
| 549 | int blocks = walk.nbytes / AES_BLOCK_SIZE; | 556 | int blocks = walk.nbytes / AES_BLOCK_SIZE; |
| 550 | u8 *dst = walk.dst.virt.addr; | 557 | u8 *dst = walk.dst.virt.addr; |
| 551 | u8 *src = walk.src.virt.addr; | 558 | u8 *src = walk.src.virt.addr; |
| @@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req) | |||
| 564 | } while (--blocks > 0); | 571 | } while (--blocks > 0); |
| 565 | 572 | ||
| 566 | err = skcipher_walk_done(&walk, | 573 | err = skcipher_walk_done(&walk, |
| 567 | walk.nbytes % AES_BLOCK_SIZE); | 574 | walk.nbytes % (2 * AES_BLOCK_SIZE)); |
| 568 | } | 575 | } |
| 569 | if (walk.nbytes) | 576 | if (walk.nbytes) { |
| 577 | if (walk.nbytes > AES_BLOCK_SIZE) { | ||
| 578 | u8 *iv2 = iv + AES_BLOCK_SIZE; | ||
| 579 | |||
| 580 | memcpy(iv2, iv, AES_BLOCK_SIZE); | ||
| 581 | crypto_inc(iv2, AES_BLOCK_SIZE); | ||
| 582 | |||
| 583 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2, | ||
| 584 | iv2, nrounds); | ||
| 585 | } | ||
| 570 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, | 586 | __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, |
| 571 | nrounds); | 587 | nrounds); |
| 588 | } | ||
| 572 | } | 589 | } |
| 573 | 590 | ||
| 574 | /* handle the tail */ | 591 | /* handle the tail */ |
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index b7fb5274b250..0c4fc223f225 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c | |||
| @@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void) | |||
| 69 | crypto_unregister_alg(&sm4_ce_alg); | 69 | crypto_unregister_alg(&sm4_ce_alg); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | module_cpu_feature_match(SM3, sm4_ce_mod_init); | 72 | module_cpu_feature_match(SM4, sm4_ce_mod_init); |
| 73 | module_exit(sm4_ce_mod_fini); | 73 | module_exit(sm4_ce_mod_fini); |
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 3534aa6a4dc2..1b083c500b9a 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c | |||
| @@ -98,11 +98,10 @@ static time64_t pmu_read_time(void) | |||
| 98 | 98 | ||
| 99 | if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) | 99 | if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) |
| 100 | return 0; | 100 | return 0; |
| 101 | while (!req.complete) | 101 | pmu_wait_complete(&req); |
| 102 | pmu_poll(); | ||
| 103 | 102 | ||
| 104 | time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) | | 103 | time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) | |
| 105 | (req.reply[3] << 8) | req.reply[4]); | 104 | (req.reply[2] << 8) | req.reply[3]); |
| 106 | 105 | ||
| 107 | return time - RTC_OFFSET; | 106 | return time - RTC_OFFSET; |
| 108 | } | 107 | } |
| @@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time) | |||
| 116 | (data >> 24) & 0xFF, (data >> 16) & 0xFF, | 115 | (data >> 24) & 0xFF, (data >> 16) & 0xFF, |
| 117 | (data >> 8) & 0xFF, data & 0xFF) < 0) | 116 | (data >> 8) & 0xFF, data & 0xFF) < 0) |
| 118 | return; | 117 | return; |
| 119 | while (!req.complete) | 118 | pmu_wait_complete(&req); |
| 120 | pmu_poll(); | ||
| 121 | } | 119 | } |
| 122 | 120 | ||
| 123 | static __u8 pmu_read_pram(int offset) | 121 | static __u8 pmu_read_pram(int offset) |
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug index 7a49f0d28d14..f1da8a7b17ff 100644 --- a/arch/nios2/Kconfig.debug +++ b/arch/nios2/Kconfig.debug | |||
| @@ -3,15 +3,6 @@ | |||
| 3 | config TRACE_IRQFLAGS_SUPPORT | 3 | config TRACE_IRQFLAGS_SUPPORT |
| 4 | def_bool y | 4 | def_bool y |
| 5 | 5 | ||
| 6 | config DEBUG_STACK_USAGE | ||
| 7 | bool "Enable stack utilization instrumentation" | ||
| 8 | depends on DEBUG_KERNEL | ||
| 9 | help | ||
| 10 | Enables the display of the minimum amount of free stack which each | ||
| 11 | task has ever had available in the sysrq-T and sysrq-P debug output. | ||
| 12 | |||
| 13 | This option will slow down process creation somewhat. | ||
| 14 | |||
| 15 | config EARLY_PRINTK | 6 | config EARLY_PRINTK |
| 16 | bool "Activate early kernel debugging" | 7 | bool "Activate early kernel debugging" |
| 17 | default y | 8 | default y |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index db0b6eebbfa5..a80669209155 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
| @@ -177,7 +177,6 @@ config PPC | |||
| 177 | select HAVE_ARCH_KGDB | 177 | select HAVE_ARCH_KGDB |
| 178 | select HAVE_ARCH_MMAP_RND_BITS | 178 | select HAVE_ARCH_MMAP_RND_BITS |
| 179 | select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT | 179 | select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT |
| 180 | select HAVE_ARCH_PREL32_RELOCATIONS | ||
| 181 | select HAVE_ARCH_SECCOMP_FILTER | 180 | select HAVE_ARCH_SECCOMP_FILTER |
| 182 | select HAVE_ARCH_TRACEHOOK | 181 | select HAVE_ARCH_TRACEHOOK |
| 183 | select HAVE_CBPF_JIT if !PPC64 | 182 | select HAVE_CBPF_JIT if !PPC64 |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 3c0e8fb2b773..68e14afecac8 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
| @@ -358,7 +358,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
| 358 | unsigned long pp, key; | 358 | unsigned long pp, key; |
| 359 | unsigned long v, orig_v, gr; | 359 | unsigned long v, orig_v, gr; |
| 360 | __be64 *hptep; | 360 | __be64 *hptep; |
| 361 | int index; | 361 | long int index; |
| 362 | int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); | 362 | int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); |
| 363 | 363 | ||
| 364 | if (kvm_is_radix(vcpu->kvm)) | 364 | if (kvm_is_radix(vcpu->kvm)) |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 0af1c0aea1fe..fd6e8c13685f 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c | |||
| @@ -725,10 +725,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
| 725 | gpa, shift); | 725 | gpa, shift); |
| 726 | kvmppc_radix_tlbie_page(kvm, gpa, shift); | 726 | kvmppc_radix_tlbie_page(kvm, gpa, shift); |
| 727 | if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { | 727 | if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { |
| 728 | unsigned long npages = 1; | 728 | unsigned long psize = PAGE_SIZE; |
| 729 | if (shift) | 729 | if (shift) |
| 730 | npages = 1ul << (shift - PAGE_SHIFT); | 730 | psize = 1ul << shift; |
| 731 | kvmppc_update_dirty_map(memslot, gfn, npages); | 731 | kvmppc_update_dirty_map(memslot, gfn, psize); |
| 732 | } | 732 | } |
| 733 | } | 733 | } |
| 734 | return 0; | 734 | return 0; |
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index c229509288ea..439dc7072e05 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h | |||
| @@ -14,6 +14,10 @@ | |||
| 14 | #ifndef _ASM_RISCV_TLB_H | 14 | #ifndef _ASM_RISCV_TLB_H |
| 15 | #define _ASM_RISCV_TLB_H | 15 | #define _ASM_RISCV_TLB_H |
| 16 | 16 | ||
| 17 | struct mmu_gather; | ||
| 18 | |||
| 19 | static void tlb_flush(struct mmu_gather *tlb); | ||
| 20 | |||
| 17 | #include <asm-generic/tlb.h> | 21 | #include <asm-generic/tlb.h> |
| 18 | 22 | ||
| 19 | static inline void tlb_flush(struct mmu_gather *tlb) | 23 | static inline void tlb_flush(struct mmu_gather *tlb) |
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 568026ccf6e8..fb03a4482ad6 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c | |||
| @@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, | |||
| 65 | SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, | 65 | SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, |
| 66 | uintptr_t, flags) | 66 | uintptr_t, flags) |
| 67 | { | 67 | { |
| 68 | #ifdef CONFIG_SMP | ||
| 69 | struct mm_struct *mm = current->mm; | ||
| 70 | bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; | ||
| 71 | #endif | ||
| 72 | |||
| 73 | /* Check the reserved flags. */ | 68 | /* Check the reserved flags. */ |
| 74 | if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) | 69 | if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) |
| 75 | return -EINVAL; | 70 | return -EINVAL; |
| 76 | 71 | ||
| 77 | /* | 72 | flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL); |
| 78 | * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(), | ||
| 79 | * which generates unused variable warnings all over this function. | ||
| 80 | */ | ||
| 81 | #ifdef CONFIG_SMP | ||
| 82 | flush_icache_mm(mm, local); | ||
| 83 | #else | ||
| 84 | flush_icache_all(); | ||
| 85 | #endif | ||
| 86 | 73 | ||
| 87 | return 0; | 74 | return 0; |
| 88 | } | 75 | } |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index f31a15044c24..a8418e1379eb 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
| @@ -16,7 +16,13 @@ typedef struct { | |||
| 16 | unsigned long asce; | 16 | unsigned long asce; |
| 17 | unsigned long asce_limit; | 17 | unsigned long asce_limit; |
| 18 | unsigned long vdso_base; | 18 | unsigned long vdso_base; |
| 19 | /* The mmu context allocates 4K page tables. */ | 19 | /* |
| 20 | * The following bitfields need a down_write on the mm | ||
| 21 | * semaphore when they are written to. As they are only | ||
| 22 | * written once, they can be read without a lock. | ||
| 23 | * | ||
| 24 | * The mmu context allocates 4K page tables. | ||
| 25 | */ | ||
| 20 | unsigned int alloc_pgste:1; | 26 | unsigned int alloc_pgste:1; |
| 21 | /* The mmu context uses extended page tables. */ | 27 | /* The mmu context uses extended page tables. */ |
| 22 | unsigned int has_pgste:1; | 28 | unsigned int has_pgste:1; |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 91ad4a9425c0..f69333fd2fa3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
| @@ -695,7 +695,9 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
| 695 | r = -EINVAL; | 695 | r = -EINVAL; |
| 696 | else { | 696 | else { |
| 697 | r = 0; | 697 | r = 0; |
| 698 | down_write(&kvm->mm->mmap_sem); | ||
| 698 | kvm->mm->context.allow_gmap_hpage_1m = 1; | 699 | kvm->mm->context.allow_gmap_hpage_1m = 1; |
| 700 | up_write(&kvm->mm->mmap_sem); | ||
| 699 | /* | 701 | /* |
| 700 | * We might have to create fake 4k page | 702 | * We might have to create fake 4k page |
| 701 | * tables. To avoid that the hardware works on | 703 | * tables. To avoid that the hardware works on |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d68f10441a16..8679bd74d337 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
| @@ -280,9 +280,11 @@ retry: | |||
| 280 | goto retry; | 280 | goto retry; |
| 281 | } | 281 | } |
| 282 | } | 282 | } |
| 283 | if (rc) | ||
| 284 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
| 285 | up_read(¤t->mm->mmap_sem); | 283 | up_read(¤t->mm->mmap_sem); |
| 284 | if (rc == -EFAULT) | ||
| 285 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
| 286 | if (rc < 0) | ||
| 287 | return rc; | ||
| 286 | vcpu->run->s.regs.gprs[reg1] &= ~0xff; | 288 | vcpu->run->s.regs.gprs[reg1] &= ~0xff; |
| 287 | vcpu->run->s.regs.gprs[reg1] |= key; | 289 | vcpu->run->s.regs.gprs[reg1] |= key; |
| 288 | return 0; | 290 | return 0; |
| @@ -324,9 +326,11 @@ retry: | |||
| 324 | goto retry; | 326 | goto retry; |
| 325 | } | 327 | } |
| 326 | } | 328 | } |
| 327 | if (rc < 0) | ||
| 328 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
| 329 | up_read(¤t->mm->mmap_sem); | 329 | up_read(¤t->mm->mmap_sem); |
| 330 | if (rc == -EFAULT) | ||
| 331 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
| 332 | if (rc < 0) | ||
| 333 | return rc; | ||
| 330 | kvm_s390_set_psw_cc(vcpu, rc); | 334 | kvm_s390_set_psw_cc(vcpu, rc); |
| 331 | return 0; | 335 | return 0; |
| 332 | } | 336 | } |
| @@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu) | |||
| 390 | FAULT_FLAG_WRITE, &unlocked); | 394 | FAULT_FLAG_WRITE, &unlocked); |
| 391 | rc = !rc ? -EAGAIN : rc; | 395 | rc = !rc ? -EAGAIN : rc; |
| 392 | } | 396 | } |
| 397 | up_read(¤t->mm->mmap_sem); | ||
| 393 | if (rc == -EFAULT) | 398 | if (rc == -EFAULT) |
| 394 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 399 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
| 395 | 400 | if (rc < 0) | |
| 396 | up_read(¤t->mm->mmap_sem); | 401 | return rc; |
| 397 | if (rc >= 0) | 402 | start += PAGE_SIZE; |
| 398 | start += PAGE_SIZE; | ||
| 399 | } | 403 | } |
| 400 | 404 | ||
| 401 | if (m3 & (SSKE_MC | SSKE_MR)) { | 405 | if (m3 & (SSKE_MC | SSKE_MR)) { |
| @@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
| 1002 | FAULT_FLAG_WRITE, &unlocked); | 1006 | FAULT_FLAG_WRITE, &unlocked); |
| 1003 | rc = !rc ? -EAGAIN : rc; | 1007 | rc = !rc ? -EAGAIN : rc; |
| 1004 | } | 1008 | } |
| 1009 | up_read(¤t->mm->mmap_sem); | ||
| 1005 | if (rc == -EFAULT) | 1010 | if (rc == -EFAULT) |
| 1006 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 1011 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
| 1007 | 1012 | if (rc == -EAGAIN) | |
| 1008 | up_read(¤t->mm->mmap_sem); | 1013 | continue; |
| 1009 | if (rc >= 0) | 1014 | if (rc < 0) |
| 1010 | start += PAGE_SIZE; | 1015 | return rc; |
| 1011 | } | 1016 | } |
| 1017 | start += PAGE_SIZE; | ||
| 1012 | } | 1018 | } |
| 1013 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { | 1019 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { |
| 1014 | if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { | 1020 | if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { |
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 63844b95c22c..a2b28cd1e3fe 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c | |||
| @@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
| 173 | return set_validity_icpt(scb_s, 0x0039U); | 173 | return set_validity_icpt(scb_s, 0x0039U); |
| 174 | 174 | ||
| 175 | /* copy only the wrapping keys */ | 175 | /* copy only the wrapping keys */ |
| 176 | if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56)) | 176 | if (read_guest_real(vcpu, crycb_addr + 72, |
| 177 | vsie_page->crycb.dea_wrapping_key_mask, 56)) | ||
| 177 | return set_validity_icpt(scb_s, 0x0035U); | 178 | return set_validity_icpt(scb_s, 0x0035U); |
| 178 | 179 | ||
| 179 | scb_s->ecb3 |= ecb3_flags; | 180 | scb_s->ecb3 |= ecb3_flags; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c5ff296bc5d1..1a0be022f91d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -2843,7 +2843,7 @@ config X86_SYSFB | |||
| 2843 | This option, if enabled, marks VGA/VBE/EFI framebuffers as generic | 2843 | This option, if enabled, marks VGA/VBE/EFI framebuffers as generic |
| 2844 | framebuffers so the new generic system-framebuffer drivers can be | 2844 | framebuffers so the new generic system-framebuffer drivers can be |
| 2845 | used on x86. If the framebuffer is not compatible with the generic | 2845 | used on x86. If the framebuffer is not compatible with the generic |
| 2846 | modes, it is adverticed as fallback platform framebuffer so legacy | 2846 | modes, it is advertised as fallback platform framebuffer so legacy |
| 2847 | drivers like efifb, vesafb and uvesafb can pick it up. | 2847 | drivers like efifb, vesafb and uvesafb can pick it up. |
| 2848 | If this option is not selected, all system framebuffers are always | 2848 | If this option is not selected, all system framebuffers are always |
| 2849 | marked as fallback platform framebuffers as usual. | 2849 | marked as fallback platform framebuffers as usual. |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94859241bc3e..8f6e7eb8ae9f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
| @@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER | |||
| 175 | endif | 175 | endif |
| 176 | endif | 176 | endif |
| 177 | 177 | ||
| 178 | ifndef CC_HAVE_ASM_GOTO | ||
| 179 | $(error Compiler lacks asm-goto support.) | ||
| 180 | endif | ||
| 181 | |||
| 182 | # | ||
| 183 | # Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a | ||
| 184 | # GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way | ||
| 185 | # to test for this bug at compile-time because the test case needs to execute, | ||
| 186 | # which is a no-go for cross compilers. So check the GCC version instead. | ||
| 187 | # | ||
| 188 | ifdef CONFIG_JUMP_LABEL | ||
| 189 | ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1) | ||
| 190 | ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1) | ||
| 191 | endif | ||
| 192 | endif | ||
| 193 | |||
| 194 | ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) | 178 | ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) |
| 195 | # This compiler flag is not supported by Clang: | 179 | # This compiler flag is not supported by Clang: |
| 196 | KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) | 180 | KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) |
| @@ -312,6 +296,13 @@ PHONY += vdso_install | |||
| 312 | vdso_install: | 296 | vdso_install: |
| 313 | $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ | 297 | $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ |
| 314 | 298 | ||
| 299 | archprepare: checkbin | ||
| 300 | checkbin: | ||
| 301 | ifndef CC_HAVE_ASM_GOTO | ||
| 302 | @echo Compiler lacks asm-goto support. | ||
| 303 | @exit 1 | ||
| 304 | endif | ||
| 305 | |||
| 315 | archclean: | 306 | archclean: |
| 316 | $(Q)rm -rf $(objtree)/arch/i386 | 307 | $(Q)rm -rf $(objtree)/arch/i386 |
| 317 | $(Q)rm -rf $(objtree)/arch/x86_64 | 308 | $(Q)rm -rf $(objtree)/arch/x86_64 |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 9bd139569b41..cb2deb61c5d9 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
| @@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff | |||
| 223 | pcmpeqd TWOONE(%rip), \TMP2 | 223 | pcmpeqd TWOONE(%rip), \TMP2 |
| 224 | pand POLY(%rip), \TMP2 | 224 | pand POLY(%rip), \TMP2 |
| 225 | pxor \TMP2, \TMP3 | 225 | pxor \TMP2, \TMP3 |
| 226 | movdqa \TMP3, HashKey(%arg2) | 226 | movdqu \TMP3, HashKey(%arg2) |
| 227 | 227 | ||
| 228 | movdqa \TMP3, \TMP5 | 228 | movdqa \TMP3, \TMP5 |
| 229 | pshufd $78, \TMP3, \TMP1 | 229 | pshufd $78, \TMP3, \TMP1 |
| 230 | pxor \TMP3, \TMP1 | 230 | pxor \TMP3, \TMP1 |
| 231 | movdqa \TMP1, HashKey_k(%arg2) | 231 | movdqu \TMP1, HashKey_k(%arg2) |
| 232 | 232 | ||
| 233 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 233 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
| 234 | # TMP5 = HashKey^2<<1 (mod poly) | 234 | # TMP5 = HashKey^2<<1 (mod poly) |
| 235 | movdqa \TMP5, HashKey_2(%arg2) | 235 | movdqu \TMP5, HashKey_2(%arg2) |
| 236 | # HashKey_2 = HashKey^2<<1 (mod poly) | 236 | # HashKey_2 = HashKey^2<<1 (mod poly) |
| 237 | pshufd $78, \TMP5, \TMP1 | 237 | pshufd $78, \TMP5, \TMP1 |
| 238 | pxor \TMP5, \TMP1 | 238 | pxor \TMP5, \TMP1 |
| 239 | movdqa \TMP1, HashKey_2_k(%arg2) | 239 | movdqu \TMP1, HashKey_2_k(%arg2) |
| 240 | 240 | ||
| 241 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 241 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
| 242 | # TMP5 = HashKey^3<<1 (mod poly) | 242 | # TMP5 = HashKey^3<<1 (mod poly) |
| 243 | movdqa \TMP5, HashKey_3(%arg2) | 243 | movdqu \TMP5, HashKey_3(%arg2) |
| 244 | pshufd $78, \TMP5, \TMP1 | 244 | pshufd $78, \TMP5, \TMP1 |
| 245 | pxor \TMP5, \TMP1 | 245 | pxor \TMP5, \TMP1 |
| 246 | movdqa \TMP1, HashKey_3_k(%arg2) | 246 | movdqu \TMP1, HashKey_3_k(%arg2) |
| 247 | 247 | ||
| 248 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 | 248 | GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 |
| 249 | # TMP5 = HashKey^3<<1 (mod poly) | 249 | # TMP5 = HashKey^3<<1 (mod poly) |
| 250 | movdqa \TMP5, HashKey_4(%arg2) | 250 | movdqu \TMP5, HashKey_4(%arg2) |
| 251 | pshufd $78, \TMP5, \TMP1 | 251 | pshufd $78, \TMP5, \TMP1 |
| 252 | pxor \TMP5, \TMP1 | 252 | pxor \TMP5, \TMP1 |
| 253 | movdqa \TMP1, HashKey_4_k(%arg2) | 253 | movdqu \TMP1, HashKey_4_k(%arg2) |
| 254 | .endm | 254 | .endm |
| 255 | 255 | ||
| 256 | # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. | 256 | # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. |
| @@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff | |||
| 271 | movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv | 271 | movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv |
| 272 | 272 | ||
| 273 | PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, | 273 | PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, |
| 274 | movdqa HashKey(%arg2), %xmm13 | 274 | movdqu HashKey(%arg2), %xmm13 |
| 275 | 275 | ||
| 276 | CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ | 276 | CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ |
| 277 | %xmm4, %xmm5, %xmm6 | 277 | %xmm4, %xmm5, %xmm6 |
| @@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 997 | pshufd $78, \XMM5, \TMP6 | 997 | pshufd $78, \XMM5, \TMP6 |
| 998 | pxor \XMM5, \TMP6 | 998 | pxor \XMM5, \TMP6 |
| 999 | paddd ONE(%rip), \XMM0 # INCR CNT | 999 | paddd ONE(%rip), \XMM0 # INCR CNT |
| 1000 | movdqa HashKey_4(%arg2), \TMP5 | 1000 | movdqu HashKey_4(%arg2), \TMP5 |
| 1001 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 | 1001 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 |
| 1002 | movdqa \XMM0, \XMM1 | 1002 | movdqa \XMM0, \XMM1 |
| 1003 | paddd ONE(%rip), \XMM0 # INCR CNT | 1003 | paddd ONE(%rip), \XMM0 # INCR CNT |
| @@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1016 | pxor (%arg1), \XMM2 | 1016 | pxor (%arg1), \XMM2 |
| 1017 | pxor (%arg1), \XMM3 | 1017 | pxor (%arg1), \XMM3 |
| 1018 | pxor (%arg1), \XMM4 | 1018 | pxor (%arg1), \XMM4 |
| 1019 | movdqa HashKey_4_k(%arg2), \TMP5 | 1019 | movdqu HashKey_4_k(%arg2), \TMP5 |
| 1020 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) | 1020 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) |
| 1021 | movaps 0x10(%arg1), \TMP1 | 1021 | movaps 0x10(%arg1), \TMP1 |
| 1022 | AESENC \TMP1, \XMM1 # Round 1 | 1022 | AESENC \TMP1, \XMM1 # Round 1 |
| @@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1031 | movdqa \XMM6, \TMP1 | 1031 | movdqa \XMM6, \TMP1 |
| 1032 | pshufd $78, \XMM6, \TMP2 | 1032 | pshufd $78, \XMM6, \TMP2 |
| 1033 | pxor \XMM6, \TMP2 | 1033 | pxor \XMM6, \TMP2 |
| 1034 | movdqa HashKey_3(%arg2), \TMP5 | 1034 | movdqu HashKey_3(%arg2), \TMP5 |
| 1035 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 | 1035 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 |
| 1036 | movaps 0x30(%arg1), \TMP3 | 1036 | movaps 0x30(%arg1), \TMP3 |
| 1037 | AESENC \TMP3, \XMM1 # Round 3 | 1037 | AESENC \TMP3, \XMM1 # Round 3 |
| @@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1044 | AESENC \TMP3, \XMM2 | 1044 | AESENC \TMP3, \XMM2 |
| 1045 | AESENC \TMP3, \XMM3 | 1045 | AESENC \TMP3, \XMM3 |
| 1046 | AESENC \TMP3, \XMM4 | 1046 | AESENC \TMP3, \XMM4 |
| 1047 | movdqa HashKey_3_k(%arg2), \TMP5 | 1047 | movdqu HashKey_3_k(%arg2), \TMP5 |
| 1048 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1048 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1049 | movaps 0x50(%arg1), \TMP3 | 1049 | movaps 0x50(%arg1), \TMP3 |
| 1050 | AESENC \TMP3, \XMM1 # Round 5 | 1050 | AESENC \TMP3, \XMM1 # Round 5 |
| @@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1058 | movdqa \XMM7, \TMP1 | 1058 | movdqa \XMM7, \TMP1 |
| 1059 | pshufd $78, \XMM7, \TMP2 | 1059 | pshufd $78, \XMM7, \TMP2 |
| 1060 | pxor \XMM7, \TMP2 | 1060 | pxor \XMM7, \TMP2 |
| 1061 | movdqa HashKey_2(%arg2), \TMP5 | 1061 | movdqu HashKey_2(%arg2), \TMP5 |
| 1062 | 1062 | ||
| 1063 | # Multiply TMP5 * HashKey using karatsuba | 1063 | # Multiply TMP5 * HashKey using karatsuba |
| 1064 | 1064 | ||
| @@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1074 | AESENC \TMP3, \XMM2 | 1074 | AESENC \TMP3, \XMM2 |
| 1075 | AESENC \TMP3, \XMM3 | 1075 | AESENC \TMP3, \XMM3 |
| 1076 | AESENC \TMP3, \XMM4 | 1076 | AESENC \TMP3, \XMM4 |
| 1077 | movdqa HashKey_2_k(%arg2), \TMP5 | 1077 | movdqu HashKey_2_k(%arg2), \TMP5 |
| 1078 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1078 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1079 | movaps 0x80(%arg1), \TMP3 | 1079 | movaps 0x80(%arg1), \TMP3 |
| 1080 | AESENC \TMP3, \XMM1 # Round 8 | 1080 | AESENC \TMP3, \XMM1 # Round 8 |
| @@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1092 | movdqa \XMM8, \TMP1 | 1092 | movdqa \XMM8, \TMP1 |
| 1093 | pshufd $78, \XMM8, \TMP2 | 1093 | pshufd $78, \XMM8, \TMP2 |
| 1094 | pxor \XMM8, \TMP2 | 1094 | pxor \XMM8, \TMP2 |
| 1095 | movdqa HashKey(%arg2), \TMP5 | 1095 | movdqu HashKey(%arg2), \TMP5 |
| 1096 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1096 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
| 1097 | movaps 0x90(%arg1), \TMP3 | 1097 | movaps 0x90(%arg1), \TMP3 |
| 1098 | AESENC \TMP3, \XMM1 # Round 9 | 1098 | AESENC \TMP3, \XMM1 # Round 9 |
| @@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@: | |||
| 1121 | AESENCLAST \TMP3, \XMM2 | 1121 | AESENCLAST \TMP3, \XMM2 |
| 1122 | AESENCLAST \TMP3, \XMM3 | 1122 | AESENCLAST \TMP3, \XMM3 |
| 1123 | AESENCLAST \TMP3, \XMM4 | 1123 | AESENCLAST \TMP3, \XMM4 |
| 1124 | movdqa HashKey_k(%arg2), \TMP5 | 1124 | movdqu HashKey_k(%arg2), \TMP5 |
| 1125 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1125 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1126 | movdqu (%arg4,%r11,1), \TMP3 | 1126 | movdqu (%arg4,%r11,1), \TMP3 |
| 1127 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK | 1127 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK |
| @@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1205 | pshufd $78, \XMM5, \TMP6 | 1205 | pshufd $78, \XMM5, \TMP6 |
| 1206 | pxor \XMM5, \TMP6 | 1206 | pxor \XMM5, \TMP6 |
| 1207 | paddd ONE(%rip), \XMM0 # INCR CNT | 1207 | paddd ONE(%rip), \XMM0 # INCR CNT |
| 1208 | movdqa HashKey_4(%arg2), \TMP5 | 1208 | movdqu HashKey_4(%arg2), \TMP5 |
| 1209 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 | 1209 | PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 |
| 1210 | movdqa \XMM0, \XMM1 | 1210 | movdqa \XMM0, \XMM1 |
| 1211 | paddd ONE(%rip), \XMM0 # INCR CNT | 1211 | paddd ONE(%rip), \XMM0 # INCR CNT |
| @@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1224 | pxor (%arg1), \XMM2 | 1224 | pxor (%arg1), \XMM2 |
| 1225 | pxor (%arg1), \XMM3 | 1225 | pxor (%arg1), \XMM3 |
| 1226 | pxor (%arg1), \XMM4 | 1226 | pxor (%arg1), \XMM4 |
| 1227 | movdqa HashKey_4_k(%arg2), \TMP5 | 1227 | movdqu HashKey_4_k(%arg2), \TMP5 |
| 1228 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) | 1228 | PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) |
| 1229 | movaps 0x10(%arg1), \TMP1 | 1229 | movaps 0x10(%arg1), \TMP1 |
| 1230 | AESENC \TMP1, \XMM1 # Round 1 | 1230 | AESENC \TMP1, \XMM1 # Round 1 |
| @@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1239 | movdqa \XMM6, \TMP1 | 1239 | movdqa \XMM6, \TMP1 |
| 1240 | pshufd $78, \XMM6, \TMP2 | 1240 | pshufd $78, \XMM6, \TMP2 |
| 1241 | pxor \XMM6, \TMP2 | 1241 | pxor \XMM6, \TMP2 |
| 1242 | movdqa HashKey_3(%arg2), \TMP5 | 1242 | movdqu HashKey_3(%arg2), \TMP5 |
| 1243 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 | 1243 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 |
| 1244 | movaps 0x30(%arg1), \TMP3 | 1244 | movaps 0x30(%arg1), \TMP3 |
| 1245 | AESENC \TMP3, \XMM1 # Round 3 | 1245 | AESENC \TMP3, \XMM1 # Round 3 |
| @@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1252 | AESENC \TMP3, \XMM2 | 1252 | AESENC \TMP3, \XMM2 |
| 1253 | AESENC \TMP3, \XMM3 | 1253 | AESENC \TMP3, \XMM3 |
| 1254 | AESENC \TMP3, \XMM4 | 1254 | AESENC \TMP3, \XMM4 |
| 1255 | movdqa HashKey_3_k(%arg2), \TMP5 | 1255 | movdqu HashKey_3_k(%arg2), \TMP5 |
| 1256 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1256 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1257 | movaps 0x50(%arg1), \TMP3 | 1257 | movaps 0x50(%arg1), \TMP3 |
| 1258 | AESENC \TMP3, \XMM1 # Round 5 | 1258 | AESENC \TMP3, \XMM1 # Round 5 |
| @@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1266 | movdqa \XMM7, \TMP1 | 1266 | movdqa \XMM7, \TMP1 |
| 1267 | pshufd $78, \XMM7, \TMP2 | 1267 | pshufd $78, \XMM7, \TMP2 |
| 1268 | pxor \XMM7, \TMP2 | 1268 | pxor \XMM7, \TMP2 |
| 1269 | movdqa HashKey_2(%arg2), \TMP5 | 1269 | movdqu HashKey_2(%arg2), \TMP5 |
| 1270 | 1270 | ||
| 1271 | # Multiply TMP5 * HashKey using karatsuba | 1271 | # Multiply TMP5 * HashKey using karatsuba |
| 1272 | 1272 | ||
| @@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1282 | AESENC \TMP3, \XMM2 | 1282 | AESENC \TMP3, \XMM2 |
| 1283 | AESENC \TMP3, \XMM3 | 1283 | AESENC \TMP3, \XMM3 |
| 1284 | AESENC \TMP3, \XMM4 | 1284 | AESENC \TMP3, \XMM4 |
| 1285 | movdqa HashKey_2_k(%arg2), \TMP5 | 1285 | movdqu HashKey_2_k(%arg2), \TMP5 |
| 1286 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1286 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1287 | movaps 0x80(%arg1), \TMP3 | 1287 | movaps 0x80(%arg1), \TMP3 |
| 1288 | AESENC \TMP3, \XMM1 # Round 8 | 1288 | AESENC \TMP3, \XMM1 # Round 8 |
| @@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation | |||
| 1300 | movdqa \XMM8, \TMP1 | 1300 | movdqa \XMM8, \TMP1 |
| 1301 | pshufd $78, \XMM8, \TMP2 | 1301 | pshufd $78, \XMM8, \TMP2 |
| 1302 | pxor \XMM8, \TMP2 | 1302 | pxor \XMM8, \TMP2 |
| 1303 | movdqa HashKey(%arg2), \TMP5 | 1303 | movdqu HashKey(%arg2), \TMP5 |
| 1304 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1304 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
| 1305 | movaps 0x90(%arg1), \TMP3 | 1305 | movaps 0x90(%arg1), \TMP3 |
| 1306 | AESENC \TMP3, \XMM1 # Round 9 | 1306 | AESENC \TMP3, \XMM1 # Round 9 |
| @@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@: | |||
| 1329 | AESENCLAST \TMP3, \XMM2 | 1329 | AESENCLAST \TMP3, \XMM2 |
| 1330 | AESENCLAST \TMP3, \XMM3 | 1330 | AESENCLAST \TMP3, \XMM3 |
| 1331 | AESENCLAST \TMP3, \XMM4 | 1331 | AESENCLAST \TMP3, \XMM4 |
| 1332 | movdqa HashKey_k(%arg2), \TMP5 | 1332 | movdqu HashKey_k(%arg2), \TMP5 |
| 1333 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1333 | PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1334 | movdqu (%arg4,%r11,1), \TMP3 | 1334 | movdqu (%arg4,%r11,1), \TMP3 |
| 1335 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK | 1335 | pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK |
| @@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
| 1405 | movdqa \XMM1, \TMP6 | 1405 | movdqa \XMM1, \TMP6 |
| 1406 | pshufd $78, \XMM1, \TMP2 | 1406 | pshufd $78, \XMM1, \TMP2 |
| 1407 | pxor \XMM1, \TMP2 | 1407 | pxor \XMM1, \TMP2 |
| 1408 | movdqa HashKey_4(%arg2), \TMP5 | 1408 | movdqu HashKey_4(%arg2), \TMP5 |
| 1409 | PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 | 1409 | PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 |
| 1410 | PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 | 1410 | PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 |
| 1411 | movdqa HashKey_4_k(%arg2), \TMP4 | 1411 | movdqu HashKey_4_k(%arg2), \TMP4 |
| 1412 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1412 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1413 | movdqa \XMM1, \XMMDst | 1413 | movdqa \XMM1, \XMMDst |
| 1414 | movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 | 1414 | movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 |
| @@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
| 1418 | movdqa \XMM2, \TMP1 | 1418 | movdqa \XMM2, \TMP1 |
| 1419 | pshufd $78, \XMM2, \TMP2 | 1419 | pshufd $78, \XMM2, \TMP2 |
| 1420 | pxor \XMM2, \TMP2 | 1420 | pxor \XMM2, \TMP2 |
| 1421 | movdqa HashKey_3(%arg2), \TMP5 | 1421 | movdqu HashKey_3(%arg2), \TMP5 |
| 1422 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1422 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
| 1423 | PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 | 1423 | PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 |
| 1424 | movdqa HashKey_3_k(%arg2), \TMP4 | 1424 | movdqu HashKey_3_k(%arg2), \TMP4 |
| 1425 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1425 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1426 | pxor \TMP1, \TMP6 | 1426 | pxor \TMP1, \TMP6 |
| 1427 | pxor \XMM2, \XMMDst | 1427 | pxor \XMM2, \XMMDst |
| @@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
| 1433 | movdqa \XMM3, \TMP1 | 1433 | movdqa \XMM3, \TMP1 |
| 1434 | pshufd $78, \XMM3, \TMP2 | 1434 | pshufd $78, \XMM3, \TMP2 |
| 1435 | pxor \XMM3, \TMP2 | 1435 | pxor \XMM3, \TMP2 |
| 1436 | movdqa HashKey_2(%arg2), \TMP5 | 1436 | movdqu HashKey_2(%arg2), \TMP5 |
| 1437 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1437 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
| 1438 | PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 | 1438 | PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 |
| 1439 | movdqa HashKey_2_k(%arg2), \TMP4 | 1439 | movdqu HashKey_2_k(%arg2), \TMP4 |
| 1440 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1440 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1441 | pxor \TMP1, \TMP6 | 1441 | pxor \TMP1, \TMP6 |
| 1442 | pxor \XMM3, \XMMDst | 1442 | pxor \XMM3, \XMMDst |
| @@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst | |||
| 1446 | movdqa \XMM4, \TMP1 | 1446 | movdqa \XMM4, \TMP1 |
| 1447 | pshufd $78, \XMM4, \TMP2 | 1447 | pshufd $78, \XMM4, \TMP2 |
| 1448 | pxor \XMM4, \TMP2 | 1448 | pxor \XMM4, \TMP2 |
| 1449 | movdqa HashKey(%arg2), \TMP5 | 1449 | movdqu HashKey(%arg2), \TMP5 |
| 1450 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 | 1450 | PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 |
| 1451 | PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 | 1451 | PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 |
| 1452 | movdqa HashKey_k(%arg2), \TMP4 | 1452 | movdqu HashKey_k(%arg2), \TMP4 |
| 1453 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) | 1453 | PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) |
| 1454 | pxor \TMP1, \TMP6 | 1454 | pxor \TMP1, \TMP6 |
| 1455 | pxor \XMM4, \XMMDst | 1455 | pxor \XMM4, \XMMDst |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 5f4829f10129..dfb2f7c0d019 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
| @@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs | |||
| 2465 | 2465 | ||
| 2466 | perf_callchain_store(entry, regs->ip); | 2466 | perf_callchain_store(entry, regs->ip); |
| 2467 | 2467 | ||
| 2468 | if (!current->mm) | 2468 | if (!nmi_uaccess_okay()) |
| 2469 | return; | 2469 | return; |
| 2470 | 2470 | ||
| 2471 | if (perf_callchain_user32(regs, entry)) | 2471 | if (perf_callchain_user32(regs, entry)) |
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c14f2a74b2be..15450a675031 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
| @@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void) | |||
| 33 | return flags; | 33 | return flags; |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | static inline void native_restore_fl(unsigned long flags) | 36 | extern inline void native_restore_fl(unsigned long flags); |
| 37 | extern inline void native_restore_fl(unsigned long flags) | ||
| 37 | { | 38 | { |
| 38 | asm volatile("push %0 ; popf" | 39 | asm volatile("push %0 ; popf" |
| 39 | : /* no output */ | 40 | : /* no output */ |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e6a33420b871..3ad10f634d4c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -1237,19 +1237,12 @@ enum emulation_result { | |||
| 1237 | #define EMULTYPE_NO_DECODE (1 << 0) | 1237 | #define EMULTYPE_NO_DECODE (1 << 0) |
| 1238 | #define EMULTYPE_TRAP_UD (1 << 1) | 1238 | #define EMULTYPE_TRAP_UD (1 << 1) |
| 1239 | #define EMULTYPE_SKIP (1 << 2) | 1239 | #define EMULTYPE_SKIP (1 << 2) |
| 1240 | #define EMULTYPE_RETRY (1 << 3) | 1240 | #define EMULTYPE_ALLOW_RETRY (1 << 3) |
| 1241 | #define EMULTYPE_NO_REEXECUTE (1 << 4) | 1241 | #define EMULTYPE_NO_UD_ON_FAIL (1 << 4) |
| 1242 | #define EMULTYPE_NO_UD_ON_FAIL (1 << 5) | 1242 | #define EMULTYPE_VMWARE (1 << 5) |
| 1243 | #define EMULTYPE_VMWARE (1 << 6) | 1243 | int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); |
| 1244 | int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, | 1244 | int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, |
| 1245 | int emulation_type, void *insn, int insn_len); | 1245 | void *insn, int insn_len); |
| 1246 | |||
| 1247 | static inline int emulate_instruction(struct kvm_vcpu *vcpu, | ||
| 1248 | int emulation_type) | ||
| 1249 | { | ||
| 1250 | return x86_emulate_instruction(vcpu, 0, | ||
| 1251 | emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); | ||
| 1252 | } | ||
| 1253 | 1246 | ||
| 1254 | void kvm_enable_efer_bits(u64); | 1247 | void kvm_enable_efer_bits(u64); |
| 1255 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); | 1248 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); |
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index a564084c6141..f8b1ad2c3828 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
| @@ -2,6 +2,8 @@ | |||
| 2 | #ifndef _ASM_X86_PGTABLE_3LEVEL_H | 2 | #ifndef _ASM_X86_PGTABLE_3LEVEL_H |
| 3 | #define _ASM_X86_PGTABLE_3LEVEL_H | 3 | #define _ASM_X86_PGTABLE_3LEVEL_H |
| 4 | 4 | ||
| 5 | #include <asm/atomic64_32.h> | ||
| 6 | |||
| 5 | /* | 7 | /* |
| 6 | * Intel Physical Address Extension (PAE) Mode - three-level page | 8 | * Intel Physical Address Extension (PAE) Mode - three-level page |
| 7 | * tables on PPro+ CPUs. | 9 | * tables on PPro+ CPUs. |
| @@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep) | |||
| 150 | { | 152 | { |
| 151 | pte_t res; | 153 | pte_t res; |
| 152 | 154 | ||
| 153 | /* xchg acts as a barrier before the setting of the high bits */ | 155 | res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0); |
| 154 | res.pte_low = xchg(&ptep->pte_low, 0); | ||
| 155 | res.pte_high = ptep->pte_high; | ||
| 156 | ptep->pte_high = 0; | ||
| 157 | 156 | ||
| 158 | return res; | 157 | return res; |
| 159 | } | 158 | } |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c24297268ebc..d53c54b842da 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
| @@ -132,6 +132,8 @@ struct cpuinfo_x86 { | |||
| 132 | /* Index into per_cpu list: */ | 132 | /* Index into per_cpu list: */ |
| 133 | u16 cpu_index; | 133 | u16 cpu_index; |
| 134 | u32 microcode; | 134 | u32 microcode; |
| 135 | /* Address space bits used by the cache internally */ | ||
| 136 | u8 x86_cache_bits; | ||
| 135 | unsigned initialized : 1; | 137 | unsigned initialized : 1; |
| 136 | } __randomize_layout; | 138 | } __randomize_layout; |
| 137 | 139 | ||
| @@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c); | |||
| 183 | 185 | ||
| 184 | static inline unsigned long long l1tf_pfn_limit(void) | 186 | static inline unsigned long long l1tf_pfn_limit(void) |
| 185 | { | 187 | { |
| 186 | return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT); | 188 | return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); |
| 187 | } | 189 | } |
| 188 | 190 | ||
| 189 | extern void early_cpu_init(void); | 191 | extern void early_cpu_init(void); |
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 5f9012ff52ed..33d3c88a7225 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h | |||
| @@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs); | |||
| 39 | 39 | ||
| 40 | #define __ARCH_HAS_SA_RESTORER | 40 | #define __ARCH_HAS_SA_RESTORER |
| 41 | 41 | ||
| 42 | #include <asm/asm.h> | ||
| 42 | #include <uapi/asm/sigcontext.h> | 43 | #include <uapi/asm/sigcontext.h> |
| 43 | 44 | ||
| 44 | #ifdef __i386__ | 45 | #ifdef __i386__ |
| @@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig) | |||
| 86 | 87 | ||
| 87 | static inline int __gen_sigismember(sigset_t *set, int _sig) | 88 | static inline int __gen_sigismember(sigset_t *set, int _sig) |
| 88 | { | 89 | { |
| 89 | unsigned char ret; | 90 | bool ret; |
| 90 | asm("btl %2,%1\n\tsetc %0" | 91 | asm("btl %2,%1" CC_SET(c) |
| 91 | : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); | 92 | : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1)); |
| 92 | return ret; | 93 | return ret; |
| 93 | } | 94 | } |
| 94 | 95 | ||
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index b6dc698f992a..f335aad404a4 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h | |||
| @@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void) | |||
| 111 | return (unsigned long)frame; | 111 | return (unsigned long)frame; |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | void show_opcodes(u8 *rip, const char *loglvl); | 114 | void show_opcodes(struct pt_regs *regs, const char *loglvl); |
| 115 | void show_ip(struct pt_regs *regs, const char *loglvl); | 115 | void show_ip(struct pt_regs *regs, const char *loglvl); |
| 116 | #endif /* _ASM_X86_STACKTRACE_H */ | 116 | #endif /* _ASM_X86_STACKTRACE_H */ |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 29c9da6c62fc..58ce5288878e 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
| @@ -175,8 +175,16 @@ struct tlb_state { | |||
| 175 | * are on. This means that it may not match current->active_mm, | 175 | * are on. This means that it may not match current->active_mm, |
| 176 | * which will contain the previous user mm when we're in lazy TLB | 176 | * which will contain the previous user mm when we're in lazy TLB |
| 177 | * mode even if we've already switched back to swapper_pg_dir. | 177 | * mode even if we've already switched back to swapper_pg_dir. |
| 178 | * | ||
| 179 | * During switch_mm_irqs_off(), loaded_mm will be set to | ||
| 180 | * LOADED_MM_SWITCHING during the brief interrupts-off window | ||
| 181 | * when CR3 and loaded_mm would otherwise be inconsistent. This | ||
| 182 | * is for nmi_uaccess_okay()'s benefit. | ||
| 178 | */ | 183 | */ |
| 179 | struct mm_struct *loaded_mm; | 184 | struct mm_struct *loaded_mm; |
| 185 | |||
| 186 | #define LOADED_MM_SWITCHING ((struct mm_struct *)1) | ||
| 187 | |||
| 180 | u16 loaded_mm_asid; | 188 | u16 loaded_mm_asid; |
| 181 | u16 next_asid; | 189 | u16 next_asid; |
| 182 | /* last user mm's ctx id */ | 190 | /* last user mm's ctx id */ |
| @@ -246,6 +254,38 @@ struct tlb_state { | |||
| 246 | }; | 254 | }; |
| 247 | DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); | 255 | DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); |
| 248 | 256 | ||
| 257 | /* | ||
| 258 | * Blindly accessing user memory from NMI context can be dangerous | ||
| 259 | * if we're in the middle of switching the current user task or | ||
| 260 | * switching the loaded mm. It can also be dangerous if we | ||
| 261 | * interrupted some kernel code that was temporarily using a | ||
| 262 | * different mm. | ||
| 263 | */ | ||
| 264 | static inline bool nmi_uaccess_okay(void) | ||
| 265 | { | ||
| 266 | struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); | ||
| 267 | struct mm_struct *current_mm = current->mm; | ||
| 268 | |||
| 269 | VM_WARN_ON_ONCE(!loaded_mm); | ||
| 270 | |||
| 271 | /* | ||
| 272 | * The condition we want to check is | ||
| 273 | * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, | ||
| 274 | * if we're running in a VM with shadow paging, and nmi_uaccess_okay() | ||
| 275 | * is supposed to be reasonably fast. | ||
| 276 | * | ||
| 277 | * Instead, we check the almost equivalent but somewhat conservative | ||
| 278 | * condition below, and we rely on the fact that switch_mm_irqs_off() | ||
| 279 | * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. | ||
| 280 | */ | ||
| 281 | if (loaded_mm != current_mm) | ||
| 282 | return false; | ||
| 283 | |||
| 284 | VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); | ||
| 285 | |||
| 286 | return true; | ||
| 287 | } | ||
| 288 | |||
| 249 | /* Initialize cr4 shadow for this CPU. */ | 289 | /* Initialize cr4 shadow for this CPU. */ |
| 250 | static inline void cr4_init_shadow(void) | 290 | static inline void cr4_init_shadow(void) |
| 251 | { | 291 | { |
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index fb856c9f0449..53748541c487 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
| @@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void) | |||
| 93 | * | 93 | * |
| 94 | * If RDPID is available, use it. | 94 | * If RDPID is available, use it. |
| 95 | */ | 95 | */ |
| 96 | alternative_io ("lsl %[p],%[seg]", | 96 | alternative_io ("lsl %[seg],%[p]", |
| 97 | ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ | 97 | ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ |
| 98 | X86_FEATURE_RDPID, | 98 | X86_FEATURE_RDPID, |
| 99 | [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); | 99 | [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 014f214da581..b9d5e7c9ef43 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
| @@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, | |||
| 684 | * It means the size must be writable atomically and the address must be aligned | 684 | * It means the size must be writable atomically and the address must be aligned |
| 685 | * in a way that permits an atomic write. It also makes sure we fit on a single | 685 | * in a way that permits an atomic write. It also makes sure we fit on a single |
| 686 | * page. | 686 | * page. |
| 687 | * | ||
| 688 | * Note: Must be called under text_mutex. | ||
| 689 | */ | 687 | */ |
| 690 | void *text_poke(void *addr, const void *opcode, size_t len) | 688 | void *text_poke(void *addr, const void *opcode, size_t len) |
| 691 | { | 689 | { |
| @@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len) | |||
| 700 | */ | 698 | */ |
| 701 | BUG_ON(!after_bootmem); | 699 | BUG_ON(!after_bootmem); |
| 702 | 700 | ||
| 701 | lockdep_assert_held(&text_mutex); | ||
| 702 | |||
| 703 | if (!core_kernel_text((unsigned long)addr)) { | 703 | if (!core_kernel_text((unsigned long)addr)) { |
| 704 | pages[0] = vmalloc_to_page(addr); | 704 | pages[0] = vmalloc_to_page(addr); |
| 705 | pages[1] = vmalloc_to_page(addr + PAGE_SIZE); | 705 | pages[1] = vmalloc_to_page(addr + PAGE_SIZE); |
| @@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs) | |||
| 782 | * - replace the first byte (int3) by the first byte of | 782 | * - replace the first byte (int3) by the first byte of |
| 783 | * replacing opcode | 783 | * replacing opcode |
| 784 | * - sync cores | 784 | * - sync cores |
| 785 | * | ||
| 786 | * Note: must be called under text_mutex. | ||
| 787 | */ | 785 | */ |
| 788 | void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) | 786 | void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) |
| 789 | { | 787 | { |
| @@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) | |||
| 792 | bp_int3_handler = handler; | 790 | bp_int3_handler = handler; |
| 793 | bp_int3_addr = (u8 *)addr + sizeof(int3); | 791 | bp_int3_addr = (u8 *)addr + sizeof(int3); |
| 794 | bp_patching_in_progress = true; | 792 | bp_patching_in_progress = true; |
| 793 | |||
| 794 | lockdep_assert_held(&text_mutex); | ||
| 795 | |||
| 795 | /* | 796 | /* |
| 796 | * Corresponding read barrier in int3 notifier for making sure the | 797 | * Corresponding read barrier in int3 notifier for making sure the |
| 797 | * in_progress and handler are correctly ordered wrt. patching. | 798 | * in_progress and handler are correctly ordered wrt. patching. |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4c2313d0b9ca..40bdaea97fe7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
| @@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); | |||
| 668 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; | 668 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
| 669 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); | 669 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); |
| 670 | 670 | ||
| 671 | /* | ||
| 672 | * These CPUs all support 44bits physical address space internally in the | ||
| 673 | * cache but CPUID can report a smaller number of physical address bits. | ||
| 674 | * | ||
| 675 | * The L1TF mitigation uses the top most address bit for the inversion of | ||
| 676 | * non present PTEs. When the installed memory reaches into the top most | ||
| 677 | * address bit due to memory holes, which has been observed on machines | ||
| 678 | * which report 36bits physical address bits and have 32G RAM installed, | ||
| 679 | * then the mitigation range check in l1tf_select_mitigation() triggers. | ||
| 680 | * This is a false positive because the mitigation is still possible due to | ||
| 681 | * the fact that the cache uses 44bit internally. Use the cache bits | ||
| 682 | * instead of the reported physical bits and adjust them on the affected | ||
| 683 | * machines to 44bit if the reported bits are less than 44. | ||
| 684 | */ | ||
| 685 | static void override_cache_bits(struct cpuinfo_x86 *c) | ||
| 686 | { | ||
| 687 | if (c->x86 != 6) | ||
| 688 | return; | ||
| 689 | |||
| 690 | switch (c->x86_model) { | ||
| 691 | case INTEL_FAM6_NEHALEM: | ||
| 692 | case INTEL_FAM6_WESTMERE: | ||
| 693 | case INTEL_FAM6_SANDYBRIDGE: | ||
| 694 | case INTEL_FAM6_IVYBRIDGE: | ||
| 695 | case INTEL_FAM6_HASWELL_CORE: | ||
| 696 | case INTEL_FAM6_HASWELL_ULT: | ||
| 697 | case INTEL_FAM6_HASWELL_GT3E: | ||
| 698 | case INTEL_FAM6_BROADWELL_CORE: | ||
| 699 | case INTEL_FAM6_BROADWELL_GT3E: | ||
| 700 | case INTEL_FAM6_SKYLAKE_MOBILE: | ||
| 701 | case INTEL_FAM6_SKYLAKE_DESKTOP: | ||
| 702 | case INTEL_FAM6_KABYLAKE_MOBILE: | ||
| 703 | case INTEL_FAM6_KABYLAKE_DESKTOP: | ||
| 704 | if (c->x86_cache_bits < 44) | ||
| 705 | c->x86_cache_bits = 44; | ||
| 706 | break; | ||
| 707 | } | ||
| 708 | } | ||
| 709 | |||
| 671 | static void __init l1tf_select_mitigation(void) | 710 | static void __init l1tf_select_mitigation(void) |
| 672 | { | 711 | { |
| 673 | u64 half_pa; | 712 | u64 half_pa; |
| @@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void) | |||
| 675 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) | 714 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) |
| 676 | return; | 715 | return; |
| 677 | 716 | ||
| 717 | override_cache_bits(&boot_cpu_data); | ||
| 718 | |||
| 678 | switch (l1tf_mitigation) { | 719 | switch (l1tf_mitigation) { |
| 679 | case L1TF_MITIGATION_OFF: | 720 | case L1TF_MITIGATION_OFF: |
| 680 | case L1TF_MITIGATION_FLUSH_NOWARN: | 721 | case L1TF_MITIGATION_FLUSH_NOWARN: |
| @@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void) | |||
| 694 | return; | 735 | return; |
| 695 | #endif | 736 | #endif |
| 696 | 737 | ||
| 697 | /* | ||
| 698 | * This is extremely unlikely to happen because almost all | ||
| 699 | * systems have far more MAX_PA/2 than RAM can be fit into | ||
| 700 | * DIMM slots. | ||
| 701 | */ | ||
| 702 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; | 738 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; |
| 703 | if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { | 739 | if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { |
| 704 | pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); | 740 | pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 84dee5ab745a..44c4ef3d989b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c) | |||
| 919 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) | 919 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) |
| 920 | c->x86_phys_bits = 36; | 920 | c->x86_phys_bits = 36; |
| 921 | #endif | 921 | #endif |
| 922 | c->x86_cache_bits = c->x86_phys_bits; | ||
| 922 | } | 923 | } |
| 923 | 924 | ||
| 924 | static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) | 925 | static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 401e8c133108..fc3c07fe7df5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) | |||
| 150 | if (cpu_has(c, X86_FEATURE_HYPERVISOR)) | 150 | if (cpu_has(c, X86_FEATURE_HYPERVISOR)) |
| 151 | return false; | 151 | return false; |
| 152 | 152 | ||
| 153 | if (c->x86 != 6) | ||
| 154 | return false; | ||
| 155 | |||
| 153 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { | 156 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { |
| 154 | if (c->x86_model == spectre_bad_microcodes[i].model && | 157 | if (c->x86_model == spectre_bad_microcodes[i].model && |
| 155 | c->x86_stepping == spectre_bad_microcodes[i].stepping) | 158 | c->x86_stepping == spectre_bad_microcodes[i].stepping) |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 9c8652974f8e..f56895106ccf 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/bug.h> | 17 | #include <linux/bug.h> |
| 18 | #include <linux/nmi.h> | 18 | #include <linux/nmi.h> |
| 19 | #include <linux/sysfs.h> | 19 | #include <linux/sysfs.h> |
| 20 | #include <linux/kasan.h> | ||
| 20 | 21 | ||
| 21 | #include <asm/cpu_entry_area.h> | 22 | #include <asm/cpu_entry_area.h> |
| 22 | #include <asm/stacktrace.h> | 23 | #include <asm/stacktrace.h> |
| @@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable, | |||
| 89 | * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random | 90 | * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random |
| 90 | * guesstimate in attempt to achieve all of the above. | 91 | * guesstimate in attempt to achieve all of the above. |
| 91 | */ | 92 | */ |
| 92 | void show_opcodes(u8 *rip, const char *loglvl) | 93 | void show_opcodes(struct pt_regs *regs, const char *loglvl) |
| 93 | { | 94 | { |
| 94 | #define PROLOGUE_SIZE 42 | 95 | #define PROLOGUE_SIZE 42 |
| 95 | #define EPILOGUE_SIZE 21 | 96 | #define EPILOGUE_SIZE 21 |
| 96 | #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) | 97 | #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) |
| 97 | u8 opcodes[OPCODE_BUFSIZE]; | 98 | u8 opcodes[OPCODE_BUFSIZE]; |
| 99 | unsigned long prologue = regs->ip - PROLOGUE_SIZE; | ||
| 100 | bool bad_ip; | ||
| 98 | 101 | ||
| 99 | if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) { | 102 | /* |
| 103 | * Make sure userspace isn't trying to trick us into dumping kernel | ||
| 104 | * memory by pointing the userspace instruction pointer at it. | ||
| 105 | */ | ||
| 106 | bad_ip = user_mode(regs) && | ||
| 107 | __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); | ||
| 108 | |||
| 109 | if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue, | ||
| 110 | OPCODE_BUFSIZE)) { | ||
| 100 | printk("%sCode: Bad RIP value.\n", loglvl); | 111 | printk("%sCode: Bad RIP value.\n", loglvl); |
| 101 | } else { | 112 | } else { |
| 102 | printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" | 113 | printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" |
| @@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl) | |||
| 112 | #else | 123 | #else |
| 113 | printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); | 124 | printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); |
| 114 | #endif | 125 | #endif |
| 115 | show_opcodes((u8 *)regs->ip, loglvl); | 126 | show_opcodes(regs, loglvl); |
| 116 | } | 127 | } |
| 117 | 128 | ||
| 118 | void show_iret_regs(struct pt_regs *regs) | 129 | void show_iret_regs(struct pt_regs *regs) |
| @@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) | |||
| 346 | * We're not going to return, but we might be on an IST stack or | 357 | * We're not going to return, but we might be on an IST stack or |
| 347 | * have very little stack space left. Rewind the stack and kill | 358 | * have very little stack space left. Rewind the stack and kill |
| 348 | * the task. | 359 | * the task. |
| 360 | * Before we rewind the stack, we have to tell KASAN that we're going to | ||
| 361 | * reuse the task stack and that existing poisons are invalid. | ||
| 349 | */ | 362 | */ |
| 363 | kasan_unpoison_task_stack(current); | ||
| 350 | rewind_stack_do_exit(signr); | 364 | rewind_stack_do_exit(signr); |
| 351 | } | 365 | } |
| 352 | NOKPROBE_SYMBOL(oops_end); | 366 | NOKPROBE_SYMBOL(oops_end); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d440154e8938..e24ea7067373 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -5212,7 +5212,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) | |||
| 5212 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, | 5212 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, |
| 5213 | void *insn, int insn_len) | 5213 | void *insn, int insn_len) |
| 5214 | { | 5214 | { |
| 5215 | int r, emulation_type = EMULTYPE_RETRY; | 5215 | int r, emulation_type = 0; |
| 5216 | enum emulation_result er; | 5216 | enum emulation_result er; |
| 5217 | bool direct = vcpu->arch.mmu.direct_map; | 5217 | bool direct = vcpu->arch.mmu.direct_map; |
| 5218 | 5218 | ||
| @@ -5225,10 +5225,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, | |||
| 5225 | r = RET_PF_INVALID; | 5225 | r = RET_PF_INVALID; |
| 5226 | if (unlikely(error_code & PFERR_RSVD_MASK)) { | 5226 | if (unlikely(error_code & PFERR_RSVD_MASK)) { |
| 5227 | r = handle_mmio_page_fault(vcpu, cr2, direct); | 5227 | r = handle_mmio_page_fault(vcpu, cr2, direct); |
| 5228 | if (r == RET_PF_EMULATE) { | 5228 | if (r == RET_PF_EMULATE) |
| 5229 | emulation_type = 0; | ||
| 5230 | goto emulate; | 5229 | goto emulate; |
| 5231 | } | ||
| 5232 | } | 5230 | } |
| 5233 | 5231 | ||
| 5234 | if (r == RET_PF_INVALID) { | 5232 | if (r == RET_PF_INVALID) { |
| @@ -5255,8 +5253,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, | |||
| 5255 | return 1; | 5253 | return 1; |
| 5256 | } | 5254 | } |
| 5257 | 5255 | ||
| 5258 | if (mmio_info_in_cache(vcpu, cr2, direct)) | 5256 | /* |
| 5259 | emulation_type = 0; | 5257 | * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still |
| 5258 | * optimistically try to just unprotect the page and let the processor | ||
| 5259 | * re-execute the instruction that caused the page fault. Do not allow | ||
| 5260 | * retrying MMIO emulation, as it's not only pointless but could also | ||
| 5261 | * cause us to enter an infinite loop because the processor will keep | ||
| 5262 | * faulting on the non-existent MMIO address. Retrying an instruction | ||
| 5263 | * from a nested guest is also pointless and dangerous as we are only | ||
| 5264 | * explicitly shadowing L1's page tables, i.e. unprotecting something | ||
| 5265 | * for L1 isn't going to magically fix whatever issue cause L2 to fail. | ||
| 5266 | */ | ||
| 5267 | if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu)) | ||
| 5268 | emulation_type = EMULTYPE_ALLOW_RETRY; | ||
| 5260 | emulate: | 5269 | emulate: |
| 5261 | /* | 5270 | /* |
| 5262 | * On AMD platforms, under certain conditions insn_len may be zero on #NPF. | 5271 | * On AMD platforms, under certain conditions insn_len may be zero on #NPF. |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 6276140044d0..89c4c5aa15f1 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
| 776 | } | 776 | } |
| 777 | 777 | ||
| 778 | if (!svm->next_rip) { | 778 | if (!svm->next_rip) { |
| 779 | if (emulate_instruction(vcpu, EMULTYPE_SKIP) != | 779 | if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) != |
| 780 | EMULATE_DONE) | 780 | EMULATE_DONE) |
| 781 | printk(KERN_DEBUG "%s: NOP\n", __func__); | 781 | printk(KERN_DEBUG "%s: NOP\n", __func__); |
| 782 | return; | 782 | return; |
| @@ -2715,7 +2715,7 @@ static int gp_interception(struct vcpu_svm *svm) | |||
| 2715 | 2715 | ||
| 2716 | WARN_ON_ONCE(!enable_vmware_backdoor); | 2716 | WARN_ON_ONCE(!enable_vmware_backdoor); |
| 2717 | 2717 | ||
| 2718 | er = emulate_instruction(vcpu, | 2718 | er = kvm_emulate_instruction(vcpu, |
| 2719 | EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); | 2719 | EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); |
| 2720 | if (er == EMULATE_USER_EXIT) | 2720 | if (er == EMULATE_USER_EXIT) |
| 2721 | return 0; | 2721 | return 0; |
| @@ -2819,7 +2819,7 @@ static int io_interception(struct vcpu_svm *svm) | |||
| 2819 | string = (io_info & SVM_IOIO_STR_MASK) != 0; | 2819 | string = (io_info & SVM_IOIO_STR_MASK) != 0; |
| 2820 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; | 2820 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; |
| 2821 | if (string) | 2821 | if (string) |
| 2822 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 2822 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
| 2823 | 2823 | ||
| 2824 | port = io_info >> 16; | 2824 | port = io_info >> 16; |
| 2825 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; | 2825 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; |
| @@ -3861,7 +3861,7 @@ static int iret_interception(struct vcpu_svm *svm) | |||
| 3861 | static int invlpg_interception(struct vcpu_svm *svm) | 3861 | static int invlpg_interception(struct vcpu_svm *svm) |
| 3862 | { | 3862 | { |
| 3863 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) | 3863 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
| 3864 | return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; | 3864 | return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; |
| 3865 | 3865 | ||
| 3866 | kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); | 3866 | kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); |
| 3867 | return kvm_skip_emulated_instruction(&svm->vcpu); | 3867 | return kvm_skip_emulated_instruction(&svm->vcpu); |
| @@ -3869,13 +3869,13 @@ static int invlpg_interception(struct vcpu_svm *svm) | |||
| 3869 | 3869 | ||
| 3870 | static int emulate_on_interception(struct vcpu_svm *svm) | 3870 | static int emulate_on_interception(struct vcpu_svm *svm) |
| 3871 | { | 3871 | { |
| 3872 | return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; | 3872 | return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; |
| 3873 | } | 3873 | } |
| 3874 | 3874 | ||
| 3875 | static int rsm_interception(struct vcpu_svm *svm) | 3875 | static int rsm_interception(struct vcpu_svm *svm) |
| 3876 | { | 3876 | { |
| 3877 | return x86_emulate_instruction(&svm->vcpu, 0, 0, | 3877 | return kvm_emulate_instruction_from_buffer(&svm->vcpu, |
| 3878 | rsm_ins_bytes, 2) == EMULATE_DONE; | 3878 | rsm_ins_bytes, 2) == EMULATE_DONE; |
| 3879 | } | 3879 | } |
| 3880 | 3880 | ||
| 3881 | static int rdpmc_interception(struct vcpu_svm *svm) | 3881 | static int rdpmc_interception(struct vcpu_svm *svm) |
| @@ -4700,7 +4700,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm) | |||
| 4700 | ret = avic_unaccel_trap_write(svm); | 4700 | ret = avic_unaccel_trap_write(svm); |
| 4701 | } else { | 4701 | } else { |
| 4702 | /* Handling Fault */ | 4702 | /* Handling Fault */ |
| 4703 | ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); | 4703 | ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); |
| 4704 | } | 4704 | } |
| 4705 | 4705 | ||
| 4706 | return ret; | 4706 | return ret; |
| @@ -6747,7 +6747,7 @@ e_free: | |||
| 6747 | static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | 6747 | static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) |
| 6748 | { | 6748 | { |
| 6749 | unsigned long vaddr, vaddr_end, next_vaddr; | 6749 | unsigned long vaddr, vaddr_end, next_vaddr; |
| 6750 | unsigned long dst_vaddr, dst_vaddr_end; | 6750 | unsigned long dst_vaddr; |
| 6751 | struct page **src_p, **dst_p; | 6751 | struct page **src_p, **dst_p; |
| 6752 | struct kvm_sev_dbg debug; | 6752 | struct kvm_sev_dbg debug; |
| 6753 | unsigned long n; | 6753 | unsigned long n; |
| @@ -6763,7 +6763,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | |||
| 6763 | size = debug.len; | 6763 | size = debug.len; |
| 6764 | vaddr_end = vaddr + size; | 6764 | vaddr_end = vaddr + size; |
| 6765 | dst_vaddr = debug.dst_uaddr; | 6765 | dst_vaddr = debug.dst_uaddr; |
| 6766 | dst_vaddr_end = dst_vaddr + size; | ||
| 6767 | 6766 | ||
| 6768 | for (; vaddr < vaddr_end; vaddr = next_vaddr) { | 6767 | for (; vaddr < vaddr_end; vaddr = next_vaddr) { |
| 6769 | int len, s_off, d_off; | 6768 | int len, s_off, d_off; |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1d26f3c4985b..f910d33858d9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, | |||
| 6983 | * Cause the #SS fault with 0 error code in VM86 mode. | 6983 | * Cause the #SS fault with 0 error code in VM86 mode. |
| 6984 | */ | 6984 | */ |
| 6985 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { | 6985 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { |
| 6986 | if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { | 6986 | if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) { |
| 6987 | if (vcpu->arch.halt_request) { | 6987 | if (vcpu->arch.halt_request) { |
| 6988 | vcpu->arch.halt_request = 0; | 6988 | vcpu->arch.halt_request = 0; |
| 6989 | return kvm_vcpu_halt(vcpu); | 6989 | return kvm_vcpu_halt(vcpu); |
| @@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
| 7054 | 7054 | ||
| 7055 | if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { | 7055 | if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { |
| 7056 | WARN_ON_ONCE(!enable_vmware_backdoor); | 7056 | WARN_ON_ONCE(!enable_vmware_backdoor); |
| 7057 | er = emulate_instruction(vcpu, | 7057 | er = kvm_emulate_instruction(vcpu, |
| 7058 | EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); | 7058 | EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); |
| 7059 | if (er == EMULATE_USER_EXIT) | 7059 | if (er == EMULATE_USER_EXIT) |
| 7060 | return 0; | 7060 | return 0; |
| @@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu) | |||
| 7157 | ++vcpu->stat.io_exits; | 7157 | ++vcpu->stat.io_exits; |
| 7158 | 7158 | ||
| 7159 | if (string) | 7159 | if (string) |
| 7160 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 7160 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
| 7161 | 7161 | ||
| 7162 | port = exit_qualification >> 16; | 7162 | port = exit_qualification >> 16; |
| 7163 | size = (exit_qualification & 7) + 1; | 7163 | size = (exit_qualification & 7) + 1; |
| @@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) | |||
| 7231 | static int handle_desc(struct kvm_vcpu *vcpu) | 7231 | static int handle_desc(struct kvm_vcpu *vcpu) |
| 7232 | { | 7232 | { |
| 7233 | WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); | 7233 | WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); |
| 7234 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 7234 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
| 7235 | } | 7235 | } |
| 7236 | 7236 | ||
| 7237 | static int handle_cr(struct kvm_vcpu *vcpu) | 7237 | static int handle_cr(struct kvm_vcpu *vcpu) |
| @@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu) | |||
| 7480 | 7480 | ||
| 7481 | static int handle_invd(struct kvm_vcpu *vcpu) | 7481 | static int handle_invd(struct kvm_vcpu *vcpu) |
| 7482 | { | 7482 | { |
| 7483 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 7483 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
| 7484 | } | 7484 | } |
| 7485 | 7485 | ||
| 7486 | static int handle_invlpg(struct kvm_vcpu *vcpu) | 7486 | static int handle_invlpg(struct kvm_vcpu *vcpu) |
| @@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu) | |||
| 7547 | return kvm_skip_emulated_instruction(vcpu); | 7547 | return kvm_skip_emulated_instruction(vcpu); |
| 7548 | } | 7548 | } |
| 7549 | } | 7549 | } |
| 7550 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 7550 | return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; |
| 7551 | } | 7551 | } |
| 7552 | 7552 | ||
| 7553 | static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) | 7553 | static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) |
| @@ -7704,8 +7704,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) | |||
| 7704 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) | 7704 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) |
| 7705 | return kvm_skip_emulated_instruction(vcpu); | 7705 | return kvm_skip_emulated_instruction(vcpu); |
| 7706 | else | 7706 | else |
| 7707 | return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP, | 7707 | return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) == |
| 7708 | NULL, 0) == EMULATE_DONE; | 7708 | EMULATE_DONE; |
| 7709 | } | 7709 | } |
| 7710 | 7710 | ||
| 7711 | return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); | 7711 | return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); |
| @@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) | |||
| 7748 | if (kvm_test_request(KVM_REQ_EVENT, vcpu)) | 7748 | if (kvm_test_request(KVM_REQ_EVENT, vcpu)) |
| 7749 | return 1; | 7749 | return 1; |
| 7750 | 7750 | ||
| 7751 | err = emulate_instruction(vcpu, 0); | 7751 | err = kvm_emulate_instruction(vcpu, 0); |
| 7752 | 7752 | ||
| 7753 | if (err == EMULATE_USER_EXIT) { | 7753 | if (err == EMULATE_USER_EXIT) { |
| 7754 | ++vcpu->stat.mmio_exits; | 7754 | ++vcpu->stat.mmio_exits; |
| @@ -13988,9 +13988,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, | |||
| 13988 | check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) | 13988 | check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) |
| 13989 | return -EINVAL; | 13989 | return -EINVAL; |
| 13990 | 13990 | ||
| 13991 | if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING) | ||
| 13992 | vmx->nested.nested_run_pending = 1; | ||
| 13993 | |||
| 13994 | vmx->nested.dirty_vmcs12 = true; | 13991 | vmx->nested.dirty_vmcs12 = true; |
| 13995 | ret = enter_vmx_non_root_mode(vcpu, NULL); | 13992 | ret = enter_vmx_non_root_mode(vcpu, NULL); |
| 13996 | if (ret) | 13993 | if (ret) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 506bd2b4b8bb..542f6315444d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -4987,7 +4987,7 @@ int handle_ud(struct kvm_vcpu *vcpu) | |||
| 4987 | emul_type = 0; | 4987 | emul_type = 0; |
| 4988 | } | 4988 | } |
| 4989 | 4989 | ||
| 4990 | er = emulate_instruction(vcpu, emul_type); | 4990 | er = kvm_emulate_instruction(vcpu, emul_type); |
| 4991 | if (er == EMULATE_USER_EXIT) | 4991 | if (er == EMULATE_USER_EXIT) |
| 4992 | return 0; | 4992 | return 0; |
| 4993 | if (er != EMULATE_DONE) | 4993 | if (er != EMULATE_DONE) |
| @@ -5870,7 +5870,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, | |||
| 5870 | gpa_t gpa = cr2; | 5870 | gpa_t gpa = cr2; |
| 5871 | kvm_pfn_t pfn; | 5871 | kvm_pfn_t pfn; |
| 5872 | 5872 | ||
| 5873 | if (emulation_type & EMULTYPE_NO_REEXECUTE) | 5873 | if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) |
| 5874 | return false; | ||
| 5875 | |||
| 5876 | if (WARN_ON_ONCE(is_guest_mode(vcpu))) | ||
| 5874 | return false; | 5877 | return false; |
| 5875 | 5878 | ||
| 5876 | if (!vcpu->arch.mmu.direct_map) { | 5879 | if (!vcpu->arch.mmu.direct_map) { |
| @@ -5958,7 +5961,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, | |||
| 5958 | */ | 5961 | */ |
| 5959 | vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; | 5962 | vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; |
| 5960 | 5963 | ||
| 5961 | if (!(emulation_type & EMULTYPE_RETRY)) | 5964 | if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) |
| 5965 | return false; | ||
| 5966 | |||
| 5967 | if (WARN_ON_ONCE(is_guest_mode(vcpu))) | ||
| 5962 | return false; | 5968 | return false; |
| 5963 | 5969 | ||
| 5964 | if (x86_page_table_writing_insn(ctxt)) | 5970 | if (x86_page_table_writing_insn(ctxt)) |
| @@ -6276,7 +6282,19 @@ restart: | |||
| 6276 | 6282 | ||
| 6277 | return r; | 6283 | return r; |
| 6278 | } | 6284 | } |
| 6279 | EXPORT_SYMBOL_GPL(x86_emulate_instruction); | 6285 | |
| 6286 | int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) | ||
| 6287 | { | ||
| 6288 | return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); | ||
| 6289 | } | ||
| 6290 | EXPORT_SYMBOL_GPL(kvm_emulate_instruction); | ||
| 6291 | |||
| 6292 | int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, | ||
| 6293 | void *insn, int insn_len) | ||
| 6294 | { | ||
| 6295 | return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); | ||
| 6296 | } | ||
| 6297 | EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); | ||
| 6280 | 6298 | ||
| 6281 | static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, | 6299 | static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, |
| 6282 | unsigned short port) | 6300 | unsigned short port) |
| @@ -7734,7 +7752,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu) | |||
| 7734 | { | 7752 | { |
| 7735 | int r; | 7753 | int r; |
| 7736 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | 7754 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 7737 | r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); | 7755 | r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); |
| 7738 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | 7756 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); |
| 7739 | if (r != EMULATE_DONE) | 7757 | if (r != EMULATE_DONE) |
| 7740 | return 0; | 7758 | return 0; |
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 257f27620bc2..67b9568613f3 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
| @@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | |||
| 274 | bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, | 274 | bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 275 | int page_num); | 275 | int page_num); |
| 276 | bool kvm_vector_hashing_enabled(void); | 276 | bool kvm_vector_hashing_enabled(void); |
| 277 | int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, | ||
| 278 | int emulation_type, void *insn, int insn_len); | ||
| 277 | 279 | ||
| 278 | #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ | 280 | #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ |
| 279 | | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ | 281 | | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ |
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index c8c6ad0d58b8..3f435d7fca5e 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
| 8 | #include <linux/export.h> | 8 | #include <linux/export.h> |
| 9 | 9 | ||
| 10 | #include <asm/tlbflush.h> | ||
| 11 | |||
| 10 | /* | 12 | /* |
| 11 | * We rely on the nested NMI work to allow atomic faults from the NMI path; the | 13 | * We rely on the nested NMI work to allow atomic faults from the NMI path; the |
| 12 | * nested NMI paths are careful to preserve CR2. | 14 | * nested NMI paths are careful to preserve CR2. |
| @@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
| 19 | if (__range_not_ok(from, n, TASK_SIZE)) | 21 | if (__range_not_ok(from, n, TASK_SIZE)) |
| 20 | return n; | 22 | return n; |
| 21 | 23 | ||
| 24 | if (!nmi_uaccess_okay()) | ||
| 25 | return n; | ||
| 26 | |||
| 22 | /* | 27 | /* |
| 23 | * Even though this function is typically called from NMI/IRQ context | 28 | * Even though this function is typically called from NMI/IRQ context |
| 24 | * disable pagefaults so that its behaviour is consistent even when | 29 | * disable pagefaults so that its behaviour is consistent even when |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b9123c497e0a..47bebfe6efa7 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
| @@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, | |||
| 837 | 837 | ||
| 838 | printk(KERN_CONT "\n"); | 838 | printk(KERN_CONT "\n"); |
| 839 | 839 | ||
| 840 | show_opcodes((u8 *)regs->ip, loglvl); | 840 | show_opcodes(regs, loglvl); |
| 841 | } | 841 | } |
| 842 | 842 | ||
| 843 | static void | 843 | static void |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 8d6c34fe49be..51a5a69ecac9 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
| @@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) | |||
| 1420 | return 0; | 1420 | return 0; |
| 1421 | } | 1421 | } |
| 1422 | 1422 | ||
| 1423 | /* | ||
| 1424 | * Machine check recovery code needs to change cache mode of poisoned | ||
| 1425 | * pages to UC to avoid speculative access logging another error. But | ||
| 1426 | * passing the address of the 1:1 mapping to set_memory_uc() is a fine | ||
| 1427 | * way to encourage a speculative access. So we cheat and flip the top | ||
| 1428 | * bit of the address. This works fine for the code that updates the | ||
| 1429 | * page tables. But at the end of the process we need to flush the cache | ||
| 1430 | * and the non-canonical address causes a #GP fault when used by the | ||
| 1431 | * CLFLUSH instruction. | ||
| 1432 | * | ||
| 1433 | * But in the common case we already have a canonical address. This code | ||
| 1434 | * will fix the top bit if needed and is a no-op otherwise. | ||
| 1435 | */ | ||
| 1436 | static inline unsigned long make_addr_canonical_again(unsigned long addr) | ||
| 1437 | { | ||
| 1438 | #ifdef CONFIG_X86_64 | ||
| 1439 | return (long)(addr << 1) >> 1; | ||
| 1440 | #else | ||
| 1441 | return addr; | ||
| 1442 | #endif | ||
| 1443 | } | ||
| 1444 | |||
| 1445 | |||
| 1423 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, | 1446 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
| 1424 | pgprot_t mask_set, pgprot_t mask_clr, | 1447 | pgprot_t mask_set, pgprot_t mask_clr, |
| 1425 | int force_split, int in_flag, | 1448 | int force_split, int in_flag, |
| @@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
| 1465 | * Save address for cache flush. *addr is modified in the call | 1488 | * Save address for cache flush. *addr is modified in the call |
| 1466 | * to __change_page_attr_set_clr() below. | 1489 | * to __change_page_attr_set_clr() below. |
| 1467 | */ | 1490 | */ |
| 1468 | baddr = *addr; | 1491 | baddr = make_addr_canonical_again(*addr); |
| 1469 | } | 1492 | } |
| 1470 | 1493 | ||
| 1471 | /* Must avoid aliasing mappings in the highmem code */ | 1494 | /* Must avoid aliasing mappings in the highmem code */ |
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 31341ae7309f..c1fc1ae6b429 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c | |||
| @@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) | |||
| 248 | * | 248 | * |
| 249 | * Returns a pointer to a PTE on success, or NULL on failure. | 249 | * Returns a pointer to a PTE on success, or NULL on failure. |
| 250 | */ | 250 | */ |
| 251 | static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) | 251 | static pte_t *pti_user_pagetable_walk_pte(unsigned long address) |
| 252 | { | 252 | { |
| 253 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); | 253 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); |
| 254 | pmd_t *pmd; | 254 | pmd_t *pmd; |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 9517d1b2a281..e96b99eb800c 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
| @@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
| 305 | 305 | ||
| 306 | choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); | 306 | choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); |
| 307 | 307 | ||
| 308 | /* Let nmi_uaccess_okay() know that we're changing CR3. */ | ||
| 309 | this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); | ||
| 310 | barrier(); | ||
| 311 | |||
| 308 | if (need_flush) { | 312 | if (need_flush) { |
| 309 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); | 313 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); |
| 310 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); | 314 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); |
| @@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
| 335 | if (next != &init_mm) | 339 | if (next != &init_mm) |
| 336 | this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); | 340 | this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); |
| 337 | 341 | ||
| 342 | /* Make sure we write CR3 before loaded_mm. */ | ||
| 343 | barrier(); | ||
| 344 | |||
| 338 | this_cpu_write(cpu_tlbstate.loaded_mm, next); | 345 | this_cpu_write(cpu_tlbstate.loaded_mm, next); |
| 339 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); | 346 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); |
| 340 | } | 347 | } |
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 324b93328b37..05ca14222463 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c | |||
| @@ -85,14 +85,10 @@ pgd_t * __init efi_call_phys_prolog(void) | |||
| 85 | 85 | ||
| 86 | void __init efi_call_phys_epilog(pgd_t *save_pgd) | 86 | void __init efi_call_phys_epilog(pgd_t *save_pgd) |
| 87 | { | 87 | { |
| 88 | struct desc_ptr gdt_descr; | ||
| 89 | |||
| 90 | gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0); | ||
| 91 | gdt_descr.size = GDT_SIZE - 1; | ||
| 92 | load_gdt(&gdt_descr); | ||
| 93 | |||
| 94 | load_cr3(save_pgd); | 88 | load_cr3(save_pgd); |
| 95 | __flush_tlb_all(); | 89 | __flush_tlb_all(); |
| 90 | |||
| 91 | load_fixmap_gdt(0); | ||
| 96 | } | 92 | } |
| 97 | 93 | ||
| 98 | void __init efi_runtime_update_mappings(void) | 94 | void __init efi_runtime_update_mappings(void) |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 45b700ac5fe7..2fe5c9b1816b 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
| @@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val) | |||
| 435 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) | 435 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) |
| 436 | { | 436 | { |
| 437 | trace_xen_mmu_set_pte_atomic(ptep, pte); | 437 | trace_xen_mmu_set_pte_atomic(ptep, pte); |
| 438 | set_64bit((u64 *)ptep, native_pte_val(pte)); | 438 | __xen_set_pte(ptep, pte); |
| 439 | } | 439 | } |
| 440 | 440 | ||
| 441 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 441 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
| 442 | { | 442 | { |
| 443 | trace_xen_mmu_pte_clear(mm, addr, ptep); | 443 | trace_xen_mmu_pte_clear(mm, addr, ptep); |
| 444 | if (!xen_batched_set_pte(ptep, native_make_pte(0))) | 444 | __xen_set_pte(ptep, native_make_pte(0)); |
| 445 | native_pte_clear(mm, addr, ptep); | ||
| 446 | } | 445 | } |
| 447 | 446 | ||
| 448 | static void xen_pmd_clear(pmd_t *pmdp) | 447 | static void xen_pmd_clear(pmd_t *pmdp) |
| @@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) | |||
| 1570 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | 1569 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & |
| 1571 | pte_val_ma(pte)); | 1570 | pte_val_ma(pte)); |
| 1572 | #endif | 1571 | #endif |
| 1573 | native_set_pte(ptep, pte); | 1572 | __xen_set_pte(ptep, pte); |
| 1574 | } | 1573 | } |
| 1575 | 1574 | ||
| 1576 | /* Early in boot, while setting up the initial pagetable, assume | 1575 | /* Early in boot, while setting up the initial pagetable, assume |
| @@ -2061,7 +2060,6 @@ void __init xen_relocate_p2m(void) | |||
| 2061 | pud_t *pud; | 2060 | pud_t *pud; |
| 2062 | pgd_t *pgd; | 2061 | pgd_t *pgd; |
| 2063 | unsigned long *new_p2m; | 2062 | unsigned long *new_p2m; |
| 2064 | int save_pud; | ||
| 2065 | 2063 | ||
| 2066 | size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); | 2064 | size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); |
| 2067 | n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; | 2065 | n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; |
| @@ -2091,7 +2089,6 @@ void __init xen_relocate_p2m(void) | |||
| 2091 | 2089 | ||
| 2092 | pgd = __va(read_cr3_pa()); | 2090 | pgd = __va(read_cr3_pa()); |
| 2093 | new_p2m = (unsigned long *)(2 * PGDIR_SIZE); | 2091 | new_p2m = (unsigned long *)(2 * PGDIR_SIZE); |
| 2094 | save_pud = n_pud; | ||
| 2095 | for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { | 2092 | for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { |
| 2096 | pud = early_memremap(pud_phys, PAGE_SIZE); | 2093 | pud = early_memremap(pud_phys, PAGE_SIZE); |
| 2097 | clear_page(pud); | 2094 | clear_page(pud); |
diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 84507d3e9a98..8e20a0677dcf 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c | |||
| @@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb) | |||
| 123 | } | 123 | } |
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | 126 | static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw, |
| 127 | enum wbt_flags wb_acct) | ||
| 127 | { | 128 | { |
| 128 | struct rq_wb *rwb = RQWB(rqos); | ||
| 129 | struct rq_wait *rqw; | ||
| 130 | int inflight, limit; | 129 | int inflight, limit; |
| 131 | 130 | ||
| 132 | if (!(wb_acct & WBT_TRACKED)) | ||
| 133 | return; | ||
| 134 | |||
| 135 | rqw = get_rq_wait(rwb, wb_acct); | ||
| 136 | inflight = atomic_dec_return(&rqw->inflight); | 131 | inflight = atomic_dec_return(&rqw->inflight); |
| 137 | 132 | ||
| 138 | /* | 133 | /* |
| @@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | |||
| 166 | int diff = limit - inflight; | 161 | int diff = limit - inflight; |
| 167 | 162 | ||
| 168 | if (!inflight || diff >= rwb->wb_background / 2) | 163 | if (!inflight || diff >= rwb->wb_background / 2) |
| 169 | wake_up(&rqw->wait); | 164 | wake_up_all(&rqw->wait); |
| 170 | } | 165 | } |
| 171 | } | 166 | } |
| 172 | 167 | ||
| 168 | static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) | ||
| 169 | { | ||
| 170 | struct rq_wb *rwb = RQWB(rqos); | ||
| 171 | struct rq_wait *rqw; | ||
| 172 | |||
| 173 | if (!(wb_acct & WBT_TRACKED)) | ||
| 174 | return; | ||
| 175 | |||
| 176 | rqw = get_rq_wait(rwb, wb_acct); | ||
| 177 | wbt_rqw_done(rwb, rqw, wb_acct); | ||
| 178 | } | ||
| 179 | |||
| 173 | /* | 180 | /* |
| 174 | * Called on completion of a request. Note that it's also called when | 181 | * Called on completion of a request. Note that it's also called when |
| 175 | * a request is merged, when the request gets freed. | 182 | * a request is merged, when the request gets freed. |
| @@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw) | |||
| 481 | return limit; | 488 | return limit; |
| 482 | } | 489 | } |
| 483 | 490 | ||
| 491 | struct wbt_wait_data { | ||
| 492 | struct wait_queue_entry wq; | ||
| 493 | struct task_struct *task; | ||
| 494 | struct rq_wb *rwb; | ||
| 495 | struct rq_wait *rqw; | ||
| 496 | unsigned long rw; | ||
| 497 | bool got_token; | ||
| 498 | }; | ||
| 499 | |||
| 500 | static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode, | ||
| 501 | int wake_flags, void *key) | ||
| 502 | { | ||
| 503 | struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data, | ||
| 504 | wq); | ||
| 505 | |||
| 506 | /* | ||
| 507 | * If we fail to get a budget, return -1 to interrupt the wake up | ||
| 508 | * loop in __wake_up_common. | ||
| 509 | */ | ||
| 510 | if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw))) | ||
| 511 | return -1; | ||
| 512 | |||
| 513 | data->got_token = true; | ||
| 514 | list_del_init(&curr->entry); | ||
| 515 | wake_up_process(data->task); | ||
| 516 | return 1; | ||
| 517 | } | ||
| 518 | |||
| 484 | /* | 519 | /* |
| 485 | * Block if we will exceed our limit, or if we are currently waiting for | 520 | * Block if we will exceed our limit, or if we are currently waiting for |
| 486 | * the timer to kick off queuing again. | 521 | * the timer to kick off queuing again. |
| @@ -491,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, | |||
| 491 | __acquires(lock) | 526 | __acquires(lock) |
| 492 | { | 527 | { |
| 493 | struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); | 528 | struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); |
| 494 | DECLARE_WAITQUEUE(wait, current); | 529 | struct wbt_wait_data data = { |
| 530 | .wq = { | ||
| 531 | .func = wbt_wake_function, | ||
| 532 | .entry = LIST_HEAD_INIT(data.wq.entry), | ||
| 533 | }, | ||
| 534 | .task = current, | ||
| 535 | .rwb = rwb, | ||
| 536 | .rqw = rqw, | ||
| 537 | .rw = rw, | ||
| 538 | }; | ||
| 495 | bool has_sleeper; | 539 | bool has_sleeper; |
| 496 | 540 | ||
| 497 | has_sleeper = wq_has_sleeper(&rqw->wait); | 541 | has_sleeper = wq_has_sleeper(&rqw->wait); |
| 498 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) | 542 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) |
| 499 | return; | 543 | return; |
| 500 | 544 | ||
| 501 | add_wait_queue_exclusive(&rqw->wait, &wait); | 545 | prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); |
| 502 | do { | 546 | do { |
| 503 | set_current_state(TASK_UNINTERRUPTIBLE); | 547 | if (data.got_token) |
| 548 | break; | ||
| 504 | 549 | ||
| 505 | if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) | 550 | if (!has_sleeper && |
| 551 | rq_wait_inc_below(rqw, get_limit(rwb, rw))) { | ||
| 552 | finish_wait(&rqw->wait, &data.wq); | ||
| 553 | |||
| 554 | /* | ||
| 555 | * We raced with wbt_wake_function() getting a token, | ||
| 556 | * which means we now have two. Put our local token | ||
| 557 | * and wake anyone else potentially waiting for one. | ||
| 558 | */ | ||
| 559 | if (data.got_token) | ||
| 560 | wbt_rqw_done(rwb, rqw, wb_acct); | ||
| 506 | break; | 561 | break; |
| 562 | } | ||
| 507 | 563 | ||
| 508 | if (lock) { | 564 | if (lock) { |
| 509 | spin_unlock_irq(lock); | 565 | spin_unlock_irq(lock); |
| @@ -511,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, | |||
| 511 | spin_lock_irq(lock); | 567 | spin_lock_irq(lock); |
| 512 | } else | 568 | } else |
| 513 | io_schedule(); | 569 | io_schedule(); |
| 570 | |||
| 514 | has_sleeper = false; | 571 | has_sleeper = false; |
| 515 | } while (1); | 572 | } while (1); |
| 516 | 573 | ||
| 517 | __set_current_state(TASK_RUNNING); | 574 | finish_wait(&rqw->wait, &data.wq); |
| 518 | remove_wait_queue(&rqw->wait, &wait); | ||
| 519 | } | 575 | } |
| 520 | 576 | ||
| 521 | static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) | 577 | static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) |
| @@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) | |||
| 580 | return; | 636 | return; |
| 581 | } | 637 | } |
| 582 | 638 | ||
| 583 | if (current_is_kswapd()) | ||
| 584 | flags |= WBT_KSWAPD; | ||
| 585 | if (bio_op(bio) == REQ_OP_DISCARD) | ||
| 586 | flags |= WBT_DISCARD; | ||
| 587 | |||
| 588 | __wbt_wait(rwb, flags, bio->bi_opf, lock); | 639 | __wbt_wait(rwb, flags, bio->bi_opf, lock); |
| 589 | 640 | ||
| 590 | if (!blk_stat_is_active(rwb->cb)) | 641 | if (!blk_stat_is_active(rwb->cb)) |
diff --git a/block/bsg.c b/block/bsg.c index db588add6ba6..9a442c23a715 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
| @@ -37,7 +37,7 @@ struct bsg_device { | |||
| 37 | struct request_queue *queue; | 37 | struct request_queue *queue; |
| 38 | spinlock_t lock; | 38 | spinlock_t lock; |
| 39 | struct hlist_node dev_list; | 39 | struct hlist_node dev_list; |
| 40 | atomic_t ref_count; | 40 | refcount_t ref_count; |
| 41 | char name[20]; | 41 | char name[20]; |
| 42 | int max_queue; | 42 | int max_queue; |
| 43 | }; | 43 | }; |
| @@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd) | |||
| 252 | 252 | ||
| 253 | mutex_lock(&bsg_mutex); | 253 | mutex_lock(&bsg_mutex); |
| 254 | 254 | ||
| 255 | if (!atomic_dec_and_test(&bd->ref_count)) { | 255 | if (!refcount_dec_and_test(&bd->ref_count)) { |
| 256 | mutex_unlock(&bsg_mutex); | 256 | mutex_unlock(&bsg_mutex); |
| 257 | return 0; | 257 | return 0; |
| 258 | } | 258 | } |
| @@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode, | |||
| 290 | 290 | ||
| 291 | bd->queue = rq; | 291 | bd->queue = rq; |
| 292 | 292 | ||
| 293 | atomic_set(&bd->ref_count, 1); | 293 | refcount_set(&bd->ref_count, 1); |
| 294 | hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); | 294 | hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); |
| 295 | 295 | ||
| 296 | strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); | 296 | strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); |
| @@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) | |||
| 308 | 308 | ||
| 309 | hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { | 309 | hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { |
| 310 | if (bd->queue == q) { | 310 | if (bd->queue == q) { |
| 311 | atomic_inc(&bd->ref_count); | 311 | refcount_inc(&bd->ref_count); |
| 312 | goto found; | 312 | goto found; |
| 313 | } | 313 | } |
| 314 | } | 314 | } |
diff --git a/block/elevator.c b/block/elevator.c index 5ea6e7d600e4..6a06b5d040e5 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e) | |||
| 895 | spin_lock(&elv_list_lock); | 895 | spin_lock(&elv_list_lock); |
| 896 | if (elevator_find(e->elevator_name, e->uses_mq)) { | 896 | if (elevator_find(e->elevator_name, e->uses_mq)) { |
| 897 | spin_unlock(&elv_list_lock); | 897 | spin_unlock(&elv_list_lock); |
| 898 | if (e->icq_cache) | 898 | kmem_cache_destroy(e->icq_cache); |
| 899 | kmem_cache_destroy(e->icq_cache); | ||
| 900 | return -EBUSY; | 899 | return -EBUSY; |
| 901 | } | 900 | } |
| 902 | list_add_tail(&e->list, &elv_list); | 901 | list_add_tail(&e->list, &elv_list); |
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 5d4b72e21161..569a4a662dcd 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c | |||
| @@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = { | |||
| 256 | .qc_issue = ftide010_qc_issue, | 256 | .qc_issue = ftide010_qc_issue, |
| 257 | }; | 257 | }; |
| 258 | 258 | ||
| 259 | static struct ata_port_info ftide010_port_info[] = { | 259 | static struct ata_port_info ftide010_port_info = { |
| 260 | { | 260 | .flags = ATA_FLAG_SLAVE_POSS, |
| 261 | .flags = ATA_FLAG_SLAVE_POSS, | 261 | .mwdma_mask = ATA_MWDMA2, |
| 262 | .mwdma_mask = ATA_MWDMA2, | 262 | .udma_mask = ATA_UDMA6, |
| 263 | .udma_mask = ATA_UDMA6, | 263 | .pio_mask = ATA_PIO4, |
| 264 | .pio_mask = ATA_PIO4, | 264 | .port_ops = &pata_ftide010_port_ops, |
| 265 | .port_ops = &pata_ftide010_port_ops, | ||
| 266 | }, | ||
| 267 | }; | 265 | }; |
| 268 | 266 | ||
| 269 | #if IS_ENABLED(CONFIG_SATA_GEMINI) | 267 | #if IS_ENABLED(CONFIG_SATA_GEMINI) |
| @@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap) | |||
| 349 | } | 347 | } |
| 350 | 348 | ||
| 351 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, | 349 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, |
| 350 | struct ata_port_info *pi, | ||
| 352 | bool is_ata1) | 351 | bool is_ata1) |
| 353 | { | 352 | { |
| 354 | struct device *dev = ftide->dev; | 353 | struct device *dev = ftide->dev; |
| @@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, | |||
| 373 | 372 | ||
| 374 | /* Flag port as SATA-capable */ | 373 | /* Flag port as SATA-capable */ |
| 375 | if (gemini_sata_bridge_enabled(sg, is_ata1)) | 374 | if (gemini_sata_bridge_enabled(sg, is_ata1)) |
| 376 | ftide010_port_info[0].flags |= ATA_FLAG_SATA; | 375 | pi->flags |= ATA_FLAG_SATA; |
| 376 | |||
| 377 | /* This device has broken DMA, only PIO works */ | ||
| 378 | if (of_machine_is_compatible("itian,sq201")) { | ||
| 379 | pi->mwdma_mask = 0; | ||
| 380 | pi->udma_mask = 0; | ||
| 381 | } | ||
| 377 | 382 | ||
| 378 | /* | 383 | /* |
| 379 | * We assume that a simple 40-wire cable is used in the PATA mode. | 384 | * We assume that a simple 40-wire cable is used in the PATA mode. |
| @@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, | |||
| 435 | } | 440 | } |
| 436 | #else | 441 | #else |
| 437 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, | 442 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, |
| 443 | struct ata_port_info *pi, | ||
| 438 | bool is_ata1) | 444 | bool is_ata1) |
| 439 | { | 445 | { |
| 440 | return -ENOTSUPP; | 446 | return -ENOTSUPP; |
| @@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) | |||
| 446 | { | 452 | { |
| 447 | struct device *dev = &pdev->dev; | 453 | struct device *dev = &pdev->dev; |
| 448 | struct device_node *np = dev->of_node; | 454 | struct device_node *np = dev->of_node; |
| 449 | const struct ata_port_info pi = ftide010_port_info[0]; | 455 | struct ata_port_info pi = ftide010_port_info; |
| 450 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 456 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
| 451 | struct ftide010 *ftide; | 457 | struct ftide010 *ftide; |
| 452 | struct resource *res; | 458 | struct resource *res; |
| @@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) | |||
| 490 | * are ATA0. This will also set up the cable types. | 496 | * are ATA0. This will also set up the cable types. |
| 491 | */ | 497 | */ |
| 492 | ret = pata_ftide010_gemini_init(ftide, | 498 | ret = pata_ftide010_gemini_init(ftide, |
| 499 | &pi, | ||
| 493 | (res->start == 0x63400000)); | 500 | (res->start == 0x63400000)); |
| 494 | if (ret) | 501 | if (ret) |
| 495 | goto err_dis_clk; | 502 | goto err_dis_clk; |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 8e2e4757adcb..5a42ae4078c2 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
| @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); | |||
| 185 | int of_pm_clk_add_clks(struct device *dev) | 185 | int of_pm_clk_add_clks(struct device *dev) |
| 186 | { | 186 | { |
| 187 | struct clk **clks; | 187 | struct clk **clks; |
| 188 | unsigned int i, count; | 188 | int i, count; |
| 189 | int ret; | 189 | int ret; |
| 190 | 190 | ||
| 191 | if (!dev || !dev->of_node) | 191 | if (!dev || !dev->of_node) |
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index b55b245e8052..fd1e19f1a49f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
| @@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants, | |||
| 84 | "Maximum number of grants to map persistently"); | 84 | "Maximum number of grants to map persistently"); |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * How long a persistent grant is allowed to remain allocated without being in | ||
| 88 | * use. The time is in seconds, 0 means indefinitely long. | ||
| 89 | */ | ||
| 90 | |||
| 91 | static unsigned int xen_blkif_pgrant_timeout = 60; | ||
| 92 | module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout, | ||
| 93 | uint, 0644); | ||
| 94 | MODULE_PARM_DESC(persistent_grant_unused_seconds, | ||
| 95 | "Time in seconds an unused persistent grant is allowed to " | ||
| 96 | "remain allocated. Default is 60, 0 means unlimited."); | ||
| 97 | |||
| 98 | /* | ||
| 87 | * Maximum number of rings/queues blkback supports, allow as many queues as there | 99 | * Maximum number of rings/queues blkback supports, allow as many queues as there |
| 88 | * are CPUs if user has not specified a value. | 100 | * are CPUs if user has not specified a value. |
| 89 | */ | 101 | */ |
| @@ -123,6 +135,13 @@ module_param(log_stats, int, 0644); | |||
| 123 | /* Number of free pages to remove on each call to gnttab_free_pages */ | 135 | /* Number of free pages to remove on each call to gnttab_free_pages */ |
| 124 | #define NUM_BATCH_FREE_PAGES 10 | 136 | #define NUM_BATCH_FREE_PAGES 10 |
| 125 | 137 | ||
| 138 | static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt) | ||
| 139 | { | ||
| 140 | return xen_blkif_pgrant_timeout && | ||
| 141 | (jiffies - persistent_gnt->last_used >= | ||
| 142 | HZ * xen_blkif_pgrant_timeout); | ||
| 143 | } | ||
| 144 | |||
| 126 | static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) | 145 | static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) |
| 127 | { | 146 | { |
| 128 | unsigned long flags; | 147 | unsigned long flags; |
| @@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring, | |||
| 236 | } | 255 | } |
| 237 | } | 256 | } |
| 238 | 257 | ||
| 239 | bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); | 258 | persistent_gnt->active = true; |
| 240 | set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | ||
| 241 | /* Add new node and rebalance tree. */ | 259 | /* Add new node and rebalance tree. */ |
| 242 | rb_link_node(&(persistent_gnt->node), parent, new); | 260 | rb_link_node(&(persistent_gnt->node), parent, new); |
| 243 | rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); | 261 | rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); |
| @@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, | |||
| 261 | else if (gref > data->gnt) | 279 | else if (gref > data->gnt) |
| 262 | node = node->rb_right; | 280 | node = node->rb_right; |
| 263 | else { | 281 | else { |
| 264 | if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { | 282 | if (data->active) { |
| 265 | pr_alert_ratelimited("requesting a grant already in use\n"); | 283 | pr_alert_ratelimited("requesting a grant already in use\n"); |
| 266 | return NULL; | 284 | return NULL; |
| 267 | } | 285 | } |
| 268 | set_bit(PERSISTENT_GNT_ACTIVE, data->flags); | 286 | data->active = true; |
| 269 | atomic_inc(&ring->persistent_gnt_in_use); | 287 | atomic_inc(&ring->persistent_gnt_in_use); |
| 270 | return data; | 288 | return data; |
| 271 | } | 289 | } |
| @@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, | |||
| 276 | static void put_persistent_gnt(struct xen_blkif_ring *ring, | 294 | static void put_persistent_gnt(struct xen_blkif_ring *ring, |
| 277 | struct persistent_gnt *persistent_gnt) | 295 | struct persistent_gnt *persistent_gnt) |
| 278 | { | 296 | { |
| 279 | if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | 297 | if (!persistent_gnt->active) |
| 280 | pr_alert_ratelimited("freeing a grant already unused\n"); | 298 | pr_alert_ratelimited("freeing a grant already unused\n"); |
| 281 | set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | 299 | persistent_gnt->last_used = jiffies; |
| 282 | clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | 300 | persistent_gnt->active = false; |
| 283 | atomic_dec(&ring->persistent_gnt_in_use); | 301 | atomic_dec(&ring->persistent_gnt_in_use); |
| 284 | } | 302 | } |
| 285 | 303 | ||
| @@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) | |||
| 371 | struct persistent_gnt *persistent_gnt; | 389 | struct persistent_gnt *persistent_gnt; |
| 372 | struct rb_node *n; | 390 | struct rb_node *n; |
| 373 | unsigned int num_clean, total; | 391 | unsigned int num_clean, total; |
| 374 | bool scan_used = false, clean_used = false; | 392 | bool scan_used = false; |
| 375 | struct rb_root *root; | 393 | struct rb_root *root; |
| 376 | 394 | ||
| 377 | if (ring->persistent_gnt_c < xen_blkif_max_pgrants || | ||
| 378 | (ring->persistent_gnt_c == xen_blkif_max_pgrants && | ||
| 379 | !ring->blkif->vbd.overflow_max_grants)) { | ||
| 380 | goto out; | ||
| 381 | } | ||
| 382 | |||
| 383 | if (work_busy(&ring->persistent_purge_work)) { | 395 | if (work_busy(&ring->persistent_purge_work)) { |
| 384 | pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); | 396 | pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); |
| 385 | goto out; | 397 | goto out; |
| 386 | } | 398 | } |
| 387 | 399 | ||
| 388 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; | 400 | if (ring->persistent_gnt_c < xen_blkif_max_pgrants || |
| 389 | num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; | 401 | (ring->persistent_gnt_c == xen_blkif_max_pgrants && |
| 390 | num_clean = min(ring->persistent_gnt_c, num_clean); | 402 | !ring->blkif->vbd.overflow_max_grants)) { |
| 391 | if ((num_clean == 0) || | 403 | num_clean = 0; |
| 392 | (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use)))) | 404 | } else { |
| 393 | goto out; | 405 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; |
| 406 | num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + | ||
| 407 | num_clean; | ||
| 408 | num_clean = min(ring->persistent_gnt_c, num_clean); | ||
| 409 | pr_debug("Going to purge at least %u persistent grants\n", | ||
| 410 | num_clean); | ||
| 411 | } | ||
| 394 | 412 | ||
| 395 | /* | 413 | /* |
| 396 | * At this point, we can assure that there will be no calls | 414 | * At this point, we can assure that there will be no calls |
| @@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) | |||
| 401 | * number of grants. | 419 | * number of grants. |
| 402 | */ | 420 | */ |
| 403 | 421 | ||
| 404 | total = num_clean; | 422 | total = 0; |
| 405 | |||
| 406 | pr_debug("Going to purge %u persistent grants\n", num_clean); | ||
| 407 | 423 | ||
| 408 | BUG_ON(!list_empty(&ring->persistent_purge_list)); | 424 | BUG_ON(!list_empty(&ring->persistent_purge_list)); |
| 409 | root = &ring->persistent_gnts; | 425 | root = &ring->persistent_gnts; |
| @@ -412,46 +428,37 @@ purge_list: | |||
| 412 | BUG_ON(persistent_gnt->handle == | 428 | BUG_ON(persistent_gnt->handle == |
| 413 | BLKBACK_INVALID_HANDLE); | 429 | BLKBACK_INVALID_HANDLE); |
| 414 | 430 | ||
| 415 | if (clean_used) { | 431 | if (persistent_gnt->active) |
| 416 | clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | ||
| 417 | continue; | 432 | continue; |
| 418 | } | 433 | if (!scan_used && !persistent_gnt_timeout(persistent_gnt)) |
| 419 | |||
| 420 | if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | ||
| 421 | continue; | 434 | continue; |
| 422 | if (!scan_used && | 435 | if (scan_used && total >= num_clean) |
| 423 | (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) | ||
| 424 | continue; | 436 | continue; |
| 425 | 437 | ||
| 426 | rb_erase(&persistent_gnt->node, root); | 438 | rb_erase(&persistent_gnt->node, root); |
| 427 | list_add(&persistent_gnt->remove_node, | 439 | list_add(&persistent_gnt->remove_node, |
| 428 | &ring->persistent_purge_list); | 440 | &ring->persistent_purge_list); |
| 429 | if (--num_clean == 0) | 441 | total++; |
| 430 | goto finished; | ||
| 431 | } | 442 | } |
| 432 | /* | 443 | /* |
| 433 | * If we get here it means we also need to start cleaning | 444 | * Check whether we also need to start cleaning |
| 434 | * grants that were used since last purge in order to cope | 445 | * grants that were used since last purge in order to cope |
| 435 | * with the requested num | 446 | * with the requested num |
| 436 | */ | 447 | */ |
| 437 | if (!scan_used && !clean_used) { | 448 | if (!scan_used && total < num_clean) { |
| 438 | pr_debug("Still missing %u purged frames\n", num_clean); | 449 | pr_debug("Still missing %u purged frames\n", num_clean - total); |
| 439 | scan_used = true; | 450 | scan_used = true; |
| 440 | goto purge_list; | 451 | goto purge_list; |
| 441 | } | 452 | } |
| 442 | finished: | ||
| 443 | if (!clean_used) { | ||
| 444 | pr_debug("Finished scanning for grants to clean, removing used flag\n"); | ||
| 445 | clean_used = true; | ||
| 446 | goto purge_list; | ||
| 447 | } | ||
| 448 | 453 | ||
| 449 | ring->persistent_gnt_c -= (total - num_clean); | 454 | if (total) { |
| 450 | ring->blkif->vbd.overflow_max_grants = 0; | 455 | ring->persistent_gnt_c -= total; |
| 456 | ring->blkif->vbd.overflow_max_grants = 0; | ||
| 451 | 457 | ||
| 452 | /* We can defer this work */ | 458 | /* We can defer this work */ |
| 453 | schedule_work(&ring->persistent_purge_work); | 459 | schedule_work(&ring->persistent_purge_work); |
| 454 | pr_debug("Purged %u/%u\n", (total - num_clean), total); | 460 | pr_debug("Purged %u/%u\n", num_clean, total); |
| 461 | } | ||
| 455 | 462 | ||
| 456 | out: | 463 | out: |
| 457 | return; | 464 | return; |
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index ecb35fe8ca8d..1d3002d773f7 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
| @@ -233,16 +233,6 @@ struct xen_vbd { | |||
| 233 | 233 | ||
| 234 | struct backend_info; | 234 | struct backend_info; |
| 235 | 235 | ||
| 236 | /* Number of available flags */ | ||
| 237 | #define PERSISTENT_GNT_FLAGS_SIZE 2 | ||
| 238 | /* This persistent grant is currently in use */ | ||
| 239 | #define PERSISTENT_GNT_ACTIVE 0 | ||
| 240 | /* | ||
| 241 | * This persistent grant has been used, this flag is set when we remove the | ||
| 242 | * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently. | ||
| 243 | */ | ||
| 244 | #define PERSISTENT_GNT_WAS_ACTIVE 1 | ||
| 245 | |||
| 246 | /* Number of requests that we can fit in a ring */ | 236 | /* Number of requests that we can fit in a ring */ |
| 247 | #define XEN_BLKIF_REQS_PER_PAGE 32 | 237 | #define XEN_BLKIF_REQS_PER_PAGE 32 |
| 248 | 238 | ||
| @@ -250,7 +240,8 @@ struct persistent_gnt { | |||
| 250 | struct page *page; | 240 | struct page *page; |
| 251 | grant_ref_t gnt; | 241 | grant_ref_t gnt; |
| 252 | grant_handle_t handle; | 242 | grant_handle_t handle; |
| 253 | DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); | 243 | unsigned long last_used; |
| 244 | bool active; | ||
| 254 | struct rb_node node; | 245 | struct rb_node node; |
| 255 | struct list_head remove_node; | 246 | struct list_head remove_node; |
| 256 | }; | 247 | }; |
| @@ -278,7 +269,6 @@ struct xen_blkif_ring { | |||
| 278 | wait_queue_head_t pending_free_wq; | 269 | wait_queue_head_t pending_free_wq; |
| 279 | 270 | ||
| 280 | /* Tree to store persistent grants. */ | 271 | /* Tree to store persistent grants. */ |
| 281 | spinlock_t pers_gnts_lock; | ||
| 282 | struct rb_root persistent_gnts; | 272 | struct rb_root persistent_gnts; |
| 283 | unsigned int persistent_gnt_c; | 273 | unsigned int persistent_gnt_c; |
| 284 | atomic_t persistent_gnt_in_use; | 274 | atomic_t persistent_gnt_in_use; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8986adab9bf5..a71d817e900d 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | #include <linux/scatterlist.h> | 46 | #include <linux/scatterlist.h> |
| 47 | #include <linux/bitmap.h> | 47 | #include <linux/bitmap.h> |
| 48 | #include <linux/list.h> | 48 | #include <linux/list.h> |
| 49 | #include <linux/workqueue.h> | ||
| 49 | 50 | ||
| 50 | #include <xen/xen.h> | 51 | #include <xen/xen.h> |
| 51 | #include <xen/xenbus.h> | 52 | #include <xen/xenbus.h> |
| @@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq) | |||
| 121 | 122 | ||
| 122 | static DEFINE_MUTEX(blkfront_mutex); | 123 | static DEFINE_MUTEX(blkfront_mutex); |
| 123 | static const struct block_device_operations xlvbd_block_fops; | 124 | static const struct block_device_operations xlvbd_block_fops; |
| 125 | static struct delayed_work blkfront_work; | ||
| 126 | static LIST_HEAD(info_list); | ||
| 124 | 127 | ||
| 125 | /* | 128 | /* |
| 126 | * Maximum number of segments in indirect requests, the actual value used by | 129 | * Maximum number of segments in indirect requests, the actual value used by |
| @@ -216,6 +219,7 @@ struct blkfront_info | |||
| 216 | /* Save uncomplete reqs and bios for migration. */ | 219 | /* Save uncomplete reqs and bios for migration. */ |
| 217 | struct list_head requests; | 220 | struct list_head requests; |
| 218 | struct bio_list bio_list; | 221 | struct bio_list bio_list; |
| 222 | struct list_head info_list; | ||
| 219 | }; | 223 | }; |
| 220 | 224 | ||
| 221 | static unsigned int nr_minors; | 225 | static unsigned int nr_minors; |
| @@ -1759,6 +1763,12 @@ abort_transaction: | |||
| 1759 | return err; | 1763 | return err; |
| 1760 | } | 1764 | } |
| 1761 | 1765 | ||
| 1766 | static void free_info(struct blkfront_info *info) | ||
| 1767 | { | ||
| 1768 | list_del(&info->info_list); | ||
| 1769 | kfree(info); | ||
| 1770 | } | ||
| 1771 | |||
| 1762 | /* Common code used when first setting up, and when resuming. */ | 1772 | /* Common code used when first setting up, and when resuming. */ |
| 1763 | static int talk_to_blkback(struct xenbus_device *dev, | 1773 | static int talk_to_blkback(struct xenbus_device *dev, |
| 1764 | struct blkfront_info *info) | 1774 | struct blkfront_info *info) |
| @@ -1880,7 +1890,10 @@ again: | |||
| 1880 | destroy_blkring: | 1890 | destroy_blkring: |
| 1881 | blkif_free(info, 0); | 1891 | blkif_free(info, 0); |
| 1882 | 1892 | ||
| 1883 | kfree(info); | 1893 | mutex_lock(&blkfront_mutex); |
| 1894 | free_info(info); | ||
| 1895 | mutex_unlock(&blkfront_mutex); | ||
| 1896 | |||
| 1884 | dev_set_drvdata(&dev->dev, NULL); | 1897 | dev_set_drvdata(&dev->dev, NULL); |
| 1885 | 1898 | ||
| 1886 | return err; | 1899 | return err; |
| @@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
| 1991 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); | 2004 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); |
| 1992 | dev_set_drvdata(&dev->dev, info); | 2005 | dev_set_drvdata(&dev->dev, info); |
| 1993 | 2006 | ||
| 2007 | mutex_lock(&blkfront_mutex); | ||
| 2008 | list_add(&info->info_list, &info_list); | ||
| 2009 | mutex_unlock(&blkfront_mutex); | ||
| 2010 | |||
| 1994 | return 0; | 2011 | return 0; |
| 1995 | } | 2012 | } |
| 1996 | 2013 | ||
| @@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) | |||
| 2301 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) | 2318 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) |
| 2302 | indirect_segments = 0; | 2319 | indirect_segments = 0; |
| 2303 | info->max_indirect_segments = indirect_segments; | 2320 | info->max_indirect_segments = indirect_segments; |
| 2321 | |||
| 2322 | if (info->feature_persistent) { | ||
| 2323 | mutex_lock(&blkfront_mutex); | ||
| 2324 | schedule_delayed_work(&blkfront_work, HZ * 10); | ||
| 2325 | mutex_unlock(&blkfront_mutex); | ||
| 2326 | } | ||
| 2304 | } | 2327 | } |
| 2305 | 2328 | ||
| 2306 | /* | 2329 | /* |
| @@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) | |||
| 2482 | mutex_unlock(&info->mutex); | 2505 | mutex_unlock(&info->mutex); |
| 2483 | 2506 | ||
| 2484 | if (!bdev) { | 2507 | if (!bdev) { |
| 2485 | kfree(info); | 2508 | mutex_lock(&blkfront_mutex); |
| 2509 | free_info(info); | ||
| 2510 | mutex_unlock(&blkfront_mutex); | ||
| 2486 | return 0; | 2511 | return 0; |
| 2487 | } | 2512 | } |
| 2488 | 2513 | ||
| @@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) | |||
| 2502 | if (info && !bdev->bd_openers) { | 2527 | if (info && !bdev->bd_openers) { |
| 2503 | xlvbd_release_gendisk(info); | 2528 | xlvbd_release_gendisk(info); |
| 2504 | disk->private_data = NULL; | 2529 | disk->private_data = NULL; |
| 2505 | kfree(info); | 2530 | mutex_lock(&blkfront_mutex); |
| 2531 | free_info(info); | ||
| 2532 | mutex_unlock(&blkfront_mutex); | ||
| 2506 | } | 2533 | } |
| 2507 | 2534 | ||
| 2508 | mutex_unlock(&bdev->bd_mutex); | 2535 | mutex_unlock(&bdev->bd_mutex); |
| @@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) | |||
| 2585 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); | 2612 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); |
| 2586 | xlvbd_release_gendisk(info); | 2613 | xlvbd_release_gendisk(info); |
| 2587 | disk->private_data = NULL; | 2614 | disk->private_data = NULL; |
| 2588 | kfree(info); | 2615 | free_info(info); |
| 2589 | } | 2616 | } |
| 2590 | 2617 | ||
| 2591 | out: | 2618 | out: |
| @@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = { | |||
| 2618 | .is_ready = blkfront_is_ready, | 2645 | .is_ready = blkfront_is_ready, |
| 2619 | }; | 2646 | }; |
| 2620 | 2647 | ||
| 2648 | static void purge_persistent_grants(struct blkfront_info *info) | ||
| 2649 | { | ||
| 2650 | unsigned int i; | ||
| 2651 | unsigned long flags; | ||
| 2652 | |||
| 2653 | for (i = 0; i < info->nr_rings; i++) { | ||
| 2654 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; | ||
| 2655 | struct grant *gnt_list_entry, *tmp; | ||
| 2656 | |||
| 2657 | spin_lock_irqsave(&rinfo->ring_lock, flags); | ||
| 2658 | |||
| 2659 | if (rinfo->persistent_gnts_c == 0) { | ||
| 2660 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
| 2661 | continue; | ||
| 2662 | } | ||
| 2663 | |||
| 2664 | list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, | ||
| 2665 | node) { | ||
| 2666 | if (gnt_list_entry->gref == GRANT_INVALID_REF || | ||
| 2667 | gnttab_query_foreign_access(gnt_list_entry->gref)) | ||
| 2668 | continue; | ||
| 2669 | |||
| 2670 | list_del(&gnt_list_entry->node); | ||
| 2671 | gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); | ||
| 2672 | rinfo->persistent_gnts_c--; | ||
| 2673 | __free_page(gnt_list_entry->page); | ||
| 2674 | kfree(gnt_list_entry); | ||
| 2675 | } | ||
| 2676 | |||
| 2677 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
| 2678 | } | ||
| 2679 | } | ||
| 2680 | |||
| 2681 | static void blkfront_delay_work(struct work_struct *work) | ||
| 2682 | { | ||
| 2683 | struct blkfront_info *info; | ||
| 2684 | bool need_schedule_work = false; | ||
| 2685 | |||
| 2686 | mutex_lock(&blkfront_mutex); | ||
| 2687 | |||
| 2688 | list_for_each_entry(info, &info_list, info_list) { | ||
| 2689 | if (info->feature_persistent) { | ||
| 2690 | need_schedule_work = true; | ||
| 2691 | mutex_lock(&info->mutex); | ||
| 2692 | purge_persistent_grants(info); | ||
| 2693 | mutex_unlock(&info->mutex); | ||
| 2694 | } | ||
| 2695 | } | ||
| 2696 | |||
| 2697 | if (need_schedule_work) | ||
| 2698 | schedule_delayed_work(&blkfront_work, HZ * 10); | ||
| 2699 | |||
| 2700 | mutex_unlock(&blkfront_mutex); | ||
| 2701 | } | ||
| 2702 | |||
| 2621 | static int __init xlblk_init(void) | 2703 | static int __init xlblk_init(void) |
| 2622 | { | 2704 | { |
| 2623 | int ret; | 2705 | int ret; |
| @@ -2626,6 +2708,15 @@ static int __init xlblk_init(void) | |||
| 2626 | if (!xen_domain()) | 2708 | if (!xen_domain()) |
| 2627 | return -ENODEV; | 2709 | return -ENODEV; |
| 2628 | 2710 | ||
| 2711 | if (!xen_has_pv_disk_devices()) | ||
| 2712 | return -ENODEV; | ||
| 2713 | |||
| 2714 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
| 2715 | pr_warn("xen_blk: can't get major %d with name %s\n", | ||
| 2716 | XENVBD_MAJOR, DEV_NAME); | ||
| 2717 | return -ENODEV; | ||
| 2718 | } | ||
| 2719 | |||
| 2629 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) | 2720 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) |
| 2630 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; | 2721 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; |
| 2631 | 2722 | ||
| @@ -2641,14 +2732,7 @@ static int __init xlblk_init(void) | |||
| 2641 | xen_blkif_max_queues = nr_cpus; | 2732 | xen_blkif_max_queues = nr_cpus; |
| 2642 | } | 2733 | } |
| 2643 | 2734 | ||
| 2644 | if (!xen_has_pv_disk_devices()) | 2735 | INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work); |
| 2645 | return -ENODEV; | ||
| 2646 | |||
| 2647 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
| 2648 | printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", | ||
| 2649 | XENVBD_MAJOR, DEV_NAME); | ||
| 2650 | return -ENODEV; | ||
| 2651 | } | ||
| 2652 | 2736 | ||
| 2653 | ret = xenbus_register_frontend(&blkfront_driver); | 2737 | ret = xenbus_register_frontend(&blkfront_driver); |
| 2654 | if (ret) { | 2738 | if (ret) { |
| @@ -2663,6 +2747,8 @@ module_init(xlblk_init); | |||
| 2663 | 2747 | ||
| 2664 | static void __exit xlblk_exit(void) | 2748 | static void __exit xlblk_exit(void) |
| 2665 | { | 2749 | { |
| 2750 | cancel_delayed_work_sync(&blkfront_work); | ||
| 2751 | |||
| 2666 | xenbus_unregister_driver(&blkfront_driver); | 2752 | xenbus_unregister_driver(&blkfront_driver); |
| 2667 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); | 2753 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); |
| 2668 | kfree(minors); | 2754 | kfree(minors); |
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 2df11cc08a46..845b0314ce3a 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig | |||
| @@ -200,6 +200,7 @@ config BT_HCIUART_RTL | |||
| 200 | depends on BT_HCIUART | 200 | depends on BT_HCIUART |
| 201 | depends on BT_HCIUART_SERDEV | 201 | depends on BT_HCIUART_SERDEV |
| 202 | depends on GPIOLIB | 202 | depends on GPIOLIB |
| 203 | depends on ACPI | ||
| 203 | select BT_HCIUART_3WIRE | 204 | select BT_HCIUART_3WIRE |
| 204 | select BT_RTL | 205 | select BT_RTL |
| 205 | help | 206 | help |
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index ed2a5c7cb77f..4593baff2bc9 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c | |||
| @@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev) | |||
| 144 | fw_size = fw->size; | 144 | fw_size = fw->size; |
| 145 | 145 | ||
| 146 | /* The size of patch header is 30 bytes, should be skip */ | 146 | /* The size of patch header is 30 bytes, should be skip */ |
| 147 | if (fw_size < 30) | 147 | if (fw_size < 30) { |
| 148 | return -EINVAL; | 148 | err = -EINVAL; |
| 149 | goto free_fw; | ||
| 150 | } | ||
| 149 | 151 | ||
| 150 | fw_size -= 30; | 152 | fw_size -= 30; |
| 151 | fw_ptr += 30; | 153 | fw_ptr += 30; |
| @@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev) | |||
| 172 | fw_ptr += dlen; | 174 | fw_ptr += dlen; |
| 173 | } | 175 | } |
| 174 | 176 | ||
| 177 | free_fw: | ||
| 175 | release_firmware(fw); | 178 | release_firmware(fw); |
| 176 | |||
| 177 | return err; | 179 | return err; |
| 178 | } | 180 | } |
| 179 | 181 | ||
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index c9bac9dc4637..e4fe954e63a9 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
| @@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata) | |||
| 498 | 498 | ||
| 499 | /** | 499 | /** |
| 500 | * syc_ioremap - ioremap register space for the interconnect target module | 500 | * syc_ioremap - ioremap register space for the interconnect target module |
| 501 | * @ddata: deviec driver data | 501 | * @ddata: device driver data |
| 502 | * | 502 | * |
| 503 | * Note that the interconnect target module registers can be anywhere | 503 | * Note that the interconnect target module registers can be anywhere |
| 504 | * within the first child device address space. For example, SGX has | 504 | * within the interconnect target module range. For example, SGX has |
| 505 | * them at offset 0x1fc00 in the 32MB module address space. We just | 505 | * them at offset 0x1fc00 in the 32MB module address space. And cpsw |
| 506 | * what we need around the interconnect target module registers. | 506 | * has them at offset 0x1200 in the CPSW_WR child. Usually the |
| 507 | * the interconnect target module registers are at the beginning of | ||
| 508 | * the module range though. | ||
| 507 | */ | 509 | */ |
| 508 | static int sysc_ioremap(struct sysc *ddata) | 510 | static int sysc_ioremap(struct sysc *ddata) |
| 509 | { | 511 | { |
| 510 | u32 size = 0; | 512 | int size; |
| 511 | |||
| 512 | if (ddata->offsets[SYSC_SYSSTATUS] >= 0) | ||
| 513 | size = ddata->offsets[SYSC_SYSSTATUS]; | ||
| 514 | else if (ddata->offsets[SYSC_SYSCONFIG] >= 0) | ||
| 515 | size = ddata->offsets[SYSC_SYSCONFIG]; | ||
| 516 | else if (ddata->offsets[SYSC_REVISION] >= 0) | ||
| 517 | size = ddata->offsets[SYSC_REVISION]; | ||
| 518 | else | ||
| 519 | return -EINVAL; | ||
| 520 | 513 | ||
| 521 | size &= 0xfff00; | 514 | size = max3(ddata->offsets[SYSC_REVISION], |
| 522 | size += SZ_256; | 515 | ddata->offsets[SYSC_SYSCONFIG], |
| 516 | ddata->offsets[SYSC_SYSSTATUS]); | ||
| 517 | |||
| 518 | if (size < 0 || (size + sizeof(u32)) > ddata->module_size) | ||
| 519 | return -EINVAL; | ||
| 523 | 520 | ||
| 524 | ddata->module_va = devm_ioremap(ddata->dev, | 521 | ddata->module_va = devm_ioremap(ddata->dev, |
| 525 | ddata->module_pa, | 522 | ddata->module_pa, |
| 526 | size); | 523 | size + sizeof(u32)); |
| 527 | if (!ddata->module_va) | 524 | if (!ddata->module_va) |
| 528 | return -EIO; | 525 | return -EIO; |
| 529 | 526 | ||
| @@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev) | |||
| 1224 | if (!pm_runtime_status_suspended(dev)) { | 1221 | if (!pm_runtime_status_suspended(dev)) { |
| 1225 | error = pm_generic_runtime_suspend(dev); | 1222 | error = pm_generic_runtime_suspend(dev); |
| 1226 | if (error) { | 1223 | if (error) { |
| 1227 | dev_err(dev, "%s error at %i: %i\n", | 1224 | dev_warn(dev, "%s busy at %i: %i\n", |
| 1228 | __func__, __LINE__, error); | 1225 | __func__, __LINE__, error); |
| 1229 | 1226 | ||
| 1230 | return error; | 1227 | return 0; |
| 1231 | } | 1228 | } |
| 1232 | 1229 | ||
| 1233 | error = sysc_runtime_suspend(ddata->dev); | 1230 | error = sysc_runtime_suspend(ddata->dev); |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 113fc6edb2b0..a5d5a96479bf 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
| @@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, | |||
| 2546 | if (!CDROM_CAN(CDC_SELECT_DISC) || | 2546 | if (!CDROM_CAN(CDC_SELECT_DISC) || |
| 2547 | (arg == CDSL_CURRENT || arg == CDSL_NONE)) | 2547 | (arg == CDSL_CURRENT || arg == CDSL_NONE)) |
| 2548 | return cdi->ops->drive_status(cdi, CDSL_CURRENT); | 2548 | return cdi->ops->drive_status(cdi, CDSL_CURRENT); |
| 2549 | if (((int)arg >= cdi->capacity)) | 2549 | if (arg >= cdi->capacity) |
| 2550 | return -EINVAL; | 2550 | return -EINVAL; |
| 2551 | return cdrom_slot_status(cdi, arg); | 2551 | return cdrom_slot_status(cdi, arg); |
| 2552 | } | 2552 | } |
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index 740af90a9508..c5edf8f2fd19 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c | |||
| @@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) | |||
| 558 | if (!clk_base) | 558 | if (!clk_base) |
| 559 | goto npcm7xx_init_error; | 559 | goto npcm7xx_init_error; |
| 560 | 560 | ||
| 561 | npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) * | 561 | npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws, |
| 562 | NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL); | 562 | NPCM7XX_NUM_CLOCKS), GFP_KERNEL); |
| 563 | if (!npcm7xx_clk_data) | 563 | if (!npcm7xx_clk_data) |
| 564 | goto npcm7xx_init_np_err; | 564 | goto npcm7xx_init_np_err; |
| 565 | 565 | ||
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index fb62f3938008..3a0996f2d556 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c | |||
| @@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev) | |||
| 46 | clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), | 46 | clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), |
| 47 | 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); | 47 | 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); |
| 48 | 48 | ||
| 49 | clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk); | 49 | clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk); |
| 50 | 50 | ||
| 51 | hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", | 51 | hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", |
| 52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, | 52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 110483f0e3fb..e26a40971b26 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
| @@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, | |||
| 379 | if (idx == -1) | 379 | if (idx == -1) |
| 380 | idx = i; /* first enabled state */ | 380 | idx = i; /* first enabled state */ |
| 381 | if (s->target_residency > data->predicted_us) { | 381 | if (s->target_residency > data->predicted_us) { |
| 382 | if (!tick_nohz_tick_stopped()) | 382 | if (data->predicted_us < TICK_USEC) |
| 383 | break; | 383 | break; |
| 384 | 384 | ||
| 385 | if (!tick_nohz_tick_stopped()) { | ||
| 386 | /* | ||
| 387 | * If the state selected so far is shallow, | ||
| 388 | * waking up early won't hurt, so retain the | ||
| 389 | * tick in that case and let the governor run | ||
| 390 | * again in the next iteration of the loop. | ||
| 391 | */ | ||
| 392 | expected_interval = drv->states[idx].target_residency; | ||
| 393 | break; | ||
| 394 | } | ||
| 395 | |||
| 385 | /* | 396 | /* |
| 386 | * If the state selected so far is shallow and this | 397 | * If the state selected so far is shallow and this |
| 387 | * state's target residency matches the time till the | 398 | * state's target residency matches the time till the |
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 6e61cc93c2b0..d7aa7d7ff102 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
| @@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 679 | int ret = 0; | 679 | int ret = 0; |
| 680 | 680 | ||
| 681 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { | 681 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { |
| 682 | crypto_ablkcipher_set_flags(ablkcipher, | ||
| 683 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 684 | dev_err(jrdev, "key size mismatch\n"); | 682 | dev_err(jrdev, "key size mismatch\n"); |
| 685 | return -EINVAL; | 683 | goto badkey; |
| 686 | } | 684 | } |
| 687 | 685 | ||
| 688 | ctx->cdata.keylen = keylen; | 686 | ctx->cdata.keylen = keylen; |
| @@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 715 | return ret; | 713 | return ret; |
| 716 | badkey: | 714 | badkey: |
| 717 | crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 715 | crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 718 | return 0; | 716 | return -EINVAL; |
| 719 | } | 717 | } |
| 720 | 718 | ||
| 721 | /* | 719 | /* |
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 578ea63a3109..f26d62e5533a 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
| @@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
| 71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); | 71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); |
| 72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); | 72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); |
| 73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | 73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
| 74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
| 75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | 75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | 78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, |
| @@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
| 90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); | 90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); |
| 91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); | 91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); |
| 92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | 92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
| 93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
| 94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | 94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | /* RSA Job Completion handler */ | 97 | /* RSA Job Completion handler */ |
| @@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
| 417 | goto unmap_p; | 417 | goto unmap_p; |
| 418 | } | 418 | } |
| 419 | 419 | ||
| 420 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | 420 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
| 421 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | 421 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
| 422 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | 422 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
| 423 | goto unmap_q; | 423 | goto unmap_q; |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | 426 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
| 427 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | 427 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
| 428 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | 428 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
| 429 | goto unmap_tmp1; | 429 | goto unmap_tmp1; |
| @@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
| 451 | return 0; | 451 | return 0; |
| 452 | 452 | ||
| 453 | unmap_tmp1: | 453 | unmap_tmp1: |
| 454 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 454 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
| 455 | unmap_q: | 455 | unmap_q: |
| 456 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | 456 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
| 457 | unmap_p: | 457 | unmap_p: |
| @@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
| 504 | goto unmap_dq; | 504 | goto unmap_dq; |
| 505 | } | 505 | } |
| 506 | 506 | ||
| 507 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | 507 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
| 508 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | 508 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
| 509 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | 509 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
| 510 | goto unmap_qinv; | 510 | goto unmap_qinv; |
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | 513 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
| 514 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | 514 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
| 515 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | 515 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
| 516 | goto unmap_tmp1; | 516 | goto unmap_tmp1; |
| @@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
| 538 | return 0; | 538 | return 0; |
| 539 | 539 | ||
| 540 | unmap_tmp1: | 540 | unmap_tmp1: |
| 541 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 541 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
| 542 | unmap_qinv: | 542 | unmap_qinv: |
| 543 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | 543 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
| 544 | unmap_dq: | 544 | unmap_dq: |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index f4f258075b89..acdd72016ffe 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
| 190 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); | 190 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); |
| 191 | 191 | ||
| 192 | /* Unmap just-run descriptor so we can post-process */ | 192 | /* Unmap just-run descriptor so we can post-process */ |
| 193 | dma_unmap_single(dev, jrp->outring[hw_idx].desc, | 193 | dma_unmap_single(dev, |
| 194 | caam_dma_to_cpu(jrp->outring[hw_idx].desc), | ||
| 194 | jrp->entinfo[sw_idx].desc_size, | 195 | jrp->entinfo[sw_idx].desc_size, |
| 195 | DMA_TO_DEVICE); | 196 | DMA_TO_DEVICE); |
| 196 | 197 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 9a476bb6d4c7..af596455b420 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h | |||
| @@ -35,6 +35,7 @@ struct nitrox_cmdq { | |||
| 35 | /* requests in backlog queues */ | 35 | /* requests in backlog queues */ |
| 36 | atomic_t backlog_count; | 36 | atomic_t backlog_count; |
| 37 | 37 | ||
| 38 | int write_idx; | ||
| 38 | /* command size 32B/64B */ | 39 | /* command size 32B/64B */ |
| 39 | u8 instr_size; | 40 | u8 instr_size; |
| 40 | u8 qno; | 41 | u8 qno; |
| @@ -87,7 +88,7 @@ struct nitrox_bh { | |||
| 87 | struct bh_data *slc; | 88 | struct bh_data *slc; |
| 88 | }; | 89 | }; |
| 89 | 90 | ||
| 90 | /* NITROX-5 driver state */ | 91 | /* NITROX-V driver state */ |
| 91 | #define NITROX_UCODE_LOADED 0 | 92 | #define NITROX_UCODE_LOADED 0 |
| 92 | #define NITROX_READY 1 | 93 | #define NITROX_READY 1 |
| 93 | 94 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index ebe267379ac9..4d31df07777f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c | |||
| @@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq) | |||
| 36 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); | 36 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); |
| 37 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); | 37 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); |
| 38 | cmdq->qsize = (qsize + PKT_IN_ALIGN); | 38 | cmdq->qsize = (qsize + PKT_IN_ALIGN); |
| 39 | cmdq->write_idx = 0; | ||
| 39 | 40 | ||
| 40 | spin_lock_init(&cmdq->response_lock); | 41 | spin_lock_init(&cmdq->response_lock); |
| 41 | spin_lock_init(&cmdq->cmdq_lock); | 42 | spin_lock_init(&cmdq->cmdq_lock); |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index deaefd532aaa..4a362fc22f62 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
| @@ -42,6 +42,16 @@ | |||
| 42 | * Invalid flag options in AES-CCM IV. | 42 | * Invalid flag options in AES-CCM IV. |
| 43 | */ | 43 | */ |
| 44 | 44 | ||
| 45 | static inline int incr_index(int index, int count, int max) | ||
| 46 | { | ||
| 47 | if ((index + count) >= max) | ||
| 48 | index = index + count - max; | ||
| 49 | else | ||
| 50 | index += count; | ||
| 51 | |||
| 52 | return index; | ||
| 53 | } | ||
| 54 | |||
| 45 | /** | 55 | /** |
| 46 | * dma_free_sglist - unmap and free the sg lists. | 56 | * dma_free_sglist - unmap and free the sg lists. |
| 47 | * @ndev: N5 device | 57 | * @ndev: N5 device |
| @@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr, | |||
| 426 | struct nitrox_cmdq *cmdq) | 436 | struct nitrox_cmdq *cmdq) |
| 427 | { | 437 | { |
| 428 | struct nitrox_device *ndev = sr->ndev; | 438 | struct nitrox_device *ndev = sr->ndev; |
| 429 | union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; | 439 | int idx; |
| 430 | u64 offset; | ||
| 431 | u8 *ent; | 440 | u8 *ent; |
| 432 | 441 | ||
| 433 | spin_lock_bh(&cmdq->cmdq_lock); | 442 | spin_lock_bh(&cmdq->cmdq_lock); |
| 434 | 443 | ||
| 435 | /* get the next write offset */ | 444 | idx = cmdq->write_idx; |
| 436 | offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno); | ||
| 437 | pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset); | ||
| 438 | /* copy the instruction */ | 445 | /* copy the instruction */ |
| 439 | ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; | 446 | ent = cmdq->head + (idx * cmdq->instr_size); |
| 440 | memcpy(ent, &sr->instr, cmdq->instr_size); | 447 | memcpy(ent, &sr->instr, cmdq->instr_size); |
| 441 | /* flush the command queue updates */ | ||
| 442 | dma_wmb(); | ||
| 443 | 448 | ||
| 444 | sr->tstamp = jiffies; | ||
| 445 | atomic_set(&sr->status, REQ_POSTED); | 449 | atomic_set(&sr->status, REQ_POSTED); |
| 446 | response_list_add(sr, cmdq); | 450 | response_list_add(sr, cmdq); |
| 451 | sr->tstamp = jiffies; | ||
| 452 | /* flush the command queue updates */ | ||
| 453 | dma_wmb(); | ||
| 447 | 454 | ||
| 448 | /* Ring doorbell with count 1 */ | 455 | /* Ring doorbell with count 1 */ |
| 449 | writeq(1, cmdq->dbell_csr_addr); | 456 | writeq(1, cmdq->dbell_csr_addr); |
| 450 | /* orders the doorbell rings */ | 457 | /* orders the doorbell rings */ |
| 451 | mmiowb(); | 458 | mmiowb(); |
| 452 | 459 | ||
| 460 | cmdq->write_idx = incr_index(idx, 1, ndev->qlen); | ||
| 461 | |||
| 453 | spin_unlock_bh(&cmdq->cmdq_lock); | 462 | spin_unlock_bh(&cmdq->cmdq_lock); |
| 454 | } | 463 | } |
| 455 | 464 | ||
| @@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | |||
| 459 | struct nitrox_softreq *sr, *tmp; | 468 | struct nitrox_softreq *sr, *tmp; |
| 460 | int ret = 0; | 469 | int ret = 0; |
| 461 | 470 | ||
| 471 | if (!atomic_read(&cmdq->backlog_count)) | ||
| 472 | return 0; | ||
| 473 | |||
| 462 | spin_lock_bh(&cmdq->backlog_lock); | 474 | spin_lock_bh(&cmdq->backlog_lock); |
| 463 | 475 | ||
| 464 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { | 476 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { |
| @@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | |||
| 466 | 478 | ||
| 467 | /* submit until space available */ | 479 | /* submit until space available */ |
| 468 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | 480 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
| 469 | ret = -EBUSY; | 481 | ret = -ENOSPC; |
| 470 | break; | 482 | break; |
| 471 | } | 483 | } |
| 472 | /* delete from backlog list */ | 484 | /* delete from backlog list */ |
| @@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) | |||
| 491 | { | 503 | { |
| 492 | struct nitrox_cmdq *cmdq = sr->cmdq; | 504 | struct nitrox_cmdq *cmdq = sr->cmdq; |
| 493 | struct nitrox_device *ndev = sr->ndev; | 505 | struct nitrox_device *ndev = sr->ndev; |
| 494 | int ret = -EBUSY; | 506 | |
| 507 | /* try to post backlog requests */ | ||
| 508 | post_backlog_cmds(cmdq); | ||
| 495 | 509 | ||
| 496 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | 510 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
| 497 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 511 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
| 498 | return -EAGAIN; | 512 | return -ENOSPC; |
| 499 | 513 | /* add to backlog list */ | |
| 500 | backlog_list_add(sr, cmdq); | 514 | backlog_list_add(sr, cmdq); |
| 501 | } else { | 515 | return -EBUSY; |
| 502 | ret = post_backlog_cmds(cmdq); | ||
| 503 | if (ret) { | ||
| 504 | backlog_list_add(sr, cmdq); | ||
| 505 | return ret; | ||
| 506 | } | ||
| 507 | post_se_instr(sr, cmdq); | ||
| 508 | ret = -EINPROGRESS; | ||
| 509 | } | 516 | } |
| 510 | return ret; | 517 | post_se_instr(sr, cmdq); |
| 518 | |||
| 519 | return -EINPROGRESS; | ||
| 511 | } | 520 | } |
| 512 | 521 | ||
| 513 | /** | 522 | /** |
| @@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev, | |||
| 624 | */ | 633 | */ |
| 625 | sr->instr.fdata[0] = *((u64 *)&req->gph); | 634 | sr->instr.fdata[0] = *((u64 *)&req->gph); |
| 626 | sr->instr.fdata[1] = 0; | 635 | sr->instr.fdata[1] = 0; |
| 627 | /* flush the soft_req changes before posting the cmd */ | ||
| 628 | wmb(); | ||
| 629 | 636 | ||
| 630 | ret = nitrox_enqueue_request(sr); | 637 | ret = nitrox_enqueue_request(sr); |
| 631 | if (ret == -EAGAIN) | 638 | if (ret == -ENOSPC) |
| 632 | goto send_fail; | 639 | goto send_fail; |
| 633 | 640 | ||
| 634 | return ret; | 641 | return ret; |
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index a53a0e6ba024..7725b6ee14ef 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h | |||
| @@ -96,6 +96,10 @@ enum csk_flags { | |||
| 96 | CSK_CONN_INLINE, /* Connection on HW */ | 96 | CSK_CONN_INLINE, /* Connection on HW */ |
| 97 | }; | 97 | }; |
| 98 | 98 | ||
| 99 | enum chtls_cdev_state { | ||
| 100 | CHTLS_CDEV_STATE_UP = 1 | ||
| 101 | }; | ||
| 102 | |||
| 99 | struct listen_ctx { | 103 | struct listen_ctx { |
| 100 | struct sock *lsk; | 104 | struct sock *lsk; |
| 101 | struct chtls_dev *cdev; | 105 | struct chtls_dev *cdev; |
| @@ -146,6 +150,7 @@ struct chtls_dev { | |||
| 146 | unsigned int send_page_order; | 150 | unsigned int send_page_order; |
| 147 | int max_host_sndbuf; | 151 | int max_host_sndbuf; |
| 148 | struct key_map kmap; | 152 | struct key_map kmap; |
| 153 | unsigned int cdev_state; | ||
| 149 | }; | 154 | }; |
| 150 | 155 | ||
| 151 | struct chtls_hws { | 156 | struct chtls_hws { |
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 9b07f9165658..f59b044ebd25 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c | |||
| @@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev) | |||
| 160 | tlsdev->hash = chtls_create_hash; | 160 | tlsdev->hash = chtls_create_hash; |
| 161 | tlsdev->unhash = chtls_destroy_hash; | 161 | tlsdev->unhash = chtls_destroy_hash; |
| 162 | tls_register_device(&cdev->tlsdev); | 162 | tls_register_device(&cdev->tlsdev); |
| 163 | cdev->cdev_state = CHTLS_CDEV_STATE_UP; | ||
| 163 | } | 164 | } |
| 164 | 165 | ||
| 165 | static void chtls_unregister_dev(struct chtls_dev *cdev) | 166 | static void chtls_unregister_dev(struct chtls_dev *cdev) |
| @@ -281,8 +282,10 @@ static void chtls_free_all_uld(void) | |||
| 281 | struct chtls_dev *cdev, *tmp; | 282 | struct chtls_dev *cdev, *tmp; |
| 282 | 283 | ||
| 283 | mutex_lock(&cdev_mutex); | 284 | mutex_lock(&cdev_mutex); |
| 284 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list) | 285 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list) { |
| 285 | chtls_free_uld(cdev); | 286 | if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) |
| 287 | chtls_free_uld(cdev); | ||
| 288 | } | ||
| 286 | mutex_unlock(&cdev_mutex); | 289 | mutex_unlock(&cdev_mutex); |
| 287 | } | 290 | } |
| 288 | 291 | ||
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 5285ece4f33a..b71895871be3 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
| @@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, | |||
| 107 | ret = crypto_skcipher_encrypt(req); | 107 | ret = crypto_skcipher_encrypt(req); |
| 108 | skcipher_request_zero(req); | 108 | skcipher_request_zero(req); |
| 109 | } else { | 109 | } else { |
| 110 | preempt_disable(); | ||
| 111 | pagefault_disable(); | ||
| 112 | enable_kernel_vsx(); | ||
| 113 | |||
| 114 | blkcipher_walk_init(&walk, dst, src, nbytes); | 110 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 115 | ret = blkcipher_walk_virt(desc, &walk); | 111 | ret = blkcipher_walk_virt(desc, &walk); |
| 116 | while ((nbytes = walk.nbytes)) { | 112 | while ((nbytes = walk.nbytes)) { |
| 113 | preempt_disable(); | ||
| 114 | pagefault_disable(); | ||
| 115 | enable_kernel_vsx(); | ||
| 117 | aes_p8_cbc_encrypt(walk.src.virt.addr, | 116 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
| 118 | walk.dst.virt.addr, | 117 | walk.dst.virt.addr, |
| 119 | nbytes & AES_BLOCK_MASK, | 118 | nbytes & AES_BLOCK_MASK, |
| 120 | &ctx->enc_key, walk.iv, 1); | 119 | &ctx->enc_key, walk.iv, 1); |
| 120 | disable_kernel_vsx(); | ||
| 121 | pagefault_enable(); | ||
| 122 | preempt_enable(); | ||
| 123 | |||
| 121 | nbytes &= AES_BLOCK_SIZE - 1; | 124 | nbytes &= AES_BLOCK_SIZE - 1; |
| 122 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 125 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
| 123 | } | 126 | } |
| 124 | |||
| 125 | disable_kernel_vsx(); | ||
| 126 | pagefault_enable(); | ||
| 127 | preempt_enable(); | ||
| 128 | } | 127 | } |
| 129 | 128 | ||
| 130 | return ret; | 129 | return ret; |
| @@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, | |||
| 147 | ret = crypto_skcipher_decrypt(req); | 146 | ret = crypto_skcipher_decrypt(req); |
| 148 | skcipher_request_zero(req); | 147 | skcipher_request_zero(req); |
| 149 | } else { | 148 | } else { |
| 150 | preempt_disable(); | ||
| 151 | pagefault_disable(); | ||
| 152 | enable_kernel_vsx(); | ||
| 153 | |||
| 154 | blkcipher_walk_init(&walk, dst, src, nbytes); | 149 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 155 | ret = blkcipher_walk_virt(desc, &walk); | 150 | ret = blkcipher_walk_virt(desc, &walk); |
| 156 | while ((nbytes = walk.nbytes)) { | 151 | while ((nbytes = walk.nbytes)) { |
| 152 | preempt_disable(); | ||
| 153 | pagefault_disable(); | ||
| 154 | enable_kernel_vsx(); | ||
| 157 | aes_p8_cbc_encrypt(walk.src.virt.addr, | 155 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
| 158 | walk.dst.virt.addr, | 156 | walk.dst.virt.addr, |
| 159 | nbytes & AES_BLOCK_MASK, | 157 | nbytes & AES_BLOCK_MASK, |
| 160 | &ctx->dec_key, walk.iv, 0); | 158 | &ctx->dec_key, walk.iv, 0); |
| 159 | disable_kernel_vsx(); | ||
| 160 | pagefault_enable(); | ||
| 161 | preempt_enable(); | ||
| 162 | |||
| 161 | nbytes &= AES_BLOCK_SIZE - 1; | 163 | nbytes &= AES_BLOCK_SIZE - 1; |
| 162 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 164 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
| 163 | } | 165 | } |
| 164 | |||
| 165 | disable_kernel_vsx(); | ||
| 166 | pagefault_enable(); | ||
| 167 | preempt_enable(); | ||
| 168 | } | 166 | } |
| 169 | 167 | ||
| 170 | return ret; | 168 | return ret; |
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8bd9aff0f55f..e9954a7d4694 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
| @@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, | |||
| 116 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); | 116 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); |
| 117 | skcipher_request_zero(req); | 117 | skcipher_request_zero(req); |
| 118 | } else { | 118 | } else { |
| 119 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 120 | |||
| 121 | ret = blkcipher_walk_virt(desc, &walk); | ||
| 122 | |||
| 119 | preempt_disable(); | 123 | preempt_disable(); |
| 120 | pagefault_disable(); | 124 | pagefault_disable(); |
| 121 | enable_kernel_vsx(); | 125 | enable_kernel_vsx(); |
| 122 | 126 | ||
| 123 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 124 | |||
| 125 | ret = blkcipher_walk_virt(desc, &walk); | ||
| 126 | iv = walk.iv; | 127 | iv = walk.iv; |
| 127 | memset(tweak, 0, AES_BLOCK_SIZE); | 128 | memset(tweak, 0, AES_BLOCK_SIZE); |
| 128 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); | 129 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); |
| 129 | 130 | ||
| 131 | disable_kernel_vsx(); | ||
| 132 | pagefault_enable(); | ||
| 133 | preempt_enable(); | ||
| 134 | |||
| 130 | while ((nbytes = walk.nbytes)) { | 135 | while ((nbytes = walk.nbytes)) { |
| 136 | preempt_disable(); | ||
| 137 | pagefault_disable(); | ||
| 138 | enable_kernel_vsx(); | ||
| 131 | if (enc) | 139 | if (enc) |
| 132 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, | 140 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, |
| 133 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); | 141 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); |
| 134 | else | 142 | else |
| 135 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, | 143 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, |
| 136 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); | 144 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); |
| 145 | disable_kernel_vsx(); | ||
| 146 | pagefault_enable(); | ||
| 147 | preempt_enable(); | ||
| 137 | 148 | ||
| 138 | nbytes &= AES_BLOCK_SIZE - 1; | 149 | nbytes &= AES_BLOCK_SIZE - 1; |
| 139 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 150 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
| 140 | } | 151 | } |
| 141 | |||
| 142 | disable_kernel_vsx(); | ||
| 143 | pagefault_enable(); | ||
| 144 | preempt_enable(); | ||
| 145 | } | 152 | } |
| 146 | return ret; | 153 | return ret; |
| 147 | } | 154 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 502b94fb116a..b6e9df11115d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
| 1012 | if (r) | 1012 | if (r) |
| 1013 | return r; | 1013 | return r; |
| 1014 | 1014 | ||
| 1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { | 1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) |
| 1016 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; | 1016 | parser->job->preamble_status |= |
| 1017 | if (!parser->ctx->preamble_presented) { | 1017 | AMDGPU_PREAMBLE_IB_PRESENT; |
| 1018 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
| 1019 | parser->ctx->preamble_presented = true; | ||
| 1020 | } | ||
| 1021 | } | ||
| 1022 | 1018 | ||
| 1023 | if (parser->ring && parser->ring != ring) | 1019 | if (parser->ring && parser->ring != ring) |
| 1024 | return -EINVAL; | 1020 | return -EINVAL; |
| @@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1207 | 1203 | ||
| 1208 | int r; | 1204 | int r; |
| 1209 | 1205 | ||
| 1206 | job = p->job; | ||
| 1207 | p->job = NULL; | ||
| 1208 | |||
| 1209 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
| 1210 | if (r) | ||
| 1211 | goto error_unlock; | ||
| 1212 | |||
| 1213 | /* No memory allocation is allowed while holding the mn lock */ | ||
| 1210 | amdgpu_mn_lock(p->mn); | 1214 | amdgpu_mn_lock(p->mn); |
| 1211 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { | 1215 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
| 1212 | struct amdgpu_bo *bo = e->robj; | 1216 | struct amdgpu_bo *bo = e->robj; |
| 1213 | 1217 | ||
| 1214 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { | 1218 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { |
| 1215 | amdgpu_mn_unlock(p->mn); | 1219 | r = -ERESTARTSYS; |
| 1216 | return -ERESTARTSYS; | 1220 | goto error_abort; |
| 1217 | } | 1221 | } |
| 1218 | } | 1222 | } |
| 1219 | 1223 | ||
| 1220 | job = p->job; | ||
| 1221 | p->job = NULL; | ||
| 1222 | |||
| 1223 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
| 1224 | if (r) { | ||
| 1225 | amdgpu_job_free(job); | ||
| 1226 | amdgpu_mn_unlock(p->mn); | ||
| 1227 | return r; | ||
| 1228 | } | ||
| 1229 | |||
| 1230 | job->owner = p->filp; | 1224 | job->owner = p->filp; |
| 1231 | p->fence = dma_fence_get(&job->base.s_fence->finished); | 1225 | p->fence = dma_fence_get(&job->base.s_fence->finished); |
| 1232 | 1226 | ||
| @@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1241 | 1235 | ||
| 1242 | amdgpu_cs_post_dependencies(p); | 1236 | amdgpu_cs_post_dependencies(p); |
| 1243 | 1237 | ||
| 1238 | if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && | ||
| 1239 | !p->ctx->preamble_presented) { | ||
| 1240 | job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
| 1241 | p->ctx->preamble_presented = true; | ||
| 1242 | } | ||
| 1243 | |||
| 1244 | cs->out.handle = seq; | 1244 | cs->out.handle = seq; |
| 1245 | job->uf_sequence = seq; | 1245 | job->uf_sequence = seq; |
| 1246 | 1246 | ||
| @@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1258 | amdgpu_mn_unlock(p->mn); | 1258 | amdgpu_mn_unlock(p->mn); |
| 1259 | 1259 | ||
| 1260 | return 0; | 1260 | return 0; |
| 1261 | |||
| 1262 | error_abort: | ||
| 1263 | dma_fence_put(&job->base.s_fence->finished); | ||
| 1264 | job->base.s_fence = NULL; | ||
| 1265 | |||
| 1266 | error_unlock: | ||
| 1267 | amdgpu_job_free(job); | ||
| 1268 | amdgpu_mn_unlock(p->mn); | ||
| 1269 | return r; | ||
| 1261 | } | 1270 | } |
| 1262 | 1271 | ||
| 1263 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 1272 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 5518e623fed2..51b5e977ca88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 164 | return r; | 164 | return r; |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
| 167 | if (ring->funcs->emit_pipeline_sync && job && | 168 | if (ring->funcs->emit_pipeline_sync && job && |
| 168 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || | 169 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || |
| 170 | (amdgpu_sriov_vf(adev) && need_ctx_switch) || | ||
| 169 | amdgpu_vm_need_pipeline_sync(ring, job))) { | 171 | amdgpu_vm_need_pipeline_sync(ring, job))) { |
| 170 | need_pipe_sync = true; | 172 | need_pipe_sync = true; |
| 171 | dma_fence_put(tmp); | 173 | dma_fence_put(tmp); |
| @@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 196 | } | 198 | } |
| 197 | 199 | ||
| 198 | skip_preamble = ring->current_ctx == fence_ctx; | 200 | skip_preamble = ring->current_ctx == fence_ctx; |
| 199 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
| 200 | if (job && ring->funcs->emit_cntxcntl) { | 201 | if (job && ring->funcs->emit_cntxcntl) { |
| 201 | if (need_ctx_switch) | 202 | if (need_ctx_switch) |
| 202 | status |= AMDGPU_HAVE_CTX_SWITCH; | 203 | status |= AMDGPU_HAVE_CTX_SWITCH; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8f98629fbe59..7b4e657a95c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
| @@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
| 1932 | amdgpu_fence_wait_empty(ring); | 1932 | amdgpu_fence_wait_empty(ring); |
| 1933 | } | 1933 | } |
| 1934 | 1934 | ||
| 1935 | mutex_lock(&adev->pm.mutex); | ||
| 1936 | /* update battery/ac status */ | ||
| 1937 | if (power_supply_is_system_supplied() > 0) | ||
| 1938 | adev->pm.ac_power = true; | ||
| 1939 | else | ||
| 1940 | adev->pm.ac_power = false; | ||
| 1941 | mutex_unlock(&adev->pm.mutex); | ||
| 1942 | |||
| 1943 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | 1935 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
| 1944 | if (!amdgpu_device_has_dc_support(adev)) { | 1936 | if (!amdgpu_device_has_dc_support(adev)) { |
| 1945 | mutex_lock(&adev->pm.mutex); | 1937 | mutex_lock(&adev->pm.mutex); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..b17771dd5ce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, | |||
| 172 | * is validated on next vm use to avoid fault. | 172 | * is validated on next vm use to avoid fault. |
| 173 | * */ | 173 | * */ |
| 174 | list_move_tail(&base->vm_status, &vm->evicted); | 174 | list_move_tail(&base->vm_status, &vm->evicted); |
| 175 | base->moved = true; | ||
| 175 | } | 176 | } |
| 176 | 177 | ||
| 177 | /** | 178 | /** |
| @@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 369 | uint64_t addr; | 370 | uint64_t addr; |
| 370 | int r; | 371 | int r; |
| 371 | 372 | ||
| 372 | addr = amdgpu_bo_gpu_offset(bo); | ||
| 373 | entries = amdgpu_bo_size(bo) / 8; | 373 | entries = amdgpu_bo_size(bo) / 8; |
| 374 | 374 | ||
| 375 | if (pte_support_ats) { | 375 | if (pte_support_ats) { |
| @@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 401 | if (r) | 401 | if (r) |
| 402 | goto error; | 402 | goto error; |
| 403 | 403 | ||
| 404 | addr = amdgpu_bo_gpu_offset(bo); | ||
| 404 | if (ats_entries) { | 405 | if (ats_entries) { |
| 405 | uint64_t ats_value; | 406 | uint64_t ats_value; |
| 406 | 407 | ||
| @@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) | |||
| 2483 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size | 2484 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size |
| 2484 | * | 2485 | * |
| 2485 | * @adev: amdgpu_device pointer | 2486 | * @adev: amdgpu_device pointer |
| 2486 | * @vm_size: the default vm size if it's set auto | 2487 | * @min_vm_size: the minimum vm size in GB if it's set auto |
| 2487 | * @fragment_size_default: Default PTE fragment size | 2488 | * @fragment_size_default: Default PTE fragment size |
| 2488 | * @max_level: max VMPT level | 2489 | * @max_level: max VMPT level |
| 2489 | * @max_bits: max address space size in bits | 2490 | * @max_bits: max address space size in bits |
| 2490 | * | 2491 | * |
| 2491 | */ | 2492 | */ |
| 2492 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 2493 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
| 2493 | uint32_t fragment_size_default, unsigned max_level, | 2494 | uint32_t fragment_size_default, unsigned max_level, |
| 2494 | unsigned max_bits) | 2495 | unsigned max_bits) |
| 2495 | { | 2496 | { |
| 2497 | unsigned int max_size = 1 << (max_bits - 30); | ||
| 2498 | unsigned int vm_size; | ||
| 2496 | uint64_t tmp; | 2499 | uint64_t tmp; |
| 2497 | 2500 | ||
| 2498 | /* adjust vm size first */ | 2501 | /* adjust vm size first */ |
| 2499 | if (amdgpu_vm_size != -1) { | 2502 | if (amdgpu_vm_size != -1) { |
| 2500 | unsigned max_size = 1 << (max_bits - 30); | ||
| 2501 | |||
| 2502 | vm_size = amdgpu_vm_size; | 2503 | vm_size = amdgpu_vm_size; |
| 2503 | if (vm_size > max_size) { | 2504 | if (vm_size > max_size) { |
| 2504 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", | 2505 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", |
| 2505 | amdgpu_vm_size, max_size); | 2506 | amdgpu_vm_size, max_size); |
| 2506 | vm_size = max_size; | 2507 | vm_size = max_size; |
| 2507 | } | 2508 | } |
| 2509 | } else { | ||
| 2510 | struct sysinfo si; | ||
| 2511 | unsigned int phys_ram_gb; | ||
| 2512 | |||
| 2513 | /* Optimal VM size depends on the amount of physical | ||
| 2514 | * RAM available. Underlying requirements and | ||
| 2515 | * assumptions: | ||
| 2516 | * | ||
| 2517 | * - Need to map system memory and VRAM from all GPUs | ||
| 2518 | * - VRAM from other GPUs not known here | ||
| 2519 | * - Assume VRAM <= system memory | ||
| 2520 | * - On GFX8 and older, VM space can be segmented for | ||
| 2521 | * different MTYPEs | ||
| 2522 | * - Need to allow room for fragmentation, guard pages etc. | ||
| 2523 | * | ||
| 2524 | * This adds up to a rough guess of system memory x3. | ||
| 2525 | * Round up to power of two to maximize the available | ||
| 2526 | * VM size with the given page table size. | ||
| 2527 | */ | ||
| 2528 | si_meminfo(&si); | ||
| 2529 | phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + | ||
| 2530 | (1 << 30) - 1) >> 30; | ||
| 2531 | vm_size = roundup_pow_of_two( | ||
| 2532 | min(max(phys_ram_gb * 3, min_vm_size), max_size)); | ||
| 2508 | } | 2533 | } |
| 2509 | 2534 | ||
| 2510 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; | 2535 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..9fa9df0c5e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
| @@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, | |||
| 321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); | 321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); |
| 322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | 322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, |
| 323 | struct amdgpu_bo_va *bo_va); | 323 | struct amdgpu_bo_va *bo_va); |
| 324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
| 325 | uint32_t fragment_size_default, unsigned max_level, | 325 | uint32_t fragment_size_default, unsigned max_level, |
| 326 | unsigned max_bits); | 326 | unsigned max_bits); |
| 327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..5a9534a82d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
| 5664 | if (amdgpu_sriov_vf(adev)) | 5664 | if (amdgpu_sriov_vf(adev)) |
| 5665 | return 0; | 5665 | return 0; |
| 5666 | 5666 | ||
| 5667 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | ||
| 5668 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
| 5669 | AMD_PG_SUPPORT_CP | | ||
| 5670 | AMD_PG_SUPPORT_GFX_DMG)) | ||
| 5671 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | ||
| 5667 | switch (adev->asic_type) { | 5672 | switch (adev->asic_type) { |
| 5668 | case CHIP_CARRIZO: | 5673 | case CHIP_CARRIZO: |
| 5669 | case CHIP_STONEY: | 5674 | case CHIP_STONEY: |
| @@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
| 5713 | default: | 5718 | default: |
| 5714 | break; | 5719 | break; |
| 5715 | } | 5720 | } |
| 5716 | 5721 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | |
| 5722 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
| 5723 | AMD_PG_SUPPORT_CP | | ||
| 5724 | AMD_PG_SUPPORT_GFX_DMG)) | ||
| 5725 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | ||
| 5717 | return 0; | 5726 | return 0; |
| 5718 | } | 5727 | } |
| 5719 | 5728 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
| @@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) | |||
| 632 | amdgpu_gart_table_vram_unpin(adev); | 632 | amdgpu_gart_table_vram_unpin(adev); |
| 633 | } | 633 | } |
| 634 | 634 | ||
| 635 | static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) | ||
| 636 | { | ||
| 637 | amdgpu_gart_table_vram_free(adev); | ||
| 638 | amdgpu_gart_fini(adev); | ||
| 639 | } | ||
| 640 | |||
| 641 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, | 635 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, |
| 642 | u32 status, u32 addr, u32 mc_client) | 636 | u32 status, u32 addr, u32 mc_client) |
| 643 | { | 637 | { |
| @@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) | |||
| 935 | 929 | ||
| 936 | amdgpu_gem_force_release(adev); | 930 | amdgpu_gem_force_release(adev); |
| 937 | amdgpu_vm_manager_fini(adev); | 931 | amdgpu_vm_manager_fini(adev); |
| 938 | gmc_v6_0_gart_fini(adev); | 932 | amdgpu_gart_table_vram_free(adev); |
| 939 | amdgpu_bo_fini(adev); | 933 | amdgpu_bo_fini(adev); |
| 934 | amdgpu_gart_fini(adev); | ||
| 940 | release_firmware(adev->gmc.fw); | 935 | release_firmware(adev->gmc.fw); |
| 941 | adev->gmc.fw = NULL; | 936 | adev->gmc.fw = NULL; |
| 942 | 937 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
| @@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) | |||
| 747 | } | 747 | } |
| 748 | 748 | ||
| 749 | /** | 749 | /** |
| 750 | * gmc_v7_0_gart_fini - vm fini callback | ||
| 751 | * | ||
| 752 | * @adev: amdgpu_device pointer | ||
| 753 | * | ||
| 754 | * Tears down the driver GART/VM setup (CIK). | ||
| 755 | */ | ||
| 756 | static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) | ||
| 757 | { | ||
| 758 | amdgpu_gart_table_vram_free(adev); | ||
| 759 | amdgpu_gart_fini(adev); | ||
| 760 | } | ||
| 761 | |||
| 762 | /** | ||
| 763 | * gmc_v7_0_vm_decode_fault - print human readable fault info | 750 | * gmc_v7_0_vm_decode_fault - print human readable fault info |
| 764 | * | 751 | * |
| 765 | * @adev: amdgpu_device pointer | 752 | * @adev: amdgpu_device pointer |
| @@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) | |||
| 1095 | amdgpu_gem_force_release(adev); | 1082 | amdgpu_gem_force_release(adev); |
| 1096 | amdgpu_vm_manager_fini(adev); | 1083 | amdgpu_vm_manager_fini(adev); |
| 1097 | kfree(adev->gmc.vm_fault_info); | 1084 | kfree(adev->gmc.vm_fault_info); |
| 1098 | gmc_v7_0_gart_fini(adev); | 1085 | amdgpu_gart_table_vram_free(adev); |
| 1099 | amdgpu_bo_fini(adev); | 1086 | amdgpu_bo_fini(adev); |
| 1087 | amdgpu_gart_fini(adev); | ||
| 1100 | release_firmware(adev->gmc.fw); | 1088 | release_firmware(adev->gmc.fw); |
| 1101 | adev->gmc.fw = NULL; | 1089 | adev->gmc.fw = NULL; |
| 1102 | 1090 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
| @@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) | |||
| 969 | } | 969 | } |
| 970 | 970 | ||
| 971 | /** | 971 | /** |
| 972 | * gmc_v8_0_gart_fini - vm fini callback | ||
| 973 | * | ||
| 974 | * @adev: amdgpu_device pointer | ||
| 975 | * | ||
| 976 | * Tears down the driver GART/VM setup (CIK). | ||
| 977 | */ | ||
| 978 | static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) | ||
| 979 | { | ||
| 980 | amdgpu_gart_table_vram_free(adev); | ||
| 981 | amdgpu_gart_fini(adev); | ||
| 982 | } | ||
| 983 | |||
| 984 | /** | ||
| 985 | * gmc_v8_0_vm_decode_fault - print human readable fault info | 972 | * gmc_v8_0_vm_decode_fault - print human readable fault info |
| 986 | * | 973 | * |
| 987 | * @adev: amdgpu_device pointer | 974 | * @adev: amdgpu_device pointer |
| @@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) | |||
| 1199 | amdgpu_gem_force_release(adev); | 1186 | amdgpu_gem_force_release(adev); |
| 1200 | amdgpu_vm_manager_fini(adev); | 1187 | amdgpu_vm_manager_fini(adev); |
| 1201 | kfree(adev->gmc.vm_fault_info); | 1188 | kfree(adev->gmc.vm_fault_info); |
| 1202 | gmc_v8_0_gart_fini(adev); | 1189 | amdgpu_gart_table_vram_free(adev); |
| 1203 | amdgpu_bo_fini(adev); | 1190 | amdgpu_bo_fini(adev); |
| 1191 | amdgpu_gart_fini(adev); | ||
| 1204 | release_firmware(adev->gmc.fw); | 1192 | release_firmware(adev->gmc.fw); |
| 1205 | adev->gmc.fw = NULL; | 1193 | adev->gmc.fw = NULL; |
| 1206 | 1194 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
| @@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) | |||
| 942 | return 0; | 942 | return 0; |
| 943 | } | 943 | } |
| 944 | 944 | ||
| 945 | /** | ||
| 946 | * gmc_v9_0_gart_fini - vm fini callback | ||
| 947 | * | ||
| 948 | * @adev: amdgpu_device pointer | ||
| 949 | * | ||
| 950 | * Tears down the driver GART/VM setup (CIK). | ||
| 951 | */ | ||
| 952 | static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) | ||
| 953 | { | ||
| 954 | amdgpu_gart_table_vram_free(adev); | ||
| 955 | amdgpu_gart_fini(adev); | ||
| 956 | } | ||
| 957 | |||
| 958 | static int gmc_v9_0_sw_fini(void *handle) | 945 | static int gmc_v9_0_sw_fini(void *handle) |
| 959 | { | 946 | { |
| 960 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 947 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 961 | 948 | ||
| 962 | amdgpu_gem_force_release(adev); | 949 | amdgpu_gem_force_release(adev); |
| 963 | amdgpu_vm_manager_fini(adev); | 950 | amdgpu_vm_manager_fini(adev); |
| 964 | gmc_v9_0_gart_fini(adev); | ||
| 965 | 951 | ||
| 966 | /* | 952 | /* |
| 967 | * TODO: | 953 | * TODO: |
| @@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) | |||
| 974 | */ | 960 | */ |
| 975 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); | 961 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); |
| 976 | 962 | ||
| 963 | amdgpu_gart_table_vram_free(adev); | ||
| 977 | amdgpu_bo_fini(adev); | 964 | amdgpu_bo_fini(adev); |
| 965 | amdgpu_gart_fini(adev); | ||
| 978 | 966 | ||
| 979 | return 0; | 967 | return 0; |
| 980 | } | 968 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
| @@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, | |||
| 65 | int min_temp, int max_temp); | 65 | int min_temp, int max_temp); |
| 66 | static int kv_init_fps_limits(struct amdgpu_device *adev); | 66 | static int kv_init_fps_limits(struct amdgpu_device *adev); |
| 67 | 67 | ||
| 68 | static void kv_dpm_powergate_uvd(void *handle, bool gate); | ||
| 69 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); | ||
| 70 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); | 68 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); |
| 71 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); | 69 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); |
| 72 | 70 | ||
| @@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
| 1354 | return ret; | 1352 | return ret; |
| 1355 | } | 1353 | } |
| 1356 | 1354 | ||
| 1357 | kv_update_current_ps(adev, adev->pm.dpm.boot_ps); | ||
| 1358 | |||
| 1359 | if (adev->irq.installed && | 1355 | if (adev->irq.installed && |
| 1360 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { | 1356 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { |
| 1361 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); | 1357 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); |
| @@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
| 1374 | 1370 | ||
| 1375 | static void kv_dpm_disable(struct amdgpu_device *adev) | 1371 | static void kv_dpm_disable(struct amdgpu_device *adev) |
| 1376 | { | 1372 | { |
| 1373 | struct kv_power_info *pi = kv_get_pi(adev); | ||
| 1374 | |||
| 1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1375 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
| 1378 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); | 1376 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
| 1379 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
| @@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev) | |||
| 1387 | /* powerup blocks */ | 1385 | /* powerup blocks */ |
| 1388 | kv_dpm_powergate_acp(adev, false); | 1386 | kv_dpm_powergate_acp(adev, false); |
| 1389 | kv_dpm_powergate_samu(adev, false); | 1387 | kv_dpm_powergate_samu(adev, false); |
| 1390 | kv_dpm_powergate_vce(adev, false); | 1388 | if (pi->caps_vce_pg) /* power on the VCE block */ |
| 1391 | kv_dpm_powergate_uvd(adev, false); | 1389 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); |
| 1390 | if (pi->caps_uvd_pg) /* power on the UVD block */ | ||
| 1391 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); | ||
| 1392 | 1392 | ||
| 1393 | kv_enable_smc_cac(adev, false); | 1393 | kv_enable_smc_cac(adev, false); |
| 1394 | kv_enable_didt(adev, false); | 1394 | kv_enable_didt(adev, false); |
| @@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
| 1551 | int ret; | 1551 | int ret; |
| 1552 | 1552 | ||
| 1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { | 1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { |
| 1554 | kv_dpm_powergate_vce(adev, false); | ||
| 1555 | if (pi->caps_stable_p_state) | 1554 | if (pi->caps_stable_p_state) |
| 1556 | pi->vce_boot_level = table->count - 1; | 1555 | pi->vce_boot_level = table->count - 1; |
| 1557 | else | 1556 | else |
| @@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
| 1573 | kv_enable_vce_dpm(adev, true); | 1572 | kv_enable_vce_dpm(adev, true); |
| 1574 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { | 1573 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { |
| 1575 | kv_enable_vce_dpm(adev, false); | 1574 | kv_enable_vce_dpm(adev, false); |
| 1576 | kv_dpm_powergate_vce(adev, true); | ||
| 1577 | } | 1575 | } |
| 1578 | 1576 | ||
| 1579 | return 0; | 1577 | return 0; |
| @@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) | |||
| 1702 | } | 1700 | } |
| 1703 | } | 1701 | } |
| 1704 | 1702 | ||
| 1705 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) | 1703 | static void kv_dpm_powergate_vce(void *handle, bool gate) |
| 1706 | { | 1704 | { |
| 1705 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 1707 | struct kv_power_info *pi = kv_get_pi(adev); | 1706 | struct kv_power_info *pi = kv_get_pi(adev); |
| 1708 | 1707 | int ret; | |
| 1709 | if (pi->vce_power_gated == gate) | ||
| 1710 | return; | ||
| 1711 | 1708 | ||
| 1712 | pi->vce_power_gated = gate; | 1709 | pi->vce_power_gated = gate; |
| 1713 | 1710 | ||
| 1714 | if (!pi->caps_vce_pg) | 1711 | if (gate) { |
| 1715 | return; | 1712 | /* stop the VCE block */ |
| 1716 | 1713 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | |
| 1717 | if (gate) | 1714 | AMD_PG_STATE_GATE); |
| 1718 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); | 1715 | kv_enable_vce_dpm(adev, false); |
| 1719 | else | 1716 | if (pi->caps_vce_pg) /* power off the VCE block */ |
| 1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | 1717 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); |
| 1718 | } else { | ||
| 1719 | if (pi->caps_vce_pg) /* power on the VCE block */ | ||
| 1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | ||
| 1721 | kv_enable_vce_dpm(adev, true); | ||
| 1722 | /* re-init the VCE block */ | ||
| 1723 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | ||
| 1724 | AMD_PG_STATE_UNGATE); | ||
| 1725 | } | ||
| 1721 | } | 1726 | } |
| 1722 | 1727 | ||
| 1728 | |||
| 1723 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) | 1729 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) |
| 1724 | { | 1730 | { |
| 1725 | struct kv_power_info *pi = kv_get_pi(adev); | 1731 | struct kv_power_info *pi = kv_get_pi(adev); |
| @@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) | |||
| 3061 | else | 3067 | else |
| 3062 | adev->pm.dpm_enabled = true; | 3068 | adev->pm.dpm_enabled = true; |
| 3063 | mutex_unlock(&adev->pm.mutex); | 3069 | mutex_unlock(&adev->pm.mutex); |
| 3064 | 3070 | amdgpu_pm_compute_clocks(adev); | |
| 3065 | return ret; | 3071 | return ret; |
| 3066 | } | 3072 | } |
| 3067 | 3073 | ||
| @@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle, | |||
| 3313 | case AMD_IP_BLOCK_TYPE_UVD: | 3319 | case AMD_IP_BLOCK_TYPE_UVD: |
| 3314 | kv_dpm_powergate_uvd(handle, gate); | 3320 | kv_dpm_powergate_uvd(handle, gate); |
| 3315 | break; | 3321 | break; |
| 3322 | case AMD_IP_BLOCK_TYPE_VCE: | ||
| 3323 | kv_dpm_powergate_vce(handle, gate); | ||
| 3324 | break; | ||
| 3316 | default: | 3325 | default: |
| 3317 | break; | 3326 | break; |
| 3318 | } | 3327 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
| @@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) | |||
| 6887 | 6887 | ||
| 6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
| 6889 | si_thermal_start_thermal_controller(adev); | 6889 | si_thermal_start_thermal_controller(adev); |
| 6890 | ni_update_current_ps(adev, boot_ps); | ||
| 6891 | 6890 | ||
| 6892 | return 0; | 6891 | return 0; |
| 6893 | } | 6892 | } |
| @@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) | |||
| 7763 | else | 7762 | else |
| 7764 | adev->pm.dpm_enabled = true; | 7763 | adev->pm.dpm_enabled = true; |
| 7765 | mutex_unlock(&adev->pm.mutex); | 7764 | mutex_unlock(&adev->pm.mutex); |
| 7766 | 7765 | amdgpu_pm_compute_clocks(adev); | |
| 7767 | return ret; | 7766 | return ret; |
| 7768 | } | 7767 | } |
| 7769 | 7768 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | |||
| @@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, | |||
| 480 | { | 480 | { |
| 481 | struct dc_context *ctx = pp->ctx; | 481 | struct dc_context *ctx = pp->ctx; |
| 482 | struct amdgpu_device *adev = ctx->driver_context; | 482 | struct amdgpu_device *adev = ctx->driver_context; |
| 483 | void *pp_handle = adev->powerplay.pp_handle; | ||
| 483 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | 484 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; |
| 485 | struct pp_display_clock_request clock = {0}; | ||
| 484 | 486 | ||
| 485 | if (!pp_funcs || !pp_funcs->display_configuration_changed) | 487 | if (!pp_funcs || !pp_funcs->display_clock_voltage_request) |
| 486 | return; | 488 | return; |
| 487 | 489 | ||
| 488 | amdgpu_dpm_display_configuration_changed(adev); | 490 | clock.clock_type = amd_pp_dcf_clock; |
| 491 | clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; | ||
| 492 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
| 493 | |||
| 494 | clock.clock_type = amd_pp_f_clock; | ||
| 495 | clock.clock_freq_in_khz = req->hard_min_fclk_khz; | ||
| 496 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
| 489 | } | 497 | } |
| 490 | 498 | ||
| 491 | void pp_rv_set_wm_ranges(struct pp_smu *pp, | 499 | void pp_rv_set_wm_ranges(struct pp_smu *pp, |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..37eaf72ace54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
| @@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) | |||
| 754 | * fail-safe mode | 754 | * fail-safe mode |
| 755 | */ | 755 | */ |
| 756 | if (dc_is_hdmi_signal(link->connector_signal) || | 756 | if (dc_is_hdmi_signal(link->connector_signal) || |
| 757 | dc_is_dvi_signal(link->connector_signal)) | 757 | dc_is_dvi_signal(link->connector_signal)) { |
| 758 | if (prev_sink != NULL) | ||
| 759 | dc_sink_release(prev_sink); | ||
| 760 | |||
| 758 | return false; | 761 | return false; |
| 762 | } | ||
| 759 | default: | 763 | default: |
| 760 | break; | 764 | break; |
| 761 | } | 765 | } |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..98358b4b36de 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
| @@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, | |||
| 199 | vma->flags |= I915_VMA_GGTT; | 199 | vma->flags |= I915_VMA_GGTT; |
| 200 | list_add(&vma->obj_link, &obj->vma_list); | 200 | list_add(&vma->obj_link, &obj->vma_list); |
| 201 | } else { | 201 | } else { |
| 202 | i915_ppgtt_get(i915_vm_to_ppgtt(vm)); | ||
| 203 | list_add_tail(&vma->obj_link, &obj->vma_list); | 202 | list_add_tail(&vma->obj_link, &obj->vma_list); |
| 204 | } | 203 | } |
| 205 | 204 | ||
| @@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) | |||
| 807 | if (vma->obj) | 806 | if (vma->obj) |
| 808 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); | 807 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); |
| 809 | 808 | ||
| 810 | if (!i915_vma_is_ggtt(vma)) | ||
| 811 | i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); | ||
| 812 | |||
| 813 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { | 809 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { |
| 814 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); | 810 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); |
| 815 | kfree(iter); | 811 | kfree(iter); |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b725835b47ef..769f3f586661 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
| @@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) | |||
| 962 | { | 962 | { |
| 963 | int ret; | 963 | int ret; |
| 964 | 964 | ||
| 965 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | ||
| 966 | return; | ||
| 967 | |||
| 968 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); | 965 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); |
| 969 | if (ret < 0) { | 966 | if (ret < 0) { |
| 970 | DRM_ERROR("failed to add audio component (%d)\n", ret); | 967 | DRM_ERROR("failed to add audio component (%d)\n", ret); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ed3fa1c8a983..4a3c8ee9a973 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
| 2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; | 2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; |
| 2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; | 2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; |
| 2990 | int dst_x = plane_state->base.dst.x1; | 2990 | int dst_x = plane_state->base.dst.x1; |
| 2991 | int dst_w = drm_rect_width(&plane_state->base.dst); | ||
| 2991 | int pipe_src_w = crtc_state->pipe_src_w; | 2992 | int pipe_src_w = crtc_state->pipe_src_w; |
| 2992 | int max_width = skl_max_plane_width(fb, 0, rotation); | 2993 | int max_width = skl_max_plane_width(fb, 0, rotation); |
| 2993 | int max_height = 4096; | 2994 | int max_height = 4096; |
| @@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
| 3009 | * screen may cause FIFO underflow and display corruption. | 3010 | * screen may cause FIFO underflow and display corruption. |
| 3010 | */ | 3011 | */ |
| 3011 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && | 3012 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && |
| 3012 | (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { | 3013 | (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { |
| 3013 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", | 3014 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", |
| 3014 | dst_x + w < 4 ? "end" : "start", | 3015 | dst_x + dst_w < 4 ? "end" : "start", |
| 3015 | dst_x + w < 4 ? dst_x + w : dst_x, | 3016 | dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, |
| 3016 | 4, pipe_src_w - 4); | 3017 | 4, pipe_src_w - 4); |
| 3017 | return -ERANGE; | 3018 | return -ERANGE; |
| 3018 | } | 3019 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a9076402dcb0..192972a7d287 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, | |||
| 943 | 943 | ||
| 944 | ret = i2c_transfer(adapter, &msg, 1); | 944 | ret = i2c_transfer(adapter, &msg, 1); |
| 945 | if (ret == 1) | 945 | if (ret == 1) |
| 946 | return 0; | 946 | ret = 0; |
| 947 | return ret >= 0 ? -EIO : ret; | 947 | else if (ret >= 0) |
| 948 | ret = -EIO; | ||
| 949 | |||
| 950 | kfree(write_buf); | ||
| 951 | return ret; | ||
| 948 | } | 952 | } |
| 949 | 953 | ||
| 950 | static | 954 | static |
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
| @@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, | |||
| 74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", | 74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", |
| 75 | lspcon_mode_name(mode)); | 75 | lspcon_mode_name(mode)); |
| 76 | 76 | ||
| 77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); | 77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); |
| 78 | if (current_mode != mode) | 78 | if (current_mode != mode) |
| 79 | DRM_ERROR("LSPCON mode hasn't settled\n"); | 79 | DRM_ERROR("LSPCON mode hasn't settled\n"); |
| 80 | 80 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 978782a77629..28d191192945 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c | |||
| @@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, | |||
| 132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); | 132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) | ||
| 136 | { | ||
| 137 | return 4; | ||
| 138 | } | ||
| 139 | |||
| 135 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) | 140 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) |
| 136 | { | 141 | { |
| 137 | unsigned int reg; | 142 | unsigned int reg; |
| @@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) | |||
| 157 | 162 | ||
| 158 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) | 163 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) |
| 159 | { | 164 | { |
| 165 | /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" | ||
| 166 | * is defined in mediatek HW data sheet. | ||
| 167 | * The alphabet order in XXX is no relation to data | ||
| 168 | * arrangement in memory. | ||
| 169 | */ | ||
| 160 | switch (fmt) { | 170 | switch (fmt) { |
| 161 | default: | 171 | default: |
| 162 | case DRM_FORMAT_RGB565: | 172 | case DRM_FORMAT_RGB565: |
| @@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { | |||
| 221 | .stop = mtk_ovl_stop, | 231 | .stop = mtk_ovl_stop, |
| 222 | .enable_vblank = mtk_ovl_enable_vblank, | 232 | .enable_vblank = mtk_ovl_enable_vblank, |
| 223 | .disable_vblank = mtk_ovl_disable_vblank, | 233 | .disable_vblank = mtk_ovl_disable_vblank, |
| 234 | .layer_nr = mtk_ovl_layer_nr, | ||
| 224 | .layer_on = mtk_ovl_layer_on, | 235 | .layer_on = mtk_ovl_layer_on, |
| 225 | .layer_off = mtk_ovl_layer_off, | 236 | .layer_off = mtk_ovl_layer_off, |
| 226 | .layer_config = mtk_ovl_layer_config, | 237 | .layer_config = mtk_ovl_layer_config, |
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 585943c81e1f..b0a5cffe345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c | |||
| @@ -31,14 +31,31 @@ | |||
| 31 | #define RDMA_REG_UPDATE_INT BIT(0) | 31 | #define RDMA_REG_UPDATE_INT BIT(0) |
| 32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 | 32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 |
| 33 | #define RDMA_ENGINE_EN BIT(0) | 33 | #define RDMA_ENGINE_EN BIT(0) |
| 34 | #define RDMA_MODE_MEMORY BIT(1) | ||
| 34 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 | 35 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 |
| 36 | #define RDMA_MATRIX_ENABLE BIT(17) | ||
| 37 | #define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) | ||
| 38 | #define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) | ||
| 35 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 | 39 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 |
| 36 | #define DISP_REG_RDMA_TARGET_LINE 0x001c | 40 | #define DISP_REG_RDMA_TARGET_LINE 0x001c |
| 41 | #define DISP_RDMA_MEM_CON 0x0024 | ||
| 42 | #define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) | ||
| 43 | #define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) | ||
| 44 | #define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) | ||
| 45 | #define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) | ||
| 46 | #define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) | ||
| 47 | #define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) | ||
| 48 | #define MEM_MODE_INPUT_SWAP BIT(8) | ||
| 49 | #define DISP_RDMA_MEM_SRC_PITCH 0x002c | ||
| 50 | #define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 | ||
| 37 | #define DISP_REG_RDMA_FIFO_CON 0x0040 | 51 | #define DISP_REG_RDMA_FIFO_CON 0x0040 |
| 38 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) | 52 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) |
| 39 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) | 53 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) |
| 40 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) | 54 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) |
| 41 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) | 55 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) |
| 56 | #define DISP_RDMA_MEM_START_ADDR 0x0f00 | ||
| 57 | |||
| 58 | #define RDMA_MEM_GMC 0x40402020 | ||
| 42 | 59 | ||
| 43 | struct mtk_disp_rdma_data { | 60 | struct mtk_disp_rdma_data { |
| 44 | unsigned int fifo_size; | 61 | unsigned int fifo_size; |
| @@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, | |||
| 138 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); | 155 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); |
| 139 | } | 156 | } |
| 140 | 157 | ||
| 158 | static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, | ||
| 159 | unsigned int fmt) | ||
| 160 | { | ||
| 161 | /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" | ||
| 162 | * is defined in mediatek HW data sheet. | ||
| 163 | * The alphabet order in XXX is no relation to data | ||
| 164 | * arrangement in memory. | ||
| 165 | */ | ||
| 166 | switch (fmt) { | ||
| 167 | default: | ||
| 168 | case DRM_FORMAT_RGB565: | ||
| 169 | return MEM_MODE_INPUT_FORMAT_RGB565; | ||
| 170 | case DRM_FORMAT_BGR565: | ||
| 171 | return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; | ||
| 172 | case DRM_FORMAT_RGB888: | ||
| 173 | return MEM_MODE_INPUT_FORMAT_RGB888; | ||
| 174 | case DRM_FORMAT_BGR888: | ||
| 175 | return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; | ||
| 176 | case DRM_FORMAT_RGBX8888: | ||
| 177 | case DRM_FORMAT_RGBA8888: | ||
| 178 | return MEM_MODE_INPUT_FORMAT_ARGB8888; | ||
| 179 | case DRM_FORMAT_BGRX8888: | ||
| 180 | case DRM_FORMAT_BGRA8888: | ||
| 181 | return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; | ||
| 182 | case DRM_FORMAT_XRGB8888: | ||
| 183 | case DRM_FORMAT_ARGB8888: | ||
| 184 | return MEM_MODE_INPUT_FORMAT_RGBA8888; | ||
| 185 | case DRM_FORMAT_XBGR8888: | ||
| 186 | case DRM_FORMAT_ABGR8888: | ||
| 187 | return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; | ||
| 188 | case DRM_FORMAT_UYVY: | ||
| 189 | return MEM_MODE_INPUT_FORMAT_UYVY; | ||
| 190 | case DRM_FORMAT_YUYV: | ||
| 191 | return MEM_MODE_INPUT_FORMAT_YUYV; | ||
| 192 | } | ||
| 193 | } | ||
| 194 | |||
| 195 | static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) | ||
| 196 | { | ||
| 197 | return 1; | ||
| 198 | } | ||
| 199 | |||
| 200 | static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, | ||
| 201 | struct mtk_plane_state *state) | ||
| 202 | { | ||
| 203 | struct mtk_disp_rdma *rdma = comp_to_rdma(comp); | ||
| 204 | struct mtk_plane_pending_state *pending = &state->pending; | ||
| 205 | unsigned int addr = pending->addr; | ||
| 206 | unsigned int pitch = pending->pitch & 0xffff; | ||
| 207 | unsigned int fmt = pending->format; | ||
| 208 | unsigned int con; | ||
| 209 | |||
| 210 | con = rdma_fmt_convert(rdma, fmt); | ||
| 211 | writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); | ||
| 212 | |||
| 213 | if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { | ||
| 214 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 215 | RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); | ||
| 216 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 217 | RDMA_MATRIX_INT_MTX_SEL, | ||
| 218 | RDMA_MATRIX_INT_MTX_BT601_to_RGB); | ||
| 219 | } else { | ||
| 220 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 221 | RDMA_MATRIX_ENABLE, 0); | ||
| 222 | } | ||
| 223 | |||
| 224 | writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); | ||
| 225 | writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); | ||
| 226 | writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); | ||
| 227 | rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, | ||
| 228 | RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); | ||
| 229 | } | ||
| 230 | |||
| 141 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { | 231 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { |
| 142 | .config = mtk_rdma_config, | 232 | .config = mtk_rdma_config, |
| 143 | .start = mtk_rdma_start, | 233 | .start = mtk_rdma_start, |
| 144 | .stop = mtk_rdma_stop, | 234 | .stop = mtk_rdma_stop, |
| 145 | .enable_vblank = mtk_rdma_enable_vblank, | 235 | .enable_vblank = mtk_rdma_enable_vblank, |
| 146 | .disable_vblank = mtk_rdma_disable_vblank, | 236 | .disable_vblank = mtk_rdma_disable_vblank, |
| 237 | .layer_nr = mtk_rdma_layer_nr, | ||
| 238 | .layer_config = mtk_rdma_layer_config, | ||
| 147 | }; | 239 | }; |
| 148 | 240 | ||
| 149 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, | 241 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 2d6aa150a9ff..0b976dfd04df 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c | |||
| @@ -45,7 +45,8 @@ struct mtk_drm_crtc { | |||
| 45 | bool pending_needs_vblank; | 45 | bool pending_needs_vblank; |
| 46 | struct drm_pending_vblank_event *event; | 46 | struct drm_pending_vblank_event *event; |
| 47 | 47 | ||
| 48 | struct drm_plane planes[OVL_LAYER_NR]; | 48 | struct drm_plane *planes; |
| 49 | unsigned int layer_nr; | ||
| 49 | bool pending_planes; | 50 | bool pending_planes; |
| 50 | 51 | ||
| 51 | void __iomem *config_regs; | 52 | void __iomem *config_regs; |
| @@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 171 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | 172 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) |
| 172 | { | 173 | { |
| 173 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 174 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 174 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 175 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 175 | 176 | ||
| 176 | mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); | 177 | mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base); |
| 177 | 178 | ||
| 178 | return 0; | 179 | return 0; |
| 179 | } | 180 | } |
| @@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | |||
| 181 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) | 182 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) |
| 182 | { | 183 | { |
| 183 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 184 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 184 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 185 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 185 | 186 | ||
| 186 | mtk_ddp_comp_disable_vblank(ovl); | 187 | mtk_ddp_comp_disable_vblank(comp); |
| 187 | } | 188 | } |
| 188 | 189 | ||
| 189 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) | 190 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) |
| @@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) | |||
| 286 | } | 287 | } |
| 287 | 288 | ||
| 288 | /* Initially configure all planes */ | 289 | /* Initially configure all planes */ |
| 289 | for (i = 0; i < OVL_LAYER_NR; i++) { | 290 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 290 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 291 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 291 | struct mtk_plane_state *plane_state; | 292 | struct mtk_plane_state *plane_state; |
| 292 | 293 | ||
| @@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 334 | { | 335 | { |
| 335 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 336 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 336 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); | 337 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); |
| 337 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 338 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 338 | unsigned int i; | 339 | unsigned int i; |
| 339 | 340 | ||
| 340 | /* | 341 | /* |
| @@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 343 | * queue update module registers on vblank. | 344 | * queue update module registers on vblank. |
| 344 | */ | 345 | */ |
| 345 | if (state->pending_config) { | 346 | if (state->pending_config) { |
| 346 | mtk_ddp_comp_config(ovl, state->pending_width, | 347 | mtk_ddp_comp_config(comp, state->pending_width, |
| 347 | state->pending_height, | 348 | state->pending_height, |
| 348 | state->pending_vrefresh, 0); | 349 | state->pending_vrefresh, 0); |
| 349 | 350 | ||
| @@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 351 | } | 352 | } |
| 352 | 353 | ||
| 353 | if (mtk_crtc->pending_planes) { | 354 | if (mtk_crtc->pending_planes) { |
| 354 | for (i = 0; i < OVL_LAYER_NR; i++) { | 355 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 355 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 356 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 356 | struct mtk_plane_state *plane_state; | 357 | struct mtk_plane_state *plane_state; |
| 357 | 358 | ||
| 358 | plane_state = to_mtk_plane_state(plane->state); | 359 | plane_state = to_mtk_plane_state(plane->state); |
| 359 | 360 | ||
| 360 | if (plane_state->pending.config) { | 361 | if (plane_state->pending.config) { |
| 361 | mtk_ddp_comp_layer_config(ovl, i, plane_state); | 362 | mtk_ddp_comp_layer_config(comp, i, plane_state); |
| 362 | plane_state->pending.config = false; | 363 | plane_state->pending.config = false; |
| 363 | } | 364 | } |
| 364 | } | 365 | } |
| @@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
| 370 | struct drm_crtc_state *old_state) | 371 | struct drm_crtc_state *old_state) |
| 371 | { | 372 | { |
| 372 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 373 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 373 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 374 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 374 | int ret; | 375 | int ret; |
| 375 | 376 | ||
| 376 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 377 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
| 377 | 378 | ||
| 378 | ret = mtk_smi_larb_get(ovl->larb_dev); | 379 | ret = mtk_smi_larb_get(comp->larb_dev); |
| 379 | if (ret) { | 380 | if (ret) { |
| 380 | DRM_ERROR("Failed to get larb: %d\n", ret); | 381 | DRM_ERROR("Failed to get larb: %d\n", ret); |
| 381 | return; | 382 | return; |
| @@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
| 383 | 384 | ||
| 384 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); | 385 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); |
| 385 | if (ret) { | 386 | if (ret) { |
| 386 | mtk_smi_larb_put(ovl->larb_dev); | 387 | mtk_smi_larb_put(comp->larb_dev); |
| 387 | return; | 388 | return; |
| 388 | } | 389 | } |
| 389 | 390 | ||
| @@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 395 | struct drm_crtc_state *old_state) | 396 | struct drm_crtc_state *old_state) |
| 396 | { | 397 | { |
| 397 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 398 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 398 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 399 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 399 | int i; | 400 | int i; |
| 400 | 401 | ||
| 401 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 402 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
| @@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 403 | return; | 404 | return; |
| 404 | 405 | ||
| 405 | /* Set all pending plane state to disabled */ | 406 | /* Set all pending plane state to disabled */ |
| 406 | for (i = 0; i < OVL_LAYER_NR; i++) { | 407 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 407 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 408 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 408 | struct mtk_plane_state *plane_state; | 409 | struct mtk_plane_state *plane_state; |
| 409 | 410 | ||
| @@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 418 | 419 | ||
| 419 | drm_crtc_vblank_off(crtc); | 420 | drm_crtc_vblank_off(crtc); |
| 420 | mtk_crtc_ddp_hw_fini(mtk_crtc); | 421 | mtk_crtc_ddp_hw_fini(mtk_crtc); |
| 421 | mtk_smi_larb_put(ovl->larb_dev); | 422 | mtk_smi_larb_put(comp->larb_dev); |
| 422 | 423 | ||
| 423 | mtk_crtc->enabled = false; | 424 | mtk_crtc->enabled = false; |
| 424 | } | 425 | } |
| @@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 450 | 451 | ||
| 451 | if (mtk_crtc->event) | 452 | if (mtk_crtc->event) |
| 452 | mtk_crtc->pending_needs_vblank = true; | 453 | mtk_crtc->pending_needs_vblank = true; |
| 453 | for (i = 0; i < OVL_LAYER_NR; i++) { | 454 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 454 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 455 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 455 | struct mtk_plane_state *plane_state; | 456 | struct mtk_plane_state *plane_state; |
| 456 | 457 | ||
| @@ -516,7 +517,7 @@ err_cleanup_crtc: | |||
| 516 | return ret; | 517 | return ret; |
| 517 | } | 518 | } |
| 518 | 519 | ||
| 519 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) | 520 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) |
| 520 | { | 521 | { |
| 521 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 522 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 522 | struct mtk_drm_private *priv = crtc->dev->dev_private; | 523 | struct mtk_drm_private *priv = crtc->dev->dev_private; |
| @@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
| 598 | mtk_crtc->ddp_comp[i] = comp; | 599 | mtk_crtc->ddp_comp[i] = comp; |
| 599 | } | 600 | } |
| 600 | 601 | ||
| 601 | for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { | 602 | mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); |
| 603 | mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * | ||
| 604 | sizeof(struct drm_plane), | ||
| 605 | GFP_KERNEL); | ||
| 606 | |||
| 607 | for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { | ||
| 602 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : | 608 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : |
| 603 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : | 609 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : |
| 604 | DRM_PLANE_TYPE_OVERLAY; | 610 | DRM_PLANE_TYPE_OVERLAY; |
| @@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
| 609 | } | 615 | } |
| 610 | 616 | ||
| 611 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], | 617 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], |
| 612 | &mtk_crtc->planes[1], pipe); | 618 | mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] : |
| 619 | NULL, pipe); | ||
| 613 | if (ret < 0) | 620 | if (ret < 0) |
| 614 | goto unprepare; | 621 | goto unprepare; |
| 615 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); | 622 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 9d9410c67ae9..091adb2087eb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h | |||
| @@ -18,13 +18,12 @@ | |||
| 18 | #include "mtk_drm_ddp_comp.h" | 18 | #include "mtk_drm_ddp_comp.h" |
| 19 | #include "mtk_drm_plane.h" | 19 | #include "mtk_drm_plane.h" |
| 20 | 20 | ||
| 21 | #define OVL_LAYER_NR 4 | ||
| 22 | #define MTK_LUT_SIZE 512 | 21 | #define MTK_LUT_SIZE 512 |
| 23 | #define MTK_MAX_BPC 10 | 22 | #define MTK_MAX_BPC 10 |
| 24 | #define MTK_MIN_BPC 3 | 23 | #define MTK_MIN_BPC 3 |
| 25 | 24 | ||
| 26 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); | 25 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); |
| 27 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); | 26 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); |
| 28 | int mtk_drm_crtc_create(struct drm_device *drm_dev, | 27 | int mtk_drm_crtc_create(struct drm_device *drm_dev, |
| 29 | const enum mtk_ddp_comp_id *path, | 28 | const enum mtk_ddp_comp_id *path, |
| 30 | unsigned int path_len); | 29 | unsigned int path_len); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 87e4191c250e..546b3e3b300b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c | |||
| @@ -106,6 +106,8 @@ | |||
| 106 | #define OVL1_MOUT_EN_COLOR1 0x1 | 106 | #define OVL1_MOUT_EN_COLOR1 0x1 |
| 107 | #define GAMMA_MOUT_EN_RDMA1 0x1 | 107 | #define GAMMA_MOUT_EN_RDMA1 0x1 |
| 108 | #define RDMA0_SOUT_DPI0 0x2 | 108 | #define RDMA0_SOUT_DPI0 0x2 |
| 109 | #define RDMA0_SOUT_DPI1 0x3 | ||
| 110 | #define RDMA0_SOUT_DSI1 0x1 | ||
| 109 | #define RDMA0_SOUT_DSI2 0x4 | 111 | #define RDMA0_SOUT_DSI2 0x4 |
| 110 | #define RDMA0_SOUT_DSI3 0x5 | 112 | #define RDMA0_SOUT_DSI3 0x5 |
| 111 | #define RDMA1_SOUT_DPI0 0x2 | 113 | #define RDMA1_SOUT_DPI0 0x2 |
| @@ -122,6 +124,8 @@ | |||
| 122 | #define DPI0_SEL_IN_RDMA2 0x3 | 124 | #define DPI0_SEL_IN_RDMA2 0x3 |
| 123 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) | 125 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) |
| 124 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) | 126 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) |
| 127 | #define DSI0_SEL_IN_RDMA1 0x1 | ||
| 128 | #define DSI0_SEL_IN_RDMA2 0x4 | ||
| 125 | #define DSI1_SEL_IN_RDMA1 0x1 | 129 | #define DSI1_SEL_IN_RDMA1 0x1 |
| 126 | #define DSI1_SEL_IN_RDMA2 0x4 | 130 | #define DSI1_SEL_IN_RDMA2 0x4 |
| 127 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) | 131 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) |
| @@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, | |||
| 224 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { | 228 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { |
| 225 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 229 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
| 226 | value = RDMA0_SOUT_DPI0; | 230 | value = RDMA0_SOUT_DPI0; |
| 231 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { | ||
| 232 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
| 233 | value = RDMA0_SOUT_DPI1; | ||
| 234 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) { | ||
| 235 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
| 236 | value = RDMA0_SOUT_DSI1; | ||
| 227 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { | 237 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { |
| 228 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 238 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
| 229 | value = RDMA0_SOUT_DSI2; | 239 | value = RDMA0_SOUT_DSI2; |
| @@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
| 282 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { | 292 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { |
| 283 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 293 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
| 284 | value = DPI1_SEL_IN_RDMA1; | 294 | value = DPI1_SEL_IN_RDMA1; |
| 295 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) { | ||
| 296 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | ||
| 297 | value = DSI0_SEL_IN_RDMA1; | ||
| 285 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { | 298 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { |
| 286 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | 299 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; |
| 287 | value = DSI1_SEL_IN_RDMA1; | 300 | value = DSI1_SEL_IN_RDMA1; |
| @@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
| 297 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { | 310 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { |
| 298 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 311 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
| 299 | value = DPI1_SEL_IN_RDMA2; | 312 | value = DPI1_SEL_IN_RDMA2; |
| 300 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | 313 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) { |
| 301 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 314 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
| 315 | value = DSI0_SEL_IN_RDMA2; | ||
| 316 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | ||
| 317 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | ||
| 302 | value = DSI1_SEL_IN_RDMA2; | 318 | value = DSI1_SEL_IN_RDMA2; |
| 303 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { | 319 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { |
| 304 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 320 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 7413ffeb3c9d..8399229e6ad2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | |||
| @@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs { | |||
| 78 | void (*stop)(struct mtk_ddp_comp *comp); | 78 | void (*stop)(struct mtk_ddp_comp *comp); |
| 79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); | 79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); |
| 80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); | 80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); |
| 81 | unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); | ||
| 81 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); | 82 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); |
| 82 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); | 83 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); |
| 83 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, | 84 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, |
| @@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) | |||
| 128 | comp->funcs->disable_vblank(comp); | 129 | comp->funcs->disable_vblank(comp); |
| 129 | } | 130 | } |
| 130 | 131 | ||
| 132 | static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) | ||
| 133 | { | ||
| 134 | if (comp->funcs && comp->funcs->layer_nr) | ||
| 135 | return comp->funcs->layer_nr(comp); | ||
| 136 | |||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | |||
| 131 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, | 140 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, |
| 132 | unsigned int idx) | 141 | unsigned int idx) |
| 133 | { | 142 | { |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 39721119713b..47ec604289b7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c | |||
| @@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev) | |||
| 381 | err_deinit: | 381 | err_deinit: |
| 382 | mtk_drm_kms_deinit(drm); | 382 | mtk_drm_kms_deinit(drm); |
| 383 | err_free: | 383 | err_free: |
| 384 | drm_dev_unref(drm); | 384 | drm_dev_put(drm); |
| 385 | return ret; | 385 | return ret; |
| 386 | } | 386 | } |
| 387 | 387 | ||
| @@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev) | |||
| 390 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 390 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 391 | 391 | ||
| 392 | drm_dev_unregister(private->drm); | 392 | drm_dev_unregister(private->drm); |
| 393 | drm_dev_unref(private->drm); | 393 | drm_dev_put(private->drm); |
| 394 | private->drm = NULL; | 394 | private->drm = NULL; |
| 395 | } | 395 | } |
| 396 | 396 | ||
| @@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev) | |||
| 564 | 564 | ||
| 565 | drm_dev_unregister(drm); | 565 | drm_dev_unregister(drm); |
| 566 | mtk_drm_kms_deinit(drm); | 566 | mtk_drm_kms_deinit(drm); |
| 567 | drm_dev_unref(drm); | 567 | drm_dev_put(drm); |
| 568 | 568 | ||
| 569 | component_master_del(&pdev->dev, &mtk_drm_ops); | 569 | component_master_del(&pdev->dev, &mtk_drm_ops); |
| 570 | pm_runtime_disable(&pdev->dev); | 570 | pm_runtime_disable(&pdev->dev); |
| @@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev) | |||
| 580 | { | 580 | { |
| 581 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 581 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 582 | struct drm_device *drm = private->drm; | 582 | struct drm_device *drm = private->drm; |
| 583 | int ret; | ||
| 583 | 584 | ||
| 584 | drm_kms_helper_poll_disable(drm); | 585 | ret = drm_mode_config_helper_suspend(drm); |
| 585 | |||
| 586 | private->suspend_state = drm_atomic_helper_suspend(drm); | ||
| 587 | if (IS_ERR(private->suspend_state)) { | ||
| 588 | drm_kms_helper_poll_enable(drm); | ||
| 589 | return PTR_ERR(private->suspend_state); | ||
| 590 | } | ||
| 591 | |||
| 592 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); | 586 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); |
| 593 | return 0; | 587 | |
| 588 | return ret; | ||
| 594 | } | 589 | } |
| 595 | 590 | ||
| 596 | static int mtk_drm_sys_resume(struct device *dev) | 591 | static int mtk_drm_sys_resume(struct device *dev) |
| 597 | { | 592 | { |
| 598 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 593 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 599 | struct drm_device *drm = private->drm; | 594 | struct drm_device *drm = private->drm; |
| 595 | int ret; | ||
| 600 | 596 | ||
| 601 | drm_atomic_helper_resume(drm, private->suspend_state); | 597 | ret = drm_mode_config_helper_resume(drm); |
| 602 | drm_kms_helper_poll_enable(drm); | ||
| 603 | |||
| 604 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); | 598 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); |
| 605 | return 0; | 599 | |
| 600 | return ret; | ||
| 606 | } | 601 | } |
| 607 | #endif | 602 | #endif |
| 608 | 603 | ||
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 90837f7c7d0f..f4c7516eb989 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c | |||
| @@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) | |||
| 302 | return clamp_val(reg, 0, 1023) & (0xff << 2); | 302 | return clamp_val(reg, 0, 1023) & (0xff << 2); |
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | static u16 adt7475_read_word(struct i2c_client *client, int reg) | 305 | static int adt7475_read_word(struct i2c_client *client, int reg) |
| 306 | { | 306 | { |
| 307 | u16 val; | 307 | int val1, val2; |
| 308 | 308 | ||
| 309 | val = i2c_smbus_read_byte_data(client, reg); | 309 | val1 = i2c_smbus_read_byte_data(client, reg); |
| 310 | val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); | 310 | if (val1 < 0) |
| 311 | return val1; | ||
| 312 | val2 = i2c_smbus_read_byte_data(client, reg + 1); | ||
| 313 | if (val2 < 0) | ||
| 314 | return val2; | ||
| 311 | 315 | ||
| 312 | return val; | 316 | return val1 | (val2 << 8); |
| 313 | } | 317 | } |
| 314 | 318 | ||
| 315 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) | 319 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) |
| @@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, | |||
| 962 | { | 966 | { |
| 963 | struct adt7475_data *data = adt7475_update_device(dev); | 967 | struct adt7475_data *data = adt7475_update_device(dev); |
| 964 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); | 968 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); |
| 965 | int i = clamp_val(data->range[sattr->index] & 0xf, 0, | 969 | int idx; |
| 966 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
| 967 | 970 | ||
| 968 | if (IS_ERR(data)) | 971 | if (IS_ERR(data)) |
| 969 | return PTR_ERR(data); | 972 | return PTR_ERR(data); |
| 973 | idx = clamp_val(data->range[sattr->index] & 0xf, 0, | ||
| 974 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
| 970 | 975 | ||
| 971 | return sprintf(buf, "%d\n", pwmfreq_table[i]); | 976 | return sprintf(buf, "%d\n", pwmfreq_table[idx]); |
| 972 | } | 977 | } |
| 973 | 978 | ||
| 974 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, | 979 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, |
| @@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, | |||
| 1004 | char *buf) | 1009 | char *buf) |
| 1005 | { | 1010 | { |
| 1006 | struct adt7475_data *data = adt7475_update_device(dev); | 1011 | struct adt7475_data *data = adt7475_update_device(dev); |
| 1012 | |||
| 1013 | if (IS_ERR(data)) | ||
| 1014 | return PTR_ERR(data); | ||
| 1015 | |||
| 1007 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); | 1016 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); |
| 1008 | } | 1017 | } |
| 1009 | 1018 | ||
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e6aeabbf84..71d3445ba869 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | * Bi-directional Current/Power Monitor with I2C Interface | 17 | * Bi-directional Current/Power Monitor with I2C Interface |
| 18 | * Datasheet: http://www.ti.com/product/ina230 | 18 | * Datasheet: http://www.ti.com/product/ina230 |
| 19 | * | 19 | * |
| 20 | * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> | 20 | * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> |
| 21 | * Thanks to Jan Volkering | 21 | * Thanks to Jan Volkering |
| 22 | * | 22 | * |
| 23 | * This program is free software; you can redistribute it and/or modify | 23 | * This program is free software; you can redistribute it and/or modify |
| @@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val) | |||
| 329 | return 0; | 329 | return 0; |
| 330 | } | 330 | } |
| 331 | 331 | ||
| 332 | static ssize_t ina2xx_show_shunt(struct device *dev, | ||
| 333 | struct device_attribute *da, | ||
| 334 | char *buf) | ||
| 335 | { | ||
| 336 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
| 337 | |||
| 338 | return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt); | ||
| 339 | } | ||
| 340 | |||
| 332 | static ssize_t ina2xx_store_shunt(struct device *dev, | 341 | static ssize_t ina2xx_store_shunt(struct device *dev, |
| 333 | struct device_attribute *da, | 342 | struct device_attribute *da, |
| 334 | const char *buf, size_t count) | 343 | const char *buf, size_t count) |
| @@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, | |||
| 403 | 412 | ||
| 404 | /* shunt resistance */ | 413 | /* shunt resistance */ |
| 405 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, | 414 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, |
| 406 | ina2xx_show_value, ina2xx_store_shunt, | 415 | ina2xx_show_shunt, ina2xx_store_shunt, |
| 407 | INA2XX_CALIBRATION); | 416 | INA2XX_CALIBRATION); |
| 408 | 417 | ||
| 409 | /* update interval (ina226 only) */ | 418 | /* update interval (ina226 only) */ |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c6bd61e4695a..944f5b63aecd 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
| @@ -63,6 +63,7 @@ | |||
| 63 | #include <linux/bitops.h> | 63 | #include <linux/bitops.h> |
| 64 | #include <linux/dmi.h> | 64 | #include <linux/dmi.h> |
| 65 | #include <linux/io.h> | 65 | #include <linux/io.h> |
| 66 | #include <linux/nospec.h> | ||
| 66 | #include "lm75.h" | 67 | #include "lm75.h" |
| 67 | 68 | ||
| 68 | #define USE_ALTERNATE | 69 | #define USE_ALTERNATE |
| @@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, | |||
| 2689 | return err; | 2690 | return err; |
| 2690 | if (val > NUM_TEMP) | 2691 | if (val > NUM_TEMP) |
| 2691 | return -EINVAL; | 2692 | return -EINVAL; |
| 2693 | val = array_index_nospec(val, NUM_TEMP + 1); | ||
| 2692 | if (val && (!(data->have_temp & BIT(val - 1)) || | 2694 | if (val && (!(data->have_temp & BIT(val - 1)) || |
| 2693 | !data->temp_src[val - 1])) | 2695 | !data->temp_src[val - 1])) |
| 2694 | return -EINVAL; | 2696 | return -EINVAL; |
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 6ec65adaba49..c33dcfb87993 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
| @@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap) | |||
| 110 | } | 110 | } |
| 111 | #ifdef DEBUG | 111 | #ifdef DEBUG |
| 112 | if (jiffies != start && i2c_debug >= 3) | 112 | if (jiffies != start && i2c_debug >= 3) |
| 113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " | 113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n", |
| 114 | "high\n", jiffies - start); | 114 | jiffies - start); |
| 115 | #endif | 115 | #endif |
| 116 | 116 | ||
| 117 | done: | 117 | done: |
| @@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
| 171 | setsda(adap, sb); | 171 | setsda(adap, sb); |
| 172 | udelay((adap->udelay + 1) / 2); | 172 | udelay((adap->udelay + 1) / 2); |
| 173 | if (sclhi(adap) < 0) { /* timed out */ | 173 | if (sclhi(adap) < 0) { /* timed out */ |
| 174 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 174 | bit_dbg(1, &i2c_adap->dev, |
| 175 | "timeout at bit #%d\n", (int)c, i); | 175 | "i2c_outb: 0x%02x, timeout at bit #%d\n", |
| 176 | (int)c, i); | ||
| 176 | return -ETIMEDOUT; | 177 | return -ETIMEDOUT; |
| 177 | } | 178 | } |
| 178 | /* FIXME do arbitration here: | 179 | /* FIXME do arbitration here: |
| @@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
| 185 | } | 186 | } |
| 186 | sdahi(adap); | 187 | sdahi(adap); |
| 187 | if (sclhi(adap) < 0) { /* timeout */ | 188 | if (sclhi(adap) < 0) { /* timeout */ |
| 188 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 189 | bit_dbg(1, &i2c_adap->dev, |
| 189 | "timeout at ack\n", (int)c); | 190 | "i2c_outb: 0x%02x, timeout at ack\n", (int)c); |
| 190 | return -ETIMEDOUT; | 191 | return -ETIMEDOUT; |
| 191 | } | 192 | } |
| 192 | 193 | ||
| @@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) | |||
| 215 | sdahi(adap); | 216 | sdahi(adap); |
| 216 | for (i = 0; i < 8; i++) { | 217 | for (i = 0; i < 8; i++) { |
| 217 | if (sclhi(adap) < 0) { /* timeout */ | 218 | if (sclhi(adap) < 0) { /* timeout */ |
| 218 | bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " | 219 | bit_dbg(1, &i2c_adap->dev, |
| 219 | "#%d\n", 7 - i); | 220 | "i2c_inb: timeout at bit #%d\n", |
| 221 | 7 - i); | ||
| 220 | return -ETIMEDOUT; | 222 | return -ETIMEDOUT; |
| 221 | } | 223 | } |
| 222 | indata *= 2; | 224 | indata *= 2; |
| @@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 265 | goto bailout; | 267 | goto bailout; |
| 266 | } | 268 | } |
| 267 | if (!scl) { | 269 | if (!scl) { |
| 268 | printk(KERN_WARNING "%s: SCL unexpected low " | 270 | printk(KERN_WARNING |
| 269 | "while pulling SDA low!\n", name); | 271 | "%s: SCL unexpected low while pulling SDA low!\n", |
| 272 | name); | ||
| 270 | goto bailout; | 273 | goto bailout; |
| 271 | } | 274 | } |
| 272 | 275 | ||
| @@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 278 | goto bailout; | 281 | goto bailout; |
| 279 | } | 282 | } |
| 280 | if (!scl) { | 283 | if (!scl) { |
| 281 | printk(KERN_WARNING "%s: SCL unexpected low " | 284 | printk(KERN_WARNING |
| 282 | "while pulling SDA high!\n", name); | 285 | "%s: SCL unexpected low while pulling SDA high!\n", |
| 286 | name); | ||
| 283 | goto bailout; | 287 | goto bailout; |
| 284 | } | 288 | } |
| 285 | 289 | ||
| @@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 291 | goto bailout; | 295 | goto bailout; |
| 292 | } | 296 | } |
| 293 | if (!sda) { | 297 | if (!sda) { |
| 294 | printk(KERN_WARNING "%s: SDA unexpected low " | 298 | printk(KERN_WARNING |
| 295 | "while pulling SCL low!\n", name); | 299 | "%s: SDA unexpected low while pulling SCL low!\n", |
| 300 | name); | ||
| 296 | goto bailout; | 301 | goto bailout; |
| 297 | } | 302 | } |
| 298 | 303 | ||
| @@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 304 | goto bailout; | 309 | goto bailout; |
| 305 | } | 310 | } |
| 306 | if (!sda) { | 311 | if (!sda) { |
| 307 | printk(KERN_WARNING "%s: SDA unexpected low " | 312 | printk(KERN_WARNING |
| 308 | "while pulling SCL high!\n", name); | 313 | "%s: SDA unexpected low while pulling SCL high!\n", |
| 314 | name); | ||
| 309 | goto bailout; | 315 | goto bailout; |
| 310 | } | 316 | } |
| 311 | 317 | ||
| @@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap, | |||
| 352 | i2c_start(adap); | 358 | i2c_start(adap); |
| 353 | } | 359 | } |
| 354 | if (i && ret) | 360 | if (i && ret) |
| 355 | bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at " | 361 | bit_dbg(1, &i2c_adap->dev, |
| 356 | "0x%02x: %s\n", i + 1, | 362 | "Used %d tries to %s client at 0x%02x: %s\n", i + 1, |
| 357 | addr & 1 ? "read from" : "write to", addr >> 1, | 363 | addr & 1 ? "read from" : "write to", addr >> 1, |
| 358 | ret == 1 ? "success" : "failed, timeout?"); | 364 | ret == 1 ? "success" : "failed, timeout?"); |
| 359 | return ret; | 365 | return ret; |
| @@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
| 442 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { | 448 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { |
| 443 | if (!(flags & I2C_M_NO_RD_ACK)) | 449 | if (!(flags & I2C_M_NO_RD_ACK)) |
| 444 | acknak(i2c_adap, 0); | 450 | acknak(i2c_adap, 0); |
| 445 | dev_err(&i2c_adap->dev, "readbytes: invalid " | 451 | dev_err(&i2c_adap->dev, |
| 446 | "block length (%d)\n", inval); | 452 | "readbytes: invalid block length (%d)\n", |
| 453 | inval); | ||
| 447 | return -EPROTO; | 454 | return -EPROTO; |
| 448 | } | 455 | } |
| 449 | /* The original count value accounts for the extra | 456 | /* The original count value accounts for the extra |
| @@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
| 506 | return -ENXIO; | 513 | return -ENXIO; |
| 507 | } | 514 | } |
| 508 | if (flags & I2C_M_RD) { | 515 | if (flags & I2C_M_RD) { |
| 509 | bit_dbg(3, &i2c_adap->dev, "emitting repeated " | 516 | bit_dbg(3, &i2c_adap->dev, |
| 510 | "start condition\n"); | 517 | "emitting repeated start condition\n"); |
| 511 | i2c_repstart(adap); | 518 | i2c_repstart(adap); |
| 512 | /* okay, now switch into reading mode */ | 519 | /* okay, now switch into reading mode */ |
| 513 | addr |= 0x01; | 520 | addr |= 0x01; |
| @@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, | |||
| 564 | } | 571 | } |
| 565 | ret = bit_doAddress(i2c_adap, pmsg); | 572 | ret = bit_doAddress(i2c_adap, pmsg); |
| 566 | if ((ret != 0) && !nak_ok) { | 573 | if ((ret != 0) && !nak_ok) { |
| 567 | bit_dbg(1, &i2c_adap->dev, "NAK from " | 574 | bit_dbg(1, &i2c_adap->dev, |
| 568 | "device addr 0x%02x msg #%d\n", | 575 | "NAK from device addr 0x%02x msg #%d\n", |
| 569 | msgs[i].addr, i); | 576 | msgs[i].addr, i); |
| 570 | goto bailout; | 577 | goto bailout; |
| 571 | } | 578 | } |
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e18442b9973a..94d94b4a9a0d 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
| @@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) | |||
| 708 | i2c_set_adapdata(adap, dev); | 708 | i2c_set_adapdata(adap, dev); |
| 709 | 709 | ||
| 710 | if (dev->pm_disabled) { | 710 | if (dev->pm_disabled) { |
| 711 | dev_pm_syscore_device(dev->dev, true); | ||
| 712 | irq_flags = IRQF_NO_SUSPEND; | 711 | irq_flags = IRQF_NO_SUSPEND; |
| 713 | } else { | 712 | } else { |
| 714 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; | 713 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 1a8d2da5b000..b5750fd85125 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
| @@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev) | |||
| 434 | { | 434 | { |
| 435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
| 436 | 436 | ||
| 437 | if (i_dev->pm_disabled) | ||
| 438 | return 0; | ||
| 439 | |||
| 437 | i_dev->disable(i_dev); | 440 | i_dev->disable(i_dev); |
| 438 | i2c_dw_prepare_clk(i_dev, false); | 441 | i2c_dw_prepare_clk(i_dev, false); |
| 439 | 442 | ||
| @@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev) | |||
| 444 | { | 447 | { |
| 445 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 448 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
| 446 | 449 | ||
| 447 | i2c_dw_prepare_clk(i_dev, true); | 450 | if (!i_dev->pm_disabled) |
| 451 | i2c_dw_prepare_clk(i_dev, true); | ||
| 452 | |||
| 448 | i_dev->init(i_dev); | 453 | i_dev->init(i_dev); |
| 449 | 454 | ||
| 450 | return 0; | 455 | return 0; |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 941c223f6491..04b60a349d7e 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -1415,6 +1415,13 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1415 | } | 1415 | } |
| 1416 | 1416 | ||
| 1417 | #ifdef CONFIG_ACPI | 1417 | #ifdef CONFIG_ACPI |
| 1418 | static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, | ||
| 1419 | acpi_physical_address address) | ||
| 1420 | { | ||
| 1421 | return address >= priv->smba && | ||
| 1422 | address <= pci_resource_end(priv->pci_dev, SMBBAR); | ||
| 1423 | } | ||
| 1424 | |||
| 1418 | static acpi_status | 1425 | static acpi_status |
| 1419 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | 1426 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, |
| 1420 | u64 *value, void *handler_context, void *region_context) | 1427 | u64 *value, void *handler_context, void *region_context) |
| @@ -1430,7 +1437,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | |||
| 1430 | */ | 1437 | */ |
| 1431 | mutex_lock(&priv->acpi_lock); | 1438 | mutex_lock(&priv->acpi_lock); |
| 1432 | 1439 | ||
| 1433 | if (!priv->acpi_reserved) { | 1440 | if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { |
| 1434 | priv->acpi_reserved = true; | 1441 | priv->acpi_reserved = true; |
| 1435 | 1442 | ||
| 1436 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | 1443 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); |
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 439e8778f849..818cab14e87c 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
| @@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data) | |||
| 507 | pd->pos = pd->msg->len; | 507 | pd->pos = pd->msg->len; |
| 508 | pd->stop_after_dma = true; | 508 | pd->stop_after_dma = true; |
| 509 | 509 | ||
| 510 | i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf); | ||
| 511 | |||
| 512 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); | 510 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); |
| 513 | } | 511 | } |
| 514 | 512 | ||
| @@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) | |||
| 602 | dma_async_issue_pending(chan); | 600 | dma_async_issue_pending(chan); |
| 603 | } | 601 | } |
| 604 | 602 | ||
| 605 | static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | 603 | static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, |
| 606 | bool do_init) | 604 | bool do_init) |
| 607 | { | 605 | { |
| 608 | if (do_init) { | 606 | if (do_init) { |
| 609 | /* Initialize channel registers */ | 607 | /* Initialize channel registers */ |
| @@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | |||
| 627 | 625 | ||
| 628 | /* Enable all interrupts to begin with */ | 626 | /* Enable all interrupts to begin with */ |
| 629 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | 627 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); |
| 630 | return 0; | ||
| 631 | } | 628 | } |
| 632 | 629 | ||
| 633 | static int poll_dte(struct sh_mobile_i2c_data *pd) | 630 | static int poll_dte(struct sh_mobile_i2c_data *pd) |
| @@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
| 698 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; | 695 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; |
| 699 | pd->stop_after_dma = false; | 696 | pd->stop_after_dma = false; |
| 700 | 697 | ||
| 701 | err = start_ch(pd, msg, do_start); | 698 | start_ch(pd, msg, do_start); |
| 702 | if (err) | ||
| 703 | break; | ||
| 704 | 699 | ||
| 705 | if (do_start) | 700 | if (do_start) |
| 706 | i2c_op(pd, OP_START, 0); | 701 | i2c_op(pd, OP_START, 0); |
| @@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
| 709 | timeout = wait_event_timeout(pd->wait, | 704 | timeout = wait_event_timeout(pd->wait, |
| 710 | pd->sr & (ICSR_TACK | SW_DONE), | 705 | pd->sr & (ICSR_TACK | SW_DONE), |
| 711 | adapter->timeout); | 706 | adapter->timeout); |
| 707 | |||
| 708 | /* 'stop_after_dma' tells if DMA transfer was complete */ | ||
| 709 | i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); | ||
| 710 | |||
| 712 | if (!timeout) { | 711 | if (!timeout) { |
| 713 | dev_err(pd->dev, "Transfer request timed out\n"); | 712 | dev_err(pd->dev, "Transfer request timed out\n"); |
| 714 | if (pd->dma_direction != DMA_NONE) | 713 | if (pd->dma_direction != DMA_NONE) |
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f15737763608..9ee9a15e7134 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
| @@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) | |||
| 2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); | 2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); |
| 2294 | 2294 | ||
| 2295 | /** | 2295 | /** |
| 2296 | * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg | 2296 | * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg |
| 2297 | * @msg: the message to be synced with | ||
| 2298 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. | 2297 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. |
| 2298 | * @msg: the message which the buffer corresponds to | ||
| 2299 | * @xferred: bool saying if the message was transferred | ||
| 2299 | */ | 2300 | */ |
| 2300 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf) | 2301 | void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) |
| 2301 | { | 2302 | { |
| 2302 | if (!buf || buf == msg->buf) | 2303 | if (!buf || buf == msg->buf) |
| 2303 | return; | 2304 | return; |
| 2304 | 2305 | ||
| 2305 | if (msg->flags & I2C_M_RD) | 2306 | if (xferred && msg->flags & I2C_M_RD) |
| 2306 | memcpy(msg->buf, buf, msg->len); | 2307 | memcpy(msg->buf, buf, msg->len); |
| 2307 | 2308 | ||
| 2308 | kfree(buf); | 2309 | kfree(buf); |
| 2309 | } | 2310 | } |
| 2310 | EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf); | 2311 | EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); |
| 2311 | 2312 | ||
| 2312 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); | 2313 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); |
| 2313 | MODULE_DESCRIPTION("I2C-Bus main module"); | 2314 | MODULE_DESCRIPTION("I2C-Bus main module"); |
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 648eb6743ed5..6edffeed9953 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
| @@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, | |||
| 238 | mmc_exit_request(mq->queue, req); | 238 | mmc_exit_request(mq->queue, req); |
| 239 | } | 239 | } |
| 240 | 240 | ||
| 241 | /* | ||
| 242 | * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests | ||
| 243 | * will not be dispatched in parallel. | ||
| 244 | */ | ||
| 245 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | 241 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
| 246 | const struct blk_mq_queue_data *bd) | 242 | const struct blk_mq_queue_data *bd) |
| 247 | { | 243 | { |
| @@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 264 | 260 | ||
| 265 | spin_lock_irq(q->queue_lock); | 261 | spin_lock_irq(q->queue_lock); |
| 266 | 262 | ||
| 267 | if (mq->recovery_needed) { | 263 | if (mq->recovery_needed || mq->busy) { |
| 268 | spin_unlock_irq(q->queue_lock); | 264 | spin_unlock_irq(q->queue_lock); |
| 269 | return BLK_STS_RESOURCE; | 265 | return BLK_STS_RESOURCE; |
| 270 | } | 266 | } |
| @@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 291 | break; | 287 | break; |
| 292 | } | 288 | } |
| 293 | 289 | ||
| 290 | /* Parallel dispatch of requests is not supported at the moment */ | ||
| 291 | mq->busy = true; | ||
| 292 | |||
| 294 | mq->in_flight[issue_type] += 1; | 293 | mq->in_flight[issue_type] += 1; |
| 295 | get_card = (mmc_tot_in_flight(mq) == 1); | 294 | get_card = (mmc_tot_in_flight(mq) == 1); |
| 296 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); | 295 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); |
| @@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 333 | mq->in_flight[issue_type] -= 1; | 332 | mq->in_flight[issue_type] -= 1; |
| 334 | if (mmc_tot_in_flight(mq) == 0) | 333 | if (mmc_tot_in_flight(mq) == 0) |
| 335 | put_card = true; | 334 | put_card = true; |
| 335 | mq->busy = false; | ||
| 336 | spin_unlock_irq(q->queue_lock); | 336 | spin_unlock_irq(q->queue_lock); |
| 337 | if (put_card) | 337 | if (put_card) |
| 338 | mmc_put_card(card, &mq->ctx); | 338 | mmc_put_card(card, &mq->ctx); |
| 339 | } else { | ||
| 340 | WRITE_ONCE(mq->busy, false); | ||
| 339 | } | 341 | } |
| 340 | 342 | ||
| 341 | return ret; | 343 | return ret; |
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 17e59d50b496..9bf3c9245075 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h | |||
| @@ -81,6 +81,7 @@ struct mmc_queue { | |||
| 81 | unsigned int cqe_busy; | 81 | unsigned int cqe_busy; |
| 82 | #define MMC_CQE_DCMD_BUSY BIT(0) | 82 | #define MMC_CQE_DCMD_BUSY BIT(0) |
| 83 | #define MMC_CQE_QUEUE_FULL BIT(1) | 83 | #define MMC_CQE_QUEUE_FULL BIT(1) |
| 84 | bool busy; | ||
| 84 | bool use_cqe; | 85 | bool use_cqe; |
| 85 | bool recovery_needed; | 86 | bool recovery_needed; |
| 86 | bool in_recovery; | 87 | bool in_recovery; |
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 294de177632c..61e4e2a213c9 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c | |||
| @@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, | |||
| 217 | * We don't really have DMA, so we need | 217 | * We don't really have DMA, so we need |
| 218 | * to copy from our platform driver buffer | 218 | * to copy from our platform driver buffer |
| 219 | */ | 219 | */ |
| 220 | sg_copy_to_buffer(data->sg, 1, host->virt_base, | 220 | sg_copy_from_buffer(data->sg, 1, host->virt_base, |
| 221 | data->sg->length); | 221 | data->sg->length); |
| 222 | } | 222 | } |
| 223 | host->data->bytes_xfered += data->sg->length; | 223 | host->data->bytes_xfered += data->sg->length; |
| @@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, | |||
| 393 | * We don't really have DMA, so we need to copy to our | 393 | * We don't really have DMA, so we need to copy to our |
| 394 | * platform driver buffer | 394 | * platform driver buffer |
| 395 | */ | 395 | */ |
| 396 | sg_copy_from_buffer(data->sg, 1, host->virt_base, | 396 | sg_copy_to_buffer(data->sg, 1, host->virt_base, |
| 397 | data->sg->length); | 397 | data->sg->length); |
| 398 | } | 398 | } |
| 399 | } | 399 | } |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 5aa2c9404e92..be53044086c7 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
| @@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 1976 | do { | 1976 | do { |
| 1977 | value = atmci_readl(host, ATMCI_RDR); | 1977 | value = atmci_readl(host, ATMCI_RDR); |
| 1978 | if (likely(offset + 4 <= sg->length)) { | 1978 | if (likely(offset + 4 <= sg->length)) { |
| 1979 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); | 1979 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); |
| 1980 | 1980 | ||
| 1981 | offset += 4; | 1981 | offset += 4; |
| 1982 | nbytes += 4; | 1982 | nbytes += 4; |
| @@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 1993 | } else { | 1993 | } else { |
| 1994 | unsigned int remaining = sg->length - offset; | 1994 | unsigned int remaining = sg->length - offset; |
| 1995 | 1995 | ||
| 1996 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); | 1996 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); |
| 1997 | nbytes += remaining; | 1997 | nbytes += remaining; |
| 1998 | 1998 | ||
| 1999 | flush_dcache_page(sg_page(sg)); | 1999 | flush_dcache_page(sg_page(sg)); |
| @@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 2003 | goto done; | 2003 | goto done; |
| 2004 | 2004 | ||
| 2005 | offset = 4 - remaining; | 2005 | offset = 4 - remaining; |
| 2006 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, | 2006 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, |
| 2007 | offset, 0); | 2007 | offset, 0); |
| 2008 | nbytes += offset; | 2008 | nbytes += offset; |
| 2009 | } | 2009 | } |
| @@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2042 | 2042 | ||
| 2043 | do { | 2043 | do { |
| 2044 | if (likely(offset + 4 <= sg->length)) { | 2044 | if (likely(offset + 4 <= sg->length)) { |
| 2045 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); | 2045 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); |
| 2046 | atmci_writel(host, ATMCI_TDR, value); | 2046 | atmci_writel(host, ATMCI_TDR, value); |
| 2047 | 2047 | ||
| 2048 | offset += 4; | 2048 | offset += 4; |
| @@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2059 | unsigned int remaining = sg->length - offset; | 2059 | unsigned int remaining = sg->length - offset; |
| 2060 | 2060 | ||
| 2061 | value = 0; | 2061 | value = 0; |
| 2062 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); | 2062 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); |
| 2063 | nbytes += remaining; | 2063 | nbytes += remaining; |
| 2064 | 2064 | ||
| 2065 | host->sg = sg = sg_next(sg); | 2065 | host->sg = sg = sg_next(sg); |
| @@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2070 | } | 2070 | } |
| 2071 | 2071 | ||
| 2072 | offset = 4 - remaining; | 2072 | offset = 4 - remaining; |
| 2073 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, | 2073 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, |
| 2074 | offset, 0); | 2074 | offset, 0); |
| 2075 | atmci_writel(host, ATMCI_TDR, value); | 2075 | atmci_writel(host, ATMCI_TDR, value); |
| 2076 | nbytes += offset; | 2076 | nbytes += offset; |
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 35cc0de6be67..ca0b43973769 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c | |||
| @@ -45,14 +45,16 @@ | |||
| 45 | /* DM_CM_RST */ | 45 | /* DM_CM_RST */ |
| 46 | #define RST_DTRANRST1 BIT(9) | 46 | #define RST_DTRANRST1 BIT(9) |
| 47 | #define RST_DTRANRST0 BIT(8) | 47 | #define RST_DTRANRST0 BIT(8) |
| 48 | #define RST_RESERVED_BITS GENMASK_ULL(32, 0) | 48 | #define RST_RESERVED_BITS GENMASK_ULL(31, 0) |
| 49 | 49 | ||
| 50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ | 50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ |
| 51 | #define INFO1_CLEAR 0 | 51 | #define INFO1_CLEAR 0 |
| 52 | #define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) | ||
| 52 | #define INFO1_DTRANEND1 BIT(17) | 53 | #define INFO1_DTRANEND1 BIT(17) |
| 53 | #define INFO1_DTRANEND0 BIT(16) | 54 | #define INFO1_DTRANEND0 BIT(16) |
| 54 | 55 | ||
| 55 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ | 56 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ |
| 57 | #define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) | ||
| 56 | #define INFO2_DTRANERR1 BIT(17) | 58 | #define INFO2_DTRANERR1 BIT(17) |
| 57 | #define INFO2_DTRANERR0 BIT(16) | 59 | #define INFO2_DTRANERR0 BIT(16) |
| 58 | 60 | ||
| @@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, | |||
| 252 | { | 254 | { |
| 253 | struct renesas_sdhi *priv = host_to_priv(host); | 255 | struct renesas_sdhi *priv = host_to_priv(host); |
| 254 | 256 | ||
| 257 | /* Disable DMAC interrupts, we don't use them */ | ||
| 258 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, | ||
| 259 | INFO1_MASK_CLEAR); | ||
| 260 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, | ||
| 261 | INFO2_MASK_CLEAR); | ||
| 262 | |||
| 255 | /* Each value is set to non-zero to assume "enabling" each DMA */ | 263 | /* Each value is set to non-zero to assume "enabling" each DMA */ |
| 256 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; | 264 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; |
| 257 | 265 | ||
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index ca18612c4201..67b2065e7a19 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
| @@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali) | |||
| 1338 | 1338 | ||
| 1339 | denali_enable_irq(denali); | 1339 | denali_enable_irq(denali); |
| 1340 | denali_reset_banks(denali); | 1340 | denali_reset_banks(denali); |
| 1341 | if (!denali->max_banks) { | ||
| 1342 | /* Error out earlier if no chip is found for some reasons. */ | ||
| 1343 | ret = -ENODEV; | ||
| 1344 | goto disable_irq; | ||
| 1345 | } | ||
| 1341 | 1346 | ||
| 1342 | denali->active_bank = DENALI_INVALID_BANK; | 1347 | denali->active_bank = DENALI_INVALID_BANK; |
| 1343 | 1348 | ||
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c index a3f04315c05c..427fcbc1b71c 100644 --- a/drivers/mtd/nand/raw/docg4.c +++ b/drivers/mtd/nand/raw/docg4.c | |||
| @@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev) | |||
| 1218 | return 0; | 1218 | return 0; |
| 1219 | } | 1219 | } |
| 1220 | 1220 | ||
| 1221 | static void __init init_mtd_structs(struct mtd_info *mtd) | 1221 | static void init_mtd_structs(struct mtd_info *mtd) |
| 1222 | { | 1222 | { |
| 1223 | /* initialize mtd and nand data structures */ | 1223 | /* initialize mtd and nand data structures */ |
| 1224 | 1224 | ||
| @@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) | |||
| 1290 | 1290 | ||
| 1291 | } | 1291 | } |
| 1292 | 1292 | ||
| 1293 | static int __init read_id_reg(struct mtd_info *mtd) | 1293 | static int read_id_reg(struct mtd_info *mtd) |
| 1294 | { | 1294 | { |
| 1295 | struct nand_chip *nand = mtd_to_nand(mtd); | 1295 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1296 | struct docg4_priv *doc = nand_get_controller_data(nand); | 1296 | struct docg4_priv *doc = nand_get_controller_data(nand); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 139d96c5a023..092c817f8f11 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
| @@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, | |||
| 110 | struct tcf_exts *tc_exts) | 110 | struct tcf_exts *tc_exts) |
| 111 | { | 111 | { |
| 112 | const struct tc_action *tc_act; | 112 | const struct tc_action *tc_act; |
| 113 | LIST_HEAD(tc_actions); | 113 | int i, rc; |
| 114 | int rc; | ||
| 115 | 114 | ||
| 116 | if (!tcf_exts_has_actions(tc_exts)) { | 115 | if (!tcf_exts_has_actions(tc_exts)) { |
| 117 | netdev_info(bp->dev, "no actions"); | 116 | netdev_info(bp->dev, "no actions"); |
| 118 | return -EINVAL; | 117 | return -EINVAL; |
| 119 | } | 118 | } |
| 120 | 119 | ||
| 121 | tcf_exts_to_list(tc_exts, &tc_actions); | 120 | tcf_exts_for_each_action(i, tc_act, tc_exts) { |
| 122 | list_for_each_entry(tc_act, &tc_actions, list) { | ||
| 123 | /* Drop action */ | 121 | /* Drop action */ |
| 124 | if (is_tcf_gact_shot(tc_act)) { | 122 | if (is_tcf_gact_shot(tc_act)) { |
| 125 | actions->flags |= BNXT_TC_ACTION_FLAG_DROP; | 123 | actions->flags |= BNXT_TC_ACTION_FLAG_DROP; |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index dc09f9a8a49b..c6707ea2d751 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
| @@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev) | |||
| 482 | 482 | ||
| 483 | if (np) { | 483 | if (np) { |
| 484 | if (of_phy_is_fixed_link(np)) { | 484 | if (of_phy_is_fixed_link(np)) { |
| 485 | if (of_phy_register_fixed_link(np) < 0) { | ||
| 486 | dev_err(&bp->pdev->dev, | ||
| 487 | "broken fixed-link specification\n"); | ||
| 488 | return -ENODEV; | ||
| 489 | } | ||
| 490 | bp->phy_node = of_node_get(np); | 485 | bp->phy_node = of_node_get(np); |
| 491 | } else { | 486 | } else { |
| 492 | bp->phy_node = of_parse_phandle(np, "phy-handle", 0); | 487 | bp->phy_node = of_parse_phandle(np, "phy-handle", 0); |
| @@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp) | |||
| 569 | { | 564 | { |
| 570 | struct macb_platform_data *pdata; | 565 | struct macb_platform_data *pdata; |
| 571 | struct device_node *np; | 566 | struct device_node *np; |
| 572 | int err; | 567 | int err = -ENXIO; |
| 573 | 568 | ||
| 574 | /* Enable management port */ | 569 | /* Enable management port */ |
| 575 | macb_writel(bp, NCR, MACB_BIT(MPE)); | 570 | macb_writel(bp, NCR, MACB_BIT(MPE)); |
| @@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp) | |||
| 592 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); | 587 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
| 593 | 588 | ||
| 594 | np = bp->pdev->dev.of_node; | 589 | np = bp->pdev->dev.of_node; |
| 595 | if (pdata) | 590 | if (np && of_phy_is_fixed_link(np)) { |
| 596 | bp->mii_bus->phy_mask = pdata->phy_mask; | 591 | if (of_phy_register_fixed_link(np) < 0) { |
| 592 | dev_err(&bp->pdev->dev, | ||
| 593 | "broken fixed-link specification %pOF\n", np); | ||
| 594 | goto err_out_free_mdiobus; | ||
| 595 | } | ||
| 596 | |||
| 597 | err = mdiobus_register(bp->mii_bus); | ||
| 598 | } else { | ||
| 599 | if (pdata) | ||
| 600 | bp->mii_bus->phy_mask = pdata->phy_mask; | ||
| 601 | |||
| 602 | err = of_mdiobus_register(bp->mii_bus, np); | ||
| 603 | } | ||
| 597 | 604 | ||
| 598 | err = of_mdiobus_register(bp->mii_bus, np); | ||
| 599 | if (err) | 605 | if (err) |
| 600 | goto err_out_free_mdiobus; | 606 | goto err_out_free_fixed_link; |
| 601 | 607 | ||
| 602 | err = macb_mii_probe(bp->dev); | 608 | err = macb_mii_probe(bp->dev); |
| 603 | if (err) | 609 | if (err) |
| @@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp) | |||
| 607 | 613 | ||
| 608 | err_out_unregister_bus: | 614 | err_out_unregister_bus: |
| 609 | mdiobus_unregister(bp->mii_bus); | 615 | mdiobus_unregister(bp->mii_bus); |
| 616 | err_out_free_fixed_link: | ||
| 610 | if (np && of_phy_is_fixed_link(np)) | 617 | if (np && of_phy_is_fixed_link(np)) |
| 611 | of_phy_deregister_fixed_link(np); | 618 | of_phy_deregister_fixed_link(np); |
| 612 | err_out_free_mdiobus: | 619 | err_out_free_mdiobus: |
| @@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp) | |||
| 2028 | { | 2035 | { |
| 2029 | struct macb_queue *queue; | 2036 | struct macb_queue *queue; |
| 2030 | unsigned int q; | 2037 | unsigned int q; |
| 2038 | u32 ctrl = macb_readl(bp, NCR); | ||
| 2031 | 2039 | ||
| 2032 | /* Disable RX and TX (XXX: Should we halt the transmission | 2040 | /* Disable RX and TX (XXX: Should we halt the transmission |
| 2033 | * more gracefully?) | 2041 | * more gracefully?) |
| 2034 | */ | 2042 | */ |
| 2035 | macb_writel(bp, NCR, 0); | 2043 | ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); |
| 2036 | 2044 | ||
| 2037 | /* Clear the stats registers (XXX: Update stats first?) */ | 2045 | /* Clear the stats registers (XXX: Update stats first?) */ |
| 2038 | macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); | 2046 | ctrl |= MACB_BIT(CLRSTAT); |
| 2047 | |||
| 2048 | macb_writel(bp, NCR, ctrl); | ||
| 2039 | 2049 | ||
| 2040 | /* Clear all status flags */ | 2050 | /* Clear all status flags */ |
| 2041 | macb_writel(bp, TSR, -1); | 2051 | macb_writel(bp, TSR, -1); |
| @@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp) | |||
| 2223 | } | 2233 | } |
| 2224 | 2234 | ||
| 2225 | /* Enable TX and RX */ | 2235 | /* Enable TX and RX */ |
| 2226 | macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); | 2236 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); |
| 2227 | } | 2237 | } |
| 2228 | 2238 | ||
| 2229 | /* The hash address register is 64 bits long and takes up two | 2239 | /* The hash address register is 64 bits long and takes up two |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 623f73dd7738..c116f96956fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | |||
| @@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in, | |||
| 417 | struct ch_filter_specification *fs) | 417 | struct ch_filter_specification *fs) |
| 418 | { | 418 | { |
| 419 | const struct tc_action *a; | 419 | const struct tc_action *a; |
| 420 | LIST_HEAD(actions); | 420 | int i; |
| 421 | 421 | ||
| 422 | tcf_exts_to_list(cls->exts, &actions); | 422 | tcf_exts_for_each_action(i, a, cls->exts) { |
| 423 | list_for_each_entry(a, &actions, list) { | ||
| 424 | if (is_tcf_gact_ok(a)) { | 423 | if (is_tcf_gact_ok(a)) { |
| 425 | fs->action = FILTER_PASS; | 424 | fs->action = FILTER_PASS; |
| 426 | } else if (is_tcf_gact_shot(a)) { | 425 | } else if (is_tcf_gact_shot(a)) { |
| @@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, | |||
| 591 | bool act_redir = false; | 590 | bool act_redir = false; |
| 592 | bool act_pedit = false; | 591 | bool act_pedit = false; |
| 593 | bool act_vlan = false; | 592 | bool act_vlan = false; |
| 594 | LIST_HEAD(actions); | 593 | int i; |
| 595 | 594 | ||
| 596 | tcf_exts_to_list(cls->exts, &actions); | 595 | tcf_exts_for_each_action(i, a, cls->exts) { |
| 597 | list_for_each_entry(a, &actions, list) { | ||
| 598 | if (is_tcf_gact_ok(a)) { | 596 | if (is_tcf_gact_ok(a)) { |
| 599 | /* Do nothing */ | 597 | /* Do nothing */ |
| 600 | } else if (is_tcf_gact_shot(a)) { | 598 | } else if (is_tcf_gact_shot(a)) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 18eb2aedd4cb..c7d2b4dc7568 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | |||
| @@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap, | |||
| 93 | unsigned int num_actions = 0; | 93 | unsigned int num_actions = 0; |
| 94 | const struct tc_action *a; | 94 | const struct tc_action *a; |
| 95 | struct tcf_exts *exts; | 95 | struct tcf_exts *exts; |
| 96 | LIST_HEAD(actions); | 96 | int i; |
| 97 | 97 | ||
| 98 | exts = cls->knode.exts; | 98 | exts = cls->knode.exts; |
| 99 | if (!tcf_exts_has_actions(exts)) | 99 | if (!tcf_exts_has_actions(exts)) |
| 100 | return -EINVAL; | 100 | return -EINVAL; |
| 101 | 101 | ||
| 102 | tcf_exts_to_list(exts, &actions); | 102 | tcf_exts_for_each_action(i, a, exts) { |
| 103 | list_for_each_entry(a, &actions, list) { | ||
| 104 | /* Don't allow more than one action per rule. */ | 103 | /* Don't allow more than one action per rule. */ |
| 105 | if (num_actions) | 104 | if (num_actions) |
| 106 | return -EINVAL; | 105 | return -EINVAL; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index fa5b30f547f6..cad52bd331f7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
| @@ -220,10 +220,10 @@ struct hnae_desc_cb { | |||
| 220 | 220 | ||
| 221 | /* priv data for the desc, e.g. skb when use with ip stack*/ | 221 | /* priv data for the desc, e.g. skb when use with ip stack*/ |
| 222 | void *priv; | 222 | void *priv; |
| 223 | u16 page_offset; | 223 | u32 page_offset; |
| 224 | u16 reuse_flag; | 224 | u32 length; /* length of the buffer */ |
| 225 | 225 | ||
| 226 | u16 length; /* length of the buffer */ | 226 | u16 reuse_flag; |
| 227 | 227 | ||
| 228 | /* desc type, used by the ring user to mark the type of the priv data */ | 228 | /* desc type, used by the ring user to mark the type of the priv data */ |
| 229 | u16 type; | 229 | u16 type; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 9f2b552aee33..02a0ba20fad5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
| @@ -406,113 +406,13 @@ out_net_tx_busy: | |||
| 406 | return NETDEV_TX_BUSY; | 406 | return NETDEV_TX_BUSY; |
| 407 | } | 407 | } |
| 408 | 408 | ||
| 409 | /** | ||
| 410 | * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE | ||
| 411 | * @data: pointer to the start of the headers | ||
| 412 | * @max: total length of section to find headers in | ||
| 413 | * | ||
| 414 | * This function is meant to determine the length of headers that will | ||
| 415 | * be recognized by hardware for LRO, GRO, and RSC offloads. The main | ||
| 416 | * motivation of doing this is to only perform one pull for IPv4 TCP | ||
| 417 | * packets so that we can do basic things like calculating the gso_size | ||
| 418 | * based on the average data per packet. | ||
| 419 | **/ | ||
| 420 | static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag, | ||
| 421 | unsigned int max_size) | ||
| 422 | { | ||
| 423 | unsigned char *network; | ||
| 424 | u8 hlen; | ||
| 425 | |||
| 426 | /* this should never happen, but better safe than sorry */ | ||
| 427 | if (max_size < ETH_HLEN) | ||
| 428 | return max_size; | ||
| 429 | |||
| 430 | /* initialize network frame pointer */ | ||
| 431 | network = data; | ||
| 432 | |||
| 433 | /* set first protocol and move network header forward */ | ||
| 434 | network += ETH_HLEN; | ||
| 435 | |||
| 436 | /* handle any vlan tag if present */ | ||
| 437 | if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S) | ||
| 438 | == HNS_RX_FLAG_VLAN_PRESENT) { | ||
| 439 | if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) | ||
| 440 | return max_size; | ||
| 441 | |||
| 442 | network += VLAN_HLEN; | ||
| 443 | } | ||
| 444 | |||
| 445 | /* handle L3 protocols */ | ||
| 446 | if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S) | ||
| 447 | == HNS_RX_FLAG_L3ID_IPV4) { | ||
| 448 | if ((typeof(max_size))(network - data) > | ||
| 449 | (max_size - sizeof(struct iphdr))) | ||
| 450 | return max_size; | ||
| 451 | |||
| 452 | /* access ihl as a u8 to avoid unaligned access on ia64 */ | ||
| 453 | hlen = (network[0] & 0x0F) << 2; | ||
| 454 | |||
| 455 | /* verify hlen meets minimum size requirements */ | ||
| 456 | if (hlen < sizeof(struct iphdr)) | ||
| 457 | return network - data; | ||
| 458 | |||
| 459 | /* record next protocol if header is present */ | ||
| 460 | } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S) | ||
| 461 | == HNS_RX_FLAG_L3ID_IPV6) { | ||
| 462 | if ((typeof(max_size))(network - data) > | ||
| 463 | (max_size - sizeof(struct ipv6hdr))) | ||
| 464 | return max_size; | ||
| 465 | |||
| 466 | /* record next protocol */ | ||
| 467 | hlen = sizeof(struct ipv6hdr); | ||
| 468 | } else { | ||
| 469 | return network - data; | ||
| 470 | } | ||
| 471 | |||
| 472 | /* relocate pointer to start of L4 header */ | ||
| 473 | network += hlen; | ||
| 474 | |||
| 475 | /* finally sort out TCP/UDP */ | ||
| 476 | if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S) | ||
| 477 | == HNS_RX_FLAG_L4ID_TCP) { | ||
| 478 | if ((typeof(max_size))(network - data) > | ||
| 479 | (max_size - sizeof(struct tcphdr))) | ||
| 480 | return max_size; | ||
| 481 | |||
| 482 | /* access doff as a u8 to avoid unaligned access on ia64 */ | ||
| 483 | hlen = (network[12] & 0xF0) >> 2; | ||
| 484 | |||
| 485 | /* verify hlen meets minimum size requirements */ | ||
| 486 | if (hlen < sizeof(struct tcphdr)) | ||
| 487 | return network - data; | ||
| 488 | |||
| 489 | network += hlen; | ||
| 490 | } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S) | ||
| 491 | == HNS_RX_FLAG_L4ID_UDP) { | ||
| 492 | if ((typeof(max_size))(network - data) > | ||
| 493 | (max_size - sizeof(struct udphdr))) | ||
| 494 | return max_size; | ||
| 495 | |||
| 496 | network += sizeof(struct udphdr); | ||
| 497 | } | ||
| 498 | |||
| 499 | /* If everything has gone correctly network should be the | ||
| 500 | * data section of the packet and will be the end of the header. | ||
| 501 | * If not then it probably represents the end of the last recognized | ||
| 502 | * header. | ||
| 503 | */ | ||
| 504 | if ((typeof(max_size))(network - data) < max_size) | ||
| 505 | return network - data; | ||
| 506 | else | ||
| 507 | return max_size; | ||
| 508 | } | ||
| 509 | |||
| 510 | static void hns_nic_reuse_page(struct sk_buff *skb, int i, | 409 | static void hns_nic_reuse_page(struct sk_buff *skb, int i, |
| 511 | struct hnae_ring *ring, int pull_len, | 410 | struct hnae_ring *ring, int pull_len, |
| 512 | struct hnae_desc_cb *desc_cb) | 411 | struct hnae_desc_cb *desc_cb) |
| 513 | { | 412 | { |
| 514 | struct hnae_desc *desc; | 413 | struct hnae_desc *desc; |
| 515 | int truesize, size; | 414 | u32 truesize; |
| 415 | int size; | ||
| 516 | int last_offset; | 416 | int last_offset; |
| 517 | bool twobufs; | 417 | bool twobufs; |
| 518 | 418 | ||
| @@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i, | |||
| 530 | } | 430 | } |
| 531 | 431 | ||
| 532 | skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, | 432 | skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, |
| 533 | size - pull_len, truesize - pull_len); | 433 | size - pull_len, truesize); |
| 534 | 434 | ||
| 535 | /* avoid re-using remote pages,flag default unreuse */ | 435 | /* avoid re-using remote pages,flag default unreuse */ |
| 536 | if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) | 436 | if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) |
| @@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, | |||
| 695 | } else { | 595 | } else { |
| 696 | ring->stats.seg_pkt_cnt++; | 596 | ring->stats.seg_pkt_cnt++; |
| 697 | 597 | ||
| 698 | pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE); | 598 | pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE); |
| 699 | memcpy(__skb_put(skb, pull_len), va, | 599 | memcpy(__skb_put(skb, pull_len), va, |
| 700 | ALIGN(pull_len, sizeof(long))); | 600 | ALIGN(pull_len, sizeof(long))); |
| 701 | 601 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 3554dca7a680..955c4ab18b03 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | |||
| @@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, | |||
| 2019 | struct hns3_desc_cb *desc_cb) | 2019 | struct hns3_desc_cb *desc_cb) |
| 2020 | { | 2020 | { |
| 2021 | struct hns3_desc *desc; | 2021 | struct hns3_desc *desc; |
| 2022 | int truesize, size; | 2022 | u32 truesize; |
| 2023 | int size; | ||
| 2023 | int last_offset; | 2024 | int last_offset; |
| 2024 | bool twobufs; | 2025 | bool twobufs; |
| 2025 | 2026 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index a02a96aee2a2..cb450d7ec8c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | |||
| @@ -284,11 +284,11 @@ struct hns3_desc_cb { | |||
| 284 | 284 | ||
| 285 | /* priv data for the desc, e.g. skb when use with ip stack*/ | 285 | /* priv data for the desc, e.g. skb when use with ip stack*/ |
| 286 | void *priv; | 286 | void *priv; |
| 287 | u16 page_offset; | 287 | u32 page_offset; |
| 288 | u16 reuse_flag; | ||
| 289 | |||
| 290 | u32 length; /* length of the buffer */ | 288 | u32 length; /* length of the buffer */ |
| 291 | 289 | ||
| 290 | u16 reuse_flag; | ||
| 291 | |||
| 292 | /* desc type, used by the ring user to mark the type of the priv data */ | 292 | /* desc type, used by the ring user to mark the type of the priv data */ |
| 293 | u16 type; | 293 | u16 type; |
| 294 | }; | 294 | }; |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index bdb3f8e65ed4..2569a168334c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
| @@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev, | |||
| 624 | adapter->tx_ring = tx_old; | 624 | adapter->tx_ring = tx_old; |
| 625 | e1000_free_all_rx_resources(adapter); | 625 | e1000_free_all_rx_resources(adapter); |
| 626 | e1000_free_all_tx_resources(adapter); | 626 | e1000_free_all_tx_resources(adapter); |
| 627 | kfree(tx_old); | ||
| 628 | kfree(rx_old); | ||
| 629 | adapter->rx_ring = rxdr; | 627 | adapter->rx_ring = rxdr; |
| 630 | adapter->tx_ring = txdr; | 628 | adapter->tx_ring = txdr; |
| 631 | err = e1000_up(adapter); | 629 | err = e1000_up(adapter); |
| 632 | if (err) | 630 | if (err) |
| 633 | goto err_setup; | 631 | goto err_setup; |
| 634 | } | 632 | } |
| 633 | kfree(tx_old); | ||
| 634 | kfree(rx_old); | ||
| 635 | 635 | ||
| 636 | clear_bit(__E1000_RESETTING, &adapter->flags); | 636 | clear_bit(__E1000_RESETTING, &adapter->flags); |
| 637 | return 0; | 637 | return 0; |
| @@ -644,7 +644,8 @@ err_setup_rx: | |||
| 644 | err_alloc_rx: | 644 | err_alloc_rx: |
| 645 | kfree(txdr); | 645 | kfree(txdr); |
| 646 | err_alloc_tx: | 646 | err_alloc_tx: |
| 647 | e1000_up(adapter); | 647 | if (netif_running(adapter->netdev)) |
| 648 | e1000_up(adapter); | ||
| 648 | err_setup: | 649 | err_setup: |
| 649 | clear_bit(__E1000_RESETTING, &adapter->flags); | 650 | clear_bit(__E1000_RESETTING, &adapter->flags); |
| 650 | return err; | 651 | return err; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index abcd096ede14..5ff6caa83948 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
| @@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) | |||
| 2013 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) | 2013 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) |
| 2014 | i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); | 2014 | i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); |
| 2015 | 2015 | ||
| 2016 | WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, | 2016 | WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, |
| 2017 | "stat strings count mismatch!"); | 2017 | "stat strings count mismatch!"); |
| 2018 | } | 2018 | } |
| 2019 | 2019 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f2c622e78802..ac685ad4d877 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, | |||
| 5122 | u8 *bw_share) | 5122 | u8 *bw_share) |
| 5123 | { | 5123 | { |
| 5124 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; | 5124 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; |
| 5125 | struct i40e_pf *pf = vsi->back; | ||
| 5125 | i40e_status ret; | 5126 | i40e_status ret; |
| 5126 | int i; | 5127 | int i; |
| 5127 | 5128 | ||
| 5128 | if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) | 5129 | /* There is no need to reset BW when mqprio mode is on. */ |
| 5130 | if (pf->flags & I40E_FLAG_TC_MQPRIO) | ||
| 5129 | return 0; | 5131 | return 0; |
| 5130 | if (!vsi->mqprio_qopt.qopt.hw) { | 5132 | if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { |
| 5131 | ret = i40e_set_bw_limit(vsi, vsi->seid, 0); | 5133 | ret = i40e_set_bw_limit(vsi, vsi->seid, 0); |
| 5132 | if (ret) | 5134 | if (ret) |
| 5133 | dev_info(&vsi->back->pdev->dev, | 5135 | dev_info(&pf->pdev->dev, |
| 5134 | "Failed to reset tx rate for vsi->seid %u\n", | 5136 | "Failed to reset tx rate for vsi->seid %u\n", |
| 5135 | vsi->seid); | 5137 | vsi->seid); |
| 5136 | return ret; | 5138 | return ret; |
| @@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, | |||
| 5139 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | 5141 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) |
| 5140 | bw_data.tc_bw_credits[i] = bw_share[i]; | 5142 | bw_data.tc_bw_credits[i] = bw_share[i]; |
| 5141 | 5143 | ||
| 5142 | ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, | 5144 | ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); |
| 5143 | NULL); | ||
| 5144 | if (ret) { | 5145 | if (ret) { |
| 5145 | dev_info(&vsi->back->pdev->dev, | 5146 | dev_info(&pf->pdev->dev, |
| 5146 | "AQ command Config VSI BW allocation per TC failed = %d\n", | 5147 | "AQ command Config VSI BW allocation per TC failed = %d\n", |
| 5147 | vsi->back->hw.aq.asq_last_status); | 5148 | pf->hw.aq.asq_last_status); |
| 5148 | return -EINVAL; | 5149 | return -EINVAL; |
| 5149 | } | 5150 | } |
| 5150 | 5151 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d8b5fff581e7..868f4a1d0f72 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h | |||
| @@ -89,6 +89,13 @@ extern const char ice_drv_ver[]; | |||
| 89 | #define ice_for_each_rxq(vsi, i) \ | 89 | #define ice_for_each_rxq(vsi, i) \ |
| 90 | for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) | 90 | for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) |
| 91 | 91 | ||
| 92 | /* Macros for each allocated tx/rx ring whether used or not in a VSI */ | ||
| 93 | #define ice_for_each_alloc_txq(vsi, i) \ | ||
| 94 | for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++) | ||
| 95 | |||
| 96 | #define ice_for_each_alloc_rxq(vsi, i) \ | ||
| 97 | for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++) | ||
| 98 | |||
| 92 | struct ice_tc_info { | 99 | struct ice_tc_info { |
| 93 | u16 qoffset; | 100 | u16 qoffset; |
| 94 | u16 qcount; | 101 | u16 qcount; |
| @@ -189,9 +196,9 @@ struct ice_vsi { | |||
| 189 | struct list_head tmp_sync_list; /* MAC filters to be synced */ | 196 | struct list_head tmp_sync_list; /* MAC filters to be synced */ |
| 190 | struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ | 197 | struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ |
| 191 | 198 | ||
| 192 | bool irqs_ready; | 199 | u8 irqs_ready; |
| 193 | bool current_isup; /* Sync 'link up' logging */ | 200 | u8 current_isup; /* Sync 'link up' logging */ |
| 194 | bool stat_offsets_loaded; | 201 | u8 stat_offsets_loaded; |
| 195 | 202 | ||
| 196 | /* queue information */ | 203 | /* queue information */ |
| 197 | u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ | 204 | u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ |
| @@ -262,7 +269,7 @@ struct ice_pf { | |||
| 262 | struct ice_hw_port_stats stats; | 269 | struct ice_hw_port_stats stats; |
| 263 | struct ice_hw_port_stats stats_prev; | 270 | struct ice_hw_port_stats stats_prev; |
| 264 | struct ice_hw hw; | 271 | struct ice_hw hw; |
| 265 | bool stat_prev_loaded; /* has previous stats been loaded */ | 272 | u8 stat_prev_loaded; /* has previous stats been loaded */ |
| 266 | char int_name[ICE_INT_NAME_STR_LEN]; | 273 | char int_name[ICE_INT_NAME_STR_LEN]; |
| 267 | }; | 274 | }; |
| 268 | 275 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 7541ec2270b3..a0614f472658 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | |||
| @@ -329,19 +329,19 @@ struct ice_aqc_vsi_props { | |||
| 329 | /* VLAN section */ | 329 | /* VLAN section */ |
| 330 | __le16 pvid; /* VLANS include priority bits */ | 330 | __le16 pvid; /* VLANS include priority bits */ |
| 331 | u8 pvlan_reserved[2]; | 331 | u8 pvlan_reserved[2]; |
| 332 | u8 port_vlan_flags; | 332 | u8 vlan_flags; |
| 333 | #define ICE_AQ_VSI_PVLAN_MODE_S 0 | 333 | #define ICE_AQ_VSI_VLAN_MODE_S 0 |
| 334 | #define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S) | 334 | #define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S) |
| 335 | #define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1 | 335 | #define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1 |
| 336 | #define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2 | 336 | #define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2 |
| 337 | #define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3 | 337 | #define ICE_AQ_VSI_VLAN_MODE_ALL 0x3 |
| 338 | #define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) | 338 | #define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) |
| 339 | #define ICE_AQ_VSI_PVLAN_EMOD_S 3 | 339 | #define ICE_AQ_VSI_VLAN_EMOD_S 3 |
| 340 | #define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) | 340 | #define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 341 | #define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S) | 341 | #define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 342 | #define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S) | 342 | #define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 343 | #define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S) | 343 | #define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 344 | #define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) | 344 | #define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 345 | u8 pvlan_reserved2[3]; | 345 | u8 pvlan_reserved2[3]; |
| 346 | /* ingress egress up sections */ | 346 | /* ingress egress up sections */ |
| 347 | __le32 ingress_table; /* bitmap, 3 bits per up */ | 347 | __le32 ingress_table; /* bitmap, 3 bits per up */ |
| @@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act { | |||
| 594 | #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) | 594 | #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) |
| 595 | #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 | 595 | #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 |
| 596 | #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) | 596 | #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) |
| 597 | #define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7 | ||
| 597 | 598 | ||
| 598 | /* Action = 7 - Set Stat count */ | 599 | /* Action = 7 - Set Stat count */ |
| 599 | #define ICE_LG_ACT_STAT_COUNT 0x7 | 600 | #define ICE_LG_ACT_STAT_COUNT 0x7 |
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 71d032cc5fa7..661beea6af79 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c | |||
| @@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) | |||
| 45 | /** | 45 | /** |
| 46 | * ice_clear_pf_cfg - Clear PF configuration | 46 | * ice_clear_pf_cfg - Clear PF configuration |
| 47 | * @hw: pointer to the hardware structure | 47 | * @hw: pointer to the hardware structure |
| 48 | * | ||
| 49 | * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port | ||
| 50 | * configuration, flow director filters, etc.). | ||
| 48 | */ | 51 | */ |
| 49 | enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) | 52 | enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) |
| 50 | { | 53 | { |
| @@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) | |||
| 1483 | struct ice_phy_info *phy_info; | 1486 | struct ice_phy_info *phy_info; |
| 1484 | enum ice_status status = 0; | 1487 | enum ice_status status = 0; |
| 1485 | 1488 | ||
| 1486 | if (!pi) | 1489 | if (!pi || !link_up) |
| 1487 | return ICE_ERR_PARAM; | 1490 | return ICE_ERR_PARAM; |
| 1488 | 1491 | ||
| 1489 | phy_info = &pi->phy; | 1492 | phy_info = &pi->phy; |
| @@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, | |||
| 1619 | } | 1622 | } |
| 1620 | 1623 | ||
| 1621 | /* LUT size is only valid for Global and PF table types */ | 1624 | /* LUT size is only valid for Global and PF table types */ |
| 1622 | if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) { | 1625 | switch (lut_size) { |
| 1623 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << | 1626 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: |
| 1624 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1627 | break; |
| 1625 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1628 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: |
| 1626 | } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) { | ||
| 1627 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << | 1629 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << |
| 1628 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1630 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & |
| 1629 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1631 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; |
| 1630 | } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) && | 1632 | break; |
| 1631 | (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) { | 1633 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: |
| 1632 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << | 1634 | if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { |
| 1633 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1635 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << |
| 1634 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1636 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & |
| 1635 | } else { | 1637 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; |
| 1638 | break; | ||
| 1639 | } | ||
| 1640 | /* fall-through */ | ||
| 1641 | default: | ||
| 1636 | status = ICE_ERR_PARAM; | 1642 | status = ICE_ERR_PARAM; |
| 1637 | goto ice_aq_get_set_rss_lut_exit; | 1643 | goto ice_aq_get_set_rss_lut_exit; |
| 1638 | } | 1644 | } |
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 7c511f144ed6..62be72fdc8f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c | |||
| @@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) | |||
| 597 | return 0; | 597 | return 0; |
| 598 | 598 | ||
| 599 | init_ctrlq_free_rq: | 599 | init_ctrlq_free_rq: |
| 600 | ice_shutdown_rq(hw, cq); | 600 | if (cq->rq.head) { |
| 601 | ice_shutdown_sq(hw, cq); | 601 | ice_shutdown_rq(hw, cq); |
| 602 | mutex_destroy(&cq->sq_lock); | 602 | mutex_destroy(&cq->rq_lock); |
| 603 | mutex_destroy(&cq->rq_lock); | 603 | } |
| 604 | if (cq->sq.head) { | ||
| 605 | ice_shutdown_sq(hw, cq); | ||
| 606 | mutex_destroy(&cq->sq_lock); | ||
| 607 | } | ||
| 604 | return status; | 608 | return status; |
| 605 | } | 609 | } |
| 606 | 610 | ||
| @@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) | |||
| 706 | return; | 710 | return; |
| 707 | } | 711 | } |
| 708 | 712 | ||
| 709 | ice_shutdown_sq(hw, cq); | 713 | if (cq->sq.head) { |
| 710 | ice_shutdown_rq(hw, cq); | 714 | ice_shutdown_sq(hw, cq); |
| 711 | mutex_destroy(&cq->sq_lock); | 715 | mutex_destroy(&cq->sq_lock); |
| 712 | mutex_destroy(&cq->rq_lock); | 716 | } |
| 717 | if (cq->rq.head) { | ||
| 718 | ice_shutdown_rq(hw, cq); | ||
| 719 | mutex_destroy(&cq->rq_lock); | ||
| 720 | } | ||
| 713 | } | 721 | } |
| 714 | 722 | ||
| 715 | /** | 723 | /** |
| @@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, | |||
| 1057 | 1065 | ||
| 1058 | clean_rq_elem_out: | 1066 | clean_rq_elem_out: |
| 1059 | /* Set pending if needed, unlock and return */ | 1067 | /* Set pending if needed, unlock and return */ |
| 1060 | if (pending) | 1068 | if (pending) { |
| 1069 | /* re-read HW head to calculate actual pending messages */ | ||
| 1070 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); | ||
| 1061 | *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); | 1071 | *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); |
| 1072 | } | ||
| 1062 | clean_rq_elem_err: | 1073 | clean_rq_elem_err: |
| 1063 | mutex_unlock(&cq->rq_lock); | 1074 | mutex_unlock(&cq->rq_lock); |
| 1064 | 1075 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 1db304c01d10..c71a9b528d6d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c | |||
| @@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev) | |||
| 26 | { | 26 | { |
| 27 | struct ice_netdev_priv *np = netdev_priv(netdev); | 27 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 28 | 28 | ||
| 29 | return ((np->vsi->num_txq + np->vsi->num_rxq) * | 29 | return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * |
| 30 | (sizeof(struct ice_q_stats) / sizeof(u64))); | 30 | (sizeof(struct ice_q_stats) / sizeof(u64))); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| @@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
| 218 | p += ETH_GSTRING_LEN; | 218 | p += ETH_GSTRING_LEN; |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | ice_for_each_txq(vsi, i) { | 221 | ice_for_each_alloc_txq(vsi, i) { |
| 222 | snprintf(p, ETH_GSTRING_LEN, | 222 | snprintf(p, ETH_GSTRING_LEN, |
| 223 | "tx-queue-%u.tx_packets", i); | 223 | "tx-queue-%u.tx_packets", i); |
| 224 | p += ETH_GSTRING_LEN; | 224 | p += ETH_GSTRING_LEN; |
| @@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
| 226 | p += ETH_GSTRING_LEN; | 226 | p += ETH_GSTRING_LEN; |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | ice_for_each_rxq(vsi, i) { | 229 | ice_for_each_alloc_rxq(vsi, i) { |
| 230 | snprintf(p, ETH_GSTRING_LEN, | 230 | snprintf(p, ETH_GSTRING_LEN, |
| 231 | "rx-queue-%u.rx_packets", i); | 231 | "rx-queue-%u.rx_packets", i); |
| 232 | p += ETH_GSTRING_LEN; | 232 | p += ETH_GSTRING_LEN; |
| @@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) | |||
| 253 | { | 253 | { |
| 254 | switch (sset) { | 254 | switch (sset) { |
| 255 | case ETH_SS_STATS: | 255 | case ETH_SS_STATS: |
| 256 | /* The number (and order) of strings reported *must* remain | ||
| 257 | * constant for a given netdevice. This function must not | ||
| 258 | * report a different number based on run time parameters | ||
| 259 | * (such as the number of queues in use, or the setting of | ||
| 260 | * a private ethtool flag). This is due to the nature of the | ||
| 261 | * ethtool stats API. | ||
| 262 | * | ||
| 263 | * User space programs such as ethtool must make 3 separate | ||
| 264 | * ioctl requests, one for size, one for the strings, and | ||
| 265 | * finally one for the stats. Since these cross into | ||
| 266 | * user space, changes to the number or size could result in | ||
| 267 | * undefined memory access or incorrect string<->value | ||
| 268 | * correlations for statistics. | ||
| 269 | * | ||
| 270 | * Even if it appears to be safe, changes to the size or | ||
| 271 | * order of strings will suffer from race conditions and are | ||
| 272 | * not safe. | ||
| 273 | */ | ||
| 256 | return ICE_ALL_STATS_LEN(netdev); | 274 | return ICE_ALL_STATS_LEN(netdev); |
| 257 | default: | 275 | default: |
| 258 | return -EOPNOTSUPP; | 276 | return -EOPNOTSUPP; |
| @@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev, | |||
| 280 | /* populate per queue stats */ | 298 | /* populate per queue stats */ |
| 281 | rcu_read_lock(); | 299 | rcu_read_lock(); |
| 282 | 300 | ||
| 283 | ice_for_each_txq(vsi, j) { | 301 | ice_for_each_alloc_txq(vsi, j) { |
| 284 | ring = READ_ONCE(vsi->tx_rings[j]); | 302 | ring = READ_ONCE(vsi->tx_rings[j]); |
| 285 | if (!ring) | 303 | if (ring) { |
| 286 | continue; | 304 | data[i++] = ring->stats.pkts; |
| 287 | data[i++] = ring->stats.pkts; | 305 | data[i++] = ring->stats.bytes; |
| 288 | data[i++] = ring->stats.bytes; | 306 | } else { |
| 307 | data[i++] = 0; | ||
| 308 | data[i++] = 0; | ||
| 309 | } | ||
| 289 | } | 310 | } |
| 290 | 311 | ||
| 291 | ice_for_each_rxq(vsi, j) { | 312 | ice_for_each_alloc_rxq(vsi, j) { |
| 292 | ring = READ_ONCE(vsi->rx_rings[j]); | 313 | ring = READ_ONCE(vsi->rx_rings[j]); |
| 293 | data[i++] = ring->stats.pkts; | 314 | if (ring) { |
| 294 | data[i++] = ring->stats.bytes; | 315 | data[i++] = ring->stats.pkts; |
| 316 | data[i++] = ring->stats.bytes; | ||
| 317 | } else { | ||
| 318 | data[i++] = 0; | ||
| 319 | data[i++] = 0; | ||
| 320 | } | ||
| 295 | } | 321 | } |
| 296 | 322 | ||
| 297 | rcu_read_unlock(); | 323 | rcu_read_unlock(); |
| @@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) | |||
| 519 | goto done; | 545 | goto done; |
| 520 | } | 546 | } |
| 521 | 547 | ||
| 522 | for (i = 0; i < vsi->num_txq; i++) { | 548 | for (i = 0; i < vsi->alloc_txq; i++) { |
| 523 | /* clone ring and setup updated count */ | 549 | /* clone ring and setup updated count */ |
| 524 | tx_rings[i] = *vsi->tx_rings[i]; | 550 | tx_rings[i] = *vsi->tx_rings[i]; |
| 525 | tx_rings[i].count = new_tx_cnt; | 551 | tx_rings[i].count = new_tx_cnt; |
| @@ -551,7 +577,7 @@ process_rx: | |||
| 551 | goto done; | 577 | goto done; |
| 552 | } | 578 | } |
| 553 | 579 | ||
| 554 | for (i = 0; i < vsi->num_rxq; i++) { | 580 | for (i = 0; i < vsi->alloc_rxq; i++) { |
| 555 | /* clone ring and setup updated count */ | 581 | /* clone ring and setup updated count */ |
| 556 | rx_rings[i] = *vsi->rx_rings[i]; | 582 | rx_rings[i] = *vsi->rx_rings[i]; |
| 557 | rx_rings[i].count = new_rx_cnt; | 583 | rx_rings[i].count = new_rx_cnt; |
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 499904874b3f..6076fc87df9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h | |||
| @@ -121,10 +121,6 @@ | |||
| 121 | #define PFINT_FW_CTL_CAUSE_ENA_S 30 | 121 | #define PFINT_FW_CTL_CAUSE_ENA_S 30 |
| 122 | #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) | 122 | #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) |
| 123 | #define PFINT_OICR 0x0016CA00 | 123 | #define PFINT_OICR 0x0016CA00 |
| 124 | #define PFINT_OICR_HLP_RDY_S 14 | ||
| 125 | #define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) | ||
| 126 | #define PFINT_OICR_CPM_RDY_S 15 | ||
| 127 | #define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S) | ||
| 128 | #define PFINT_OICR_ECC_ERR_S 16 | 124 | #define PFINT_OICR_ECC_ERR_S 16 |
| 129 | #define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) | 125 | #define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) |
| 130 | #define PFINT_OICR_MAL_DETECT_S 19 | 126 | #define PFINT_OICR_MAL_DETECT_S 19 |
| @@ -133,10 +129,6 @@ | |||
| 133 | #define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) | 129 | #define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) |
| 134 | #define PFINT_OICR_PCI_EXCEPTION_S 21 | 130 | #define PFINT_OICR_PCI_EXCEPTION_S 21 |
| 135 | #define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) | 131 | #define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) |
| 136 | #define PFINT_OICR_GPIO_S 22 | ||
| 137 | #define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S) | ||
| 138 | #define PFINT_OICR_STORM_DETECT_S 24 | ||
| 139 | #define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S) | ||
| 140 | #define PFINT_OICR_HMC_ERR_S 26 | 132 | #define PFINT_OICR_HMC_ERR_S 26 |
| 141 | #define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) | 133 | #define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) |
| 142 | #define PFINT_OICR_PE_CRITERR_S 28 | 134 | #define PFINT_OICR_PE_CRITERR_S 28 |
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d23a91665b46..068dbc740b76 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | |||
| @@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits { | |||
| 265 | struct ice_rlan_ctx { | 265 | struct ice_rlan_ctx { |
| 266 | u16 head; | 266 | u16 head; |
| 267 | u16 cpuid; /* bigger than needed, see above for reason */ | 267 | u16 cpuid; /* bigger than needed, see above for reason */ |
| 268 | #define ICE_RLAN_BASE_S 7 | ||
| 268 | u64 base; | 269 | u64 base; |
| 269 | u16 qlen; | 270 | u16 qlen; |
| 270 | #define ICE_RLAN_CTX_DBUF_S 7 | 271 | #define ICE_RLAN_CTX_DBUF_S 7 |
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5299caf55a7f..f1e80eed2fd6 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c | |||
| @@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) | |||
| 901 | case ice_aqc_opc_get_link_status: | 901 | case ice_aqc_opc_get_link_status: |
| 902 | if (ice_handle_link_event(pf)) | 902 | if (ice_handle_link_event(pf)) |
| 903 | dev_err(&pf->pdev->dev, | 903 | dev_err(&pf->pdev->dev, |
| 904 | "Could not handle link event"); | 904 | "Could not handle link event\n"); |
| 905 | break; | 905 | break; |
| 906 | default: | 906 | default: |
| 907 | dev_dbg(&pf->pdev->dev, | 907 | dev_dbg(&pf->pdev->dev, |
| @@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) | |||
| 917 | } | 917 | } |
| 918 | 918 | ||
| 919 | /** | 919 | /** |
| 920 | * ice_ctrlq_pending - check if there is a difference between ntc and ntu | ||
| 921 | * @hw: pointer to hardware info | ||
| 922 | * @cq: control queue information | ||
| 923 | * | ||
| 924 | * returns true if there are pending messages in a queue, false if there aren't | ||
| 925 | */ | ||
| 926 | static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) | ||
| 927 | { | ||
| 928 | u16 ntu; | ||
| 929 | |||
| 930 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); | ||
| 931 | return cq->rq.next_to_clean != ntu; | ||
| 932 | } | ||
| 933 | |||
| 934 | /** | ||
| 920 | * ice_clean_adminq_subtask - clean the AdminQ rings | 935 | * ice_clean_adminq_subtask - clean the AdminQ rings |
| 921 | * @pf: board private structure | 936 | * @pf: board private structure |
| 922 | */ | 937 | */ |
| 923 | static void ice_clean_adminq_subtask(struct ice_pf *pf) | 938 | static void ice_clean_adminq_subtask(struct ice_pf *pf) |
| 924 | { | 939 | { |
| 925 | struct ice_hw *hw = &pf->hw; | 940 | struct ice_hw *hw = &pf->hw; |
| 926 | u32 val; | ||
| 927 | 941 | ||
| 928 | if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) | 942 | if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) |
| 929 | return; | 943 | return; |
| @@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) | |||
| 933 | 947 | ||
| 934 | clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); | 948 | clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); |
| 935 | 949 | ||
| 936 | /* re-enable Admin queue interrupt causes */ | 950 | /* There might be a situation where new messages arrive to a control |
| 937 | val = rd32(hw, PFINT_FW_CTL); | 951 | * queue between processing the last message and clearing the |
| 938 | wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M)); | 952 | * EVENT_PENDING bit. So before exiting, check queue head again (using |
| 953 | * ice_ctrlq_pending) and process new messages if any. | ||
| 954 | */ | ||
| 955 | if (ice_ctrlq_pending(hw, &hw->adminq)) | ||
| 956 | __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); | ||
| 939 | 957 | ||
| 940 | ice_flush(hw); | 958 | ice_flush(hw); |
| 941 | } | 959 | } |
| @@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) | |||
| 1295 | qcount = numq_tc; | 1313 | qcount = numq_tc; |
| 1296 | } | 1314 | } |
| 1297 | 1315 | ||
| 1298 | /* find higher power-of-2 of qcount */ | 1316 | /* find the (rounded up) power-of-2 of qcount */ |
| 1299 | pow = ilog2(qcount); | 1317 | pow = order_base_2(qcount); |
| 1300 | |||
| 1301 | if (!is_power_of_2(qcount)) | ||
| 1302 | pow++; | ||
| 1303 | 1318 | ||
| 1304 | for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { | 1319 | for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { |
| 1305 | if (!(vsi->tc_cfg.ena_tc & BIT(i))) { | 1320 | if (!(vsi->tc_cfg.ena_tc & BIT(i))) { |
| @@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) | |||
| 1352 | ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; | 1367 | ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; |
| 1353 | /* Traffic from VSI can be sent to LAN */ | 1368 | /* Traffic from VSI can be sent to LAN */ |
| 1354 | ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; | 1369 | ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; |
| 1355 | /* Allow all packets untagged/tagged */ | 1370 | |
| 1356 | ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL & | 1371 | /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy |
| 1357 | ICE_AQ_VSI_PVLAN_MODE_M) >> | 1372 | * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all |
| 1358 | ICE_AQ_VSI_PVLAN_MODE_S); | 1373 | * packets untagged/tagged. |
| 1359 | /* Show VLAN/UP from packets in Rx descriptors */ | 1374 | */ |
| 1360 | ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH & | 1375 | ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & |
| 1361 | ICE_AQ_VSI_PVLAN_EMOD_M) >> | 1376 | ICE_AQ_VSI_VLAN_MODE_M) >> |
| 1362 | ICE_AQ_VSI_PVLAN_EMOD_S); | 1377 | ICE_AQ_VSI_VLAN_MODE_S); |
| 1378 | |||
| 1363 | /* Have 1:1 UP mapping for both ingress/egress tables */ | 1379 | /* Have 1:1 UP mapping for both ingress/egress tables */ |
| 1364 | table |= ICE_UP_TABLE_TRANSLATE(0, 0); | 1380 | table |= ICE_UP_TABLE_TRANSLATE(0, 0); |
| 1365 | table |= ICE_UP_TABLE_TRANSLATE(1, 1); | 1381 | table |= ICE_UP_TABLE_TRANSLATE(1, 1); |
| @@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf) | |||
| 1688 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ | 1704 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ |
| 1689 | rd32(hw, PFINT_OICR); /* read to clear */ | 1705 | rd32(hw, PFINT_OICR); /* read to clear */ |
| 1690 | 1706 | ||
| 1691 | val = (PFINT_OICR_HLP_RDY_M | | 1707 | val = (PFINT_OICR_ECC_ERR_M | |
| 1692 | PFINT_OICR_CPM_RDY_M | | ||
| 1693 | PFINT_OICR_ECC_ERR_M | | ||
| 1694 | PFINT_OICR_MAL_DETECT_M | | 1708 | PFINT_OICR_MAL_DETECT_M | |
| 1695 | PFINT_OICR_GRST_M | | 1709 | PFINT_OICR_GRST_M | |
| 1696 | PFINT_OICR_PCI_EXCEPTION_M | | 1710 | PFINT_OICR_PCI_EXCEPTION_M | |
| 1697 | PFINT_OICR_GPIO_M | | 1711 | PFINT_OICR_HMC_ERR_M | |
| 1698 | PFINT_OICR_STORM_DETECT_M | | 1712 | PFINT_OICR_PE_CRITERR_M); |
| 1699 | PFINT_OICR_HMC_ERR_M); | ||
| 1700 | 1713 | ||
| 1701 | wr32(hw, PFINT_OICR_ENA, val); | 1714 | wr32(hw, PFINT_OICR_ENA, val); |
| 1702 | 1715 | ||
| @@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) | |||
| 2058 | skip_req_irq: | 2071 | skip_req_irq: |
| 2059 | ice_ena_misc_vector(pf); | 2072 | ice_ena_misc_vector(pf); |
| 2060 | 2073 | ||
| 2061 | val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | | 2074 | val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
| 2062 | (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) | | 2075 | PFINT_OICR_CTL_CAUSE_ENA_M); |
| 2063 | PFINT_OICR_CTL_CAUSE_ENA_M; | ||
| 2064 | wr32(hw, PFINT_OICR_CTL, val); | 2076 | wr32(hw, PFINT_OICR_CTL, val); |
| 2065 | 2077 | ||
| 2066 | /* This enables Admin queue Interrupt causes */ | 2078 | /* This enables Admin queue Interrupt causes */ |
| 2067 | val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | | 2079 | val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
| 2068 | (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) | | 2080 | PFINT_FW_CTL_CAUSE_ENA_M); |
| 2069 | PFINT_FW_CTL_CAUSE_ENA_M; | ||
| 2070 | wr32(hw, PFINT_FW_CTL, val); | 2081 | wr32(hw, PFINT_FW_CTL, val); |
| 2071 | 2082 | ||
| 2072 | itr_gran = hw->itr_gran_200; | 2083 | itr_gran = hw->itr_gran_200; |
| @@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) | |||
| 3246 | if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) | 3257 | if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
| 3247 | ice_dis_msix(pf); | 3258 | ice_dis_msix(pf); |
| 3248 | 3259 | ||
| 3249 | devm_kfree(&pf->pdev->dev, pf->irq_tracker); | 3260 | if (pf->irq_tracker) { |
| 3250 | pf->irq_tracker = NULL; | 3261 | devm_kfree(&pf->pdev->dev, pf->irq_tracker); |
| 3262 | pf->irq_tracker = NULL; | ||
| 3263 | } | ||
| 3251 | } | 3264 | } |
| 3252 | 3265 | ||
| 3253 | /** | 3266 | /** |
| @@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev, | |||
| 3271 | 3284 | ||
| 3272 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); | 3285 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); |
| 3273 | if (err) { | 3286 | if (err) { |
| 3274 | dev_err(&pdev->dev, "I/O map error %d\n", err); | 3287 | dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); |
| 3275 | return err; | 3288 | return err; |
| 3276 | } | 3289 | } |
| 3277 | 3290 | ||
| @@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) | |||
| 3720 | enum ice_status status; | 3733 | enum ice_status status; |
| 3721 | 3734 | ||
| 3722 | /* Here we are configuring the VSI to let the driver add VLAN tags by | 3735 | /* Here we are configuring the VSI to let the driver add VLAN tags by |
| 3723 | * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN | 3736 | * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag |
| 3724 | * tag insertion happens in the Tx hot path, in ice_tx_map. | 3737 | * insertion happens in the Tx hot path, in ice_tx_map. |
| 3725 | */ | 3738 | */ |
| 3726 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL; | 3739 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; |
| 3727 | 3740 | ||
| 3728 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); | 3741 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
| 3729 | ctxt.vsi_num = vsi->vsi_num; | 3742 | ctxt.vsi_num = vsi->vsi_num; |
| @@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) | |||
| 3735 | return -EIO; | 3748 | return -EIO; |
| 3736 | } | 3749 | } |
| 3737 | 3750 | ||
| 3738 | vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; | 3751 | vsi->info.vlan_flags = ctxt.info.vlan_flags; |
| 3739 | return 0; | 3752 | return 0; |
| 3740 | } | 3753 | } |
| 3741 | 3754 | ||
| @@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) | |||
| 3757 | */ | 3770 | */ |
| 3758 | if (ena) { | 3771 | if (ena) { |
| 3759 | /* Strip VLAN tag from Rx packet and put it in the desc */ | 3772 | /* Strip VLAN tag from Rx packet and put it in the desc */ |
| 3760 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH; | 3773 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; |
| 3761 | } else { | 3774 | } else { |
| 3762 | /* Disable stripping. Leave tag in packet */ | 3775 | /* Disable stripping. Leave tag in packet */ |
| 3763 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING; | 3776 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; |
| 3764 | } | 3777 | } |
| 3765 | 3778 | ||
| 3779 | /* Allow all packets untagged/tagged */ | ||
| 3780 | ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; | ||
| 3781 | |||
| 3766 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); | 3782 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
| 3767 | ctxt.vsi_num = vsi->vsi_num; | 3783 | ctxt.vsi_num = vsi->vsi_num; |
| 3768 | 3784 | ||
| @@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) | |||
| 3773 | return -EIO; | 3789 | return -EIO; |
| 3774 | } | 3790 | } |
| 3775 | 3791 | ||
| 3776 | vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; | 3792 | vsi->info.vlan_flags = ctxt.info.vlan_flags; |
| 3777 | return 0; | 3793 | return 0; |
| 3778 | } | 3794 | } |
| 3779 | 3795 | ||
| @@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) | |||
| 3986 | /* clear the context structure first */ | 4002 | /* clear the context structure first */ |
| 3987 | memset(&rlan_ctx, 0, sizeof(rlan_ctx)); | 4003 | memset(&rlan_ctx, 0, sizeof(rlan_ctx)); |
| 3988 | 4004 | ||
| 3989 | rlan_ctx.base = ring->dma >> 7; | 4005 | rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; |
| 3990 | 4006 | ||
| 3991 | rlan_ctx.qlen = ring->count; | 4007 | rlan_ctx.qlen = ring->count; |
| 3992 | 4008 | ||
| @@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) | |||
| 4098 | { | 4114 | { |
| 4099 | int err; | 4115 | int err; |
| 4100 | 4116 | ||
| 4101 | ice_set_rx_mode(vsi->netdev); | 4117 | if (vsi->netdev) { |
| 4102 | 4118 | ice_set_rx_mode(vsi->netdev); | |
| 4103 | err = ice_restore_vlan(vsi); | 4119 | err = ice_restore_vlan(vsi); |
| 4104 | if (err) | 4120 | if (err) |
| 4105 | return err; | 4121 | return err; |
| 4122 | } | ||
| 4106 | 4123 | ||
| 4107 | err = ice_vsi_cfg_txqs(vsi); | 4124 | err = ice_vsi_cfg_txqs(vsi); |
| 4108 | if (!err) | 4125 | if (!err) |
| @@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi) | |||
| 4868 | */ | 4885 | */ |
| 4869 | static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) | 4886 | static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
| 4870 | { | 4887 | { |
| 4871 | int i, err; | 4888 | int i, err = 0; |
| 4872 | 4889 | ||
| 4873 | if (!vsi->num_txq) { | 4890 | if (!vsi->num_txq) { |
| 4874 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", | 4891 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", |
| @@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) | |||
| 4893 | */ | 4910 | */ |
| 4894 | static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) | 4911 | static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
| 4895 | { | 4912 | { |
| 4896 | int i, err; | 4913 | int i, err = 0; |
| 4897 | 4914 | ||
| 4898 | if (!vsi->num_rxq) { | 4915 | if (!vsi->num_rxq) { |
| 4899 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", | 4916 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", |
| @@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 5235 | u8 count = 0; | 5252 | u8 count = 0; |
| 5236 | 5253 | ||
| 5237 | if (new_mtu == netdev->mtu) { | 5254 | if (new_mtu == netdev->mtu) { |
| 5238 | netdev_warn(netdev, "mtu is already %d\n", netdev->mtu); | 5255 | netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); |
| 5239 | return 0; | 5256 | return 0; |
| 5240 | } | 5257 | } |
| 5241 | 5258 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 92da0a626ce0..295a8cd87fc1 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c | |||
| @@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) | |||
| 131 | * | 131 | * |
| 132 | * This function will request NVM ownership. | 132 | * This function will request NVM ownership. |
| 133 | */ | 133 | */ |
| 134 | static enum | 134 | static enum ice_status |
| 135 | ice_status ice_acquire_nvm(struct ice_hw *hw, | 135 | ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) |
| 136 | enum ice_aq_res_access_type access) | ||
| 137 | { | 136 | { |
| 138 | if (hw->nvm.blank_nvm_mode) | 137 | if (hw->nvm.blank_nvm_mode) |
| 139 | return 0; | 138 | return 0; |
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2e6c1d92cc88..eeae199469b6 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c | |||
| @@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, | |||
| 1576 | return status; | 1576 | return status; |
| 1577 | } | 1577 | } |
| 1578 | 1578 | ||
| 1579 | if (owner == ICE_SCHED_NODE_OWNER_LAN) | 1579 | vsi->max_lanq[tc] = new_numqs; |
| 1580 | vsi->max_lanq[tc] = new_numqs; | ||
| 1581 | 1580 | ||
| 1582 | return status; | 1581 | return status; |
| 1583 | } | 1582 | } |
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 723d15f1e90b..6b7ec2ae5ad6 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c | |||
| @@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, | |||
| 645 | act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; | 645 | act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; |
| 646 | lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); | 646 | lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); |
| 647 | 647 | ||
| 648 | act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; | 648 | act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << |
| 649 | ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; | ||
| 649 | 650 | ||
| 650 | /* Third action Marker value */ | 651 | /* Third action Marker value */ |
| 651 | act |= ICE_LG_ACT_GENERIC; | 652 | act |= ICE_LG_ACT_GENERIC; |
| 652 | act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & | 653 | act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & |
| 653 | ICE_LG_ACT_GENERIC_VALUE_M; | 654 | ICE_LG_ACT_GENERIC_VALUE_M; |
| 654 | 655 | ||
| 655 | act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; | ||
| 656 | lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); | 656 | lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); |
| 657 | 657 | ||
| 658 | /* call the fill switch rule to fill the lookup tx rx structure */ | 658 | /* call the fill switch rule to fill the lookup tx rx structure */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 6f4a0d159dbf..9b8ec128ee31 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h | |||
| @@ -17,7 +17,7 @@ struct ice_vsi_ctx { | |||
| 17 | u16 vsis_unallocated; | 17 | u16 vsis_unallocated; |
| 18 | u16 flags; | 18 | u16 flags; |
| 19 | struct ice_aqc_vsi_props info; | 19 | struct ice_aqc_vsi_props info; |
| 20 | bool alloc_from_pool; | 20 | u8 alloc_from_pool; |
| 21 | }; | 21 | }; |
| 22 | 22 | ||
| 23 | enum ice_sw_fwd_act_type { | 23 | enum ice_sw_fwd_act_type { |
| @@ -94,8 +94,8 @@ struct ice_fltr_info { | |||
| 94 | u8 qgrp_size; | 94 | u8 qgrp_size; |
| 95 | 95 | ||
| 96 | /* Rule creations populate these indicators basing on the switch type */ | 96 | /* Rule creations populate these indicators basing on the switch type */ |
| 97 | bool lb_en; /* Indicate if packet can be looped back */ | 97 | u8 lb_en; /* Indicate if packet can be looped back */ |
| 98 | bool lan_en; /* Indicate if packet can be forwarded to the uplink */ | 98 | u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ |
| 99 | }; | 99 | }; |
| 100 | 100 | ||
| 101 | /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ | 101 | /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 567067b650c4..31bc998fe200 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h | |||
| @@ -143,7 +143,7 @@ struct ice_ring { | |||
| 143 | u16 next_to_use; | 143 | u16 next_to_use; |
| 144 | u16 next_to_clean; | 144 | u16 next_to_clean; |
| 145 | 145 | ||
| 146 | bool ring_active; /* is ring online or not */ | 146 | u8 ring_active; /* is ring online or not */ |
| 147 | 147 | ||
| 148 | /* stats structs */ | 148 | /* stats structs */ |
| 149 | struct ice_q_stats stats; | 149 | struct ice_q_stats stats; |
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 99c8a9a71b5e..97c366e0ca59 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h | |||
| @@ -83,7 +83,7 @@ struct ice_link_status { | |||
| 83 | u64 phy_type_low; | 83 | u64 phy_type_low; |
| 84 | u16 max_frame_size; | 84 | u16 max_frame_size; |
| 85 | u16 link_speed; | 85 | u16 link_speed; |
| 86 | bool lse_ena; /* Link Status Event notification */ | 86 | u8 lse_ena; /* Link Status Event notification */ |
| 87 | u8 link_info; | 87 | u8 link_info; |
| 88 | u8 an_info; | 88 | u8 an_info; |
| 89 | u8 ext_info; | 89 | u8 ext_info; |
| @@ -101,7 +101,7 @@ struct ice_phy_info { | |||
| 101 | struct ice_link_status link_info_old; | 101 | struct ice_link_status link_info_old; |
| 102 | u64 phy_type_low; | 102 | u64 phy_type_low; |
| 103 | enum ice_media_type media_type; | 103 | enum ice_media_type media_type; |
| 104 | bool get_link_info; | 104 | u8 get_link_info; |
| 105 | }; | 105 | }; |
| 106 | 106 | ||
| 107 | /* Common HW capabilities for SW use */ | 107 | /* Common HW capabilities for SW use */ |
| @@ -167,7 +167,7 @@ struct ice_nvm_info { | |||
| 167 | u32 oem_ver; /* OEM version info */ | 167 | u32 oem_ver; /* OEM version info */ |
| 168 | u16 sr_words; /* Shadow RAM size in words */ | 168 | u16 sr_words; /* Shadow RAM size in words */ |
| 169 | u16 ver; /* NVM package version */ | 169 | u16 ver; /* NVM package version */ |
| 170 | bool blank_nvm_mode; /* is NVM empty (no FW present) */ | 170 | u8 blank_nvm_mode; /* is NVM empty (no FW present) */ |
| 171 | }; | 171 | }; |
| 172 | 172 | ||
| 173 | /* Max number of port to queue branches w.r.t topology */ | 173 | /* Max number of port to queue branches w.r.t topology */ |
| @@ -181,7 +181,7 @@ struct ice_sched_node { | |||
| 181 | struct ice_aqc_txsched_elem_data info; | 181 | struct ice_aqc_txsched_elem_data info; |
| 182 | u32 agg_id; /* aggregator group id */ | 182 | u32 agg_id; /* aggregator group id */ |
| 183 | u16 vsi_id; | 183 | u16 vsi_id; |
| 184 | bool in_use; /* suspended or in use */ | 184 | u8 in_use; /* suspended or in use */ |
| 185 | u8 tx_sched_layer; /* Logical Layer (1-9) */ | 185 | u8 tx_sched_layer; /* Logical Layer (1-9) */ |
| 186 | u8 num_children; | 186 | u8 num_children; |
| 187 | u8 tc_num; | 187 | u8 tc_num; |
| @@ -218,7 +218,7 @@ struct ice_sched_vsi_info { | |||
| 218 | struct ice_sched_tx_policy { | 218 | struct ice_sched_tx_policy { |
| 219 | u16 max_num_vsis; | 219 | u16 max_num_vsis; |
| 220 | u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; | 220 | u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; |
| 221 | bool rdma_ena; | 221 | u8 rdma_ena; |
| 222 | }; | 222 | }; |
| 223 | 223 | ||
| 224 | struct ice_port_info { | 224 | struct ice_port_info { |
| @@ -243,7 +243,7 @@ struct ice_port_info { | |||
| 243 | struct list_head agg_list; /* lists all aggregator */ | 243 | struct list_head agg_list; /* lists all aggregator */ |
| 244 | u8 lport; | 244 | u8 lport; |
| 245 | #define ICE_LPORT_MASK 0xff | 245 | #define ICE_LPORT_MASK 0xff |
| 246 | bool is_vf; | 246 | u8 is_vf; |
| 247 | }; | 247 | }; |
| 248 | 248 | ||
| 249 | struct ice_switch_info { | 249 | struct ice_switch_info { |
| @@ -287,7 +287,7 @@ struct ice_hw { | |||
| 287 | u8 max_cgds; | 287 | u8 max_cgds; |
| 288 | u8 sw_entry_point_layer; | 288 | u8 sw_entry_point_layer; |
| 289 | 289 | ||
| 290 | bool evb_veb; /* true for VEB, false for VEPA */ | 290 | u8 evb_veb; /* true for VEB, false for VEPA */ |
| 291 | struct ice_bus_info bus; | 291 | struct ice_bus_info bus; |
| 292 | struct ice_nvm_info nvm; | 292 | struct ice_nvm_info nvm; |
| 293 | struct ice_hw_dev_caps dev_caps; /* device capabilities */ | 293 | struct ice_hw_dev_caps dev_caps; /* device capabilities */ |
| @@ -318,7 +318,7 @@ struct ice_hw { | |||
| 318 | u8 itr_gran_100; | 318 | u8 itr_gran_100; |
| 319 | u8 itr_gran_50; | 319 | u8 itr_gran_50; |
| 320 | u8 itr_gran_25; | 320 | u8 itr_gran_25; |
| 321 | bool ucast_shared; /* true if VSIs can share unicast addr */ | 321 | u8 ucast_shared; /* true if VSIs can share unicast addr */ |
| 322 | 322 | ||
| 323 | }; | 323 | }; |
| 324 | 324 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index f92f7918112d..5acf3b743876 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
| @@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) | |||
| 1649 | if (hw->phy.type == e1000_phy_m88) | 1649 | if (hw->phy.type == e1000_phy_m88) |
| 1650 | igb_phy_disable_receiver(adapter); | 1650 | igb_phy_disable_receiver(adapter); |
| 1651 | 1651 | ||
| 1652 | mdelay(500); | 1652 | msleep(500); |
| 1653 | return 0; | 1653 | return 0; |
| 1654 | } | 1654 | } |
| 1655 | 1655 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index d03c2f0d7592..a32c576c1e65 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
| @@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
| 3873 | 3873 | ||
| 3874 | adapter->mac_table = kcalloc(hw->mac.rar_entry_count, | 3874 | adapter->mac_table = kcalloc(hw->mac.rar_entry_count, |
| 3875 | sizeof(struct igb_mac_addr), | 3875 | sizeof(struct igb_mac_addr), |
| 3876 | GFP_ATOMIC); | 3876 | GFP_KERNEL); |
| 3877 | if (!adapter->mac_table) | 3877 | if (!adapter->mac_table) |
| 3878 | return -ENOMEM; | 3878 | return -ENOMEM; |
| 3879 | 3879 | ||
| @@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
| 3883 | 3883 | ||
| 3884 | /* Setup and initialize a copy of the hw vlan table array */ | 3884 | /* Setup and initialize a copy of the hw vlan table array */ |
| 3885 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), | 3885 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), |
| 3886 | GFP_ATOMIC); | 3886 | GFP_KERNEL); |
| 3887 | if (!adapter->shadow_vfta) | 3887 | if (!adapter->shadow_vfta) |
| 3888 | return -ENOMEM; | 3888 | return -ENOMEM; |
| 3889 | 3889 | ||
| @@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) | |||
| 5816 | 5816 | ||
| 5817 | if (skb->ip_summed != CHECKSUM_PARTIAL) { | 5817 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
| 5818 | csum_failed: | 5818 | csum_failed: |
| 5819 | if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) | 5819 | if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && |
| 5820 | !tx_ring->launchtime_enable) | ||
| 5820 | return; | 5821 | return; |
| 5821 | goto no_csum; | 5822 | goto no_csum; |
| 5822 | } | 5823 | } |
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 43664adf7a3c..d3e72d0f66ef 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
| @@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
| 771 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); | 771 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); |
| 772 | rxdr->size = ALIGN(rxdr->size, 4096); | 772 | rxdr->size = ALIGN(rxdr->size, 4096); |
| 773 | 773 | ||
| 774 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 774 | rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, |
| 775 | GFP_KERNEL); | 775 | GFP_KERNEL); |
| 776 | 776 | ||
| 777 | if (!rxdr->desc) { | 777 | if (!rxdr->desc) { |
| 778 | vfree(rxdr->buffer_info); | 778 | vfree(rxdr->buffer_info); |
| 779 | return -ENOMEM; | 779 | return -ENOMEM; |
| 780 | } | 780 | } |
| 781 | memset(rxdr->desc, 0, rxdr->size); | ||
| 782 | 781 | ||
| 783 | rxdr->next_to_clean = 0; | 782 | rxdr->next_to_clean = 0; |
| 784 | rxdr->next_to_use = 0; | 783 | rxdr->next_to_use = 0; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 94b3165ff543..ccd852ad62a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | |||
| @@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, | |||
| 192 | } | 192 | } |
| 193 | 193 | ||
| 194 | /* alloc the udl from per cpu ddp pool */ | 194 | /* alloc the udl from per cpu ddp pool */ |
| 195 | ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); | 195 | ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); |
| 196 | if (!ddp->udl) { | 196 | if (!ddp->udl) { |
| 197 | e_err(drv, "failed allocated ddp context\n"); | 197 | e_err(drv, "failed allocated ddp context\n"); |
| 198 | goto out_noddp_unmap; | 198 | goto out_noddp_unmap; |
| @@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) | |||
| 760 | return 0; | 760 | return 0; |
| 761 | 761 | ||
| 762 | /* Extra buffer to be shared by all DDPs for HW work around */ | 762 | /* Extra buffer to be shared by all DDPs for HW work around */ |
| 763 | buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); | 763 | buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL); |
| 764 | if (!buffer) | 764 | if (!buffer) |
| 765 | return -ENOMEM; | 765 | return -ENOMEM; |
| 766 | 766 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 447098005490..9a23d33a47ed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, | |||
| 6201 | 6201 | ||
| 6202 | adapter->mac_table = kcalloc(hw->mac.num_rar_entries, | 6202 | adapter->mac_table = kcalloc(hw->mac.num_rar_entries, |
| 6203 | sizeof(struct ixgbe_mac_addr), | 6203 | sizeof(struct ixgbe_mac_addr), |
| 6204 | GFP_ATOMIC); | 6204 | GFP_KERNEL); |
| 6205 | if (!adapter->mac_table) | 6205 | if (!adapter->mac_table) |
| 6206 | return -ENOMEM; | 6206 | return -ENOMEM; |
| 6207 | 6207 | ||
| @@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 6620 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6620 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| 6621 | 6621 | ||
| 6622 | if (adapter->xdp_prog) { | 6622 | if (adapter->xdp_prog) { |
| 6623 | e_warn(probe, "MTU cannot be changed while XDP program is loaded\n"); | 6623 | int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + |
| 6624 | return -EPERM; | 6624 | VLAN_HLEN; |
| 6625 | int i; | ||
| 6626 | |||
| 6627 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
| 6628 | struct ixgbe_ring *ring = adapter->rx_ring[i]; | ||
| 6629 | |||
| 6630 | if (new_frame_size > ixgbe_rx_bufsz(ring)) { | ||
| 6631 | e_warn(probe, "Requested MTU size is not supported with XDP\n"); | ||
| 6632 | return -EINVAL; | ||
| 6633 | } | ||
| 6634 | } | ||
| 6625 | } | 6635 | } |
| 6626 | 6636 | ||
| 6627 | /* | 6637 | /* |
| @@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
| 8983 | 8993 | ||
| 8984 | #ifdef CONFIG_IXGBE_DCB | 8994 | #ifdef CONFIG_IXGBE_DCB |
| 8985 | if (tc) { | 8995 | if (tc) { |
| 8996 | if (adapter->xdp_prog) { | ||
| 8997 | e_warn(probe, "DCB is not supported with XDP\n"); | ||
| 8998 | |||
| 8999 | ixgbe_init_interrupt_scheme(adapter); | ||
| 9000 | if (netif_running(dev)) | ||
| 9001 | ixgbe_open(dev); | ||
| 9002 | return -EINVAL; | ||
| 9003 | } | ||
| 9004 | |||
| 8986 | netdev_set_num_tc(dev, tc); | 9005 | netdev_set_num_tc(dev, tc); |
| 8987 | ixgbe_set_prio_tc_map(adapter); | 9006 | ixgbe_set_prio_tc_map(adapter); |
| 8988 | 9007 | ||
| @@ -9171,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, | |||
| 9171 | struct tcf_exts *exts, u64 *action, u8 *queue) | 9190 | struct tcf_exts *exts, u64 *action, u8 *queue) |
| 9172 | { | 9191 | { |
| 9173 | const struct tc_action *a; | 9192 | const struct tc_action *a; |
| 9174 | LIST_HEAD(actions); | 9193 | int i; |
| 9175 | 9194 | ||
| 9176 | if (!tcf_exts_has_actions(exts)) | 9195 | if (!tcf_exts_has_actions(exts)) |
| 9177 | return -EINVAL; | 9196 | return -EINVAL; |
| 9178 | 9197 | ||
| 9179 | tcf_exts_to_list(exts, &actions); | 9198 | tcf_exts_for_each_action(i, a, exts) { |
| 9180 | list_for_each_entry(a, &actions, list) { | ||
| 9181 | |||
| 9182 | /* Drop action */ | 9199 | /* Drop action */ |
| 9183 | if (is_tcf_gact_shot(a)) { | 9200 | if (is_tcf_gact_shot(a)) { |
| 9184 | *action = IXGBE_FDIR_DROP_QUEUE; | 9201 | *action = IXGBE_FDIR_DROP_QUEUE; |
| @@ -9936,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) | |||
| 9936 | int tcs = adapter->hw_tcs ? : 1; | 9953 | int tcs = adapter->hw_tcs ? : 1; |
| 9937 | int pool, err; | 9954 | int pool, err; |
| 9938 | 9955 | ||
| 9956 | if (adapter->xdp_prog) { | ||
| 9957 | e_warn(probe, "L2FW offload is not supported with XDP\n"); | ||
| 9958 | return ERR_PTR(-EINVAL); | ||
| 9959 | } | ||
| 9960 | |||
| 9939 | /* The hardware supported by ixgbe only filters on the destination MAC | 9961 | /* The hardware supported by ixgbe only filters on the destination MAC |
| 9940 | * address. In order to avoid issues we only support offloading modes | 9962 | * address. In order to avoid issues we only support offloading modes |
| 9941 | * where the hardware can actually provide the functionality. | 9963 | * where the hardware can actually provide the functionality. |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 6f59933cdff7..3c6f01c41b78 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
| @@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, | |||
| 53 | struct ixgbe_hw *hw = &adapter->hw; | 53 | struct ixgbe_hw *hw = &adapter->hw; |
| 54 | int i; | 54 | int i; |
| 55 | 55 | ||
| 56 | if (adapter->xdp_prog) { | ||
| 57 | e_warn(probe, "SRIOV is not supported with XDP\n"); | ||
| 58 | return -EINVAL; | ||
| 59 | } | ||
| 60 | |||
| 56 | /* Enable VMDq flag so device will be set in VM mode */ | 61 | /* Enable VMDq flag so device will be set in VM mode */ |
| 57 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | | 62 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | |
| 58 | IXGBE_FLAG_VMDQ_ENABLED; | 63 | IXGBE_FLAG_VMDQ_ENABLED; |
| @@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, | |||
| 688 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | 693 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) |
| 689 | { | 694 | { |
| 690 | struct ixgbe_hw *hw = &adapter->hw; | 695 | struct ixgbe_hw *hw = &adapter->hw; |
| 696 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; | ||
| 691 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; | 697 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; |
| 698 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); | ||
| 692 | u8 num_tcs = adapter->hw_tcs; | 699 | u8 num_tcs = adapter->hw_tcs; |
| 700 | u32 reg_val; | ||
| 701 | u32 queue; | ||
| 702 | u32 word; | ||
| 693 | 703 | ||
| 694 | /* remove VLAN filters beloning to this VF */ | 704 | /* remove VLAN filters beloning to this VF */ |
| 695 | ixgbe_clear_vf_vlans(adapter, vf); | 705 | ixgbe_clear_vf_vlans(adapter, vf); |
| @@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | |||
| 726 | 736 | ||
| 727 | /* reset VF api back to unknown */ | 737 | /* reset VF api back to unknown */ |
| 728 | adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; | 738 | adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; |
| 739 | |||
| 740 | /* Restart each queue for given VF */ | ||
| 741 | for (queue = 0; queue < q_per_pool; queue++) { | ||
| 742 | unsigned int reg_idx = (vf * q_per_pool) + queue; | ||
| 743 | |||
| 744 | reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); | ||
| 745 | |||
| 746 | /* Re-enabling only configured queues */ | ||
| 747 | if (reg_val) { | ||
| 748 | reg_val |= IXGBE_TXDCTL_ENABLE; | ||
| 749 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); | ||
| 750 | reg_val &= ~IXGBE_TXDCTL_ENABLE; | ||
| 751 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); | ||
| 752 | } | ||
| 753 | } | ||
| 754 | |||
| 755 | /* Clear VF's mailbox memory */ | ||
| 756 | for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) | ||
| 757 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); | ||
| 758 | |||
| 759 | IXGBE_WRITE_FLUSH(hw); | ||
| 729 | } | 760 | } |
| 730 | 761 | ||
| 731 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | 762 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 44cfb2021145..41bcbb337e83 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | |||
| @@ -2518,6 +2518,7 @@ enum { | |||
| 2518 | /* Translated register #defines */ | 2518 | /* Translated register #defines */ |
| 2519 | #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) | 2519 | #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) |
| 2520 | #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) | 2520 | #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) |
| 2521 | #define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) | ||
| 2521 | #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) | 2522 | #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) |
| 2522 | #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) | 2523 | #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) |
| 2523 | 2524 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9131a1376e7d..9fed54017659 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
| 1982 | goto out_ok; | 1982 | goto out_ok; |
| 1983 | 1983 | ||
| 1984 | modify_ip_header = false; | 1984 | modify_ip_header = false; |
| 1985 | tcf_exts_to_list(exts, &actions); | 1985 | tcf_exts_for_each_action(i, a, exts) { |
| 1986 | list_for_each_entry(a, &actions, list) { | 1986 | int k; |
| 1987 | |||
| 1987 | if (!is_tcf_pedit(a)) | 1988 | if (!is_tcf_pedit(a)) |
| 1988 | continue; | 1989 | continue; |
| 1989 | 1990 | ||
| 1990 | nkeys = tcf_pedit_nkeys(a); | 1991 | nkeys = tcf_pedit_nkeys(a); |
| 1991 | for (i = 0; i < nkeys; i++) { | 1992 | for (k = 0; k < nkeys; k++) { |
| 1992 | htype = tcf_pedit_htype(a, i); | 1993 | htype = tcf_pedit_htype(a, k); |
| 1993 | if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || | 1994 | if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || |
| 1994 | htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { | 1995 | htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { |
| 1995 | modify_ip_header = true; | 1996 | modify_ip_header = true; |
| @@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 2053 | const struct tc_action *a; | 2054 | const struct tc_action *a; |
| 2054 | LIST_HEAD(actions); | 2055 | LIST_HEAD(actions); |
| 2055 | u32 action = 0; | 2056 | u32 action = 0; |
| 2056 | int err; | 2057 | int err, i; |
| 2057 | 2058 | ||
| 2058 | if (!tcf_exts_has_actions(exts)) | 2059 | if (!tcf_exts_has_actions(exts)) |
| 2059 | return -EINVAL; | 2060 | return -EINVAL; |
| 2060 | 2061 | ||
| 2061 | attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; | 2062 | attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; |
| 2062 | 2063 | ||
| 2063 | tcf_exts_to_list(exts, &actions); | 2064 | tcf_exts_for_each_action(i, a, exts) { |
| 2064 | list_for_each_entry(a, &actions, list) { | ||
| 2065 | if (is_tcf_gact_shot(a)) { | 2065 | if (is_tcf_gact_shot(a)) { |
| 2066 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP; | 2066 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP; |
| 2067 | if (MLX5_CAP_FLOWTABLE(priv->mdev, | 2067 | if (MLX5_CAP_FLOWTABLE(priv->mdev, |
| @@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 2666 | LIST_HEAD(actions); | 2666 | LIST_HEAD(actions); |
| 2667 | bool encap = false; | 2667 | bool encap = false; |
| 2668 | u32 action = 0; | 2668 | u32 action = 0; |
| 2669 | int err; | 2669 | int err, i; |
| 2670 | 2670 | ||
| 2671 | if (!tcf_exts_has_actions(exts)) | 2671 | if (!tcf_exts_has_actions(exts)) |
| 2672 | return -EINVAL; | 2672 | return -EINVAL; |
| @@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 2674 | attr->in_rep = rpriv->rep; | 2674 | attr->in_rep = rpriv->rep; |
| 2675 | attr->in_mdev = priv->mdev; | 2675 | attr->in_mdev = priv->mdev; |
| 2676 | 2676 | ||
| 2677 | tcf_exts_to_list(exts, &actions); | 2677 | tcf_exts_for_each_action(i, a, exts) { |
| 2678 | list_for_each_entry(a, &actions, list) { | ||
| 2679 | if (is_tcf_gact_shot(a)) { | 2678 | if (is_tcf_gact_shot(a)) { |
| 2680 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP | | 2679 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP | |
| 2681 | MLX5_FLOW_CONTEXT_ACTION_COUNT; | 2680 | MLX5_FLOW_CONTEXT_ACTION_COUNT; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6070d1591d1e..930700413b1d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1346 | return -ENOMEM; | 1346 | return -ENOMEM; |
| 1347 | mall_tc_entry->cookie = f->cookie; | 1347 | mall_tc_entry->cookie = f->cookie; |
| 1348 | 1348 | ||
| 1349 | tcf_exts_to_list(f->exts, &actions); | 1349 | a = tcf_exts_first_action(f->exts); |
| 1350 | a = list_first_entry(&actions, struct tc_action, list); | ||
| 1351 | 1350 | ||
| 1352 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { | 1351 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { |
| 1353 | struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; | 1352 | struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3ae930196741..3cdb7aca90b7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
| @@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, | |||
| 414 | void | 414 | void |
| 415 | mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); | 415 | mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); |
| 416 | void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); | 416 | void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); |
| 417 | void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, | ||
| 418 | struct net_device *dev); | ||
| 417 | 419 | ||
| 418 | /* spectrum_kvdl.c */ | 420 | /* spectrum_kvdl.c */ |
| 419 | enum mlxsw_sp_kvdl_entry_type { | 421 | enum mlxsw_sp_kvdl_entry_type { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ebd1b24ebaa5..8d211972c5e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | |||
| @@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, | |||
| 21 | struct netlink_ext_ack *extack) | 21 | struct netlink_ext_ack *extack) |
| 22 | { | 22 | { |
| 23 | const struct tc_action *a; | 23 | const struct tc_action *a; |
| 24 | LIST_HEAD(actions); | 24 | int err, i; |
| 25 | int err; | ||
| 26 | 25 | ||
| 27 | if (!tcf_exts_has_actions(exts)) | 26 | if (!tcf_exts_has_actions(exts)) |
| 28 | return 0; | 27 | return 0; |
| @@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, | |||
| 32 | if (err) | 31 | if (err) |
| 33 | return err; | 32 | return err; |
| 34 | 33 | ||
| 35 | tcf_exts_to_list(exts, &actions); | 34 | tcf_exts_for_each_action(i, a, exts) { |
| 36 | list_for_each_entry(a, &actions, list) { | ||
| 37 | if (is_tcf_gact_ok(a)) { | 35 | if (is_tcf_gact_ok(a)) { |
| 38 | err = mlxsw_sp_acl_rulei_act_terminate(rulei); | 36 | err = mlxsw_sp_acl_rulei_act_terminate(rulei); |
| 39 | if (err) { | 37 | if (err) { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3a96307f51b0..2ab9cf25a08a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) | |||
| 6234 | mlxsw_sp_vr_put(mlxsw_sp, vr); | 6234 | mlxsw_sp_vr_put(mlxsw_sp, vr); |
| 6235 | } | 6235 | } |
| 6236 | 6236 | ||
| 6237 | void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, | ||
| 6238 | struct net_device *dev) | ||
| 6239 | { | ||
| 6240 | struct mlxsw_sp_rif *rif; | ||
| 6241 | |||
| 6242 | rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); | ||
| 6243 | if (!rif) | ||
| 6244 | return; | ||
| 6245 | mlxsw_sp_rif_destroy(rif); | ||
| 6246 | } | ||
| 6247 | |||
| 6237 | static void | 6248 | static void |
| 6238 | mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, | 6249 | mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, |
| 6239 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) | 6250 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0d8444aaba01..db715da7bab7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, | |||
| 127 | return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); | 127 | return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev, | ||
| 131 | void *data) | ||
| 132 | { | ||
| 133 | struct mlxsw_sp *mlxsw_sp = data; | ||
| 134 | |||
| 135 | mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); | ||
| 136 | return 0; | ||
| 137 | } | ||
| 138 | |||
| 139 | static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp, | ||
| 140 | struct net_device *dev) | ||
| 141 | { | ||
| 142 | mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); | ||
| 143 | netdev_walk_all_upper_dev_rcu(dev, | ||
| 144 | mlxsw_sp_bridge_device_upper_rif_destroy, | ||
| 145 | mlxsw_sp); | ||
| 146 | } | ||
| 147 | |||
| 130 | static struct mlxsw_sp_bridge_device * | 148 | static struct mlxsw_sp_bridge_device * |
| 131 | mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, | 149 | mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, |
| 132 | struct net_device *br_dev) | 150 | struct net_device *br_dev) |
| @@ -165,6 +183,8 @@ static void | |||
| 165 | mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, | 183 | mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, |
| 166 | struct mlxsw_sp_bridge_device *bridge_device) | 184 | struct mlxsw_sp_bridge_device *bridge_device) |
| 167 | { | 185 | { |
| 186 | mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp, | ||
| 187 | bridge_device->dev); | ||
| 168 | list_del(&bridge_device->list); | 188 | list_del(&bridge_device->list); |
| 169 | if (bridge_device->vlan_enabled) | 189 | if (bridge_device->vlan_enabled) |
| 170 | bridge->vlan_enabled_exists = false; | 190 | bridge->vlan_enabled_exists = false; |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 0ba0356ec4e6..9044496803e6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
| @@ -796,11 +796,10 @@ int nfp_flower_compile_action(struct nfp_app *app, | |||
| 796 | struct net_device *netdev, | 796 | struct net_device *netdev, |
| 797 | struct nfp_fl_payload *nfp_flow) | 797 | struct nfp_fl_payload *nfp_flow) |
| 798 | { | 798 | { |
| 799 | int act_len, act_cnt, err, tun_out_cnt, out_cnt; | 799 | int act_len, act_cnt, err, tun_out_cnt, out_cnt, i; |
| 800 | enum nfp_flower_tun_type tun_type; | 800 | enum nfp_flower_tun_type tun_type; |
| 801 | const struct tc_action *a; | 801 | const struct tc_action *a; |
| 802 | u32 csum_updated = 0; | 802 | u32 csum_updated = 0; |
| 803 | LIST_HEAD(actions); | ||
| 804 | 803 | ||
| 805 | memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); | 804 | memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); |
| 806 | nfp_flow->meta.act_len = 0; | 805 | nfp_flow->meta.act_len = 0; |
| @@ -810,8 +809,7 @@ int nfp_flower_compile_action(struct nfp_app *app, | |||
| 810 | tun_out_cnt = 0; | 809 | tun_out_cnt = 0; |
| 811 | out_cnt = 0; | 810 | out_cnt = 0; |
| 812 | 811 | ||
| 813 | tcf_exts_to_list(flow->exts, &actions); | 812 | tcf_exts_for_each_action(i, a, flow->exts) { |
| 814 | list_for_each_entry(a, &actions, list) { | ||
| 815 | err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, | 813 | err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, |
| 816 | netdev, &tun_type, &tun_out_cnt, | 814 | netdev, &tun_type, &tun_out_cnt, |
| 817 | &out_cnt, &csum_updated); | 815 | &out_cnt, &csum_updated); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d9ab5add27a8..34193c2f1699 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c | |||
| @@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, | |||
| 407 | 407 | ||
| 408 | if (i == QED_INIT_MAX_POLL_COUNT) { | 408 | if (i == QED_INIT_MAX_POLL_COUNT) { |
| 409 | DP_ERR(p_hwfn, | 409 | DP_ERR(p_hwfn, |
| 410 | "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", | 410 | "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", |
| 411 | addr, le32_to_cpu(cmd->expected_val), | 411 | addr, le32_to_cpu(cmd->expected_val), |
| 412 | val, le32_to_cpu(cmd->op_data)); | 412 | val, le32_to_cpu(cmd->op_data)); |
| 413 | } | 413 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index d89a0e22f6e4..5d37ec7e9b0b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
| @@ -48,7 +48,7 @@ | |||
| 48 | #include "qed_reg_addr.h" | 48 | #include "qed_reg_addr.h" |
| 49 | #include "qed_sriov.h" | 49 | #include "qed_sriov.h" |
| 50 | 50 | ||
| 51 | #define CHIP_MCP_RESP_ITER_US 10 | 51 | #define QED_MCP_RESP_ITER_US 10 |
| 52 | 52 | ||
| 53 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ | 53 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ |
| 54 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ | 54 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ |
| @@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) | |||
| 183 | return 0; | 183 | return 0; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | /* Maximum of 1 sec to wait for the SHMEM ready indication */ | ||
| 187 | #define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 | ||
| 188 | #define QED_MCP_SHMEM_RDY_ITER_MS 50 | ||
| 189 | |||
| 186 | static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 190 | static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 187 | { | 191 | { |
| 188 | struct qed_mcp_info *p_info = p_hwfn->mcp_info; | 192 | struct qed_mcp_info *p_info = p_hwfn->mcp_info; |
| 193 | u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; | ||
| 194 | u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; | ||
| 189 | u32 drv_mb_offsize, mfw_mb_offsize; | 195 | u32 drv_mb_offsize, mfw_mb_offsize; |
| 190 | u32 mcp_pf_id = MCP_PF_ID(p_hwfn); | 196 | u32 mcp_pf_id = MCP_PF_ID(p_hwfn); |
| 191 | 197 | ||
| 192 | p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); | 198 | p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); |
| 193 | if (!p_info->public_base) | 199 | if (!p_info->public_base) { |
| 194 | return 0; | 200 | DP_NOTICE(p_hwfn, |
| 201 | "The address of the MCP scratch-pad is not configured\n"); | ||
| 202 | return -EINVAL; | ||
| 203 | } | ||
| 195 | 204 | ||
| 196 | p_info->public_base |= GRCBASE_MCP; | 205 | p_info->public_base |= GRCBASE_MCP; |
| 197 | 206 | ||
| 207 | /* Get the MFW MB address and number of supported messages */ | ||
| 208 | mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, | ||
| 209 | SECTION_OFFSIZE_ADDR(p_info->public_base, | ||
| 210 | PUBLIC_MFW_MB)); | ||
| 211 | p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); | ||
| 212 | p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, | ||
| 213 | p_info->mfw_mb_addr + | ||
| 214 | offsetof(struct public_mfw_mb, | ||
| 215 | sup_msgs)); | ||
| 216 | |||
| 217 | /* The driver can notify that there was an MCP reset, and might read the | ||
| 218 | * SHMEM values before the MFW has completed initializing them. | ||
| 219 | * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a | ||
| 220 | * data ready indication. | ||
| 221 | */ | ||
| 222 | while (!p_info->mfw_mb_length && --cnt) { | ||
| 223 | msleep(msec); | ||
| 224 | p_info->mfw_mb_length = | ||
| 225 | (u16)qed_rd(p_hwfn, p_ptt, | ||
| 226 | p_info->mfw_mb_addr + | ||
| 227 | offsetof(struct public_mfw_mb, sup_msgs)); | ||
| 228 | } | ||
| 229 | |||
| 230 | if (!cnt) { | ||
| 231 | DP_NOTICE(p_hwfn, | ||
| 232 | "Failed to get the SHMEM ready notification after %d msec\n", | ||
| 233 | QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); | ||
| 234 | return -EBUSY; | ||
| 235 | } | ||
| 236 | |||
| 198 | /* Calculate the driver and MFW mailbox address */ | 237 | /* Calculate the driver and MFW mailbox address */ |
| 199 | drv_mb_offsize = qed_rd(p_hwfn, p_ptt, | 238 | drv_mb_offsize = qed_rd(p_hwfn, p_ptt, |
| 200 | SECTION_OFFSIZE_ADDR(p_info->public_base, | 239 | SECTION_OFFSIZE_ADDR(p_info->public_base, |
| @@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 204 | "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", | 243 | "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", |
| 205 | drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); | 244 | drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); |
| 206 | 245 | ||
| 207 | /* Set the MFW MB address */ | ||
| 208 | mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, | ||
| 209 | SECTION_OFFSIZE_ADDR(p_info->public_base, | ||
| 210 | PUBLIC_MFW_MB)); | ||
| 211 | p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); | ||
| 212 | p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); | ||
| 213 | |||
| 214 | /* Get the current driver mailbox sequence before sending | 246 | /* Get the current driver mailbox sequence before sending |
| 215 | * the first command | 247 | * the first command |
| 216 | */ | 248 | */ |
| @@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn, | |||
| 285 | 317 | ||
| 286 | int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 318 | int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 287 | { | 319 | { |
| 288 | u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; | 320 | u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0; |
| 289 | int rc = 0; | 321 | int rc = 0; |
| 290 | 322 | ||
| 323 | if (p_hwfn->mcp_info->b_block_cmd) { | ||
| 324 | DP_NOTICE(p_hwfn, | ||
| 325 | "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n"); | ||
| 326 | return -EBUSY; | ||
| 327 | } | ||
| 328 | |||
| 291 | /* Ensure that only a single thread is accessing the mailbox */ | 329 | /* Ensure that only a single thread is accessing the mailbox */ |
| 292 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 330 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 293 | 331 | ||
| @@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 413 | (p_mb_params->cmd | seq_num), p_mb_params->param); | 451 | (p_mb_params->cmd | seq_num), p_mb_params->param); |
| 414 | } | 452 | } |
| 415 | 453 | ||
| 454 | static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) | ||
| 455 | { | ||
| 456 | p_hwfn->mcp_info->b_block_cmd = block_cmd; | ||
| 457 | |||
| 458 | DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", | ||
| 459 | block_cmd ? "Block" : "Unblock"); | ||
| 460 | } | ||
| 461 | |||
| 462 | static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, | ||
| 463 | struct qed_ptt *p_ptt) | ||
| 464 | { | ||
| 465 | u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; | ||
| 466 | u32 delay = QED_MCP_RESP_ITER_US; | ||
| 467 | |||
| 468 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | ||
| 469 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
| 470 | cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
| 471 | udelay(delay); | ||
| 472 | cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
| 473 | udelay(delay); | ||
| 474 | cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
| 475 | |||
| 476 | DP_NOTICE(p_hwfn, | ||
| 477 | "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", | ||
| 478 | cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); | ||
| 479 | } | ||
| 480 | |||
| 416 | static int | 481 | static int |
| 417 | _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | 482 | _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, |
| 418 | struct qed_ptt *p_ptt, | 483 | struct qed_ptt *p_ptt, |
| 419 | struct qed_mcp_mb_params *p_mb_params, | 484 | struct qed_mcp_mb_params *p_mb_params, |
| 420 | u32 max_retries, u32 delay) | 485 | u32 max_retries, u32 usecs) |
| 421 | { | 486 | { |
| 487 | u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); | ||
| 422 | struct qed_mcp_cmd_elem *p_cmd_elem; | 488 | struct qed_mcp_cmd_elem *p_cmd_elem; |
| 423 | u32 cnt = 0; | ||
| 424 | u16 seq_num; | 489 | u16 seq_num; |
| 425 | int rc = 0; | 490 | int rc = 0; |
| 426 | 491 | ||
| @@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 443 | goto err; | 508 | goto err; |
| 444 | 509 | ||
| 445 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); | 510 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 446 | udelay(delay); | 511 | |
| 512 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) | ||
| 513 | msleep(msecs); | ||
| 514 | else | ||
| 515 | udelay(usecs); | ||
| 447 | } while (++cnt < max_retries); | 516 | } while (++cnt < max_retries); |
| 448 | 517 | ||
| 449 | if (cnt >= max_retries) { | 518 | if (cnt >= max_retries) { |
| @@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 472 | * The spinlock stays locked until the list element is removed. | 541 | * The spinlock stays locked until the list element is removed. |
| 473 | */ | 542 | */ |
| 474 | 543 | ||
| 475 | udelay(delay); | 544 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) |
| 545 | msleep(msecs); | ||
| 546 | else | ||
| 547 | udelay(usecs); | ||
| 548 | |||
| 476 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 549 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 477 | 550 | ||
| 478 | if (p_cmd_elem->b_is_completed) | 551 | if (p_cmd_elem->b_is_completed) |
| @@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 491 | DP_NOTICE(p_hwfn, | 564 | DP_NOTICE(p_hwfn, |
| 492 | "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", | 565 | "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", |
| 493 | p_mb_params->cmd, p_mb_params->param); | 566 | p_mb_params->cmd, p_mb_params->param); |
| 567 | qed_mcp_print_cpu_info(p_hwfn, p_ptt); | ||
| 494 | 568 | ||
| 495 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 569 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 496 | qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); | 570 | qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); |
| 497 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); | 571 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 498 | 572 | ||
| 573 | if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) | ||
| 574 | qed_mcp_cmd_set_blocking(p_hwfn, true); | ||
| 575 | |||
| 499 | return -EAGAIN; | 576 | return -EAGAIN; |
| 500 | } | 577 | } |
| 501 | 578 | ||
| @@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 507 | "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", | 584 | "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", |
| 508 | p_mb_params->mcp_resp, | 585 | p_mb_params->mcp_resp, |
| 509 | p_mb_params->mcp_param, | 586 | p_mb_params->mcp_param, |
| 510 | (cnt * delay) / 1000, (cnt * delay) % 1000); | 587 | (cnt * usecs) / 1000, (cnt * usecs) % 1000); |
| 511 | 588 | ||
| 512 | /* Clear the sequence number from the MFW response */ | 589 | /* Clear the sequence number from the MFW response */ |
| 513 | p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; | 590 | p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; |
| @@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 525 | { | 602 | { |
| 526 | size_t union_data_size = sizeof(union drv_union_data); | 603 | size_t union_data_size = sizeof(union drv_union_data); |
| 527 | u32 max_retries = QED_DRV_MB_MAX_RETRIES; | 604 | u32 max_retries = QED_DRV_MB_MAX_RETRIES; |
| 528 | u32 delay = CHIP_MCP_RESP_ITER_US; | 605 | u32 usecs = QED_MCP_RESP_ITER_US; |
| 529 | 606 | ||
| 530 | /* MCP not initialized */ | 607 | /* MCP not initialized */ |
| 531 | if (!qed_mcp_is_init(p_hwfn)) { | 608 | if (!qed_mcp_is_init(p_hwfn)) { |
| @@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 533 | return -EBUSY; | 610 | return -EBUSY; |
| 534 | } | 611 | } |
| 535 | 612 | ||
| 613 | if (p_hwfn->mcp_info->b_block_cmd) { | ||
| 614 | DP_NOTICE(p_hwfn, | ||
| 615 | "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n", | ||
| 616 | p_mb_params->cmd, p_mb_params->param); | ||
| 617 | return -EBUSY; | ||
| 618 | } | ||
| 619 | |||
| 536 | if (p_mb_params->data_src_size > union_data_size || | 620 | if (p_mb_params->data_src_size > union_data_size || |
| 537 | p_mb_params->data_dst_size > union_data_size) { | 621 | p_mb_params->data_dst_size > union_data_size) { |
| 538 | DP_ERR(p_hwfn, | 622 | DP_ERR(p_hwfn, |
| @@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 542 | return -EINVAL; | 626 | return -EINVAL; |
| 543 | } | 627 | } |
| 544 | 628 | ||
| 629 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { | ||
| 630 | max_retries = DIV_ROUND_UP(max_retries, 1000); | ||
| 631 | usecs *= 1000; | ||
| 632 | } | ||
| 633 | |||
| 545 | return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, | 634 | return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, |
| 546 | delay); | 635 | usecs); |
| 547 | } | 636 | } |
| 548 | 637 | ||
| 549 | int qed_mcp_cmd(struct qed_hwfn *p_hwfn, | 638 | int qed_mcp_cmd(struct qed_hwfn *p_hwfn, |
| @@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn, | |||
| 761 | mb_params.data_src_size = sizeof(load_req); | 850 | mb_params.data_src_size = sizeof(load_req); |
| 762 | mb_params.p_data_dst = &load_rsp; | 851 | mb_params.p_data_dst = &load_rsp; |
| 763 | mb_params.data_dst_size = sizeof(load_rsp); | 852 | mb_params.data_dst_size = sizeof(load_rsp); |
| 853 | mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; | ||
| 764 | 854 | ||
| 765 | DP_VERBOSE(p_hwfn, QED_MSG_SP, | 855 | DP_VERBOSE(p_hwfn, QED_MSG_SP, |
| 766 | "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", | 856 | "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", |
| @@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, | |||
| 982 | 1072 | ||
| 983 | int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 1073 | int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 984 | { | 1074 | { |
| 985 | u32 wol_param, mcp_resp, mcp_param; | 1075 | struct qed_mcp_mb_params mb_params; |
| 1076 | u32 wol_param; | ||
| 986 | 1077 | ||
| 987 | switch (p_hwfn->cdev->wol_config) { | 1078 | switch (p_hwfn->cdev->wol_config) { |
| 988 | case QED_OV_WOL_DISABLED: | 1079 | case QED_OV_WOL_DISABLED: |
| @@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 1000 | wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; | 1091 | wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; |
| 1001 | } | 1092 | } |
| 1002 | 1093 | ||
| 1003 | return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, | 1094 | memset(&mb_params, 0, sizeof(mb_params)); |
| 1004 | &mcp_resp, &mcp_param); | 1095 | mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; |
| 1096 | mb_params.param = wol_param; | ||
| 1097 | mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; | ||
| 1098 | |||
| 1099 | return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); | ||
| 1005 | } | 1100 | } |
| 1006 | 1101 | ||
| 1007 | int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 1102 | int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| @@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, | |||
| 2077 | return rc; | 2172 | return rc; |
| 2078 | } | 2173 | } |
| 2079 | 2174 | ||
| 2175 | /* A maximal 100 msec waiting time for the MCP to halt */ | ||
| 2176 | #define QED_MCP_HALT_SLEEP_MS 10 | ||
| 2177 | #define QED_MCP_HALT_MAX_RETRIES 10 | ||
| 2178 | |||
| 2080 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 2179 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 2081 | { | 2180 | { |
| 2082 | u32 resp = 0, param = 0; | 2181 | u32 resp = 0, param = 0, cpu_state, cnt = 0; |
| 2083 | int rc; | 2182 | int rc; |
| 2084 | 2183 | ||
| 2085 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, | 2184 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, |
| 2086 | ¶m); | 2185 | ¶m); |
| 2087 | if (rc) | 2186 | if (rc) { |
| 2088 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); | 2187 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); |
| 2188 | return rc; | ||
| 2189 | } | ||
| 2089 | 2190 | ||
| 2090 | return rc; | 2191 | do { |
| 2192 | msleep(QED_MCP_HALT_SLEEP_MS); | ||
| 2193 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
| 2194 | if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) | ||
| 2195 | break; | ||
| 2196 | } while (++cnt < QED_MCP_HALT_MAX_RETRIES); | ||
| 2197 | |||
| 2198 | if (cnt == QED_MCP_HALT_MAX_RETRIES) { | ||
| 2199 | DP_NOTICE(p_hwfn, | ||
| 2200 | "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", | ||
| 2201 | qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); | ||
| 2202 | return -EBUSY; | ||
| 2203 | } | ||
| 2204 | |||
| 2205 | qed_mcp_cmd_set_blocking(p_hwfn, true); | ||
| 2206 | |||
| 2207 | return 0; | ||
| 2091 | } | 2208 | } |
| 2092 | 2209 | ||
| 2210 | #define QED_MCP_RESUME_SLEEP_MS 10 | ||
| 2211 | |||
| 2093 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 2212 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 2094 | { | 2213 | { |
| 2095 | u32 value, cpu_mode; | 2214 | u32 cpu_mode, cpu_state; |
| 2096 | 2215 | ||
| 2097 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); | 2216 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); |
| 2098 | 2217 | ||
| 2099 | value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | ||
| 2100 | value &= ~MCP_REG_CPU_MODE_SOFT_HALT; | ||
| 2101 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); | ||
| 2102 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | 2218 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); |
| 2219 | cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; | ||
| 2220 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); | ||
| 2221 | msleep(QED_MCP_RESUME_SLEEP_MS); | ||
| 2222 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
| 2103 | 2223 | ||
| 2104 | return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; | 2224 | if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { |
| 2225 | DP_NOTICE(p_hwfn, | ||
| 2226 | "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", | ||
| 2227 | cpu_mode, cpu_state); | ||
| 2228 | return -EBUSY; | ||
| 2229 | } | ||
| 2230 | |||
| 2231 | qed_mcp_cmd_set_blocking(p_hwfn, false); | ||
| 2232 | |||
| 2233 | return 0; | ||
| 2105 | } | 2234 | } |
| 2106 | 2235 | ||
| 2107 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, | 2236 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 047976d5c6e9..85e6b3989e7a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h | |||
| @@ -635,11 +635,14 @@ struct qed_mcp_info { | |||
| 635 | */ | 635 | */ |
| 636 | spinlock_t cmd_lock; | 636 | spinlock_t cmd_lock; |
| 637 | 637 | ||
| 638 | /* Flag to indicate whether sending a MFW mailbox command is blocked */ | ||
| 639 | bool b_block_cmd; | ||
| 640 | |||
| 638 | /* Spinlock used for syncing SW link-changes and link-changes | 641 | /* Spinlock used for syncing SW link-changes and link-changes |
| 639 | * originating from attention context. | 642 | * originating from attention context. |
| 640 | */ | 643 | */ |
| 641 | spinlock_t link_lock; | 644 | spinlock_t link_lock; |
| 642 | bool block_mb_sending; | 645 | |
| 643 | u32 public_base; | 646 | u32 public_base; |
| 644 | u32 drv_mb_addr; | 647 | u32 drv_mb_addr; |
| 645 | u32 mfw_mb_addr; | 648 | u32 mfw_mb_addr; |
| @@ -660,14 +663,20 @@ struct qed_mcp_info { | |||
| 660 | }; | 663 | }; |
| 661 | 664 | ||
| 662 | struct qed_mcp_mb_params { | 665 | struct qed_mcp_mb_params { |
| 663 | u32 cmd; | 666 | u32 cmd; |
| 664 | u32 param; | 667 | u32 param; |
| 665 | void *p_data_src; | 668 | void *p_data_src; |
| 666 | u8 data_src_size; | 669 | void *p_data_dst; |
| 667 | void *p_data_dst; | 670 | u8 data_src_size; |
| 668 | u8 data_dst_size; | 671 | u8 data_dst_size; |
| 669 | u32 mcp_resp; | 672 | u32 mcp_resp; |
| 670 | u32 mcp_param; | 673 | u32 mcp_param; |
| 674 | u32 flags; | ||
| 675 | #define QED_MB_FLAG_CAN_SLEEP (0x1 << 0) | ||
| 676 | #define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1) | ||
| 677 | #define QED_MB_FLAGS_IS_SET(params, flag) \ | ||
| 678 | ({ typeof(params) __params = (params); \ | ||
| 679 | (__params && (__params->flags & QED_MB_FLAG_ ## flag)); }) | ||
| 671 | }; | 680 | }; |
| 672 | 681 | ||
| 673 | struct qed_drv_tlv_hdr { | 682 | struct qed_drv_tlv_hdr { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index d8ad2dcad8d5..f736f70956fd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
| @@ -562,8 +562,10 @@ | |||
| 562 | 0 | 562 | 0 |
| 563 | #define MCP_REG_CPU_STATE \ | 563 | #define MCP_REG_CPU_STATE \ |
| 564 | 0xe05004UL | 564 | 0xe05004UL |
| 565 | #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) | ||
| 565 | #define MCP_REG_CPU_EVENT_MASK \ | 566 | #define MCP_REG_CPU_EVENT_MASK \ |
| 566 | 0xe05008UL | 567 | 0xe05008UL |
| 568 | #define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL | ||
| 567 | #define PGLUE_B_REG_PF_BAR0_SIZE \ | 569 | #define PGLUE_B_REG_PF_BAR0_SIZE \ |
| 568 | 0x2aae60UL | 570 | 0x2aae60UL |
| 569 | #define PGLUE_B_REG_PF_BAR1_SIZE \ | 571 | #define PGLUE_B_REG_PF_BAR1_SIZE \ |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 9673d19308e6..b16ce7d93caf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c | |||
| @@ -2006,18 +2006,16 @@ unlock: | |||
| 2006 | static int qede_parse_actions(struct qede_dev *edev, | 2006 | static int qede_parse_actions(struct qede_dev *edev, |
| 2007 | struct tcf_exts *exts) | 2007 | struct tcf_exts *exts) |
| 2008 | { | 2008 | { |
| 2009 | int rc = -EINVAL, num_act = 0; | 2009 | int rc = -EINVAL, num_act = 0, i; |
| 2010 | const struct tc_action *a; | 2010 | const struct tc_action *a; |
| 2011 | bool is_drop = false; | 2011 | bool is_drop = false; |
| 2012 | LIST_HEAD(actions); | ||
| 2013 | 2012 | ||
| 2014 | if (!tcf_exts_has_actions(exts)) { | 2013 | if (!tcf_exts_has_actions(exts)) { |
| 2015 | DP_NOTICE(edev, "No tc actions received\n"); | 2014 | DP_NOTICE(edev, "No tc actions received\n"); |
| 2016 | return rc; | 2015 | return rc; |
| 2017 | } | 2016 | } |
| 2018 | 2017 | ||
| 2019 | tcf_exts_to_list(exts, &actions); | 2018 | tcf_exts_for_each_action(i, a, exts) { |
| 2020 | list_for_each_entry(a, &actions, list) { | ||
| 2021 | num_act++; | 2019 | num_act++; |
| 2022 | 2020 | ||
| 2023 | if (is_tcf_gact_shot(a)) | 2021 | if (is_tcf_gact_shot(a)) |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 353f1c129af1..059ba9429e51 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
| @@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, | |||
| 2384 | return status; | 2384 | return status; |
| 2385 | } | 2385 | } |
| 2386 | 2386 | ||
| 2387 | static netdev_features_t qlge_fix_features(struct net_device *ndev, | ||
| 2388 | netdev_features_t features) | ||
| 2389 | { | ||
| 2390 | int err; | ||
| 2391 | |||
| 2392 | /* Update the behavior of vlan accel in the adapter */ | ||
| 2393 | err = qlge_update_hw_vlan_features(ndev, features); | ||
| 2394 | if (err) | ||
| 2395 | return err; | ||
| 2396 | |||
| 2397 | return features; | ||
| 2398 | } | ||
| 2399 | |||
| 2400 | static int qlge_set_features(struct net_device *ndev, | 2387 | static int qlge_set_features(struct net_device *ndev, |
| 2401 | netdev_features_t features) | 2388 | netdev_features_t features) |
| 2402 | { | 2389 | { |
| 2403 | netdev_features_t changed = ndev->features ^ features; | 2390 | netdev_features_t changed = ndev->features ^ features; |
| 2391 | int err; | ||
| 2392 | |||
| 2393 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) { | ||
| 2394 | /* Update the behavior of vlan accel in the adapter */ | ||
| 2395 | err = qlge_update_hw_vlan_features(ndev, features); | ||
| 2396 | if (err) | ||
| 2397 | return err; | ||
| 2404 | 2398 | ||
| 2405 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) | ||
| 2406 | qlge_vlan_mode(ndev, features); | 2399 | qlge_vlan_mode(ndev, features); |
| 2400 | } | ||
| 2407 | 2401 | ||
| 2408 | return 0; | 2402 | return 0; |
| 2409 | } | 2403 | } |
| @@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = { | |||
| 4719 | .ndo_set_mac_address = qlge_set_mac_address, | 4713 | .ndo_set_mac_address = qlge_set_mac_address, |
| 4720 | .ndo_validate_addr = eth_validate_addr, | 4714 | .ndo_validate_addr = eth_validate_addr, |
| 4721 | .ndo_tx_timeout = qlge_tx_timeout, | 4715 | .ndo_tx_timeout = qlge_tx_timeout, |
| 4722 | .ndo_fix_features = qlge_fix_features, | ||
| 4723 | .ndo_set_features = qlge_set_features, | 4716 | .ndo_set_features = qlge_set_features, |
| 4724 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, | 4717 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, |
| 4725 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, | 4718 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, |
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b81f4faf7b10..1470fc12282b 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* Renesas Ethernet AVB device driver | 2 | /* Renesas Ethernet AVB device driver |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2014-2015 Renesas Electronics Corporation | 4 | * Copyright (C) 2014-2015 Renesas Electronics Corporation |
| @@ -5,10 +6,6 @@ | |||
| 5 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> | 6 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> |
| 6 | * | 7 | * |
| 7 | * Based on the SuperH Ethernet driver | 8 | * Based on the SuperH Ethernet driver |
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify it | ||
| 10 | * under the terms and conditions of the GNU General Public License version 2, | ||
| 11 | * as published by the Free Software Foundation. | ||
| 12 | */ | 9 | */ |
| 13 | 10 | ||
| 14 | #ifndef __RAVB_H__ | 11 | #ifndef __RAVB_H__ |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c06f2df895c2..aff5516b781e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* Renesas Ethernet AVB device driver | 2 | /* Renesas Ethernet AVB device driver |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2014-2015 Renesas Electronics Corporation | 4 | * Copyright (C) 2014-2015 Renesas Electronics Corporation |
| @@ -5,10 +6,6 @@ | |||
| 5 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> | 6 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> |
| 6 | * | 7 | * |
| 7 | * Based on the SuperH Ethernet driver | 8 | * Based on the SuperH Ethernet driver |
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify it | ||
| 10 | * under the terms and conditions of the GNU General Public License version 2, | ||
| 11 | * as published by the Free Software Foundation. | ||
| 12 | */ | 9 | */ |
| 13 | 10 | ||
| 14 | #include <linux/cache.h> | 11 | #include <linux/cache.h> |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5573199c4536..ad4433d59237 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* SuperH Ethernet device driver | 2 | /* SuperH Ethernet device driver |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2014 Renesas Electronics Corporation | 4 | * Copyright (C) 2014 Renesas Electronics Corporation |
| @@ -5,18 +6,6 @@ | |||
| 5 | * Copyright (C) 2008-2014 Renesas Solutions Corp. | 6 | * Copyright (C) 2008-2014 Renesas Solutions Corp. |
| 6 | * Copyright (C) 2013-2017 Cogent Embedded, Inc. | 7 | * Copyright (C) 2013-2017 Cogent Embedded, Inc. |
| 7 | * Copyright (C) 2014 Codethink Limited | 8 | * Copyright (C) 2014 Codethink Limited |
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify it | ||
| 10 | * under the terms and conditions of the GNU General Public License, | ||
| 11 | * version 2, as published by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | ||
| 19 | * the file called "COPYING". | ||
| 20 | */ | 9 | */ |
| 21 | 10 | ||
| 22 | #include <linux/module.h> | 11 | #include <linux/module.h> |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index f94be99cf400..0c18650bbfe6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
| @@ -1,19 +1,8 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* SuperH Ethernet device driver | 2 | /* SuperH Ethernet device driver |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2006-2012 Nobuhiro Iwamatsu | 4 | * Copyright (C) 2006-2012 Nobuhiro Iwamatsu |
| 4 | * Copyright (C) 2008-2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2008-2012 Renesas Solutions Corp. |
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | */ | 6 | */ |
| 18 | 7 | ||
| 19 | #ifndef __SH_ETH_H__ | 8 | #ifndef __SH_ETH_H__ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index edf20361ea5f..bf4acebb6bcd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
| @@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH | |||
| 33 | select PHYLIB | 33 | select PHYLIB |
| 34 | select CRC32 | 34 | select CRC32 |
| 35 | select MII | 35 | select MII |
| 36 | depends on OF && COMMON_CLK && HAS_DMA | 36 | depends on OF && HAS_DMA |
| 37 | help | 37 | help |
| 38 | Support for chips using the snps,dwc-qos-ethernet.txt DT binding. | 38 | Support for chips using the snps,dwc-qos-ethernet.txt DT binding. |
| 39 | 39 | ||
| @@ -57,7 +57,7 @@ config DWMAC_ANARION | |||
| 57 | config DWMAC_IPQ806X | 57 | config DWMAC_IPQ806X |
| 58 | tristate "QCA IPQ806x DWMAC support" | 58 | tristate "QCA IPQ806x DWMAC support" |
| 59 | default ARCH_QCOM | 59 | default ARCH_QCOM |
| 60 | depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST) | 60 | depends on OF && (ARCH_QCOM || COMPILE_TEST) |
| 61 | select MFD_SYSCON | 61 | select MFD_SYSCON |
| 62 | help | 62 | help |
| 63 | Support for QCA IPQ806X DWMAC Ethernet. | 63 | Support for QCA IPQ806X DWMAC Ethernet. |
| @@ -100,7 +100,7 @@ config DWMAC_OXNAS | |||
| 100 | config DWMAC_ROCKCHIP | 100 | config DWMAC_ROCKCHIP |
| 101 | tristate "Rockchip dwmac support" | 101 | tristate "Rockchip dwmac support" |
| 102 | default ARCH_ROCKCHIP | 102 | default ARCH_ROCKCHIP |
| 103 | depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST) | 103 | depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST) |
| 104 | select MFD_SYSCON | 104 | select MFD_SYSCON |
| 105 | help | 105 | help |
| 106 | Support for Ethernet controller on Rockchip RK3288 SoC. | 106 | Support for Ethernet controller on Rockchip RK3288 SoC. |
| @@ -123,7 +123,7 @@ config DWMAC_SOCFPGA | |||
| 123 | config DWMAC_STI | 123 | config DWMAC_STI |
| 124 | tristate "STi GMAC support" | 124 | tristate "STi GMAC support" |
| 125 | default ARCH_STI | 125 | default ARCH_STI |
| 126 | depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST) | 126 | depends on OF && (ARCH_STI || COMPILE_TEST) |
| 127 | select MFD_SYSCON | 127 | select MFD_SYSCON |
| 128 | ---help--- | 128 | ---help--- |
| 129 | Support for ethernet controller on STi SOCs. | 129 | Support for ethernet controller on STi SOCs. |
| @@ -147,7 +147,7 @@ config DWMAC_STM32 | |||
| 147 | config DWMAC_SUNXI | 147 | config DWMAC_SUNXI |
| 148 | tristate "Allwinner GMAC support" | 148 | tristate "Allwinner GMAC support" |
| 149 | default ARCH_SUNXI | 149 | default ARCH_SUNXI |
| 150 | depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST) | 150 | depends on OF && (ARCH_SUNXI || COMPILE_TEST) |
| 151 | ---help--- | 151 | ---help--- |
| 152 | Support for Allwinner A20/A31 GMAC ethernet controllers. | 152 | Support for Allwinner A20/A31 GMAC ethernet controllers. |
| 153 | 153 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 1a96dd9c1091..531294f4978b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | |||
| @@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry, | |||
| 61 | struct stmmac_tc_entry *action_entry = entry; | 61 | struct stmmac_tc_entry *action_entry = entry; |
| 62 | const struct tc_action *act; | 62 | const struct tc_action *act; |
| 63 | struct tcf_exts *exts; | 63 | struct tcf_exts *exts; |
| 64 | LIST_HEAD(actions); | 64 | int i; |
| 65 | 65 | ||
| 66 | exts = cls->knode.exts; | 66 | exts = cls->knode.exts; |
| 67 | if (!tcf_exts_has_actions(exts)) | 67 | if (!tcf_exts_has_actions(exts)) |
| @@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry, | |||
| 69 | if (frag) | 69 | if (frag) |
| 70 | action_entry = frag; | 70 | action_entry = frag; |
| 71 | 71 | ||
| 72 | tcf_exts_to_list(exts, &actions); | 72 | tcf_exts_for_each_action(i, act, exts) { |
| 73 | list_for_each_entry(act, &actions, list) { | ||
| 74 | /* Accept */ | 73 | /* Accept */ |
| 75 | if (is_tcf_gact_ok(act)) { | 74 | if (is_tcf_gact_ok(act)) { |
| 76 | action_entry->val.af = 1; | 75 | action_entry->val.af = 1; |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 507f68190cb1..1121a1ec407c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
| 30 | #include <linux/inetdevice.h> | 30 | #include <linux/inetdevice.h> |
| 31 | #include <linux/etherdevice.h> | 31 | #include <linux/etherdevice.h> |
| 32 | #include <linux/pci.h> | ||
| 32 | #include <linux/skbuff.h> | 33 | #include <linux/skbuff.h> |
| 33 | #include <linux/if_vlan.h> | 34 | #include <linux/if_vlan.h> |
| 34 | #include <linux/in.h> | 35 | #include <linux/in.h> |
| @@ -2039,12 +2040,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
| 2039 | { | 2040 | { |
| 2040 | struct net_device *ndev; | 2041 | struct net_device *ndev; |
| 2041 | struct net_device_context *net_device_ctx; | 2042 | struct net_device_context *net_device_ctx; |
| 2043 | struct device *pdev = vf_netdev->dev.parent; | ||
| 2042 | struct netvsc_device *netvsc_dev; | 2044 | struct netvsc_device *netvsc_dev; |
| 2043 | int ret; | 2045 | int ret; |
| 2044 | 2046 | ||
| 2045 | if (vf_netdev->addr_len != ETH_ALEN) | 2047 | if (vf_netdev->addr_len != ETH_ALEN) |
| 2046 | return NOTIFY_DONE; | 2048 | return NOTIFY_DONE; |
| 2047 | 2049 | ||
| 2050 | if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev)) | ||
| 2051 | return NOTIFY_DONE; | ||
| 2052 | |||
| 2048 | /* | 2053 | /* |
| 2049 | * We will use the MAC address to locate the synthetic interface to | 2054 | * We will use the MAC address to locate the synthetic interface to |
| 2050 | * associate with the VF interface. If we don't find a matching | 2055 | * associate with the VF interface. If we don't find a matching |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 97742708460b..2cd71bdb6484 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
| 5217 | netdev->hw_features &= ~NETIF_F_RXCSUM; | 5217 | netdev->hw_features &= ~NETIF_F_RXCSUM; |
| 5218 | } | 5218 | } |
| 5219 | 5219 | ||
| 5220 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && | 5220 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && |
| 5221 | udev->serial && !strcmp(udev->serial, "000001000000")) { | 5221 | (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) { |
| 5222 | dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); | 5222 | dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); |
| 5223 | set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); | 5223 | set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); |
| 5224 | } | 5224 | } |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1b9951d2067e..d668682f91df 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, | |||
| 316 | old_value = *dbbuf_db; | 316 | old_value = *dbbuf_db; |
| 317 | *dbbuf_db = value; | 317 | *dbbuf_db = value; |
| 318 | 318 | ||
| 319 | /* | ||
| 320 | * Ensure that the doorbell is updated before reading the event | ||
| 321 | * index from memory. The controller needs to provide similar | ||
| 322 | * ordering to ensure the envent index is updated before reading | ||
| 323 | * the doorbell. | ||
| 324 | */ | ||
| 325 | mb(); | ||
| 326 | |||
| 319 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) | 327 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) |
| 320 | return false; | 328 | return false; |
| 321 | } | 329 | } |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ebf3e7a6c49e..b5ec96abd048 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
| @@ -1210,7 +1210,7 @@ static int __init nvmet_init(void) | |||
| 1210 | 1210 | ||
| 1211 | error = nvmet_init_discovery(); | 1211 | error = nvmet_init_discovery(); |
| 1212 | if (error) | 1212 | if (error) |
| 1213 | goto out; | 1213 | goto out_free_work_queue; |
| 1214 | 1214 | ||
| 1215 | error = nvmet_init_configfs(); | 1215 | error = nvmet_init_configfs(); |
| 1216 | if (error) | 1216 | if (error) |
| @@ -1219,6 +1219,8 @@ static int __init nvmet_init(void) | |||
| 1219 | 1219 | ||
| 1220 | out_exit_discovery: | 1220 | out_exit_discovery: |
| 1221 | nvmet_exit_discovery(); | 1221 | nvmet_exit_discovery(); |
| 1222 | out_free_work_queue: | ||
| 1223 | destroy_workqueue(buffered_io_wq); | ||
| 1222 | out: | 1224 | out: |
| 1223 | return error; | 1225 | return error; |
| 1224 | } | 1226 | } |
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 34712def81b1..5251689a1d9a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c | |||
| @@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work) | |||
| 311 | struct fcloop_tport *tport = tls_req->tport; | 311 | struct fcloop_tport *tport = tls_req->tport; |
| 312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; | 312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; |
| 313 | 313 | ||
| 314 | if (tport->remoteport) | 314 | if (!tport || tport->remoteport) |
| 315 | lsreq->done(lsreq, tls_req->status); | 315 | lsreq->done(lsreq, tls_req->status); |
| 316 | } | 316 | } |
| 317 | 317 | ||
| @@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, | |||
| 329 | 329 | ||
| 330 | if (!rport->targetport) { | 330 | if (!rport->targetport) { |
| 331 | tls_req->status = -ECONNREFUSED; | 331 | tls_req->status = -ECONNREFUSED; |
| 332 | tls_req->tport = NULL; | ||
| 332 | schedule_work(&tls_req->work); | 333 | schedule_work(&tls_req->work); |
| 333 | return ret; | 334 | return ret; |
| 334 | } | 335 | } |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 466e3c8582f0..9095b8290150 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex); | |||
| 54 | */ | 54 | */ |
| 55 | DEFINE_RAW_SPINLOCK(devtree_lock); | 55 | DEFINE_RAW_SPINLOCK(devtree_lock); |
| 56 | 56 | ||
| 57 | bool of_node_name_eq(const struct device_node *np, const char *name) | ||
| 58 | { | ||
| 59 | const char *node_name; | ||
| 60 | size_t len; | ||
| 61 | |||
| 62 | if (!np) | ||
| 63 | return false; | ||
| 64 | |||
| 65 | node_name = kbasename(np->full_name); | ||
| 66 | len = strchrnul(node_name, '@') - node_name; | ||
| 67 | |||
| 68 | return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); | ||
| 69 | } | ||
| 70 | |||
| 71 | bool of_node_name_prefix(const struct device_node *np, const char *prefix) | ||
| 72 | { | ||
| 73 | if (!np) | ||
| 74 | return false; | ||
| 75 | |||
| 76 | return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; | ||
| 77 | } | ||
| 78 | |||
| 57 | int of_n_addr_cells(struct device_node *np) | 79 | int of_n_addr_cells(struct device_node *np) |
| 58 | { | 80 | { |
| 59 | u32 cells; | 81 | u32 cells; |
| @@ -720,6 +742,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node, | |||
| 720 | EXPORT_SYMBOL(of_get_next_available_child); | 742 | EXPORT_SYMBOL(of_get_next_available_child); |
| 721 | 743 | ||
| 722 | /** | 744 | /** |
| 745 | * of_get_compatible_child - Find compatible child node | ||
| 746 | * @parent: parent node | ||
| 747 | * @compatible: compatible string | ||
| 748 | * | ||
| 749 | * Lookup child node whose compatible property contains the given compatible | ||
| 750 | * string. | ||
| 751 | * | ||
| 752 | * Returns a node pointer with refcount incremented, use of_node_put() on it | ||
| 753 | * when done; or NULL if not found. | ||
| 754 | */ | ||
| 755 | struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
| 756 | const char *compatible) | ||
| 757 | { | ||
| 758 | struct device_node *child; | ||
| 759 | |||
| 760 | for_each_child_of_node(parent, child) { | ||
| 761 | if (of_device_is_compatible(child, compatible)) | ||
| 762 | break; | ||
| 763 | } | ||
| 764 | |||
| 765 | return child; | ||
| 766 | } | ||
| 767 | EXPORT_SYMBOL(of_get_compatible_child); | ||
| 768 | |||
| 769 | /** | ||
| 723 | * of_get_child_by_name - Find the child node by name for a given parent | 770 | * of_get_child_by_name - Find the child node by name for a given parent |
| 724 | * @node: parent node | 771 | * @node: parent node |
| 725 | * @name: child name to look for. | 772 | * @name: child name to look for. |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 977a8307fbb1..4f2816559205 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
| @@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz, | |||
| 260 | 260 | ||
| 261 | mutex_lock(&tz->lock); | 261 | mutex_lock(&tz->lock); |
| 262 | 262 | ||
| 263 | if (mode == THERMAL_DEVICE_ENABLED) | 263 | if (mode == THERMAL_DEVICE_ENABLED) { |
| 264 | tz->polling_delay = data->polling_delay; | 264 | tz->polling_delay = data->polling_delay; |
| 265 | else | 265 | tz->passive_delay = data->passive_delay; |
| 266 | } else { | ||
| 266 | tz->polling_delay = 0; | 267 | tz->polling_delay = 0; |
| 268 | tz->passive_delay = 0; | ||
| 269 | } | ||
| 267 | 270 | ||
| 268 | mutex_unlock(&tz->lock); | 271 | mutex_unlock(&tz->lock); |
| 269 | 272 | ||
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index c866cc165960..450ed66edf58 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c | |||
| @@ -1,16 +1,6 @@ | |||
| 1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | * Copyright 2016 Freescale Semiconductor, Inc. | 2 | // |
| 3 | * | 3 | // Copyright 2016 Freescale Semiconductor, Inc. |
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | 4 | ||
| 15 | #include <linux/module.h> | 5 | #include <linux/module.h> |
| 16 | #include <linux/platform_device.h> | 6 | #include <linux/platform_device.h> |
| @@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
| 197 | int ret; | 187 | int ret; |
| 198 | struct qoriq_tmu_data *data; | 188 | struct qoriq_tmu_data *data; |
| 199 | struct device_node *np = pdev->dev.of_node; | 189 | struct device_node *np = pdev->dev.of_node; |
| 200 | u32 site = 0; | 190 | u32 site; |
| 201 | 191 | ||
| 202 | if (!np) { | 192 | if (!np) { |
| 203 | dev_err(&pdev->dev, "Device OF-Node is NULL"); | 193 | dev_err(&pdev->dev, "Device OF-Node is NULL"); |
| @@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
| 233 | if (ret < 0) | 223 | if (ret < 0) |
| 234 | goto err_tmu; | 224 | goto err_tmu; |
| 235 | 225 | ||
| 236 | data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id, | 226 | data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, |
| 237 | data, &tmu_tz_ops); | 227 | data->sensor_id, |
| 228 | data, &tmu_tz_ops); | ||
| 238 | if (IS_ERR(data->tz)) { | 229 | if (IS_ERR(data->tz)) { |
| 239 | ret = PTR_ERR(data->tz); | 230 | ret = PTR_ERR(data->tz); |
| 240 | dev_err(&pdev->dev, | 231 | dev_err(&pdev->dev, |
| @@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
| 243 | } | 234 | } |
| 244 | 235 | ||
| 245 | /* Enable monitoring */ | 236 | /* Enable monitoring */ |
| 246 | site |= 0x1 << (15 - data->sensor_id); | 237 | site = 0x1 << (15 - data->sensor_id); |
| 247 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); | 238 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); |
| 248 | 239 | ||
| 249 | return 0; | 240 | return 0; |
| @@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev) | |||
| 261 | { | 252 | { |
| 262 | struct qoriq_tmu_data *data = platform_get_drvdata(pdev); | 253 | struct qoriq_tmu_data *data = platform_get_drvdata(pdev); |
| 263 | 254 | ||
| 264 | thermal_zone_of_sensor_unregister(&pdev->dev, data->tz); | ||
| 265 | |||
| 266 | /* Disable monitoring */ | 255 | /* Disable monitoring */ |
| 267 | tmu_write(data, TMR_DISABLE, &data->regs->tmr); | 256 | tmu_write(data, TMR_DISABLE, &data->regs->tmr); |
| 268 | 257 | ||
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 766521eb7071..7aed5337bdd3 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c | |||
| @@ -1,19 +1,10 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * R-Car Gen3 THS thermal sensor driver | 3 | * R-Car Gen3 THS thermal sensor driver |
| 3 | * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. | 4 | * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. |
| 4 | * | 5 | * |
| 5 | * Copyright (C) 2016 Renesas Electronics Corporation. | 6 | * Copyright (C) 2016 Renesas Electronics Corporation. |
| 6 | * Copyright (C) 2016 Sang Engineering | 7 | * Copyright (C) 2016 Sang Engineering |
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; version 2 of the License. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | */ | 8 | */ |
| 18 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
| 19 | #include <linux/err.h> | 10 | #include <linux/err.h> |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index e77e63070e99..78f932822d38 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
| @@ -1,21 +1,9 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * R-Car THS/TSC thermal sensor driver | 3 | * R-Car THS/TSC thermal sensor driver |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2012 Renesas Solutions Corp. |
| 5 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | 6 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> |
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; version 2 of the License. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, but | ||
| 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | * General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License along | ||
| 17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
| 19 | */ | 7 | */ |
| 20 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
| 21 | #include <linux/err.h> | 9 | #include <linux/err.h> |
| @@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = { | |||
| 660 | }; | 648 | }; |
| 661 | module_platform_driver(rcar_thermal_driver); | 649 | module_platform_driver(rcar_thermal_driver); |
| 662 | 650 | ||
| 663 | MODULE_LICENSE("GPL"); | 651 | MODULE_LICENSE("GPL v2"); |
| 664 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); | 652 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); |
| 665 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); | 653 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 96c1d8400822..b13c6b4b2c66 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d, | |||
| 952 | list_for_each_entry_safe(node, n, &d->pending_list, node) { | 952 | list_for_each_entry_safe(node, n, &d->pending_list, node) { |
| 953 | struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; | 953 | struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; |
| 954 | if (msg->iova <= vq_msg->iova && | 954 | if (msg->iova <= vq_msg->iova && |
| 955 | msg->iova + msg->size - 1 > vq_msg->iova && | 955 | msg->iova + msg->size - 1 >= vq_msg->iova && |
| 956 | vq_msg->type == VHOST_IOTLB_MISS) { | 956 | vq_msg->type == VHOST_IOTLB_MISS) { |
| 957 | vhost_poll_queue(&node->vq->poll); | 957 | vhost_poll_queue(&node->vq->poll); |
| 958 | list_del(&node->node); | 958 | list_del(&node->node); |
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index f2088838f690..5b471889d723 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
| @@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev, | |||
| 402 | } | 402 | } |
| 403 | static DEVICE_ATTR_RO(modalias); | 403 | static DEVICE_ATTR_RO(modalias); |
| 404 | 404 | ||
| 405 | static ssize_t state_show(struct device *dev, | ||
| 406 | struct device_attribute *attr, char *buf) | ||
| 407 | { | ||
| 408 | return sprintf(buf, "%s\n", | ||
| 409 | xenbus_strstate(to_xenbus_device(dev)->state)); | ||
| 410 | } | ||
| 411 | static DEVICE_ATTR_RO(state); | ||
| 412 | |||
| 405 | static struct attribute *xenbus_dev_attrs[] = { | 413 | static struct attribute *xenbus_dev_attrs[] = { |
| 406 | &dev_attr_nodename.attr, | 414 | &dev_attr_nodename.attr, |
| 407 | &dev_attr_devtype.attr, | 415 | &dev_attr_devtype.attr, |
| 408 | &dev_attr_modalias.attr, | 416 | &dev_attr_modalias.attr, |
| 417 | &dev_attr_state.attr, | ||
| 409 | NULL, | 418 | NULL, |
| 410 | }; | 419 | }; |
| 411 | 420 | ||
diff --git a/fs/buffer.c b/fs/buffer.c index 4cc679d5bf58..6f1ae3ac9789 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
| @@ -39,7 +39,6 @@ | |||
| 39 | #include <linux/buffer_head.h> | 39 | #include <linux/buffer_head.h> |
| 40 | #include <linux/task_io_accounting_ops.h> | 40 | #include <linux/task_io_accounting_ops.h> |
| 41 | #include <linux/bio.h> | 41 | #include <linux/bio.h> |
| 42 | #include <linux/notifier.h> | ||
| 43 | #include <linux/cpu.h> | 42 | #include <linux/cpu.h> |
| 44 | #include <linux/bitops.h> | 43 | #include <linux/bitops.h> |
| 45 | #include <linux/mpage.h> | 44 | #include <linux/mpage.h> |
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index ec3fba7d492f..488a9e7f8f66 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/mpage.h> | 24 | #include <linux/mpage.h> |
| 25 | #include <linux/user_namespace.h> | 25 | #include <linux/user_namespace.h> |
| 26 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
| 27 | #include <linux/blkdev.h> | ||
| 27 | 28 | ||
| 28 | #include "isofs.h" | 29 | #include "isofs.h" |
| 29 | #include "zisofs.h" | 30 | #include "zisofs.h" |
| @@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) | |||
| 653 | /* | 654 | /* |
| 654 | * What if bugger tells us to go beyond page size? | 655 | * What if bugger tells us to go beyond page size? |
| 655 | */ | 656 | */ |
| 657 | if (bdev_logical_block_size(s->s_bdev) > 2048) { | ||
| 658 | printk(KERN_WARNING | ||
| 659 | "ISOFS: unsupported/invalid hardware sector size %d\n", | ||
| 660 | bdev_logical_block_size(s->s_bdev)); | ||
| 661 | goto out_freesbi; | ||
| 662 | } | ||
| 656 | opt.blocksize = sb_min_blocksize(s, opt.blocksize); | 663 | opt.blocksize = sb_min_blocksize(s, opt.blocksize); |
| 657 | 664 | ||
| 658 | sbi->s_high_sierra = 0; /* default is iso9660 */ | 665 | sbi->s_high_sierra = 0; /* default is iso9660 */ |
diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 05506d60131c..59cdb27826de 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c | |||
| @@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) | |||
| 132 | struct fsnotify_mark *mark; | 132 | struct fsnotify_mark *mark; |
| 133 | 133 | ||
| 134 | assert_spin_locked(&conn->lock); | 134 | assert_spin_locked(&conn->lock); |
| 135 | /* We can get detached connector here when inode is getting unlinked. */ | ||
| 136 | if (!fsnotify_valid_obj_type(conn->type)) | ||
| 137 | return; | ||
| 135 | hlist_for_each_entry(mark, &conn->list, obj_list) { | 138 | hlist_for_each_entry(mark, &conn->list, obj_list) { |
| 136 | if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) | 139 | if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) |
| 137 | new_mask |= mark->mask; | 140 | new_mask |= mark->mask; |
| 138 | } | 141 | } |
| 139 | if (WARN_ON(!fsnotify_valid_obj_type(conn->type))) | ||
| 140 | return; | ||
| 141 | |||
| 142 | *fsnotify_conn_mask_p(conn) = new_mask; | 142 | *fsnotify_conn_mask_p(conn) = new_mask; |
| 143 | } | 143 | } |
| 144 | 144 | ||
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 860bfbe7a07a..f0cbf58ad4da 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/quotaops.h> | 18 | #include <linux/quotaops.h> |
| 19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 20 | #include <linux/writeback.h> | 20 | #include <linux/writeback.h> |
| 21 | #include <linux/nospec.h> | ||
| 21 | 22 | ||
| 22 | static int check_quotactl_permission(struct super_block *sb, int type, int cmd, | 23 | static int check_quotactl_permission(struct super_block *sb, int type, int cmd, |
| 23 | qid_t id) | 24 | qid_t id) |
| @@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr) | |||
| 120 | struct if_dqinfo uinfo; | 121 | struct if_dqinfo uinfo; |
| 121 | int ret; | 122 | int ret; |
| 122 | 123 | ||
| 123 | /* This checks whether qc_state has enough entries... */ | ||
| 124 | BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS); | ||
| 125 | if (!sb->s_qcop->get_state) | 124 | if (!sb->s_qcop->get_state) |
| 126 | return -ENOSYS; | 125 | return -ENOSYS; |
| 127 | ret = sb->s_qcop->get_state(sb, &state); | 126 | ret = sb->s_qcop->get_state(sb, &state); |
| @@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs) | |||
| 354 | * GETXSTATE quotactl has space for just one set of time limits so | 353 | * GETXSTATE quotactl has space for just one set of time limits so |
| 355 | * report them for the first enabled quota type | 354 | * report them for the first enabled quota type |
| 356 | */ | 355 | */ |
| 357 | for (type = 0; type < XQM_MAXQUOTAS; type++) | 356 | for (type = 0; type < MAXQUOTAS; type++) |
| 358 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) | 357 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) |
| 359 | break; | 358 | break; |
| 360 | BUG_ON(type == XQM_MAXQUOTAS); | 359 | BUG_ON(type == MAXQUOTAS); |
| 361 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; | 360 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; |
| 362 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; | 361 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; |
| 363 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; | 362 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; |
| @@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs) | |||
| 427 | * GETXSTATV quotactl has space for just one set of time limits so | 426 | * GETXSTATV quotactl has space for just one set of time limits so |
| 428 | * report them for the first enabled quota type | 427 | * report them for the first enabled quota type |
| 429 | */ | 428 | */ |
| 430 | for (type = 0; type < XQM_MAXQUOTAS; type++) | 429 | for (type = 0; type < MAXQUOTAS; type++) |
| 431 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) | 430 | if (state.s_state[type].flags & QCI_ACCT_ENABLED) |
| 432 | break; | 431 | break; |
| 433 | BUG_ON(type == XQM_MAXQUOTAS); | 432 | BUG_ON(type == MAXQUOTAS); |
| 434 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; | 433 | fqs->qs_btimelimit = state.s_state[type].spc_timelimit; |
| 435 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; | 434 | fqs->qs_itimelimit = state.s_state[type].ino_timelimit; |
| 436 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; | 435 | fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; |
| @@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, | |||
| 701 | { | 700 | { |
| 702 | int ret; | 701 | int ret; |
| 703 | 702 | ||
| 704 | if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) | 703 | if (type >= MAXQUOTAS) |
| 705 | return -EINVAL; | 704 | return -EINVAL; |
| 705 | type = array_index_nospec(type, MAXQUOTAS); | ||
| 706 | /* | 706 | /* |
| 707 | * Quota not supported on this fs? Check this before s_quota_types | 707 | * Quota not supported on this fs? Check this before s_quota_types |
| 708 | * since they needn't be set if quota is not supported at all. | 708 | * since they needn't be set if quota is not supported at all. |
diff --git a/fs/udf/super.c b/fs/udf/super.c index 3040dc2a32f6..6f515651a2c2 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
| @@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb, | |||
| 764 | struct kernel_lb_addr *root) | 764 | struct kernel_lb_addr *root) |
| 765 | { | 765 | { |
| 766 | struct buffer_head *bh = NULL; | 766 | struct buffer_head *bh = NULL; |
| 767 | long lastblock; | ||
| 768 | uint16_t ident; | 767 | uint16_t ident; |
| 769 | struct udf_sb_info *sbi; | ||
| 770 | 768 | ||
| 771 | if (fileset->logicalBlockNum != 0xFFFFFFFF || | 769 | if (fileset->logicalBlockNum != 0xFFFFFFFF || |
| 772 | fileset->partitionReferenceNum != 0xFFFF) { | 770 | fileset->partitionReferenceNum != 0xFFFF) { |
| @@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb, | |||
| 779 | return 1; | 777 | return 1; |
| 780 | } | 778 | } |
| 781 | 779 | ||
| 782 | } | ||
| 783 | |||
| 784 | sbi = UDF_SB(sb); | ||
| 785 | if (!bh) { | ||
| 786 | /* Search backwards through the partitions */ | ||
| 787 | struct kernel_lb_addr newfileset; | ||
| 788 | |||
| 789 | /* --> cvg: FIXME - is it reasonable? */ | ||
| 790 | return 1; | ||
| 791 | |||
| 792 | for (newfileset.partitionReferenceNum = sbi->s_partitions - 1; | ||
| 793 | (newfileset.partitionReferenceNum != 0xFFFF && | ||
| 794 | fileset->logicalBlockNum == 0xFFFFFFFF && | ||
| 795 | fileset->partitionReferenceNum == 0xFFFF); | ||
| 796 | newfileset.partitionReferenceNum--) { | ||
| 797 | lastblock = sbi->s_partmaps | ||
| 798 | [newfileset.partitionReferenceNum] | ||
| 799 | .s_partition_len; | ||
| 800 | newfileset.logicalBlockNum = 0; | ||
| 801 | |||
| 802 | do { | ||
| 803 | bh = udf_read_ptagged(sb, &newfileset, 0, | ||
| 804 | &ident); | ||
| 805 | if (!bh) { | ||
| 806 | newfileset.logicalBlockNum++; | ||
| 807 | continue; | ||
| 808 | } | ||
| 809 | |||
| 810 | switch (ident) { | ||
| 811 | case TAG_IDENT_SBD: | ||
| 812 | { | ||
| 813 | struct spaceBitmapDesc *sp; | ||
| 814 | sp = (struct spaceBitmapDesc *) | ||
| 815 | bh->b_data; | ||
| 816 | newfileset.logicalBlockNum += 1 + | ||
| 817 | ((le32_to_cpu(sp->numOfBytes) + | ||
| 818 | sizeof(struct spaceBitmapDesc) | ||
| 819 | - 1) >> sb->s_blocksize_bits); | ||
| 820 | brelse(bh); | ||
| 821 | break; | ||
| 822 | } | ||
| 823 | case TAG_IDENT_FSD: | ||
| 824 | *fileset = newfileset; | ||
| 825 | break; | ||
| 826 | default: | ||
| 827 | newfileset.logicalBlockNum++; | ||
| 828 | brelse(bh); | ||
| 829 | bh = NULL; | ||
| 830 | break; | ||
| 831 | } | ||
| 832 | } while (newfileset.logicalBlockNum < lastblock && | ||
| 833 | fileset->logicalBlockNum == 0xFFFFFFFF && | ||
| 834 | fileset->partitionReferenceNum == 0xFFFF); | ||
| 835 | } | ||
| 836 | } | ||
| 837 | |||
| 838 | if ((fileset->logicalBlockNum != 0xFFFFFFFF || | ||
| 839 | fileset->partitionReferenceNum != 0xFFFF) && bh) { | ||
| 840 | udf_debug("Fileset at block=%u, partition=%u\n", | 780 | udf_debug("Fileset at block=%u, partition=%u\n", |
| 841 | fileset->logicalBlockNum, | 781 | fileset->logicalBlockNum, |
| 842 | fileset->partitionReferenceNum); | 782 | fileset->partitionReferenceNum); |
| 843 | 783 | ||
| 844 | sbi->s_partition = fileset->partitionReferenceNum; | 784 | UDF_SB(sb)->s_partition = fileset->partitionReferenceNum; |
| 845 | udf_load_fileset(sb, bh, root); | 785 | udf_load_fileset(sb, bh, root); |
| 846 | brelse(bh); | 786 | brelse(bh); |
| 847 | return 0; | 787 | return 0; |
| @@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ | |||
| 1570 | */ | 1510 | */ |
| 1571 | #define PART_DESC_ALLOC_STEP 32 | 1511 | #define PART_DESC_ALLOC_STEP 32 |
| 1572 | 1512 | ||
| 1513 | struct part_desc_seq_scan_data { | ||
| 1514 | struct udf_vds_record rec; | ||
| 1515 | u32 partnum; | ||
| 1516 | }; | ||
| 1517 | |||
| 1573 | struct desc_seq_scan_data { | 1518 | struct desc_seq_scan_data { |
| 1574 | struct udf_vds_record vds[VDS_POS_LENGTH]; | 1519 | struct udf_vds_record vds[VDS_POS_LENGTH]; |
| 1575 | unsigned int size_part_descs; | 1520 | unsigned int size_part_descs; |
| 1576 | struct udf_vds_record *part_descs_loc; | 1521 | unsigned int num_part_descs; |
| 1522 | struct part_desc_seq_scan_data *part_descs_loc; | ||
| 1577 | }; | 1523 | }; |
| 1578 | 1524 | ||
| 1579 | static struct udf_vds_record *handle_partition_descriptor( | 1525 | static struct udf_vds_record *handle_partition_descriptor( |
| @@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor( | |||
| 1582 | { | 1528 | { |
| 1583 | struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; | 1529 | struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; |
| 1584 | int partnum; | 1530 | int partnum; |
| 1531 | int i; | ||
| 1585 | 1532 | ||
| 1586 | partnum = le16_to_cpu(desc->partitionNumber); | 1533 | partnum = le16_to_cpu(desc->partitionNumber); |
| 1587 | if (partnum >= data->size_part_descs) { | 1534 | for (i = 0; i < data->num_part_descs; i++) |
| 1588 | struct udf_vds_record *new_loc; | 1535 | if (partnum == data->part_descs_loc[i].partnum) |
| 1536 | return &(data->part_descs_loc[i].rec); | ||
| 1537 | if (data->num_part_descs >= data->size_part_descs) { | ||
| 1538 | struct part_desc_seq_scan_data *new_loc; | ||
| 1589 | unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); | 1539 | unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); |
| 1590 | 1540 | ||
| 1591 | new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); | 1541 | new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); |
| @@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor( | |||
| 1597 | data->part_descs_loc = new_loc; | 1547 | data->part_descs_loc = new_loc; |
| 1598 | data->size_part_descs = new_size; | 1548 | data->size_part_descs = new_size; |
| 1599 | } | 1549 | } |
| 1600 | return &(data->part_descs_loc[partnum]); | 1550 | return &(data->part_descs_loc[data->num_part_descs++].rec); |
| 1601 | } | 1551 | } |
| 1602 | 1552 | ||
| 1603 | 1553 | ||
| @@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence( | |||
| 1647 | 1597 | ||
| 1648 | memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); | 1598 | memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); |
| 1649 | data.size_part_descs = PART_DESC_ALLOC_STEP; | 1599 | data.size_part_descs = PART_DESC_ALLOC_STEP; |
| 1600 | data.num_part_descs = 0; | ||
| 1650 | data.part_descs_loc = kcalloc(data.size_part_descs, | 1601 | data.part_descs_loc = kcalloc(data.size_part_descs, |
| 1651 | sizeof(*data.part_descs_loc), | 1602 | sizeof(*data.part_descs_loc), |
| 1652 | GFP_KERNEL); | 1603 | GFP_KERNEL); |
| @@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence( | |||
| 1658 | * are in it. | 1609 | * are in it. |
| 1659 | */ | 1610 | */ |
| 1660 | for (; (!done && block <= lastblock); block++) { | 1611 | for (; (!done && block <= lastblock); block++) { |
| 1661 | |||
| 1662 | bh = udf_read_tagged(sb, block, block, &ident); | 1612 | bh = udf_read_tagged(sb, block, block, &ident); |
| 1663 | if (!bh) | 1613 | if (!bh) |
| 1664 | break; | 1614 | break; |
| @@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence( | |||
| 1730 | } | 1680 | } |
| 1731 | 1681 | ||
| 1732 | /* Now handle prevailing Partition Descriptors */ | 1682 | /* Now handle prevailing Partition Descriptors */ |
| 1733 | for (i = 0; i < data.size_part_descs; i++) { | 1683 | for (i = 0; i < data.num_part_descs; i++) { |
| 1734 | if (data.part_descs_loc[i].block) { | 1684 | ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); |
| 1735 | ret = udf_load_partdesc(sb, | 1685 | if (ret < 0) |
| 1736 | data.part_descs_loc[i].block); | 1686 | return ret; |
| 1737 | if (ret < 0) | ||
| 1738 | return ret; | ||
| 1739 | } | ||
| 1740 | } | 1687 | } |
| 1741 | 1688 | ||
| 1742 | return 0; | 1689 | return 0; |
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index ca1d2cc2cdfa..18863d56273c 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h | |||
| @@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, | |||
| 199 | 199 | ||
| 200 | #define __declare_arg_0(a0, res) \ | 200 | #define __declare_arg_0(a0, res) \ |
| 201 | struct arm_smccc_res *___res = res; \ | 201 | struct arm_smccc_res *___res = res; \ |
| 202 | register u32 r0 asm("r0") = a0; \ | 202 | register unsigned long r0 asm("r0") = (u32)a0; \ |
| 203 | register unsigned long r1 asm("r1"); \ | 203 | register unsigned long r1 asm("r1"); \ |
| 204 | register unsigned long r2 asm("r2"); \ | 204 | register unsigned long r2 asm("r2"); \ |
| 205 | register unsigned long r3 asm("r3") | 205 | register unsigned long r3 asm("r3") |
| 206 | 206 | ||
| 207 | #define __declare_arg_1(a0, a1, res) \ | 207 | #define __declare_arg_1(a0, a1, res) \ |
| 208 | typeof(a1) __a1 = a1; \ | ||
| 208 | struct arm_smccc_res *___res = res; \ | 209 | struct arm_smccc_res *___res = res; \ |
| 209 | register u32 r0 asm("r0") = a0; \ | 210 | register unsigned long r0 asm("r0") = (u32)a0; \ |
| 210 | register typeof(a1) r1 asm("r1") = a1; \ | 211 | register unsigned long r1 asm("r1") = __a1; \ |
| 211 | register unsigned long r2 asm("r2"); \ | 212 | register unsigned long r2 asm("r2"); \ |
| 212 | register unsigned long r3 asm("r3") | 213 | register unsigned long r3 asm("r3") |
| 213 | 214 | ||
| 214 | #define __declare_arg_2(a0, a1, a2, res) \ | 215 | #define __declare_arg_2(a0, a1, a2, res) \ |
| 216 | typeof(a1) __a1 = a1; \ | ||
| 217 | typeof(a2) __a2 = a2; \ | ||
| 215 | struct arm_smccc_res *___res = res; \ | 218 | struct arm_smccc_res *___res = res; \ |
| 216 | register u32 r0 asm("r0") = a0; \ | 219 | register unsigned long r0 asm("r0") = (u32)a0; \ |
| 217 | register typeof(a1) r1 asm("r1") = a1; \ | 220 | register unsigned long r1 asm("r1") = __a1; \ |
| 218 | register typeof(a2) r2 asm("r2") = a2; \ | 221 | register unsigned long r2 asm("r2") = __a2; \ |
| 219 | register unsigned long r3 asm("r3") | 222 | register unsigned long r3 asm("r3") |
| 220 | 223 | ||
| 221 | #define __declare_arg_3(a0, a1, a2, a3, res) \ | 224 | #define __declare_arg_3(a0, a1, a2, a3, res) \ |
| 225 | typeof(a1) __a1 = a1; \ | ||
| 226 | typeof(a2) __a2 = a2; \ | ||
| 227 | typeof(a3) __a3 = a3; \ | ||
| 222 | struct arm_smccc_res *___res = res; \ | 228 | struct arm_smccc_res *___res = res; \ |
| 223 | register u32 r0 asm("r0") = a0; \ | 229 | register unsigned long r0 asm("r0") = (u32)a0; \ |
| 224 | register typeof(a1) r1 asm("r1") = a1; \ | 230 | register unsigned long r1 asm("r1") = __a1; \ |
| 225 | register typeof(a2) r2 asm("r2") = a2; \ | 231 | register unsigned long r2 asm("r2") = __a2; \ |
| 226 | register typeof(a3) r3 asm("r3") = a3 | 232 | register unsigned long r3 asm("r3") = __a3 |
| 227 | 233 | ||
| 228 | #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ | 234 | #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ |
| 235 | typeof(a4) __a4 = a4; \ | ||
| 229 | __declare_arg_3(a0, a1, a2, a3, res); \ | 236 | __declare_arg_3(a0, a1, a2, a3, res); \ |
| 230 | register typeof(a4) r4 asm("r4") = a4 | 237 | register unsigned long r4 asm("r4") = __a4 |
| 231 | 238 | ||
| 232 | #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ | 239 | #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ |
| 240 | typeof(a5) __a5 = a5; \ | ||
| 233 | __declare_arg_4(a0, a1, a2, a3, a4, res); \ | 241 | __declare_arg_4(a0, a1, a2, a3, a4, res); \ |
| 234 | register typeof(a5) r5 asm("r5") = a5 | 242 | register unsigned long r5 asm("r5") = __a5 |
| 235 | 243 | ||
| 236 | #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ | 244 | #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ |
| 245 | typeof(a6) __a6 = a6; \ | ||
| 237 | __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ | 246 | __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ |
| 238 | register typeof(a6) r6 asm("r6") = a6 | 247 | register unsigned long r6 asm("r6") = __a6 |
| 239 | 248 | ||
| 240 | #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ | 249 | #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ |
| 250 | typeof(a7) __a7 = a7; \ | ||
| 241 | __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ | 251 | __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ |
| 242 | register typeof(a7) r7 asm("r7") = a7 | 252 | register unsigned long r7 asm("r7") = __a7 |
| 243 | 253 | ||
| 244 | #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) | 254 | #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) |
| 245 | #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) | 255 | #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b79387fd57da..65b4eaed1d96 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
| @@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) | |||
| 855 | } | 855 | } |
| 856 | 856 | ||
| 857 | u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); | 857 | u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); |
| 858 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); | 858 | void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred); |
| 859 | 859 | ||
| 860 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); | 860 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); |
| 861 | /** | 861 | /** |
diff --git a/include/linux/of.h b/include/linux/of.h index 4d25e4f952d9..99b0ebf49632 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) | |||
| 256 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) | 256 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) |
| 257 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) | 257 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) |
| 258 | 258 | ||
| 259 | extern bool of_node_name_eq(const struct device_node *np, const char *name); | ||
| 260 | extern bool of_node_name_prefix(const struct device_node *np, const char *prefix); | ||
| 261 | |||
| 259 | static inline const char *of_node_full_name(const struct device_node *np) | 262 | static inline const char *of_node_full_name(const struct device_node *np) |
| 260 | { | 263 | { |
| 261 | return np ? np->full_name : "<no-node>"; | 264 | return np ? np->full_name : "<no-node>"; |
| @@ -290,6 +293,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node, | |||
| 290 | extern struct device_node *of_get_next_available_child( | 293 | extern struct device_node *of_get_next_available_child( |
| 291 | const struct device_node *node, struct device_node *prev); | 294 | const struct device_node *node, struct device_node *prev); |
| 292 | 295 | ||
| 296 | extern struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
| 297 | const char *compatible); | ||
| 293 | extern struct device_node *of_get_child_by_name(const struct device_node *node, | 298 | extern struct device_node *of_get_child_by_name(const struct device_node *node, |
| 294 | const char *name); | 299 | const char *name); |
| 295 | 300 | ||
| @@ -561,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode) | |||
| 561 | return NULL; | 566 | return NULL; |
| 562 | } | 567 | } |
| 563 | 568 | ||
| 569 | static inline bool of_node_name_eq(const struct device_node *np, const char *name) | ||
| 570 | { | ||
| 571 | return false; | ||
| 572 | } | ||
| 573 | |||
| 574 | static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix) | ||
| 575 | { | ||
| 576 | return false; | ||
| 577 | } | ||
| 578 | |||
| 564 | static inline const char* of_node_full_name(const struct device_node *np) | 579 | static inline const char* of_node_full_name(const struct device_node *np) |
| 565 | { | 580 | { |
| 566 | return "<no-node>"; | 581 | return "<no-node>"; |
| @@ -632,6 +647,12 @@ static inline bool of_have_populated_dt(void) | |||
| 632 | return false; | 647 | return false; |
| 633 | } | 648 | } |
| 634 | 649 | ||
| 650 | static inline struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
| 651 | const char *compatible) | ||
| 652 | { | ||
| 653 | return NULL; | ||
| 654 | } | ||
| 655 | |||
| 635 | static inline struct device_node *of_get_child_by_name( | 656 | static inline struct device_node *of_get_child_by_name( |
| 636 | const struct device_node *node, | 657 | const struct device_node *node, |
| 637 | const char *name) | 658 | const char *name) |
| @@ -967,6 +988,18 @@ static inline struct device_node *of_find_matching_node( | |||
| 967 | return of_find_matching_node_and_match(from, matches, NULL); | 988 | return of_find_matching_node_and_match(from, matches, NULL); |
| 968 | } | 989 | } |
| 969 | 990 | ||
| 991 | static inline const char *of_node_get_device_type(const struct device_node *np) | ||
| 992 | { | ||
| 993 | return of_get_property(np, "type", NULL); | ||
| 994 | } | ||
| 995 | |||
| 996 | static inline bool of_node_is_type(const struct device_node *np, const char *type) | ||
| 997 | { | ||
| 998 | const char *match = of_node_get_device_type(np); | ||
| 999 | |||
| 1000 | return np && match && type && !strcmp(match, type); | ||
| 1001 | } | ||
| 1002 | |||
| 970 | /** | 1003 | /** |
| 971 | * of_property_count_u8_elems - Count the number of u8 elements in a property | 1004 | * of_property_count_u8_elems - Count the number of u8 elements in a property |
| 972 | * | 1005 | * |
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 9abc0ca7259b..9f0aa1b48c78 100644 --- a/include/linux/platform_data/ina2xx.h +++ b/include/linux/platform_data/ina2xx.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Driver for Texas Instruments INA219, INA226 power monitor chips | 2 | * Driver for Texas Instruments INA219, INA226 power monitor chips |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> | 4 | * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
diff --git a/include/linux/quota.h b/include/linux/quota.h index ca9772c8e48b..f32dd270b8e3 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
| @@ -408,13 +408,7 @@ struct qc_type_state { | |||
| 408 | 408 | ||
| 409 | struct qc_state { | 409 | struct qc_state { |
| 410 | unsigned int s_incoredqs; /* Number of dquots in core */ | 410 | unsigned int s_incoredqs; /* Number of dquots in core */ |
| 411 | /* | 411 | struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */ |
| 412 | * Per quota type information. The array should really have | ||
| 413 | * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in | ||
| 414 | * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS | ||
| 415 | * supports project quotas, this can be changed to MAXQUOTAS | ||
| 416 | */ | ||
| 417 | struct qc_type_state s_state[XQM_MAXQUOTAS]; | ||
| 418 | }; | 412 | }; |
| 419 | 413 | ||
| 420 | /* Structure for communicating via ->set_info */ | 414 | /* Structure for communicating via ->set_info */ |
diff --git a/include/net/act_api.h b/include/net/act_api.h index 1ad5b19e83a9..970303448c90 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h | |||
| @@ -23,13 +23,11 @@ struct tc_action { | |||
| 23 | const struct tc_action_ops *ops; | 23 | const struct tc_action_ops *ops; |
| 24 | __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ | 24 | __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ |
| 25 | __u32 order; | 25 | __u32 order; |
| 26 | struct list_head list; | ||
| 27 | struct tcf_idrinfo *idrinfo; | 26 | struct tcf_idrinfo *idrinfo; |
| 28 | 27 | ||
| 29 | u32 tcfa_index; | 28 | u32 tcfa_index; |
| 30 | refcount_t tcfa_refcnt; | 29 | refcount_t tcfa_refcnt; |
| 31 | atomic_t tcfa_bindcnt; | 30 | atomic_t tcfa_bindcnt; |
| 32 | u32 tcfa_capab; | ||
| 33 | int tcfa_action; | 31 | int tcfa_action; |
| 34 | struct tcf_t tcfa_tm; | 32 | struct tcf_t tcfa_tm; |
| 35 | struct gnet_stats_basic_packed tcfa_bstats; | 33 | struct gnet_stats_basic_packed tcfa_bstats; |
| @@ -44,7 +42,6 @@ struct tc_action { | |||
| 44 | #define tcf_index common.tcfa_index | 42 | #define tcf_index common.tcfa_index |
| 45 | #define tcf_refcnt common.tcfa_refcnt | 43 | #define tcf_refcnt common.tcfa_refcnt |
| 46 | #define tcf_bindcnt common.tcfa_bindcnt | 44 | #define tcf_bindcnt common.tcfa_bindcnt |
| 47 | #define tcf_capab common.tcfa_capab | ||
| 48 | #define tcf_action common.tcfa_action | 45 | #define tcf_action common.tcfa_action |
| 49 | #define tcf_tm common.tcfa_tm | 46 | #define tcf_tm common.tcfa_tm |
| 50 | #define tcf_bstats common.tcfa_bstats | 47 | #define tcf_bstats common.tcfa_bstats |
| @@ -102,7 +99,6 @@ struct tc_action_ops { | |||
| 102 | size_t (*get_fill_size)(const struct tc_action *act); | 99 | size_t (*get_fill_size)(const struct tc_action *act); |
| 103 | struct net_device *(*get_dev)(const struct tc_action *a); | 100 | struct net_device *(*get_dev)(const struct tc_action *a); |
| 104 | void (*put_dev)(struct net_device *dev); | 101 | void (*put_dev)(struct net_device *dev); |
| 105 | int (*delete)(struct net *net, u32 index); | ||
| 106 | }; | 102 | }; |
| 107 | 103 | ||
| 108 | struct tc_action_net { | 104 | struct tc_action_net { |
| @@ -148,8 +144,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, | |||
| 148 | const struct tc_action_ops *ops, | 144 | const struct tc_action_ops *ops, |
| 149 | struct netlink_ext_ack *extack); | 145 | struct netlink_ext_ack *extack); |
| 150 | int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index); | 146 | int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index); |
| 151 | bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, | ||
| 152 | int bind); | ||
| 153 | int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, | 147 | int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, |
| 154 | struct tc_action **a, const struct tc_action_ops *ops, | 148 | struct tc_action **a, const struct tc_action_ops *ops, |
| 155 | int bind, bool cpustats); | 149 | int bind, bool cpustats); |
| @@ -158,7 +152,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a); | |||
| 158 | void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); | 152 | void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); |
| 159 | int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, | 153 | int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, |
| 160 | struct tc_action **a, int bind); | 154 | struct tc_action **a, int bind); |
| 161 | int tcf_idr_delete_index(struct tc_action_net *tn, u32 index); | ||
| 162 | int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); | 155 | int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); |
| 163 | 156 | ||
| 164 | static inline int tcf_idr_release(struct tc_action *a, bool bind) | 157 | static inline int tcf_idr_release(struct tc_action *a, bool bind) |
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ef727f71336e..75a3f3fdb359 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
| @@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) | |||
| 298 | #endif | 298 | #endif |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | static inline void tcf_exts_to_list(const struct tcf_exts *exts, | ||
| 302 | struct list_head *actions) | ||
| 303 | { | ||
| 304 | #ifdef CONFIG_NET_CLS_ACT | 301 | #ifdef CONFIG_NET_CLS_ACT |
| 305 | int i; | 302 | #define tcf_exts_for_each_action(i, a, exts) \ |
| 306 | 303 | for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++) | |
| 307 | for (i = 0; i < exts->nr_actions; i++) { | 304 | #else |
| 308 | struct tc_action *a = exts->actions[i]; | 305 | #define tcf_exts_for_each_action(i, a, exts) \ |
| 309 | 306 | for (; 0; (void)(i), (void)(a), (void)(exts)) | |
| 310 | list_add_tail(&a->list, actions); | ||
| 311 | } | ||
| 312 | #endif | 307 | #endif |
| 313 | } | ||
| 314 | 308 | ||
| 315 | static inline void | 309 | static inline void |
| 316 | tcf_exts_stats_update(const struct tcf_exts *exts, | 310 | tcf_exts_stats_update(const struct tcf_exts *exts, |
| @@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts) | |||
| 361 | #endif | 355 | #endif |
| 362 | } | 356 | } |
| 363 | 357 | ||
| 358 | static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts) | ||
| 359 | { | ||
| 360 | #ifdef CONFIG_NET_CLS_ACT | ||
| 361 | return exts->actions[0]; | ||
| 362 | #else | ||
| 363 | return NULL; | ||
| 364 | #endif | ||
| 365 | } | ||
| 366 | |||
| 364 | /** | 367 | /** |
| 365 | * tcf_exts_exec - execute tc filter extensions | 368 | * tcf_exts_exec - execute tc filter extensions |
| 366 | * @skb: socket buffer | 369 | * @skb: socket buffer |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 04b8eda94e7d..03cc59ee9c95 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/jhash.h> | 15 | #include <linux/jhash.h> |
| 16 | #include <linux/filter.h> | 16 | #include <linux/filter.h> |
| 17 | #include <linux/rculist_nulls.h> | 17 | #include <linux/rculist_nulls.h> |
| 18 | #include <linux/random.h> | ||
| 18 | #include <uapi/linux/btf.h> | 19 | #include <uapi/linux/btf.h> |
| 19 | #include "percpu_freelist.h" | 20 | #include "percpu_freelist.h" |
| 20 | #include "bpf_lru_list.h" | 21 | #include "bpf_lru_list.h" |
| @@ -41,6 +42,7 @@ struct bpf_htab { | |||
| 41 | atomic_t count; /* number of elements in this hashtable */ | 42 | atomic_t count; /* number of elements in this hashtable */ |
| 42 | u32 n_buckets; /* number of hash buckets */ | 43 | u32 n_buckets; /* number of hash buckets */ |
| 43 | u32 elem_size; /* size of each element in bytes */ | 44 | u32 elem_size; /* size of each element in bytes */ |
| 45 | u32 hashrnd; | ||
| 44 | }; | 46 | }; |
| 45 | 47 | ||
| 46 | /* each htab element is struct htab_elem + key + value */ | 48 | /* each htab element is struct htab_elem + key + value */ |
| @@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
| 371 | if (!htab->buckets) | 373 | if (!htab->buckets) |
| 372 | goto free_htab; | 374 | goto free_htab; |
| 373 | 375 | ||
| 376 | htab->hashrnd = get_random_int(); | ||
| 374 | for (i = 0; i < htab->n_buckets; i++) { | 377 | for (i = 0; i < htab->n_buckets; i++) { |
| 375 | INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); | 378 | INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); |
| 376 | raw_spin_lock_init(&htab->buckets[i].lock); | 379 | raw_spin_lock_init(&htab->buckets[i].lock); |
| @@ -402,9 +405,9 @@ free_htab: | |||
| 402 | return ERR_PTR(err); | 405 | return ERR_PTR(err); |
| 403 | } | 406 | } |
| 404 | 407 | ||
| 405 | static inline u32 htab_map_hash(const void *key, u32 key_len) | 408 | static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd) |
| 406 | { | 409 | { |
| 407 | return jhash(key, key_len, 0); | 410 | return jhash(key, key_len, hashrnd); |
| 408 | } | 411 | } |
| 409 | 412 | ||
| 410 | static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) | 413 | static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) |
| @@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) | |||
| 470 | 473 | ||
| 471 | key_size = map->key_size; | 474 | key_size = map->key_size; |
| 472 | 475 | ||
| 473 | hash = htab_map_hash(key, key_size); | 476 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
| 474 | 477 | ||
| 475 | head = select_bucket(htab, hash); | 478 | head = select_bucket(htab, hash); |
| 476 | 479 | ||
| @@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) | |||
| 597 | if (!key) | 600 | if (!key) |
| 598 | goto find_first_elem; | 601 | goto find_first_elem; |
| 599 | 602 | ||
| 600 | hash = htab_map_hash(key, key_size); | 603 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
| 601 | 604 | ||
| 602 | head = select_bucket(htab, hash); | 605 | head = select_bucket(htab, hash); |
| 603 | 606 | ||
| @@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
| 824 | 827 | ||
| 825 | key_size = map->key_size; | 828 | key_size = map->key_size; |
| 826 | 829 | ||
| 827 | hash = htab_map_hash(key, key_size); | 830 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
| 828 | 831 | ||
| 829 | b = __select_bucket(htab, hash); | 832 | b = __select_bucket(htab, hash); |
| 830 | head = &b->head; | 833 | head = &b->head; |
| @@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
| 880 | 883 | ||
| 881 | key_size = map->key_size; | 884 | key_size = map->key_size; |
| 882 | 885 | ||
| 883 | hash = htab_map_hash(key, key_size); | 886 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
| 884 | 887 | ||
| 885 | b = __select_bucket(htab, hash); | 888 | b = __select_bucket(htab, hash); |
| 886 | head = &b->head; | 889 | head = &b->head; |
| @@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, | |||
| 945 | 948 | ||
| 946 | key_size = map->key_size; | 949 | key_size = map->key_size; |
| 947 | 950 | ||
| 948 | hash = htab_map_hash(key, key_size); | 951 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
| 949 | 952 | ||
| 950 | b = __select_bucket(htab, hash); | 953 | b = __select_bucket(htab, hash); |
| 951 | head = &b->head; | 954 | head = &b->head; |
| @@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, | |||
| 998 | 1001 | ||
| 999 | key_size = map->key_size; | 1002 | key_size = map->key_size; |
| 1000 | 1003 | ||
| 1001 | hash = htab_map_hash(key, key_size); | 1004 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
| 1002 | 1005 | ||
| 1003 | b = __select_bucket(htab, hash); | 1006 | b = __select_bucket(htab, hash); |
| 1004 | head = &b->head; | 1007 | head = &b->head; |
| @@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key) | |||
| 1071 | 1074 | ||
| 1072 | key_size = map->key_size; | 1075 | key_size = map->key_size; |
| 1073 | 1076 | ||
| 1074 | hash = htab_map_hash(key, key_size); | 1077 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
| 1075 | b = __select_bucket(htab, hash); | 1078 | b = __select_bucket(htab, hash); |
| 1076 | head = &b->head; | 1079 | head = &b->head; |
| 1077 | 1080 | ||
| @@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) | |||
| 1103 | 1106 | ||
| 1104 | key_size = map->key_size; | 1107 | key_size = map->key_size; |
| 1105 | 1108 | ||
| 1106 | hash = htab_map_hash(key, key_size); | 1109 | hash = htab_map_hash(key, key_size, htab->hashrnd); |
| 1107 | b = __select_bucket(htab, hash); | 1110 | b = __select_bucket(htab, hash); |
| 1108 | head = &b->head; | 1111 | head = &b->head; |
| 1109 | 1112 | ||
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 98e621a29e8e..cf5195c7c331 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
| @@ -1427,12 +1427,15 @@ out: | |||
| 1427 | static void smap_write_space(struct sock *sk) | 1427 | static void smap_write_space(struct sock *sk) |
| 1428 | { | 1428 | { |
| 1429 | struct smap_psock *psock; | 1429 | struct smap_psock *psock; |
| 1430 | void (*write_space)(struct sock *sk); | ||
| 1430 | 1431 | ||
| 1431 | rcu_read_lock(); | 1432 | rcu_read_lock(); |
| 1432 | psock = smap_psock_sk(sk); | 1433 | psock = smap_psock_sk(sk); |
| 1433 | if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) | 1434 | if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) |
| 1434 | schedule_work(&psock->tx_work); | 1435 | schedule_work(&psock->tx_work); |
| 1436 | write_space = psock->save_write_space; | ||
| 1435 | rcu_read_unlock(); | 1437 | rcu_read_unlock(); |
| 1438 | write_space(sk); | ||
| 1436 | } | 1439 | } |
| 1437 | 1440 | ||
| 1438 | static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) | 1441 | static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) |
| @@ -2140,7 +2143,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) | |||
| 2140 | return ERR_PTR(-EPERM); | 2143 | return ERR_PTR(-EPERM); |
| 2141 | 2144 | ||
| 2142 | /* check sanity of attributes */ | 2145 | /* check sanity of attributes */ |
| 2143 | if (attr->max_entries == 0 || attr->value_size != 4 || | 2146 | if (attr->max_entries == 0 || |
| 2147 | attr->key_size == 0 || | ||
| 2148 | attr->value_size != 4 || | ||
| 2144 | attr->map_flags & ~SOCK_CREATE_FLAG_MASK) | 2149 | attr->map_flags & ~SOCK_CREATE_FLAG_MASK) |
| 2145 | return ERR_PTR(-EINVAL); | 2150 | return ERR_PTR(-EINVAL); |
| 2146 | 2151 | ||
| @@ -2267,8 +2272,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, | |||
| 2267 | } | 2272 | } |
| 2268 | l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, | 2273 | l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, |
| 2269 | htab->map.numa_node); | 2274 | htab->map.numa_node); |
| 2270 | if (!l_new) | 2275 | if (!l_new) { |
| 2276 | atomic_dec(&htab->count); | ||
| 2271 | return ERR_PTR(-ENOMEM); | 2277 | return ERR_PTR(-ENOMEM); |
| 2278 | } | ||
| 2272 | 2279 | ||
| 2273 | memcpy(l_new->key, key, key_size); | 2280 | memcpy(l_new->key, key, key_size); |
| 2274 | l_new->sk = sk; | 2281 | l_new->sk = sk; |
diff --git a/kernel/cpu.c b/kernel/cpu.c index ed44d7d34c2d..aa7fe85ad62e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { } | |||
| 102 | * @name: Name of the step | 102 | * @name: Name of the step |
| 103 | * @startup: Startup function of the step | 103 | * @startup: Startup function of the step |
| 104 | * @teardown: Teardown function of the step | 104 | * @teardown: Teardown function of the step |
| 105 | * @skip_onerr: Do not invoke the functions on error rollback | ||
| 106 | * Will go away once the notifiers are gone | ||
| 107 | * @cant_stop: Bringup/teardown can't be stopped at this step | 105 | * @cant_stop: Bringup/teardown can't be stopped at this step |
| 108 | */ | 106 | */ |
| 109 | struct cpuhp_step { | 107 | struct cpuhp_step { |
| @@ -119,7 +117,6 @@ struct cpuhp_step { | |||
| 119 | struct hlist_node *node); | 117 | struct hlist_node *node); |
| 120 | } teardown; | 118 | } teardown; |
| 121 | struct hlist_head list; | 119 | struct hlist_head list; |
| 122 | bool skip_onerr; | ||
| 123 | bool cant_stop; | 120 | bool cant_stop; |
| 124 | bool multi_instance; | 121 | bool multi_instance; |
| 125 | }; | 122 | }; |
| @@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu) | |||
| 550 | 547 | ||
| 551 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) | 548 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) |
| 552 | { | 549 | { |
| 553 | for (st->state--; st->state > st->target; st->state--) { | 550 | for (st->state--; st->state > st->target; st->state--) |
| 554 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 551 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
| 555 | |||
| 556 | if (!step->skip_onerr) | ||
| 557 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); | ||
| 558 | } | ||
| 559 | } | 552 | } |
| 560 | 553 | ||
| 561 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 554 | static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
| @@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
| 644 | 637 | ||
| 645 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); | 638 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); |
| 646 | 639 | ||
| 647 | if (st->rollback) { | ||
| 648 | struct cpuhp_step *step = cpuhp_get_step(state); | ||
| 649 | if (step->skip_onerr) | ||
| 650 | goto next; | ||
| 651 | } | ||
| 652 | |||
| 653 | if (cpuhp_is_atomic_state(state)) { | 640 | if (cpuhp_is_atomic_state(state)) { |
| 654 | local_irq_disable(); | 641 | local_irq_disable(); |
| 655 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); | 642 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); |
| @@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu) | |||
| 673 | st->should_run = false; | 660 | st->should_run = false; |
| 674 | } | 661 | } |
| 675 | 662 | ||
| 676 | next: | ||
| 677 | cpuhp_lock_release(bringup); | 663 | cpuhp_lock_release(bringup); |
| 678 | 664 | ||
| 679 | if (!st->should_run) | 665 | if (!st->should_run) |
| @@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void) | |||
| 916 | 902 | ||
| 917 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) | 903 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) |
| 918 | { | 904 | { |
| 919 | for (st->state++; st->state < st->target; st->state++) { | 905 | for (st->state++; st->state < st->target; st->state++) |
| 920 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 906 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
| 921 | |||
| 922 | if (!step->skip_onerr) | ||
| 923 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); | ||
| 924 | } | ||
| 925 | } | 907 | } |
| 926 | 908 | ||
| 927 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | 909 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 924e37fb1620..fd6f8ed28e01 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
| @@ -38,7 +38,6 @@ | |||
| 38 | #include <linux/kmsg_dump.h> | 38 | #include <linux/kmsg_dump.h> |
| 39 | #include <linux/syslog.h> | 39 | #include <linux/syslog.h> |
| 40 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
| 41 | #include <linux/notifier.h> | ||
| 42 | #include <linux/rculist.h> | 41 | #include <linux/rculist.h> |
| 43 | #include <linux/poll.h> | 42 | #include <linux/poll.h> |
| 44 | #include <linux/irq_work.h> | 43 | #include <linux/irq_work.h> |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5470dce212c0..977918d5d350 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -261,7 +261,7 @@ static void __touch_watchdog(void) | |||
| 261 | * entering idle state. This should only be used for scheduler events. | 261 | * entering idle state. This should only be used for scheduler events. |
| 262 | * Use touch_softlockup_watchdog() for everything else. | 262 | * Use touch_softlockup_watchdog() for everything else. |
| 263 | */ | 263 | */ |
| 264 | void touch_softlockup_watchdog_sched(void) | 264 | notrace void touch_softlockup_watchdog_sched(void) |
| 265 | { | 265 | { |
| 266 | /* | 266 | /* |
| 267 | * Preemption can be enabled. It doesn't matter which CPU's timestamp | 267 | * Preemption can be enabled. It doesn't matter which CPU's timestamp |
| @@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void) | |||
| 270 | raw_cpu_write(watchdog_touch_ts, 0); | 270 | raw_cpu_write(watchdog_touch_ts, 0); |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | void touch_softlockup_watchdog(void) | 273 | notrace void touch_softlockup_watchdog(void) |
| 274 | { | 274 | { |
| 275 | touch_softlockup_watchdog_sched(); | 275 | touch_softlockup_watchdog_sched(); |
| 276 | wq_watchdog_touch(raw_smp_processor_id()); | 276 | wq_watchdog_touch(raw_smp_processor_id()); |
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 1f7020d65d0a..71381168dede 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c | |||
| @@ -29,7 +29,7 @@ static struct cpumask dead_events_mask; | |||
| 29 | static unsigned long hardlockup_allcpu_dumped; | 29 | static unsigned long hardlockup_allcpu_dumped; |
| 30 | static atomic_t watchdog_cpus = ATOMIC_INIT(0); | 30 | static atomic_t watchdog_cpus = ATOMIC_INIT(0); |
| 31 | 31 | ||
| 32 | void arch_touch_nmi_watchdog(void) | 32 | notrace void arch_touch_nmi_watchdog(void) |
| 33 | { | 33 | { |
| 34 | /* | 34 | /* |
| 35 | * Using __raw here because some code paths have | 35 | * Using __raw here because some code paths have |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 60e80198c3df..0280deac392e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) | |||
| 5574 | mod_timer(&wq_watchdog_timer, jiffies + thresh); | 5574 | mod_timer(&wq_watchdog_timer, jiffies + thresh); |
| 5575 | } | 5575 | } |
| 5576 | 5576 | ||
| 5577 | void wq_watchdog_touch(int cpu) | 5577 | notrace void wq_watchdog_touch(int cpu) |
| 5578 | { | 5578 | { |
| 5579 | if (cpu >= 0) | 5579 | if (cpu >= 0) |
| 5580 | per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; | 5580 | per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; |
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index c72577e472f2..a66595ba5543 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #include <linux/percpu_counter.h> | 6 | #include <linux/percpu_counter.h> |
| 7 | #include <linux/notifier.h> | ||
| 8 | #include <linux/mutex.h> | 7 | #include <linux/mutex.h> |
| 9 | #include <linux/init.h> | 8 | #include <linux/init.h> |
| 10 | #include <linux/cpu.h> | 9 | #include <linux/cpu.h> |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 310e29b51507..30526afa8343 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -28,7 +28,6 @@ | |||
| 28 | #include <linux/rhashtable.h> | 28 | #include <linux/rhashtable.h> |
| 29 | #include <linux/err.h> | 29 | #include <linux/err.h> |
| 30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
| 31 | #include <linux/rhashtable.h> | ||
| 32 | 31 | ||
| 33 | #define HASH_DEFAULT_SIZE 64UL | 32 | #define HASH_DEFAULT_SIZE 64UL |
| 34 | #define HASH_MIN_SIZE 4U | 33 | #define HASH_MIN_SIZE 4U |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6551d3b0dc30..84ae9bf5858a 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -27,7 +27,6 @@ | |||
| 27 | #include <linux/mpage.h> | 27 | #include <linux/mpage.h> |
| 28 | #include <linux/rmap.h> | 28 | #include <linux/rmap.h> |
| 29 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
| 30 | #include <linux/notifier.h> | ||
| 31 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
| 32 | #include <linux/sysctl.h> | 31 | #include <linux/sysctl.h> |
| 33 | #include <linux/cpu.h> | 32 | #include <linux/cpu.h> |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e75865d58ba7..05e983f42316 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -32,7 +32,6 @@ | |||
| 32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
| 33 | #include <linux/ratelimit.h> | 33 | #include <linux/ratelimit.h> |
| 34 | #include <linux/oom.h> | 34 | #include <linux/oom.h> |
| 35 | #include <linux/notifier.h> | ||
| 36 | #include <linux/topology.h> | 35 | #include <linux/topology.h> |
| 37 | #include <linux/sysctl.h> | 36 | #include <linux/sysctl.h> |
| 38 | #include <linux/cpu.h> | 37 | #include <linux/cpu.h> |
| @@ -19,7 +19,6 @@ | |||
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | #include "slab.h" | 20 | #include "slab.h" |
| 21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
| 22 | #include <linux/notifier.h> | ||
| 23 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
| 24 | #include <linux/kasan.h> | 23 | #include <linux/kasan.h> |
| 25 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
diff --git a/net/core/dev.c b/net/core/dev.c index 325fc5088370..82114e1111e6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -93,7 +93,6 @@ | |||
| 93 | #include <linux/netdevice.h> | 93 | #include <linux/netdevice.h> |
| 94 | #include <linux/etherdevice.h> | 94 | #include <linux/etherdevice.h> |
| 95 | #include <linux/ethtool.h> | 95 | #include <linux/ethtool.h> |
| 96 | #include <linux/notifier.h> | ||
| 97 | #include <linux/skbuff.h> | 96 | #include <linux/skbuff.h> |
| 98 | #include <linux/bpf.h> | 97 | #include <linux/bpf.h> |
| 99 | #include <linux/bpf_trace.h> | 98 | #include <linux/bpf_trace.h> |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 962c4fd338ba..1c45c1d6d241 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
| @@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev, | |||
| 767 | const struct tc_action *a; | 767 | const struct tc_action *a; |
| 768 | struct dsa_port *to_dp; | 768 | struct dsa_port *to_dp; |
| 769 | int err = -EOPNOTSUPP; | 769 | int err = -EOPNOTSUPP; |
| 770 | LIST_HEAD(actions); | ||
| 771 | 770 | ||
| 772 | if (!ds->ops->port_mirror_add) | 771 | if (!ds->ops->port_mirror_add) |
| 773 | return err; | 772 | return err; |
| @@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev, | |||
| 775 | if (!tcf_exts_has_one_action(cls->exts)) | 774 | if (!tcf_exts_has_one_action(cls->exts)) |
| 776 | return err; | 775 | return err; |
| 777 | 776 | ||
| 778 | tcf_exts_to_list(cls->exts, &actions); | 777 | a = tcf_exts_first_action(cls->exts); |
| 779 | a = list_first_entry(&actions, struct tc_action, list); | ||
| 780 | 778 | ||
| 781 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { | 779 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { |
| 782 | struct dsa_mall_mirror_tc_entry *mirror; | 780 | struct dsa_mall_mirror_tc_entry *mirror; |
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 13d34427ca3d..02ff2dde9609 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c | |||
| @@ -95,11 +95,10 @@ struct bbr { | |||
| 95 | u32 mode:3, /* current bbr_mode in state machine */ | 95 | u32 mode:3, /* current bbr_mode in state machine */ |
| 96 | prev_ca_state:3, /* CA state on previous ACK */ | 96 | prev_ca_state:3, /* CA state on previous ACK */ |
| 97 | packet_conservation:1, /* use packet conservation? */ | 97 | packet_conservation:1, /* use packet conservation? */ |
| 98 | restore_cwnd:1, /* decided to revert cwnd to old value */ | ||
| 99 | round_start:1, /* start of packet-timed tx->ack round? */ | 98 | round_start:1, /* start of packet-timed tx->ack round? */ |
| 100 | idle_restart:1, /* restarting after idle? */ | 99 | idle_restart:1, /* restarting after idle? */ |
| 101 | probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */ | 100 | probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */ |
| 102 | unused:12, | 101 | unused:13, |
| 103 | lt_is_sampling:1, /* taking long-term ("LT") samples now? */ | 102 | lt_is_sampling:1, /* taking long-term ("LT") samples now? */ |
| 104 | lt_rtt_cnt:7, /* round trips in long-term interval */ | 103 | lt_rtt_cnt:7, /* round trips in long-term interval */ |
| 105 | lt_use_bw:1; /* use lt_bw as our bw estimate? */ | 104 | lt_use_bw:1; /* use lt_bw as our bw estimate? */ |
| @@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8; | |||
| 175 | /* If we estimate we're policed, use lt_bw for this many round trips: */ | 174 | /* If we estimate we're policed, use lt_bw for this many round trips: */ |
| 176 | static const u32 bbr_lt_bw_max_rtts = 48; | 175 | static const u32 bbr_lt_bw_max_rtts = 48; |
| 177 | 176 | ||
| 177 | static void bbr_check_probe_rtt_done(struct sock *sk); | ||
| 178 | |||
| 178 | /* Do we estimate that STARTUP filled the pipe? */ | 179 | /* Do we estimate that STARTUP filled the pipe? */ |
| 179 | static bool bbr_full_bw_reached(const struct sock *sk) | 180 | static bool bbr_full_bw_reached(const struct sock *sk) |
| 180 | { | 181 | { |
| @@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) | |||
| 309 | */ | 310 | */ |
| 310 | if (bbr->mode == BBR_PROBE_BW) | 311 | if (bbr->mode == BBR_PROBE_BW) |
| 311 | bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT); | 312 | bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT); |
| 313 | else if (bbr->mode == BBR_PROBE_RTT) | ||
| 314 | bbr_check_probe_rtt_done(sk); | ||
| 312 | } | 315 | } |
| 313 | } | 316 | } |
| 314 | 317 | ||
| @@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore( | |||
| 396 | cwnd = tcp_packets_in_flight(tp) + acked; | 399 | cwnd = tcp_packets_in_flight(tp) + acked; |
| 397 | } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) { | 400 | } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) { |
| 398 | /* Exiting loss recovery; restore cwnd saved before recovery. */ | 401 | /* Exiting loss recovery; restore cwnd saved before recovery. */ |
| 399 | bbr->restore_cwnd = 1; | 402 | cwnd = max(cwnd, bbr->prior_cwnd); |
| 400 | bbr->packet_conservation = 0; | 403 | bbr->packet_conservation = 0; |
| 401 | } | 404 | } |
| 402 | bbr->prev_ca_state = state; | 405 | bbr->prev_ca_state = state; |
| 403 | 406 | ||
| 404 | if (bbr->restore_cwnd) { | ||
| 405 | /* Restore cwnd after exiting loss recovery or PROBE_RTT. */ | ||
| 406 | cwnd = max(cwnd, bbr->prior_cwnd); | ||
| 407 | bbr->restore_cwnd = 0; | ||
| 408 | } | ||
| 409 | |||
| 410 | if (bbr->packet_conservation) { | 407 | if (bbr->packet_conservation) { |
| 411 | *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked); | 408 | *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked); |
| 412 | return true; /* yes, using packet conservation */ | 409 | return true; /* yes, using packet conservation */ |
| @@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs, | |||
| 423 | { | 420 | { |
| 424 | struct tcp_sock *tp = tcp_sk(sk); | 421 | struct tcp_sock *tp = tcp_sk(sk); |
| 425 | struct bbr *bbr = inet_csk_ca(sk); | 422 | struct bbr *bbr = inet_csk_ca(sk); |
| 426 | u32 cwnd = 0, target_cwnd = 0; | 423 | u32 cwnd = tp->snd_cwnd, target_cwnd = 0; |
| 427 | 424 | ||
| 428 | if (!acked) | 425 | if (!acked) |
| 429 | return; | 426 | goto done; /* no packet fully ACKed; just apply caps */ |
| 430 | 427 | ||
| 431 | if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) | 428 | if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) |
| 432 | goto done; | 429 | goto done; |
| @@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs) | |||
| 748 | bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ | 745 | bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ |
| 749 | } | 746 | } |
| 750 | 747 | ||
| 748 | static void bbr_check_probe_rtt_done(struct sock *sk) | ||
| 749 | { | ||
| 750 | struct tcp_sock *tp = tcp_sk(sk); | ||
| 751 | struct bbr *bbr = inet_csk_ca(sk); | ||
| 752 | |||
| 753 | if (!(bbr->probe_rtt_done_stamp && | ||
| 754 | after(tcp_jiffies32, bbr->probe_rtt_done_stamp))) | ||
| 755 | return; | ||
| 756 | |||
| 757 | bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */ | ||
| 758 | tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd); | ||
| 759 | bbr_reset_mode(sk); | ||
| 760 | } | ||
| 761 | |||
| 751 | /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and | 762 | /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and |
| 752 | * periodically drain the bottleneck queue, to converge to measure the true | 763 | * periodically drain the bottleneck queue, to converge to measure the true |
| 753 | * min_rtt (unloaded propagation delay). This allows the flows to keep queues | 764 | * min_rtt (unloaded propagation delay). This allows the flows to keep queues |
| @@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) | |||
| 806 | } else if (bbr->probe_rtt_done_stamp) { | 817 | } else if (bbr->probe_rtt_done_stamp) { |
| 807 | if (bbr->round_start) | 818 | if (bbr->round_start) |
| 808 | bbr->probe_rtt_round_done = 1; | 819 | bbr->probe_rtt_round_done = 1; |
| 809 | if (bbr->probe_rtt_round_done && | 820 | if (bbr->probe_rtt_round_done) |
| 810 | after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) { | 821 | bbr_check_probe_rtt_done(sk); |
| 811 | bbr->min_rtt_stamp = tcp_jiffies32; | ||
| 812 | bbr->restore_cwnd = 1; /* snap to prior_cwnd */ | ||
| 813 | bbr_reset_mode(sk); | ||
| 814 | } | ||
| 815 | } | 822 | } |
| 816 | } | 823 | } |
| 817 | /* Restart after idle ends only once we process a new S/ACK for data */ | 824 | /* Restart after idle ends only once we process a new S/ACK for data */ |
| @@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk) | |||
| 862 | bbr->has_seen_rtt = 0; | 869 | bbr->has_seen_rtt = 0; |
| 863 | bbr_init_pacing_rate_from_rtt(sk); | 870 | bbr_init_pacing_rate_from_rtt(sk); |
| 864 | 871 | ||
| 865 | bbr->restore_cwnd = 0; | ||
| 866 | bbr->round_start = 0; | 872 | bbr->round_start = 0; |
| 867 | bbr->idle_restart = 0; | 873 | bbr->idle_restart = 0; |
| 868 | bbr->full_bw_reached = 0; | 874 | bbr->full_bw_reached = 0; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9e041fa5c545..44c09eddbb78 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -2517,6 +2517,12 @@ static int __net_init tcp_sk_init(struct net *net) | |||
| 2517 | if (res) | 2517 | if (res) |
| 2518 | goto fail; | 2518 | goto fail; |
| 2519 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | 2519 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); |
| 2520 | |||
| 2521 | /* Please enforce IP_DF and IPID==0 for RST and | ||
| 2522 | * ACK sent in SYN-RECV and TIME-WAIT state. | ||
| 2523 | */ | ||
| 2524 | inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO; | ||
| 2525 | |||
| 2520 | *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; | 2526 | *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; |
| 2521 | } | 2527 | } |
| 2522 | 2528 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2fac4ad74867..d51a8c0b3372 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev) | |||
| 2398 | 2398 | ||
| 2399 | ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); | 2399 | ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); |
| 2400 | 2400 | ||
| 2401 | ip6_route_add(&cfg, GFP_ATOMIC, NULL); | 2401 | ip6_route_add(&cfg, GFP_KERNEL, NULL); |
| 2402 | } | 2402 | } |
| 2403 | 2403 | ||
| 2404 | static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | 2404 | static struct inet6_dev *addrconf_add_dev(struct net_device *dev) |
| @@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
| 3062 | if (addr.s6_addr32[3]) { | 3062 | if (addr.s6_addr32[3]) { |
| 3063 | add_addr(idev, &addr, plen, scope); | 3063 | add_addr(idev, &addr, plen, scope); |
| 3064 | addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, | 3064 | addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, |
| 3065 | GFP_ATOMIC); | 3065 | GFP_KERNEL); |
| 3066 | return; | 3066 | return; |
| 3067 | } | 3067 | } |
| 3068 | 3068 | ||
| @@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
| 3087 | 3087 | ||
| 3088 | add_addr(idev, &addr, plen, flag); | 3088 | add_addr(idev, &addr, plen, flag); |
| 3089 | addrconf_prefix_route(&addr, plen, 0, idev->dev, | 3089 | addrconf_prefix_route(&addr, plen, 0, idev->dev, |
| 3090 | 0, pflags, GFP_ATOMIC); | 3090 | 0, pflags, GFP_KERNEL); |
| 3091 | } | 3091 | } |
| 3092 | } | 3092 | } |
| 3093 | } | 3093 | } |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index d212738e9d10..c861a6d4671d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
| @@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head) | |||
| 198 | } | 198 | } |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | lwtstate_put(f6i->fib6_nh.nh_lwtstate); | ||
| 202 | |||
| 201 | if (f6i->fib6_nh.nh_dev) | 203 | if (f6i->fib6_nh.nh_dev) |
| 202 | dev_put(f6i->fib6_nh.nh_dev); | 204 | dev_put(f6i->fib6_nh.nh_dev); |
| 203 | 205 | ||
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 38dec9da90d3..5095367c7204 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
| @@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n, | |||
| 1094 | } | 1094 | } |
| 1095 | 1095 | ||
| 1096 | t = rtnl_dereference(ip6n->tnls_wc[0]); | 1096 | t = rtnl_dereference(ip6n->tnls_wc[0]); |
| 1097 | unregister_netdevice_queue(t->dev, list); | 1097 | if (t) |
| 1098 | unregister_netdevice_queue(t->dev, list); | ||
| 1098 | } | 1099 | } |
| 1099 | 1100 | ||
| 1100 | static int __net_init vti6_init_net(struct net *net) | 1101 | static int __net_init vti6_init_net(struct net *net) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7208c16302f6..c4ea13e8360b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) | |||
| 956 | rt->dst.error = 0; | 956 | rt->dst.error = 0; |
| 957 | rt->dst.output = ip6_output; | 957 | rt->dst.output = ip6_output; |
| 958 | 958 | ||
| 959 | if (ort->fib6_type == RTN_LOCAL) { | 959 | if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) { |
| 960 | rt->dst.input = ip6_input; | 960 | rt->dst.input = ip6_input; |
| 961 | } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { | 961 | } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { |
| 962 | rt->dst.input = ip6_mc_input; | 962 | rt->dst.input = ip6_mc_input; |
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index 82e6edf9c5d9..45f33d6dedf7 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c | |||
| @@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb, | |||
| 100 | bool found; | 100 | bool found; |
| 101 | int rc; | 101 | int rc; |
| 102 | 102 | ||
| 103 | if (id > ndp->package_num) { | 103 | if (id > ndp->package_num - 1) { |
| 104 | netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id); | 104 | netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id); |
| 105 | return -ENODEV; | 105 | return -ENODEV; |
| 106 | } | 106 | } |
| @@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb, | |||
| 240 | return 0; /* done */ | 240 | return 0; /* done */ |
| 241 | 241 | ||
| 242 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, | 242 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, |
| 243 | &ncsi_genl_family, 0, NCSI_CMD_PKG_INFO); | 243 | &ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO); |
| 244 | if (!hdr) { | 244 | if (!hdr) { |
| 245 | rc = -EMSGSIZE; | 245 | rc = -EMSGSIZE; |
| 246 | goto err; | 246 | goto err; |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 2c7b7c352d3e..b9bbcf3d6c63 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
| @@ -37,7 +37,6 @@ | |||
| 37 | #include <net/tcp.h> | 37 | #include <net/tcp.h> |
| 38 | #include <net/net_namespace.h> | 38 | #include <net/net_namespace.h> |
| 39 | #include <net/netns/generic.h> | 39 | #include <net/netns/generic.h> |
| 40 | #include <net/tcp.h> | ||
| 41 | #include <net/addrconf.h> | 40 | #include <net/addrconf.h> |
| 42 | 41 | ||
| 43 | #include "rds.h" | 42 | #include "rds.h" |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 229d63c99be2..db83dac1e7f4 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
| @@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, | |||
| 300 | } | 300 | } |
| 301 | EXPORT_SYMBOL(tcf_generic_walker); | 301 | EXPORT_SYMBOL(tcf_generic_walker); |
| 302 | 302 | ||
| 303 | static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, | 303 | int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) |
| 304 | struct tc_action **a, int bind) | ||
| 305 | { | 304 | { |
| 306 | struct tcf_idrinfo *idrinfo = tn->idrinfo; | 305 | struct tcf_idrinfo *idrinfo = tn->idrinfo; |
| 307 | struct tc_action *p; | 306 | struct tc_action *p; |
| 308 | 307 | ||
| 309 | spin_lock(&idrinfo->lock); | 308 | spin_lock(&idrinfo->lock); |
| 310 | p = idr_find(&idrinfo->action_idr, index); | 309 | p = idr_find(&idrinfo->action_idr, index); |
| 311 | if (IS_ERR(p)) { | 310 | if (IS_ERR(p)) |
| 312 | p = NULL; | 311 | p = NULL; |
| 313 | } else if (p) { | 312 | else if (p) |
| 314 | refcount_inc(&p->tcfa_refcnt); | 313 | refcount_inc(&p->tcfa_refcnt); |
| 315 | if (bind) | ||
| 316 | atomic_inc(&p->tcfa_bindcnt); | ||
| 317 | } | ||
| 318 | spin_unlock(&idrinfo->lock); | 314 | spin_unlock(&idrinfo->lock); |
| 319 | 315 | ||
| 320 | if (p) { | 316 | if (p) { |
| @@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, | |||
| 323 | } | 319 | } |
| 324 | return false; | 320 | return false; |
| 325 | } | 321 | } |
| 326 | |||
| 327 | int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) | ||
| 328 | { | ||
| 329 | return __tcf_idr_check(tn, index, a, 0); | ||
| 330 | } | ||
| 331 | EXPORT_SYMBOL(tcf_idr_search); | 322 | EXPORT_SYMBOL(tcf_idr_search); |
| 332 | 323 | ||
| 333 | bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, | 324 | static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index) |
| 334 | int bind) | ||
| 335 | { | 325 | { |
| 336 | return __tcf_idr_check(tn, index, a, bind); | ||
| 337 | } | ||
| 338 | EXPORT_SYMBOL(tcf_idr_check); | ||
| 339 | |||
| 340 | int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) | ||
| 341 | { | ||
| 342 | struct tcf_idrinfo *idrinfo = tn->idrinfo; | ||
| 343 | struct tc_action *p; | 326 | struct tc_action *p; |
| 344 | int ret = 0; | 327 | int ret = 0; |
| 345 | 328 | ||
| @@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) | |||
| 370 | spin_unlock(&idrinfo->lock); | 353 | spin_unlock(&idrinfo->lock); |
| 371 | return ret; | 354 | return ret; |
| 372 | } | 355 | } |
| 373 | EXPORT_SYMBOL(tcf_idr_delete_index); | ||
| 374 | 356 | ||
| 375 | int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, | 357 | int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, |
| 376 | struct tc_action **a, const struct tc_action_ops *ops, | 358 | struct tc_action **a, const struct tc_action_ops *ops, |
| @@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, | |||
| 409 | 391 | ||
| 410 | p->idrinfo = idrinfo; | 392 | p->idrinfo = idrinfo; |
| 411 | p->ops = ops; | 393 | p->ops = ops; |
| 412 | INIT_LIST_HEAD(&p->list); | ||
| 413 | *a = p; | 394 | *a = p; |
| 414 | return 0; | 395 | return 0; |
| 415 | err3: | 396 | err3: |
| @@ -686,14 +667,18 @@ static int tcf_action_put(struct tc_action *p) | |||
| 686 | return __tcf_action_put(p, false); | 667 | return __tcf_action_put(p, false); |
| 687 | } | 668 | } |
| 688 | 669 | ||
| 670 | /* Put all actions in this array, skip those NULL's. */ | ||
| 689 | static void tcf_action_put_many(struct tc_action *actions[]) | 671 | static void tcf_action_put_many(struct tc_action *actions[]) |
| 690 | { | 672 | { |
| 691 | int i; | 673 | int i; |
| 692 | 674 | ||
| 693 | for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { | 675 | for (i = 0; i < TCA_ACT_MAX_PRIO; i++) { |
| 694 | struct tc_action *a = actions[i]; | 676 | struct tc_action *a = actions[i]; |
| 695 | const struct tc_action_ops *ops = a->ops; | 677 | const struct tc_action_ops *ops; |
| 696 | 678 | ||
| 679 | if (!a) | ||
| 680 | continue; | ||
| 681 | ops = a->ops; | ||
| 697 | if (tcf_action_put(a)) | 682 | if (tcf_action_put(a)) |
| 698 | module_put(ops->owner); | 683 | module_put(ops->owner); |
| 699 | } | 684 | } |
| @@ -1175,41 +1160,38 @@ err_out: | |||
| 1175 | return err; | 1160 | return err; |
| 1176 | } | 1161 | } |
| 1177 | 1162 | ||
| 1178 | static int tcf_action_delete(struct net *net, struct tc_action *actions[], | 1163 | static int tcf_action_delete(struct net *net, struct tc_action *actions[]) |
| 1179 | int *acts_deleted, struct netlink_ext_ack *extack) | ||
| 1180 | { | 1164 | { |
| 1181 | u32 act_index; | 1165 | int i; |
| 1182 | int ret, i; | ||
| 1183 | 1166 | ||
| 1184 | for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { | 1167 | for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { |
| 1185 | struct tc_action *a = actions[i]; | 1168 | struct tc_action *a = actions[i]; |
| 1186 | const struct tc_action_ops *ops = a->ops; | 1169 | const struct tc_action_ops *ops = a->ops; |
| 1187 | |||
| 1188 | /* Actions can be deleted concurrently so we must save their | 1170 | /* Actions can be deleted concurrently so we must save their |
| 1189 | * type and id to search again after reference is released. | 1171 | * type and id to search again after reference is released. |
| 1190 | */ | 1172 | */ |
| 1191 | act_index = a->tcfa_index; | 1173 | struct tcf_idrinfo *idrinfo = a->idrinfo; |
| 1174 | u32 act_index = a->tcfa_index; | ||
| 1192 | 1175 | ||
| 1193 | if (tcf_action_put(a)) { | 1176 | if (tcf_action_put(a)) { |
| 1194 | /* last reference, action was deleted concurrently */ | 1177 | /* last reference, action was deleted concurrently */ |
| 1195 | module_put(ops->owner); | 1178 | module_put(ops->owner); |
| 1196 | } else { | 1179 | } else { |
| 1180 | int ret; | ||
| 1181 | |||
| 1197 | /* now do the delete */ | 1182 | /* now do the delete */ |
| 1198 | ret = ops->delete(net, act_index); | 1183 | ret = tcf_idr_delete_index(idrinfo, act_index); |
| 1199 | if (ret < 0) { | 1184 | if (ret < 0) |
| 1200 | *acts_deleted = i + 1; | ||
| 1201 | return ret; | 1185 | return ret; |
| 1202 | } | ||
| 1203 | } | 1186 | } |
| 1187 | actions[i] = NULL; | ||
| 1204 | } | 1188 | } |
| 1205 | *acts_deleted = i; | ||
| 1206 | return 0; | 1189 | return 0; |
| 1207 | } | 1190 | } |
| 1208 | 1191 | ||
| 1209 | static int | 1192 | static int |
| 1210 | tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], | 1193 | tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], |
| 1211 | int *acts_deleted, u32 portid, size_t attr_size, | 1194 | u32 portid, size_t attr_size, struct netlink_ext_ack *extack) |
| 1212 | struct netlink_ext_ack *extack) | ||
| 1213 | { | 1195 | { |
| 1214 | int ret; | 1196 | int ret; |
| 1215 | struct sk_buff *skb; | 1197 | struct sk_buff *skb; |
| @@ -1227,7 +1209,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], | |||
| 1227 | } | 1209 | } |
| 1228 | 1210 | ||
| 1229 | /* now do the delete */ | 1211 | /* now do the delete */ |
| 1230 | ret = tcf_action_delete(net, actions, acts_deleted, extack); | 1212 | ret = tcf_action_delete(net, actions); |
| 1231 | if (ret < 0) { | 1213 | if (ret < 0) { |
| 1232 | NL_SET_ERR_MSG(extack, "Failed to delete TC action"); | 1214 | NL_SET_ERR_MSG(extack, "Failed to delete TC action"); |
| 1233 | kfree_skb(skb); | 1215 | kfree_skb(skb); |
| @@ -1249,8 +1231,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
| 1249 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; | 1231 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; |
| 1250 | struct tc_action *act; | 1232 | struct tc_action *act; |
| 1251 | size_t attr_size = 0; | 1233 | size_t attr_size = 0; |
| 1252 | struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {}; | 1234 | struct tc_action *actions[TCA_ACT_MAX_PRIO] = {}; |
| 1253 | int acts_deleted = 0; | ||
| 1254 | 1235 | ||
| 1255 | ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); | 1236 | ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); |
| 1256 | if (ret < 0) | 1237 | if (ret < 0) |
| @@ -1280,14 +1261,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
| 1280 | if (event == RTM_GETACTION) | 1261 | if (event == RTM_GETACTION) |
| 1281 | ret = tcf_get_notify(net, portid, n, actions, event, extack); | 1262 | ret = tcf_get_notify(net, portid, n, actions, event, extack); |
| 1282 | else { /* delete */ | 1263 | else { /* delete */ |
| 1283 | ret = tcf_del_notify(net, n, actions, &acts_deleted, portid, | 1264 | ret = tcf_del_notify(net, n, actions, portid, attr_size, extack); |
| 1284 | attr_size, extack); | ||
| 1285 | if (ret) | 1265 | if (ret) |
| 1286 | goto err; | 1266 | goto err; |
| 1287 | return ret; | 1267 | return 0; |
| 1288 | } | 1268 | } |
| 1289 | err: | 1269 | err: |
| 1290 | tcf_action_put_many(&actions[acts_deleted]); | 1270 | tcf_action_put_many(actions); |
| 1291 | return ret; | 1271 | return ret; |
| 1292 | } | 1272 | } |
| 1293 | 1273 | ||
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index d30b23e42436..0c68bc9cf0b4 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c | |||
| @@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index, | |||
| 395 | return tcf_idr_search(tn, a, index); | 395 | return tcf_idr_search(tn, a, index); |
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | static int tcf_bpf_delete(struct net *net, u32 index) | ||
| 399 | { | ||
| 400 | struct tc_action_net *tn = net_generic(net, bpf_net_id); | ||
| 401 | |||
| 402 | return tcf_idr_delete_index(tn, index); | ||
| 403 | } | ||
| 404 | |||
| 405 | static struct tc_action_ops act_bpf_ops __read_mostly = { | 398 | static struct tc_action_ops act_bpf_ops __read_mostly = { |
| 406 | .kind = "bpf", | 399 | .kind = "bpf", |
| 407 | .type = TCA_ACT_BPF, | 400 | .type = TCA_ACT_BPF, |
| @@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = { | |||
| 412 | .init = tcf_bpf_init, | 405 | .init = tcf_bpf_init, |
| 413 | .walk = tcf_bpf_walker, | 406 | .walk = tcf_bpf_walker, |
| 414 | .lookup = tcf_bpf_search, | 407 | .lookup = tcf_bpf_search, |
| 415 | .delete = tcf_bpf_delete, | ||
| 416 | .size = sizeof(struct tcf_bpf), | 408 | .size = sizeof(struct tcf_bpf), |
| 417 | }; | 409 | }; |
| 418 | 410 | ||
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 54c0bf54f2ac..6f0f273f1139 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c | |||
| @@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index, | |||
| 198 | return tcf_idr_search(tn, a, index); | 198 | return tcf_idr_search(tn, a, index); |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | static int tcf_connmark_delete(struct net *net, u32 index) | ||
| 202 | { | ||
| 203 | struct tc_action_net *tn = net_generic(net, connmark_net_id); | ||
| 204 | |||
| 205 | return tcf_idr_delete_index(tn, index); | ||
| 206 | } | ||
| 207 | |||
| 208 | static struct tc_action_ops act_connmark_ops = { | 201 | static struct tc_action_ops act_connmark_ops = { |
| 209 | .kind = "connmark", | 202 | .kind = "connmark", |
| 210 | .type = TCA_ACT_CONNMARK, | 203 | .type = TCA_ACT_CONNMARK, |
| @@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = { | |||
| 214 | .init = tcf_connmark_init, | 207 | .init = tcf_connmark_init, |
| 215 | .walk = tcf_connmark_walker, | 208 | .walk = tcf_connmark_walker, |
| 216 | .lookup = tcf_connmark_search, | 209 | .lookup = tcf_connmark_search, |
| 217 | .delete = tcf_connmark_delete, | ||
| 218 | .size = sizeof(struct tcf_connmark_info), | 210 | .size = sizeof(struct tcf_connmark_info), |
| 219 | }; | 211 | }; |
| 220 | 212 | ||
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index e698d3fe2080..b8a67ae3105a 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
| @@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act) | |||
| 659 | return nla_total_size(sizeof(struct tc_csum)); | 659 | return nla_total_size(sizeof(struct tc_csum)); |
| 660 | } | 660 | } |
| 661 | 661 | ||
| 662 | static int tcf_csum_delete(struct net *net, u32 index) | ||
| 663 | { | ||
| 664 | struct tc_action_net *tn = net_generic(net, csum_net_id); | ||
| 665 | |||
| 666 | return tcf_idr_delete_index(tn, index); | ||
| 667 | } | ||
| 668 | |||
| 669 | static struct tc_action_ops act_csum_ops = { | 662 | static struct tc_action_ops act_csum_ops = { |
| 670 | .kind = "csum", | 663 | .kind = "csum", |
| 671 | .type = TCA_ACT_CSUM, | 664 | .type = TCA_ACT_CSUM, |
| @@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = { | |||
| 677 | .walk = tcf_csum_walker, | 670 | .walk = tcf_csum_walker, |
| 678 | .lookup = tcf_csum_search, | 671 | .lookup = tcf_csum_search, |
| 679 | .get_fill_size = tcf_csum_get_fill_size, | 672 | .get_fill_size = tcf_csum_get_fill_size, |
| 680 | .delete = tcf_csum_delete, | ||
| 681 | .size = sizeof(struct tcf_csum), | 673 | .size = sizeof(struct tcf_csum), |
| 682 | }; | 674 | }; |
| 683 | 675 | ||
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 6a3f25a8ffb3..cd1d9bd32ef9 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
| @@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act) | |||
| 243 | return sz; | 243 | return sz; |
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | static int tcf_gact_delete(struct net *net, u32 index) | ||
| 247 | { | ||
| 248 | struct tc_action_net *tn = net_generic(net, gact_net_id); | ||
| 249 | |||
| 250 | return tcf_idr_delete_index(tn, index); | ||
| 251 | } | ||
| 252 | |||
| 253 | static struct tc_action_ops act_gact_ops = { | 246 | static struct tc_action_ops act_gact_ops = { |
| 254 | .kind = "gact", | 247 | .kind = "gact", |
| 255 | .type = TCA_ACT_GACT, | 248 | .type = TCA_ACT_GACT, |
| @@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = { | |||
| 261 | .walk = tcf_gact_walker, | 254 | .walk = tcf_gact_walker, |
| 262 | .lookup = tcf_gact_search, | 255 | .lookup = tcf_gact_search, |
| 263 | .get_fill_size = tcf_gact_get_fill_size, | 256 | .get_fill_size = tcf_gact_get_fill_size, |
| 264 | .delete = tcf_gact_delete, | ||
| 265 | .size = sizeof(struct tcf_gact), | 257 | .size = sizeof(struct tcf_gact), |
| 266 | }; | 258 | }; |
| 267 | 259 | ||
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index d1081bdf1bdb..196430aefe87 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c | |||
| @@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid) | |||
| 167 | { | 167 | { |
| 168 | struct tcf_meta_ops *o; | 168 | struct tcf_meta_ops *o; |
| 169 | 169 | ||
| 170 | read_lock_bh(&ife_mod_lock); | 170 | read_lock(&ife_mod_lock); |
| 171 | list_for_each_entry(o, &ifeoplist, list) { | 171 | list_for_each_entry(o, &ifeoplist, list) { |
| 172 | if (o->metaid == metaid) { | 172 | if (o->metaid == metaid) { |
| 173 | if (!try_module_get(o->owner)) | 173 | if (!try_module_get(o->owner)) |
| 174 | o = NULL; | 174 | o = NULL; |
| 175 | read_unlock_bh(&ife_mod_lock); | 175 | read_unlock(&ife_mod_lock); |
| 176 | return o; | 176 | return o; |
| 177 | } | 177 | } |
| 178 | } | 178 | } |
| 179 | read_unlock_bh(&ife_mod_lock); | 179 | read_unlock(&ife_mod_lock); |
| 180 | 180 | ||
| 181 | return NULL; | 181 | return NULL; |
| 182 | } | 182 | } |
| @@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops) | |||
| 190 | !mops->get || !mops->alloc) | 190 | !mops->get || !mops->alloc) |
| 191 | return -EINVAL; | 191 | return -EINVAL; |
| 192 | 192 | ||
| 193 | write_lock_bh(&ife_mod_lock); | 193 | write_lock(&ife_mod_lock); |
| 194 | 194 | ||
| 195 | list_for_each_entry(m, &ifeoplist, list) { | 195 | list_for_each_entry(m, &ifeoplist, list) { |
| 196 | if (m->metaid == mops->metaid || | 196 | if (m->metaid == mops->metaid || |
| 197 | (strcmp(mops->name, m->name) == 0)) { | 197 | (strcmp(mops->name, m->name) == 0)) { |
| 198 | write_unlock_bh(&ife_mod_lock); | 198 | write_unlock(&ife_mod_lock); |
| 199 | return -EEXIST; | 199 | return -EEXIST; |
| 200 | } | 200 | } |
| 201 | } | 201 | } |
| @@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops) | |||
| 204 | mops->release = ife_release_meta_gen; | 204 | mops->release = ife_release_meta_gen; |
| 205 | 205 | ||
| 206 | list_add_tail(&mops->list, &ifeoplist); | 206 | list_add_tail(&mops->list, &ifeoplist); |
| 207 | write_unlock_bh(&ife_mod_lock); | 207 | write_unlock(&ife_mod_lock); |
| 208 | return 0; | 208 | return 0; |
| 209 | } | 209 | } |
| 210 | EXPORT_SYMBOL_GPL(unregister_ife_op); | 210 | EXPORT_SYMBOL_GPL(unregister_ife_op); |
| @@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops) | |||
| 214 | struct tcf_meta_ops *m; | 214 | struct tcf_meta_ops *m; |
| 215 | int err = -ENOENT; | 215 | int err = -ENOENT; |
| 216 | 216 | ||
| 217 | write_lock_bh(&ife_mod_lock); | 217 | write_lock(&ife_mod_lock); |
| 218 | list_for_each_entry(m, &ifeoplist, list) { | 218 | list_for_each_entry(m, &ifeoplist, list) { |
| 219 | if (m->metaid == mops->metaid) { | 219 | if (m->metaid == mops->metaid) { |
| 220 | list_del(&mops->list); | 220 | list_del(&mops->list); |
| @@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops) | |||
| 222 | break; | 222 | break; |
| 223 | } | 223 | } |
| 224 | } | 224 | } |
| 225 | write_unlock_bh(&ife_mod_lock); | 225 | write_unlock(&ife_mod_lock); |
| 226 | 226 | ||
| 227 | return err; | 227 | return err; |
| 228 | } | 228 | } |
| @@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid) | |||
| 265 | #endif | 265 | #endif |
| 266 | 266 | ||
| 267 | /* called when adding new meta information | 267 | /* called when adding new meta information |
| 268 | * under ife->tcf_lock for existing action | ||
| 269 | */ | 268 | */ |
| 270 | static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | 269 | static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held) |
| 271 | void *val, int len, bool exists, | ||
| 272 | bool rtnl_held) | ||
| 273 | { | 270 | { |
| 274 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); | 271 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); |
| 275 | int ret = 0; | 272 | int ret = 0; |
| @@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | |||
| 277 | if (!ops) { | 274 | if (!ops) { |
| 278 | ret = -ENOENT; | 275 | ret = -ENOENT; |
| 279 | #ifdef CONFIG_MODULES | 276 | #ifdef CONFIG_MODULES |
| 280 | if (exists) | ||
| 281 | spin_unlock_bh(&ife->tcf_lock); | ||
| 282 | if (rtnl_held) | 277 | if (rtnl_held) |
| 283 | rtnl_unlock(); | 278 | rtnl_unlock(); |
| 284 | request_module("ife-meta-%s", ife_meta_id2name(metaid)); | 279 | request_module("ife-meta-%s", ife_meta_id2name(metaid)); |
| 285 | if (rtnl_held) | 280 | if (rtnl_held) |
| 286 | rtnl_lock(); | 281 | rtnl_lock(); |
| 287 | if (exists) | ||
| 288 | spin_lock_bh(&ife->tcf_lock); | ||
| 289 | ops = find_ife_oplist(metaid); | 282 | ops = find_ife_oplist(metaid); |
| 290 | #endif | 283 | #endif |
| 291 | } | 284 | } |
| @@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | |||
| 302 | } | 295 | } |
| 303 | 296 | ||
| 304 | /* called when adding new meta information | 297 | /* called when adding new meta information |
| 305 | * under ife->tcf_lock for existing action | ||
| 306 | */ | 298 | */ |
| 307 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | 299 | static int __add_metainfo(const struct tcf_meta_ops *ops, |
| 308 | int len, bool atomic) | 300 | struct tcf_ife_info *ife, u32 metaid, void *metaval, |
| 301 | int len, bool atomic, bool exists) | ||
| 309 | { | 302 | { |
| 310 | struct tcf_meta_info *mi = NULL; | 303 | struct tcf_meta_info *mi = NULL; |
| 311 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); | ||
| 312 | int ret = 0; | 304 | int ret = 0; |
| 313 | 305 | ||
| 314 | if (!ops) | ||
| 315 | return -ENOENT; | ||
| 316 | |||
| 317 | mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); | 306 | mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); |
| 318 | if (!mi) { | 307 | if (!mi) |
| 319 | /*put back what find_ife_oplist took */ | ||
| 320 | module_put(ops->owner); | ||
| 321 | return -ENOMEM; | 308 | return -ENOMEM; |
| 322 | } | ||
| 323 | 309 | ||
| 324 | mi->metaid = metaid; | 310 | mi->metaid = metaid; |
| 325 | mi->ops = ops; | 311 | mi->ops = ops; |
| @@ -327,29 +313,47 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | |||
| 327 | ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL); | 313 | ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL); |
| 328 | if (ret != 0) { | 314 | if (ret != 0) { |
| 329 | kfree(mi); | 315 | kfree(mi); |
| 330 | module_put(ops->owner); | ||
| 331 | return ret; | 316 | return ret; |
| 332 | } | 317 | } |
| 333 | } | 318 | } |
| 334 | 319 | ||
| 320 | if (exists) | ||
| 321 | spin_lock_bh(&ife->tcf_lock); | ||
| 335 | list_add_tail(&mi->metalist, &ife->metalist); | 322 | list_add_tail(&mi->metalist, &ife->metalist); |
| 323 | if (exists) | ||
| 324 | spin_unlock_bh(&ife->tcf_lock); | ||
| 325 | |||
| 326 | return ret; | ||
| 327 | } | ||
| 328 | |||
| 329 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | ||
| 330 | int len, bool exists) | ||
| 331 | { | ||
| 332 | const struct tcf_meta_ops *ops = find_ife_oplist(metaid); | ||
| 333 | int ret; | ||
| 336 | 334 | ||
| 335 | if (!ops) | ||
| 336 | return -ENOENT; | ||
| 337 | ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists); | ||
| 338 | if (ret) | ||
| 339 | /*put back what find_ife_oplist took */ | ||
| 340 | module_put(ops->owner); | ||
| 337 | return ret; | 341 | return ret; |
| 338 | } | 342 | } |
| 339 | 343 | ||
| 340 | static int use_all_metadata(struct tcf_ife_info *ife) | 344 | static int use_all_metadata(struct tcf_ife_info *ife, bool exists) |
| 341 | { | 345 | { |
| 342 | struct tcf_meta_ops *o; | 346 | struct tcf_meta_ops *o; |
| 343 | int rc = 0; | 347 | int rc = 0; |
| 344 | int installed = 0; | 348 | int installed = 0; |
| 345 | 349 | ||
| 346 | read_lock_bh(&ife_mod_lock); | 350 | read_lock(&ife_mod_lock); |
| 347 | list_for_each_entry(o, &ifeoplist, list) { | 351 | list_for_each_entry(o, &ifeoplist, list) { |
| 348 | rc = add_metainfo(ife, o->metaid, NULL, 0, true); | 352 | rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists); |
| 349 | if (rc == 0) | 353 | if (rc == 0) |
| 350 | installed += 1; | 354 | installed += 1; |
| 351 | } | 355 | } |
| 352 | read_unlock_bh(&ife_mod_lock); | 356 | read_unlock(&ife_mod_lock); |
| 353 | 357 | ||
| 354 | if (installed) | 358 | if (installed) |
| 355 | return 0; | 359 | return 0; |
| @@ -422,7 +426,6 @@ static void tcf_ife_cleanup(struct tc_action *a) | |||
| 422 | kfree_rcu(p, rcu); | 426 | kfree_rcu(p, rcu); |
| 423 | } | 427 | } |
| 424 | 428 | ||
| 425 | /* under ife->tcf_lock for existing action */ | ||
| 426 | static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, | 429 | static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, |
| 427 | bool exists, bool rtnl_held) | 430 | bool exists, bool rtnl_held) |
| 428 | { | 431 | { |
| @@ -436,8 +439,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, | |||
| 436 | val = nla_data(tb[i]); | 439 | val = nla_data(tb[i]); |
| 437 | len = nla_len(tb[i]); | 440 | len = nla_len(tb[i]); |
| 438 | 441 | ||
| 439 | rc = load_metaops_and_vet(ife, i, val, len, exists, | 442 | rc = load_metaops_and_vet(i, val, len, rtnl_held); |
| 440 | rtnl_held); | ||
| 441 | if (rc != 0) | 443 | if (rc != 0) |
| 442 | return rc; | 444 | return rc; |
| 443 | 445 | ||
| @@ -540,8 +542,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, | |||
| 540 | p->eth_type = ife_type; | 542 | p->eth_type = ife_type; |
| 541 | } | 543 | } |
| 542 | 544 | ||
| 543 | if (exists) | ||
| 544 | spin_lock_bh(&ife->tcf_lock); | ||
| 545 | 545 | ||
| 546 | if (ret == ACT_P_CREATED) | 546 | if (ret == ACT_P_CREATED) |
| 547 | INIT_LIST_HEAD(&ife->metalist); | 547 | INIT_LIST_HEAD(&ife->metalist); |
| @@ -551,10 +551,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, | |||
| 551 | NULL, NULL); | 551 | NULL, NULL); |
| 552 | if (err) { | 552 | if (err) { |
| 553 | metadata_parse_err: | 553 | metadata_parse_err: |
| 554 | if (exists) | ||
| 555 | spin_unlock_bh(&ife->tcf_lock); | ||
| 556 | tcf_idr_release(*a, bind); | 554 | tcf_idr_release(*a, bind); |
| 557 | |||
| 558 | kfree(p); | 555 | kfree(p); |
| 559 | return err; | 556 | return err; |
| 560 | } | 557 | } |
| @@ -569,17 +566,16 @@ metadata_parse_err: | |||
| 569 | * as we can. You better have at least one else we are | 566 | * as we can. You better have at least one else we are |
| 570 | * going to bail out | 567 | * going to bail out |
| 571 | */ | 568 | */ |
| 572 | err = use_all_metadata(ife); | 569 | err = use_all_metadata(ife, exists); |
| 573 | if (err) { | 570 | if (err) { |
| 574 | if (exists) | ||
| 575 | spin_unlock_bh(&ife->tcf_lock); | ||
| 576 | tcf_idr_release(*a, bind); | 571 | tcf_idr_release(*a, bind); |
| 577 | |||
| 578 | kfree(p); | 572 | kfree(p); |
| 579 | return err; | 573 | return err; |
| 580 | } | 574 | } |
| 581 | } | 575 | } |
| 582 | 576 | ||
| 577 | if (exists) | ||
| 578 | spin_lock_bh(&ife->tcf_lock); | ||
| 583 | ife->tcf_action = parm->action; | 579 | ife->tcf_action = parm->action; |
| 584 | /* protected by tcf_lock when modifying existing action */ | 580 | /* protected by tcf_lock when modifying existing action */ |
| 585 | rcu_swap_protected(ife->params, p, 1); | 581 | rcu_swap_protected(ife->params, p, 1); |
| @@ -853,13 +849,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index, | |||
| 853 | return tcf_idr_search(tn, a, index); | 849 | return tcf_idr_search(tn, a, index); |
| 854 | } | 850 | } |
| 855 | 851 | ||
| 856 | static int tcf_ife_delete(struct net *net, u32 index) | ||
| 857 | { | ||
| 858 | struct tc_action_net *tn = net_generic(net, ife_net_id); | ||
| 859 | |||
| 860 | return tcf_idr_delete_index(tn, index); | ||
| 861 | } | ||
| 862 | |||
| 863 | static struct tc_action_ops act_ife_ops = { | 852 | static struct tc_action_ops act_ife_ops = { |
| 864 | .kind = "ife", | 853 | .kind = "ife", |
| 865 | .type = TCA_ACT_IFE, | 854 | .type = TCA_ACT_IFE, |
| @@ -870,7 +859,6 @@ static struct tc_action_ops act_ife_ops = { | |||
| 870 | .init = tcf_ife_init, | 859 | .init = tcf_ife_init, |
| 871 | .walk = tcf_ife_walker, | 860 | .walk = tcf_ife_walker, |
| 872 | .lookup = tcf_ife_search, | 861 | .lookup = tcf_ife_search, |
| 873 | .delete = tcf_ife_delete, | ||
| 874 | .size = sizeof(struct tcf_ife_info), | 862 | .size = sizeof(struct tcf_ife_info), |
| 875 | }; | 863 | }; |
| 876 | 864 | ||
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 51f235bbeb5b..23273b5303fd 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
| @@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index, | |||
| 337 | return tcf_idr_search(tn, a, index); | 337 | return tcf_idr_search(tn, a, index); |
| 338 | } | 338 | } |
| 339 | 339 | ||
| 340 | static int tcf_ipt_delete(struct net *net, u32 index) | ||
| 341 | { | ||
| 342 | struct tc_action_net *tn = net_generic(net, ipt_net_id); | ||
| 343 | |||
| 344 | return tcf_idr_delete_index(tn, index); | ||
| 345 | } | ||
| 346 | |||
| 347 | static struct tc_action_ops act_ipt_ops = { | 340 | static struct tc_action_ops act_ipt_ops = { |
| 348 | .kind = "ipt", | 341 | .kind = "ipt", |
| 349 | .type = TCA_ACT_IPT, | 342 | .type = TCA_ACT_IPT, |
| @@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = { | |||
| 354 | .init = tcf_ipt_init, | 347 | .init = tcf_ipt_init, |
| 355 | .walk = tcf_ipt_walker, | 348 | .walk = tcf_ipt_walker, |
| 356 | .lookup = tcf_ipt_search, | 349 | .lookup = tcf_ipt_search, |
| 357 | .delete = tcf_ipt_delete, | ||
| 358 | .size = sizeof(struct tcf_ipt), | 350 | .size = sizeof(struct tcf_ipt), |
| 359 | }; | 351 | }; |
| 360 | 352 | ||
| @@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index, | |||
| 395 | return tcf_idr_search(tn, a, index); | 387 | return tcf_idr_search(tn, a, index); |
| 396 | } | 388 | } |
| 397 | 389 | ||
| 398 | static int tcf_xt_delete(struct net *net, u32 index) | ||
| 399 | { | ||
| 400 | struct tc_action_net *tn = net_generic(net, xt_net_id); | ||
| 401 | |||
| 402 | return tcf_idr_delete_index(tn, index); | ||
| 403 | } | ||
| 404 | |||
| 405 | static struct tc_action_ops act_xt_ops = { | 390 | static struct tc_action_ops act_xt_ops = { |
| 406 | .kind = "xt", | 391 | .kind = "xt", |
| 407 | .type = TCA_ACT_XT, | 392 | .type = TCA_ACT_XT, |
| @@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = { | |||
| 412 | .init = tcf_xt_init, | 397 | .init = tcf_xt_init, |
| 413 | .walk = tcf_xt_walker, | 398 | .walk = tcf_xt_walker, |
| 414 | .lookup = tcf_xt_search, | 399 | .lookup = tcf_xt_search, |
| 415 | .delete = tcf_xt_delete, | ||
| 416 | .size = sizeof(struct tcf_ipt), | 400 | .size = sizeof(struct tcf_ipt), |
| 417 | }; | 401 | }; |
| 418 | 402 | ||
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 38fd20f10f67..8bf66d0a6800 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
| @@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev) | |||
| 395 | dev_put(dev); | 395 | dev_put(dev); |
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | static int tcf_mirred_delete(struct net *net, u32 index) | ||
| 399 | { | ||
| 400 | struct tc_action_net *tn = net_generic(net, mirred_net_id); | ||
| 401 | |||
| 402 | return tcf_idr_delete_index(tn, index); | ||
| 403 | } | ||
| 404 | |||
| 405 | static struct tc_action_ops act_mirred_ops = { | 398 | static struct tc_action_ops act_mirred_ops = { |
| 406 | .kind = "mirred", | 399 | .kind = "mirred", |
| 407 | .type = TCA_ACT_MIRRED, | 400 | .type = TCA_ACT_MIRRED, |
| @@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = { | |||
| 416 | .size = sizeof(struct tcf_mirred), | 409 | .size = sizeof(struct tcf_mirred), |
| 417 | .get_dev = tcf_mirred_get_dev, | 410 | .get_dev = tcf_mirred_get_dev, |
| 418 | .put_dev = tcf_mirred_put_dev, | 411 | .put_dev = tcf_mirred_put_dev, |
| 419 | .delete = tcf_mirred_delete, | ||
| 420 | }; | 412 | }; |
| 421 | 413 | ||
| 422 | static __net_init int mirred_init_net(struct net *net) | 414 | static __net_init int mirred_init_net(struct net *net) |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 822e903bfc25..4313aa102440 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
| @@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index, | |||
| 300 | return tcf_idr_search(tn, a, index); | 300 | return tcf_idr_search(tn, a, index); |
| 301 | } | 301 | } |
| 302 | 302 | ||
| 303 | static int tcf_nat_delete(struct net *net, u32 index) | ||
| 304 | { | ||
| 305 | struct tc_action_net *tn = net_generic(net, nat_net_id); | ||
| 306 | |||
| 307 | return tcf_idr_delete_index(tn, index); | ||
| 308 | } | ||
| 309 | |||
| 310 | static struct tc_action_ops act_nat_ops = { | 303 | static struct tc_action_ops act_nat_ops = { |
| 311 | .kind = "nat", | 304 | .kind = "nat", |
| 312 | .type = TCA_ACT_NAT, | 305 | .type = TCA_ACT_NAT, |
| @@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = { | |||
| 316 | .init = tcf_nat_init, | 309 | .init = tcf_nat_init, |
| 317 | .walk = tcf_nat_walker, | 310 | .walk = tcf_nat_walker, |
| 318 | .lookup = tcf_nat_search, | 311 | .lookup = tcf_nat_search, |
| 319 | .delete = tcf_nat_delete, | ||
| 320 | .size = sizeof(struct tcf_nat), | 312 | .size = sizeof(struct tcf_nat), |
| 321 | }; | 313 | }; |
| 322 | 314 | ||
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 8a7a7cb94e83..107034070019 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
| @@ -460,13 +460,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index, | |||
| 460 | return tcf_idr_search(tn, a, index); | 460 | return tcf_idr_search(tn, a, index); |
| 461 | } | 461 | } |
| 462 | 462 | ||
| 463 | static int tcf_pedit_delete(struct net *net, u32 index) | ||
| 464 | { | ||
| 465 | struct tc_action_net *tn = net_generic(net, pedit_net_id); | ||
| 466 | |||
| 467 | return tcf_idr_delete_index(tn, index); | ||
| 468 | } | ||
| 469 | |||
| 470 | static struct tc_action_ops act_pedit_ops = { | 463 | static struct tc_action_ops act_pedit_ops = { |
| 471 | .kind = "pedit", | 464 | .kind = "pedit", |
| 472 | .type = TCA_ACT_PEDIT, | 465 | .type = TCA_ACT_PEDIT, |
| @@ -477,7 +470,6 @@ static struct tc_action_ops act_pedit_ops = { | |||
| 477 | .init = tcf_pedit_init, | 470 | .init = tcf_pedit_init, |
| 478 | .walk = tcf_pedit_walker, | 471 | .walk = tcf_pedit_walker, |
| 479 | .lookup = tcf_pedit_search, | 472 | .lookup = tcf_pedit_search, |
| 480 | .delete = tcf_pedit_delete, | ||
| 481 | .size = sizeof(struct tcf_pedit), | 473 | .size = sizeof(struct tcf_pedit), |
| 482 | }; | 474 | }; |
| 483 | 475 | ||
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 06f0742db593..5d8bfa878477 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index, | |||
| 320 | return tcf_idr_search(tn, a, index); | 320 | return tcf_idr_search(tn, a, index); |
| 321 | } | 321 | } |
| 322 | 322 | ||
| 323 | static int tcf_police_delete(struct net *net, u32 index) | ||
| 324 | { | ||
| 325 | struct tc_action_net *tn = net_generic(net, police_net_id); | ||
| 326 | |||
| 327 | return tcf_idr_delete_index(tn, index); | ||
| 328 | } | ||
| 329 | |||
| 330 | MODULE_AUTHOR("Alexey Kuznetsov"); | 323 | MODULE_AUTHOR("Alexey Kuznetsov"); |
| 331 | MODULE_DESCRIPTION("Policing actions"); | 324 | MODULE_DESCRIPTION("Policing actions"); |
| 332 | MODULE_LICENSE("GPL"); | 325 | MODULE_LICENSE("GPL"); |
| @@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = { | |||
| 340 | .init = tcf_police_init, | 333 | .init = tcf_police_init, |
| 341 | .walk = tcf_police_walker, | 334 | .walk = tcf_police_walker, |
| 342 | .lookup = tcf_police_search, | 335 | .lookup = tcf_police_search, |
| 343 | .delete = tcf_police_delete, | ||
| 344 | .size = sizeof(struct tcf_police), | 336 | .size = sizeof(struct tcf_police), |
| 345 | }; | 337 | }; |
| 346 | 338 | ||
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 207b4132d1b0..44e9c00657bc 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c | |||
| @@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index, | |||
| 232 | return tcf_idr_search(tn, a, index); | 232 | return tcf_idr_search(tn, a, index); |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | static int tcf_sample_delete(struct net *net, u32 index) | ||
| 236 | { | ||
| 237 | struct tc_action_net *tn = net_generic(net, sample_net_id); | ||
| 238 | |||
| 239 | return tcf_idr_delete_index(tn, index); | ||
| 240 | } | ||
| 241 | |||
| 242 | static struct tc_action_ops act_sample_ops = { | 235 | static struct tc_action_ops act_sample_ops = { |
| 243 | .kind = "sample", | 236 | .kind = "sample", |
| 244 | .type = TCA_ACT_SAMPLE, | 237 | .type = TCA_ACT_SAMPLE, |
| @@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = { | |||
| 249 | .cleanup = tcf_sample_cleanup, | 242 | .cleanup = tcf_sample_cleanup, |
| 250 | .walk = tcf_sample_walker, | 243 | .walk = tcf_sample_walker, |
| 251 | .lookup = tcf_sample_search, | 244 | .lookup = tcf_sample_search, |
| 252 | .delete = tcf_sample_delete, | ||
| 253 | .size = sizeof(struct tcf_sample), | 245 | .size = sizeof(struct tcf_sample), |
| 254 | }; | 246 | }; |
| 255 | 247 | ||
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index e616523ba3c1..52400d49f81f 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
| @@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index, | |||
| 196 | return tcf_idr_search(tn, a, index); | 196 | return tcf_idr_search(tn, a, index); |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | static int tcf_simp_delete(struct net *net, u32 index) | ||
| 200 | { | ||
| 201 | struct tc_action_net *tn = net_generic(net, simp_net_id); | ||
| 202 | |||
| 203 | return tcf_idr_delete_index(tn, index); | ||
| 204 | } | ||
| 205 | |||
| 206 | static struct tc_action_ops act_simp_ops = { | 199 | static struct tc_action_ops act_simp_ops = { |
| 207 | .kind = "simple", | 200 | .kind = "simple", |
| 208 | .type = TCA_ACT_SIMP, | 201 | .type = TCA_ACT_SIMP, |
| @@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = { | |||
| 213 | .init = tcf_simp_init, | 206 | .init = tcf_simp_init, |
| 214 | .walk = tcf_simp_walker, | 207 | .walk = tcf_simp_walker, |
| 215 | .lookup = tcf_simp_search, | 208 | .lookup = tcf_simp_search, |
| 216 | .delete = tcf_simp_delete, | ||
| 217 | .size = sizeof(struct tcf_defact), | 209 | .size = sizeof(struct tcf_defact), |
| 218 | }; | 210 | }; |
| 219 | 211 | ||
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 926d7bc4a89d..73e44ce2a883 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
| @@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index, | |||
| 299 | return tcf_idr_search(tn, a, index); | 299 | return tcf_idr_search(tn, a, index); |
| 300 | } | 300 | } |
| 301 | 301 | ||
| 302 | static int tcf_skbedit_delete(struct net *net, u32 index) | ||
| 303 | { | ||
| 304 | struct tc_action_net *tn = net_generic(net, skbedit_net_id); | ||
| 305 | |||
| 306 | return tcf_idr_delete_index(tn, index); | ||
| 307 | } | ||
| 308 | |||
| 309 | static struct tc_action_ops act_skbedit_ops = { | 302 | static struct tc_action_ops act_skbedit_ops = { |
| 310 | .kind = "skbedit", | 303 | .kind = "skbedit", |
| 311 | .type = TCA_ACT_SKBEDIT, | 304 | .type = TCA_ACT_SKBEDIT, |
| @@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = { | |||
| 316 | .cleanup = tcf_skbedit_cleanup, | 309 | .cleanup = tcf_skbedit_cleanup, |
| 317 | .walk = tcf_skbedit_walker, | 310 | .walk = tcf_skbedit_walker, |
| 318 | .lookup = tcf_skbedit_search, | 311 | .lookup = tcf_skbedit_search, |
| 319 | .delete = tcf_skbedit_delete, | ||
| 320 | .size = sizeof(struct tcf_skbedit), | 312 | .size = sizeof(struct tcf_skbedit), |
| 321 | }; | 313 | }; |
| 322 | 314 | ||
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index d6a1af0c4171..588077fafd6c 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c | |||
| @@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index, | |||
| 259 | return tcf_idr_search(tn, a, index); | 259 | return tcf_idr_search(tn, a, index); |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | static int tcf_skbmod_delete(struct net *net, u32 index) | ||
| 263 | { | ||
| 264 | struct tc_action_net *tn = net_generic(net, skbmod_net_id); | ||
| 265 | |||
| 266 | return tcf_idr_delete_index(tn, index); | ||
| 267 | } | ||
| 268 | |||
| 269 | static struct tc_action_ops act_skbmod_ops = { | 262 | static struct tc_action_ops act_skbmod_ops = { |
| 270 | .kind = "skbmod", | 263 | .kind = "skbmod", |
| 271 | .type = TCA_ACT_SKBMOD, | 264 | .type = TCA_ACT_SKBMOD, |
| @@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = { | |||
| 276 | .cleanup = tcf_skbmod_cleanup, | 269 | .cleanup = tcf_skbmod_cleanup, |
| 277 | .walk = tcf_skbmod_walker, | 270 | .walk = tcf_skbmod_walker, |
| 278 | .lookup = tcf_skbmod_search, | 271 | .lookup = tcf_skbmod_search, |
| 279 | .delete = tcf_skbmod_delete, | ||
| 280 | .size = sizeof(struct tcf_skbmod), | 272 | .size = sizeof(struct tcf_skbmod), |
| 281 | }; | 273 | }; |
| 282 | 274 | ||
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 8f09cf08d8fe..420759153d5f 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
| @@ -548,13 +548,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index, | |||
| 548 | return tcf_idr_search(tn, a, index); | 548 | return tcf_idr_search(tn, a, index); |
| 549 | } | 549 | } |
| 550 | 550 | ||
| 551 | static int tunnel_key_delete(struct net *net, u32 index) | ||
| 552 | { | ||
| 553 | struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); | ||
| 554 | |||
| 555 | return tcf_idr_delete_index(tn, index); | ||
| 556 | } | ||
| 557 | |||
| 558 | static struct tc_action_ops act_tunnel_key_ops = { | 551 | static struct tc_action_ops act_tunnel_key_ops = { |
| 559 | .kind = "tunnel_key", | 552 | .kind = "tunnel_key", |
| 560 | .type = TCA_ACT_TUNNEL_KEY, | 553 | .type = TCA_ACT_TUNNEL_KEY, |
| @@ -565,7 +558,6 @@ static struct tc_action_ops act_tunnel_key_ops = { | |||
| 565 | .cleanup = tunnel_key_release, | 558 | .cleanup = tunnel_key_release, |
| 566 | .walk = tunnel_key_walker, | 559 | .walk = tunnel_key_walker, |
| 567 | .lookup = tunnel_key_search, | 560 | .lookup = tunnel_key_search, |
| 568 | .delete = tunnel_key_delete, | ||
| 569 | .size = sizeof(struct tcf_tunnel_key), | 561 | .size = sizeof(struct tcf_tunnel_key), |
| 570 | }; | 562 | }; |
| 571 | 563 | ||
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 209e70ad2c09..033d273afe50 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c | |||
| @@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index, | |||
| 296 | return tcf_idr_search(tn, a, index); | 296 | return tcf_idr_search(tn, a, index); |
| 297 | } | 297 | } |
| 298 | 298 | ||
| 299 | static int tcf_vlan_delete(struct net *net, u32 index) | ||
| 300 | { | ||
| 301 | struct tc_action_net *tn = net_generic(net, vlan_net_id); | ||
| 302 | |||
| 303 | return tcf_idr_delete_index(tn, index); | ||
| 304 | } | ||
| 305 | |||
| 306 | static struct tc_action_ops act_vlan_ops = { | 299 | static struct tc_action_ops act_vlan_ops = { |
| 307 | .kind = "vlan", | 300 | .kind = "vlan", |
| 308 | .type = TCA_ACT_VLAN, | 301 | .type = TCA_ACT_VLAN, |
| @@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = { | |||
| 313 | .cleanup = tcf_vlan_cleanup, | 306 | .cleanup = tcf_vlan_cleanup, |
| 314 | .walk = tcf_vlan_walker, | 307 | .walk = tcf_vlan_walker, |
| 315 | .lookup = tcf_vlan_search, | 308 | .lookup = tcf_vlan_search, |
| 316 | .delete = tcf_vlan_delete, | ||
| 317 | .size = sizeof(struct tcf_vlan), | 309 | .size = sizeof(struct tcf_vlan), |
| 318 | }; | 310 | }; |
| 319 | 311 | ||
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d5d2a6dc3921..f218ccf1e2d9 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
| @@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 914 | struct nlattr *opt = tca[TCA_OPTIONS]; | 914 | struct nlattr *opt = tca[TCA_OPTIONS]; |
| 915 | struct nlattr *tb[TCA_U32_MAX + 1]; | 915 | struct nlattr *tb[TCA_U32_MAX + 1]; |
| 916 | u32 htid, flags = 0; | 916 | u32 htid, flags = 0; |
| 917 | size_t sel_size; | ||
| 917 | int err; | 918 | int err; |
| 918 | #ifdef CONFIG_CLS_U32_PERF | 919 | #ifdef CONFIG_CLS_U32_PERF |
| 919 | size_t size; | 920 | size_t size; |
| @@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 1076 | } | 1077 | } |
| 1077 | 1078 | ||
| 1078 | s = nla_data(tb[TCA_U32_SEL]); | 1079 | s = nla_data(tb[TCA_U32_SEL]); |
| 1080 | sel_size = struct_size(s, keys, s->nkeys); | ||
| 1081 | if (nla_len(tb[TCA_U32_SEL]) < sel_size) { | ||
| 1082 | err = -EINVAL; | ||
| 1083 | goto erridr; | ||
| 1084 | } | ||
| 1079 | 1085 | ||
| 1080 | n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); | 1086 | n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL); |
| 1081 | if (n == NULL) { | 1087 | if (n == NULL) { |
| 1082 | err = -ENOBUFS; | 1088 | err = -ENOBUFS; |
| 1083 | goto erridr; | 1089 | goto erridr; |
| @@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 1092 | } | 1098 | } |
| 1093 | #endif | 1099 | #endif |
| 1094 | 1100 | ||
| 1095 | memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); | 1101 | memcpy(&n->sel, s, sel_size); |
| 1096 | RCU_INIT_POINTER(n->ht_up, ht); | 1102 | RCU_INIT_POINTER(n->ht_up, ht); |
| 1097 | n->handle = handle; | 1103 | n->handle = handle; |
| 1098 | n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; | 1104 | n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; |
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 35fc7252187c..c07c30b916d5 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c | |||
| @@ -64,7 +64,6 @@ | |||
| 64 | #include <linux/vmalloc.h> | 64 | #include <linux/vmalloc.h> |
| 65 | #include <linux/reciprocal_div.h> | 65 | #include <linux/reciprocal_div.h> |
| 66 | #include <net/netlink.h> | 66 | #include <net/netlink.h> |
| 67 | #include <linux/version.h> | ||
| 68 | #include <linux/if_vlan.h> | 67 | #include <linux/if_vlan.h> |
| 69 | #include <net/pkt_sched.h> | 68 | #include <net/pkt_sched.h> |
| 70 | #include <net/pkt_cls.h> | 69 | #include <net/pkt_cls.h> |
| @@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode) | |||
| 621 | } | 620 | } |
| 622 | 621 | ||
| 623 | static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, | 622 | static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, |
| 624 | int flow_mode) | 623 | int flow_mode, u16 flow_override, u16 host_override) |
| 625 | { | 624 | { |
| 626 | u32 flow_hash = 0, srchost_hash, dsthost_hash; | 625 | u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0; |
| 627 | u16 reduced_hash, srchost_idx, dsthost_idx; | 626 | u16 reduced_hash, srchost_idx, dsthost_idx; |
| 628 | struct flow_keys keys, host_keys; | 627 | struct flow_keys keys, host_keys; |
| 629 | 628 | ||
| 630 | if (unlikely(flow_mode == CAKE_FLOW_NONE)) | 629 | if (unlikely(flow_mode == CAKE_FLOW_NONE)) |
| 631 | return 0; | 630 | return 0; |
| 632 | 631 | ||
| 632 | /* If both overrides are set we can skip packet dissection entirely */ | ||
| 633 | if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) && | ||
| 634 | (host_override || !(flow_mode & CAKE_FLOW_HOSTS))) | ||
| 635 | goto skip_hash; | ||
| 636 | |||
| 633 | skb_flow_dissect_flow_keys(skb, &keys, | 637 | skb_flow_dissect_flow_keys(skb, &keys, |
| 634 | FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); | 638 | FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); |
| 635 | 639 | ||
| @@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, | |||
| 676 | if (flow_mode & CAKE_FLOW_FLOWS) | 680 | if (flow_mode & CAKE_FLOW_FLOWS) |
| 677 | flow_hash = flow_hash_from_keys(&keys); | 681 | flow_hash = flow_hash_from_keys(&keys); |
| 678 | 682 | ||
| 683 | skip_hash: | ||
| 684 | if (flow_override) | ||
| 685 | flow_hash = flow_override - 1; | ||
| 686 | if (host_override) { | ||
| 687 | dsthost_hash = host_override - 1; | ||
| 688 | srchost_hash = host_override - 1; | ||
| 689 | } | ||
| 690 | |||
| 679 | if (!(flow_mode & CAKE_FLOW_FLOWS)) { | 691 | if (!(flow_mode & CAKE_FLOW_FLOWS)) { |
| 680 | if (flow_mode & CAKE_FLOW_SRC_IP) | 692 | if (flow_mode & CAKE_FLOW_SRC_IP) |
| 681 | flow_hash ^= srchost_hash; | 693 | flow_hash ^= srchost_hash; |
| @@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, | |||
| 1571 | struct cake_sched_data *q = qdisc_priv(sch); | 1583 | struct cake_sched_data *q = qdisc_priv(sch); |
| 1572 | struct tcf_proto *filter; | 1584 | struct tcf_proto *filter; |
| 1573 | struct tcf_result res; | 1585 | struct tcf_result res; |
| 1574 | u32 flow = 0; | 1586 | u16 flow = 0, host = 0; |
| 1575 | int result; | 1587 | int result; |
| 1576 | 1588 | ||
| 1577 | filter = rcu_dereference_bh(q->filter_list); | 1589 | filter = rcu_dereference_bh(q->filter_list); |
| @@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, | |||
| 1595 | #endif | 1607 | #endif |
| 1596 | if (TC_H_MIN(res.classid) <= CAKE_QUEUES) | 1608 | if (TC_H_MIN(res.classid) <= CAKE_QUEUES) |
| 1597 | flow = TC_H_MIN(res.classid); | 1609 | flow = TC_H_MIN(res.classid); |
| 1610 | if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16)) | ||
| 1611 | host = TC_H_MAJ(res.classid) >> 16; | ||
| 1598 | } | 1612 | } |
| 1599 | hash: | 1613 | hash: |
| 1600 | *t = cake_select_tin(sch, skb); | 1614 | *t = cake_select_tin(sch, skb); |
| 1601 | return flow ?: cake_hash(*t, skb, flow_mode) + 1; | 1615 | return cake_hash(*t, skb, flow_mode, flow, host) + 1; |
| 1602 | } | 1616 | } |
| 1603 | 1617 | ||
| 1604 | static void cake_reconfigure(struct Qdisc *sch); | 1618 | static void cake_reconfigure(struct Qdisc *sch); |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 93c0c225ab34..180b6640e531 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
| @@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk) | |||
| 213 | { | 213 | { |
| 214 | struct tls_context *ctx = tls_get_ctx(sk); | 214 | struct tls_context *ctx = tls_get_ctx(sk); |
| 215 | 215 | ||
| 216 | /* We are already sending pages, ignore notification */ | 216 | /* If in_tcp_sendpages call lower protocol write space handler |
| 217 | if (ctx->in_tcp_sendpages) | 217 | * to ensure we wake up any waiting operations there. For example |
| 218 | * if do_tcp_sendpages where to call sk_wait_event. | ||
| 219 | */ | ||
| 220 | if (ctx->in_tcp_sendpages) { | ||
| 221 | ctx->sk_write_space(sk); | ||
| 218 | return; | 222 | return; |
| 223 | } | ||
| 219 | 224 | ||
| 220 | if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { | 225 | if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { |
| 221 | gfp_t sk_allocation = sk->sk_allocation; | 226 | gfp_t sk_allocation = sk->sk_allocation; |
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 911ca6d3cb5a..bfe2dbea480b 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
| @@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, | |||
| 74 | return 0; | 74 | return 0; |
| 75 | 75 | ||
| 76 | if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) | 76 | if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) |
| 77 | return force_zc ? -ENOTSUPP : 0; /* fail or fallback */ | 77 | return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */ |
| 78 | 78 | ||
| 79 | bpf.command = XDP_QUERY_XSK_UMEM; | 79 | bpf.command = XDP_QUERY_XSK_UMEM; |
| 80 | 80 | ||
| 81 | rtnl_lock(); | 81 | rtnl_lock(); |
| 82 | err = xdp_umem_query(dev, queue_id); | 82 | err = xdp_umem_query(dev, queue_id); |
| 83 | if (err) { | 83 | if (err) { |
| 84 | err = err < 0 ? -ENOTSUPP : -EBUSY; | 84 | err = err < 0 ? -EOPNOTSUPP : -EBUSY; |
| 85 | goto err_rtnl_unlock; | 85 | goto err_rtnl_unlock; |
| 86 | } | 86 | } |
| 87 | 87 | ||
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c75413d05a63..ce53639a864a 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include | |||
| @@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \ | |||
| 153 | # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) | 153 | # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) |
| 154 | cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) | 154 | cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) |
| 155 | 155 | ||
| 156 | # cc-if-fullversion | ||
| 157 | # Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1) | ||
| 158 | cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4)) | ||
| 159 | |||
| 160 | # cc-ldoption | 156 | # cc-ldoption |
| 161 | # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) | 157 | # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) |
| 162 | cc-ldoption = $(call try-run,\ | 158 | cc-ldoption = $(call try-run,\ |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 1c48572223d1..5a2d1c9578a0 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
| @@ -246,8 +246,6 @@ objtool_args += --no-fp | |||
| 246 | endif | 246 | endif |
| 247 | ifdef CONFIG_GCOV_KERNEL | 247 | ifdef CONFIG_GCOV_KERNEL |
| 248 | objtool_args += --no-unreachable | 248 | objtool_args += --no-unreachable |
| 249 | else | ||
| 250 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) | ||
| 251 | endif | 249 | endif |
| 252 | ifdef CONFIG_RETPOLINE | 250 | ifdef CONFIG_RETPOLINE |
| 253 | ifneq ($(RETPOLINE_CFLAGS),) | 251 | ifneq ($(RETPOLINE_CFLAGS),) |
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c index 1832100d1b27..6d41323be291 100644 --- a/tools/bpf/bpftool/map_perf_ring.c +++ b/tools/bpf/bpftool/map_perf_ring.c | |||
| @@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv) | |||
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | while (argc) { | 196 | while (argc) { |
| 197 | if (argc < 2) | 197 | if (argc < 2) { |
| 198 | BAD_ARG(); | 198 | BAD_ARG(); |
| 199 | goto err_close_map; | ||
| 200 | } | ||
| 199 | 201 | ||
| 200 | if (is_prefix(*argv, "cpu")) { | 202 | if (is_prefix(*argv, "cpu")) { |
| 201 | char *endptr; | 203 | char *endptr; |
| @@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv) | |||
| 221 | NEXT_ARG(); | 223 | NEXT_ARG(); |
| 222 | } else { | 224 | } else { |
| 223 | BAD_ARG(); | 225 | BAD_ARG(); |
| 226 | goto err_close_map; | ||
| 224 | } | 227 | } |
| 225 | 228 | ||
| 226 | do_all = false; | 229 | do_all = false; |
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 56c4b3f8a01b..439b8a27488d 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat | |||
| @@ -759,12 +759,18 @@ class DebugfsProvider(Provider): | |||
| 759 | if len(vms) == 0: | 759 | if len(vms) == 0: |
| 760 | self.do_read = False | 760 | self.do_read = False |
| 761 | 761 | ||
| 762 | self.paths = filter(lambda x: "{}-".format(pid) in x, vms) | 762 | self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms)) |
| 763 | 763 | ||
| 764 | else: | 764 | else: |
| 765 | self.paths = [] | 765 | self.paths = [] |
| 766 | self.do_read = True | 766 | self.do_read = True |
| 767 | self.reset() | 767 | |
| 768 | def _verify_paths(self): | ||
| 769 | """Remove invalid paths""" | ||
| 770 | for path in self.paths: | ||
| 771 | if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)): | ||
| 772 | self.paths.remove(path) | ||
| 773 | continue | ||
| 768 | 774 | ||
| 769 | def read(self, reset=0, by_guest=0): | 775 | def read(self, reset=0, by_guest=0): |
| 770 | """Returns a dict with format:'file name / field -> current value'. | 776 | """Returns a dict with format:'file name / field -> current value'. |
| @@ -780,6 +786,7 @@ class DebugfsProvider(Provider): | |||
| 780 | # If no debugfs filtering support is available, then don't read. | 786 | # If no debugfs filtering support is available, then don't read. |
| 781 | if not self.do_read: | 787 | if not self.do_read: |
| 782 | return results | 788 | return results |
| 789 | self._verify_paths() | ||
| 783 | 790 | ||
| 784 | paths = self.paths | 791 | paths = self.paths |
| 785 | if self._pid == 0: | 792 | if self._pid == 0: |
| @@ -1096,15 +1103,16 @@ class Tui(object): | |||
| 1096 | pid = self.stats.pid_filter | 1103 | pid = self.stats.pid_filter |
| 1097 | self.screen.erase() | 1104 | self.screen.erase() |
| 1098 | gname = self.get_gname_from_pid(pid) | 1105 | gname = self.get_gname_from_pid(pid) |
| 1106 | self._gname = gname | ||
| 1099 | if gname: | 1107 | if gname: |
| 1100 | gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...' | 1108 | gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...' |
| 1101 | if len(gname) > MAX_GUEST_NAME_LEN | 1109 | if len(gname) > MAX_GUEST_NAME_LEN |
| 1102 | else gname)) | 1110 | else gname)) |
| 1103 | if pid > 0: | 1111 | if pid > 0: |
| 1104 | self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}' | 1112 | self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname) |
| 1105 | .format(pid, gname), curses.A_BOLD) | ||
| 1106 | else: | 1113 | else: |
| 1107 | self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) | 1114 | self._headline = 'kvm statistics - summary' |
| 1115 | self.screen.addstr(0, 0, self._headline, curses.A_BOLD) | ||
| 1108 | if self.stats.fields_filter: | 1116 | if self.stats.fields_filter: |
| 1109 | regex = self.stats.fields_filter | 1117 | regex = self.stats.fields_filter |
| 1110 | if len(regex) > MAX_REGEX_LEN: | 1118 | if len(regex) > MAX_REGEX_LEN: |
| @@ -1162,6 +1170,19 @@ class Tui(object): | |||
| 1162 | 1170 | ||
| 1163 | return sorted_items | 1171 | return sorted_items |
| 1164 | 1172 | ||
| 1173 | if not self._is_running_guest(self.stats.pid_filter): | ||
| 1174 | if self._gname: | ||
| 1175 | try: # ...to identify the guest by name in case it's back | ||
| 1176 | pids = self.get_pid_from_gname(self._gname) | ||
| 1177 | if len(pids) == 1: | ||
| 1178 | self._refresh_header(pids[0]) | ||
| 1179 | self._update_pid(pids[0]) | ||
| 1180 | return | ||
| 1181 | except: | ||
| 1182 | pass | ||
| 1183 | self._display_guest_dead() | ||
| 1184 | # leave final data on screen | ||
| 1185 | return | ||
| 1165 | row = 3 | 1186 | row = 3 |
| 1166 | self.screen.move(row, 0) | 1187 | self.screen.move(row, 0) |
| 1167 | self.screen.clrtobot() | 1188 | self.screen.clrtobot() |
| @@ -1184,6 +1205,7 @@ class Tui(object): | |||
| 1184 | # print events | 1205 | # print events |
| 1185 | tavg = 0 | 1206 | tavg = 0 |
| 1186 | tcur = 0 | 1207 | tcur = 0 |
| 1208 | guest_removed = False | ||
| 1187 | for key, values in get_sorted_events(self, stats): | 1209 | for key, values in get_sorted_events(self, stats): |
| 1188 | if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): | 1210 | if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): |
| 1189 | break | 1211 | break |
| @@ -1191,7 +1213,10 @@ class Tui(object): | |||
| 1191 | key = self.get_gname_from_pid(key) | 1213 | key = self.get_gname_from_pid(key) |
| 1192 | if not key: | 1214 | if not key: |
| 1193 | continue | 1215 | continue |
| 1194 | cur = int(round(values.delta / sleeptime)) if values.delta else '' | 1216 | cur = int(round(values.delta / sleeptime)) if values.delta else 0 |
| 1217 | if cur < 0: | ||
| 1218 | guest_removed = True | ||
| 1219 | continue | ||
| 1195 | if key[0] != ' ': | 1220 | if key[0] != ' ': |
| 1196 | if values.delta: | 1221 | if values.delta: |
| 1197 | tcur += values.delta | 1222 | tcur += values.delta |
| @@ -1204,13 +1229,21 @@ class Tui(object): | |||
| 1204 | values.value * 100 / float(ltotal), cur)) | 1229 | values.value * 100 / float(ltotal), cur)) |
| 1205 | row += 1 | 1230 | row += 1 |
| 1206 | if row == 3: | 1231 | if row == 3: |
| 1207 | self.screen.addstr(4, 1, 'No matching events reported yet') | 1232 | if guest_removed: |
| 1233 | self.screen.addstr(4, 1, 'Guest removed, updating...') | ||
| 1234 | else: | ||
| 1235 | self.screen.addstr(4, 1, 'No matching events reported yet') | ||
| 1208 | if row > 4: | 1236 | if row > 4: |
| 1209 | tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' | 1237 | tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' |
| 1210 | self.screen.addstr(row, 1, '%-40s %10d %8s' % | 1238 | self.screen.addstr(row, 1, '%-40s %10d %8s' % |
| 1211 | ('Total', total, tavg), curses.A_BOLD) | 1239 | ('Total', total, tavg), curses.A_BOLD) |
| 1212 | self.screen.refresh() | 1240 | self.screen.refresh() |
| 1213 | 1241 | ||
| 1242 | def _display_guest_dead(self): | ||
| 1243 | marker = ' Guest is DEAD ' | ||
| 1244 | y = min(len(self._headline), 80 - len(marker)) | ||
| 1245 | self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT) | ||
| 1246 | |||
| 1214 | def _show_msg(self, text): | 1247 | def _show_msg(self, text): |
| 1215 | """Display message centered text and exit on key press""" | 1248 | """Display message centered text and exit on key press""" |
| 1216 | hint = 'Press any key to continue' | 1249 | hint = 'Press any key to continue' |
| @@ -1219,10 +1252,10 @@ class Tui(object): | |||
| 1219 | (x, term_width) = self.screen.getmaxyx() | 1252 | (x, term_width) = self.screen.getmaxyx() |
| 1220 | row = 2 | 1253 | row = 2 |
| 1221 | for line in text: | 1254 | for line in text: |
| 1222 | start = (term_width - len(line)) / 2 | 1255 | start = (term_width - len(line)) // 2 |
| 1223 | self.screen.addstr(row, start, line) | 1256 | self.screen.addstr(row, start, line) |
| 1224 | row += 1 | 1257 | row += 1 |
| 1225 | self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint, | 1258 | self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint, |
| 1226 | curses.A_STANDOUT) | 1259 | curses.A_STANDOUT) |
| 1227 | self.screen.getkey() | 1260 | self.screen.getkey() |
| 1228 | 1261 | ||
| @@ -1319,6 +1352,12 @@ class Tui(object): | |||
| 1319 | msg = '"' + str(val) + '": Invalid value' | 1352 | msg = '"' + str(val) + '": Invalid value' |
| 1320 | self._refresh_header() | 1353 | self._refresh_header() |
| 1321 | 1354 | ||
| 1355 | def _is_running_guest(self, pid): | ||
| 1356 | """Check if pid is still a running process.""" | ||
| 1357 | if not pid: | ||
| 1358 | return True | ||
| 1359 | return os.path.isdir(os.path.join('/proc/', str(pid))) | ||
| 1360 | |||
| 1322 | def _show_vm_selection_by_guest(self): | 1361 | def _show_vm_selection_by_guest(self): |
| 1323 | """Draws guest selection mask. | 1362 | """Draws guest selection mask. |
| 1324 | 1363 | ||
| @@ -1346,7 +1385,7 @@ class Tui(object): | |||
| 1346 | if not guest or guest == '0': | 1385 | if not guest or guest == '0': |
| 1347 | break | 1386 | break |
| 1348 | if guest.isdigit(): | 1387 | if guest.isdigit(): |
| 1349 | if not os.path.isdir(os.path.join('/proc/', guest)): | 1388 | if not self._is_running_guest(guest): |
| 1350 | msg = '"' + guest + '": Not a running process' | 1389 | msg = '"' + guest + '": Not a running process' |
| 1351 | continue | 1390 | continue |
| 1352 | pid = int(guest) | 1391 | pid = int(guest) |
